text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```c
/*
*
*/
/**
* @file
*
* System workqueue.
*/
#include <zephyr/kernel.h>
#include <zephyr/init.h>
/* Stack backing the system work queue thread; size comes from Kconfig. */
static K_KERNEL_STACK_DEFINE(sys_work_q_stack,
CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE);
/* The single, globally shared system work queue instance. */
struct k_work_q k_sys_work_q;
static int k_sys_work_q_init(void)
{
struct k_work_queue_config cfg = {
.name = "sysworkq",
.no_yield = IS_ENABLED(CONFIG_SYSTEM_WORKQUEUE_NO_YIELD),
.essential = true,
};
k_work_queue_start(&k_sys_work_q,
sys_work_q_stack,
K_KERNEL_STACK_SIZEOF(sys_work_q_stack),
CONFIG_SYSTEM_WORKQUEUE_PRIORITY, &cfg);
return 0;
}
SYS_INIT(k_sys_work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
``` | /content/code_sandbox/kernel/system_work_q.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 169 |
```unknown
#
menu "Device Options"
config DEVICE_DEPS
bool "Store device dependencies"
help
When enabled, device dependencies will be stored so that they can be
queried at runtime. Device dependencies are typically inferred from
devicetree. Enabling this option will increase ROM usage (or RAM if
dynamic device dependencies are enabled).
config DEVICE_DEPS_DYNAMIC
bool "Dynamic device dependencies"
depends on DEVICE_DEPS
help
Option that makes it possible to manipulate device dependencies at
runtime.
config DEVICE_MUTABLE
bool "Mutable devices [EXPERIMENTAL]"
select EXPERIMENTAL
help
Support mutable devices. Mutable devices are instantiated in SRAM
instead of Flash and are runtime modifiable in kernel mode.
config DEVICE_DT_METADATA
bool "Store additional devicetree metadata for each device"
help
If enabled, additional data from the devicetree will be stored for
each device. This allows you to use device_get_by_dt_nodelabel(),
device_get_dt_metadata(), etc.
endmenu
menu "Initialization Priorities"
config KERNEL_INIT_PRIORITY_OBJECTS
int "Kernel objects initialization priority"
default 30
help
Kernel objects use this priority for initialization. This
priority needs to be higher than minimal default initialization
priority.
config KERNEL_INIT_PRIORITY_LIBC
int "LIBC initialization priority"
default 35
help
LIBC uses this priority for initialization. This
priority needs to be higher than minimal default initialization
priority.
config KERNEL_INIT_PRIORITY_DEFAULT
int "Default init priority"
default 40
help
Default minimal init priority for each init level.
config KERNEL_INIT_PRIORITY_DEVICE
int "Default init priority for device drivers"
default 50
help
Device driver, that depends on common components, such as
interrupt controller, but does not depend on other devices,
uses this init priority.
config APPLICATION_INIT_PRIORITY
int "Default init priority for application level drivers"
default 90
help
This priority level is for end-user drivers such as sensors and display
which have no inward dependencies.
endmenu
``` | /content/code_sandbox/kernel/Kconfig.device | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 444 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/rb.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/sys_io.h>
#include <ksched.h>
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <stdbool.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/sys/mutex.h>
#include <inttypes.h>
#include <zephyr/linker/linker-defs.h>
#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */
/* TODO: Find a better place to put this. Since we pull the entire
* lib..__modules__crypto__mbedtls.a globals into app shared memory
* section, we can't put this in zephyr_init.c of the mbedtls module.
*/
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif /* CONFIG_MBEDTLS */
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/* The originally synchronization strategy made heavy use of recursive
* irq_locking, which ports poorly to spinlocks which are
* non-recursive. Rather than try to redesign as part of
* spinlockification, this uses multiple locks to preserve the
* original semantics exactly. The locks are named for the data they
* protect where possible, or just for the code that uses them where
* not.
*/
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock; /* kobj dlist */
static struct k_spinlock objfree_lock; /* k_object_free */
#ifdef CONFIG_GEN_PRIV_STACKS
/* On ARM & ARC MPU we may have two different alignment requirement
* when dynamically allocating thread stacks, one for the privileged
* stack and other for the user stack, so we need to account the
* worst alignment scenario and reserve space for that.
*/
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
#define STACK_ELEMENT_DATA_SIZE(size) \
(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
Z_THREAD_STACK_OBJ_ALIGN(size) + K_THREAD_STACK_LEN(size))
#else
#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
K_THREAD_STACK_LEN(size))
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
#define STACK_ELEMENT_DATA_SIZE(size) K_THREAD_STACK_LEN(size)
#endif /* CONFIG_GEN_PRIV_STACKS */
#endif /* CONFIG_DYNAMIC_OBJECTS */
static struct k_spinlock obj_lock; /* kobj struct data */
#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)
#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif /* CONFIG_DYNAMIC_OBJECTS */
static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);
/**
 * @brief Map a kernel object type enum to a printable name.
 *
 * @param otype Object type to describe
 * @return "generic" for K_OBJ_ANY, a generated name for known types,
 *         "?" for out-of-range values, or NULL when CONFIG_LOG is off.
 */
const char *otype_to_str(enum k_objects otype)
{
const char *ret;
/* -fdata-sections doesn't work right except in very recent
* GCC and these literal strings would appear in the binary even if
* otype_to_str was omitted by the linker
*/
#ifdef CONFIG_LOG
switch (otype) {
/* otype-to-str.h is generated automatically during build by
* gen_kobject_list.py
*/
case K_OBJ_ANY:
ret = "generic";
break;
#include <zephyr/otype-to-str.h>
default:
ret = "?";
break;
}
#else
/* Without logging there is no consumer for the strings */
ARG_UNUSED(otype);
ret = NULL;
#endif /* CONFIG_LOG */
return ret;
}
/* Context passed through the wordlist walk when a parent thread's object
 * permissions are inherited by a child thread.
 */
struct perm_ctx {
int parent_id;   /* thread index of the parent */
int child_id;    /* thread index of the child being granted access */
struct k_thread *parent; /* parent thread object, excluded from the grant */
};
#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/build/gen_kobject_list.py. The privilege
* mode stacks are allocated as an array. The base of the array is
* aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
*/
/* Look up the privileged-mode stack associated with a user stack object.
 * Asserts (debug builds) that the pointer is a known stack object.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
struct k_object *obj = k_object_find(stack);
__ASSERT(obj != NULL, "stack object not found");
__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
"bad stack object");
return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */
#ifdef CONFIG_DYNAMIC_OBJECTS
/*
* Note that dyn_obj->data is where the kernel object resides
* so it is the one that actually needs to be aligned.
* Due to the need to get the fields inside struct dyn_obj
* from kernel object pointers (i.e. from data[]), the offset
* from data[] needs to be fixed at build time. Therefore,
* data[] is declared with __aligned(), such that when dyn_obj
* is allocated with alignment, data[] is also aligned.
* Due to this requirement, data[] needs to be aligned with
* the maximum alignment needed for all kernel objects
* (hence the following DYN_OBJ_DATA_ALIGN).
*/
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD (ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *))
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#ifndef CONFIG_MPU_STACK_GUARD
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif /* !CONFIG_MPU_STACK_GUARD */
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */
#define DYN_OBJ_DATA_ALIGN \
MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))
/* Tracking node for one dynamically allocated kernel object. */
struct dyn_obj {
struct k_object kobj;   /* type, permission bitmap, flags, name pointer */
sys_dnode_t dobj_list;  /* link in the global obj_list */
/* The object itself */
void *data;             /* separately allocated buffer holding the object */
};
extern struct k_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
void *context);
/*
* Linked list of allocated kernel objects, for iteration over all allocated
* objects (and potentially deleting them during iteration).
*/
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);
/*
* TODO: Write some hash table code that will replace obj_list.
*/
/* Return the base allocation size for a kernel object type. Sizes for
 * known types come from the generated otype-to-size.h table; anything
 * else is assumed to be a device struct.
 */
static size_t obj_size_get(enum k_objects otype)
{
size_t ret;
switch (otype) {
#include <zephyr/otype-to-size.h>
default:
ret = sizeof(const struct device);
break;
}
return ret;
}
/* Return the allocation alignment for a kernel object type. Thread
 * objects may need a stricter, architecture-defined alignment; every
 * other type uses the alignment of the tracking container.
 */
static size_t obj_align_get(enum k_objects otype)
{
	size_t align = __alignof(struct dyn_obj);

#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
	if (otype == K_OBJ_THREAD) {
		align = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
	}
#else
	ARG_UNUSED(otype);
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */

	return align;
}
static struct dyn_obj *dyn_object_find(const void *obj)
{
struct dyn_obj *node;
k_spinlock_key_t key;
/* For any dynamically allocated kernel object, the object
* pointer is just a member of the containing struct dyn_obj,
* so just a little arithmetic is necessary to locate the
* corresponding struct rbnode
*/
key = k_spin_lock(&lists_lock);
SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
if (node->kobj.name == obj) {
goto end;
}
}
/* No object found */
node = NULL;
end:
k_spin_unlock(&lists_lock, key);
return node;
}
/**
* @internal
*
* @brief Allocate a new thread index for a new thread.
*
* This finds an unused thread index that can be assigned to a new
* thread. If too many threads have been allocated, the kernel will
* run out of indexes and this function will fail.
*
* Note that if an unused index is found, that index will be marked as
* used after return of this function.
*
* @param tidx The new thread index if successful
*
* @return true if successful, false if failed
**/
/**
 * @internal
 *
 * @brief Claim an unused thread index.
 *
 * Scans the free-index bitmap for a set bit; the bit found is cleared
 * (marking the index in use) and any stale permissions recorded against
 * that index on existing objects are wiped.
 *
 * @param tidx Receives the claimed index on success
 * @return true on success, false when every index is in use
 */
static bool thread_idx_alloc(uintptr_t *tidx)
{
	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		int bit = find_lsb_set(_thread_idx_map[i]);

		if (bit == 0) {
			/* No free index in this byte; try the next one */
			continue;
		}

		*tidx = (i * 8) + (bit - 1);
		_thread_idx_map[i] &= ~BIT(bit - 1);

		/* Clear permission from all objects */
		k_object_wordlist_foreach(clear_perms_cb, (void *)*tidx);

		return true;
	}

	return false;
}
/**
* @internal
*
* @brief Free a thread index.
*
* This frees a thread index so it can be used by another
* thread.
*
* @param tidx The thread index to be freed
**/
/**
 * @internal
 *
 * @brief Return a thread index to the free pool.
 *
 * Permissions tied to the index are scrubbed first so the next thread
 * that receives it does not inherit stale access rights.
 *
 * @param tidx Thread index being released
 */
static void thread_idx_free(uintptr_t tidx)
{
	const unsigned int bits_per_entry = NUM_BITS(_thread_idx_map[0]);

	/* To prevent leaked permission when index is recycled */
	k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	/* Mark the corresponding bitmap bit as free again */
	_thread_idx_map[tidx / bits_per_entry] |= BIT(tidx % bits_per_entry);
}
/**
 * @brief Allocate backing storage and tracking metadata for a dynamic object.
 *
 * Allocates a struct dyn_obj bookkeeping node plus a separate buffer for
 * the object itself, links the node into the global obj_list, and returns
 * the embedded tracking struct. Thread stack objects get special size and
 * alignment handling (and, with CONFIG_GEN_PRIV_STACKS, a trailing
 * z_stack_data record inside the buffer).
 *
 * @param otype Object type being created
 * @param align Alignment for the data buffer (non-stack objects)
 * @param size  Stack objects: requested usable stack size (must be > 0);
 *              other objects: extra bytes beyond the base object size
 * @return Pointer to the embedded struct k_object, or NULL on failure
 */
static struct k_object *dynamic_object_create(enum k_objects otype, size_t align,
					      size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(struct dyn_obj));
	if (dyn == NULL) {
		return NULL;
	}

	if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
		size_t adjusted_size;

		if (size == 0) {
			k_free(dyn);
			return NULL;
		}

		adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
		dyn->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
						   adjusted_size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}

#ifdef CONFIG_GEN_PRIV_STACKS
		/* Stack metadata lives at the very end of the buffer */
		struct z_stack_data *stack_data = (struct z_stack_data *)
			((uint8_t *)dyn->data + adjusted_size - sizeof(*stack_data));
		stack_data->priv = (uint8_t *)dyn->data;
		stack_data->size = adjusted_size;
		dyn->kobj.data.stack_data = stack_data;
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
		/* User stack starts past the privileged stack, re-aligned */
		dyn->kobj.name = (void *)ROUND_UP(
			((uint8_t *)dyn->data + CONFIG_PRIVILEGED_STACK_SIZE),
			Z_THREAD_STACK_OBJ_ALIGN(size));
#else
		dyn->kobj.name = dyn->data;
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
		dyn->kobj.name = dyn->data;
		dyn->kobj.data.stack_size = adjusted_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
	} else {
		dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
		if (dyn->data == NULL) {
			/* Bug fix: free the tracking node itself. The original
			 * code freed dyn->data (NULL, a no-op) and leaked dyn.
			 */
			k_free(dyn);
			return NULL;
		}
		dyn->kobj.name = dyn->data;
	}

	dyn->kobj.type = otype;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}
/* Create an untyped (K_OBJ_ANY) dynamic kernel object with the requested
 * alignment and size, logging on allocation failure.
 */
struct k_object *k_object_create_dynamic_aligned(size_t align, size_t size)
{
	struct k_object *ko = dynamic_object_create(K_OBJ_ANY, align, size);

	if (ko == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
	}

	return ko;
}
/**
 * @brief Allocate a dynamic kernel object of a concrete type.
 *
 * @param otype Requested type; must be strictly between K_OBJ_ANY and
 *              K_OBJ_LAST and not one of the forbidden types below
 * @param size Extra size for variable-sized objects (0 for fixed-size)
 * @return Pointer to the usable object memory, or NULL on failure
 */
static void *z_object_alloc(enum k_objects otype, size_t size)
{
struct k_object *zo;
uintptr_t tidx = 0;
if ((otype <= K_OBJ_ANY) || (otype >= K_OBJ_LAST)) {
LOG_ERR("bad object type %d requested", otype);
return NULL;
}
switch (otype) {
case K_OBJ_THREAD:
/* Threads additionally need a free thread index reserved up front */
if (!thread_idx_alloc(&tidx)) {
LOG_ERR("out of free thread indexes");
return NULL;
}
break;
/* The following are currently not allowed at all */
case K_OBJ_FUTEX: /* Lives in user memory */
case K_OBJ_SYS_MUTEX: /* Lives in user memory */
case K_OBJ_NET_SOCKET: /* Indeterminate size */
LOG_ERR("forbidden object type '%s' requested",
otype_to_str(otype));
return NULL;
default:
/* Remainder within bounds are permitted */
break;
}
zo = dynamic_object_create(otype, obj_align_get(otype), size);
if (zo == NULL) {
/* Roll back the index reservation on failure */
if (otype == K_OBJ_THREAD) {
thread_idx_free(tidx);
}
return NULL;
}
if (otype == K_OBJ_THREAD) {
zo->data.thread_id = tidx;
}
/* The allocating thread implicitly gets permission on kernel objects
* that it allocates
*/
k_thread_perms_set(zo, _current);
/* Activates reference counting logic for automatic disposal when
* all permissions have been revoked
*/
zo->flags |= K_OBJ_FLAG_ALLOC;
return zo->name;
}
/* Syscall implementation: allocate a fixed-size dynamic kernel object. */
void *z_impl_k_object_alloc(enum k_objects otype)
{
return z_object_alloc(otype, 0);
}
/* Syscall implementation: allocate a dynamic kernel object with extra size
 * (for variable-sized objects such as stacks).
 */
void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
{
return z_object_alloc(otype, size);
}
/**
 * @brief Free a dynamically allocated kernel object.
 *
 * Unlinks the object from the tracking list (and releases its thread
 * index, for threads) under the free lock, then releases the memory
 * outside the lock. No-op if obj is not a known dynamic object.
 */
void k_object_free(void *obj)
{
struct dyn_obj *dyn;
/* This function is intentionally not exposed to user mode.
* There's currently no robust way to track that an object isn't
* being used by some other thread
*/
k_spinlock_key_t key = k_spin_lock(&objfree_lock);
dyn = dyn_object_find(obj);
if (dyn != NULL) {
sys_dlist_remove(&dyn->dobj_list);
if (dyn->kobj.type == K_OBJ_THREAD) {
thread_idx_free(dyn->kobj.data.thread_id);
}
}
k_spin_unlock(&objfree_lock, key);
/* Free outside the spinlock; the node is already unlinked */
if (dyn != NULL) {
k_free(dyn->data);
k_free(dyn);
}
}
/* Resolve a pointer to its kernel object metadata, checking the static
 * (gperf-generated) table first and then the dynamic object list.
 * Returns NULL for unknown pointers.
 */
struct k_object *k_object_find(const void *obj)
{
	struct k_object *ko = z_object_gperf_find(obj);

	if (ko != NULL) {
		return ko;
	}

	/* Not statically known; try dynamically allocated objects. The
	 * cast to pointer-to-non-const violates MISRA 11.8 but is
	 * justified since dynamic objects are never declared const.
	 */
	struct dyn_obj *dyn = dyn_object_find(obj);

	return (dyn != NULL) ? &dyn->kobj : NULL;
}
/* Invoke func on every kernel object: first all statically declared
 * objects, then every dynamic object (safe iteration, so the callback
 * may delete the node it is given).
 */
void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *node;
	struct dyn_obj *tmp;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, node, tmp, dobj_list) {
		func(&node->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */
/* Return the userspace thread index for a thread object, or -1 (which
 * wraps to UINT_MAX in the unsigned return type; callers store the
 * result in an int and compare against -1) when the thread is unknown.
 */
static unsigned int thread_index_get(struct k_thread *thread)
{
struct k_object *ko;
ko = k_object_find(thread);
if (ko == NULL) {
return -1;
}
return ko->data.thread_id;
}
/**
 * @brief Drop one thread's permission on an object and dispose of it if
 *        it was dynamically allocated and no permissions remain.
 *
 * @param ko Object whose permission bitmap is updated
 * @param index Thread index whose permission bit is cleared
 */
static void unref_check(struct k_object *ko, uintptr_t index)
{
k_spinlock_key_t key = k_spin_lock(&obj_lock);
sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
#ifdef CONFIG_DYNAMIC_OBJECTS
if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
/* skip unref check for static kernel object */
goto out;
}
void *vko = ko;
struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);
__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");
/* Any remaining permission bit keeps the object alive */
for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
if (ko->perms[i] != 0U) {
goto out;
}
}
/* This object has no more references. Some objects may have
* dynamically allocated resources, require cleanup, or need to be
* marked as uninitialized when all references are gone. What
* specifically needs to happen depends on the object type.
*/
switch (ko->type) {
#ifdef CONFIG_PIPES
case K_OBJ_PIPE:
k_pipe_cleanup((struct k_pipe *)ko->name);
break;
#endif /* CONFIG_PIPES */
case K_OBJ_MSGQ:
k_msgq_cleanup((struct k_msgq *)ko->name);
break;
case K_OBJ_STACK:
k_stack_cleanup((struct k_stack *)ko->name);
break;
default:
/* Nothing to do */
break;
}
/* Unlink and release both the object buffer and its tracking node */
sys_dlist_remove(&dyn->dobj_list);
k_free(dyn->data);
k_free(dyn);
out:
#endif /* CONFIG_DYNAMIC_OBJECTS */
k_spin_unlock(&obj_lock, key);
}
static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
{
struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;
if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
((struct k_thread *)ko->name != ctx->parent)) {
sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
}
}
void k_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
struct perm_ctx ctx = {
thread_index_get(parent),
thread_index_get(child),
parent
};
if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
k_object_wordlist_foreach(wordlist_cb, &ctx);
}
}
/* Grant a thread permission on an object. No-op for threads with no
 * userspace index.
 */
void k_thread_perms_set(struct k_object *ko, struct k_thread *thread)
{
	int idx = thread_index_get(thread);

	if (idx == -1) {
		return;
	}
	sys_bitfield_set_bit((mem_addr_t)&ko->perms, idx);
}
/* Revoke a thread's permission on an object and dispose of the object if
 * it was dynamic and no references remain.
 */
void k_thread_perms_clear(struct k_object *ko, struct k_thread *thread)
{
	int idx = thread_index_get(thread);

	if (idx == -1) {
		return;
	}
	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, idx);
	unref_check(ko, idx);
}
static void clear_perms_cb(struct k_object *ko, void *ctx_ptr)
{
uintptr_t id = (uintptr_t)ctx_ptr;
unref_check(ko, id);
}
/* Revoke the given thread's permission on every kernel object. */
void k_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t idx = thread_index_get(thread);

	if ((int)idx == -1) {
		/* Thread has no userspace index; nothing to revoke */
		return;
	}
	k_object_wordlist_foreach(clear_perms_cb, (void *)idx);
}
/* Check whether the current thread may access ko: public objects are
 * open to all; otherwise the thread's permission bit decides. Returns
 * nonzero when access is allowed.
 */
static int thread_perms_test(struct k_object *ko)
{
	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	int idx = thread_index_get(_current);

	if (idx == -1) {
		return 0;
	}
	return sys_bitfield_test_bit((mem_addr_t)&ko->perms, idx);
}
/* Log a permission-denied diagnostic for the current thread, including a
 * hex dump of the object's permission bitmap.
 */
static void dump_permission_error(struct k_object *ko)
{
int index = thread_index_get(_current);
LOG_ERR("thread %p (%d) does not have permission on %s %p",
_current, index,
otype_to_str(ko->type), ko->name);
LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}
/**
 * @brief Log a diagnostic for a failed kernel object validation.
 *
 * @param retval Error code from k_object_validate()
 * @param obj User-supplied object pointer that failed validation
 * @param ko Resolved object metadata, or NULL if unknown (only -EBADF
 *           tolerates NULL here)
 * @param otype Object type the caller expected
 */
void k_object_dump_error(int retval, const void *obj, struct k_object *ko,
enum k_objects otype)
{
switch (retval) {
case -EBADF:
LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
if (ko == NULL) {
LOG_ERR("address is not a known kernel object");
} else {
LOG_ERR("address is actually a %s",
otype_to_str(ko->type));
}
break;
case -EPERM:
dump_permission_error(ko);
break;
case -EINVAL:
LOG_ERR("%p used before initialization", obj);
break;
case -EADDRINUSE:
LOG_ERR("%p %s in use", obj, otype_to_str(otype));
break;
default:
/* Not handled error */
break;
}
}
/* Syscall implementation: grant a thread permission on an object.
 * Silently ignores pointers that are not kernel objects.
 */
void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko == NULL) {
		return;
	}
	k_thread_perms_set(ko, thread);
}
/* Revoke a thread's permission on an object. Silently ignores pointers
 * that are not kernel objects.
 */
void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko == NULL) {
		return;
	}
	k_thread_perms_clear(ko, thread);
}
/* Syscall implementation: the calling thread drops its own permission on
 * the object.
 */
void z_impl_k_object_release(const void *object)
{
k_object_access_revoke(object, _current);
}
/* Mark an object public so every thread may access it without an
 * explicit grant. No-op for unknown pointers.
 */
void k_object_access_all_grant(const void *object)
{
	struct k_object *ko = k_object_find(object);

	if (ko == NULL) {
		return;
	}
	ko->flags |= K_OBJ_FLAG_PUBLIC;
}
/**
 * @brief Validate a kernel object for a syscall.
 *
 * @param ko Resolved object metadata (may be NULL for unknown pointers)
 * @param otype Expected type, or K_OBJ_ANY to accept any type
 * @param init Required initialization state (_OBJ_INIT_TRUE/FALSE/ANY)
 * @return 0 on success; -EBADF for bad/mistyped objects; -EPERM when the
 *         current thread lacks permission; -EINVAL when an initialized
 *         object was required; -EADDRINUSE when an uninitialized one was.
 */
int k_object_validate(struct k_object *ko, enum k_objects otype,
enum _obj_init_check init)
{
if (unlikely((ko == NULL) ||
((otype != K_OBJ_ANY) && (ko->type != otype)))) {
return -EBADF;
}
/* Manipulation of any kernel objects by a user thread requires that
* thread be granted access first, even for uninitialized objects
*/
if (unlikely(thread_perms_test(ko) == 0)) {
return -EPERM;
}
/* Initialization state checks. _OBJ_INIT_ANY, we don't care */
if (likely(init == _OBJ_INIT_TRUE)) {
/* Object MUST be initialized */
if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
return -EINVAL;
}
} else if (init == _OBJ_INIT_FALSE) { /* _OBJ_INIT_FALSE case */
/* Object MUST NOT be initialized */
if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
return -EADDRINUSE;
}
} else {
/* _OBJ_INIT_ANY */
}
return 0;
}
/* Mark a kernel object as initialized. For userspace callers all
 * validation has already happened in k_object_validate(); this runs
 * after the object's own init and just finalizes its tracked state.
 */
void k_object_init(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will
		 * never be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}
/* Reset an object for reuse: wipe all permissions, grant the calling
 * thread access, and mark the object initialized.
 */
void k_object_recycle(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko == NULL) {
		return;
	}

	(void)memset(ko->perms, 0, sizeof(ko->perms));
	k_thread_perms_set(ko, _current);
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}
/* Mark an object uninitialized so further syscalls on it require
 * re-initialization first. See comments in k_object_init().
 */
void k_object_uninit(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko != NULL) {
		ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
	}
}
/*
* Copy to/from helper functions used in syscall handlers
*/
/* Copy a user-mode buffer into a freshly allocated kernel-side buffer
 * from the thread resource pool. Returns NULL when the caller lacks
 * read access to src or the pool is exhausted.
 */
void *k_usermode_alloc_from_copy(const void *src, size_t size)
{
	/* Does the caller in user mode have access to read this memory? */
	if (K_SYSCALL_MEMORY_READ(src, size)) {
		return NULL;
	}

	void *dst = z_thread_malloc(size);

	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		return NULL;
	}

	(void)memcpy(dst, src, size);

	return dst;
}
/* Common helper for copies crossing the user/kernel boundary. Verifies
 * the user-mode side is accessible before copying. Returns 0 on success
 * or (positive) EFAULT when the caller lacks access.
 */
static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int access_denied = to_user ? K_SYSCALL_MEMORY_WRITE(dst, size)
				    : K_SYSCALL_MEMORY_READ(src, size);

	if (access_denied) {
		return EFAULT;
	}

	(void)memcpy(dst, src, size);

	return 0;
}
/* Copy from a user-mode buffer into kernel memory; 0 or EFAULT. */
int k_usermode_from_copy(void *dst, const void *src, size_t size)
{
return user_copy(dst, src, size, false);
}
/* Copy from kernel memory out to a user-mode buffer; 0 or EFAULT. */
int k_usermode_to_copy(void *dst, const void *src, size_t size)
{
return user_copy(dst, src, size, true);
}
/**
 * @brief Copy a NUL-terminated string from user mode into a new kernel
 *        buffer allocated from the thread resource pool.
 *
 * @param src User-mode string
 * @param maxlen Maximum length to accept, including the terminator
 * @return Newly allocated, guaranteed-terminated copy, or NULL on any
 *         failure (inaccessible, unterminated within maxlen, overflow,
 *         or out of memory)
 */
char *k_usermode_string_alloc_copy(const char *src, size_t maxlen)
{
size_t actual_len;
int err;
char *ret = NULL;
actual_len = k_usermode_string_nlen(src, maxlen, &err);
if (err != 0) {
goto out;
}
if (actual_len == maxlen) {
/* Not NULL terminated */
LOG_ERR("string too long %p (%zu)", src, actual_len);
goto out;
}
/* +1 for the terminator; reject if that addition would wrap */
if (size_add_overflow(actual_len, 1, &actual_len)) {
LOG_ERR("overflow");
goto out;
}
ret = k_usermode_alloc_from_copy(src, actual_len);
/* Someone may have modified the source string during the above
* checks. Ensure what we actually copied is still terminated
* properly.
*/
if (ret != NULL) {
ret[actual_len - 1U] = '\0';
}
out:
return ret;
}
/**
 * @brief Copy a NUL-terminated string from user mode into a caller
 *        provided kernel buffer.
 *
 * @param dst Kernel buffer; must hold at least maxlen bytes
 * @param src User-mode string
 * @param maxlen Maximum length to accept, including the terminator
 * @return 0 on success; (positive) EFAULT on access failure; EINVAL if
 *         unterminated within maxlen or on length overflow
 */
int k_usermode_string_copy(char *dst, const char *src, size_t maxlen)
{
size_t actual_len;
int ret, err;
actual_len = k_usermode_string_nlen(src, maxlen, &err);
if (err != 0) {
ret = EFAULT;
goto out;
}
if (actual_len == maxlen) {
/* Not NULL terminated */
LOG_ERR("string too long %p (%zu)", src, actual_len);
ret = EINVAL;
goto out;
}
/* +1 for the terminator; reject if that addition would wrap */
if (size_add_overflow(actual_len, 1, &actual_len)) {
LOG_ERR("overflow");
ret = EINVAL;
goto out;
}
ret = k_usermode_from_copy(dst, src, actual_len);
/* See comment above in k_usermode_string_alloc_copy() */
dst[actual_len - 1] = '\0';
out:
return ret;
}
/*
* Application memory region initialization
*/
extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];
/* Zero the BSS portion of every application shared-memory region listed
 * in the linker-generated __app_shmem_regions table. With demand paging
 * and no boot-time BSS, runs twice (PRE_KERNEL_1 and POST_KERNEL) and
 * uses the pinned-region check below to decide which pass clears what.
 */
static int app_shmem_bss_zero(void)
{
struct z_app_region *region, *end;
end = (struct z_app_region *)&__app_shmem_regions_end[0];
region = (struct z_app_region *)&__app_shmem_regions_start[0];
for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for
* paging mechanism to be initialized before we can zero out BSS.
*/
extern bool z_sys_post_kernel;
bool do_clear = z_sys_post_kernel;
/* During pre-kernel init, z_sys_post_kernel == false, but
* with pinned rodata region, so clear. Otherwise skip.
* In post-kernel init, z_sys_post_kernel == true,
* skip those in pinned rodata region as they have already
* been cleared and possibly already in use. Otherwise clear.
*/
if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
do_clear = !do_clear;
}
if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
{
(void)memset(region->bss_start, 0, region->bss_size);
}
}
return 0;
}
SYS_INIT_NAMED(app_shmem_bss_zero_pre, app_shmem_bss_zero,
PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for
* paging mechanism to be initialized before we can zero out BSS.
*/
SYS_INIT_NAMED(app_shmem_bss_zero_post, app_shmem_bss_zero,
POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
/*
* Default handlers if otherwise unimplemented
*/
/* Dispatch-table entry for out-of-range syscall IDs: log the bad ID and
 * kill the offending caller via arch_syscall_oops(). Never returns.
 */
static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6,
void *ssf)
{
ARG_UNUSED(arg2);
ARG_UNUSED(arg3);
ARG_UNUSED(arg4);
ARG_UNUSED(arg5);
ARG_UNUSED(arg6);
LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
arch_syscall_oops(ssf);
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
/* Dispatch-table entry for valid but unimplemented syscall IDs: log and
 * kill the offending caller via arch_syscall_oops(). Never returns.
 */
static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6, void *ssf)
{
ARG_UNUSED(arg1);
ARG_UNUSED(arg2);
ARG_UNUSED(arg3);
ARG_UNUSED(arg4);
ARG_UNUSED(arg5);
ARG_UNUSED(arg6);
LOG_ERR("Unimplemented system call");
arch_syscall_oops(ssf);
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#include <zephyr/syscall_dispatch.c>
``` | /content/code_sandbox/kernel/userspace.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,777 |
```c
/*
*
*/
/**
* @file
* @brief Kernel initialization module
*
* This module contains routines that are used to initialize the kernel.
*/
#include <offsets_short.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>
#include <zephyr/debug/stack.h>
#include <zephyr/random/random.h>
#include <zephyr/linker/sections.h>
#include <zephyr/toolchain.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/linker/linker-defs.h>
#include <ksched.h>
#include <kthread.h>
#include <string.h>
#include <zephyr/sys/dlist.h>
#include <kernel_internal.h>
#include <zephyr/drivers/entropy.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/tracing/tracing.h>
#include <stdbool.h>
#include <zephyr/debug/gcov.h>
#include <kswap.h>
#include <zephyr/timing/timing.h>
#include <zephyr/logging/log.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/internal/syscall_handler.h>
LOG_MODULE_REGISTER(os, CONFIG_KERNEL_LOG_LEVEL);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS == CONFIG_MP_MAX_NUM_CPUS,
"CONFIG_MP_NUM_CPUS and CONFIG_MP_MAX_NUM_CPUS need to be set the same");
/* the only struct z_kernel instance */
__pinned_bss
struct z_kernel _kernel;
#ifdef CONFIG_PM
__pinned_bss atomic_t _cpus_active;
#endif
/* init/main and idle threads */
K_THREAD_PINNED_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
struct k_thread z_main_thread;
#ifdef CONFIG_MULTITHREADING
__pinned_bss
struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
static K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_idle_stacks,
CONFIG_MP_MAX_NUM_CPUS,
CONFIG_IDLE_STACK_SIZE);
/* Set up every statically defined thread: initialize each thread object,
 * grant any statically assigned object permissions, then (under a sched
 * lock, so none runs early) schedule those whose start delay is not
 * K_FOREVER.
 */
static void z_init_static_threads(void)
{
STRUCT_SECTION_FOREACH(_static_thread_data, thread_data) {
z_setup_new_thread(
thread_data->init_thread,
thread_data->init_stack,
thread_data->init_stack_size,
thread_data->init_entry,
thread_data->init_p1,
thread_data->init_p2,
thread_data->init_p3,
thread_data->init_prio,
thread_data->init_options,
thread_data->init_name);
/* Remember the static descriptor each thread came from */
thread_data->init_thread->init_data = thread_data;
}
#ifdef CONFIG_USERSPACE
/* Apply statically declared object-permission assignments */
STRUCT_SECTION_FOREACH(k_object_assignment, pos) {
for (int i = 0; pos->objects[i] != NULL; i++) {
k_object_access_grant(pos->objects[i],
pos->thread);
}
}
#endif /* CONFIG_USERSPACE */
/*
* Non-legacy static threads may be started immediately or
* after a previously specified delay. Even though the
* scheduler is locked, ticks can still be delivered and
* processed. Take a sched lock to prevent them from running
* until they are all started.
*
* Note that static threads defined using the legacy API have a
* delay of K_FOREVER.
*/
k_sched_lock();
STRUCT_SECTION_FOREACH(_static_thread_data, thread_data) {
k_timeout_t init_delay = Z_THREAD_INIT_DELAY(thread_data);
if (!K_TIMEOUT_EQ(init_delay, K_FOREVER)) {
thread_schedule_new(thread_data->init_thread,
init_delay);
}
}
k_sched_unlock();
}
#else
#define z_init_static_threads() do { } while (false)
#endif /* CONFIG_MULTITHREADING */
extern const struct init_entry __init_start[];
extern const struct init_entry __init_EARLY_start[];
extern const struct init_entry __init_PRE_KERNEL_1_start[];
extern const struct init_entry __init_PRE_KERNEL_2_start[];
extern const struct init_entry __init_POST_KERNEL_start[];
extern const struct init_entry __init_APPLICATION_start[];
extern const struct init_entry __init_end[];
enum init_level {
INIT_LEVEL_EARLY = 0,
INIT_LEVEL_PRE_KERNEL_1,
INIT_LEVEL_PRE_KERNEL_2,
INIT_LEVEL_POST_KERNEL,
INIT_LEVEL_APPLICATION,
#ifdef CONFIG_SMP
INIT_LEVEL_SMP,
#endif /* CONFIG_SMP */
};
#ifdef CONFIG_SMP
extern const struct init_entry __init_SMP_start[];
#endif /* CONFIG_SMP */
/*
* storage space for the interrupt stack
*
* Note: This area is used as the system stack during kernel initialization,
* since the kernel hasn't yet set up its own stack areas. The dual purposing
* of this area is safe since interrupts are disabled until the kernel context
* switches to the init thread.
*/
K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_interrupt_stacks,
CONFIG_MP_MAX_NUM_CPUS,
CONFIG_ISR_STACK_SIZE);
extern void idle(void *unused1, void *unused2, void *unused3);
#ifdef CONFIG_OBJ_CORE_SYSTEM
static struct k_obj_type obj_type_cpu;
static struct k_obj_type obj_type_kernel;
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
static struct k_obj_core_stats_desc cpu_stats_desc = {
.raw_size = sizeof(struct k_cycle_stats),
.query_size = sizeof(struct k_thread_runtime_stats),
.raw = z_cpu_stats_raw,
.query = z_cpu_stats_query,
.reset = NULL,
.disable = NULL,
.enable = NULL,
};
static struct k_obj_core_stats_desc kernel_stats_desc = {
.raw_size = sizeof(struct k_cycle_stats) * CONFIG_MP_MAX_NUM_CPUS,
.query_size = sizeof(struct k_thread_runtime_stats),
.raw = z_kernel_stats_raw,
.query = z_kernel_stats_query,
.reset = NULL,
.disable = NULL,
.enable = NULL,
};
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
#endif /* CONFIG_OBJ_CORE_SYSTEM */
/* LCOV_EXCL_START
*
* This code is called so early in the boot process that code coverage
* doesn't work properly. In addition, not all arches call this code,
* some like x86 do this with optimized assembly
*/
/**
* @brief equivalent of memset() for early boot usage
*
* Architectures that can't safely use the regular (optimized) memset very
* early during boot because e.g. hardware isn't yet sufficiently initialized
* may override this with their own safe implementation.
*/
__boot_func
void __weak z_early_memset(void *dst, int c, size_t n)
{
	/* Default implementation defers to the regular C library memset();
	 * architectures override this weak symbol when that is not yet safe
	 * at this point in boot.
	 */
	(void) memset(dst, c, n);
}
/**
* @brief equivalent of memcpy() for early boot usage
*
* Architectures that can't safely use the regular (optimized) memcpy very
* early during boot because e.g. hardware isn't yet sufficiently initialized
* may override this with their own safe implementation.
*/
__boot_func
void __weak z_early_memcpy(void *dst, const void *src, size_t n)
{
	/* Default implementation defers to the regular C library memcpy();
	 * architectures override this weak symbol when that is not yet safe
	 * at this point in boot.
	 */
	(void) memcpy(dst, src, n);
}
/**
* @brief Clear BSS
*
* This routine clears the BSS region, so all bytes are 0.
*/
__boot_func
void z_bss_zero(void)
{
	/* Some boot flows (e.g. when a loader already cleared memory) can
	 * skip the clear entirely.
	 */
	if (IS_ENABLED(CONFIG_SKIP_BSS_CLEAR)) {
		return;
	}

	z_early_memset(__bss_start, 0, __bss_end - __bss_start);
	/* Also clear BSS sections placed in devicetree-described
	 * tightly-coupled / on-chip memories, when present.
	 */
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay)
	z_early_memset(&__ccm_bss_start, 0,
		       (uintptr_t) &__ccm_bss_end
		       - (uintptr_t) &__ccm_bss_start);
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
	z_early_memset(&__dtcm_bss_start, 0,
		       (uintptr_t) &__dtcm_bss_end
		       - (uintptr_t) &__dtcm_bss_start);
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ocm), okay)
	z_early_memset(&__ocm_bss_start, 0,
		       (uintptr_t) &__ocm_bss_end
		       - (uintptr_t) &__ocm_bss_start);
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
	extern void bss_zeroing_relocation(void);

	/* Generated routine that clears BSS for relocated code/data regions */
	bss_zeroing_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_COVERAGE_GCOV
	z_early_memset(&__gcov_bss_start, 0,
		       ((uintptr_t) &__gcov_bss_end - (uintptr_t) &__gcov_bss_start));
#endif /* CONFIG_COVERAGE_GCOV */
}
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
/**
 * @brief Clear BSS within the boot region
*
* This routine clears the BSS within the boot region.
* This is separate from z_bss_zero() as boot region may
* contain symbols required for the boot process before
* paging is initialized.
*/
__boot_func
void z_bss_zero_boot(void)
{
	/* Bounds come from linker-provided symbols delimiting the boot
	 * region's BSS.
	 */
	z_early_memset(&lnkr_boot_bss_start, 0,
		       (uintptr_t)&lnkr_boot_bss_end
		       - (uintptr_t)&lnkr_boot_bss_start);
}
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
/**
* @brief Clear BSS within the pinned region
*
* This routine clears the BSS within the pinned region.
* This is separate from z_bss_zero() as pinned region may
* contain symbols required for the boot process before
* paging is initialized.
*/
/* Place in the boot section when one exists, else in the pinned section */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
__boot_func
#else
__pinned_func
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
void z_bss_zero_pinned(void)
{
	/* Bounds come from linker-provided symbols delimiting the pinned
	 * region's BSS.
	 */
	z_early_memset(&lnkr_pinned_bss_start, 0,
		       (uintptr_t)&lnkr_pinned_bss_end
		       - (uintptr_t)&lnkr_pinned_bss_start);
}
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
#ifdef CONFIG_STACK_CANARIES
#ifdef CONFIG_STACK_CANARIES_TLS
extern __thread volatile uintptr_t __stack_chk_guard;
#else
extern volatile uintptr_t __stack_chk_guard;
#endif /* CONFIG_STACK_CANARIES_TLS */
#endif /* CONFIG_STACK_CANARIES */
/* LCOV_EXCL_STOP */
__pinned_bss
bool z_sys_post_kernel;
/* Run a device's init function (if it has one) and record the outcome in
 * the device state. A failure is stored (and returned) as a positive code
 * saturated at UINT8_MAX; PM runtime auto-enablement runs only on success.
 */
static int do_device_init(const struct init_entry *entry)
{
	const struct device *dev = entry->dev;
	int ret = 0;

	if (entry->init_fn.dev != NULL) {
		ret = entry->init_fn.dev(dev);
		if (ret != 0) {
			/* Normalize to a positive, 8-bit saturated value
			 * before stashing it in init_res.
			 */
			int res = (ret < 0) ? -ret : ret;

			if (res > UINT8_MAX) {
				res = UINT8_MAX;
			}
			dev->state->init_res = res;
			ret = res;
		}
	}

	/* The device counts as initialized even when init failed; the
	 * recorded init_res carries the error condition.
	 */
	dev->state->initialized = true;

	if (ret == 0) {
		/* Run automatic device runtime enablement */
		(void)pm_device_runtime_auto_enable(dev);
	}

	return ret;
}
/**
* @brief Execute all the init entry initialization functions at a given level
*
* @details Invokes the initialization routine for each init entry object
* created by the INIT_ENTRY_DEFINE() macro using the specified level.
* The linker script places the init entry objects in memory in the order
* they need to be invoked, with symbols indicating where one level leaves
* off and the next one begins.
*
* @param level init level to run.
*/
static void z_sys_init_run_level(enum init_level level)
{
	/* Table of section-start symbols; levels[level+1] is the start of
	 * the next level (or __init_end), bounding the iteration below.
	 */
	static const struct init_entry *levels[] = {
		__init_EARLY_start,
		__init_PRE_KERNEL_1_start,
		__init_PRE_KERNEL_2_start,
		__init_POST_KERNEL_start,
		__init_APPLICATION_start,
#ifdef CONFIG_SMP
		__init_SMP_start,
#endif /* CONFIG_SMP */
		/* End marker */
		__init_end,
	};
	const struct init_entry *entry;

	for (entry = levels[level]; entry < levels[level+1]; entry++) {
		const struct device *dev = entry->dev;
		int result;

		sys_trace_sys_init_enter(entry, level);
		/* Entries with a device pointer are device inits; the rest
		 * are plain SYS_INIT functions.
		 */
		if (dev != NULL) {
			result = do_device_init(entry);
		} else {
			result = entry->init_fn.sys();
		}
		sys_trace_sys_init_exit(entry, level, result);
	}
}
/* Run deferred initialization for @a dev. Only devices registered in the
 * _deferred_init section can be initialized here.
 *
 * @return init result, or -ENOENT if @a dev is NULL or has no deferred
 *         init entry.
 */
int z_impl_device_init(const struct device *dev)
{
	if (dev == NULL) {
		return -ENOENT;
	}

	STRUCT_SECTION_FOREACH_ALTERNATE(_deferred_init, init_entry, entry) {
		if (entry->dev == dev) {
			return do_device_init(entry);
		}
	}

	return -ENOENT;
}
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: check the caller's access to @a dev
 * before invoking the kernel-mode implementation.
 */
static inline int z_vrfy_device_init(const struct device *dev)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(dev, K_OBJ_ANY));

	return z_impl_device_init(dev);
}
#include <zephyr/syscalls/device_init_mrsh.c>
#endif
extern void boot_banner(void);
/**
* @brief Mainline for kernel's background thread
*
* This routine completes kernel initialization by invoking the remaining
* init functions, then invokes application's main() routine.
*/
__boot_func
static void bg_thread_main(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

#ifdef CONFIG_MMU
	/* Invoked here such that backing store or eviction algorithms may
	 * initialize kernel objects, and that all POST_KERNEL and later tasks
	 * may perform memory management tasks (except for
	 * k_mem_map_phys_bare() which is allowed at any time)
	 */
	z_mem_manage_init();
#endif /* CONFIG_MMU */
	z_sys_post_kernel = true;

	z_sys_init_run_level(INIT_LEVEL_POST_KERNEL);
#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
	z_stack_adjust_initialized = 1;
#endif /* CONFIG_STACK_POINTER_RANDOM */
	boot_banner();

	/* Forward declaration; defined elsewhere in the kernel */
	void z_init_static(void);
	z_init_static();

	/* Final init level before app starts */
	z_sys_init_run_level(INIT_LEVEL_APPLICATION);

	z_init_static_threads();

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(&_kernel));
#endif /* CONFIG_KERNEL_COHERENCE */

#ifdef CONFIG_SMP
	if (!IS_ENABLED(CONFIG_SMP_BOOT_DELAY)) {
		z_smp_init();
	}
	z_sys_init_run_level(INIT_LEVEL_SMP);
#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
	z_mem_manage_boot_finish();
#endif /* CONFIG_MMU */

	extern int main(void);

	/* Application entry point; its return value is ignored */
	(void)main();

	/* Mark non-essential since main() has no more work to do */
	z_thread_essential_clear(&z_main_thread);

#ifdef CONFIG_COVERAGE_DUMP
	/* Dump coverage data once the main() has exited. */
	gcov_coverage_dump();
#endif /* CONFIG_COVERAGE_DUMP */
} /* LCOV_EXCL_LINE ... because we just dumped final coverage data */
#if defined(CONFIG_MULTITHREADING)
/**
 * @brief Create and start the idle thread for CPU @a i
 *
 * Sets up the per-CPU idle thread at K_IDLE_PRIO and marks it started and
 * essential so the scheduler always has a runnable thread.
 */
__boot_func
static void init_idle_thread(int i)
{
	struct k_thread *thread = &z_idle_threads[i];
	k_thread_stack_t *stack = z_idle_stacks[i];
	size_t stack_size = K_KERNEL_STACK_SIZEOF(z_idle_stacks[i]);

#ifdef CONFIG_THREAD_NAME
#if CONFIG_MP_MAX_NUM_CPUS > 1
	char tname[8];

	/* Use sizeof(tname) rather than a magic length so the buffer size
	 * and the bound cannot drift apart.
	 */
	snprintk(tname, sizeof(tname), "idle %02d", i);
#else
	char *tname = "idle";
#endif /* CONFIG_MP_MAX_NUM_CPUS */

#else
	char *tname = NULL;
#endif /* CONFIG_THREAD_NAME */

	z_setup_new_thread(thread, stack,
			   stack_size, idle, &_kernel.cpus[i],
			   NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
			   tname);
	z_mark_thread_as_started(thread);

#ifdef CONFIG_SMP
	thread->base.is_idle = 1U;
#endif /* CONFIG_SMP */
}
/* Initialize the per-CPU kernel structure for CPU @a id: idle thread,
 * interrupt stack pointer, usage tracking, PM bookkeeping and object-core
 * registration.
 */
void z_init_cpu(int id)
{
	init_idle_thread(id);
	_kernel.cpus[id].idle_thread = &z_idle_threads[id];
	_kernel.cpus[id].id = id;
	/* IRQ stack pointer starts at the top (end) of the stack buffer */
	_kernel.cpus[id].irq_stack =
		(K_KERNEL_STACK_BUFFER(z_interrupt_stacks[id]) +
		 K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[id]));
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	_kernel.cpus[id].usage = &_kernel.usage[id];
	_kernel.cpus[id].usage->track_usage =
		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif

#ifdef CONFIG_PM
	/*
	 * Increment number of CPUs active. The pm subsystem
	 * will keep track of this from here.
	 */
	atomic_inc(&_cpus_active);
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	k_obj_core_init_and_link(K_OBJ_CORE(&_kernel.cpus[id]), &obj_type_cpu);
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
	k_obj_core_stats_register(K_OBJ_CORE(&_kernel.cpus[id]),
				  _kernel.cpus[id].usage,
				  sizeof(struct k_cycle_stats));
#endif
#endif
}
/**
*
* @brief Initializes kernel data structures
*
* This routine initializes various kernel data structures, including
* the init and idle threads and any architecture-specific initialization.
*
* Note that all fields of "_kernel" are set to zero on entry, which may
* be all the initialization many of them require.
*
* @return initial stack pointer for the main thread
*/
__boot_func
static char *prepare_multithreading(void)
{
	char *stack_ptr;

	/* _kernel.ready_q is all zeroes */
	z_sched_init();

#ifndef CONFIG_SMP
	/*
	 * prime the cache with the main thread since:
	 *
	 * - the cache can never be NULL
	 * - the main thread will be the one to run first
	 * - no other thread is initialized yet and thus their priority fields
	 *   contain garbage, which would prevent the cache loading algorithm
	 *   to work as intended
	 */
	_kernel.ready_q.cache = &z_main_thread;
#endif /* CONFIG_SMP */
	/* Create the main thread, which will run bg_thread_main() to finish
	 * kernel initialization and call the application's main().
	 */
	stack_ptr = z_setup_new_thread(&z_main_thread, z_main_stack,
				       K_THREAD_STACK_SIZEOF(z_main_stack),
				       bg_thread_main,
				       NULL, NULL, NULL,
				       CONFIG_MAIN_THREAD_PRIORITY,
				       K_ESSENTIAL, "main");
	z_mark_thread_as_started(&z_main_thread);
	z_ready_thread(&z_main_thread);

	z_init_cpu(0);

	return stack_ptr;
}
/* Hand control from the boot context to the main thread; never returns. */
__boot_func
static FUNC_NORETURN void switch_to_main_thread(char *stack_ptr)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	arch_switch_to_main_thread(&z_main_thread, stack_ptr, bg_thread_main);
#else
	ARG_UNUSED(stack_ptr);
	/*
	 * Context switch to main task (entry function is _main()): the
	 * current fake thread is not on a wait queue or ready queue, so it
	 * will never be rescheduled in.
	 */
	z_swap_unlocked();
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* CONFIG_MULTITHREADING */
/* Fill @a buf with @a length pseudo-random bytes at early boot. Uses the
 * chosen entropy driver's ISR API when one is available; any remaining
 * bytes come from a timer-seeded 64-bit LCG fallback (not
 * cryptographically secure).
 */
__boot_func
void __weak z_early_rand_get(uint8_t *buf, size_t length)
{
	static uint64_t state = (uint64_t)CONFIG_TIMER_RANDOM_INITIAL_STATE;
	int rc;

#ifdef CONFIG_ENTROPY_HAS_DRIVER
	const struct device *const entropy = DEVICE_DT_GET_OR_NULL(DT_CHOSEN(zephyr_entropy));

	if ((entropy != NULL) && device_is_ready(entropy)) {
		/* Try to see if driver provides an ISR-specific API */
		rc = entropy_get_entropy_isr(entropy, buf, length, ENTROPY_BUSYWAIT);
		if (rc > 0) {
			length -= rc;
			buf += rc;
		}
	}
#endif /* CONFIG_ENTROPY_HAS_DRIVER */

	while (length > 0) {
		uint32_t val;

		/* Mix in the cycle counter, then advance the LCG; the
		 * multiplier looks like a standard 64-bit LCG constant
		 * (presumably Knuth's MMIX — NOTE(review): confirm source).
		 * Only the high 32 bits are emitted per step.
		 */
		state = state + k_cycle_get_32();
		state = state * 2862933555777941757ULL + 3037000493ULL;
		val = (uint32_t)(state >> 32);
		rc = MIN(length, sizeof(val));
		z_early_memcpy((void *)buf, &val, rc);

		length -= rc;
		buf += rc;
	}
}
/**
*
* @brief Initialize kernel
*
* This routine is invoked when the system is ready to run C code. The
* processor must be running in 32-bit mode, and the BSS must have been
* cleared/zeroed.
*
* @return Does not return
*/
__boot_func
FUNC_NO_STACK_PROTECTOR
FUNC_NORETURN void z_cstart(void)
{
	/* gcov hook needed to get the coverage report.*/
	gcov_static_init();

	/* initialize early init calls */
	z_sys_init_run_level(INIT_LEVEL_EARLY);

	/* perform any architecture-specific initialization */
	arch_kernel_init();

	LOG_CORE_INIT();

#if defined(CONFIG_MULTITHREADING)
	/* Install a dummy thread as the initial "current" thread so the
	 * scheduler has something to swap away from.
	 */
	z_dummy_thread_init(&_thread_dummy);
#endif /* CONFIG_MULTITHREADING */
	/* do any necessary initialization of static devices */
	z_device_state_init();

	/* perform basic hardware initialization */
	z_sys_init_run_level(INIT_LEVEL_PRE_KERNEL_1);
#if defined(CONFIG_SMP)
	arch_smp_init();
#endif
	z_sys_init_run_level(INIT_LEVEL_PRE_KERNEL_2);

#ifdef CONFIG_STACK_CANARIES
	uintptr_t stack_guard;

	/* Randomize the stack canary; the shift clears the low byte */
	z_early_rand_get((uint8_t *)&stack_guard, sizeof(stack_guard));
	__stack_chk_guard = stack_guard;
	__stack_chk_guard <<= 8;
#endif /* CONFIG_STACK_CANARIES */

#ifdef CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT
	timing_init();
	timing_start();
#endif /* CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT */

#ifdef CONFIG_MULTITHREADING
	switch_to_main_thread(prepare_multithreading());
#else
#ifdef ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING
	/* Custom ARCH-specific routine to switch to main()
	 * in the case of no multi-threading.
	 */
	ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING(bg_thread_main,
					      NULL, NULL, NULL);
#else
	bg_thread_main(NULL, NULL, NULL);

	/* LCOV_EXCL_START
	 * We've already dumped coverage data at this point.
	 */
	irq_lock();
	while (true) {
	}
	/* LCOV_EXCL_STOP */
#endif /* ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING */
#endif /* CONFIG_MULTITHREADING */

	/*
	 * Compiler can't tell that the above routines won't return and issues
	 * a warning unless we explicitly tell it that control never gets this
	 * far.
	 */
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#ifdef CONFIG_OBJ_CORE_SYSTEM
/* Register the CPU object-core type (and its stats hooks, if enabled). */
static int init_cpu_obj_core_list(void)
{
	/* Initialize CPU object type */
	z_obj_type_init(&obj_type_cpu, K_OBJ_TYPE_CPU_ID,
			offsetof(struct _cpu, obj_core));

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
	k_obj_type_stats_init(&obj_type_cpu, &cpu_stats_desc);
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

	return 0;
}
/* Register the kernel object-core type and link _kernel into it (with its
 * stats hooks, if enabled).
 */
static int init_kernel_obj_core_list(void)
{
	/* Initialize kernel object type */
	z_obj_type_init(&obj_type_kernel, K_OBJ_TYPE_KERNEL_ID,
			offsetof(struct z_kernel, obj_core));

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
	k_obj_type_stats_init(&obj_type_kernel, &kernel_stats_desc);
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

	k_obj_core_init_and_link(K_OBJ_CORE(&_kernel), &obj_type_kernel);
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
	k_obj_core_stats_register(K_OBJ_CORE(&_kernel), _kernel.usage,
				  sizeof(_kernel.usage));
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

	return 0;
}
SYS_INIT(init_cpu_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
SYS_INIT(init_kernel_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_SYSTEM */
``` | /content/code_sandbox/kernel/init.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,089 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/dlist.h>
/**
 * @brief rbtree ordering predicate for thread priority queues
 *
 * Orders threads primarily by scheduling priority (via z_sched_prio_cmp());
 * ties are broken by base.order_key so equal-priority threads keep their
 * insertion order.
 *
 * @return true if @a a should sort before @a b
 */
bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
{
	struct k_thread *thread_a, *thread_b;
	int32_t cmp;

	thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
	thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);

	cmp = z_sched_prio_cmp(thread_a, thread_b);

	if (cmp > 0) {
		return true;
	} else if (cmp < 0) {
		return false;
	} else {
		/* Equal priority: earlier insertion (smaller order_key)
		 * sorts first. Return the comparison directly instead of
		 * the redundant "? 1 : 0" ternary in a bool function.
		 */
		return thread_a->base.order_key < thread_b->base.order_key;
	}
}
``` | /content/code_sandbox/kernel/priority_queues.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 184 |
```unknown
#
menu "Virtual Memory Support"
config KERNEL_VM_SUPPORT
bool
help
Hidden option to enable virtual memory Kconfigs.
if KERNEL_VM_SUPPORT
DT_CHOSEN_Z_SRAM := zephyr,sram
config KERNEL_VM_BASE
hex "Virtual address space base address"
default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_SRAM))
help
Define the base of the kernel's address space.
By default, this is the same as the DT_CHOSEN_Z_SRAM physical base SRAM
address from DTS, in which case RAM will be identity-mapped. Some
architectures may require RAM to be mapped in this way; they may have
just one RAM region and doing this makes linking much simpler, as
at least when the kernel boots all virtual RAM addresses are the same
as their physical address (demand paging at runtime may later modify
this for non-pinned page frames).
Otherwise, if RAM isn't identity-mapped:
1. It is the architecture's responsibility to transition the
instruction pointer to virtual addresses at early boot before
entering the kernel at z_cstart().
2. The underlying architecture may impose constraints on the bounds of
the kernel's address space, such as not overlapping physical RAM
regions if RAM is not identity-mapped, or the virtual and physical
base addresses being aligned to some common value (which allows
double-linking of paging structures to make the instruction pointer
transition simpler).
Zephyr does not implement a split address space and if multiple
page tables are in use, they all have the same virtual-to-physical
mappings (with potentially different permissions).
config KERNEL_VM_OFFSET
hex "Kernel offset within address space"
default 0
help
Offset that the kernel image begins within its address space,
if this is not the same offset from the beginning of RAM.
Some care may need to be taken in selecting this value. In certain
build-time cases, or when a physical address cannot be looked up
in page tables, the equation:
virt = phys + ((KERNEL_VM_BASE + KERNEL_VM_OFFSET) -
(SRAM_BASE_ADDRESS + SRAM_OFFSET))
Will be used to convert between physical and virtual addresses for
memory that is mapped at boot.
	  This is uncommon and only necessary if the beginning of the virtual
	  and physical memory regions have dissimilar alignment.
config KERNEL_VM_SIZE
hex "Size of kernel address space in bytes"
default 0x800000
help
Size of the kernel's address space. Constraining this helps control
how much total memory can be used for page tables.
The difference between KERNEL_VM_BASE and KERNEL_VM_SIZE indicates the
size of the virtual region for runtime memory mappings. This is needed
for mapping driver MMIO regions, as well as special RAM mapping use-cases
	  such as VDSO pages, memory mapped thread stacks, and anonymous memory
mappings. The kernel itself will be mapped in here as well at boot.
Systems with very large amounts of memory (such as 512M or more)
will want to use a 64-bit build of Zephyr, there are no plans to
implement a notion of "high" memory in Zephyr to work around physical
RAM size larger than the defined bounds of the virtual address space.
config KERNEL_DIRECT_MAP
bool "Memory region direct-map support"
depends on MMU
help
This enables the direct-map support, namely the region can be 1:1
mapping between virtual address and physical address.
	  If the requested region lies within the virtual memory space and
	  does not overlap any existing mapping, it will be reserved from
	  the virtual memory space and mapped; otherwise the request fails.
	  Any attempt to map across the boundary of the virtual memory
	  space will also fail.
Note that this is for compatibility and portable apps shouldn't
be using it.
endif # KERNEL_VM_SUPPORT
menuconfig MMU
bool "MMU features"
depends on CPU_HAS_MMU
select KERNEL_VM_SUPPORT
help
This option is enabled when the CPU's memory management unit is active
and the arch_mem_map() API is available.
if MMU
config MMU_PAGE_SIZE
hex "Size of smallest granularity MMU page"
default 0x1000
help
Size of memory pages. Varies per MMU but 4K is common. For MMUs that
support multiple page sizes, put the smallest one here.
menuconfig DEMAND_PAGING
bool "Demand paging [EXPERIMENTAL]"
depends on ARCH_HAS_DEMAND_PAGING
help
Enable demand paging. Requires architecture support in how the kernel
is linked and the implementation of an eviction algorithm and a
backing store for evicted pages.
if DEMAND_PAGING
config DEMAND_PAGING_ALLOW_IRQ
bool "Allow interrupts during page-ins/outs"
help
Allow interrupts to be serviced while pages are being evicted or
retrieved from the backing store. This is much better for system
latency, but any code running in interrupt context that page faults
will cause a kernel panic. Such code must work with exclusively pinned
code and data pages.
The scheduler is still disabled during this operation.
If this option is disabled, the page fault servicing logic
runs with interrupts disabled for the entire operation. However,
ISRs may also page fault.
config DEMAND_PAGING_PAGE_FRAMES_RESERVE
int "Number of page frames reserved for paging"
default 32 if !LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
default 0
help
This sets the number of page frames that will be reserved for
paging that do not count towards free memory. This is to
ensure that there are some page frames available for paging
code and data. Otherwise, it would be possible to exhaust
all page frames via anonymous memory mappings.
config DEMAND_PAGING_STATS
bool "Gather Demand Paging Statistics"
help
This enables gathering various statistics related to demand paging,
e.g. number of pagefaults. This is useful for tuning eviction
algorithms and optimizing backing store.
Should say N in production system as this is not without cost.
config DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
bool "Use Timing Functions to Gather Demand Paging Statistics"
select TIMING_FUNCTIONS_NEED_AT_BOOT
help
Use timing functions to gather various demand paging statistics.
config DEMAND_PAGING_THREAD_STATS
bool "Gather per Thread Demand Paging Statistics"
depends on DEMAND_PAGING_STATS
help
This enables gathering per thread statistics related to demand
paging.
Should say N in production system as this is not without cost.
config DEMAND_PAGING_TIMING_HISTOGRAM
bool "Gather Demand Paging Execution Timing Histogram"
depends on DEMAND_PAGING_STATS
help
This gathers the histogram of execution time on page eviction
selection, and backing store page in and page out.
Should say N in production system as this is not without cost.
config DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS
int "Number of bins (buckets) in Demand Paging Timing Histogram"
depends on DEMAND_PAGING_TIMING_HISTOGRAM
default 10
help
Defines the number of bins (buckets) in the histogram used for
gathering execution timing information for demand paging.
This requires k_mem_paging_eviction_histogram_bounds[] and
k_mem_paging_backing_store_histogram_bounds[] to define
the upper bounds for each bin. See kernel/statistics.c for
information.
endif # DEMAND_PAGING
endif # MMU
config KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK
bool
help
Use custom memory range check functions instead of the generic
checks in k_mem_phys_addr() and k_mem_virt_addr().
sys_mm_is_phys_addr_in_range() and
sys_mm_is_virt_addr_in_range() must be implemented.
endmenu # Virtual Memory Support
``` | /content/code_sandbox/kernel/Kconfig.vm | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,708 |
```c
/*
*
*/
/**
* @file Atomic ops in pure C
*
* This module provides the atomic operators for processors
* which do not support native atomic operations.
*
* The atomic operations are guaranteed to be atomic with respect
* to interrupt service routines, and to operations performed by peer
* processors.
*
* (originally from x86's atomic.c)
*/
#include <zephyr/toolchain.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/kernel_structs.h>
/* Single global spinlock for atomic operations. This is fallback
 * code, not performance sensitive. At least by not using irq_lock()
 * in SMP contexts we won't contend with legitimate users of the
 * global lock.
 */
static struct k_spinlock lock;
/* For those rare CPUs which support user mode, but not native atomic
* operations, the best we can do for them is implement the atomic
* functions as system calls, since in user mode locking a spinlock is
* forbidden.
*/
#ifdef CONFIG_USERSPACE
#include <zephyr/internal/syscall_handler.h>
#define ATOMIC_SYSCALL_HANDLER_TARGET(name) \
static inline atomic_val_t z_vrfy_##name(atomic_t *target) \
{ \
K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
return z_impl_##name((atomic_t *)target); \
}
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) \
static inline atomic_val_t z_vrfy_##name(atomic_t *target, \
atomic_val_t value) \
{ \
K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
return z_impl_##name((atomic_t *)target, value); \
}
#else
#define ATOMIC_SYSCALL_HANDLER_TARGET(name)
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
#endif /* CONFIG_USERSPACE */
/**
*
* @brief Atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns true.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns false.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* @param target address to be tested
* @param old_value value to compare against
 * @param new_value value to be written at <target> if the comparison succeeds
* @return Returns true if <new_value> is written, false otherwise.
*/
/* Compare-and-set under the global spinlock: store @a new_value only when
 * *target still equals @a old_value.
 *
 * @return true if the store happened, false otherwise.
 */
bool z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	k_spinlock_key_t key;
	/* Declared bool (not int) to match the function's return type */
	bool ret = false;

	/*
	 * On SMP the k_spin_lock() definition calls atomic_cas().
	 * Using k_spin_lock() here would create an infinite loop and
	 * massive stack overflow. Consider CONFIG_ATOMIC_OPERATIONS_ARCH
	 * or CONFIG_ATOMIC_OPERATIONS_BUILTIN instead.
	 */
	BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP));

	key = k_spin_lock(&lock);

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}

	k_spin_unlock(&lock, key);

	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate write access to @a target before
 * calling the kernel-mode CAS implementation.
 */
bool z_vrfy_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));

	return z_impl_atomic_cas((atomic_t *)target, old_value, new_value);
}
#include <zephyr/syscalls/atomic_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Pointer-sized compare-and-set under the global spinlock: store
 * @a new_value only when *target still equals @a old_value.
 *
 * @return true if the store happened, false otherwise.
 */
bool z_impl_atomic_ptr_cas(atomic_ptr_t *target, atomic_ptr_val_t old_value,
			   atomic_ptr_val_t new_value)
{
	k_spinlock_key_t key;
	/* Declared bool (not int) to match the function's return type */
	bool ret = false;

	key = k_spin_lock(&lock);

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}

	k_spin_unlock(&lock, key);

	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate write access to @a target before
 * calling the kernel-mode pointer CAS implementation.
 */
static inline bool z_vrfy_atomic_ptr_cas(atomic_ptr_t *target,
					 atomic_ptr_val_t old_value,
					 atomic_ptr_val_t new_value)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));

	return z_impl_atomic_ptr_cas(target, old_value, new_value);
}
#include <zephyr/syscalls/atomic_ptr_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */
/**
*
* @brief Atomic addition primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* @param target memory location to add to
* @param value the value to add
*
* @return The previous value from <target>
*/
/* Add @a value to *target under the global spinlock and return the value
 * that was stored before the addition.
 */
atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
{
	atomic_val_t old;
	k_spinlock_key_t k = k_spin_lock(&lock);

	old = *target;
	*target = old + value;
	k_spin_unlock(&lock, k);

	return old;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add);
/**
*
* @brief Atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* @param target the memory location to subtract from
* @param value the value to subtract
*
* @return The previous value from <target>
*/
/* Subtract @a value from *target under the global spinlock and return the
 * value that was stored before the subtraction.
 */
atomic_val_t z_impl_atomic_sub(atomic_t *target, atomic_val_t value)
{
	atomic_val_t old;
	k_spinlock_key_t k = k_spin_lock(&lock);

	old = *target;
	*target = old - value;
	k_spin_unlock(&lock, k);

	return old;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_sub);
/**
*
* @brief Atomic get primitive
*
* @param target memory location to read from
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* @return The value read from <target>
*/
atomic_val_t atomic_get(const atomic_t *target)
{
	/* Plain load; the comment above notes <target> is expected to be
	 * suitably aligned so the load is indivisible.
	 */
	return *target;
}

/* Pointer-sized counterpart of atomic_get(): a plain aligned load. */
atomic_ptr_val_t atomic_ptr_get(const atomic_ptr_t *target)
{
	return *target;
}
/**
*
* @brief Atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* @param target the memory location to write to
* @param value the value to write
*
* @return The previous value from <target>
*/
/* Store @a value at *target under the global spinlock and return the value
 * that was there before the store.
 */
atomic_val_t z_impl_atomic_set(atomic_t *target, atomic_val_t value)
{
	atomic_val_t old;
	k_spinlock_key_t k = k_spin_lock(&lock);

	old = *target;
	*target = value;
	k_spin_unlock(&lock, k);

	return old;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_set);
/* Store a pointer value at *target under the global spinlock and return
 * the pointer that was there before the store.
 */
atomic_ptr_val_t z_impl_atomic_ptr_set(atomic_ptr_t *target,
				       atomic_ptr_val_t value)
{
	k_spinlock_key_t key;
	atomic_ptr_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = value;

	k_spin_unlock(&lock, key);

	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate write access to @a target before
 * calling the kernel-mode pointer-set implementation.
 */
static inline atomic_ptr_val_t z_vrfy_atomic_ptr_set(atomic_ptr_t *target,
						     atomic_ptr_val_t value)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));

	return z_impl_atomic_ptr_set(target, value);
}
#include <zephyr/syscalls/atomic_ptr_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
/**
*
* @brief Atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to OR
*
* @return The previous value from <target>
*/
/* Bitwise-OR @a value into *target under the global spinlock and return
 * the value that was stored before the operation.
 */
atomic_val_t z_impl_atomic_or(atomic_t *target, atomic_val_t value)
{
	atomic_val_t old;
	k_spinlock_key_t k = k_spin_lock(&lock);

	old = *target;
	*target = old | value;
	k_spin_unlock(&lock, k);

	return old;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_or);
/**
*
* @brief Atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to XOR
*
* @return The previous value from <target>
*/
/* Bitwise-XOR @a value into *target under the global spinlock; returns the
 * prior value.
 */
atomic_val_t z_impl_atomic_xor(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target ^= value;

	k_spin_unlock(&lock, key);

	return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_xor);
/**
*
* @brief Atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to AND
*
* @return The previous value from <target>
*/
/* Bitwise-AND @a value into *target under the global spinlock; returns the
 * prior value.
 */
atomic_val_t z_impl_atomic_and(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target &= value;

	k_spin_unlock(&lock, key);

	return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_and);
/**
*
* @brief Atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to NAND
*
* @return The previous value from <target>
*/
/* Store ~(*target & value) at *target under the global spinlock; returns
 * the prior value.
 */
atomic_val_t z_impl_atomic_nand(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = ~(*target & value);

	k_spin_unlock(&lock, key);

	return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);
#ifdef CONFIG_USERSPACE
#include <zephyr/syscalls/atomic_add_mrsh.c>
#include <zephyr/syscalls/atomic_sub_mrsh.c>
#include <zephyr/syscalls/atomic_set_mrsh.c>
#include <zephyr/syscalls/atomic_or_mrsh.c>
#include <zephyr/syscalls/atomic_xor_mrsh.c>
#include <zephyr/syscalls/atomic_and_mrsh.c>
#include <zephyr/syscalls/atomic_nand_mrsh.c>
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/kernel/atomic_c.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,540 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/spinlock.h>
#include <kswap.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/init.h>
#include <ksched.h>
/* Resolve @a futex to its kernel-side bookkeeping (wait queue + lock).
 * Returns NULL when the pointer is not a registered futex object.
 */
static struct z_futex_data *k_futex_find_data(struct k_futex *futex)
{
	struct k_object *obj = k_object_find(futex);

	if ((obj != NULL) && (obj->type == K_OBJ_FUTEX)) {
		return obj->data.futex_data;
	}

	return NULL;
}
/*
 * Wake thread(s) pending on @a futex.
 *
 * Wakes the first (highest-priority) waiter, or every waiter when
 * @a wake_all is true. Each woken thread observes a 0 result from its
 * pending wait.
 *
 * @return number of threads woken, or -EINVAL if @a futex is not a
 *         valid futex kernel object.
 */
int z_impl_k_futex_wake(struct k_futex *futex, bool wake_all)
{
	k_spinlock_key_t key;
	unsigned int woken = 0U;
	struct k_thread *thread;
	struct z_futex_data *futex_data;
	futex_data = k_futex_find_data(futex);
	if (futex_data == NULL) {
		return -EINVAL;
	}
	key = k_spin_lock(&futex_data->lock);
	do {
		/* Pop the next pending waiter, if any */
		thread = z_unpend_first_thread(&futex_data->wait_q);
		if (thread != NULL) {
			woken++;
			/* Woken waiters see a 0 (success) wait result */
			arch_thread_return_value_set(thread, 0);
			z_ready_thread(thread);
		}
	} while (thread && wake_all);
	/* Drop the lock and yield to any higher-priority woken thread */
	z_reschedule(&futex_data->lock, key);
	return woken;
}
static inline int z_vrfy_k_futex_wake(struct k_futex *futex, bool wake_all)
{
	/* The caller must have write access to the futex word itself. */
	int err = K_SYSCALL_MEMORY_WRITE(futex, sizeof(struct k_futex));

	if (err != 0) {
		return -EACCES;
	}

	return z_impl_k_futex_wake(futex, wake_all);
}
#include <zephyr/syscalls/k_futex_wake_mrsh.c>
/*
 * Pend the current thread on @a futex until woken or timed out.
 *
 * @return 0 when woken by k_futex_wake(), -EINVAL if @a futex is not a
 *         valid futex object, -EAGAIN if the futex value no longer equals
 *         @a expected, or -ETIMEDOUT on timeout.
 */
int z_impl_k_futex_wait(struct k_futex *futex, int expected,
			k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct z_futex_data *futex_data;
	futex_data = k_futex_find_data(futex);
	if (futex_data == NULL) {
		return -EINVAL;
	}
	/* Bail out early if the futex value already changed */
	if (atomic_get(&futex->val) != (atomic_val_t)expected) {
		return -EAGAIN;
	}
	key = k_spin_lock(&futex_data->lock);
	ret = z_pend_curr(&futex_data->lock,
			key, &futex_data->wait_q, timeout);
	/* Map the scheduler's timeout code onto the futex API's error */
	if (ret == -EAGAIN) {
		ret = -ETIMEDOUT;
	}
	return ret;
}
static inline int z_vrfy_k_futex_wait(struct k_futex *futex, int expected,
			      k_timeout_t timeout)
{
	/* Caller must have write access to the futex object. */
	int err = K_SYSCALL_MEMORY_WRITE(futex, sizeof(struct k_futex));

	if (err != 0) {
		return -EACCES;
	}

	return z_impl_k_futex_wait(futex, expected, timeout);
}
#include <zephyr/syscalls/k_futex_wait_mrsh.c>
``` | /content/code_sandbox/kernel/futex.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 639 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/util.h>
/*
 * Allocate @a size bytes from @a heap with @a align alignment, reserving
 * one extra pointer-sized slot immediately below the returned block that
 * records which heap the memory came from (consumed by k_free()/k_realloc()).
 * Returns NULL on overflow or allocation failure.
 */
static void *z_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t size)
{
	void *mem;
	struct k_heap **heap_ref;
	size_t __align;
	/*
	 * Adjust the size to make room for our heap reference.
	 * Merge a rewind bit with align value (see sys_heap_aligned_alloc()).
	 * This allows for storing the heap pointer right below the aligned
	 * boundary without wasting any memory.
	 */
	if (size_add_overflow(size, sizeof(heap_ref), &size)) {
		/* Requested size too large to add the bookkeeping slot */
		return NULL;
	}
	__align = align | sizeof(heap_ref);
	mem = k_heap_aligned_alloc(heap, __align, size, K_NO_WAIT);
	if (mem == NULL) {
		return NULL;
	}
	/* Stash the owning heap, then hand the caller the slot above it */
	heap_ref = mem;
	*heap_ref = heap;
	mem = ++heap_ref;
	__ASSERT(align == 0 || ((uintptr_t)mem & (align - 1)) == 0,
		"misaligned memory at %p (align = %zu)", mem, align);
	return mem;
}
/* Return a block obtained from k_malloc()/k_aligned_alloc()/k_realloc()
 * to its owning heap. A NULL @a ptr is a no-op.
 */
void k_free(void *ptr)
{
	struct k_heap **heap_ptr;

	if (ptr == NULL) {
		return;
	}

	/* The owning heap pointer sits immediately below the user block */
	heap_ptr = ptr;
	--heap_ptr;
	ptr = heap_ptr;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_free, *heap_ptr, heap_ptr);

	k_heap_free(*heap_ptr, ptr);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_free, *heap_ptr, heap_ptr);
}
#if (K_HEAP_MEM_POOL_SIZE > 0)
K_HEAP_DEFINE(_system_heap, K_HEAP_MEM_POOL_SIZE);
#define _SYSTEM_HEAP (&_system_heap)
/*
 * Allocate @a size bytes from the system heap aligned to @a align.
 * @a align must be a power of two and a multiple of sizeof(void *).
 * Returns NULL on failure; free with k_free().
 */
void *k_aligned_alloc(size_t align, size_t size)
{
	__ASSERT(align / sizeof(void *) >= 1
		&& (align % sizeof(void *)) == 0,
		"align must be a multiple of sizeof(void *)");
	__ASSERT((align & (align - 1)) == 0,
		"align must be a power of 2");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_aligned_alloc, _SYSTEM_HEAP);
	void *ret = z_heap_aligned_alloc(_SYSTEM_HEAP, align, size);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_aligned_alloc, _SYSTEM_HEAP, ret);
	return ret;
}
/* Allocate @a size bytes from the system heap; NULL on failure. */
void *k_malloc(size_t size)
{
	void *mem;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_malloc, _SYSTEM_HEAP);

	/* Pointer-size alignment, the minimum k_aligned_alloc() accepts */
	mem = k_aligned_alloc(sizeof(void *), size);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_malloc, _SYSTEM_HEAP, mem);

	return mem;
}
/* Allocate and zero an array of @a nmemb elements of @a size bytes.
 * Returns NULL on multiplication overflow or allocation failure.
 */
void *k_calloc(size_t nmemb, size_t size)
{
	size_t total;
	void *mem = NULL;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_calloc, _SYSTEM_HEAP);

	if (!size_mul_overflow(nmemb, size, &total)) {
		mem = k_malloc(total);
		if (mem != NULL) {
			(void)memset(mem, 0, total);
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_calloc, _SYSTEM_HEAP, mem);

	return mem;
}
/*
 * Resize a k_malloc()/k_aligned_alloc() block, preserving its contents.
 *
 * A @a size of 0 frees @a ptr and returns NULL; a NULL @a ptr behaves
 * like k_malloc(). The owning heap is recovered from the hidden pointer
 * slot stored just below the user block (see z_heap_aligned_alloc()).
 */
void *k_realloc(void *ptr, size_t size)
{
	struct k_heap *heap, **heap_ref;
	void *ret;
	if (size == 0) {
		k_free(ptr);
		return NULL;
	}
	if (ptr == NULL) {
		return k_malloc(size);
	}
	/* Rewind to the hidden heap pointer below the user block */
	heap_ref = ptr;
	ptr = --heap_ref;
	heap = *heap_ref;
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_realloc, heap, ptr);
	/* Re-add room for the bookkeeping slot, guarding against overflow */
	if (size_add_overflow(size, sizeof(heap_ref), &size)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_realloc, heap, ptr, NULL);
		return NULL;
	}
	ret = k_heap_realloc(heap, ptr, size, K_NO_WAIT);
	if (ret != NULL) {
		/* Skip past the heap pointer slot again for the caller */
		heap_ref = ret;
		ret = ++heap_ref;
	}
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_realloc, heap, ptr, ret);
	return ret;
}
/* Make @a thread use the common system heap as its resource pool
 * (consulted by z_thread_aligned_alloc() for per-thread allocations).
 */
void k_thread_system_pool_assign(struct k_thread *thread)
{
	thread->resource_pool = _SYSTEM_HEAP;
}
#else
#define _SYSTEM_HEAP NULL
#endif /* K_HEAP_MEM_POOL_SIZE */
/* Allocate from the current thread's resource pool, or from the system
 * heap when called from ISR context. Returns NULL when no pool is set
 * (or when allocation fails).
 */
void *z_thread_aligned_alloc(size_t align, size_t size)
{
	struct k_heap *pool;

	/* ISRs have no _current thread context to take a pool from */
	if (k_is_in_isr()) {
		pool = _SYSTEM_HEAP;
	} else {
		pool = _current->resource_pool;
	}

	if (pool == NULL) {
		return NULL;
	}

	return z_heap_aligned_alloc(pool, align, size);
}
``` | /content/code_sandbox/kernel/mempool.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,081 |
```c
/*
*
*/
/**
* @file @brief mutex kernel services
*
* This module contains routines for handling mutex locking and unlocking.
*
* Mutexes implement a priority inheritance algorithm that boosts the priority
* level of the owning thread to match the priority level of the highest
* priority thread waiting on the mutex.
*
* Each mutex that contributes to priority inheritance must be released in the
* reverse order in which it was acquired. Furthermore each subsequent mutex
* that contributes to raising the owning thread's priority level must be
* acquired at a point after the most recent "bumping" of the priority level.
*
* For example, if thread A has two mutexes contributing to the raising of its
* priority level, the second mutex M2 must be acquired by thread A after
* thread A's priority level was bumped due to owning the first mutex M1.
* When releasing the mutex, thread A must release M2 before it releases M1.
* Failure to follow this nested model may result in threads running at
* unexpected priority levels (too high, or too low).
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <ksched.h>
#include <kthread.h>
#include <wait_q.h>
#include <errno.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>
#include <zephyr/logging/log.h>
#include <zephyr/llext/symbol.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/* We use a global spinlock here because some of the synchronization
* is protecting things like owner thread priorities which aren't
* "part of" a single k_mutex. Should move those bits of the API
* under the scheduler lock so we can break this up.
*/
static struct k_spinlock lock;
#ifdef CONFIG_OBJ_CORE_MUTEX
static struct k_obj_type obj_type_mutex;
#endif /* CONFIG_OBJ_CORE_MUTEX */
/*
 * Initialize @a mutex to the unlocked state (no owner, zero lock count)
 * and register it as a kernel object.
 *
 * @return always 0.
 */
int z_impl_k_mutex_init(struct k_mutex *mutex)
{
	mutex->owner = NULL;
	mutex->lock_count = 0U;
	z_waitq_init(&mutex->wait_q);
	k_object_init(mutex);
#ifdef CONFIG_OBJ_CORE_MUTEX
	k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);
#endif /* CONFIG_OBJ_CORE_MUTEX */
	SYS_PORT_TRACING_OBJ_INIT(k_mutex, mutex, 0);
	return 0;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: validate/claim the object, then defer to the impl. */
static inline int z_vrfy_k_mutex_init(struct k_mutex *mutex)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_init(mutex);
}
#include <zephyr/syscalls/k_mutex_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Compute the priority an owner should inherit: the higher of @a target
 * and @a limit, clamped by the configured priority ceiling.
 */
static int32_t new_prio_for_inheritance(int32_t target, int32_t limit)
{
	int prio = limit;

	if (z_is_prio_higher(target, limit)) {
		prio = target;
	}

	return z_get_new_prio_with_ceiling(prio);
}
/* Set the mutex owner's priority to @a new_prio if it differs.
 * Returns true when a reschedule is needed as a result.
 */
static bool adjust_owner_prio(struct k_mutex *mutex, int32_t new_prio)
{
	if (mutex->owner->base.prio == new_prio) {
		return false;
	}

	LOG_DBG("%p (ready (y/n): %c) prio changed to %d (was %d)",
		mutex->owner, z_is_thread_ready(mutex->owner) ?
		'y' : 'n',
		new_prio, mutex->owner->base.prio);

	return z_thread_prio_set(mutex->owner, new_prio);
}
/*
 * Lock @a mutex, pending the current thread (with priority inheritance)
 * for up to @a timeout when another thread owns it.
 *
 * @return 0 on success, -EBUSY when unavailable with K_NO_WAIT,
 *         -EAGAIN on timeout.
 */
int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
{
	int new_prio;
	k_spinlock_key_t key;
	bool resched = false;
	__ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mutex, lock, mutex, timeout);
	key = k_spin_lock(&lock);
	/* Fast path: mutex free, or recursive lock by the current owner */
	if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {
		/* Record the owner's priority at first acquisition so it
		 * can be restored when the mutex is fully released.
		 */
		mutex->owner_orig_prio = (mutex->lock_count == 0U) ?
					_current->base.prio :
					mutex->owner_orig_prio;
		mutex->lock_count++;
		mutex->owner = _current;
		LOG_DBG("%p took mutex %p, count: %d, orig prio: %d",
			_current, mutex, mutex->lock_count,
			mutex->owner_orig_prio);
		k_spin_unlock(&lock, key);
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, 0);
		return 0;
	}
	/* Contended and the caller won't wait */
	if (unlikely(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) {
		k_spin_unlock(&lock, key);
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, -EBUSY);
		return -EBUSY;
	}
	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mutex, lock, mutex, timeout);
	/* Priority inheritance: boost the owner up to our level if needed */
	new_prio = new_prio_for_inheritance(_current->base.prio,
					    mutex->owner->base.prio);
	LOG_DBG("adjusting prio up on mutex %p", mutex);
	if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) {
		resched = adjust_owner_prio(mutex, new_prio);
	}
	/* Pend on the wait queue; 0 means ownership was handed to us */
	int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout);
	LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex);
	LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex,
		got_mutex ? 'y' : 'n');
	if (got_mutex == 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, 0);
		return 0;
	}
	/* timed out */
	LOG_DBG("%p timeout on mutex %p", _current, mutex);
	key = k_spin_lock(&lock);
	/*
	 * Check if mutex was unlocked after this thread was unpended.
	 * If so, skip adjusting owner's priority down.
	 */
	if (likely(mutex->owner != NULL)) {
		/* Re-derive the owner's priority from the remaining head
		 * waiter (if any), since we are no longer waiting.
		 */
		struct k_thread *waiter = z_waitq_head(&mutex->wait_q);
		new_prio = (waiter != NULL) ?
			new_prio_for_inheritance(waiter->base.prio, mutex->owner_orig_prio) :
			mutex->owner_orig_prio;
		LOG_DBG("adjusting prio down on mutex %p", mutex);
		resched = adjust_owner_prio(mutex, new_prio) || resched;
	}
	if (resched) {
		z_reschedule(&lock, key);
	} else {
		k_spin_unlock(&lock, key);
	}
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, -EAGAIN);
	return -EAGAIN;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: check the mutex object, then defer to the impl. */
static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex,
				      k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_lock(mutex, timeout);
}
#include <zephyr/syscalls/k_mutex_lock_mrsh.c>
#endif /* CONFIG_USERSPACE */
/*
 * Unlock @a mutex, restoring the owner's original priority and handing
 * ownership directly to the highest-priority waiter, if any.
 *
 * @return 0 on success, -EINVAL if the mutex has no owner, -EPERM if the
 *         caller is not the owner.
 */
int z_impl_k_mutex_unlock(struct k_mutex *mutex)
{
	struct k_thread *new_owner;
	__ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mutex, unlock, mutex);
	CHECKIF(mutex->owner == NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EINVAL);
		return -EINVAL;
	}
	/*
	 * The current thread does not own the mutex.
	 */
	CHECKIF(mutex->owner != _current) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EPERM);
		return -EPERM;
	}
	/*
	 * Attempt to unlock a mutex which is unlocked. mutex->lock_count
	 * cannot be zero if the current thread is equal to mutex->owner,
	 * therefore no underflow check is required. Use assert to catch
	 * undefined behavior.
	 */
	__ASSERT_NO_MSG(mutex->lock_count > 0U);
	LOG_DBG("mutex %p lock_count: %d", mutex, mutex->lock_count);
	/*
	 * If we are the owner and count is greater than 1, then decrement
	 * the count and return and keep current thread as the owner.
	 */
	if (mutex->lock_count > 1U) {
		mutex->lock_count--;
		goto k_mutex_unlock_return;
	}
	k_spinlock_key_t key = k_spin_lock(&lock);
	/* Drop any inherited boost before transferring ownership */
	adjust_owner_prio(mutex, mutex->owner_orig_prio);
	/* Get the new owner, if any */
	new_owner = z_unpend_first_thread(&mutex->wait_q);
	mutex->owner = new_owner;
	LOG_DBG("new owner of mutex %p: %p (prio: %d)",
		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
	if (new_owner != NULL) {
		/*
		 * new owner is already of higher or equal prio than first
		 * waiter since the wait queue is priority-based: no need to
		 * adjust its priority
		 */
		mutex->owner_orig_prio = new_owner->base.prio;
		/* New owner's pending k_mutex_lock() returns success (0) */
		arch_thread_return_value_set(new_owner, 0);
		z_ready_thread(new_owner);
		z_reschedule(&lock, key);
	} else {
		/* No waiters: the mutex becomes fully unlocked */
		mutex->lock_count = 0U;
		k_spin_unlock(&lock, key);
	}
k_mutex_unlock_return:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, 0);
	return 0;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: check the mutex object, then defer to the impl. */
static inline int z_vrfy_k_mutex_unlock(struct k_mutex *mutex)
{
	K_OOPS(K_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_unlock(mutex);
}
#include <zephyr/syscalls/k_mutex_unlock_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_MUTEX
/* Boot hook: register the mutex object-core type and link in every
 * statically defined mutex. Always returns 0.
 */
static int init_mutex_obj_core_list(void)
{
	/* Initialize mutex object type */
	z_obj_type_init(&obj_type_mutex, K_OBJ_TYPE_MUTEX_ID,
			offsetof(struct k_mutex, obj_core));
	/* Initialize and link statically defined mutexes */
	STRUCT_SECTION_FOREACH(k_mutex, mutex) {
		k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);
	}
	return 0;
}
SYS_INIT(init_mutex_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_MUTEX */
``` | /content/code_sandbox/kernel/mutex.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,265 |
```c
/*
*
*/
/* Linkers may treat weak functions differently if they are located within
* the same object that calls the symbol or not.
*
* For example, when using armlink, then if the weak symbol is inside the object
* referring to it the weak symbol will be used. This will result in the symbol
* being multiply defined because both the weak and strong symbols are used.
*
* To GNU ld, it doesn't matter if the weak symbol is placed in the same object
* which uses the weak symbol. GNU ld will always link to the strong version.
*
* Having the weak main symbol in an independent file ensures that it will be
* correctly treated by multiple linkers.
*/
#include <kernel_internal.h>
/* Weak fallback main(): overridden by any application-defined main().
 * Kept in its own translation unit so all linkers resolve the strong
 * symbol correctly (see comment above).
 */
int __weak main(void)
{
	/* NOP default main() if the application does not provide one. */
	arch_nop();
	return 0;
}
``` | /content/code_sandbox/kernel/main_weak.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 183 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
static uint64_t curr_tick;
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);
static struct k_spinlock timeout_lock;
#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
? K_TICKS_FOREVER : INT_MAX)
/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <zephyr/syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */
/* Head of the global timeout delta list, or NULL when empty. */
static struct _timeout *first(void)
{
	sys_dnode_t *node = sys_dlist_peek_head(&timeout_list);

	if (node == NULL) {
		return NULL;
	}

	return CONTAINER_OF(node, struct _timeout, node);
}
/* Successor of @a t in the timeout list, or NULL at the tail. */
static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *node = sys_dlist_peek_next(&timeout_list, &t->node);

	if (node == NULL) {
		return NULL;
	}

	return CONTAINER_OF(node, struct _timeout, node);
}
/* Unlink @a t from the timeout list, folding its remaining delta into
 * its successor so the rest of the list keeps the same absolute expiry.
 */
static void remove_timeout(struct _timeout *t)
{
	struct _timeout *successor = next(t);

	if (successor != NULL) {
		successor->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}
/* Ticks elapsed since the last announcement, or 0 while an announcement
 * is in flight (see rationale below).
 */
static int32_t elapsed(void)
{
	/* While sys_clock_announce() is executing, new relative timeouts will be
	 * scheduled relatively to the currently firing timeout's original tick
	 * value (=curr_tick) rather than relative to the current
	 * sys_clock_elapsed().
	 *
	 * This means that timeouts being scheduled from within timeout callbacks
	 * will be scheduled at well-defined offsets from the currently firing
	 * timeout.
	 *
	 * As a side effect, the same will happen if an ISR with higher priority
	 * preempts a timeout callback and schedules a timeout.
	 *
	 * The distinction is implemented by looking at announce_remaining which
	 * will be non-zero while sys_clock_announce() is executing and zero
	 * otherwise.
	 */
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}
/* Ticks until the next queued timeout fires (clamped at 0), or MAX_WAIT
 * when the list is empty or the delta would overflow an int32_t.
 */
static int32_t next_timeout(void)
{
	struct _timeout *head = first();
	int32_t ticks_elapsed = elapsed();

	if (head == NULL) {
		return MAX_WAIT;
	}

	if ((int64_t)(head->dticks - ticks_elapsed) > (int64_t)INT_MAX) {
		return MAX_WAIT;
	}

	return MAX(0, head->dticks - ticks_elapsed);
}
/*
 * Queue @a to on the global timeout list so @a fn fires after @a timeout.
 * K_FOREVER timeouts are never queued. The list stores deltas: each
 * node's dticks is relative to its predecessor's expiry.
 */
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif /* CONFIG_KERNEL_COHERENCE */
	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;
	K_SPINLOCK(&timeout_lock) {
		struct _timeout *t;
		/* Absolute timeouts (64-bit config only) become a delta from
		 * curr_tick; relative ones are anchored via elapsed().
		 */
		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    (Z_TICK_ABS(timeout.ticks) >= 0)) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;
			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}
		/* Walk to the insertion point, consuming predecessors'
		 * deltas from to->dticks along the way.
		 */
		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}
		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}
		/* Reprogram the timer driver only when this became the
		 * soonest timeout and no announcement is in progress.
		 */
		if (to == first() && announce_remaining == 0) {
			sys_clock_set_timeout(next_timeout(), false);
		}
	}
}
/* Cancel a pending timeout. Returns 0 on success, or -EINVAL when the
 * timeout is not queued (already expired or never added).
 */
int z_abort_timeout(struct _timeout *to)
{
	int result = -EINVAL;

	K_SPINLOCK(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			result = 0;
		}
	}

	return result;
}
/* must be locked */
/* Sum of deltas up to and including @a timeout: its expiry relative to
 * the last announced tick. Caller must hold timeout_lock.
 */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t total = 0;
	struct _timeout *t;

	for (t = first(); t != NULL; t = next(t)) {
		total += t->dticks;
		if (t == timeout) {
			break;
		}
	}

	return total;
}
/* Ticks remaining until @a timeout fires; 0 when it is not queued. */
k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t remaining = 0;

	K_SPINLOCK(&timeout_lock) {
		if (!z_is_inactive_timeout(timeout)) {
			remaining = timeout_rem(timeout) - elapsed();
		}
	}

	return remaining;
}
/* Absolute tick at which @a timeout fires; the current tick when it is
 * not queued.
 */
k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t expiry = 0;

	K_SPINLOCK(&timeout_lock) {
		expiry = curr_tick;
		if (!z_is_inactive_timeout(timeout)) {
			expiry += timeout_rem(timeout);
		}
	}

	return expiry;
}
/* Ticks until the next queued timeout, or K_TICKS_FOREVER when the
 * timeout list is empty (locked wrapper around next_timeout()).
 */
int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;
	K_SPINLOCK(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}
/*
 * Called by the timer driver to report that @a ticks ticks have elapsed.
 * Expires and invokes due timeout callbacks (with the lock dropped around
 * each callback), advances curr_tick, and reprograms the driver.
 */
void sys_clock_announce(int32_t ticks)
{
	k_spinlock_key_t key = k_spin_lock(&timeout_lock);
	/* We release the lock around the callbacks below, so on SMP
	 * systems someone might be already running the loop. Don't
	 * race (which will cause parallel execution of "sequential"
	 * timeouts and confuse apps), just increment the tick count
	 * and return.
	 */
	if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}
	announce_remaining = ticks;
	struct _timeout *t;
	/* Expire every timeout due within the announced window. Callbacks
	 * run without the lock held, so re-fetch the head each iteration.
	 */
	for (t = first();
	     (t != NULL) && (t->dticks <= announce_remaining);
	     t = first()) {
		int dt = t->dticks;
		curr_tick += dt;
		t->dticks = 0;
		remove_timeout(t);
		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
		announce_remaining -= dt;
	}
	/* Charge whatever remains of the window to the surviving head */
	if (t != NULL) {
		t->dticks -= announce_remaining;
	}
	curr_tick += announce_remaining;
	announce_remaining = 0;
	sys_clock_set_timeout(next_timeout(), false);
	k_spin_unlock(&timeout_lock, key);
#ifdef CONFIG_TIMESLICING
	z_time_slice();
#endif /* CONFIG_TIMESLICING */
}
/* Current 64-bit uptime in ticks (announced ticks plus the time elapsed
 * since the last announcement).
 */
int64_t sys_clock_tick_get(void)
{
	uint64_t ticks = 0U;

	K_SPINLOCK(&timeout_lock) {
		ticks = curr_tick + elapsed();
	}

	return ticks;
}
/* Truncated 32-bit tick count. The tickless build must compute the full
 * value; the ticking build can read curr_tick directly.
 */
uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif /* CONFIG_TICKLESS_KERNEL */
}
/* k_uptime_ticks() implementation: current uptime in ticks. */
int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: no arguments to validate, just forward. */
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <zephyr/syscalls/k_uptime_ticks_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Convert a (possibly relative) timeout into an absolute timepoint.
 * K_FOREVER maps to the UINT64_MAX sentinel, K_NO_WAIT to tick 0.
 */
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
{
	k_timepoint_t result;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		/* Sentinel: never expires */
		result.tick = UINT64_MAX;
		return result;
	}

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/* Sentinel: already expired */
		result.tick = 0;
		return result;
	}

	k_ticks_t dt = timeout.ticks;

	if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
		/* Already an absolute tick value */
		result.tick = Z_TICK_ABS(dt);
	} else {
		result.tick = sys_clock_tick_get() + MAX(1, dt);
	}

	return result;
}
/* Convert an absolute timepoint back into a relative timeout from now.
 * The sentinels round-trip to K_FOREVER / K_NO_WAIT; a past timepoint
 * yields a zero-tick timeout.
 */
k_timeout_t sys_timepoint_timeout(k_timepoint_t timepoint)
{
	uint64_t now;

	if (timepoint.tick == UINT64_MAX) {
		return K_FOREVER;
	}

	if (timepoint.tick == 0) {
		return K_NO_WAIT;
	}

	now = sys_clock_tick_get();
	if (timepoint.tick <= now) {
		return K_TICKS(0);
	}

	return K_TICKS(timepoint.tick - now);
}
#ifdef CONFIG_ZTEST
/* Test-only hook: force the announced tick counter to @a tick. */
void z_impl_sys_clock_tick_set(uint64_t tick)
{
	curr_tick = tick;
}
/* Syscall verifier for the test hook: no validation required. */
void z_vrfy_sys_clock_tick_set(uint64_t tick)
{
	z_impl_sys_clock_tick_set(tick);
}
#endif /* CONFIG_ZTEST */
``` | /content/code_sandbox/kernel/timeout.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,046 |
```c
/*
*
*/
/**
* @file
* @brief Compiler stack protection (kernel part)
*
* This module provides functions to support compiler stack protection
* using canaries. This feature is enabled with configuration
* CONFIG_STACK_CANARIES=y.
*
* When this feature is enabled, the compiler generated code refers to
* function __stack_chk_fail and global variable __stack_chk_guard.
*/
#include <zephyr/toolchain.h> /* compiler specific configurations */
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/kernel.h>
#include <zephyr/app_memory/app_memdomain.h>
/**
*
* @brief Stack canary error handler
*
* This function is invoked when a stack canary error is detected.
*
* @return Does not return
*/
/* Invoked (via the __stack_chk_fail alias below) when compiler-inserted
 * canary checking detects stack corruption; raises a fatal error and
 * never returns.
 */
void _StackCheckHandler(void)
{
	/* Stack canary error is a software fatal condition; treat it as such.
	 */
	z_except_reason(K_ERR_STACK_CHK_FAIL);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
/* Global variable */
/*
* Symbol referenced by GCC compiler generated code for canary value.
* The canary value gets initialized in z_cstart().
*/
#ifdef CONFIG_STACK_CANARIES_TLS
__thread volatile uintptr_t __stack_chk_guard;
#elif CONFIG_USERSPACE
K_APP_DMEM(z_libc_partition) volatile uintptr_t __stack_chk_guard;
#else
__noinit volatile uintptr_t __stack_chk_guard;
#endif
/**
*
* @brief Referenced by GCC compiler generated code
*
* This routine is invoked when a stack canary error is detected, indicating
* a buffer overflow or stack corruption problem.
*/
FUNC_ALIAS(_StackCheckHandler, __stack_chk_fail, void);
``` | /content/code_sandbox/kernel/compiler_stack_protect.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 374 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <kernel_arch_interface.h>
/*
 * Spin (without sleeping) for @a usec_to_wait microseconds, using the
 * arch hook, the system clock, or a calibrated NOP loop depending on
 * configuration.
 */
void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
	if (usec_to_wait == 0U) {
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
		return;
	}
#if defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	arch_busy_wait(usec_to_wait);
#elif defined(CONFIG_SYS_CLOCK_EXISTS)
	uint32_t start_cycles = k_cycle_get_32();
	uint32_t cycles_to_wait = k_us_to_cyc_ceil32(usec_to_wait);
	for (;;) {
		uint32_t current_cycles = k_cycle_get_32();
		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	/*
	 * Crude busy loop for the purpose of being able to configure out
	 * system timer support.
	 */
	unsigned int loops_per_usec = CONFIG_BUSYWAIT_CPU_LOOPS_PER_USEC;
	unsigned int loops = loops_per_usec * usec_to_wait;
	while (loops-- > 0) {
		arch_nop();
	}
#endif
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: the argument is a plain value, nothing to check. */
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
#include <zephyr/syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/kernel/busy_wait.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 371 |
```c
/*
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
/* We are not building thread.c when MULTITHREADING=n, so we
* need to provide a few stubs here.
*/
/* Single-thread stub: defer to the architecture's ISR-context check. */
bool k_is_in_isr(void)
{
	return arch_is_in_isr();
}
/* This is a fallback implementation of k_sleep() for when multi-threading is
* disabled. The main implementation is in sched.c.
*/
/*
 * Single-thread k_sleep(): K_FOREVER idles the CPU waiting for an
 * interrupt; finite timeouts busy-wait instead of suspending.
 *
 * @return K_TICKS_FOREVER for K_FOREVER, otherwise 0 (no early wakeup
 *         is possible without a scheduler).
 */
int32_t z_impl_k_sleep(k_timeout_t timeout)
{
	k_ticks_t ticks;
	uint32_t expected_wakeup_ticks;
	__ASSERT(!arch_is_in_isr(), "");
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
	/* in case of K_FOREVER, we suspend */
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		/* In Single Thread, just wait for an interrupt saving power */
		k_cpu_idle();
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
		return (int32_t) K_TICKS_FOREVER;
	}
	ticks = timeout.ticks;
	/* Relative timeouts are anchored to the current tick; absolute
	 * ones (Z_TICK_ABS > 0) are used as-is.
	 */
	if (Z_TICK_ABS(ticks) <= 0) {
		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
	} else {
		expected_wakeup_ticks = Z_TICK_ABS(ticks);
	}
	/* busy wait to be time coherent since subsystems may depend on it */
	/* NOTE(review): the wait converts the absolute expected_wakeup_ticks
	 * value (not the delta from now) to microseconds — confirm intended.
	 */
	z_impl_k_busy_wait(k_ticks_to_us_ceil32(expected_wakeup_ticks));
	/* Always reports 0 ms remaining: the full busy-wait elapsed */
	int32_t ret = k_ticks_to_ms_ceil64(0);
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
	return ret;
}
``` | /content/code_sandbox/kernel/nothread.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 344 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/timing/timing.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/check.h>
/* Need one of these for this to work */
#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
#endif /* !CONFIG_USE_SWITCH && !CONFIG_INSTRUMENT_THREAD_SWITCHING */
static struct k_spinlock usage_lock;
/* Current cycle timestamp for usage accounting. Never returns 0, since
 * 0 is the "accounting stopped" sentinel stored in cpu->usage0.
 */
static uint32_t usage_now(void)
{
	uint32_t now;
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	now = (uint32_t)timing_counter_get();
#else
	now = k_cycle_get_32();
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */
	/* Edge case: we use a zero as a null ("stop() already called") */
	return (now == 0) ? 1 : now;
}
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
/* Charge @a cycles to the per-CPU usage stats (non-idle threads only).
 * NOTE: the if/else braces deliberately straddle the #ifdef so the idle
 * branch only exists when ANALYSIS stats are enabled — do not "fix" the
 * brace structure without accounting for both preprocessor variants.
 */
static void sched_cpu_update_usage(struct _cpu *cpu, uint32_t cycles)
{
	if (!cpu->usage->track_usage) {
		return;
	}
	if (cpu->current != cpu->idle_thread) {
		cpu->usage->total += cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		cpu->usage->current += cycles;
		if (cpu->usage->longest < cpu->usage->current) {
			cpu->usage->longest = cpu->usage->current;
		}
	} else {
		/* Idle: close the current measurement window */
		cpu->usage->current = 0;
		cpu->usage->num_windows++;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
	}
}
#else
#define sched_cpu_update_usage(cpu, cycles) do { } while (0)
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
/* Charge @a cycles to @a thread's usage totals (and, with ANALYSIS
 * stats, its current-window and peak counters).
 */
static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles)
{
	thread->base.usage.total += cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	thread->base.usage.current += cycles;
	if (thread->base.usage.longest < thread->base.usage.current) {
		thread->base.usage.longest = thread->base.usage.current;
	}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}
/* Begin charging CPU time to @a thread: record the start timestamp in
 * the current CPU's usage0 (and, with ANALYSIS stats, open a new
 * measurement window for the thread).
 */
void z_sched_usage_start(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	k_spinlock_key_t key;
	key = k_spin_lock(&usage_lock);
	_current_cpu->usage0 = usage_now(); /* Always update */
	if (thread->base.usage.track_usage) {
		thread->base.usage.num_windows++;
		thread->base.usage.current = 0;
	}
	k_spin_unlock(&usage_lock, key);
#else
	/* One write through a volatile pointer doesn't require
	 * synchronization as long as _usage() treats it as volatile
	 * (we can't race with _stop() by design).
	 */
	_current_cpu->usage0 = usage_now();
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}
/* Stop charging CPU time on the current CPU: account the cycles since
 * z_sched_usage_start() to the running thread and the CPU, then clear
 * the usage0 timestamp (0 == accounting stopped).
 */
void z_sched_usage_stop(void)
{
	struct _cpu *cpu;
	uint32_t start;
	k_spinlock_key_t key = k_spin_lock(&usage_lock);

	cpu = _current_cpu;
	start = cpu->usage0;

	if (start != 0) {
		uint32_t delta = usage_now() - start;

		if (cpu->current->base.usage.track_usage) {
			sched_thread_update_usage(cpu->current, delta);
		}
		sched_cpu_update_usage(cpu, delta);
	}

	cpu->usage0 = 0;
	k_spin_unlock(&usage_lock, key);
}
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
/* Fill @a stats with runtime statistics for CPU @a cpu_id.
 * NOTE(review): after the freshness update, all stats are read through
 * [cpu] which is _current_cpu — not _kernel.cpus[cpu_id] — even when
 * @a cpu_id names a different CPU; confirm this is intended.
 */
void z_sched_cpu_usage(uint8_t cpu_id, struct k_thread_runtime_stats *stats)
{
	k_spinlock_key_t key;
	struct _cpu *cpu;
	key = k_spin_lock(&usage_lock);
	cpu = _current_cpu;
	if (&_kernel.cpus[cpu_id] == cpu) {
		uint32_t now = usage_now();
		uint32_t cycles = now - cpu->usage0;
		/*
		 * Getting stats for the current CPU. Update both its
		 * current thread stats and the CPU stats as the CPU's
		 * [usage0] field will also get updated. This keeps all
		 * that information up-to-date.
		 */
		if (cpu->current->base.usage.track_usage) {
			sched_thread_update_usage(cpu->current, cycles);
		}
		sched_cpu_update_usage(cpu, cycles);
		cpu->usage0 = now;
	}
	stats->total_cycles = cpu->usage->total;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current_cycles = cpu->usage->current;
	stats->peak_cycles = cpu->usage->longest;
	if (cpu->usage->num_windows == 0) {
		stats->average_cycles = 0;
	} else {
		stats->average_cycles = stats->total_cycles /
					cpu->usage->num_windows;
	}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
	/* Idle cycles are taken from the requested CPU's idle thread */
	stats->idle_cycles =
		_kernel.cpus[cpu_id].idle_thread->base.usage.total;
	stats->execution_cycles = stats->total_cycles + stats->idle_cycles;
	k_spin_unlock(&usage_lock, key);
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
/* Fill @a stats with runtime statistics for @a thread, first folding in
 * the cycles accumulated since the current accounting window began when
 * the thread happens to be the one running on this CPU.
 */
void z_sched_thread_usage(struct k_thread *thread,
			  struct k_thread_runtime_stats *stats)
{
	struct _cpu *cpu;
	k_spinlock_key_t key;
	key = k_spin_lock(&usage_lock);
	cpu = _current_cpu;
	if (thread == cpu->current) {
		uint32_t now = usage_now();
		uint32_t cycles = now - cpu->usage0;
		/*
		 * Getting stats for the current thread. Update both the
		 * current thread stats and its CPU stats as the CPU's
		 * [usage0] field will also get updated. This keeps all
		 * that information up-to-date.
		 */
		if (thread->base.usage.track_usage) {
			sched_thread_update_usage(thread, cycles);
		}
		sched_cpu_update_usage(cpu, cycles);
		cpu->usage0 = now;
	}
	stats->execution_cycles = thread->base.usage.total;
	stats->total_cycles = thread->base.usage.total;
	/* Copy-out the thread's usage stats */
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current_cycles = thread->base.usage.current;
	stats->peak_cycles = thread->base.usage.longest;
	if (thread->base.usage.num_windows == 0) {
		stats->average_cycles = 0;
	} else {
		stats->average_cycles = stats->total_cycles /
					thread->base.usage.num_windows;
	}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/* Idle time is a per-CPU notion; always 0 for a single thread */
	stats->idle_cycles = 0;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
	k_spin_unlock(&usage_lock, key);
}
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
/*
 * Enable runtime stats gathering for [thread], opening a fresh measurement
 * window (num_windows is bumped, current-window counter cleared).
 *
 * @return 0 on success, -EINVAL if [thread] is NULL.
 */
int k_thread_runtime_stats_enable(k_tid_t thread)
{
	k_spinlock_key_t key;

	CHECKIF(thread == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&usage_lock);

	/* No-op if tracking is already on; avoids double-counting windows */
	if (!thread->base.usage.track_usage) {
		thread->base.usage.track_usage = true;
		thread->base.usage.num_windows++;
		thread->base.usage.current = 0;
	}

	k_spin_unlock(&usage_lock, key);

	return 0;
}
/*
 * Disable runtime stats gathering for [thread]. If the thread is currently
 * executing on this CPU, fold the in-flight cycles into its counters first
 * so the final totals are accurate.
 *
 * @return 0 on success, -EINVAL if [thread] is NULL.
 */
int k_thread_runtime_stats_disable(k_tid_t thread)
{
	k_spinlock_key_t key;

	CHECKIF(thread == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&usage_lock);
	struct _cpu *cpu = _current_cpu;

	if (thread->base.usage.track_usage) {
		thread->base.usage.track_usage = false;

		/* Flush cycles accumulated since the last checkpoint */
		if (thread == cpu->current) {
			uint32_t cycles = usage_now() - cpu->usage0;

			sched_thread_update_usage(thread, cycles);
			sched_cpu_update_usage(cpu, cycles);
		}
	}

	k_spin_unlock(&usage_lock, key);

	return 0;
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
/*
 * Enable system-wide (per-CPU) runtime stats gathering on every CPU.
 *
 * The current CPU's tracking flag is used as the system-wide indicator:
 * if it is already set, all CPUs are assumed enabled and nothing is done.
 */
void k_sys_runtime_stats_enable(void)
{
	k_spinlock_key_t key;

	key = k_spin_lock(&usage_lock);

	if (_current_cpu->usage->track_usage) {
		/*
		 * Usage tracking is already enabled on the current CPU
		 * and thus on all other CPUs (if applicable). There is
		 * nothing left to do.
		 */
		k_spin_unlock(&usage_lock, key);
		return;
	}

	/* Enable gathering of runtime stats on each CPU */
	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		_kernel.cpus[i].usage->track_usage = true;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		/* Start a new analysis window for each CPU */
		_kernel.cpus[i].usage->num_windows++;
		_kernel.cpus[i].usage->current = 0;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
	}

	k_spin_unlock(&usage_lock, key);
}
/*
 * Disable system-wide (per-CPU) runtime stats gathering on every CPU,
 * folding each CPU's in-flight cycles into its totals before turning the
 * tracking flag off.
 */
void k_sys_runtime_stats_disable(void)
{
	struct _cpu *cpu;
	k_spinlock_key_t key;

	key = k_spin_lock(&usage_lock);

	if (!_current_cpu->usage->track_usage) {
		/*
		 * Usage tracking is already disabled on the current CPU
		 * and thus on all other CPUs (if applicable). There is
		 * nothing left to do.
		 */
		k_spin_unlock(&usage_lock, key);
		return;
	}

	uint32_t now = usage_now();
	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		cpu = &_kernel.cpus[i];
		/* usage0 == 0 means no measurement period is in progress */
		if (cpu->usage0 != 0) {
			sched_cpu_update_usage(cpu, now - cpu->usage0);
		}
		cpu->usage->track_usage = false;
	}

	k_spin_unlock(&usage_lock, key);
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
/* Object-core hook: copy a thread's raw k_cycle_stats, snapshotted while
 * holding usage_lock so the counters are internally consistent.
 */
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t lock_key = k_spin_lock(&usage_lock);

	(void)memcpy(stats, obj_core->stats, sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, lock_key);

	return 0;
}
/* Object-core hook: report a thread's processed usage statistics. */
int z_thread_stats_query(struct k_obj_core *obj_core, void *stats)
{
	struct k_thread *owner =
		CONTAINER_OF(obj_core, struct k_thread, obj_core);

	z_sched_thread_usage(owner, stats);

	return 0;
}
/*
 * Object-core hook: reset a thread's usage statistics to zero.
 *
 * If the thread is running on this CPU, the CPU's measurement checkpoint
 * is advanced so cycles accrued before the reset are not re-attributed.
 */
int z_thread_stats_reset(struct k_obj_core *obj_core)
{
	k_spinlock_key_t key;
	struct k_cycle_stats *stats;
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);
	key = k_spin_lock(&usage_lock);
	stats = obj_core->stats;

	stats->total = 0ULL;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current = 0ULL;
	stats->longest = 0ULL;
	/* Keep one open window if tracking is active, otherwise none */
	stats->num_windows = (thread->base.usage.track_usage) ? 1U : 0U;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

	if (thread != _current_cpu->current) {
		/*
		 * If the thread is not running, there is nothing else to do.
		 * If the thread is running on another core, then it is not
		 * safe to do anything else but unlock and return (and pretend
		 * that its stats were reset at the start of its execution
		 * window).
		 */
		k_spin_unlock(&usage_lock, key);
		return 0;
	}

	/* Update the current CPU stats. */
	uint32_t now = usage_now();
	uint32_t cycles = now - _current_cpu->usage0;

	sched_cpu_update_usage(_current_cpu, cycles);
	_current_cpu->usage0 = now;

	k_spin_unlock(&usage_lock, key);

	return 0;
}
/* Object-core hook: stop gathering usage stats for the owning thread.
 * Only meaningful with CONFIG_SCHED_THREAD_USAGE_ANALYSIS.
 */
int z_thread_stats_disable(struct k_obj_core *obj_core)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	struct k_thread *owner =
		CONTAINER_OF(obj_core, struct k_thread, obj_core);

	return k_thread_runtime_stats_disable(owner);
#else
	return -ENOTSUP;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}
/* Object-core hook: start gathering usage stats for the owning thread.
 * Only meaningful with CONFIG_SCHED_THREAD_USAGE_ANALYSIS.
 */
int z_thread_stats_enable(struct k_obj_core *obj_core)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	struct k_thread *owner =
		CONTAINER_OF(obj_core, struct k_thread, obj_core);

	return k_thread_runtime_stats_enable(owner);
#else
	return -ENOTSUP;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
/* Object-core hook: copy a CPU's raw k_cycle_stats under usage_lock. */
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t lock_key = k_spin_lock(&usage_lock);

	(void)memcpy(stats, obj_core->stats, sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, lock_key);

	return 0;
}
/* Object-core hook: report a CPU's processed usage statistics. */
int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats)
{
	struct _cpu *cpu;

	cpu = CONTAINER_OF(obj_core, struct _cpu, obj_core);

	z_sched_cpu_usage(cpu->id, stats);

	return 0;
}
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
/* Object-core hook: copy the raw k_cycle_stats of every CPU in one shot.
 * The destination must hold CONFIG_MP_MAX_NUM_CPUS entries.
 */
int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t lock_key = k_spin_lock(&usage_lock);

	(void)memcpy(stats, obj_core->stats,
		     CONFIG_MP_MAX_NUM_CPUS * sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, lock_key);

	return 0;
}
/* Object-core hook: report aggregated usage stats for the whole system. */
int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats)
{
	ARG_UNUSED(obj_core);

	return k_thread_runtime_stats_all_get(stats);
}
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
``` | /content/code_sandbox/kernel/usage.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,898 |
```c
/*
*
*/
#include "kernel_internal.h"
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/kernel/thread_stack.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/bitarray.h>
#include <zephyr/sys/kobject.h>
#include <zephyr/internal/syscall_handler.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#if CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0
#define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE
#else
#define BA_SIZE 1
#endif /* CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0 */
struct dyn_cb_data {
k_tid_t tid;
k_thread_stack_t *stack;
};
static K_THREAD_STACK_ARRAY_DEFINE(dynamic_stack, CONFIG_DYNAMIC_THREAD_POOL_SIZE,
CONFIG_DYNAMIC_THREAD_STACK_SIZE);
SYS_BITARRAY_DEFINE_STATIC(dynamic_ba, BA_SIZE);
/* Heap-allocate a thread stack with the requested alignment and size. */
static k_thread_stack_t *z_thread_stack_alloc_dyn(size_t align, size_t size)
{
	return z_thread_aligned_alloc(align, size);
}
/* Hand out one fixed-size stack from the static pool.
 *
 * Returns NULL when the requested size exceeds the pool's per-stack size
 * or when every pool slot is already taken.
 */
static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size)
{
	size_t slot;

	if (size > CONFIG_DYNAMIC_THREAD_STACK_SIZE) {
		LOG_DBG("stack size %zu is > pool stack size %d", size,
			CONFIG_DYNAMIC_THREAD_STACK_SIZE);
		return NULL;
	}

	if (sys_bitarray_alloc(&dynamic_ba, 1, &slot) < 0) {
		LOG_DBG("unable to allocate stack from pool");
		return NULL;
	}

	__ASSERT_NO_MSG(slot < CONFIG_DYNAMIC_THREAD_POOL_SIZE);

	return (k_thread_stack_t *)&dynamic_stack[slot];
}
/* Heap-allocate a stack, dispatching on whether it is for a user thread.
 *
 * User-mode stacks must be kernel objects (so the kernel can validate
 * them), hence the kobject allocator; kernel-mode stacks come straight
 * off the thread heap with kernel-stack alignment/size adjustments.
 */
static k_thread_stack_t *stack_alloc_dyn(size_t size, int flags)
{
	if ((flags & K_USER) == K_USER) {
#ifdef CONFIG_DYNAMIC_OBJECTS
		return k_object_alloc_size(K_OBJ_THREAD_STACK_ELEMENT, size);
#else
		/* Dynamic user stack needs a kobject, so if this option is not
		 * enabled we can't proceed.
		 */
		return NULL;
#endif /* CONFIG_DYNAMIC_OBJECTS */
	}

	return z_thread_stack_alloc_dyn(Z_KERNEL_STACK_OBJ_ALIGN,
			K_KERNEL_STACK_LEN(size));
}
/* Allocate a dynamic thread stack, trying the heap and the static pool in
 * the order selected by Kconfig (PREFER_ALLOC vs PREFER_POOL).
 *
 * Returns NULL if neither source can satisfy the request.
 *
 * NOTE(review): in the PREFER_ALLOC branch stack_alloc_dyn() is called
 * without checking CONFIG_DYNAMIC_THREAD_ALLOC, unlike the PREFER_POOL
 * branch — presumably PREFER_ALLOC implies ALLOC at the Kconfig level;
 * verify against the Kconfig dependencies.
 */
k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags)
{
	k_thread_stack_t *stack = NULL;

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
		stack = stack_alloc_dyn(size, flags);
		if (stack == NULL && CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
			stack = z_thread_stack_alloc_pool(size);
		}
	} else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) {
		if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
			stack = z_thread_stack_alloc_pool(size);
		}

		if ((stack == NULL) && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
			stack = stack_alloc_dyn(size, flags);
		}
	}

	return stack;
}
#ifdef CONFIG_USERSPACE
/* Syscall verification shim: no argument validation is required here,
 * forward straight to the implementation.
 */
static inline k_thread_stack_t *z_vrfy_k_thread_stack_alloc(size_t size, int flags)
{
	return z_impl_k_thread_stack_alloc(size, flags);
}
#include <zephyr/syscalls/k_thread_stack_alloc_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* k_thread_foreach() callback: record the thread (if any) whose stack
 * base matches the stack being searched for in dyn_cb_data.
 */
static void dyn_cb(const struct k_thread *thread, void *user_data)
{
	struct dyn_cb_data *const search = (struct dyn_cb_data *)user_data;

	if ((k_thread_stack_t *)thread->stack_info.start != search->stack) {
		return;
	}

	__ASSERT(search->tid == NULL, "stack %p is associated with more than one thread!",
		 (void *)thread->stack_info.start);
	search->tid = (k_tid_t)thread;
}
/* Free a dynamically allocated thread stack.
 *
 * Refuses (-EBUSY) if a live thread is still using the stack; returns the
 * stack to the static pool when it came from there, otherwise to the heap.
 *
 * @return 0 on success, -EBUSY if in use, -EINVAL if not a known stack.
 */
int z_impl_k_thread_stack_free(k_thread_stack_t *stack)
{
	struct dyn_cb_data data = {.stack = stack};

	/* Get a possible tid associated with stack */
	k_thread_foreach(dyn_cb, &data);

	if (data.tid != NULL) {
		/* Only dummy or dead threads may have their stack freed */
		if (!(z_is_thread_state_set(data.tid, _THREAD_DUMMY) ||
		      z_is_thread_state_set(data.tid, _THREAD_DEAD))) {
			LOG_ERR("tid %p is in use!", data.tid);
			return -EBUSY;
		}
	}

	if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
		/* Pool stacks are identified by address range membership */
		if (IS_ARRAY_ELEMENT(dynamic_stack, stack)) {
			if (sys_bitarray_free(&dynamic_ba, 1, ARRAY_INDEX(dynamic_stack, stack))) {
				LOG_ERR("stack %p is not allocated!", stack);
				return -EINVAL;
			}

			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
#ifdef CONFIG_USERSPACE
		/* User stacks were allocated as kernel objects; free them
		 * through the kobject allocator so bookkeeping stays correct.
		 */
		if (k_object_find(stack)) {
			k_object_free(stack);
		} else {
			k_free(stack);
		}
#else
		k_free(stack);
#endif /* CONFIG_USERSPACE */
	} else {
		LOG_DBG("Invalid stack %p", stack);
		return -EINVAL;
	}

	return 0;
}
#ifdef CONFIG_USERSPACE
/* Syscall verification shim for k_thread_stack_free(). */
static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack)
{
	/* The thread stack object must not be in initialized state.
	 *
	 * Thread stack objects are initialized when the thread is created
	 * and de-initialized when the thread is destroyed. Since we can't
	 * free a stack that is in use, we have to check that the caller
	 * has access to the object but that it is not in use anymore.
	 */
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_THREAD_STACK_ELEMENT));

	return z_impl_k_thread_stack_free(stack);
}
#include <zephyr/syscalls/k_thread_stack_free_mrsh.c>
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/kernel/dynamic.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,208 |
```c
/*
*
*/
#include <kernel_internal.h>
#include <zephyr/spinlock.h>
/* Validate an attempt to take a spinlock: it is invalid (recursive) when
 * the lock's recorded owner word indicates it is already held by this CPU.
 */
bool z_spin_lock_valid(struct k_spinlock *l)
{
	uintptr_t owner = l->thread_cpu;

	if ((owner != 0U) && ((owner & 3U) == _current_cpu->id)) {
		return false;
	}

	return true;
}
/* Validate a spinlock release: the unlocking context must be the same
 * CPU+thread that took the lock. Clears the owner word as a side effect.
 */
bool z_spin_unlock_valid(struct k_spinlock *l)
{
	uintptr_t tcpu = l->thread_cpu;

	/* Read the owner before clearing so the check below uses the
	 * value recorded at lock time.
	 */
	l->thread_cpu = 0;

	if (arch_is_in_isr() && _current->base.thread_state & _THREAD_DUMMY) {
		/* Edge case where an ISR aborted _current */
		return true;
	}
	if (tcpu != (_current_cpu->id | (uintptr_t)_current)) {
		return false;
	}
	return true;
}
/* Record the current CPU id and thread pointer as the lock's owner.
 * (The CPU id occupies the pointer's low alignment bits.)
 */
void z_spin_lock_set_owner(struct k_spinlock *l)
{
	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}
#ifdef CONFIG_KERNEL_COHERENCE
/* Check that the spinlock itself lives in coherent memory, a requirement
 * for cross-CPU locking when CONFIG_KERNEL_COHERENCE is enabled.
 */
bool z_spin_lock_mem_coherent(struct k_spinlock *l)
{
	return arch_mem_coherent((void *)l);
}
#endif /* CONFIG_KERNEL_COHERENCE */
``` | /content/code_sandbox/kernel/spinlock_validate.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 257 |
```c
/*
*
*
* Routines for managing virtual address spaces
*/
#include <stdint.h>
#include <kernel_arch_interface.h>
#include <zephyr/spinlock.h>
#include <mmu.h>
#include <zephyr/init.h>
#include <kernel_internal.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/bitarray.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_DEMAND_PAGING
#include <zephyr/kernel/mm/demand_paging.h>
#endif /* CONFIG_DEMAND_PAGING */
/*
* General terminology:
* - A page frame is a page-sized physical memory region in RAM. It is a
* container where a data page may be placed. It is always referred to by
* physical address. We have a convention of using uintptr_t for physical
* addresses. We instantiate a struct k_mem_page_frame to store metadata for
* every page frame.
*
* - A data page is a page-sized region of data. It may exist in a page frame,
* or be paged out to some backing store. Its location can always be looked
* up in the CPU's page tables (or equivalent) by virtual address.
* The data type will always be void * or in some cases uint8_t * when we
* want to do pointer arithmetic.
*/
/* Spinlock to protect any globals in this file and serialize page table
* updates in arch code
*/
struct k_spinlock z_mm_lock;
/*
* General page frame management
*/
/* Database of all RAM page frames */
struct k_mem_page_frame k_mem_page_frames[K_MEM_NUM_PAGE_FRAMES];
#if __ASSERT_ON
/* Indicator that k_mem_page_frames has been initialized, many of these APIs do
* not work before POST_KERNEL
*/
static bool page_frames_initialized;
#endif
/* Add colors to page table dumps to indicate mapping type */
#define COLOR_PAGE_FRAMES 1
#if COLOR_PAGE_FRAMES
#define ANSI_DEFAULT "\x1B" "[0m"
#define ANSI_RED "\x1B" "[1;31m"
#define ANSI_GREEN "\x1B" "[1;32m"
#define ANSI_YELLOW "\x1B" "[1;33m"
#define ANSI_BLUE "\x1B" "[1;34m"
#define ANSI_MAGENTA "\x1B" "[1;35m"
#define ANSI_CYAN "\x1B" "[1;36m"
#define ANSI_GREY "\x1B" "[1;90m"
#define COLOR(x) printk(_CONCAT(ANSI_, x))
#else
#define COLOR(x) do { } while (false)
#endif /* COLOR_PAGE_FRAMES */
/* LCOV_EXCL_START */
/* Print a one-character summary of a page frame's state, colorized when
 * COLOR_PAGE_FRAMES is on. Legend:
 *   '-' free   'R' reserved   'B' busy   'P' pinned
 *   '.' available  'M' mapped  '?' unknown/inconsistent
 * Checked in priority order; the first matching state wins.
 */
static void page_frame_dump(struct k_mem_page_frame *pf)
{
	if (k_mem_page_frame_is_free(pf)) {
		COLOR(GREY);
		printk("-");
	} else if (k_mem_page_frame_is_reserved(pf)) {
		COLOR(CYAN);
		printk("R");
	} else if (k_mem_page_frame_is_busy(pf)) {
		COLOR(MAGENTA);
		printk("B");
	} else if (k_mem_page_frame_is_pinned(pf)) {
		COLOR(YELLOW);
		printk("P");
	} else if (k_mem_page_frame_is_available(pf)) {
		COLOR(GREY);
		printk(".");
	} else if (k_mem_page_frame_is_mapped(pf)) {
		COLOR(DEFAULT);
		printk("M");
	} else {
		COLOR(RED);
		printk("?");
	}
}
void k_mem_page_frames_dump(void)
{
int column = 0;
__ASSERT(page_frames_initialized, "%s called too early", __func__);
printk("Physical memory from 0x%lx to 0x%lx\n",
K_MEM_PHYS_RAM_START, K_MEM_PHYS_RAM_END);
for (int i = 0; i < K_MEM_NUM_PAGE_FRAMES; i++) {
struct k_mem_page_frame *pf = &k_mem_page_frames[i];
page_frame_dump(pf);
column++;
if (column == 64) {
column = 0;
printk("\n");
}
}
COLOR(DEFAULT);
if (column != 0) {
printk("\n");
}
}
/* LCOV_EXCL_STOP */
#define VIRT_FOREACH(_base, _size, _pos) \
for ((_pos) = (_base); \
(_pos) < ((uint8_t *)(_base) + (_size)); (_pos) += CONFIG_MMU_PAGE_SIZE)
#define PHYS_FOREACH(_base, _size, _pos) \
for ((_pos) = (_base); \
(_pos) < ((uintptr_t)(_base) + (_size)); (_pos) += CONFIG_MMU_PAGE_SIZE)
/*
* Virtual address space management
*
* Call all of these functions with z_mm_lock held.
*
* Overall virtual memory map: When the kernel starts, it resides in
* virtual memory in the region K_MEM_KERNEL_VIRT_START to
* K_MEM_KERNEL_VIRT_END. Unused virtual memory past this, up to the limit
* noted by CONFIG_KERNEL_VM_SIZE may be used for runtime memory mappings.
*
* If CONFIG_ARCH_MAPS_ALL_RAM is set, we do not just map the kernel image,
* but have a mapping for all RAM in place. This is for special architectural
* purposes and does not otherwise affect page frame accounting or flags;
* the only guarantee is that such RAM mapping outside of the Zephyr image
* won't be disturbed by subsequent memory mapping calls.
*
* +--------------+ <- K_MEM_VIRT_RAM_START
* | Undefined VM | <- May contain ancillary regions like x86_64's locore
* +--------------+ <- K_MEM_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START)
* | Mapping for |
* | main kernel |
* | image |
* | |
* | |
* +--------------+ <- K_MEM_VM_FREE_START
* | |
* | Unused, |
* | Available VM |
* | |
* |..............| <- mapping_pos (grows downward as more mappings are made)
* | Mapping |
* +--------------+
* | Mapping |
* +--------------+
* | ... |
* +--------------+
* | Mapping |
* +--------------+ <- mappings start here
* | Reserved | <- special purpose virtual page(s) of size K_MEM_VM_RESERVED
* +--------------+ <- K_MEM_VIRT_RAM_END
*/
/* Bitmap of virtual addresses where one bit corresponds to one page.
* This is being used for virt_region_alloc() to figure out which
* region of virtual addresses can be used for memory mapping.
*
* Note that bit #0 is the highest address so that allocation is
* done in reverse from highest address.
*/
SYS_BITARRAY_DEFINE_STATIC(virt_region_bitmap,
CONFIG_KERNEL_VM_SIZE / CONFIG_MMU_PAGE_SIZE);
static bool virt_region_inited;
#define Z_VIRT_REGION_START_ADDR K_MEM_VM_FREE_START
#define Z_VIRT_REGION_END_ADDR (K_MEM_VIRT_RAM_END - K_MEM_VM_RESERVED)
/* Convert a bitmap bit offset to the virtual start address of a region of
 * [size] bytes. Bit #0 corresponds to the highest page, so addresses are
 * computed downward from K_MEM_VIRT_RAM_END.
 */
static inline uintptr_t virt_from_bitmap_offset(size_t offset, size_t size)
{
	return POINTER_TO_UINT(K_MEM_VIRT_RAM_END)
	       - (offset * CONFIG_MMU_PAGE_SIZE) - size;
}
/* Inverse of virt_from_bitmap_offset(): map the virtual start address of a
 * [size]-byte region to its bit offset in the (top-down) region bitmap.
 */
static inline size_t virt_to_bitmap_offset(void *vaddr, size_t size)
{
	return (POINTER_TO_UINT(K_MEM_VIRT_RAM_END)
		- POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE;
}
/* Lazily initialize the virtual-region bitmap by pre-marking the pages
 * that must never be handed out: the reserved page(s) at the very top of
 * the address space and everything below K_MEM_VM_FREE_START (the kernel
 * image mapping).
 */
static void virt_region_init(void)
{
	size_t offset, num_bits;

	/* There are regions where we should never map via
	 * k_mem_map() and k_mem_map_phys_bare(). Mark them as
	 * already allocated so they will never be used.
	 */

	if (K_MEM_VM_RESERVED > 0) {
		/* Mark reserved region at end of virtual address space */
		num_bits = K_MEM_VM_RESERVED / CONFIG_MMU_PAGE_SIZE;
		(void)sys_bitarray_set_region(&virt_region_bitmap,
					      num_bits, 0);
	}

	/* Mark all bits up to Z_FREE_VM_START as allocated.
	 * num_bits first holds the region size in bytes (needed by
	 * virt_to_bitmap_offset()), then is converted to a page count.
	 */
	num_bits = POINTER_TO_UINT(K_MEM_VM_FREE_START)
		   - POINTER_TO_UINT(K_MEM_VIRT_RAM_START);
	offset = virt_to_bitmap_offset(K_MEM_VIRT_RAM_START, num_bits);
	num_bits /= CONFIG_MMU_PAGE_SIZE;
	(void)sys_bitarray_set_region(&virt_region_bitmap,
				      num_bits, offset);

	virt_region_inited = true;
}
/* Return a [size]-byte virtual region starting at [vaddr] to the bitmap.
 *
 * Call with z_mm_lock held. With CONFIG_KERNEL_DIRECT_MAP, regions may
 * lie partially or wholly outside the managed address range; only the
 * in-range pages are freed.
 */
static void virt_region_free(void *vaddr, size_t size)
{
	size_t offset, num_bits;
	uint8_t *vaddr_u8 = (uint8_t *)vaddr;

	if (unlikely(!virt_region_inited)) {
		virt_region_init();
	}

#ifndef CONFIG_KERNEL_DIRECT_MAP
	/* Without the need to support K_MEM_DIRECT_MAP, the region must be
	 * able to be represented in the bitmap. So this case is
	 * simple.
	 */
	__ASSERT((vaddr_u8 >= Z_VIRT_REGION_START_ADDR)
		 && ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR),
		 "invalid virtual address region %p (%zu)", vaddr_u8, size);
	if (!((vaddr_u8 >= Z_VIRT_REGION_START_ADDR)
	      && ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) {
		return;
	}

	offset = virt_to_bitmap_offset(vaddr, size);
	num_bits = size / CONFIG_MMU_PAGE_SIZE;
	(void)sys_bitarray_free(&virt_region_bitmap, num_bits, offset);
#else /* !CONFIG_KERNEL_DIRECT_MAP */
	/* With K_MEM_DIRECT_MAP, the region can be outside of the virtual
	 * memory space, wholly within it, or overlap partially.
	 * So additional processing is needed to make sure we only
	 * mark the pages within the bitmap.
	 */
	if (((vaddr_u8 >= Z_VIRT_REGION_START_ADDR) &&
	     (vaddr_u8 < Z_VIRT_REGION_END_ADDR)) ||
	    (((vaddr_u8 + size - 1) >= Z_VIRT_REGION_START_ADDR) &&
	     ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) {
		/* Clip the region to the managed address range */
		uint8_t *adjusted_start = MAX(vaddr_u8, Z_VIRT_REGION_START_ADDR);
		uint8_t *adjusted_end = MIN(vaddr_u8 + size,
					    Z_VIRT_REGION_END_ADDR);
		size_t adjusted_sz = adjusted_end - adjusted_start;

		offset = virt_to_bitmap_offset(adjusted_start, adjusted_sz);
		num_bits = adjusted_sz / CONFIG_MMU_PAGE_SIZE;
		(void)sys_bitarray_free(&virt_region_bitmap, num_bits, offset);
	}
#endif /* !CONFIG_KERNEL_DIRECT_MAP */
}
/* Allocate [size] bytes of virtual address space aligned to [align].
 *
 * May over-allocate to guarantee alignment and then return the unused
 * head/tail pages to the bitmap. Call with z_mm_lock held.
 *
 * @return virtual start address, or NULL if the address space is exhausted
 *         or the allocation would step into kernel memory.
 *
 * Fix: the kernel-memory error path previously called
 * sys_bitarray_free(&virt_region_bitmap, size, offset), passing the byte
 * count [size] as a *bit* (page) count and the pre-trim [offset], which
 * would free far too many bits and double-free any alignment slack already
 * returned above. Free exactly the pages still held for the (possibly
 * alignment-trimmed) region instead.
 */
static void *virt_region_alloc(size_t size, size_t align)
{
	uintptr_t dest_addr;
	size_t alloc_size;
	size_t offset;
	size_t num_bits;
	int ret;

	if (unlikely(!virt_region_inited)) {
		virt_region_init();
	}

	/* Possibly request more pages to ensure we can get an aligned virtual address */
	num_bits = (size + align - CONFIG_MMU_PAGE_SIZE) / CONFIG_MMU_PAGE_SIZE;
	alloc_size = num_bits * CONFIG_MMU_PAGE_SIZE;
	ret = sys_bitarray_alloc(&virt_region_bitmap, num_bits, &offset);
	if (ret != 0) {
		LOG_ERR("insufficient virtual address space (requested %zu)",
			size);
		return NULL;
	}

	/* Remember that bit #0 in bitmap corresponds to the highest
	 * virtual address. So here we need to go downwards (backwards?)
	 * to get the starting address of the allocated region.
	 */
	dest_addr = virt_from_bitmap_offset(offset, alloc_size);

	if (alloc_size > size) {
		uintptr_t aligned_dest_addr = ROUND_UP(dest_addr, align);

		/* Here is the memory organization when trying to get an aligned
		 * virtual address:
		 *
		 * +--------------+ <- K_MEM_VIRT_RAM_START
		 * | Undefined VM |
		 * +--------------+ <- K_MEM_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START)
		 * | Mapping for  |
		 * | main kernel  |
		 * | image        |
		 * |              |
		 * |              |
		 * +--------------+ <- K_MEM_VM_FREE_START
		 * | ...          |
		 * +==============+ <- dest_addr
		 * | Unused       |
		 * |..............| <- aligned_dest_addr
		 * |              |
		 * | Aligned      |
		 * | Mapping      |
		 * |              |
		 * |..............| <- aligned_dest_addr + size
		 * | Unused       |
		 * +==============+ <- offset from K_MEM_VIRT_RAM_END == dest_addr + alloc_size
		 * | ...          |
		 * +--------------+
		 * | Mapping      |
		 * +--------------+
		 * | Reserved     |
		 * +--------------+ <- K_MEM_VIRT_RAM_END
		 */

		/* Free the two unused regions */
		virt_region_free(UINT_TO_POINTER(dest_addr),
				 aligned_dest_addr - dest_addr);
		if (((dest_addr + alloc_size) - (aligned_dest_addr + size)) > 0) {
			virt_region_free(UINT_TO_POINTER(aligned_dest_addr + size),
					 (dest_addr + alloc_size) - (aligned_dest_addr + size));
		}

		dest_addr = aligned_dest_addr;
	}

	/* Need to make sure this does not step into kernel memory */
	if (dest_addr < POINTER_TO_UINT(Z_VIRT_REGION_START_ADDR)) {
		/* Undo the allocation: only the [size]-byte region starting
		 * at dest_addr is still marked in the bitmap at this point
		 * (alignment slack, if any, was returned above).
		 */
		(void)sys_bitarray_free(&virt_region_bitmap,
					size / CONFIG_MMU_PAGE_SIZE,
					virt_to_bitmap_offset(UINT_TO_POINTER(dest_addr),
							      size));
		return NULL;
	}

	return UINT_TO_POINTER(dest_addr);
}
/*
* Free page frames management
*
* Call all of these functions with z_mm_lock held.
*/
/* Linked list of unused and available page frames.
*
* TODO: This is very simple and treats all free page frames as being equal.
* However, there are use-cases to consolidate free pages such that entire
* SRAM banks can be switched off to save power, and so obtaining free pages
* may require a more complex ontology which prefers page frames in RAM banks
* which are still active.
*
* This implies in the future there may be multiple slists managing physical
* pages. Each page frame will still just have one snode link.
*/
static sys_sflist_t free_page_frame_list;
/* Number of unused and available free page frames.
* This information may go stale immediately.
*/
static size_t z_free_page_count;
#define PF_ASSERT(pf, expr, fmt, ...) \
__ASSERT(expr, "page frame 0x%lx: " fmt, k_mem_page_frame_to_phys(pf), \
##__VA_ARGS__)
/* Get an unused page frame. don't care which one, or NULL if there are none */
/* Pop one page frame off the free list, or NULL if the list is empty.
 * The returned frame has its VA/flags word cleared. Call with z_mm_lock
 * held.
 */
static struct k_mem_page_frame *free_page_frame_list_get(void)
{
	struct k_mem_page_frame *pf;
	sys_sfnode_t *node = sys_sflist_get(&free_page_frame_list);

	if (node == NULL) {
		return NULL;
	}

	z_free_page_count--;
	pf = CONTAINER_OF(node, struct k_mem_page_frame, node);
	PF_ASSERT(pf, k_mem_page_frame_is_free(pf),
		  "on free list but not free");
	pf->va_and_flags = 0;

	return pf;
}
/* Release a page frame back into the list of free pages */
/* Release a page frame back into the list of free pages.
 * The node is re-initialized with the FREE flag so the frame's state is
 * identifiable while it sits on the list. Call with z_mm_lock held.
 */
static void free_page_frame_list_put(struct k_mem_page_frame *pf)
{
	PF_ASSERT(pf, k_mem_page_frame_is_available(pf),
		  "unavailable page put on free list");

	sys_sfnode_init(&pf->node, K_MEM_PAGE_FRAME_FREE);
	sys_sflist_append(&free_page_frame_list, &pf->node);
	z_free_page_count++;
}
/* One-time initialization of the free page frame list. */
static void free_page_frame_list_init(void)
{
	sys_sflist_init(&free_page_frame_list);
}
/* Clear a page frame's state and return it to the free list.
 * Call with z_mm_lock held.
 */
static void page_frame_free_locked(struct k_mem_page_frame *pf)
{
	pf->va_and_flags = 0;
	free_page_frame_list_put(pf);
}
/*
* Memory Mapping
*/
/* Called after the frame is mapped in the arch layer, to update our
* local ontology (and do some assertions while we're at it)
*/
/* Record that page frame [pf] is now mapped at virtual address [addr].
 * Called after the arch layer has installed the mapping; [addr] need not
 * be page aligned (the low bits are masked off before storing).
 */
static void frame_mapped_set(struct k_mem_page_frame *pf, void *addr)
{
	PF_ASSERT(pf, !k_mem_page_frame_is_free(pf),
		  "attempted to map a page frame on the free list");
	PF_ASSERT(pf, !k_mem_page_frame_is_reserved(pf),
		  "attempted to map a reserved page frame");

	/* We do allow multiple mappings for pinned page frames
	 * since we will never need to reverse map them.
	 * This is uncommon, use-cases are for things like the
	 * Zephyr equivalent of VSDOs
	 */
	PF_ASSERT(pf, !k_mem_page_frame_is_mapped(pf) || k_mem_page_frame_is_pinned(pf),
		  "non-pinned and already mapped to %p",
		  k_mem_page_frame_to_virt(pf));

	/* Keep the existing flag bits, replace the VA portion, and mark
	 * the frame as mapped.
	 */
	uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1;
	uintptr_t va = (uintptr_t)addr & ~flags_mask;

	pf->va_and_flags &= flags_mask;
	pf->va_and_flags |= va | K_MEM_PAGE_FRAME_MAPPED;
}
/* LCOV_EXCL_START */
/* Go through page frames to find the physical address mapped
* by a virtual address.
*
* @param[in] virt Virtual Address
* @param[out] phys Physical address mapped to the input virtual address
* if such mapping exists.
*
* @retval 0 if mapping is found and valid
* @retval -EFAULT if virtual address is not mapped
*/
/* Reverse-map a virtual address by scanning every page frame for a
 * matching mapping; see the doc comment above for the contract.
 */
static int virt_to_page_frame(void *virt, uintptr_t *phys)
{
	uintptr_t paddr;
	struct k_mem_page_frame *pf;

	K_MEM_PAGE_FRAME_FOREACH(paddr, pf) {
		if (!k_mem_page_frame_is_mapped(pf)) {
			continue;
		}
		if (k_mem_page_frame_to_virt(pf) != virt) {
			continue;
		}

		if (phys != NULL) {
			*phys = k_mem_page_frame_to_phys(pf);
		}
		return 0;
	}

	return -EFAULT;
}
/* LCOV_EXCL_STOP */
__weak FUNC_ALIAS(virt_to_page_frame, arch_page_phys_get, int);
#ifdef CONFIG_DEMAND_PAGING
static int page_frame_prepare_locked(struct k_mem_page_frame *pf, bool *dirty_ptr,
bool page_in, uintptr_t *location_ptr);
static inline void do_backing_store_page_in(uintptr_t location);
static inline void do_backing_store_page_out(uintptr_t location);
#endif /* CONFIG_DEMAND_PAGING */
/* Allocate a free page frame, and map it to a specified virtual address
*
* TODO: Add optional support for copy-on-write mappings to a zero page instead
* of allocating, in which case page frames will be allocated lazily as
* the mappings to the zero page get touched. This will avoid expensive
* page-ins as memory is mapped and physical RAM or backing store storage will
* not be used if the mapped memory is unused. The cost is an empty physical
* page of zeroes.
*/
/* Allocate a free page frame and map it at virtual address [addr].
 *
 * If no frame is free and demand paging is enabled, evict a victim frame
 * (paging it out to the backing store if dirty) and reuse it.
 *
 * @param addr  Page-aligned virtual address to map
 * @param flags K_MEM_* mapping flags; K_MEM_MAP_LOCK pins the frame
 * @return 0 on success, -ENOMEM if no frame could be obtained
 *
 * Call with z_mm_lock held.
 */
static int map_anon_page(void *addr, uint32_t flags)
{
	struct k_mem_page_frame *pf;
	uintptr_t phys;
	bool lock = (flags & K_MEM_MAP_LOCK) != 0U;

	pf = free_page_frame_list_get();
	if (pf == NULL) {
#ifdef CONFIG_DEMAND_PAGING
		uintptr_t location;
		bool dirty;
		int ret;

		pf = k_mem_paging_eviction_select(&dirty);
		__ASSERT(pf != NULL, "failed to get a page frame");
		LOG_DBG("evicting %p at 0x%lx",
			k_mem_page_frame_to_virt(pf),
			k_mem_page_frame_to_phys(pf));
		ret = page_frame_prepare_locked(pf, &dirty, false, &location);
		if (ret != 0) {
			return -ENOMEM;
		}
		if (dirty) {
			/* Victim's contents must survive; write them out */
			do_backing_store_page_out(location);
		}
		pf->va_and_flags = 0;
#else
		return -ENOMEM;
#endif /* CONFIG_DEMAND_PAGING */
	}

	phys = k_mem_page_frame_to_phys(pf);
	arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags | K_MEM_CACHE_WB);

	if (lock) {
		k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
	}
	frame_mapped_set(pf, addr);
#ifdef CONFIG_DEMAND_PAGING
	/* Unpinned frames become candidates for future eviction */
	if (!lock) {
		k_mem_paging_eviction_add(pf);
	}
#endif

	LOG_DBG("memory mapping anon page %p -> 0x%lx", addr, phys);

	return 0;
}
/* Map a region of memory, bracketed by two unmapped guard pages.
 *
 * @param phys    Physical base to map (ignored when [is_anon] is true)
 * @param size    Page-aligned size in bytes (excluding guard pages)
 * @param flags   K_MEM_* permission/behavior flags
 * @param is_anon true to back the region with anonymous page frames,
 *                false to map the given physical range
 * @return virtual address of the mapped region (past the leading guard
 *         page), or NULL on failure
 */
void *k_mem_map_phys_guard(uintptr_t phys, size_t size, uint32_t flags, bool is_anon)
{
	uint8_t *dst;
	size_t total_size;
	int ret;
	k_spinlock_key_t key;
	uint8_t *pos;
	bool uninit = (flags & K_MEM_MAP_UNINIT) != 0U;

	__ASSERT(!is_anon || (is_anon && page_frames_initialized),
		 "%s called too early", __func__);
	__ASSERT((flags & K_MEM_CACHE_MASK) == 0U,
		 "%s does not support explicit cache settings", __func__);

	/* Uninitialized pages could leak kernel data to user mode */
	if (((flags & K_MEM_PERM_USER) != 0U) &&
	    ((flags & K_MEM_MAP_UNINIT) != 0U)) {
		LOG_ERR("user access to anonymous uninitialized pages is forbidden");
		return NULL;
	}
	if ((size % CONFIG_MMU_PAGE_SIZE) != 0U) {
		LOG_ERR("unaligned size %zu passed to %s", size, __func__);
		return NULL;
	}
	if (size == 0) {
		LOG_ERR("zero sized memory mapping");
		return NULL;
	}

	/* Need extra for the guard pages (before and after) which we
	 * won't map.
	 */
	if (size_add_overflow(size, CONFIG_MMU_PAGE_SIZE * 2, &total_size)) {
		LOG_ERR("too large size %zu passed to %s", size, __func__);
		return NULL;
	}

	key = k_spin_lock(&z_mm_lock);

	dst = virt_region_alloc(total_size, CONFIG_MMU_PAGE_SIZE);
	if (dst == NULL) {
		/* Address space has no free region */
		goto out;
	}

	/* Unmap both guard pages to make sure accessing them
	 * will generate fault.
	 */
	arch_mem_unmap(dst, CONFIG_MMU_PAGE_SIZE);
	arch_mem_unmap(dst + CONFIG_MMU_PAGE_SIZE + size,
		       CONFIG_MMU_PAGE_SIZE);

	/* Skip over the "before" guard page in returned address. */
	dst += CONFIG_MMU_PAGE_SIZE;

	if (is_anon) {
		/* Mapping from anonymous memory */
		VIRT_FOREACH(dst, size, pos) {
			ret = map_anon_page(pos, flags);

			if (ret != 0) {
				/* TODO: call k_mem_unmap(dst, pos - dst) when
				 * implemented in #28990 and release any guard virtual
				 * page as well.
				 */
				dst = NULL;
				goto out;
			}
		}
	} else {
		/* Mapping known physical memory.
		 *
		 * arch_mem_map() is a void function and does not return
		 * anything. Arch code usually uses ASSERT() to catch
		 * mapping errors. Assume this works correctly for now.
		 */
		arch_mem_map(dst, phys, size, flags);
	}

	if (!uninit) {
		/* If we later implement mappings to a copy-on-write
		 * zero page, won't need this step
		 */
		memset(dst, 0, size);
	}

out:
	k_spin_unlock(&z_mm_lock, key);
	return dst;
}
void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon)
{
uintptr_t phys;
uint8_t *pos;
struct k_mem_page_frame *pf;
k_spinlock_key_t key;
size_t total_size;
int ret;
/* Need space for the "before" guard page */
__ASSERT_NO_MSG(POINTER_TO_UINT(addr) >= CONFIG_MMU_PAGE_SIZE);
/* Make sure address range is still valid after accounting
* for two guard pages.
*/
pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE;
k_mem_assert_virtual_region(pos, size + (CONFIG_MMU_PAGE_SIZE * 2));
key = k_spin_lock(&z_mm_lock);
/* Check if both guard pages are unmapped.
* Bail if not, as this is probably a region not mapped
* using k_mem_map().
*/
pos = addr;
ret = arch_page_phys_get(pos - CONFIG_MMU_PAGE_SIZE, NULL);
if (ret == 0) {
__ASSERT(ret == 0,
"%s: cannot find preceding guard page for (%p, %zu)",
__func__, addr, size);
goto out;
}
ret = arch_page_phys_get(pos + size, NULL);
if (ret == 0) {
__ASSERT(ret == 0,
"%s: cannot find succeeding guard page for (%p, %zu)",
__func__, addr, size);
goto out;
}
if (is_anon) {
/* Unmapping anonymous memory */
VIRT_FOREACH(addr, size, pos) {
#ifdef CONFIG_DEMAND_PAGING
enum arch_page_location status;
uintptr_t location;
status = arch_page_location_get(pos, &location);
switch (status) {
case ARCH_PAGE_LOCATION_PAGED_OUT:
/*
* No pf is associated with this mapping.
* Simply get rid of the MMU entry and free
* corresponding backing store.
*/
arch_mem_unmap(pos, CONFIG_MMU_PAGE_SIZE);
k_mem_paging_backing_store_location_free(location);
continue;
case ARCH_PAGE_LOCATION_PAGED_IN:
/*
* The page is in memory but it may not be
* accessible in order to manage tracking
* of the ARCH_DATA_PAGE_ACCESSED flag
* meaning arch_page_phys_get() could fail.
* Still, we know the actual phys address.
*/
phys = location;
ret = 0;
break;
default:
ret = arch_page_phys_get(pos, &phys);
break;
}
#else
ret = arch_page_phys_get(pos, &phys);
#endif
__ASSERT(ret == 0,
"%s: cannot unmap an unmapped address %p",
__func__, pos);
if (ret != 0) {
/* Found an address not mapped. Do not continue. */
goto out;
}
__ASSERT(k_mem_is_page_frame(phys),
"%s: 0x%lx is not a page frame", __func__, phys);
if (!k_mem_is_page_frame(phys)) {
/* Physical address has no corresponding page frame
* description in the page frame array.
* This should not happen. Do not continue.
*/
goto out;
}
/* Grab the corresponding page frame from physical address */
pf = k_mem_phys_to_page_frame(phys);
__ASSERT(k_mem_page_frame_is_mapped(pf),
"%s: 0x%lx is not a mapped page frame", __func__, phys);
if (!k_mem_page_frame_is_mapped(pf)) {
/* Page frame is not marked mapped.
* This should not happen. Do not continue.
*/
goto out;
}
arch_mem_unmap(pos, CONFIG_MMU_PAGE_SIZE);
#ifdef CONFIG_DEMAND_PAGING
if (!k_mem_page_frame_is_pinned(pf)) {
k_mem_paging_eviction_remove(pf);
}
#endif
/* Put the page frame back into free list */
page_frame_free_locked(pf);
}
} else {
/*
* Unmapping previous mapped memory with specific physical address.
*
 * Note that we don't have to unmap the guard pages, as they should
 * have been unmapped. We just need to unmap the in-between
 * region [addr, (addr + size)).
*/
arch_mem_unmap(addr, size);
}
/* There are guard pages just before and after the mapped
* region. So we also need to free them from the bitmap.
*/
pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE;
total_size = size + (CONFIG_MMU_PAGE_SIZE * 2);
virt_region_free(pos, total_size);
out:
k_spin_unlock(&z_mm_lock, key);
}
/* Report the amount of free physical memory, in bytes, excluding any page
 * frames held in reserve for demand paging operations.
 */
size_t k_mem_free_get(void)
{
	size_t free_pages;
	k_spinlock_key_t key;

	__ASSERT(page_frames_initialized, "%s called too early", __func__);

	key = k_spin_lock(&z_mm_lock);
#ifdef CONFIG_DEMAND_PAGING
	/* A reserve of frames is kept aside for paging; only report what is
	 * usable beyond that reserve.
	 */
	free_pages = (z_free_page_count > CONFIG_DEMAND_PAGING_PAGE_FRAMES_RESERVE)
		     ? (z_free_page_count - CONFIG_DEMAND_PAGING_PAGE_FRAMES_RESERVE)
		     : 0;
#else
	free_pages = z_free_page_count;
#endif /* CONFIG_DEMAND_PAGING */
	k_spin_unlock(&z_mm_lock, key);

	return free_pages * (size_t)CONFIG_MMU_PAGE_SIZE;
}
/* Default virtual region alignment: one MMU page.
 *
 * @param phys Physical base of the region to be mapped, aligned to MMU_PAGE_SIZE
 * @param size Size of the region to be mapped, aligned to MMU_PAGE_SIZE
 *
 * @return alignment to apply to the virtual address of this region
 */
static size_t virt_region_align(uintptr_t phys, size_t size)
{
	ARG_UNUSED(size);
	ARG_UNUSED(phys);

	return CONFIG_MMU_PAGE_SIZE;
}

__weak FUNC_ALIAS(virt_region_align, arch_virt_region_align, size_t);
/* Map a physical memory region into the virtual address space.
 *
 * This may be called from arch early boot code before z_cstart() is invoked.
 * Data will be copied and BSS zeroed, but this must not rely on any
 * initialization functions being called prior to work correctly.
 *
 * @param[out] virt_ptr Receives the virtual address of the mapping,
 *                      including the sub-page offset of @p phys
 * @param      phys     Physical base address to map
 * @param      size     Size of the region in bytes
 * @param      flags    K_MEM_* caching/permission/direct-map flags
 */
void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
{
	uintptr_t aligned_phys, addr_offset;
	size_t aligned_size, align_boundary;
	k_spinlock_key_t key;
	uint8_t *dest_addr;
	size_t num_bits;
	size_t offset;

#ifndef CONFIG_KERNEL_DIRECT_MAP
	__ASSERT(!(flags & K_MEM_DIRECT_MAP), "The direct-map is not enabled");
#endif /* CONFIG_KERNEL_DIRECT_MAP */
	/* Page-align the request; addr_offset is phys' offset within the
	 * first page and is re-applied to the returned virtual pointer.
	 */
	addr_offset = k_mem_region_align(&aligned_phys, &aligned_size,
					 phys, size,
					 CONFIG_MMU_PAGE_SIZE);
	__ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_phys);
	__ASSERT(aligned_phys < (aligned_phys + (aligned_size - 1)),
		 "wraparound for physical address 0x%lx (size %zu)",
		 aligned_phys, aligned_size);

	align_boundary = arch_virt_region_align(aligned_phys, aligned_size);

	key = k_spin_lock(&z_mm_lock);

	if (IS_ENABLED(CONFIG_KERNEL_DIRECT_MAP) &&
	    (flags & K_MEM_DIRECT_MAP)) {
		/* Identity mapping: virtual address equals physical */
		dest_addr = (uint8_t *)aligned_phys;

		/* Mark the region of virtual memory bitmap as used
		 * if the region overlaps the virtual memory space.
		 *
		 * Basically if either end of region is within
		 * virtual memory space, we need to mark the bits.
		 */
		if (IN_RANGE(aligned_phys,
			      (uintptr_t)K_MEM_VIRT_RAM_START,
			      (uintptr_t)(K_MEM_VIRT_RAM_END - 1)) ||
		    IN_RANGE(aligned_phys + aligned_size - 1,
			      (uintptr_t)K_MEM_VIRT_RAM_START,
			      (uintptr_t)(K_MEM_VIRT_RAM_END - 1))) {
			/* Clamp to the part actually inside virtual RAM */
			uint8_t *adjusted_start = MAX(dest_addr, K_MEM_VIRT_RAM_START);
			uint8_t *adjusted_end = MIN(dest_addr + aligned_size,
						    K_MEM_VIRT_RAM_END);
			size_t adjusted_sz = adjusted_end - adjusted_start;

			num_bits = adjusted_sz / CONFIG_MMU_PAGE_SIZE;
			offset = virt_to_bitmap_offset(adjusted_start, adjusted_sz);
			if (sys_bitarray_test_and_set_region(
			    &virt_region_bitmap, num_bits, offset, true)) {
				goto fail;
			}
		}
	} else {
		/* Obtain an appropriately sized chunk of virtual memory */
		dest_addr = virt_region_alloc(aligned_size, align_boundary);
		if (!dest_addr) {
			goto fail;
		}
	}

	/* If this fails there's something amiss with virt_region_get */
	__ASSERT((uintptr_t)dest_addr <
		 ((uintptr_t)dest_addr + (size - 1)),
		 "wraparound for virtual address %p (size %zu)",
		 dest_addr, size);

	LOG_DBG("arch_mem_map(%p, 0x%lx, %zu, %x) offset %lu", dest_addr,
		aligned_phys, aligned_size, flags, addr_offset);

	arch_mem_map(dest_addr, aligned_phys, aligned_size, flags);
	k_spin_unlock(&z_mm_lock, key);

	*virt_ptr = dest_addr + addr_offset;
	return;
fail:
	/* May re-visit this in the future, but for now running out of
	 * virtual address space or failing the arch_mem_map() call is
	 * an unrecoverable situation.
	 *
	 * Other problems not related to resource exhaustion we leave as
	 * assertions since they are clearly programming mistakes.
	 */
	LOG_ERR("memory mapping 0x%lx (size %zu, flags 0x%x) failed",
		phys, size, flags);
	k_panic();
}
/* Remove a mapping previously established with k_mem_map_phys_bare() and
 * return the virtual region to the allocator.
 */
void k_mem_unmap_phys_bare(uint8_t *virt, size_t size)
{
	uintptr_t va_base;
	size_t va_size;
	uintptr_t va_offset;
	k_spinlock_key_t key;

	/* Page-align the request; va_offset is virt's offset within the
	 * first page.
	 */
	va_offset = k_mem_region_align(&va_base, &va_size,
				       POINTER_TO_UINT(virt), size,
				       CONFIG_MMU_PAGE_SIZE);
	__ASSERT(va_size != 0U, "0-length mapping at 0x%lx", va_base);
	__ASSERT(va_base < (va_base + (va_size - 1)),
		 "wraparound for virtual address 0x%lx (size %zu)",
		 va_base, va_size);

	key = k_spin_lock(&z_mm_lock);

	LOG_DBG("arch_mem_unmap(0x%lx, %zu) offset %lu",
		va_base, va_size, va_offset);

	/* Tear down the translation first, then free the virtual range */
	arch_mem_unmap(UINT_TO_POINTER(va_base), va_size);
	virt_region_free(UINT_TO_POINTER(va_base), va_size);
	k_spin_unlock(&z_mm_lock, key);
}
/*
* Miscellaneous
*/
/* Align an (address, size) request outward to @p align boundaries.
 *
 * @param[out] aligned_addr Receives @p addr rounded down to @p align
 * @param[out] aligned_size Receives the padded size covering the request
 * @param      addr         Unaligned base address
 * @param      size         Unaligned size
 * @param      align        Alignment (typically CONFIG_MMU_PAGE_SIZE)
 *
 * @return @p addr's offset from the aligned base
 */
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
			  uintptr_t addr, size_t size, size_t align)
{
	uintptr_t base = ROUND_DOWN(addr, align);
	size_t offset = addr - base;

	*aligned_addr = base;
	*aligned_size = ROUND_UP(size + offset, align);

	return offset;
}
#if defined(CONFIG_LINKER_USE_BOOT_SECTION) || defined(CONFIG_LINKER_USE_PINNED_SECTION)
/* Mark all page frames backing the linker region [start_addr, end_addr)
 * as mapped, then pin or un-pin them according to @p pin. Frames that
 * become evictable after un-pinning are handed to the eviction algorithm.
 */
static void mark_linker_section_pinned(void *start_addr, void *end_addr,
				       bool pin)
{
	struct k_mem_page_frame *pf;
	uint8_t *addr;
	/* Expand the range outward to page boundaries */
	uintptr_t pinned_start = ROUND_DOWN(POINTER_TO_UINT(start_addr),
					    CONFIG_MMU_PAGE_SIZE);
	uintptr_t pinned_end = ROUND_UP(POINTER_TO_UINT(end_addr),
					CONFIG_MMU_PAGE_SIZE);
	size_t pinned_size = pinned_end - pinned_start;

	VIRT_FOREACH(UINT_TO_POINTER(pinned_start), pinned_size, addr)
	{
		pf = k_mem_phys_to_page_frame(K_MEM_BOOT_VIRT_TO_PHYS(addr));
		frame_mapped_set(pf, addr);

		if (pin) {
			k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
		} else {
			k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_PINNED);
#ifdef CONFIG_DEMAND_PAGING
			if (k_mem_page_frame_is_evictable(pf)) {
				k_mem_paging_eviction_add(pf);
			}
#endif
		}
	}
}
#endif /* CONFIG_LINKER_USE_BOOT_SECTION) || CONFIG_LINKER_USE_PINNED_SECTION */
/* One-time initialization of the memory management subsystem: account for
 * page frames already in use (reserved, kernel image, pinned linker
 * sections), build the free page frame list, and bring up demand paging
 * if enabled.
 */
void z_mem_manage_init(void)
{
	uintptr_t phys;
	uint8_t *addr;
	struct k_mem_page_frame *pf;
	k_spinlock_key_t key = k_spin_lock(&z_mm_lock);

	free_page_frame_list_init();

	ARG_UNUSED(addr);

#ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES
	/* If some page frames are unavailable for use as memory, arch
	 * code will mark K_MEM_PAGE_FRAME_RESERVED in their flags
	 */
	arch_reserved_pages_update();
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */

#ifdef CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
	/* All pages composing the Zephyr image are mapped at boot in a
	 * predictable way. This can change at runtime.
	 */
	VIRT_FOREACH(K_MEM_KERNEL_VIRT_START, K_MEM_KERNEL_VIRT_SIZE, addr)
	{
		pf = k_mem_phys_to_page_frame(K_MEM_BOOT_VIRT_TO_PHYS(addr));
		frame_mapped_set(pf, addr);

		/* TODO: for now we pin the whole Zephyr image. Demand paging
		 * currently tested with anonymously-mapped pages which are not
		 * pinned.
		 *
		 * We will need to setup linker regions for a subset of kernel
		 * code/data pages which are pinned in memory and
		 * may not be evicted. This will contain critical CPU data
		 * structures, and any code used to perform page fault
		 * handling, page-ins, etc.
		 */
		k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
	}
#endif /* CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

#ifdef CONFIG_LINKER_USE_BOOT_SECTION
	/* Pin the boot section to prevent it from being swapped out during
	 * boot process. Will be un-pinned once boot process completes.
	 */
	mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, true);
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
	/* Pin the page frames corresponding to the pinned symbols */
	mark_linker_section_pinned(lnkr_pinned_start, lnkr_pinned_end, true);
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

	/* Any remaining pages that aren't mapped, reserved, or pinned get
	 * added to the free pages list
	 */
	K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
		if (k_mem_page_frame_is_available(pf)) {
			free_page_frame_list_put(pf);
		}
	}
	LOG_DBG("free page frames: %zu", z_free_page_count);

#ifdef CONFIG_DEMAND_PAGING
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	z_paging_histogram_init();
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
	k_mem_paging_backing_store_init();
	k_mem_paging_eviction_init();
	/* start tracking evictable page installed above if any */
	K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
		if (k_mem_page_frame_is_evictable(pf)) {
			k_mem_paging_eviction_add(pf);
		}
	}
#endif /* CONFIG_DEMAND_PAGING */

#if __ASSERT_ON
	page_frames_initialized = true;
#endif
	k_spin_unlock(&z_mm_lock, key);

#ifndef CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
	/* If BSS section is not present in memory at boot,
	 * it would not have been cleared. This needs to be
	 * done now since paging mechanism has been initialized
	 * and the BSS pages can be brought into physical
	 * memory to be cleared.
	 */
	z_bss_zero();
#endif /* CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
}
/* Called once the boot process completes: release pinning that was only
 * needed during early bring-up.
 */
void z_mem_manage_boot_finish(void)
{
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
	/* At the end of boot process, unpin the boot sections
	 * as they don't need to be in memory all the time anymore.
	 */
	mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, false);
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
}
#ifdef CONFIG_DEMAND_PAGING
#ifdef CONFIG_DEMAND_PAGING_STATS
struct k_mem_paging_stats_t paging_stats;
extern struct k_mem_paging_histogram_t z_paging_histogram_eviction;
extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in;
extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out;
#endif /* CONFIG_DEMAND_PAGING_STATS */
/* Wrapper for k_mem_paging_backing_store_page_in() that, when the timing
 * histogram is enabled, measures the elapsed cycles and records them in
 * the page-in histogram.
 */
static inline void do_backing_store_page_in(uintptr_t location)
{
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	uint32_t time_diff;

#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
	timing_t time_start, time_end;

	time_start = timing_counter_get();
#else
	uint32_t time_start;

	time_start = k_cycle_get_32();
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

	k_mem_paging_backing_store_page_in(location);

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
	time_end = timing_counter_get();
	time_diff = (uint32_t)timing_cycles_get(&time_start, &time_end);
#else
	time_diff = k_cycle_get_32() - time_start;
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */

	z_paging_histogram_inc(&z_paging_histogram_backing_store_page_in,
			       time_diff);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
}
/* Wrapper for k_mem_paging_backing_store_page_out() that, when the timing
 * histogram is enabled, measures the elapsed cycles and records them in
 * the page-out histogram.
 */
static inline void do_backing_store_page_out(uintptr_t location)
{
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	uint32_t time_diff;

#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
	timing_t time_start, time_end;

	time_start = timing_counter_get();
#else
	uint32_t time_start;

	time_start = k_cycle_get_32();
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

	k_mem_paging_backing_store_page_out(location);

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
	time_end = timing_counter_get();
	time_diff = (uint32_t)timing_cycles_get(&time_start, &time_end);
#else
	time_diff = k_cycle_get_32() - time_start;
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */

	z_paging_histogram_inc(&z_paging_histogram_backing_store_page_out,
			       time_diff);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
}
/* Current implementation relies on interrupt locking to any prevent page table
* access, which falls over if other CPUs are active. Addressing this is not
* as simple as using spinlocks as regular memory reads/writes constitute
* "access" in this sense.
*
* Current needs for demand paging are on uniprocessor systems.
*/
BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP));
/* Invoke @p func once per page over the virtual region [addr, addr + size) */
static void virt_region_foreach(void *addr, size_t size,
				void (*func)(void *))
{
	uint8_t *pos = addr;
	uint8_t *end = pos + size;

	k_mem_assert_virtual_region(addr, size);

	while (pos < end) {
		func(pos);
		pos += CONFIG_MMU_PAGE_SIZE;
	}
}
/*
 * Perform some preparatory steps before paging out. The provided page frame
 * must be evicted to the backing store immediately after this is called
 * with a call to k_mem_paging_backing_store_page_out() if it contains
 * a data page.
 *
 * - Map page frame to scratch area if requested. This always is true if we're
 *   doing a page fault, but is only set on manual evictions if the page is
 *   dirty.
 * - If mapped:
 *    - obtain backing store location and populate location parameter
 *    - Update page tables with location
 *    - Mark page frame as busy
 *
 * Returns -ENOMEM if the backing store is full
 */
static int page_frame_prepare_locked(struct k_mem_page_frame *pf, bool *dirty_ptr,
				     bool page_fault, uintptr_t *location_ptr)
{
	uintptr_t phys;
	int ret;
	bool dirty = *dirty_ptr;

	phys = k_mem_page_frame_to_phys(pf);
	/* Pinned frames must never be selected for eviction */
	__ASSERT(!k_mem_page_frame_is_pinned(pf), "page frame 0x%lx is pinned",
		 phys);

	/* If the backing store doesn't have a copy of the page, even if it
	 * wasn't modified, treat as dirty. This can happen for a few
	 * reasons:
	 * 1) Page has never been swapped out before, and the backing store
	 *    wasn't pre-populated with this data page.
	 * 2) Page was swapped out before, but the page contents were not
	 *    preserved after swapping back in.
	 * 3) Page contents were preserved when swapped back in, but were later
	 *    evicted from the backing store to make room for other evicted
	 *    pages.
	 */
	if (k_mem_page_frame_is_mapped(pf)) {
		dirty = dirty || !k_mem_page_frame_is_backed(pf);
	}

	/* Scratch mapping is needed to copy the page out (or to fill it in
	 * on a page fault).
	 */
	if (dirty || page_fault) {
		arch_mem_scratch(phys);
	}

	if (k_mem_page_frame_is_mapped(pf)) {
		ret = k_mem_paging_backing_store_location_get(pf, location_ptr,
							      page_fault);
		if (ret != 0) {
			LOG_ERR("out of backing store memory");
			return -ENOMEM;
		}
		arch_mem_page_out(k_mem_page_frame_to_virt(pf), *location_ptr);
		k_mem_paging_eviction_remove(pf);
	} else {
		/* Shouldn't happen unless this function is mis-used */
		__ASSERT(!dirty, "un-mapped page determined to be dirty");
	}
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	/* Mark as busy so that k_mem_page_frame_is_evictable() returns false */
	__ASSERT(!k_mem_page_frame_is_busy(pf), "page frame 0x%lx is already busy",
		 phys);
	k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_BUSY);
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	/* Update dirty parameter, since we set to true if it wasn't backed
	 * even if otherwise clean
	 */
	*dirty_ptr = dirty;

	return 0;
}
/* Evict the data page mapped at @p addr to the backing store and free its
 * page frame. A no-op if the page is un-mapped or already evicted.
 *
 * Fixes over the previous revision:
 * - use #ifdef (not #if) for CONFIG_DEMAND_PAGING_ALLOW_IRQ, matching every
 *   other conditional on this symbol in the file and tolerating a symbol
 *   defined to an empty value
 * - drop the dead __ASSERT(ret == 0) that followed "if (ret != 0) goto out;"
 *   and so could never fire
 *
 * @return 0 on success, -ENOMEM if the backing store is full
 */
static int do_mem_evict(void *addr)
{
	bool dirty;
	struct k_mem_page_frame *pf;
	uintptr_t location;
	int key, ret;
	uintptr_t flags, phys;

#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	__ASSERT(!k_is_in_isr(),
		 "%s is unavailable in ISRs with CONFIG_DEMAND_PAGING_ALLOW_IRQ",
		 __func__);
	k_sched_lock();
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	key = irq_lock();
	flags = arch_page_info_get(addr, &phys, false);
	__ASSERT((flags & ARCH_DATA_PAGE_NOT_MAPPED) == 0,
		 "address %p isn't mapped", addr);
	if ((flags & ARCH_DATA_PAGE_LOADED) == 0) {
		/* Un-mapped or already evicted. Nothing to do */
		ret = 0;
		goto out;
	}

	dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0;
	pf = k_mem_phys_to_page_frame(phys);
	__ASSERT(k_mem_page_frame_to_virt(pf) == addr, "page frame address mismatch");
	ret = page_frame_prepare_locked(pf, &dirty, false, &location);
	if (ret != 0) {
		goto out;
	}

	/* With ALLOW_IRQ, interrupts may be serviced while the (potentially
	 * slow) page-out transfer runs; the frame is marked busy meanwhile.
	 */
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	irq_unlock(key);
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	if (dirty) {
		do_backing_store_page_out(location);
	}
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	key = irq_lock();
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	page_frame_free_locked(pf);
out:
	irq_unlock(key);
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	k_sched_unlock();
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	return ret;
}
/* Evict every data page in [addr, addr + size) to the backing store.
 * Stops and returns the error of the first eviction that fails.
 */
int k_mem_page_out(void *addr, size_t size)
{
	uint8_t *pos = addr;
	uint8_t *end = pos + size;

	__ASSERT(page_frames_initialized, "%s called on %p too early", __func__,
		 addr);
	k_mem_assert_virtual_region(addr, size);

	while (pos < end) {
		int ret = do_mem_evict(pos);

		if (ret != 0) {
			return ret;
		}
		pos += CONFIG_MMU_PAGE_SIZE;
	}

	return 0;
}
/* Evict the page frame at physical address @p phys to the backing store,
 * freeing the frame. A no-op if the frame is not mapped.
 *
 * @return 0 on success, -ENOMEM if the backing store is full
 */
int k_mem_page_frame_evict(uintptr_t phys)
{
	int key, ret;
	struct k_mem_page_frame *pf;
	bool dirty;
	uintptr_t flags;
	uintptr_t location;

	__ASSERT(page_frames_initialized, "%s called on 0x%lx too early",
		 __func__, phys);

	/* Implementation is similar to do_page_fault() except there is no
	 * data page to page-in, see comments in that function.
	 */

#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	__ASSERT(!k_is_in_isr(),
		 "%s is unavailable in ISRs with CONFIG_DEMAND_PAGING_ALLOW_IRQ",
		 __func__);
	k_sched_lock();
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	key = irq_lock();
	pf = k_mem_phys_to_page_frame(phys);
	if (!k_mem_page_frame_is_mapped(pf)) {
		/* Nothing to do, free page */
		ret = 0;
		goto out;
	}
	flags = arch_page_info_get(k_mem_page_frame_to_virt(pf), NULL, false);
	/* Shouldn't ever happen */
	__ASSERT((flags & ARCH_DATA_PAGE_LOADED) != 0, "data page not loaded");
	dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0;
	ret = page_frame_prepare_locked(pf, &dirty, false, &location);
	if (ret != 0) {
		goto out;
	}

	/* With ALLOW_IRQ, interrupts may be serviced during the page-out
	 * transfer; the frame is marked busy meanwhile.
	 */
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	irq_unlock(key);
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	if (dirty) {
		do_backing_store_page_out(location);
	}
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	key = irq_lock();
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	page_frame_free_locked(pf);
out:
	irq_unlock(key);
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	k_sched_unlock();
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	return ret;
}
/* Bump the global (and, if enabled, per-thread) page fault counters,
 * classifying the fault by whether IRQs were unlocked when it occurred
 * and — when ALLOW_IRQ is disabled — whether it happened in ISR context.
 * @p key is the irq_lock() key taken by the fault handler.
 */
static inline void paging_stats_faults_inc(struct k_thread *faulting_thread,
					   int key)
{
#ifdef CONFIG_DEMAND_PAGING_STATS
	bool is_irq_unlocked = arch_irq_unlocked(key);

	paging_stats.pagefaults.cnt++;

	if (is_irq_unlocked) {
		paging_stats.pagefaults.irq_unlocked++;
	} else {
		paging_stats.pagefaults.irq_locked++;
	}

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
	faulting_thread->paging_stats.pagefaults.cnt++;

	if (is_irq_unlocked) {
		faulting_thread->paging_stats.pagefaults.irq_unlocked++;
	} else {
		faulting_thread->paging_stats.pagefaults.irq_locked++;
	}
#else
	ARG_UNUSED(faulting_thread);
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	/* ISR faults are only possible when ALLOW_IRQ is disabled */
	if (k_is_in_isr()) {
		paging_stats.pagefaults.in_isr++;

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
		faulting_thread->paging_stats.pagefaults.in_isr++;
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
	}
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
#endif /* CONFIG_DEMAND_PAGING_STATS */
}
/* Bump the global (and, if enabled, per-thread) eviction counters,
 * classified by whether the evicted page was dirty (required a page-out)
 * or clean.
 */
static inline void paging_stats_eviction_inc(struct k_thread *faulting_thread,
					     bool dirty)
{
#ifdef CONFIG_DEMAND_PAGING_STATS
	if (dirty) {
		paging_stats.eviction.dirty++;
	} else {
		paging_stats.eviction.clean++;
	}
#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
	if (dirty) {
		faulting_thread->paging_stats.eviction.dirty++;
	} else {
		faulting_thread->paging_stats.eviction.clean++;
	}
#else
	ARG_UNUSED(faulting_thread);
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
#endif /* CONFIG_DEMAND_PAGING_STATS */
}
/* Wrapper for k_mem_paging_eviction_select() that, when the timing
 * histogram is enabled, measures the elapsed cycles and records them in
 * the eviction histogram.
 *
 * @param[out] dirty Set by the eviction algorithm if the chosen frame
 *                   must be written back to the backing store
 * @return the page frame selected for eviction
 */
static inline struct k_mem_page_frame *do_eviction_select(bool *dirty)
{
	struct k_mem_page_frame *pf;

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	uint32_t time_diff;

#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
	timing_t time_start, time_end;

	time_start = timing_counter_get();
#else
	uint32_t time_start;

	time_start = k_cycle_get_32();
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

	pf = k_mem_paging_eviction_select(dirty);

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
	time_end = timing_counter_get();
	time_diff = (uint32_t)timing_cycles_get(&time_start, &time_end);
#else
	time_diff = k_cycle_get_32() - time_start;
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */

	z_paging_histogram_inc(&z_paging_histogram_eviction, time_diff);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

	return pf;
}
/* Core page fault / page-in handler. Brings the data page for @p addr into
 * physical memory, evicting another page if no frame is free, and pins the
 * frame if @p pin is true.
 *
 * @return false if @p addr has no paging data structures (fatal fault),
 *         true otherwise
 */
static bool do_page_fault(void *addr, bool pin)
{
	struct k_mem_page_frame *pf;
	int key, ret;
	uintptr_t page_in_location, page_out_location;
	enum arch_page_location status;
	bool result;
	bool dirty = false;
	struct k_thread *faulting_thread = _current_cpu->current;

	__ASSERT(page_frames_initialized, "page fault at %p happened too early",
		 addr);

	LOG_DBG("page fault at %p", addr);

	/*
	 * TODO: Add performance accounting:
	 * - k_mem_paging_eviction_select() metrics
	 *   * periodic timer execution time histogram (if implemented)
	 */

#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	/* We lock the scheduler so that other threads are never scheduled
	 * during the page-in/out operation.
	 *
	 * We do however re-enable interrupts during the page-in/page-out
	 * operation if and only if interrupts were enabled when the exception
	 * was taken; in this configuration page faults in an ISR are a bug;
	 * all their code/data must be pinned.
	 *
	 * If interrupts were disabled when the exception was taken, the
	 * arch code is responsible for keeping them that way when entering
	 * this function.
	 *
	 * If this is not enabled, then interrupts are always locked for the
	 * entire operation. This is far worse for system interrupt latency
	 * but requires less pinned pages and ISRs may also take page faults.
	 *
	 * Support for allowing k_mem_paging_backing_store_page_out() and
	 * k_mem_paging_backing_store_page_in() to also sleep and allow
	 * other threads to run (such as in the case where the transfer is
	 * async DMA) is not implemented. Even if limited to thread context,
	 * arbitrary memory access triggering exceptions that put a thread to
	 * sleep on a contended page fault operation will break scheduling
	 * assumptions of cooperative threads or threads that implement
	 * critical sections with spinlocks or disabling IRQs.
	 */
	k_sched_lock();
	__ASSERT(!k_is_in_isr(), "ISR page faults are forbidden");
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	key = irq_lock();
	status = arch_page_location_get(addr, &page_in_location);
	if (status == ARCH_PAGE_LOCATION_BAD) {
		/* Return false to treat as a fatal error */
		result = false;
		goto out;
	}
	result = true;

	if (status == ARCH_PAGE_LOCATION_PAGED_IN) {
		if (pin) {
			/* It's a physical memory address */
			uintptr_t phys = page_in_location;

			pf = k_mem_phys_to_page_frame(phys);
			if (!k_mem_page_frame_is_pinned(pf)) {
				k_mem_paging_eviction_remove(pf);
				k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
			}
		}

		/* This if-block is to pin the page if it is
		 * already present in physical memory. There is
		 * no need to go through the following code to
		 * pull in the data pages. So skip to the end.
		 */
		goto out;
	}
	__ASSERT(status == ARCH_PAGE_LOCATION_PAGED_OUT,
		 "unexpected status value %d", status);

	paging_stats_faults_inc(faulting_thread, key);

	pf = free_page_frame_list_get();
	if (pf == NULL) {
		/* Need to evict a page frame */
		pf = do_eviction_select(&dirty);
		__ASSERT(pf != NULL, "failed to get a page frame");
		LOG_DBG("evicting %p at 0x%lx",
			k_mem_page_frame_to_virt(pf),
			k_mem_page_frame_to_phys(pf));

		paging_stats_eviction_inc(faulting_thread, dirty);
	}
	ret = page_frame_prepare_locked(pf, &dirty, true, &page_out_location);
	__ASSERT(ret == 0, "failed to prepare page frame");

#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	irq_unlock(key);
	/* Interrupts are now unlocked if they were not locked when we entered
	 * this function, and we may service ISRs. The scheduler is still
	 * locked.
	 */
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	if (dirty) {
		do_backing_store_page_out(page_out_location);
	}
	do_backing_store_page_in(page_in_location);

#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	key = irq_lock();
	k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_BUSY);
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
	/* Re-associate the frame with the faulting address and install the
	 * new translation.
	 */
	k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_MAPPED);
	frame_mapped_set(pf, addr);
	if (pin) {
		k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
	}

	arch_mem_page_in(addr, k_mem_page_frame_to_phys(pf));
	k_mem_paging_backing_store_page_finalize(pf, page_in_location);
	if (!pin) {
		k_mem_paging_eviction_add(pf);
	}
out:
	irq_unlock(key);
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	k_sched_unlock();
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */

	return result;
}
/* virt_region_foreach() callback: fault a single data page into memory */
static void do_page_in(void *addr)
{
	bool ok = do_page_fault(addr, false);

	__ASSERT(ok, "unmapped memory address %p", addr);
	(void)ok;
}
/* Fault in all data pages of the region [addr, addr + size) so they are
 * resident in physical memory (without pinning them).
 */
void k_mem_page_in(void *addr, size_t size)
{
	__ASSERT(!IS_ENABLED(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || !k_is_in_isr(),
		 "%s may not be called in ISRs if CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled",
		 __func__);
	virt_region_foreach(addr, size, do_page_in);
}
/* virt_region_foreach() callback: page in and pin a single data page */
static void do_mem_pin(void *addr)
{
	bool ok = do_page_fault(addr, true);

	__ASSERT(ok, "unmapped memory address %p", addr);
	(void)ok;
}
/* Fault in and pin all data pages of the region [addr, addr + size) so
 * they cannot be evicted.
 */
void k_mem_pin(void *addr, size_t size)
{
	__ASSERT(!IS_ENABLED(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || !k_is_in_isr(),
		 "%s may not be called in ISRs if CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled",
		 __func__);
	virt_region_foreach(addr, size, do_mem_pin);
}
/* Entry point for arch page fault handling; returns false if the fault
 * cannot be resolved (fatal).
 */
bool k_mem_page_fault(void *addr)
{
	return do_page_fault(addr, false);
}
/* virt_region_foreach() callback: clear the pinned flag on a loaded data
 * page and hand its frame back to the eviction algorithm.
 */
static void do_mem_unpin(void *addr)
{
	struct k_mem_page_frame *pf;
	unsigned int key;
	uintptr_t flags, phys;

	key = irq_lock();
	flags = arch_page_info_get(addr, &phys, false);
	__ASSERT((flags & ARCH_DATA_PAGE_NOT_MAPPED) == 0,
		 "invalid data page at %p", addr);
	/* Only loaded pages have a frame to unpin */
	if ((flags & ARCH_DATA_PAGE_LOADED) != 0) {
		pf = k_mem_phys_to_page_frame(phys);
		if (k_mem_page_frame_is_pinned(pf)) {
			k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_PINNED);
			k_mem_paging_eviction_add(pf);
		}
	}
	irq_unlock(key);
}
/* Unpin all data pages of the region [addr, addr + size), making them
 * eligible for eviction again.
 */
void k_mem_unpin(void *addr, size_t size)
{
	__ASSERT(page_frames_initialized, "%s called on %p too early", __func__,
		 addr);
	virt_region_foreach(addr, size, do_mem_unpin);
}
#endif /* CONFIG_DEMAND_PAGING */
``` | /content/code_sandbox/kernel/mmu.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 13,199 |
```c
/*
*
*/
/**
* @file
*
* @brief Kernel asynchronous event polling interface.
*
* This polling mechanism allows waiting on multiple events concurrently,
* either events triggered directly, or from kernel objects or other kernel
* constructs.
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <stdbool.h>
/* Single subsystem lock. Locking per-event would be better on highly
* contended SMP systems, but the original locking scheme here is
* subtle (it relies on releasing/reacquiring the lock in areas for
* latency control and it's sometimes hard to see exactly what data is
* "inside" a given critical section). Do the synchronization port
* later as an optimization.
*/
static struct k_spinlock lock;
enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };
static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);
/* Initialize a poll event to watch @p obj for condition @p type.
 * The event's tag field is deliberately left untouched; the caller sets it
 * only when tagging is needed.
 */
void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->obj = obj;
	event->type = type;
	event->mode = mode;
	event->state = K_POLL_STATE_NOT_READY;
	event->unused = 0U;
	event->poller = NULL;

	SYS_PORT_TRACING_FUNC(k_poll_api, event_init, event);
}
/* Check whether @p event's wait condition is already satisfied; if so,
 * set *state to the matching K_POLL_STATE_* value and return true.
 * Must be called with interrupts locked.
 *
 * Fix: the CONFIG_PIPES case previously fell through into
 * K_POLL_TYPE_IGNORE with no break — harmless today, but fragile if cases
 * are reordered; the missing break is now in place.
 */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (event->msgq->used_msgs > 0) {
			*state = K_POLL_STATE_MSGQ_DATA_AVAILABLE;
			return true;
		}
		break;
#ifdef CONFIG_PIPES
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		if (k_pipe_read_avail(event->pipe)) {
			*state = K_POLL_STATE_PIPE_DATA_AVAILABLE;
			return true;
		}
		break;
#endif /* CONFIG_PIPES */
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}
/* Map a poller back to its owning thread, or NULL if there is no poller */
static struct k_thread *poller_thread(struct z_poller *p)
{
	if (p == NULL) {
		return NULL;
	}

	return CONTAINER_OF(p, struct k_thread, poller);
}
/* Insert @p event into an object's poll_events list, keeping the list
 * ordered by poller thread priority (higher priority first) so
 * higher-priority pollers are notified first.
 * Must be called with interrupts locked.
 */
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	/* Fast path: list is empty, or the current tail outranks us */
	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	/* Otherwise insert before the first entry we outrank */
	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}
/* Attach @p event to its kernel object's poll_events list so the object
 * can notify @p poller when the condition becomes true.
 * Must be called with interrupts locked.
 */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		add_event(&event->msgq->poll_events, event, poller);
		break;
#ifdef CONFIG_PIPES
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		add_event(&event->pipe->poll_events, event, poller);
		break;
#endif /* CONFIG_PIPES */
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}
/* Undo register_event(): clear the event's poller reference and, for
 * object-backed event types, unlink the event from the object's
 * poll_events list if it is still queued there.
 * Must be called with interrupts locked.
 */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		remove_event = true;
		break;
#ifdef CONFIG_PIPES
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		remove_event = true;
		break;
#endif /* CONFIG_PIPES */
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	/* Only unlink if the event was actually queued on an object */
	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}
/* Deregister all @p num_events events, last to first, briefly releasing
 * and re-acquiring the lock between each one to bound latency.
 * Must be entered with the lock held (via @p key).
 */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
	for (int i = num_events - 1; i >= 0; i--) {
		clear_event_registration(&events[i]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}
/* Mark an event as having fired and drop its poller reference so the
 * event cannot be signaled a second time.
 */
static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->state |= state;
	event->poller = NULL;
}
/* Scan the event array once: events whose condition is already met are
 * marked ready immediately; otherwise (unless just_check) the event is
 * registered with the poller so a later state change wakes it.
 * Returns the number of events actually registered, which the caller
 * must later pass to clear_event_registrations().
 */
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		/* lock is taken and released per event to bound latency */
		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			/* at least one event is ready: stop polling */
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		} else {
			/* Event is not one of those identified in is_condition_met()
			 * catching non-polling events, or is marked for just check,
			 * or not marked for polling. No action needed.
			 */
			;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}
/* Wake the thread blocked in k_poll() that owns this event's poller.
 * A K_POLL_STATE_CANCELLED state makes the woken k_poll() return -EINTR,
 * any other state makes it return 0. Always returns 0 itself.
 */
static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	/* nothing to do if the poller thread is not actually pended */
	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	/* the thread may still be suspended or otherwise not runnable */
	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}
/* Wait on an array of poll events until at least one becomes ready,
 * the timeout expires (-EAGAIN), or a waited-on object is cancelled
 * (-EINTR via signal_poller). Must not be called from an ISR.
 */
int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_poll_api, poll, events);

	/* with K_NO_WAIT only check conditions, never register */
	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, 0);

		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, -EAGAIN);

		return -EAGAIN;
	}

	/* All pollers pend on this single shared wait queue; wakeup happens
	 * through the per-event poller reference, not the wait queue itself.
	 */
	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we've already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, swap_rc);

	return swap_rc;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier for k_poll(): copies the user-supplied event array
 * into a kernel buffer, validates each entry's mode/type and object
 * permissions, runs the real k_poll(), then copies results back.
 */
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (K_SYSCALL_VERIFY(num_events >= 0)) {
		ret = -EINVAL;
		goto out;
	}
	/* reject sizes whose byte count would overflow 32 bits */
	if (K_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	/* take the poll lock so the user buffer is copied atomically with
	 * respect to concurrent pollers
	 */
	key = k_spin_lock(&lock);
	if (K_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (K_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		/* the caller must hold permission on each referenced object */
		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			K_OOPS(K_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->msgq, K_OBJ_MSGQ));
			break;
#ifdef CONFIG_PIPES
		case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE));
			break;
#endif /* CONFIG_PIPES */
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	/* copy updated event states back to the user buffer */
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	K_OOPS(1);
}
#include <zephyr/syscalls/k_poll_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	/* Dispatch a state change to whoever is waiting on this event:
	 * either a thread blocked in k_poll() (MODE_POLL) or a triggered
	 * work item (MODE_TRIGGERED). The event is marked ready in all
	 * cases unless the dispatch itself fails.
	 */
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		} else {
			/* Poller is not poll or triggered mode. No action needed.*/
			;
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}
/* Called by kernel objects (queues, semaphores, ...) when their state
 * changes: pops the first registered poll event, if any, off the object's
 * poll_events list and signals it with the given state.
 */
void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;
	k_spinlock_key_t key = k_spin_lock(&lock);

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}

	k_spin_unlock(&lock, key);
}
/* Initialize a poll signal object: empty waiter list, not signaled. */
void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* signal->result is left uninitialized; it is only meaningful
	 * after k_poll_signal_raise() stores into it
	 */
	k_object_init(sig);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_init, sig);
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: the object must be a valid, uninitialized poll signal. */
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <zephyr/syscalls/k_poll_signal_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Clear a signal's signaled flag so it can be raised again.
 * NOTE(review): no lock is taken here, matching the plain store it
 * performs; confirm callers do not rely on ordering vs. a concurrent
 * k_poll_signal_raise().
 */
void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{
	sig->signaled = 0U;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_reset, sig);
}
/* Report a signal's current state: *signaled is nonzero if it has been
 * raised, and *result is the value passed to the last raise (unspecified
 * if the signal was never raised).
 */
void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_check, sig);
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: validate the signal object and both output pointers
 * before delegating to the implementation.
 */
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <zephyr/syscalls/k_poll_signal_check_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Raise a poll signal: record the result value, mark it signaled, and
 * wake the first poller registered on it, if any. Returns 0 when nobody
 * was waiting, otherwise the result of signaling the poll event.
 */
int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, 0);

		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, rc);

	/* the woken poller may preempt us; reschedule drops the lock */
	z_reschedule(&lock, key);
	return rc;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier for k_poll_signal_raise(). */
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <zephyr/syscalls/k_poll_signal_raise_mrsh.c>

/* Syscall verifier for k_poll_signal_reset(). */
static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <zephyr/syscalls/k_poll_signal_reset_mrsh.c>

#endif /* CONFIG_USERSPACE */
/* Workqueue trampoline for triggered work: clears any remaining event
 * registrations, releases work ownership, then runs the user handler.
 */
static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
			CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If callback is not set, the k_work_poll_submit_to_queue()
	 * already cleared event registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}
/* Timeout callback for triggered work: no event fired in time, so stop
 * polling, record -EAGAIN, and submit the work item so the user handler
 * still runs (and can observe poll_result).
 */
static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}
extern int z_work_submit_to_queue(struct k_work_q *queue,
			 struct k_work *work);

/* Event-side wakeup for triggered work (poller mode MODE_TRIGGERED):
 * cancel the pending timeout and submit the work item. Returns 0.
 */
static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	/* skip if polling already ended or the work was cancelled */
	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		z_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}
/* Cancel triggered work that is still waiting for its events; must be
 * called with the poll lock held (key). Returns 0 on success or -EINVAL
 * when the work is already being processed and can no longer be stopped.
 */
static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work waits for event. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if event arrives while we will be
		 * clearing registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered in
	 * the k_work_poll_submit_to_queue(), executed or is pending.
	 * Only in the last case we have a chance to cancel it, but
	 * unfortunately there is no public API performing this task.
	 */
	return -EINVAL;
}
/* Initialize a triggered work item: zero all state, route execution
 * through triggered_work_handler, and remember the user's real handler.
 */
void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_poll, init, work);

	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_poll, init, work);
}
/* Submit triggered work: the work item is submitted to work_q when one
 * of the events becomes ready (poll_result 0) or the timeout expires
 * (poll_result -EAGAIN). Returns 0 on success, -EADDRINUSE if the work
 * is owned by a different queue, or the failure from cancelling a
 * previous submission to the same queue.
 */
int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit_to_queue, work_q, work, timeout);

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			/* resubmission to the same queue: cancel the
			 * previous pending trigger first
			 */
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);

				SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
					work, timeout, retval);

				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);

			SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
				work, timeout, -EADDRINUSE);

			return -EADDRINUSE;
		}
	}


	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

		return 0;
	}

	/*
	 * The K_NO_WAIT timeout was specified or at least one event
	 * was ready at registration time or changed state since
	 * registration. Hopefully, the poller mode was not set, so
	 * work was not submitted to workqueue.
	 */

	/*
	 * If poller is still polling, no watched event occurred. This means
	 * we reached here due to K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

	return 0;
}
/* Convenience wrapper: submit triggered work to the system work queue. */
int k_work_poll_submit(struct k_work_poll *work,
				     struct k_poll_event *events,
				     int num_events,
				     k_timeout_t timeout)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit, work, timeout);

	int ret = k_work_poll_submit_to_queue(&k_sys_work_q, work,
								events, num_events, timeout);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit, work, timeout, ret);

	return ret;
}
/* Cancel a submitted triggered work item before it runs. Returns 0 on
 * success, -EINVAL if it was never submitted or can no longer be
 * cancelled (already triggered or executing).
 */
int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, cancel, work);

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, -EINVAL);

		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, retval);

	return retval;
}
``` | /content/code_sandbox/kernel/poll.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,678 |
```c
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/kernel/smp.h>
#include <zephyr/spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>
static atomic_t global_lock;
/**
* Flag to tell recently powered up CPU to start
* initialization routine.
*
* 0 to tell powered up CPU to wait.
* 1 to tell powered up CPU to continue initialization.
*/
static atomic_t cpu_start_flag;
/**
* Flag to tell caller that the target CPU is now
* powered up and ready to be initialized.
*
* 0 if target CPU is not yet ready.
* 1 if target CPU has powered up and ready to be initialized.
*/
static atomic_t ready_flag;
/**
* Struct holding the function to be called before handing off
* to schedule and its argument.
*/
static struct cpu_start_cb {
/**
* Function to be called before handing off to scheduler.
* Can be NULL.
*/
smp_init_fn fn;
/** Argument to @ref cpu_start_fn.fn. */
void *arg;
/** Invoke scheduler after CPU has started if true. */
bool invoke_sched;
#ifdef CONFIG_SYS_CLOCK_EXISTS
/** True if smp_timer_init() needs to be called. */
bool reinit_timer;
#endif /* CONFIG_SYS_CLOCK_EXISTS */
} cpu_start_fn;
static struct k_spinlock cpu_start_lock;
/* Acquire the SMP global lock for the current thread with local
 * interrupts masked. The lock is recursive per-thread (counted in
 * global_lock_count); returns the irq-lock key to pass to
 * z_smp_global_unlock().
 */
unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	/* only spin on the atomic for the outermost acquisition */
	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
			arch_spin_relax();
		}
	}

	_current->base.global_lock_count++;

	return key;
}
/* Release one level of the recursive SMP global lock and restore the
 * interrupt state saved by the matching z_smp_global_lock().
 */
void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count != 0U) {
		_current->base.global_lock_count--;

		/* drop the atomic only when the outermost level unwinds */
		if (!_current->base.global_lock_count) {
			(void)atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}
/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	/* release the global lock on behalf of a thread being swapped out
	 * that does not hold it recursively
	 */
	if (!thread->base.global_lock_count) {
		(void)atomic_clear(&global_lock);
	}
}
/* Short busy-wait used while spinning on a cross-CPU atomic flag: backs
 * off the shared memory bus instead of hammering it with atomic reads.
 */
static inline void local_delay(void)
{
	volatile int spin = 1000;

	while (spin > 0) {
		spin--;
	}
}
/* Busy-wait until the boot CPU sets *start_flag, signaling that this
 * secondary CPU may proceed with scheduling.
 */
static void wait_for_start_signal(atomic_t *start_flag)
{
	/* Wait for the signal to begin scheduling */
	while (!atomic_get(start_flag)) {
		local_delay();
	}
}
/* Entry point executed on a newly powered-up CPU (passed to
 * arch_cpu_start). arg is a struct cpu_start_cb or NULL; a NULL arg
 * (cold boot path from z_smp_init) implies full init plus scheduling.
 */
static inline void smp_init_top(void *arg)
{
	/* copy the callback struct locally: the caller's copy may be reused
	 * for another CPU once we raise ready_flag
	 */
	struct cpu_start_cb csc = arg ? *(struct cpu_start_cb *)arg : (struct cpu_start_cb){0};

	/* Let start_cpu() know that this CPU has powered up. */
	(void)atomic_set(&ready_flag, 1);

	/* Wait for the CPU start caller to signal that
	 * we can start initialization.
	 */
	wait_for_start_signal(&cpu_start_flag);

	if ((arg == NULL) || csc.invoke_sched) {
		/* Initialize the dummy thread struct so that
		 * the scheduler can schedule actual threads to run.
		 */
		z_dummy_thread_init(&_thread_dummy);
	}

#ifdef CONFIG_SYS_CLOCK_EXISTS
	if ((arg == NULL) || csc.reinit_timer) {
		smp_timer_init();
	}
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* Do additional initialization steps if needed. */
	if (csc.fn != NULL) {
		csc.fn(csc.arg);
	}

	if ((arg != NULL) && !csc.invoke_sched) {
		/* Don't invoke scheduler. */
		return;
	}

	/* Let scheduler decide what thread to run next. */
	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
/* Power up CPU `id` and block until it reports in via ready_flag.
 * csc may be NULL (cold boot path); it is forwarded to smp_init_top().
 */
static void start_cpu(int id, struct cpu_start_cb *csc)
{
	/* Clear the ready flag so the newly powered up CPU can
	 * signal that it has powered up.
	 */
	(void)atomic_clear(&ready_flag);

	/* Power up the CPU */
	arch_cpu_start(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, csc);

	/* Wait until the newly powered up CPU to signal that
	 * it has powered up.
	 */
	while (!atomic_get(&ready_flag)) {
		local_delay();
	}
}
/* Start a single CPU at runtime, running fn(arg) on it before it enters
 * the scheduler. cpu_start_lock serializes use of the shared
 * cpu_start_fn struct across concurrent callers.
 */
void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn.fn = fn;
	cpu_start_fn.arg = arg;
	cpu_start_fn.invoke_sched = true;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn.reinit_timer = true;
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* We are only starting one CPU so we do not need to synchronize
	 * across all CPUs using the start_flag. So just set it to 1.
	 */
	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */

	/* Initialize various CPU structs related to this CPU. */
	z_init_cpu(id);

	/* Start the CPU! */
	start_cpu(id, &cpu_start_fn);

	k_spin_unlock(&cpu_start_lock, key);
}
/* Resume a previously powered-down CPU. Unlike k_smp_cpu_start(), the
 * caller chooses whether the timer is re-initialized and whether the CPU
 * enters the scheduler (invoke_sched), and z_init_cpu() is NOT redone.
 */
void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
		      bool reinit_timer, bool invoke_sched)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn.fn = fn;
	cpu_start_fn.arg = arg;
	cpu_start_fn.invoke_sched = invoke_sched;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn.reinit_timer = reinit_timer;
#else
	ARG_UNUSED(reinit_timer);
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* We are only starting one CPU so we do not need to synchronize
	 * across all CPUs using the start_flag. So just set it to 1.
	 */
	(void)atomic_set(&cpu_start_flag, 1);

	/* Start the CPU! */
	start_cpu(id, &cpu_start_fn);

	k_spin_unlock(&cpu_start_lock, key);
}
/* Boot-time bring-up of all secondary CPUs (1..N-1). They are started
 * one by one, then released into the scheduler together via
 * cpu_start_flag. Called on the primary CPU.
 */
void z_smp_init(void)
{
	/* We are powering up all CPUs and we want to synchronize their
	 * entry into scheduler. So set the start flag to 0 here.
	 */
	(void)atomic_clear(&cpu_start_flag);

	/* Just start CPUs one by one. */
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 1; i < num_cpus; i++) {
		z_init_cpu(i);
		/* NULL csc selects the full-init + schedule path */
		start_cpu(i, NULL);
	}

	/* Let loose those CPUs so they can start scheduling
	 * threads to run.
	 */
	(void)atomic_set(&cpu_start_flag, 1);
}
/* A thread is "mobile" (may migrate to another CPU at any instant) when
 * it is neither running in an ISR nor holding interrupts masked.
 */
bool z_smp_cpu_mobile(void)
{
	unsigned int key = arch_irq_lock();
	bool mobile = !arch_is_in_isr() && arch_irq_unlocked(key);

	arch_irq_unlock(key);
	return mobile;
}
``` | /content/code_sandbox/kernel/smp.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,499 |
```c
/*
*
*/
/**
* @file
*
* @brief dynamic-size QUEUE object.
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <zephyr/sys/check.h>
/* Wrapper node used when the kernel allocates queue linkage on behalf of
 * the caller (k_queue_alloc_append/prepend): holds the list node plus the
 * caller's data pointer.
 */
struct alloc_node {
	sys_sfnode_t node;  /* flags bit set to 1 marks an allocated node */
	void *data;         /* caller-provided payload */
};
/* Translate a raw queue list node into the user's data pointer.
 * If the node's flag bit is set it is an alloc_node wrapper created by
 * an alloc-variant enqueue; optionally free the wrapper (needs_free).
 * Accepts NULL and returns it unchanged (falls through the else branch).
 */
void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
	void *ret;

	if ((node != NULL) && (sys_sfnode_flags_get(node) != (uint8_t)0)) {
		/* If the flag is set, then the enqueue operation for this item
		 * did a behind-the scenes memory allocation of an alloc_node
		 * struct, which is what got put in the queue. Free it and pass
		 * back the data pointer.
		 */
		struct alloc_node *anode;

		anode = CONTAINER_OF(node, struct alloc_node, node);
		ret = anode->data;
		if (needs_free) {
			k_free(anode);
		}
	} else {
		/* Data was directly placed in the queue, the first word
		 * reserved for the linked list. User mode isn't allowed to
		 * do this, although it can get data sent this way.
		 */
		ret = (void *)node;
	}

	return ret;
}
/* Initialize a queue: empty data list, fresh spinlock, empty wait queue,
 * and (with CONFIG_POLL) an empty poll-event list.
 */
void z_impl_k_queue_init(struct k_queue *queue)
{
	sys_sflist_init(&queue->data_q);
	queue->lock = (struct k_spinlock) {};
	z_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&queue->poll_events);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_queue, queue);

	k_object_init(queue);
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: queue must be a valid, uninitialized queue object. */
static inline void z_vrfy_k_queue_init(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
	z_impl_k_queue_init(queue);
}
#include <zephyr/syscalls/k_queue_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Hand a dequeued data pointer directly to a waiting thread (as its
 * swap_data, with return value 0) and make it runnable.
 */
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	z_thread_return_value_set_with_data(thread, 0, data);
	z_ready_thread(thread);
}
/* Notify pollers registered on this queue of a state change; compiles to
 * a no-op when CONFIG_POLL is disabled.
 */
static inline void handle_poll_events(struct k_queue *queue, uint32_t state)
{
#ifdef CONFIG_POLL
	z_handle_obj_poll_events(&queue->poll_events, state);
#else
	ARG_UNUSED(queue);
	ARG_UNUSED(state);
#endif /* CONFIG_POLL */
}
/* Wake the first thread blocked in k_queue_get() with a NULL result and
 * signal K_POLL_STATE_CANCELLED to any registered poller.
 */
void z_impl_k_queue_cancel_wait(struct k_queue *queue)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_queue, cancel_wait, queue);

	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	struct k_thread *first_pending_thread;

	first_pending_thread = z_unpend_first_thread(&queue->wait_q);

	if (first_pending_thread != NULL) {
		/* NULL data tells the waiter the wait was cancelled */
		prepare_thread_to_run(first_pending_thread, NULL);
	}

	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
	z_reschedule(&queue->lock, key);
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier for k_queue_cancel_wait(). */
static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	z_impl_k_queue_cancel_wait(queue);
}
#include <zephyr/syscalls/k_queue_cancel_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Common enqueue path. If a thread is already waiting, the data is handed
 * to it directly and never touches the list. Otherwise the data (or, when
 * alloc is set, a kernel-allocated alloc_node wrapping it) is inserted
 * after `prev` (tail when is_append). Returns 0 or -ENOMEM.
 */
static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
			    bool alloc, bool is_append)
{
	struct k_thread *first_pending_thread;
	k_spinlock_key_t key = k_spin_lock(&queue->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, queue_insert, queue, alloc);

	if (is_append) {
		prev = sys_sflist_peek_tail(&queue->data_q);
	}
	first_pending_thread = z_unpend_first_thread(&queue->wait_q);

	if (first_pending_thread != NULL) {
		/* fast path: hand the data straight to the waiter */
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

		prepare_thread_to_run(first_pending_thread, data);
		z_reschedule(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);

		return 0;
	}

	/* Only need to actually allocate if no threads are pending */
	if (alloc) {
		struct alloc_node *anode;

		anode = z_thread_malloc(sizeof(*anode));
		if (anode == NULL) {
			k_spin_unlock(&queue->lock, key);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc,
				-ENOMEM);

			return -ENOMEM;
		}
		anode->data = data;
		/* flag bit 1 marks the node for freeing on dequeue */
		sys_sfnode_init(&anode->node, 0x1);
		data = anode;
	} else {
		sys_sfnode_init(data, 0x0);
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

	sys_sflist_insert(&queue->data_q, prev, data);
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
	z_reschedule(&queue->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);

	return 0;
}
/* Insert caller-provided data after `prev` (no allocation; the first
 * word of data is used for list linkage).
 */
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, insert, queue);

	(void)queue_insert(queue, prev, data, false, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, insert, queue);
}
/* Append caller-provided data at the queue tail (no allocation). */
void k_queue_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append, queue);

	(void)queue_insert(queue, NULL, data, false, true);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append, queue);
}
/* Prepend caller-provided data at the queue head (no allocation). */
void k_queue_prepend(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, prepend, queue);

	(void)queue_insert(queue, NULL, data, false, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, prepend, queue);
}
/* Append data via a kernel-allocated wrapper node; usable from user mode.
 * Returns 0 or -ENOMEM.
 */
int32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_append, queue);

	int32_t ret = queue_insert(queue, NULL, data, true, true);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_append, queue, ret);

	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier for k_queue_alloc_append(). */
static inline int32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue,
						  void *data)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_alloc_append(queue, data);
}
#include <zephyr/syscalls/k_queue_alloc_append_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Prepend data via a kernel-allocated wrapper node; usable from user mode.
 * Returns 0 or -ENOMEM.
 */
int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_prepend, queue);

	int32_t ret = queue_insert(queue, NULL, data, true, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_prepend, queue, ret);

	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier for k_queue_alloc_prepend(). */
static inline int32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue,
						   void *data)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_alloc_prepend(queue, data);
}
#include <zephyr/syscalls/k_queue_alloc_prepend_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Append a singly-linked list of items (head..tail, linked through each
 * item's first word). Items are handed directly to waiting threads first;
 * whatever remains is spliced onto the queue. Returns 0 or -EINVAL for a
 * NULL head/tail.
 */
int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append_list, queue);

	/* invalid head or tail of list */
	CHECKIF((head == NULL) || (tail == NULL)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, -EINVAL);

		return -EINVAL;
	}

	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	struct k_thread *thread = NULL;

	if (head != NULL) {
		thread = z_unpend_first_thread(&queue->wait_q);
	}

	/* feed one item to each pending thread until either runs out */
	while ((head != NULL) && (thread != NULL)) {
		prepare_thread_to_run(thread, head);
		/* advance via the link stored in the item's first word */
		head = *(void **)head;
		thread = z_unpend_first_thread(&queue->wait_q);
	}

	if (head != NULL) {
		sys_sflist_append_list(&queue->data_q, head, tail);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, 0);

	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
	z_reschedule(&queue->lock, key);

	return 0;
}
/* Move the entire contents of an slist into the queue, emptying the
 * source list on success. Returns 0, or -EINVAL for an empty list or a
 * failed append.
 */
int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
	int ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, merge_slist, queue);

	/* list must not be empty */
	CHECKIF(sys_slist_is_empty(list)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, -EINVAL);

		return -EINVAL;
	}

	/*
	 * note: this works as long as:
	 * - the slist implementation keeps the next pointer as the first
	 *   field of the node object type
	 * - list->tail->next = NULL.
	 * - sflist implementation only differs from slist by stuffing
	 *   flag bytes in the lower order bits of the data pointer
	 * - source list is really an slist and not an sflist with flags set
	 */
	ret = k_queue_append_list(queue, list->head, list->tail);
	CHECKIF(ret != 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, ret);

		return ret;
	}
	sys_slist_init(list);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, 0);

	return 0;
}
/* Dequeue the head item, blocking up to `timeout` when the queue is
 * empty. Returns the data pointer, or NULL on timeout/K_NO_WAIT-empty or
 * when the wait was cancelled via k_queue_cancel_wait().
 */
void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	void *data;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, get, queue, timeout);

	if (likely(!sys_sflist_is_empty(&queue->data_q))) {
		sys_sfnode_t *node;

		node = sys_sflist_get_not_empty(&queue->data_q);
		/* needs_free=true: release any alloc_node wrapper */
		data = z_queue_node_peek(node, true);
		k_spin_unlock(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, data);

		return data;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, get, queue, timeout);

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, NULL);

		return NULL;
	}

	int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

	/* on success the enqueuer stashed the data in our swap_data */
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout,
		(ret != 0) ? NULL : _current->base.swap_data);

	return (ret != 0) ? NULL : _current->base.swap_data;
}
bool k_queue_remove(struct k_queue *queue, void *data)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, remove, queue);
bool ret = sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, remove, queue, ret);
return ret;
}
/* Append data only if it is not already in the queue. Returns false when
 * a matching node was found, true after appending.
 * NOTE(review): the membership scan runs without queue->lock (the lock
 * cannot be held across k_queue_append(), which re-acquires it), so
 * uniqueness is only guaranteed if callers serialize access externally —
 * confirm against callers.
 */
bool k_queue_unique_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, unique_append, queue);

	sys_sfnode_t *test;

	SYS_SFLIST_FOR_EACH_NODE(&queue->data_q, test) {
		if (test == (sys_sfnode_t *) data) {
			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, false);

			return false;
		}
	}

	k_queue_append(queue, data);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, true);

	return true;
}
/* Return the head item's data without dequeuing it (NULL if empty). */
void *z_impl_k_queue_peek_head(struct k_queue *queue)
{
	/* needs_free=false: the node stays on the queue */
	void *ret = z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);

	SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_head, queue, ret);

	return ret;
}
/* Return the tail item's data without dequeuing it (NULL if empty). */
void *z_impl_k_queue_peek_tail(struct k_queue *queue)
{
	/* needs_free=false: the node stays on the queue */
	void *ret = z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);

	SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_tail, queue, ret);

	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifiers for the read-side queue APIs: each checks the queue
 * object permission and forwards to the implementation.
 */
static inline void *z_vrfy_k_queue_get(struct k_queue *queue,
				       k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_get(queue, timeout);
}
#include <zephyr/syscalls/k_queue_get_mrsh.c>

static inline int z_vrfy_k_queue_is_empty(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_is_empty(queue);
}
#include <zephyr/syscalls/k_queue_is_empty_mrsh.c>

static inline void *z_vrfy_k_queue_peek_head(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_head(queue);
}
#include <zephyr/syscalls/k_queue_peek_head_mrsh.c>

static inline void *z_vrfy_k_queue_peek_tail(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_tail(queue);
}
#include <zephyr/syscalls/k_queue_peek_tail_mrsh.c>

#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_FIFO
struct k_obj_type _obj_type_fifo;

/* Boot hook: register the FIFO object type with the object-core
 * framework and link every statically defined k_fifo into it.
 */
static int init_fifo_obj_core_list(void)
{
	/* Initialize fifo object type */
	z_obj_type_init(&_obj_type_fifo, K_OBJ_TYPE_FIFO_ID,
			offsetof(struct k_fifo, obj_core));

	/* Initialize and link statically defined fifos */
	STRUCT_SECTION_FOREACH(k_fifo, fifo) {
		k_obj_core_init_and_link(K_OBJ_CORE(fifo), &_obj_type_fifo);
	}

	return 0;
}

SYS_INIT(init_fifo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_FIFO */
#ifdef CONFIG_OBJ_CORE_LIFO
struct k_obj_type _obj_type_lifo;

/* Boot hook: register the LIFO object type with the object-core
 * framework and link every statically defined k_lifo into it.
 */
static int init_lifo_obj_core_list(void)
{
	/* Initialize lifo object type */
	z_obj_type_init(&_obj_type_lifo, K_OBJ_TYPE_LIFO_ID,
			offsetof(struct k_lifo, obj_core));

	/* Initialize and link statically defined lifo */
	STRUCT_SECTION_FOREACH(k_lifo, lifo) {
		k_obj_core_init_and_link(K_OBJ_CORE(lifo), &_obj_type_lifo);
	}

	return 0;
}

SYS_INIT(init_lifo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_LIFO */
``` | /content/code_sandbox/kernel/queue.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,299 |
```c
/*
*
*/
/**
* @brief Mailboxes.
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <string.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/init.h>
/* private kernel APIs */
#include <ksched.h>
#include <kthread.h>
#include <wait_q.h>
#ifdef CONFIG_OBJ_CORE_MAILBOX
static struct k_obj_type obj_type_mailbox;
#endif /* CONFIG_OBJ_CORE_MAILBOX */
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/* asynchronous message descriptor type */
struct k_mbox_async {
struct _thread_base thread; /* dummy thread object */
struct k_mbox_msg tx_msg; /* transmit message descriptor */
};
/* stack of unused asynchronous message descriptors */
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);
/* allocate an asynchronous message descriptor */
/* Allocate an asynchronous message descriptor.
 *
 * Pops a free descriptor from the async_msg_free stack; blocks the
 * caller (K_FOREVER) until one becomes available, so *async is always
 * valid on return.
 */
static inline void mbox_async_alloc(struct k_mbox_async **async)
{
(void)k_stack_pop(&async_msg_free, (stack_data_t *)async, K_FOREVER);
}
/* free an asynchronous message descriptor */
/* Free an asynchronous message descriptor by returning it to the
 * async_msg_free stack for reuse.
 */
static inline void mbox_async_free(struct k_mbox_async *async)
{
k_stack_push(&async_msg_free, (stack_data_t)async);
}
/*
 * Boot-time setup for the mailbox subsystem.
 *
 * Builds the pool of asynchronous message descriptors by pushing the
 * address of each one onto the async_msg_free stack that governs
 * access to them. Every descriptor embeds a dummy thread that never
 * executes: minimal initialization with the _THREAD_DUMMY flag is
 * enough to tell it apart from a real thread, and the dummies are
 * *not* added to the kernel's list of known threads.
 */
static int init_mbox_module(void)
{
	/* backing storage for the asynchronous message descriptor pool */
	static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];

	for (int idx = 0; idx < CONFIG_NUM_MBOX_ASYNC_MSGS; idx++) {
		z_init_thread_base(&async_msg[idx].thread, 0, _THREAD_DUMMY, 0);
		k_stack_push(&async_msg_free, (stack_data_t)&async_msg[idx]);
	}

	return 0;
}
SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
/* Initialize a mailbox at runtime: empty tx/rx wait queues, a cleared
 * spinlock, an object-core link (when CONFIG_OBJ_CORE_MAILBOX) and the
 * tracing init hook.
 */
void k_mbox_init(struct k_mbox *mbox)
{
z_waitq_init(&mbox->tx_msg_queue);
z_waitq_init(&mbox->rx_msg_queue);
mbox->lock = (struct k_spinlock) {};
#ifdef CONFIG_OBJ_CORE_MAILBOX
k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);
#endif /* CONFIG_OBJ_CORE_MAILBOX */
SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
}
/**
 * @brief Check compatibility of sender's and receiver's message descriptors.
 *
 * Compares sender's and receiver's message descriptors to see if they are
 * compatible: the sender must be targeting this receiver (or K_ANY) and
 * the receiver must be willing to accept this sender (or K_ANY). If so,
 * the descriptor fields are updated to reflect that a match has occurred.
 *
 * @param tx_msg Pointer to transmit message descriptor.
 * @param rx_msg Pointer to receive message descriptor.
 *
 * @return 0 if successfully matched, otherwise -1.
 */
static int mbox_message_match(struct k_mbox_msg *tx_msg,
struct k_mbox_msg *rx_msg)
{
uint32_t temp_info;
if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
(tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
((rx_msg->rx_source_thread == (k_tid_t)K_ANY) ||
(rx_msg->rx_source_thread == tx_msg->rx_source_thread))) {
/* update thread identifier fields for both descriptors */
rx_msg->rx_source_thread = tx_msg->rx_source_thread;
tx_msg->tx_target_thread = rx_msg->tx_target_thread;
/* application info fields are exchanged (swapped), not copied */
temp_info = rx_msg->info;
rx_msg->info = tx_msg->info;
tx_msg->info = temp_info;
/* update data size field for receiver only: clamp to what was sent */
if (rx_msg->size > tx_msg->size) {
rx_msg->size = tx_msg->size;
}
/* update data location fields for receiver only */
rx_msg->tx_data = tx_msg->tx_data;
/* update syncing thread field for receiver only */
rx_msg->_syncing_thread = tx_msg->_syncing_thread;
return 0;
}
return -1;
}
/**
 * @brief Dispose of received message.
 *
 * Notifies the sender that message processing is complete: for an
 * asynchronous send the descriptor/dummy-thread pair is recycled and the
 * optional semaphore given; for a synchronous send the pended sender is
 * woken up.
 *
 * @param rx_msg Pointer to receive message descriptor.
 */
static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
{
struct k_thread *sending_thread;
struct k_mbox_msg *tx_msg;
/* do nothing if message was disposed of when it was received
 * (_syncing_thread is cleared below on first disposal, making this
 * routine idempotent)
 */
if (rx_msg->_syncing_thread == NULL) {
return;
}
/* recover sender info */
sending_thread = rx_msg->_syncing_thread;
rx_msg->_syncing_thread = NULL;
tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
/* update data size field for sender */
tx_msg->size = rx_msg->size;
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/*
 * asynchronous send: free asynchronous message descriptor +
 * dummy thread pair, then give semaphore (if needed)
 */
if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
struct k_sem *async_sem = tx_msg->_async_sem;
mbox_async_free((struct k_mbox_async *)sending_thread);
if (async_sem != NULL) {
k_sem_give(async_sem);
}
return;
}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
/* synchronous send: wake up sending thread */
arch_thread_return_value_set(sending_thread, 0);
z_mark_thread_as_not_pending(sending_thread);
z_ready_thread(sending_thread);
z_reschedule_unlocked();
}
/**
 * @brief Send a mailbox message.
 *
 * Helper routine that handles both synchronous and asynchronous sends.
 *
 * @param mbox Pointer to the mailbox object.
 * @param tx_msg Pointer to transmit message descriptor.
 * @param timeout Maximum time (as a k_timeout_t) to wait for the message
 * to be received (although not necessarily completely processed).
 * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long
 * as necessary.
 *
 * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
 */
static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
k_timeout_t timeout)
{
struct k_thread *sending_thread;
struct k_thread *receiving_thread;
struct k_mbox_msg *rx_msg;
k_spinlock_key_t key;
/* save sender id so it can be used during message matching */
tx_msg->rx_source_thread = _current;
/* finish readying sending thread (actual or dummy) for send */
sending_thread = tx_msg->_syncing_thread;
sending_thread->base.swap_data = tx_msg;
/* search mailbox's rx queue for a compatible receiver */
key = k_spin_lock(&mbox->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, message_put, mbox, timeout);
_WAIT_Q_FOR_EACH(&mbox->rx_msg_queue, receiving_thread) {
rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;
if (mbox_message_match(tx_msg, rx_msg) == 0) {
/* take receiver out of rx queue */
z_unpend_thread(receiving_thread);
/* ready receiver for execution */
arch_thread_return_value_set(receiving_thread, 0);
z_ready_thread(receiving_thread);
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/*
 * asynchronous send: swap out current thread
 * if receiver has priority, otherwise let it continue
 *
 * note: dummy sending thread sits (unqueued)
 * until the receiver consumes the message
 */
if ((sending_thread->base.thread_state & _THREAD_DUMMY)
!= 0U) {
z_reschedule(&mbox->lock, key);
return 0;
}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);
/*
 * synchronous send: pend current thread (unqueued)
 * until the receiver consumes the message
 */
int ret = z_pend_curr(&mbox->lock, key, NULL, K_FOREVER);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);
return ret;
}
}
/* didn't find a matching receiver: don't wait for one */
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, -ENOMSG);
k_spin_unlock(&mbox->lock, key);
return -ENOMSG;
}
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/* asynchronous send: dummy thread waits on tx queue for receiver */
if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
z_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
k_spin_unlock(&mbox->lock, key);
return 0;
}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);
/* synchronous send: sender waits on tx queue for receiver or timeout */
int ret = z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);
return ret;
}
/* Public synchronous send: the current thread is the syncing thread, so
 * mbox_message_put() pends it until the message is consumed or the
 * timeout expires. Returns 0, -ENOMSG or -EAGAIN (see mbox_message_put).
 */
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
k_timeout_t timeout)
{
/* configure things for a synchronous send, then send the message */
tx_msg->_syncing_thread = _current;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout);
int ret = mbox_message_put(mbox, tx_msg, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, put, mbox, timeout, ret);
return ret;
}
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/* Public asynchronous send: a pooled descriptor with an embedded dummy
 * thread stands in for the caller, so this routine never blocks waiting
 * for a receiver (allocation of the descriptor itself may block if the
 * pool is exhausted). @a sem, if non-NULL, is given once the message has
 * been consumed.
 */
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
struct k_sem *sem)
{
struct k_mbox_async *async;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, async_put, mbox, sem);
/*
 * allocate an asynchronous message descriptor, configure both parts,
 * then send the message asynchronously
 */
mbox_async_alloc(&async);
/* dummy thread inherits the caller's priority for matching/scheduling */
async->thread.prio = _current->base.prio;
async->tx_msg = *tx_msg;
async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
async->tx_msg._async_sem = sem;
(void)mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, async_put, mbox, sem);
}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
/* Retrieve (or discard) the data of a received mailbox message, then
 * dispose of the message so the sender is notified.
 *
 * A NULL @a buffer means the receiver is discarding the data; the
 * reported size is zeroed so the sender sees that nothing was taken.
 */
void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
{
	if (buffer == NULL) {
		/* discard: report zero bytes consumed */
		rx_msg->size = 0;
	} else if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0U)) {
		/* hand the payload to the receiver's buffer */
		(void)memcpy(buffer, rx_msg->tx_data, rx_msg->size);
	}

	/* in every case the message is now fully consumed */
	mbox_message_dispose(rx_msg);
}
/**
 * @brief Handle immediate consumption of received mailbox message data.
 *
 * Checks to see if received message data should be kept for later retrieval,
 * or if the data should be consumed immediately and the message disposed of.
 *
 * The data is consumed immediately in either of the following cases:
 * 1) The receiver requested immediate retrieval by supplying a buffer
 * to receive the data.
 * 2) There is no data to be retrieved. (i.e. Data size is 0 bytes.)
 *
 * @param rx_msg Pointer to receive message descriptor.
 * @param buffer Pointer to buffer to receive data.
 *
 * @return 0
 */
static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
{
if (buffer != NULL) {
/* retrieve data now, then dispose of message */
k_mbox_data_get(rx_msg, buffer);
} else if (rx_msg->size == 0U) {
/* there is no data to get, so just dispose of message */
mbox_message_dispose(rx_msg);
} else {
/* keep message around for later data retrieval */
}
return 0;
}
/* Public receive: search the mailbox's tx queue for a compatible sender;
 * if none is found, pend on the rx queue until one appears or @a timeout
 * expires (K_NO_WAIT returns -ENOMSG immediately). With a non-NULL
 * @a buffer the data is consumed right away; otherwise it can be fetched
 * later via k_mbox_data_get(). Returns 0, -ENOMSG or -EAGAIN.
 */
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
k_timeout_t timeout)
{
struct k_thread *sending_thread;
struct k_mbox_msg *tx_msg;
k_spinlock_key_t key;
int result;
/* save receiver id so it can be used during message matching */
rx_msg->tx_target_thread = _current;
/* search mailbox's tx queue for a compatible sender */
key = k_spin_lock(&mbox->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, get, mbox, timeout);
_WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
if (mbox_message_match(tx_msg, rx_msg) == 0) {
/* take sender out of mailbox's tx queue */
z_unpend_thread(sending_thread);
k_spin_unlock(&mbox->lock, key);
/* consume message data immediately, if needed */
result = mbox_message_data_check(rx_msg, buffer);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
return result;
}
}
/* didn't find a matching sender */
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, -ENOMSG);
/* don't wait for a matching sender to appear */
k_spin_unlock(&mbox->lock, key);
return -ENOMSG;
}
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout);
/* wait until a matching sender appears or a timeout occurs
 * (swap_data is how the matching sender finds our descriptor)
 */
_current->base.swap_data = rx_msg;
result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);
/* consume message data immediately, if needed */
if (result == 0) {
result = mbox_message_data_check(rx_msg, buffer);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
return result;
}
#ifdef CONFIG_OBJ_CORE_MAILBOX
/* One-time boot hook: register the mailbox object type with the object
 * core framework and link every statically defined k_mbox into it.
 *
 * Always returns 0 (success).
 */
static int init_mailbox_obj_core_list(void)
{
/* Initialize mailbox object type */
z_obj_type_init(&obj_type_mailbox, K_OBJ_TYPE_MBOX_ID,
offsetof(struct k_mbox, obj_core));
/* Initialize and link statically defined mailboxes */
STRUCT_SECTION_FOREACH(k_mbox, mbox) {
k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);
}
return 0;
}
SYS_INIT(init_mailbox_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_MAILBOX */
``` | /content/code_sandbox/kernel/mailbox.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,488 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/device.h>
#include <zephyr/version.h>
#if defined(CONFIG_BOOT_DELAY) && (CONFIG_BOOT_DELAY > 0)
#define DELAY_STR STRINGIFY(CONFIG_BOOT_DELAY)
#define BANNER_POSTFIX " (delayed boot " DELAY_STR "ms)"
#else
#define BANNER_POSTFIX ""
#endif /* defined(CONFIG_BOOT_DELAY) && (CONFIG_BOOT_DELAY > 0) */
#ifndef BANNER_VERSION
#if defined(BUILD_VERSION) && !IS_EMPTY(BUILD_VERSION)
#define BANNER_VERSION STRINGIFY(BUILD_VERSION)
#else
#define BANNER_VERSION KERNEL_VERSION_STRING
#endif /* BUILD_VERSION */
#endif /* !BANNER_VERSION */
/* Print the boot banner on the console. Behavior is compile-time
 * configured:
 * - CONFIG_BOOT_DELAY > 0: announce and busy-wait the configured delay
 *   first (gives e.g. USB serial endpoints time to enumerate).
 * - CONFIG_BOOT_CLEAR_SCREEN: emit VT100 escapes to wipe the terminal.
 * - CONFIG_BOOT_BANNER: print the banner string plus version/postfix.
 */
void boot_banner(void)
{
#if defined(CONFIG_BOOT_DELAY) && (CONFIG_BOOT_DELAY > 0)
#ifdef CONFIG_BOOT_BANNER
printk("***** delaying boot " DELAY_STR "ms (per build configuration) *****\n");
#endif /* CONFIG_BOOT_BANNER */
k_busy_wait(CONFIG_BOOT_DELAY * USEC_PER_MSEC);
#endif /* defined(CONFIG_BOOT_DELAY) && (CONFIG_BOOT_DELAY > 0) */
#if defined(CONFIG_BOOT_CLEAR_SCREEN)
/* \x1b[ = escape sequence
 * 3J = erase scrollback
 * 2J = erase screen
 * H = move cursor to top left
 */
printk("\x1b[3J\x1b[2J\x1b[H");
#endif /* CONFIG_BOOT_CLEAR_SCREEN */
#ifdef CONFIG_BOOT_BANNER
printk("*** " CONFIG_BOOT_BANNER_STRING " " BANNER_VERSION BANNER_POSTFIX " ***\n");
#endif /* CONFIG_BOOT_BANNER */
}
``` | /content/code_sandbox/kernel/banner.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 349 |
```unknown
#
menuconfig OBJ_CORE
bool "Object core framework"
default n
help
This option enables the object core framework. This will link
participating kernel objects and their respective types together
in a way that allows them to both have common information stored
together and for that information to be easily retrieved by
automated means.
if OBJ_CORE
config OBJ_CORE_CONDVAR
bool "Integrate condition variables into object core framework"
default y
help
When enabled, this option integrates condition variables into the
object core framework.
config OBJ_CORE_EVENT
bool "Integrate events into object core framework"
default y if EVENTS
help
When enabled, this option integrates kernel events into the object
core framework.
config OBJ_CORE_FIFO
bool "Integrate FIFOs into object core framework"
default y
help
When enabled, this option integrates FIFOs into the object core
framework.
config OBJ_CORE_LIFO
bool "Integrate LIFOs into object core framework"
default y
help
When enabled, this option integrates LIFOs into the object core
framework.
config OBJ_CORE_MAILBOX
bool "Integrate mailboxes into object core framework"
default y
help
When enabled, this option integrates mailboxes into the object core
framework.
config OBJ_CORE_MEM_SLAB
bool "Integrate memory slabs into object core framework"
default y
help
When enabled, this option integrates memory slabs into the object
core framework.
config OBJ_CORE_MUTEX
bool "Integrate mutexes into object core framework"
default y
help
When enabled, this option integrates mutexes into the object core
framework.
config OBJ_CORE_MSGQ
bool "Integrate message queues into object core framework"
default y
help
When enabled, this option integrates message queues into the object
core framework.
config OBJ_CORE_SEM
bool "Integrate semaphores into object core framework"
default y
help
When enabled, this option integrates semaphores into the object core
framework.
config OBJ_CORE_PIPE
bool "Integrate pipe into object core framework"
default y if PIPES
help
When enabled, this option integrates pipes into the object core
framework.
config OBJ_CORE_SEM
bool "Integrate semaphores into object core framework"
default y
help
When enabled, this option integrates semaphores into the object core
framework.
config OBJ_CORE_STACK
bool "Integrate stacks into object core framework"
default y
help
When enabled, this option integrates stacks into the object core
framework.
config OBJ_CORE_THREAD
bool "Integrate threads into object core framework"
default y
help
When enabled, this option integrates threads into the object core
framework.
config OBJ_CORE_TIMER
bool "Integrate timers into object core framework"
default y
help
When enabled, this option integrates timers into the object core
framework.
config OBJ_CORE_SYSTEM
bool
default y
help
When enabled, this option integrates the internal CPU and kernel
system objects into the object core framework. As these are internal
structures, this option is hidden by default and only available to
advanced users.
menuconfig OBJ_CORE_STATS
bool "Object core statistics"
default n
help
This option integrates statistics gathering into the object core
framework.
if OBJ_CORE_STATS
config OBJ_CORE_STATS_MEM_SLAB
bool "Object core statistics for memory slabs"
default y if OBJ_CORE_MEM_SLAB
help
When enabled, this allows memory slab statistics to be integrated
into kernel objects.
config OBJ_CORE_STATS_THREAD
bool "Object core statistics for threads"
default y if OBJ_CORE_THREAD
select THREAD_RUNTIME_STATS
help
When enabled, this integrates thread runtime statistics into the
object core statistics framework.
config OBJ_CORE_STATS_SYSTEM
bool "Object core statistics for system level objects"
default y if OBJ_CORE_SYSTEM
select SCHED_THREAD_USAGE_ALL
help
When enabled, this integrates thread runtime statistics at the
CPU and system level into the object core statistics framework.
endif # OBJ_CORE_STATS
endif # OBJ_CORE
``` | /content/code_sandbox/kernel/Kconfig.obj_core | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 875 |
```unknown
# Kernel configuration options
menu "General Kernel Options"
module = KERNEL
module-str = kernel
source "subsys/logging/Kconfig.template.log_config"
config MULTITHREADING
bool "Multi-threading" if ARCH_HAS_SINGLE_THREAD_SUPPORT
default y
help
If disabled, only the main thread is available, so a main() function
must be provided. Interrupts are available. Kernel objects will most
probably not behave as expected, especially with regards to pending,
since the main thread cannot pend, it being the only thread in the
system.
Many drivers and subsystems will not work with this option
set to 'n'; disable only when you REALLY know what you are
doing.
config NUM_COOP_PRIORITIES
int "Number of coop priorities" if MULTITHREADING
default 1 if !MULTITHREADING
default 16
range 0 128
help
Number of cooperative priorities configured in the system. Gives access
to priorities:
K_PRIO_COOP(0) to K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
or seen another way, priorities:
-CONFIG_NUM_COOP_PRIORITIES to -1
This can be set to zero to disable cooperative scheduling. Cooperative
threads always preempt preemptible threads.
The total number of priorities is
NUM_COOP_PRIORITIES + NUM_PREEMPT_PRIORITIES + 1
The extra one is for the idle thread, which must run at the lowest
priority, and be the only thread at that priority.
config NUM_PREEMPT_PRIORITIES
int "Number of preemptible priorities" if MULTITHREADING
default 0 if !MULTITHREADING
default 15
range 0 128
help
Number of preemptible priorities available in the system. Gives access
to priorities 0 to CONFIG_NUM_PREEMPT_PRIORITIES - 1.
This can be set to 0 to disable preemptible scheduling.
The total number of priorities is
NUM_COOP_PRIORITIES + NUM_PREEMPT_PRIORITIES + 1
The extra one is for the idle thread, which must run at the lowest
priority, and be the only thread at that priority.
config MAIN_THREAD_PRIORITY
int "Priority of initialization/main thread"
default -2 if !PREEMPT_ENABLED
default 0
help
Priority at which the initialization thread runs, including the start
of the main() function. main() can then change its priority if desired.
config COOP_ENABLED
def_bool (NUM_COOP_PRIORITIES != 0)
config PREEMPT_ENABLED
def_bool (NUM_PREEMPT_PRIORITIES != 0)
config PRIORITY_CEILING
int "Priority inheritance ceiling"
default -127
help
This defines the minimum priority value (i.e. the logically
highest priority) that a thread will acquire as part of
k_mutex priority inheritance.
config NUM_METAIRQ_PRIORITIES
int "Number of very-high priority 'preemptor' threads"
default 0
help
This defines a set of priorities at the (numerically) lowest
end of the range which have "meta-irq" behavior. Runnable
threads at these priorities will always be scheduled before
threads at lower priorities, EVEN IF those threads are
otherwise cooperative and/or have taken a scheduler lock.
Making such a thread runnable in any way thus has the effect
of "interrupting" the current task and running the meta-irq
thread synchronously, like an exception or system call. The
intent is to use these priorities to implement "interrupt
bottom half" or "tasklet" behavior, allowing driver
subsystems to return from interrupt context but be guaranteed
that user code will not be executed (on the current CPU)
until the remaining work is finished. As this breaks the
"promise" of non-preemptibility granted by the current API
for cooperative threads, this tool probably shouldn't be used
from application code.
config SCHED_DEADLINE
bool "Earliest-deadline-first scheduling"
help
This enables a simple "earliest deadline first" scheduling
mode where threads can set "deadline" deltas measured in
k_cycle_get_32() units. Priority decisions within (!!) a
single priority will choose the next expiring deadline and
not simply the least recently added thread.
config SCHED_CPU_MASK
bool "CPU mask affinity/pinning API"
depends on SCHED_DUMB
help
When true, the application will have access to the
k_thread_cpu_mask_*() APIs which control per-CPU affinity masks in
SMP mode, allowing applications to pin threads to specific CPUs or
disallow threads from running on given CPUs. Note that as currently
implemented, this involves an inherent O(N) scaling in the number of
idle-but-runnable threads, and thus works only with the DUMB
scheduler (as SCALABLE and MULTIQ would see no benefit).
Note that this setting does not technically depend on SMP and is
implemented without it for testing purposes, but for obvious reasons
makes sense as an application API only where there is more than one
CPU. With one CPU, it's just a higher overhead version of
k_thread_start/stop().
config SCHED_CPU_MASK_PIN_ONLY
bool "CPU mask variant with single-CPU pinning only"
depends on SMP && SCHED_CPU_MASK
help
When true, enables a variant of SCHED_CPU_MASK where only
one CPU may be specified for every thread. Effectively, all
threads have a single "assigned" CPU and they will never be
scheduled symmetrically. In general this is not helpful,
but some applications have a carefully designed threading
architecture and want to make their own decisions about how
to assign work to CPUs. In that circumstance, some moderate
optimizations can be made (e.g. having a separate run queue
per CPU, keeping the list length shorter). When selected,
the CPU mask becomes an immutable thread attribute. It can
only be modified before a thread is started. Most
applications don't want this.
config MAIN_STACK_SIZE
int "Size of stack for initialization and main thread"
default 2048 if COVERAGE_GCOV
default 512 if ZTEST && !(RISCV || X86 || ARM || ARC || NIOS2)
default 1024
help
When the initialization is complete, the thread executing it then
executes the main() routine, so as to reuse the stack used by the
initialization, which would be wasted RAM otherwise.
After initialization is complete, the thread runs main().
config IDLE_STACK_SIZE
int "Size of stack for idle thread"
default 2048 if COVERAGE_GCOV
default 1024 if XTENSA
default 512 if RISCV
default 384 if DYNAMIC_OBJECTS
default 320 if ARC || (ARM && CPU_HAS_FPU) || (X86 && MMU)
default 256
help
Depending on the work that the idle task must do, most likely due to
power management but possibly to other features like system event
logging (e.g. logging when the system goes to sleep), the idle thread
may need more stack space than the default value.
config ISR_STACK_SIZE
int "ISR and initialization stack size (in bytes)"
default 2048
help
This option specifies the size of the stack used by interrupt
service routines (ISRs), and during kernel initialization.
config THREAD_STACK_INFO
bool "Thread stack info"
help
This option allows each thread to store the thread stack info into
the k_thread data structure.
config THREAD_STACK_MEM_MAPPED
bool "Stack to be memory mapped at runtime"
depends on MMU && ARCH_SUPPORTS_MEM_MAPPED_STACKS
select THREAD_STACK_INFO
select THREAD_ABORT_NEED_CLEANUP
help
This option changes behavior where the thread stack is memory
mapped with guard pages on both ends to catch undesired
accesses.
config THREAD_ABORT_HOOK
bool
help
Used by portability layers to modify locally managed status mask.
config THREAD_ABORT_NEED_CLEANUP
bool
help
This option enables the bits to clean up the current thread if
k_thread_abort(_current) is called, as the cleanup cannot be
running in the current thread stack.
config THREAD_CUSTOM_DATA
bool "Thread custom data"
help
This option allows each thread to store 32 bits of custom data,
which can be accessed using the k_thread_custom_data_xxx() APIs.
config THREAD_USERSPACE_LOCAL_DATA
bool
depends on USERSPACE
default y if ERRNO && !ERRNO_IN_TLS && !LIBC_ERRNO
config USERSPACE_THREAD_MAY_RAISE_PRIORITY
bool "Thread can raise own priority"
depends on USERSPACE
depends on TEST # This should only be enabled by tests.
help
Thread can raise its own priority in userspace mode.
config DYNAMIC_THREAD
bool "Support for dynamic threads [EXPERIMENTAL]"
select EXPERIMENTAL
depends on THREAD_STACK_INFO
select DYNAMIC_OBJECTS if USERSPACE
select THREAD_MONITOR
help
Enable support for dynamic threads and stacks.
if DYNAMIC_THREAD
config DYNAMIC_THREAD_STACK_SIZE
int "Size of each pre-allocated thread stack"
default 1024 if !64BIT
default 2048 if 64BIT
help
Default stack size (in bytes) for dynamic threads.
config DYNAMIC_THREAD_ALLOC
bool "Support heap-allocated thread objects and stacks"
help
Select this option to enable allocating thread object and
thread stacks from the system heap.
Only use this type of allocation in situations
where malloc is permitted.
config DYNAMIC_THREAD_POOL_SIZE
int "Number of statically pre-allocated threads"
default 0
range 0 8192
help
Pre-allocate a fixed number of thread objects and
stacks at build time.
This type of "dynamic" stack is usually suitable in
situations where malloc is not permitted.
choice DYNAMIC_THREAD_PREFER
prompt "Preferred dynamic thread allocator"
default DYNAMIC_THREAD_PREFER_POOL
help
If both CONFIG_DYNAMIC_THREAD_ALLOC=y and
CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0, then the user may
specify the order in which allocation is attempted.
config DYNAMIC_THREAD_PREFER_ALLOC
bool "Prefer heap-based allocation"
depends on DYNAMIC_THREAD_ALLOC
help
Select this option to attempt a heap-based allocation
prior to any pool-based allocation.
config DYNAMIC_THREAD_PREFER_POOL
bool "Prefer pool-based allocation"
help
Select this option to attempt a pool-based allocation
prior to any heap-based allocation.
endchoice # DYNAMIC_THREAD_PREFER
endif # DYNAMIC_THREAD
choice SCHED_ALGORITHM
prompt "Scheduler priority queue algorithm"
default SCHED_DUMB
help
The kernel can be built with several choices for the
ready queue implementation, offering different choices between
code size, constant factor runtime overhead and performance
scaling when many threads are added.
config SCHED_DUMB
bool "Simple linked-list ready queue"
help
When selected, the scheduler ready queue will be implemented
as a simple unordered list, with very fast constant time
performance for single threads and very low code size.
Choose this on systems with constrained code size that will
never see more than a small number (3, maybe) of runnable
threads in the queue at any given time. On most platforms
(that are not otherwise using the red/black tree) this
results in a savings of ~2k of code size.
config SCHED_SCALABLE
bool "Red/black tree ready queue"
help
When selected, the scheduler ready queue will be implemented
as a red/black tree. This has rather slower constant-time
insertion and removal overhead, and on most platforms (that
are not otherwise using the rbtree somewhere) requires an
extra ~2kb of code. But the resulting behavior will scale
cleanly and quickly into the many thousands of threads. Use
this on platforms where you may have many threads (very
roughly: more than 20 or so) marked as runnable at a given
time. Most applications don't want this.
config SCHED_MULTIQ
bool "Traditional multi-queue ready queue"
depends on !SCHED_DEADLINE
help
When selected, the scheduler ready queue will be implemented
as the classic/textbook array of lists, one per priority.
This corresponds to the scheduler algorithm used in Zephyr
versions prior to 1.12. It incurs only a tiny code size
overhead vs. the "dumb" scheduler and runs in O(1) time
in almost all circumstances with very low constant factor.
But it requires a fairly large RAM budget to store those list
heads, and the limited features make it incompatible with
features like deadline scheduling that need to sort threads
more finely, and SMP affinity which need to traverse the list
of threads. Typical applications with small numbers of runnable
threads probably want the DUMB scheduler.
endchoice # SCHED_ALGORITHM
choice WAITQ_ALGORITHM
prompt "Wait queue priority algorithm"
default WAITQ_DUMB
help
The wait_q abstraction used in IPC primitives to pend
threads for later wakeup shares the same backend data
structure choices as the scheduler, and can use the same
options.
config WAITQ_SCALABLE
bool "Use scalable wait_q implementation"
help
When selected, the wait_q will be implemented with a
balanced tree. Choose this if you expect to have many
threads waiting on individual primitives. There is a ~2kb
code size increase over WAITQ_DUMB (which may be shared with
SCHED_SCALABLE) if the rbtree is not used elsewhere in the
application, and pend/unpend operations on "small" queues
will be somewhat slower (though this is not generally a
performance path).
config WAITQ_DUMB
bool "Simple linked-list wait_q"
help
When selected, the wait_q will be implemented with a
doubly-linked list. Choose this if you expect to have only
a few threads blocked on any single IPC primitive.
endchoice # WAITQ_ALGORITHM
menu "Misc Kernel related options"
config LIBC_ERRNO
bool
help
Use external libc errno, not the internal one. This eliminates any
locally allocated errno storage and usage.
config ERRNO
bool "Errno support"
default y
help
Enable per-thread errno in the kernel. Application and library code must
include errno.h provided by the C library (libc) to use the errno
symbol. The C library must access the per-thread errno via the
z_errno() symbol.
config ERRNO_IN_TLS
bool "Store errno in thread local storage (TLS)"
depends on ERRNO && THREAD_LOCAL_STORAGE && !LIBC_ERRNO
default y
help
Use thread local storage to store errno instead of storing it in
the kernel thread struct. This avoids a syscall if userspace is enabled.
config CURRENT_THREAD_USE_NO_TLS
bool
help
Hidden symbol to not use thread local storage to store current
thread.
config CURRENT_THREAD_USE_TLS
bool "Store current thread in thread local storage (TLS)"
depends on THREAD_LOCAL_STORAGE && !CURRENT_THREAD_USE_NO_TLS
default y
help
Use thread local storage to store the current thread. This avoids a
syscall if userspace is enabled.
endmenu
menu "Kernel Debugging and Metrics"
config INIT_STACKS
bool "Initialize stack areas"
help
This option instructs the kernel to initialize stack areas with a
known value (0xaa) before they are first used, so that the high
water mark can be easily determined. This applies to the stack areas
for threads, as well as to the interrupt stack.
config SKIP_BSS_CLEAR
bool
help
This option disables software .bss section zeroing during Zephyr
initialization. Such boot-time optimization could be used for
platforms where .bss section is zeroed-out externally.
Please pay attention that when this option is enabled
the responsibility for .bss zeroing in all possible scenarios
(mind e.g. SW reset) is delegated to the external SW or HW.
config BOOT_BANNER
bool "Boot banner"
default y
select PRINTK
select EARLY_CONSOLE
help
This option outputs a banner to the console device during boot up.
config BOOT_BANNER_STRING
string "Boot banner string"
depends on BOOT_BANNER
default "Booting Zephyr OS build"
help
Use this option to set the boot banner.
config BOOT_DELAY
int "Boot delay in milliseconds"
depends on MULTITHREADING
default 0
help
This option delays bootup for the specified amount of
milliseconds. This is used to allow serial ports to get ready
before starting to print information on them during boot, as
some systems might boot too fast for a receiving endpoint to
detect the new USB serial bus, enumerate it and get ready to
receive before it actually gets data. A similar effect can be
achieved by waiting for DCD on the serial port--however, not
all serial ports have DCD.
config BOOT_CLEAR_SCREEN
bool "Clear screen"
help
Use this option to clear the screen before printing anything else.
Using a VT100 enabled terminal on the client side is required for this to work.
config THREAD_MONITOR
bool "Thread monitoring"
help
This option instructs the kernel to maintain a list of all threads
(excluding those that have not yet started or have already
terminated).
config THREAD_NAME
bool "Thread name"
help
This option allows setting a name for a thread.
config THREAD_MAX_NAME_LEN
int "Max length of a thread name"
default 32
default 64 if ZTEST
range 8 128
depends on THREAD_NAME
help
Thread names get stored in the k_thread struct. Indicate the max
name length, including the terminating NULL byte. Reduce this value
to conserve memory.
config INSTRUMENT_THREAD_SWITCHING
bool
menuconfig THREAD_RUNTIME_STATS
bool "Thread runtime statistics"
help
Gather thread runtime statistics.
For example:
- Thread total execution cycles
- System total execution cycles
if THREAD_RUNTIME_STATS
config THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
bool "Use timing functions to gather statistics"
select TIMING_FUNCTIONS_NEED_AT_BOOT
help
Use timing functions to gather thread runtime statistics.
Note that timing functions may use a different timer than
the default timer for OS timekeeping.
config SCHED_THREAD_USAGE
bool "Collect thread runtime usage"
default y
select INSTRUMENT_THREAD_SWITCHING if !USE_SWITCH
help
Collect thread runtime info at context switch time
config SCHED_THREAD_USAGE_ANALYSIS
bool "Analyze the collected thread runtime usage statistics"
default n
depends on SCHED_THREAD_USAGE
select INSTRUMENT_THREAD_SWITCHING if !USE_SWITCH
help
Collect additional timing information related to thread scheduling
for analysis purposes. This includes the total time that a thread
has been scheduled, the longest time for which it was scheduled and
others.
config SCHED_THREAD_USAGE_ALL
bool "Collect total system runtime usage"
default y if SCHED_THREAD_USAGE
depends on SCHED_THREAD_USAGE
help
Maintain a sum of all non-idle thread cycle usage.
config SCHED_THREAD_USAGE_AUTO_ENABLE
bool "Automatically enable runtime usage statistics"
default y
depends on SCHED_THREAD_USAGE
help
When set, this option automatically enables the gathering of both
the thread and CPU usage statistics.
endif # THREAD_RUNTIME_STATS
endmenu
rsource "Kconfig.obj_core"
menu "System Work Queue Options"
config SYSTEM_WORKQUEUE_STACK_SIZE
int "System workqueue stack size"
default 4096 if COVERAGE_GCOV
default 2560 if WIFI_NM_WPA_SUPPLICANT
default 1024
config SYSTEM_WORKQUEUE_PRIORITY
int "System workqueue priority"
default -2 if COOP_ENABLED && !PREEMPT_ENABLED
default 0 if !COOP_ENABLED
default -1
help
By default, system work queue priority is the lowest cooperative
priority. This means that any work handler, once started, won't
be preempted by any other thread until finished.
config SYSTEM_WORKQUEUE_NO_YIELD
bool "Select whether system work queue yields"
help
By default, the system work queue yields between each work item, to
prevent other threads from being starved. Selecting this removes
this yield, which may be useful if the work queue thread is
cooperative and a sequence of work items is expected to complete
without yielding.
endmenu
menu "Barrier Operations"
config BARRIER_OPERATIONS_BUILTIN
bool
help
Use the compiler builtin functions for barrier operations. This is
the preferred method. However, support for all arches in GCC is
incomplete.
config BARRIER_OPERATIONS_ARCH
bool
help
Use when there isn't support for compiler built-ins, but you have
written optimized assembly code under arch/ which implements these.
endmenu
menu "Atomic Operations"
config ATOMIC_OPERATIONS_BUILTIN
bool
help
Use the compiler builtin functions for atomic operations. This is
the preferred method. However, support for all arches in GCC is
incomplete.
config ATOMIC_OPERATIONS_ARCH
bool
help
Use when there isn't support for compiler built-ins, but you have
written optimized assembly code under arch/ which implements these.
config ATOMIC_OPERATIONS_C
bool
help
Use atomic operations routines that are implemented entirely
in C by locking interrupts. Selected by architectures which either
do not have support for atomic operations in their instruction
set, or haven't been implemented yet during bring-up, and also
the compiler does not have support for the atomic __sync_* builtins.
endmenu
menu "Timer API Options"
config TIMESLICING
bool "Thread time slicing"
default y
depends on SYS_CLOCK_EXISTS && (NUM_PREEMPT_PRIORITIES != 0)
help
This option enables time slicing between preemptible threads of
equal priority.
config TIMESLICE_SIZE
int "Time slice size (in ms)"
default 0
range 0 $(INT32_MAX)
depends on TIMESLICING
help
This option specifies the maximum amount of time a thread can execute
before other threads of equal priority are given an opportunity to run.
A time slice size of zero means "no limit" (i.e. an infinitely large
time slice).
config TIMESLICE_PRIORITY
int "Time slicing thread priority ceiling"
default 0
range 0 NUM_PREEMPT_PRIORITIES
depends on TIMESLICING
help
This option specifies the thread priority level at which time slicing
takes effect; threads having a higher priority than this ceiling are
not subject to time slicing.
config TIMESLICE_PER_THREAD
bool "Support per-thread timeslice values"
depends on TIMESLICING
help
When set, this enables an API for setting timeslice values on
a per-thread basis, with an application callback invoked when
a thread reaches the end of its timeslice.
endmenu
menu "Other Kernel Object Options"
config POLL
bool "Async I/O Framework"
help
Asynchronous notification framework. Enable the k_poll() and
k_poll_signal_raise() APIs. The former can wait on multiple events
concurrently, which can be either directly triggered or triggered by
the availability of some kernel objects (semaphores and FIFOs).
config MEM_SLAB_TRACE_MAX_UTILIZATION
bool "Getting maximum slab utilization"
help
This adds variable to the k_mem_slab structure to hold
maximum utilization of the slab.
config NUM_MBOX_ASYNC_MSGS
int "Maximum number of in-flight asynchronous mailbox messages"
default 10
help
This option specifies the total number of asynchronous mailbox
messages that can exist simultaneously, across all mailboxes
in the system.
Setting this option to 0 disables support for asynchronous
mailbox messages.
config EVENTS
bool "Event objects"
help
This option enables event objects. Threads may wait on event
objects for specific events, but both threads and ISRs may deliver
events to event objects.
Note that setting this option slightly increases the size of the
thread structure.
config PIPES
bool "Pipe objects"
help
This option enables kernel pipes. A pipe is a kernel object that
allows a thread to send a byte stream to another thread. Pipes can
be used to synchronously transfer chunks of data in whole or in part.
Note that setting this option slightly increases the size of the
thread structure.
config KERNEL_MEM_POOL
bool "Use Kernel Memory Pool"
default y
help
Enable the use of kernel memory pool.
Say y if unsure.
if KERNEL_MEM_POOL
config HEAP_MEM_POOL_SIZE
int "Heap memory pool size (in bytes)"
default 0
help
This option specifies the size of the heap memory pool used when
dynamically allocating memory using k_malloc(). The maximum size of
the memory pool is only limited to available memory. If subsystems
specify HEAP_MEM_POOL_ADD_SIZE_* options, these will be added together
and the sum will be compared to the HEAP_MEM_POOL_SIZE value.
If the sum is greater than the HEAP_MEM_POOL_SIZE option (even if this
has the default 0 value), then the actual heap size will be rounded up
to the sum of the individual requirements (unless the
HEAP_MEM_POOL_IGNORE_MIN option is enabled). If the final value, after
considering both this option as well as sum of the custom
requirements, ends up being zero, then no system heap will be
available.
config HEAP_MEM_POOL_IGNORE_MIN
bool "Ignore the minimum heap memory pool requirement"
help
This option can be set to force setting a smaller heap memory pool
size than what's specified by enabled subsystems. This can be useful
when optimizing memory usage and a more precise minimum heap size
is known for a given application.
endif # KERNEL_MEM_POOL
endmenu
config SWAP_NONATOMIC
bool
help
On some architectures, the _Swap() primitive cannot be made
atomic with respect to the irq_lock being released. That
is, interrupts may be received between the entry to _Swap
and the completion of the context switch. There are a
handful of workaround cases in the kernel that need to be
enabled when this is true. Currently, this only happens on
ARM when the PendSV exception priority sits below that of
Zephyr-handled interrupts.
config SYS_CLOCK_TICKS_PER_SEC
int "System tick frequency (in ticks/second)"
default 100 if QEMU_TARGET || SOC_POSIX
default 10000 if TICKLESS_KERNEL
default 100
help
This option specifies the nominal frequency of the system clock in Hz.
For asynchronous timekeeping, the kernel defines a "ticks" concept. A
"tick" is the internal count in which the kernel does all its internal
uptime and timeout bookkeeping. Interrupts are expected to be delivered
on tick boundaries to the extent practical, and no fractional ticks
are tracked.
The choice of tick rate is configurable by this option. Also the number
of cycles per tick should be chosen so that 1 millisecond is exactly
represented by an integral number of ticks. Defaults on most hardware
platforms (ones that support setting arbitrary interrupt timeouts) are
expected to be in the range of 10 kHz, with software emulation
platforms and legacy drivers using a more traditional 100 Hz value.
Note that when available and enabled, in "tickless" mode
this config variable specifies the minimum available timing
granularity, not necessarily the number or frequency of
interrupts delivered to the kernel.
A value of 0 completely disables timer support in the kernel.
config SYS_CLOCK_HW_CYCLES_PER_SEC
int "System clock's h/w timer frequency"
help
This option specifies the frequency of the hardware timer used for the
system clock (in Hz). This option is set by the SOC's or board's Kconfig file
and the user should generally avoid modifying it via the menu configuration.
config SYS_CLOCK_EXISTS
bool "System clock exists and is enabled"
default y
help
This option specifies that the kernel has timer support.
Some device configurations can eliminate significant code if
this is disabled. Obviously timeout-related APIs will not
work when disabled.
config TIMEOUT_64BIT
bool "Store kernel timeouts in 64 bit precision"
default y
help
When this option is true, the k_ticks_t values passed to
kernel APIs will be a 64 bit quantity, allowing the use of
larger values (and higher precision tick rates) without fear
of overflowing the 32 bit word. This feature also gates the
availability of absolute timeout values (which require the
extra precision).
config SYS_CLOCK_MAX_TIMEOUT_DAYS
int "Max timeout (in days) used in conversions"
default 365
help
Value is used in the time conversion static inline function to determine
at compile time which algorithm to use. One algorithm is faster, takes
less code but may overflow if multiplication of source and target
frequency exceeds 64 bits. Second algorithm prevents that. Faster
algorithm is selected for conversion if maximum timeout represented in
source frequency domain multiplied by target frequency fits in 64 bits.
config BUSYWAIT_CPU_LOOPS_PER_USEC
int "Number of CPU loops per microsecond for crude busy looping"
depends on !SYS_CLOCK_EXISTS && !ARCH_HAS_CUSTOM_BUSY_WAIT
default 500
help
Calibration for crude CPU based busy loop duration. The default
is assuming 1 GHz CPU and 2 cycles per loop. Reality is certainly
much worse but all we want here is a ball-park figure that ought
to be good enough for the purpose of being able to configure out
system timer support. If accuracy is very important then
implementing arch_busy_wait() should be considered.
config XIP
bool "Execute in place"
help
This option allows the kernel to operate with its text and read-only
sections residing in ROM (or similar read-only memory). Not all boards
support this option so it must be used with care; you must also
supply a linker command file when building your image. Enabling this
option increases both the code and data footprint of the image.
menu "Security Options"
config STACK_CANARIES
bool "Compiler stack canaries"
depends on ENTROPY_GENERATOR || TEST_RANDOM_GENERATOR
select NEED_LIBC_MEM_PARTITION if !STACK_CANARIES_TLS
help
This option enables compiler stack canaries.
If stack canaries are supported by the compiler, it will emit
extra code that inserts a canary value into the stack frame when
a function is entered and validates this value upon exit.
Stack corruption (such as that caused by buffer overflow) results
in a fatal error condition for the running entity.
Enabling this option can result in a significant increase
in footprint and an associated decrease in performance.
If stack canaries are not supported by the compiler an error
will occur at build time.
if STACK_CANARIES
config STACK_CANARIES_TLS
bool "Stack canaries using thread local storage"
depends on THREAD_LOCAL_STORAGE
depends on ARCH_HAS_STACK_CANARIES_TLS
help
This option enables compiler stack canaries on TLS.
Stack canaries will live in the thread local storage and
each thread will have its own canary. This makes it harder
to predict the canary location and value.
When enabled this causes an additional performance penalty
during thread creations because it needs a new random value
per thread.
endif
config EXECUTE_XOR_WRITE
bool "W^X for memory partitions"
depends on USERSPACE
depends on ARCH_HAS_EXECUTABLE_PAGE_BIT
default y
help
When enabled, will enforce that a writable page isn't executable
and vice versa. This might not be acceptable in all scenarios,
so this option is given for those unafraid of shooting themselves
in the foot.
If unsure, say Y.
config STACK_POINTER_RANDOM
int "Initial stack pointer randomization bounds"
depends on !STACK_GROWS_UP
depends on MULTITHREADING
depends on TEST_RANDOM_GENERATOR || ENTROPY_HAS_DRIVER
default 0
help
This option performs a limited form of Address Space Layout
Randomization by offsetting some random value to a thread's
initial stack pointer upon creation. This hinders some types of
security attacks by making the location of any given stack frame
non-deterministic.
This feature can waste up to the specified size in bytes the stack
region, which is carved out of the total size of the stack region.
A reasonable minimum value would be around 100 bytes if this can
be spared.
This is currently only implemented for systems whose stack pointers
grow towards lower memory addresses.
config BOUNDS_CHECK_BYPASS_MITIGATION
bool "Bounds check bypass mitigations for speculative execution"
depends on USERSPACE
help
Untrusted parameters from user mode may be used in system calls to
index arrays during speculative execution, also known as the Spectre
V1 vulnerability. When enabled, various macros defined in
misc/speculation.h will insert fence instructions or other appropriate
mitigations after bounds checking any array index parameters passed
in from untrusted sources (user mode threads). When disabled, these
macros do nothing.
endmenu
rsource "Kconfig.mem_domain"
rsource "Kconfig.smp"
config TICKLESS_KERNEL
bool "Tickless kernel"
default y if TICKLESS_CAPABLE
depends on TICKLESS_CAPABLE
help
This option enables a fully event driven kernel. Periodic system
clock interrupt generation would be stopped at all times.
config TOOLCHAIN_SUPPORTS_THREAD_LOCAL_STORAGE
bool
default y if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "zephyr" || "$(ZEPHYR_TOOLCHAIN_SUPPORTS_THREAD_LOCAL_STORAGE)" = "y"
help
Hidden option to signal that toolchain supports generating code
with thread local storage.
config THREAD_LOCAL_STORAGE
bool "Thread Local Storage (TLS)"
depends on ARCH_HAS_THREAD_LOCAL_STORAGE && TOOLCHAIN_SUPPORTS_THREAD_LOCAL_STORAGE
select NEED_LIBC_MEM_PARTITION if (CPU_CORTEX_M && USERSPACE)
help
This option enables thread local storage (TLS) support in kernel.
config KERNEL_WHOLE_ARCHIVE
bool
help
This option forces every object file in the libkernel.a archive
to be included, rather than searching the archive for required object files.
config TOOLCHAIN_SUPPORTS_STATIC_INIT_GNU
# As of today only ARC MWDT toolchain doesn't support GNU-compatible
# initialization of static objects, new toolchains can be added
# here if required.
def_bool "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "arcmwdt"
config STATIC_INIT_GNU
bool "Support GNU-compatible initializers and constructors"
default y if CPP || NATIVE_LIBRARY || COVERAGE
depends on TOOLCHAIN_SUPPORTS_STATIC_INIT_GNU
depends on !CMAKE_LINKER_GENERATOR
help
GNU-compatible initialization of static objects. This is required for
C++ constructor support as well as for initializer functions as
defined by GNU-compatible toolchains. This increases the size
of Zephyr binaries by around 100 bytes. If you know your
application doesn't need any initializers, you can disable this
option.
The ARC MWDT toolchain does not support or use this setting,
and has instead separate C++ constructor initialization code.
Note the option CMAKE_LINKER_GENERATOR does not yet support this feature
or CPP.
endmenu
rsource "Kconfig.device"
rsource "Kconfig.vm"
``` | /content/code_sandbox/kernel/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,788 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/pm/pm.h>
#include <stdbool.h>
#include <zephyr/logging/log.h>
/* private kernel APIs */
#include <ksched.h>
#include <kswap.h>
#include <wait_q.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/**
 * @brief Body of the idle thread (one instance per CPU)
 *
 * Loops forever.  Each pass parks the CPU via k_cpu_idle() until the
 * next interrupt; when CONFIG_PM is enabled it first offers the SoC
 * power-management layer a chance to enter a deeper low-power state.
 * Never returns.
 *
 * @param unused1 unused thread entry parameter
 * @param unused2 unused thread entry parameter
 * @param unused3 unused thread entry parameter
 */
void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

	/* The idle thread must sit at a preemptible (non-negative)
	 * priority so any ready thread can displace it.
	 */
	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't actually
		 * enter an idle state, because they can't be notified
		 * of scheduler changes (i.e. threads they should
		 * run).  They just spin instead, with a minimal
		 * relaxation loop to prevent hammering the scheduler
		 * lock and/or timer driver.  This is intended as a
		 * fallback configuration for new platform bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			for (volatile int i = 0; i < 100000; i++) {
				/* Empty loop */
			}
			z_swap_unlocked();
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked.  It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

#ifdef CONFIG_PM
		/* Publish the next timeout so the PM layer knows how
		 * long the CPU could potentially sleep.
		 */
		_kernel.idle = z_get_next_timeout_expiry();

		/*
		 * Call the suspend hook function of the soc interface
		 * to allow entry into a low power state. The function
		 * returns false if low power state was not entered, in
		 * which case, kernel does normal idle processing.
		 *
		 * This function is entered with interrupts disabled.
		 * If a low power state was entered, then the hook
		 * function should enable interrupts before exiting.
		 * This is because the kernel does not do its own idle
		 * processing in those cases i.e. skips k_cpu_idle().
		 * The kernel's idle processing re-enables interrupts
		 * which is essential for the kernel's scheduling
		 * logic.
		 */
		if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
			k_cpu_idle();
		}
#else
		k_cpu_idle();
#endif /* CONFIG_PM */

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context. So in that setup we need to
		 * explicitly yield in the idle thread otherwise
		 * nothing else will run once it starts.
		 */
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */
#endif /* !defined(CONFIG_PREEMPT_ENABLED) */
	}
}
/**
 * @brief Weak default CPU relaxation hook for busy spin loops
 *
 * Architectures may override this with a dedicated "pause"/"yield"
 * style instruction; the default simply executes a no-op.  Must be
 * called with interrupts locked, which the assertion verifies.
 */
void __weak arch_spin_relax(void)
{
	__ASSERT(!arch_irq_unlocked(arch_irq_lock()),
		 "this is meant to be called with IRQs disabled");

	arch_nop();
}
``` | /content/code_sandbox/kernel/idle.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 781 |
```c
/*
*
*/
#include <zephyr/types.h>
#include <zephyr/version.h> /* generated by MAKE, at compile time */
/**
* @brief Return the kernel version of the present build
*
* The kernel version is a four-byte value, whose format is described in the
* file "kernel_version.h".
*
* @return kernel version
*/
uint32_t sys_kernel_version_get(void)
{
	/* KERNELVERSION is a build-time generated constant (see
	 * zephyr/version.h); no runtime state is consulted.
	 */
	return KERNELVERSION;
}
``` | /content/code_sandbox/kernel/version.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 90 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <kthread.h>
struct k_spinlock z_thread_monitor_lock;
/*
* Remove a thread from the kernel's list of active threads.
*/
/*
 * Unlink a terminating thread from the kernel's list of active
 * threads.  A no-op if the thread is not on the list.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
	struct k_thread **link = &_kernel.threads;

	/* Walk the chain of "next" slots (the list head counts as one)
	 * until we find the slot pointing at the departing thread,
	 * then splice it out.
	 */
	while ((*link != NULL) && (*link != thread)) {
		link = &(*link)->next_thread;
	}

	if (*link != NULL) {
		*link = thread->next_thread;
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
	k_spinlock_key_t key;
	struct k_thread *iter;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	/*
	 * Hold the monitor lock across the whole walk so the callback
	 * can neither observe nor cause concurrent modification of
	 * _kernel.threads, whether directly or indirectly (e.g. by
	 * calling k_thread_create() or k_thread_abort()).
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	iter = _kernel.threads;
	while (iter != NULL) {
		user_cb(iter, user_data);
		iter = iter->next_thread;
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
}
void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
	k_spinlock_key_t key;
	struct k_thread *iter;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	iter = _kernel.threads;
	while (iter != NULL) {
		/* Drop the monitor lock around the callback, then
		 * reacquire it before following the link.
		 */
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(iter, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
		iter = iter->next_thread;
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#ifdef CONFIG_SMP
/**
 * @brief Invoke a callback for every monitored thread on a given CPU
 *
 * Iterates over _kernel.threads with the monitor lock held and calls
 * @a user_cb for each thread whose base.cpu equals @a cpu.  Because the
 * lock is held for the whole walk, the callback must not create or
 * abort threads (directly or indirectly).
 *
 * @param cpu       CPU index to filter on (must be < CONFIG_MP_MAX_NUM_CPUS)
 * @param user_cb   Callback invoked for each matching thread
 * @param user_data Arbitrary pointer passed through to the callback
 */
void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
				    void *user_data)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");

	/*
	 * Lock is needed to make sure that the _kernel.threads is not being
	 * modified by the user_cb either directly or indirectly.
	 * The indirect ways are through calling k_thread_create and
	 * k_thread_abort from user_cb.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		/* Braced per coding style (matches the _unlocked variant) */
		if (thread->base.cpu == cpu) {
			user_cb(thread, user_data);
		}
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
}
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
					     void *user_data)
{
	k_spinlock_key_t key;
	struct k_thread *iter;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	iter = _kernel.threads;
	while (iter != NULL) {
		if (iter->base.cpu == cpu) {
			/* Release the monitor lock while the callback
			 * runs, then reacquire before advancing.
			 */
			k_spin_unlock(&z_thread_monitor_lock, key);
			user_cb(iter, user_data);
			key = k_spin_lock(&z_thread_monitor_lock);
		}
		iter = iter->next_thread;
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif /* CONFIG_SMP */
``` | /content/code_sandbox/kernel/thread_monitor.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 943 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/linker/linker-defs.h>
#ifdef CONFIG_STACK_CANARIES
#ifdef CONFIG_STACK_CANARIES_TLS
extern __thread volatile uintptr_t __stack_chk_guard;
#else
extern volatile uintptr_t __stack_chk_guard;
#endif /* CONFIG_STACK_CANARIES_TLS */
#endif /* CONFIG_STACK_CANARIES */
/**
 * @brief Copy the data section from ROM to RAM
 *
 * Copies every initialized-data region present in this build (.data,
 * ramfunc, CCM/ITCM/DTCM when chosen in devicetree, relocated
 * sections, and the userspace app shared memory partition) from its
 * load address to its runtime address during early boot.
 */
void z_data_copy(void)
{
	z_early_memcpy(&__data_region_start, &__data_region_load_start,
		       __data_region_end - __data_region_start);
#ifdef CONFIG_ARCH_HAS_RAMFUNC_SUPPORT
	z_early_memcpy(&__ramfunc_start, &__ramfunc_load_start,
		       (uintptr_t) &__ramfunc_size);
#endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay)
	z_early_memcpy(&__ccm_data_start, &__ccm_data_rom_start,
		       __ccm_data_end - __ccm_data_start);
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_itcm), okay)
	z_early_memcpy(&__itcm_start, &__itcm_load_start,
		       (uintptr_t) &__itcm_size);
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
	z_early_memcpy(&__dtcm_data_start, &__dtcm_data_load_start,
		       __dtcm_data_end - __dtcm_data_start);
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
	extern void data_copy_xip_relocation(void);

	data_copy_xip_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_STACK_CANARIES
	/* stack canary checking is active for all C functions.
	 * __stack_chk_guard is some uninitialized value living in the
	 * app shared memory sections. Preserve it, and don't make any
	 * function calls to perform the memory copy. The true canary
	 * value gets set later in z_cstart().
	 */
	uintptr_t guard_copy = __stack_chk_guard;
	uint8_t *src = (uint8_t *)&_app_smem_rom_start;
	uint8_t *dst = (uint8_t *)&_app_smem_start;
	uint32_t count = _app_smem_end - _app_smem_start;

	/* Byte-at-a-time loop: deliberately avoids a memcpy() call
	 * while the canary value is not yet valid.  (A redundant
	 * second read of __stack_chk_guard was removed here; nothing
	 * writes the guard between its initial read and this loop.)
	 */
	while (count > 0) {
		*(dst++) = *(src++);
		count--;
	}
	__stack_chk_guard = guard_copy;
#else
	z_early_memcpy(&_app_smem_start, &_app_smem_rom_start,
		       _app_smem_end - _app_smem_start);
#endif /* CONFIG_STACK_CANARIES */
#endif /* CONFIG_USERSPACE */
}
``` | /content/code_sandbox/kernel/xip.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 607 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <wait_q.h>
#include <kthread.h>
#include <priority_q.h>
#include <kswap.h>
#include <ipi.h>
#include <kernel_arch_func.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/sys/util.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
extern struct k_thread *pending_current;
#endif
struct k_spinlock _sched_spinlock;
/* Storage to "complete" the context switch from an invalid/incomplete thread
* context (ex: exiting an ISR that aborted _current)
*/
__incoherent struct k_thread _thread_dummy;
static void update_cache(int preempt_ok);
static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
"You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
"CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
"threads.");
/*
 * Three-way effective-priority comparison, memcmp()-style:
 *   > 0  thread_1 has higher effective priority than thread_2
 *   = 0  equal effective priority
 *   < 0  thread_1 has lower effective priority than thread_2
 * Callers must only rely on the sign, never on the magnitude
 * (again, like memcmp).
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* prio fits in well under 32 bits, so this subtraction cannot
	 * overflow.  Numerically smaller prio means higher priority.
	 */
	int32_t p1 = thread_1->base.prio;
	int32_t p2 = thread_2->base.prio;

	if (p1 != p2) {
		return p2 - p1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* All deadlines are documented to live within the same "half"
	 * of the 32 bit modulus space, so the (unsigned) difference of
	 * the latest and earliest queued deadlines is guaranteed
	 * 2's-complement non-negative.  That lets us order them
	 * without consulting the current time.
	 */
	uint32_t dl1 = thread_1->base.prio_deadline;
	uint32_t dl2 = thread_2->base.prio_deadline;

	if (dl1 != dl2) {
		/* Sooner deadline wins.  Computing in unsigned and
		 * casting to signed isn't perfect, but it downgrades
		 * overflow from UB to implementation-defined.
		 */
		return (int32_t)(dl2 - dl1);
	}
#endif /* CONFIG_SCHED_DEADLINE */

	return 0;
}
/* Select the run queue a runnable thread belongs to.  Under
 * CONFIG_SCHED_CPU_MASK_PIN_ONLY each CPU owns a private queue chosen
 * from the thread's cpu_mask; otherwise there is one global queue.
 */
static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!). Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}
/* Run queue to pull the next thread from on the current CPU: the
 * per-CPU queue under CONFIG_SCHED_CPU_MASK_PIN_ONLY, the single
 * global one otherwise.
 */
static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}
/* Insert a thread into its run queue; idle threads are never queued
 * (enforced by the assertion).
 */
static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_add(thread_runq(thread), thread);
}
static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
_priq_run_remove(thread_runq(thread), thread);
}
static ALWAYS_INLINE struct k_thread *runq_best(void)
{
return _priq_run_best(curr_cpu_runq());
}
/* _current is never in the run queue until context switch on
* SMP configurations, see z_requeue_current()
*/
static inline bool should_queue_thread(struct k_thread *thread)
{
	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
}
/* Mark a thread _THREAD_QUEUED and add it to the run queue, except
 * for _current on SMP, which stays out of the queue until context
 * switch (see z_requeue_current()).
 */
static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif /* CONFIG_SMP */
}
/* Inverse of queue_thread(): clear _THREAD_QUEUED and drop the
 * thread from the run queue if it was physically in it.
 */
static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}
/* Called out of z_swap() when CONFIG_SMP. The current thread can
* never live in the run queue until we are inexorably on the context
* switch path on SMP, otherwise there is a deadlock condition where a
* set of CPUs pick a cycle of threads to run and wait for them all to
* context switch forever.
*/
void z_requeue_current(struct k_thread *thread)
{
	/* The outgoing thread may be flagged queued while physically
	 * absent from the run queue (SMP rule); re-add only then.
	 */
	if (z_is_thread_queued(thread)) {
		runq_add(thread);
	}
	signal_pending_ipi();
}
/* Return true if the thread is aborting, else false */
static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}
/* Return true if the thread is aborting or suspending, else false */
static inline bool is_halting(struct k_thread *thread)
{
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}
/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
{
	/* Fence first: remote CPUs poll thread_state without a lock in
	 * thread_halt_spin(), so prior stores must be visible before
	 * the halting bits drop.
	 */
	barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
}
/* Core thread-selection routine: pick the thread that should run
 * next on this CPU.  NOTE(review): callers in this file invoke it
 * under _sched_spinlock (update_cache, z_get_next_switch_handle);
 * confirm the lock requirement holds for all callers.
 */
static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
	/* If _current was asynchronously flagged for halt by another
	 * CPU, finish the halt here before selecting a successor.
	 */
	if (is_halting(_current)) {
		halt_thread(_current, is_aborting(_current) ?
				      _THREAD_DEAD : _THREAD_SUSPENDED);
	}
#endif /* CONFIG_SMP */
	struct k_thread *thread = runq_best();
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;
	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */
#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	bool queued = z_is_thread_queued(_current);
	bool active = !z_is_thread_prevented_from_running(_current);
	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}
	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);
		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}
		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}
	/* Put _current back into the queue */
	if ((thread != _current) && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}
	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	_current_cpu->swap_ok = false;
	return thread;
#endif /* CONFIG_SMP */
}
void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
if (z_is_thread_queued(thread)) {
dequeue_thread(thread);
}
queue_thread(thread);
update_cache(thread == _current);
}
/* Track cooperative threads preempted by metairqs so we can return to
* them specifically. Called at the moment a new thread has been
* selected to run.
*/
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
	    !thread_is_preemptible(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#else
	ARG_UNUSED(thread);
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */
}
/* Refresh the scheduler's "what runs next" state after a change to
 * the run queue.  @p preempt_ok indicates the caller permits a
 * cooperative-context switch away from _current.
 */
static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();
	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif /* CONFIG_TIMESLICING */
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}
#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch. It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif /* CONFIG_SMP */
}
static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
{
	/* Returns pointer to _cpu if the thread is currently running on
	 * another CPU. There are more scalable designs to answer this
	 * question in constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	/* Use unsigned indices throughout: arch_num_cpus() returns
	 * unsigned int, and mixing a signed loop index with it draws
	 * -Wsign-compare and invites conversion surprises.
	 */
	unsigned int currcpu = _current_cpu->id;
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int i = 0; i < num_cpus; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return &_kernel.cpus[i];
		}
	}
#endif /* CONFIG_SMP */
	ARG_UNUSED(thread);
	return NULL;
}
/* Make a thread runnable: enqueue it, refresh the scheduling cache,
 * and flag an IPI for CPUs that could run it.  No-op if the thread
 * is already queued or not in a ready state.
 */
static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif /* CONFIG_KERNEL_COHERENCE */
	/* If thread is queued already, do not try and added it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
		queue_thread(thread);
		update_cache(0);
		flag_ipi(ipi_mask_create(thread));
	}
}
/* Ready a thread; caller already holds the scheduler lock. */
void z_ready_thread_locked(struct k_thread *thread)
{
	if (thread_active_elsewhere(thread) == NULL) {
		ready_thread(thread);
	}
}
/* Ready a thread, taking the scheduler lock internally.  Skipped if
 * the thread is presently running on another CPU.
 */
void z_ready_thread(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread_active_elsewhere(thread) == NULL) {
			ready_thread(thread);
		}
	}
}
void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}
/* First-start entry: mark the thread started, ready it, and
 * reschedule.  Idempotent for already-started threads.
 */
void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
	if (z_has_thread_started(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}
	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&_sched_spinlock, key);
}
/* Spins in ISR context, waiting for a thread known to be running on
* another CPU to catch the IPI we sent and halt. Note that we check
* for ourselves being asynchronously halted first to prevent simple
* deadlocks (but not complex ones involving cycles of 3+ threads!).
* Acts to release the provided lock before returning.
*/
static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
{
	/* Check ourselves first: another CPU may have flagged *us*
	 * for halt, and waiting on it while it waits on us would
	 * deadlock.
	 */
	if (is_halting(_current)) {
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
	}
	k_spin_unlock(&_sched_spinlock, key);
	/* Lockless poll on the target's halting bits; cleared via
	 * clear_halting() after a memory fence on the other side.
	 */
	while (is_halting(thread)) {
		unsigned int k = arch_irq_lock();
		arch_spin_relax(); /* Requires interrupts be masked */
		arch_irq_unlock(k);
	}
}
/* Shared handler for k_thread_{suspend,abort}().  Called with the
 * scheduler lock held and the key passed (which it may
 * release/reacquire!) which will be released before a possible return
 * (aborting _current will not return, obviously), which may be after
 * a context switch.
 */
static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
			  bool terminate)
{
	_wait_q_t *wq = &thread->join_queue;
#ifdef CONFIG_SMP
	/* Suspend waiters park on halt_queue; join waiters on join_queue */
	wq = terminate ? wq : &thread->halt_queue;
#endif
	/* If the target is a thread running on another CPU, flag and
	 * poke (note that we might spin to wait, so a true
	 * synchronous IPI is needed here, not deferred!), it will
	 * halt itself in the IPI.  Otherwise it's unscheduled, so we
	 * can clean it up directly.
	 */
	struct _cpu *cpu = thread_active_elsewhere(thread);
	if (cpu != NULL) {
		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
					      : _THREAD_SUSPENDING);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
#else
		arch_sched_broadcast_ipi();
#endif
#endif
		if (arch_is_in_isr()) {
			/* Can't pend in an ISR: busy-wait for the halt */
			thread_halt_spin(thread, key);
		} else  {
			/* Sleep on the halt/join queue until it's done */
			add_to_waitq_locked(_current, wq);
			z_swap(&_sched_spinlock, key);
		}
	} else {
		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
		if ((thread == _current) && !arch_is_in_isr()) {
			z_swap(&_sched_spinlock, key);
			__ASSERT(!terminate, "aborted _current back from dead");
		} else {
			k_spin_unlock(&_sched_spinlock, key);
		}
	}
	/* NOTE: the scheduler lock has been released.  Don't put
	 * logic here, it's likely to be racy/deadlocky even if you
	 * re-take the lock!
	 */
}
/* Suspend a thread; no-op if it is already suspended.  Any pending
 * thread timeout is cancelled first.
 */
void z_impl_k_thread_suspend(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
	(void)z_abort_thread_timeout(thread);
	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {
		/* The target thread is already suspended. Nothing to do. */
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}
	z_thread_halt(thread, key, false);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: validate the thread object, then defer to impl. */
static inline void z_vrfy_k_thread_suspend(k_tid_t thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <zephyr/syscalls/k_thread_suspend_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Resume a previously-suspended thread; no-op for threads that are
 * not suspended.
 */
void z_impl_k_thread_resume(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}
	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);
	z_reschedule(&_sched_spinlock, key);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: validate the thread object, then defer to impl. */
static inline void z_vrfy_k_thread_resume(k_tid_t thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <zephyr/syscalls/k_thread_resume_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Wait queue the thread is currently pended on; asserts it is pended. */
static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);
	return thread->base.pended_on;
}
/* Pull a thread out of the run queue and refresh the scheduler cache. */
static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}
/* _sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);
	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);
	/* A NULL wait_q means "pend with timeout only", no queue */
	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		_priq_wait_add(&wait_q->waitq, thread);
	}
}
/* Arm the thread's wakeup timeout unless it waits forever. */
static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}
/* Pend a thread on wait_q with a timeout; _sched_spinlock held. */
static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
			k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif /* CONFIG_KERNEL_COHERENCE */
	add_to_waitq_locked(thread, wait_q);
	add_thread_timeout(thread, timeout);
}
/* Public pend entry: only _current or a dummy thread may be pended
 * through this path (see assertion).
 */
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	K_SPINLOCK(&_sched_spinlock) {
		pend_locked(thread, wait_q, timeout);
	}
}
/* Remove a thread from its wait queue without touching its timeout;
 * _sched_spinlock held by caller.
 */
static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}
/* Locked wrapper; harmless no-op if the thread isn't pended. */
void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}
}
/* Wake a pended/sleeping thread, either explicitly or from timeout
 * expiry (@p is_timeout).  Threads being killed are left alone.
 */
void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
{
	K_SPINLOCK(&_sched_spinlock) {
		bool killed = (thread->base.thread_state &
				(_THREAD_DEAD | _THREAD_ABORTING));
#ifdef CONFIG_EVENTS
		bool do_nothing = thread->no_wake_on_timeout && is_timeout;
		thread->no_wake_on_timeout = false;
		if (do_nothing) {
			/* K_SPINLOCK expands to a one-pass loop, so
			 * continue exits the locked block here.
			 */
			continue;
		}
#endif /* CONFIG_EVENTS */
		if (!killed) {
			/* The thread is not being killed */
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			if (is_timeout) {
				z_mark_thread_as_not_suspended(thread);
			}
			ready_thread(thread);
		}
	}
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	/* Recover the owning thread from its embedded timeout record */
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);
	z_sched_wake_thread(thread, true);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */
/* Pend _current on wait_q, atomically releasing the caller's lock.
 * Returns the z_swap() result (wakeup/timeout status).
 */
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
	       _wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
	/* We do a "lock swap" prior to calling z_swap(), such that
	 * the caller's lock gets released as desired.  But we ensure
	 * that we hold the scheduler lock and leave local interrupts
	 * masked until we reach the context switch.  z_swap() itself
	 * has similar code; the duplication is because it's a legacy
	 * API that doesn't expect to be called with scheduler lock
	 * held.
	 */
	(void) k_spin_lock(&_sched_spinlock);
	pend_locked(_current, wait_q, timeout);
	k_spin_release(lock);
	return z_swap(&_sched_spinlock, key);
}
/* Dequeue the highest-priority waiter from wait_q (its timeout, if
 * any, is left running).  Returns NULL if the queue is empty.
 */
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;
	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);
		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}
	return thread;
}
/* Like z_unpend1_no_timeout() but also cancels the waiter's timeout. */
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;
	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);
		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}
	return thread;
}
/* Remove a specific thread from its wait queue and cancel its timeout. */
void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}
/* Priority set utility that does no rescheduling, it just changes the
* run queue state, returning true if a reschedule is needed later.
*/
bool z_thread_prio_set(struct k_thread *thread, int prio)
{
bool need_sched = 0;
int old_prio = thread->base.prio;
K_SPINLOCK(&_sched_spinlock) {
need_sched = z_is_thread_ready(thread);
if (need_sched) {
if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
dequeue_thread(thread);
thread->base.prio = prio;
queue_thread(thread);
if (old_prio > prio) {
flag_ipi(ipi_mask_create(thread));
}
} else {
/*
* This is a running thread on SMP. Update its
* priority, but do not requeue it. An IPI is
* needed if the priority is both being lowered
* and it is running on another CPU.
*/
thread->base.prio = prio;
struct _cpu *cpu;
cpu = thread_active_elsewhere(thread);
if ((cpu != NULL) && (old_prio < prio)) {
flag_ipi(IPI_CPU_MASK(cpu->id));
}
}
update_cache(1);
} else {
thread->base.prio = prio;
}
}
SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);
return need_sched;
}
/* Decide whether a context switch is permitted here: interrupts must
 * have been unlocked at the outer level and we must not be in an ISR.
 */
static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif /* CONFIG_SMP */
	return arch_irq_unlocked(key) && !arch_is_in_isr();
}
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;
	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;
	return new_thread != _current;
#endif /* CONFIG_SMP */
}
/* Reschedule, swapping if allowed and useful; otherwise just drop the
 * lock and flush any deferred IPIs.
 */
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
		signal_pending_ipi();
	}
}
/* Variant of z_reschedule() for irq_lock()-style keys. */
void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key) && need_swap()) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
		signal_pending_ipi();
	}
}
/* Take one level of scheduler lock for _current (nestable). */
void k_sched_lock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);
		z_sched_lock();
	}
}
/* Release one level of scheduler lock and reschedule if needed. */
void k_sched_unlock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0U, "");
		__ASSERT(!arch_is_in_isr(), "");
		/* NOTE(review): sched_locked appears to count down on
		 * lock (z_sched_lock) and back up here — confirm
		 * against z_sched_lock()'s definition.
		 */
		++_current->base.sched_locked;
		update_cache(0);
	}
	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);
	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
	z_reschedule_unlocked();
}
/* Return the thread z_swap() should switch to (which may be the
 * caller itself, meaning "don't switch").
 */
struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
	struct k_thread *ret = next_up();
	if (ret == _current) {
		/* When not swapping, have to signal IPIs here.  In
		 * the context switch case it must happen later, after
		 * _current gets requeued.
		 */
		signal_pending_ipi();
	}
	return ret;
#else
	return _kernel.ready_q.cache;
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_USE_SWITCH
/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
	z_thread_mark_switched_out();
	_current_cpu->current = new_thread;
}
/**
* @brief Determine next thread to execute upon completion of an interrupt
*
* Thread preemption is performed by context switching after the completion
* of a non-recursed interrupt. This function determines which thread to
* switch to if any. This function accepts as @p interrupted either:
*
* - The handle for the interrupted thread in which case the thread's context
* must already be fully saved and ready to be picked up by a different CPU.
*
* - NULL if more work is required to fully save the thread's state after
* it is known that a new thread is to be scheduled. It is up to the caller
* to store the handle resulting from the thread that is being switched out
* in that thread's "switch_handle" field after its
* context has fully been saved, following the same requirements as with
* the @ref arch_switch() function.
*
* If a new thread needs to be scheduled then its handle is returned.
* Otherwise the same value provided as @p interrupted is returned back.
* Those handles are the same opaque types used by the @ref arch_switch()
* function.
*
* @warning
* The @ref _current value may have changed after this call and not refer
* to the interrupted thread anymore. It might be necessary to make a local
* copy before calling this function.
*
* @param interrupted Handle for the thread that was interrupted or NULL.
* @retval Handle for the next thread to execute, or @p interrupted when
* no new thread is to be scheduled.
*/
void *z_get_next_switch_handle(void *interrupted)
{
	z_check_stack_sentinel();

#ifdef CONFIG_SMP
	void *ret = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		struct k_thread *old_thread = _current, *new_thread;

		if (IS_ENABLED(CONFIG_SMP)) {
			/* Mark old thread "in flight" so other CPUs
			 * can't pick it up until its handle is stored
			 * below (see z_sched_switch_spin()).
			 */
			old_thread->switch_handle = NULL;
		}
		new_thread = next_up();

		z_sched_usage_switch(new_thread);

		if (old_thread != new_thread) {
			uint8_t cpu_id;

			update_metairq_preempt(new_thread);
			z_sched_switch_spin(new_thread);
			arch_cohere_stacks(old_thread, interrupted, new_thread);

			_current_cpu->swap_ok = 0;
			cpu_id = arch_curr_cpu()->id;
			new_thread->base.cpu = cpu_id;
			set_current(new_thread);

#ifdef CONFIG_TIMESLICING
			z_reset_time_slice(new_thread);
#endif /* CONFIG_TIMESLICING */

#ifdef CONFIG_SPIN_VALIDATE
			/* Changed _current!  Update the spinlock
			 * bookkeeping so the validation doesn't get
			 * confused when the "wrong" thread tries to
			 * release the lock.
			 */
			z_spin_lock_set_owner(&_sched_spinlock);
#endif /* CONFIG_SPIN_VALIDATE */

			/* A queued (runnable) old/current thread
			 * needs to be added back to the run queue
			 * here, and atomically with its switch handle
			 * being set below.  This is safe now, as we
			 * will not return into it.
			 */
			if (z_is_thread_queued(old_thread)) {
#ifdef CONFIG_SCHED_IPI_CASCADE
				if ((new_thread->base.cpu_mask != -1) &&
				    (old_thread->base.cpu_mask != BIT(cpu_id))) {
					flag_ipi(ipi_mask_create(old_thread));
				}
#endif
				runq_add(old_thread);
			}
		}
		old_thread->switch_handle = interrupted;
		ret = new_thread->switch_handle;
		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
	}
	signal_pending_ipi();
	return ret;
#else
	z_sched_usage_switch(_kernel.ready_q.cache);
	_current->switch_handle = interrupted;
	set_current(_kernel.ready_q.cache);
	return _current->switch_handle;
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_USE_SWITCH */
/* Wake every waiter on wait_q (unpend, cancel timeout, ready).
 * Returns nonzero if any thread was woken, so the caller knows a
 * reschedule may be warranted.
 */
int z_unpend_all(_wait_q_t *wait_q)
{
	int woke_any = 0;
	struct k_thread *waiter;

	while ((waiter = z_waitq_head(wait_q)) != NULL) {
		z_unpend_thread(waiter);
		z_ready_thread(waiter);
		woke_any = 1;
	}

	return woke_any;
}
/* Initialize one ready queue structure for the configured scheduler
 * backend (rbtree, multi-queue, or simple dlist).
 */
void init_ready_q(struct _ready_q *ready_q)
{
#if defined(CONFIG_SCHED_SCALABLE)
	ready_q->runq = (struct _priq_rb) {
		.tree = {
			.lessthan_fn = z_priq_rb_lessthan,
		}
	};
#elif defined(CONFIG_SCHED_MULTIQ)
	/* Size the loop from the queue being initialized, not the
	 * global _kernel.ready_q: with per-CPU ready queues the
	 * parameter is not the global object, and the old form only
	 * worked because the array types happened to match.
	 */
	for (int i = 0; i < ARRAY_SIZE(ready_q->runq.queues); i++) {
		sys_dlist_init(&ready_q->runq.queues[i]);
	}
#else
	sys_dlist_init(&ready_q->runq);
#endif
}
/* Boot-time scheduler init: set up the run queue(s). */
void z_sched_init(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	/* One ready queue per CPU when threads are pinned */
	for (int cpu = 0; cpu < CONFIG_MP_MAX_NUM_CPUS; cpu++) {
		init_ready_q(&_kernel.cpus[cpu].ready_q);
	}
#else
	init_ready_q(&_kernel.ready_q);
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}
/* Set a thread's priority and reschedule unless the caller holds the
 * scheduler lock (uniprocessor only; SMP always reschedules).
 */
void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
{
	/*
	 * Use NULL, since we cannot know what the entry point is (we do not
	 * keep track of it) and idle cannot change its priority.
	 */
	Z_ASSERT_VALID_PRIO(prio, NULL);
	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
	if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
			     (_current->base.sched_locked == 0U))) {
		z_reschedule_unlocked();
	}
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: validate object and priority range; by default a
 * user thread may only lower (numerically raise) its priority.
 */
static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
				    "invalid thread priority %d", prio));
#ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
	K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
				    "thread priority may only be downgraded (%d < %d)",
				    prio, thread->base.prio));
#endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
	z_impl_k_thread_priority_set(thread, prio);
}
#include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_SCHED_DEADLINE
/* Set an EDF deadline @p deadline cycles in the future for @p tid.
 * Negative inputs are clamped to zero.
 */
void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	deadline = CLAMP(deadline, 0, INT_MAX);
	struct k_thread *thread = tid;
	int32_t newdl = k_cycle_get_32() + deadline;
	/* The prio_deadline field changes the sorting order, so can't
	 * change it while the thread is in the run queue (dlists
	 * actually are benign as long as we requeue it before we
	 * release the lock, but an rbtree will blow up if we break
	 * sorting!)
	 */
	K_SPINLOCK(&_sched_spinlock) {
		if (z_is_thread_queued(thread)) {
			dequeue_thread(thread);
			thread->base.prio_deadline = newdl;
			queue_thread(thread);
		} else {
			thread->base.prio_deadline = newdl;
		}
	}
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: validate the object and require a positive
 * deadline before deferring to the impl.
 */
static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *thread = tid;
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
				    "invalid thread deadline %d",
				    (int)deadline));
	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
}
#include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
bool k_can_yield(void)
{
return !(k_is_pre_kernel() || k_is_in_isr() ||
z_is_idle_thread_object(_current));
}
/* Yield: requeue _current behind equal-priority peers and swap. */
void z_impl_k_yield(void)
{
	__ASSERT(!arch_is_in_isr(), "");

	SYS_PORT_TRACING_FUNC(k_thread, yield);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	/* On uniprocessor _current is always queued; on SMP it may
	 * not be, so only dequeue when it actually is.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    z_is_thread_queued(_current)) {
		dequeue_thread(_current);
	}
	queue_thread(_current);
	update_cache(1);
	z_swap(&_sched_spinlock, key);
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: no arguments to validate. */
static inline void z_vrfy_k_yield(void)
{
	z_impl_k_yield();
}
#include <zephyr/syscalls/k_yield_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Sleep for @p ticks (absolute if Z_TICK_ABS-encoded).  Returns the
 * number of ticks remaining if woken early, else 0.
 */
static int32_t z_tick_sleep(k_ticks_t ticks)
{
	uint32_t expected_wakeup_ticks;

	__ASSERT(!arch_is_in_isr(), "");

	LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);

	/* wait of 0 ms is treated as a 'yield' */
	if (ticks == 0) {
		k_yield();
		return 0;
	}

	if (Z_TICK_ABS(ticks) <= 0) {
		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
	} else {
		expected_wakeup_ticks = Z_TICK_ABS(ticks);
	}

	k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
	unready_thread(_current);
	z_add_thread_timeout(_current, timeout);
	/* Suspended (not pended): only the timeout or an explicit
	 * k_wakeup() brings us back.
	 */
	z_mark_thread_as_suspended(_current);

	(void)z_swap(&_sched_spinlock, key);

	__ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");

	/* Modular tick arithmetic: positive remainder means an early
	 * wakeup (e.g. k_wakeup()).
	 */
	ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
	if (ticks > 0) {
		return ticks;
	}

	return 0;
}
/* Sleep for @p timeout; K_FOREVER suspends the caller.  Returns the
 * milliseconds remaining when woken early, 0 on full sleep.
 */
int32_t z_impl_k_sleep(k_timeout_t timeout)
{
	k_ticks_t ticks;

	__ASSERT(!arch_is_in_isr(), "");

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);

	/* in case of K_FOREVER, we suspend */
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		k_thread_suspend(_current);
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);

		return (int32_t) K_TICKS_FOREVER;
	}

	ticks = timeout.ticks;

	ticks = z_tick_sleep(ticks);

	/* Convert the leftover tick count back to milliseconds */
	int32_t ret = k_ticks_to_ms_ceil64(ticks);

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);

	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: timeout is by value, nothing to validate. */
static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
{
	return z_impl_k_sleep(timeout);
}
#include <zephyr/syscalls/k_sleep_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Sleep for @p us microseconds (rounded up to ticks).  Returns the
 * microseconds remaining when woken early, 0 on full sleep.
 */
int32_t z_impl_k_usleep(int32_t us)
{
	int32_t ticks;

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);

	ticks = k_us_to_ticks_ceil64(us);
	ticks = z_tick_sleep(ticks);

	int32_t ret = k_ticks_to_us_ceil64(ticks);

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);

	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: argument is by value, nothing to validate. */
static inline int32_t z_vrfy_k_usleep(int32_t us)
{
	return z_impl_k_usleep(us);
}
#include <zephyr/syscalls/k_usleep_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Wake a thread sleeping via k_sleep()/k_usleep().  No effect on
 * threads pended on an object or suspended by other means.
 */
void z_impl_k_wakeup(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);

	/* Pended threads (waiting on an object) are not "sleeping" */
	if (z_is_thread_pending(thread)) {
		return;
	}

	if (z_abort_thread_timeout(thread) < 0) {
		/* Might have just been sleeping forever */
		if (thread->base.thread_state != _THREAD_SUSPENDED) {
			return;
		}
	}

	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);

	z_mark_thread_as_not_suspended(thread);

	if (thread_active_elsewhere(thread) == NULL) {
		ready_thread(thread);
	}

	if (arch_is_in_isr()) {
		k_spin_unlock(&_sched_spinlock, key);
	} else {
		z_reschedule(&_sched_spinlock, key);
	}
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: validate the thread object, then defer to impl. */
static inline void z_vrfy_k_wakeup(k_tid_t thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_wakeup(thread);
}
#include <zephyr/syscalls/k_wakeup_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Return the currently running thread on this CPU. */
k_tid_t z_impl_k_sched_current_thread_query(void)
{
#ifdef CONFIG_SMP
	/* In SMP, _current is a field read from _current_cpu, which
	 * can race with preemption before it is read.  We must lock
	 * local interrupts when reading it.
	 */
	unsigned int k = arch_irq_lock();
#endif /* CONFIG_SMP */

	k_tid_t ret = _current_cpu->current;

#ifdef CONFIG_SMP
	arch_irq_unlock(k);
#endif /* CONFIG_SMP */
	return ret;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: read-only query, nothing to validate. */
static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
{
	return z_impl_k_sched_current_thread_query();
}
#include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Wake every waiter on wait_q with a return value of 0: unpend,
 * cancel its timeout, set its swap retval, and make it runnable.
 */
static inline void unpend_all(_wait_q_t *wait_q)
{
	struct k_thread *waiter;

	while ((waiter = z_waitq_head(wait_q)) != NULL) {
		unpend_thread_no_timeout(waiter);
		(void)z_abort_thread_timeout(waiter);
		arch_thread_return_value_set(waiter, 0);
		ready_thread(waiter);
	}
}
#ifdef CONFIG_THREAD_ABORT_HOOK
extern void thread_abort_hook(struct k_thread *thread);
#endif /* CONFIG_THREAD_ABORT_HOOK */
/**
* @brief Dequeues the specified thread
*
* Dequeues the specified thread and move it into the specified new state.
*
* @param thread Identify the thread to halt
* @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
*/
static void halt_thread(struct k_thread *thread, uint8_t new_state)
{
	bool dummify = false;

	/* We hold the lock, and the thread is known not to be running
	 * anywhere.
	 */
	if ((thread->base.thread_state & new_state) == 0U) {
		thread->base.thread_state |= new_state;
		if (z_is_thread_queued(thread)) {
			dequeue_thread(thread);
		}

		if (new_state == _THREAD_DEAD) {
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			(void)z_abort_thread_timeout(thread);
			unpend_all(&thread->join_queue);

			/* Edge case: aborting _current from within an
			 * ISR that preempted it requires clearing the
			 * _current pointer so the upcoming context
			 * switch doesn't clobber the now-freed
			 * memory
			 */
			if (thread == _current && arch_is_in_isr()) {
				dummify = true;
			}
		}
#ifdef CONFIG_SMP
		/* Release any suspend-waiters parked in z_thread_halt() */
		unpend_all(&thread->halt_queue);
#endif /* CONFIG_SMP */
		update_cache(1);

		if (new_state == _THREAD_SUSPENDED) {
			/* Suspension is done; the abort-only teardown
			 * below must not run.
			 */
			clear_halting(thread);
			return;
		}

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
		arch_float_disable(thread);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);

		z_thread_monitor_exit(thread);
#ifdef CONFIG_THREAD_ABORT_HOOK
		thread_abort_hook(thread);
#endif /* CONFIG_THREAD_ABORT_HOOK */

#ifdef CONFIG_OBJ_CORE_THREAD
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
		k_obj_core_stats_deregister(K_OBJ_CORE(thread));
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
		k_obj_core_unlink(K_OBJ_CORE(thread));
#endif /* CONFIG_OBJ_CORE_THREAD */

#ifdef CONFIG_USERSPACE
		z_mem_domain_exit_thread(thread);
		k_thread_perms_all_clear(thread);
		k_object_uninit(thread->stack_obj);
		k_object_uninit(thread);
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
		k_thread_abort_cleanup(thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

		/* Do this "set _current to dummy" step last so that
		 * subsystems above can rely on _current being
		 * unchanged.  Disabled for posix as that arch
		 * continues to use the _current pointer in its swap
		 * code.  Note that we must leave a non-null switch
		 * handle for any threads spinning in join() (this can
		 * never be used, as our thread is flagged dead, but
		 * it must not be NULL otherwise join can deadlock).
		 */
		if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
#ifdef CONFIG_USE_SWITCH
			_current->switch_handle = _current;
#endif
			z_dummy_thread_init(&_thread_dummy);

		}

		/* Finally update the halting thread state, on which
		 * other CPUs might be spinning (see
		 * thread_halt_spin()).
		 */
		clear_halting(thread);
	}
}
/* Abort @a thread.
 *
 * Essential threads may never be aborted: doing so asserts in debug
 * builds and panics otherwise.  Aborting an already-dead thread is a
 * harmless no-op.  In the normal case the thread is halted with
 * terminate=true; z_thread_halt() receives the spinlock key and is
 * responsible for releasing _sched_spinlock.
 */
void z_thread_abort(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if (z_is_thread_essential(thread)) {
		/* Drop the lock before the assert/panic path runs */
		k_spin_unlock(&_sched_spinlock, key);
		__ASSERT(false, "aborting essential thread %p", thread);
		k_panic();
		return;
	}

	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
		/* Already dead: nothing to do */
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	/* Note: no unlock here, z_thread_halt() consumes the key */
	z_thread_halt(thread, key, true);
}

#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
/* Default k_thread_abort() implementation, used when the architecture
 * does not provide its own override.
 */
void z_impl_k_thread_abort(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);

	z_thread_abort(thread);

	/* On return the target thread must be fully dead */
	__ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
}
#endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
/* Wait until @a thread exits, or until @a timeout expires.
 *
 * Returns 0 when the target is (or becomes) dead, -EBUSY for a
 * K_NO_WAIT call on a live thread, -EDEADLK for a self-join or a
 * mutual join, otherwise the result of z_swap() after pending on the
 * target's join queue.
 */
int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
	int ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);

	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
		/* Wait for the dying thread to finish switching out
		 * before reporting success.
		 */
		z_sched_switch_spin(thread);
		ret = 0;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		ret = -EBUSY;
	} else if ((thread == _current) ||
		   (thread->base.pended_on == &_current->join_queue)) {
		/* Self-join, or the target is already joining on us:
		 * either way waiting would deadlock.
		 */
		ret = -EDEADLK;
	} else {
		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
		add_to_waitq_locked(_current, &thread->join_queue);
		add_thread_timeout(_current, timeout);

		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
		/* z_swap() releases the lock, so return directly here */
		ret = z_swap(&_sched_spinlock, key);
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);

		return ret;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);

	k_spin_unlock(&_sched_spinlock, key);
	return ret;
}
#ifdef CONFIG_USERSPACE
/* Special case: don't oops if the thread is uninitialized. This is because
* the initialization bit does double-duty for thread objects; if false, means
* the thread object is truly uninitialized, or the thread ran and exited for
* some reason.
*
* Return true in this case indicating we should just do nothing and return
* success to the caller.
*/
/* Validate a thread object handed in from user mode.
 *
 * Returns false when the object is valid and initialized (caller
 * should proceed), true when the object is uninitialized (caller
 * should silently succeed, see comment above).  Any other validation
 * failure oopses the calling user thread.
 */
static bool thread_obj_validate(struct k_thread *thread)
{
	struct k_object *ko = k_object_find(thread);
	int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);

	if (ret == 0) {
		/* Valid, initialized thread object */
		return false;
	}

	if (ret == -EINVAL) {
		/* Uninitialized (or already exited): treat as no-op */
		return true;
	}

#ifdef CONFIG_LOG
	k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
#endif /* CONFIG_LOG */
	K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
/* Syscall handler for k_thread_join(); an uninitialized thread object
 * is treated as already exited (returns 0).
 */
static inline int z_vrfy_k_thread_join(struct k_thread *thread,
				       k_timeout_t timeout)
{
	if (thread_obj_validate(thread)) {
		return 0;
	}

	return z_impl_k_thread_join(thread, timeout);
}
#include <zephyr/syscalls/k_thread_join_mrsh.c>

/* Syscall handler for k_thread_abort(); user mode may never abort an
 * essential thread, and aborting an uninitialized thread object is a
 * silent no-op.
 */
static inline void z_vrfy_k_thread_abort(k_tid_t thread)
{
	if (thread_obj_validate(thread)) {
		return;
	}

	K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
				    "aborting essential thread %p", thread));

	z_impl_k_thread_abort((struct k_thread *)thread);
}
#include <zephyr/syscalls/k_thread_abort_mrsh.c>
#endif /* CONFIG_USERSPACE */
/*
* future scheduler.h API implementations
*/
/* Wake the best (highest priority) thread pending on @a wait_q, give
 * it @a swap_retval / @a swap_data as its wakeup result, and make it
 * runnable.  Returns true if a thread was woken, false if the queue
 * was empty.
 */
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
{
	struct k_thread *thread;
	bool ret = false;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			z_thread_return_value_set_with_data(thread,
							    swap_retval,
							    swap_data);
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
			ready_thread(thread);
			ret = true;
		}
	}

	return ret;
}

/* Pend the current thread on @a wait_q (releasing @a lock/@a key via
 * z_pend_curr()) until it is woken or @a timeout expires.  If @a data
 * is non-NULL it receives the swap data provided by the waker.
 * Returns the z_pend_curr() result.
 */
int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
{
	int ret = z_pend_curr(lock, key, wait_q, timeout);

	if (data != NULL) {
		*data = _current->base.swap_data;
	}
	return ret;
}

/* Walk @a wait_q under the scheduler lock, invoking @a func on each
 * pended thread until the queue is exhausted or the callback returns
 * non-zero.  Returns the last callback result (0 on a full walk).
 */
int z_sched_waitq_walk(_wait_q_t *wait_q,
		       int (*func)(struct k_thread *, void *), void *data)
{
	struct k_thread *thread;
	int status = 0;

	K_SPINLOCK(&_sched_spinlock) {
		_WAIT_Q_FOR_EACH(wait_q, thread) {
			/*
			 * Invoke the callback function on each waiting thread
			 * for as long as there are both waiting threads AND
			 * it returns 0.
			 */
			status = func(thread, data);
			if (status != 0) {
				break;
			}
		}
	}

	return status;
}
``` | /content/code_sandbox/kernel/sched.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 11,137 |
```c
/*
*
*/
/**
* @file
*
* @brief Kernel semaphore object.
*
* The semaphores are of the 'counting' type, i.e. each 'give' operation will
* increment the internal count by 1, if no thread is pending on it. The 'init'
* call initializes the count to 'initial_count'. Following multiple 'give'
* operations, the same number of 'take' operations can be performed without
* the calling thread having to pend on the semaphore, or the calling task
* having to poll.
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <wait_q.h>
#include <zephyr/sys/dlist.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>
/* We use a system-wide lock to synchronize semaphores, which has
* unfortunate performance impact vs. using a per-object lock
* (semaphores are *very* widely used). But per-object locks require
* significant extra RAM. A properly spin-aware semaphore
* implementation would spin on atomic access to the count variable,
* and not a spinlock per se. Useful optimization for the future...
*/
static struct k_spinlock lock;
#ifdef CONFIG_OBJ_CORE_SEM
static struct k_obj_type obj_type_sem;
#endif /* CONFIG_OBJ_CORE_SEM */
/* See k_sem_init(): initialize count/limit and the wait queue.
 * Returns -EINVAL if @a limit is zero or @a initial_count exceeds it,
 * 0 on success.
 */
int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
		      unsigned int limit)
{
	/*
	 * Limit cannot be zero and count cannot be greater than limit
	 */
	CHECKIF(limit == 0U || initial_count > limit) {
		SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, -EINVAL);

		return -EINVAL;
	}

	sem->count = initial_count;
	sem->limit = limit;

	SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, 0);

	z_waitq_init(&sem->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&sem->poll_events);
#endif /* CONFIG_POLL */

	k_object_init(sem);

#ifdef CONFIG_OBJ_CORE_SEM
	k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
#endif /* CONFIG_OBJ_CORE_SEM */

	return 0;
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_sem_init(): validate and claim the object,
 * then defer to the kernel implementation.
 */
int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count,
		      unsigned int limit)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(sem, K_OBJ_SEM));

	return z_impl_k_sem_init(sem, initial_count, limit);
}
#include <zephyr/syscalls/k_sem_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Signal K_POLL_STATE_SEM_AVAILABLE to any pollers of @a sem.
 * Returns true if poll events were handled (so the caller should
 * reschedule), false when CONFIG_POLL is disabled.
 */
static inline bool handle_poll_events(struct k_sem *sem)
{
#ifdef CONFIG_POLL
	z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
	return true;
#else
	ARG_UNUSED(sem);
	return false;
#endif /* CONFIG_POLL */
}
/* See k_sem_give(): if a thread is pended, hand the semaphore to it
 * directly (the count is not incremented); otherwise bump the count,
 * saturating at the limit, and notify pollers.
 */
void z_impl_k_sem_give(struct k_sem *sem)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_thread *thread;
	bool resched = true;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, give, sem);

	thread = z_unpend_first_thread(&sem->wait_q);

	if (thread != NULL) {
		/* Ownership passes straight to the woken thread */
		arch_thread_return_value_set(thread, 0);
		z_ready_thread(thread);
	} else {
		/* Saturating increment: count never exceeds limit */
		sem->count += (sem->count != sem->limit) ? 1U : 0U;
		resched = handle_poll_events(sem);
	}

	if (resched) {
		z_reschedule(&lock, key);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, give, sem);
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_sem_give() */
static inline void z_vrfy_k_sem_give(struct k_sem *sem)
{
	K_OOPS(K_SYSCALL_OBJ(sem, K_OBJ_SEM));
	z_impl_k_sem_give(sem);
}
#include <zephyr/syscalls/k_sem_give_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* See k_sem_take(): decrement the count if non-zero (returns 0),
 * return -EBUSY for K_NO_WAIT, otherwise pend until given, reset
 * (-EAGAIN) or timed out.
 */
int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
	int ret;

	/* Blocking takes are forbidden in ISR context */
	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	k_spinlock_key_t key = k_spin_lock(&lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, take, sem, timeout);

	if (likely(sem->count > 0U)) {
		sem->count--;
		k_spin_unlock(&lock, key);
		ret = 0;
		goto out;
	}

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);
		ret = -EBUSY;
		goto out;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_sem, take, sem, timeout);

	/* z_pend_curr() releases the lock; its return value is the
	 * result set by the waker, or a timeout error.
	 */
	ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, take, sem, timeout, ret);
	return ret;
}
/* See k_sem_reset(): wake every pended taker with -EAGAIN, zero the
 * count, notify pollers, and reschedule.
 */
void z_impl_k_sem_reset(struct k_sem *sem)
{
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Drain the wait queue, failing each waiter with -EAGAIN */
	while (true) {
		thread = z_unpend_first_thread(&sem->wait_q);
		if (thread == NULL) {
			break;
		}
		arch_thread_return_value_set(thread, -EAGAIN);
		z_ready_thread(thread);
	}
	sem->count = 0;

	SYS_PORT_TRACING_OBJ_FUNC(k_sem, reset, sem);

	handle_poll_events(sem);

	/* z_reschedule() releases the lock */
	z_reschedule(&lock, key);
}
#ifdef CONFIG_USERSPACE
/* Syscall handler for k_sem_take() */
static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_take(sem, timeout);
}
#include <zephyr/syscalls/k_sem_take_mrsh.c>

/* Syscall handler for k_sem_reset() */
static inline void z_vrfy_k_sem_reset(struct k_sem *sem)
{
	K_OOPS(K_SYSCALL_OBJ(sem, K_OBJ_SEM));
	z_impl_k_sem_reset(sem);
}
#include <zephyr/syscalls/k_sem_reset_mrsh.c>

/* Syscall handler for k_sem_count_get() */
static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem)
{
	K_OOPS(K_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_count_get(sem);
}
#include <zephyr/syscalls/k_sem_count_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_SEM
/* Register the semaphore object-core type and link all statically
 * defined semaphores into it.  Runs once at PRE_KERNEL_1.
 */
static int init_sem_obj_core_list(void)
{
	/* Initialize semaphore object type */
	z_obj_type_init(&obj_type_sem, K_OBJ_TYPE_SEM_ID,
			offsetof(struct k_sem, obj_core));

	/* Initialize and link statically defined semaphores */
	STRUCT_SECTION_FOREACH(k_sem, sem) {
		k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
	}

	return 0;
}

SYS_INIT(init_sem_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_SEM */
``` | /content/code_sandbox/kernel/sem.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,518 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log.h>
#include <zephyr/fatal.h>
#include <zephyr/debug/coredump.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/* LCOV_EXCL_START */
/* Weak default: halt this CPU forever with interrupts locked.
 * Architectures may override with a real poweroff/halt mechanism.
 */
FUNC_NORETURN __weak void arch_system_halt(unsigned int reason)
{
	ARG_UNUSED(reason);

	/* TODO: What's the best way to totally halt the system if SMP
	 * is enabled?
	 */
	(void)arch_irq_lock();
	for (;;) {
		/* Spin endlessly */
	}
}
/* LCOV_EXCL_STOP */

/* LCOV_EXCL_START */
/* Weak default policy for fatal errors: flush the log backend and
 * halt.  Applications may override to attempt recovery or a reboot.
 */
__weak void k_sys_fatal_error_handler(unsigned int reason,
				      const struct arch_esf *esf)
{
	ARG_UNUSED(esf);

	/* Flush buffered log messages before the system stops */
	LOG_PANIC();
	LOG_ERR("Halting system");
	arch_system_halt(reason);
	CODE_UNREACHABLE;
}
/* LCOV_EXCL_STOP */
/* Return a printable name for @a thread, falling back to "unknown"
 * for a NULL thread, an unnamed thread, or an empty name.
 */
static const char *thread_name_get(struct k_thread *thread)
{
	const char *name = NULL;

	if (thread != NULL) {
		name = k_thread_name_get(thread);
	}

	if ((name == NULL) || (name[0] == '\0')) {
		return "unknown";
	}

	return name;
}
/* Translate a K_ERR_* fatal-error reason code into a human readable
 * description for the fatal error banner.
 */
static const char *reason_to_str(unsigned int reason)
{
	if (reason == K_ERR_CPU_EXCEPTION) {
		return "CPU exception";
	}
	if (reason == K_ERR_SPURIOUS_IRQ) {
		return "Unhandled interrupt";
	}
	if (reason == K_ERR_STACK_CHK_FAIL) {
		return "Stack overflow";
	}
	if (reason == K_ERR_KERNEL_OOPS) {
		return "Kernel oops";
	}
	if (reason == K_ERR_KERNEL_PANIC) {
		return "Kernel panic";
	}

	return "Unknown error";
}
/* LCOV_EXCL_START */
/* Public wrapper to halt the system with a given fatal-error reason */
FUNC_NORETURN void k_fatal_halt(unsigned int reason)
{
	arch_system_halt(reason);
}
/* LCOV_EXCL_STOP */
/* Common fatal error entry point, invoked by arch fault handlers.
 *
 * Logs the error, takes a core dump, then calls the (possibly
 * user-overridden) k_sys_fatal_error_handler().  If that handler
 * returns, the faulting thread is aborted unless policy says
 * otherwise (panics may not be recovered from; spurious IRQs and
 * non-sentinel ISR faults in test mode simply return).
 */
void z_fatal_error(unsigned int reason, const struct arch_esf *esf)
{
	/* We can't allow this code to be preempted, but don't need to
	 * synchronize between CPUs, so an arch-layer lock is
	 * appropriate.
	 */
	unsigned int key = arch_irq_lock();
	struct k_thread *thread = IS_ENABLED(CONFIG_MULTITHREADING) ?
			_current : NULL;

	/* twister looks for the "ZEPHYR FATAL ERROR" string, don't
	 * change it without also updating twister
	 */
	LOG_ERR(">>> ZEPHYR FATAL ERROR %d: %s on CPU %d", reason,
		reason_to_str(reason), _current_cpu->id);

	/* FIXME: This doesn't seem to work as expected on all arches.
	 * Need a reliable way to determine whether the fault happened when
	 * an IRQ or exception was being handled, or thread context.
	 *
	 * See #17656
	 */
#if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION)
	if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
		LOG_ERR("Fault during interrupt handling\n");
	}
#endif /* CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION */

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		LOG_ERR("Current thread: %p (%s)", thread, thread_name_get(thread));
	}

	coredump(reason, esf, thread);

	/* May not return if the policy is to halt */
	k_sys_fatal_error_handler(reason, esf);

	/* If the system fatal error handler returns, then kill the faulting
	 * thread; a policy decision was made not to hang the system.
	 *
	 * Policy for fatal errors in ISRs: unconditionally panic.
	 *
	 * There is one exception to this policy: a stack sentinel
	 * check may be performed (on behalf of the current thread)
	 * during ISR exit, but in this case the thread should be
	 * aborted.
	 *
	 * Note that k_thread_abort() returns on some architectures but
	 * not others; e.g. on ARC, x86_64, Xtensa with ASM2, ARM
	 */
	if (!IS_ENABLED(CONFIG_TEST)) {
		__ASSERT(reason != K_ERR_KERNEL_PANIC,
			 "Attempted to recover from a kernel panic condition");
		/* FIXME: #17656 */
#if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION)
		if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
#if defined(CONFIG_STACK_SENTINEL)
			if (reason != K_ERR_STACK_CHK_FAIL) {
				__ASSERT(0,
					 "Attempted to recover from a fatal error in ISR");
			}
#endif /* CONFIG_STACK_SENTINEL */
		}
#endif /* CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION */
	} else {
		/* Test mode */
#if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION)
		if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
			/* Abort the thread only on STACK Sentinel check fail. */
#if defined(CONFIG_STACK_SENTINEL)
			if (reason != K_ERR_STACK_CHK_FAIL) {
				arch_irq_unlock(key);
				return;
			}
#else
			arch_irq_unlock(key);
			return;
#endif /* CONFIG_STACK_SENTINEL */
		} else {
			/* Abort the thread only if the fault is not due to
			 * a spurious ISR handler triggered.
			 */
			if (reason == K_ERR_SPURIOUS_IRQ) {
				arch_irq_unlock(key);
				return;
			}
		}
#endif /*CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION */
	}

	arch_irq_unlock(key);

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_thread_abort(thread);
	}
}
``` | /content/code_sandbox/kernel/fatal.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,242 |
```c
/*
*
*/
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel/thread_stack.h>
/* Stub: dynamic thread stack allocation is compiled out in this
 * configuration, so allocation always fails.
 */
k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags)
{
	ARG_UNUSED(size);
	ARG_UNUSED(flags);

	return NULL;
}

/* Stub: nothing to free when dynamic stacks are unsupported */
int z_impl_k_thread_stack_free(k_thread_stack_t *stack)
{
	ARG_UNUSED(stack);

	return -ENOSYS;
}
``` | /content/code_sandbox/kernel/dynamic_disabled.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 87 |
```c
/*
*
*/
/**
* @file event objects library
*
* Event objects are used to signal one or more threads that a custom set of
* events has occurred. Threads wait on event objects until another thread or
* ISR posts the desired set of events to the event object. Each time events
* are posted to an event object, all threads waiting on that event object are
* processed to determine if there is a match. All threads that whose wait
* conditions match the current set of events now belonging to the event object
* are awakened.
*
* Threads waiting on an event object have the option of either waking once
* any or all of the events it desires have been posted to the event object.
*
* @brief Kernel event object
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>
/* private kernel APIs */
#include <wait_q.h>
#include <ksched.h>
#define K_EVENT_WAIT_ANY 0x00 /* Wait for any events */
#define K_EVENT_WAIT_ALL 0x01 /* Wait for all events */
#define K_EVENT_WAIT_MASK 0x01
#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */
struct event_walk_data {
struct k_thread *head;
uint32_t events;
};
#ifdef CONFIG_OBJ_CORE_EVENT
static struct k_obj_type obj_type_event;
#endif /* CONFIG_OBJ_CORE_EVENT */
/* See k_event_init(): clear the event word and initialize the lock
 * and wait queue.
 */
void z_impl_k_event_init(struct k_event *event)
{
	event->events = 0;
	event->lock = (struct k_spinlock) {};

	SYS_PORT_TRACING_OBJ_INIT(k_event, event);

	z_waitq_init(&event->wait_q);

	k_object_init(event);

#ifdef CONFIG_OBJ_CORE_EVENT
	k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
#endif /* CONFIG_OBJ_CORE_EVENT */
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_event_init() */
void z_vrfy_k_event_init(struct k_event *event)
{
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
	z_impl_k_event_init(event);
}
#include <zephyr/syscalls/k_event_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
/**
* @brief determine if desired set of events been satisfied
*
* This routine determines if the current set of events satisfies the desired
* set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then at least
* all the desired events must be present to satisfy the request. If @a
* wait_condition is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY.
* In the K_EVENT_WAIT_ANY case, the request is satisfied when any of the
* current set of events are present in the desired set of events.
*/
static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
				    unsigned int wait_condition)
{
	uint32_t satisfied = desired & current;

	/* K_EVENT_WAIT_ALL needs every desired bit present; any other
	 * condition value is treated as K_EVENT_WAIT_ANY (at least
	 * one desired bit present).
	 */
	return (wait_condition == K_EVENT_WAIT_ALL) ? (satisfied == desired)
						    : (satisfied != 0);
}
/* z_sched_waitq_walk() callback: if the posted events (in @a data)
 * satisfy @a thread's wait condition, chain the thread onto the
 * wake-up list and cancel its timeout.  Always returns 0 so the
 * entire wait queue is walked.
 */
static int event_walk_op(struct k_thread *thread, void *data)
{
	unsigned int      wait_condition;
	struct event_walk_data *event_data = data;

	wait_condition = thread->event_options & K_EVENT_WAIT_MASK;

	if (are_wait_conditions_met(thread->events, event_data->events,
				    wait_condition)) {

		/*
		 * Events create a list of threads to wake up. We do
		 * not want z_thread_timeout to wake these threads; they
		 * will be woken up by k_event_post_internal once they
		 * have been processed.
		 */
		thread->no_wake_on_timeout = true;

		/*
		 * The wait conditions have been satisfied. Add this
		 * thread to the list of threads to unpend.
		 */
		thread->next_event_link = event_data->head;
		event_data->head = thread;
		z_abort_timeout(&thread->base.timeout);
	}

	return 0;
}
/* Core of post/set/clear: replace the bits selected by
 * @a events_mask with the corresponding bits of @a events, then wake
 * every pended thread whose wait condition the new event word
 * satisfies.  Returns the previous value of the masked bits.
 */
static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
				      uint32_t events_mask)
{
	k_spinlock_key_t  key;
	struct k_thread  *thread;
	struct event_walk_data data;
	uint32_t previous_events;

	data.head = NULL;
	key = k_spin_lock(&event->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
					events_mask);

	previous_events = event->events & events_mask;
	/* Merge: untouched bits keep their value, masked bits take
	 * the new value.
	 */
	events = (event->events & ~events_mask) |
		 (events & events_mask);
	event->events = events;
	data.events = events;
	/*
	 * Posting an event has the potential to wake multiple pended threads.
	 * It is desirable to unpend all affected threads simultaneously. This
	 * is done in three steps:
	 *
	 * 1. Walk the waitq and create a linked list of threads to unpend.
	 * 2. Unpend each of the threads in the linked list
	 * 3. Ready each of the threads in the linked list
	 */

	z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);

	if (data.head != NULL) {
		thread = data.head;
		struct k_thread *next;
		do {
			arch_thread_return_value_set(thread, 0);
			/* Report the full event word to the waker */
			thread->events = events;
			next = thread->next_event_link;
			z_sched_wake_thread(thread, false);
			thread = next;
		} while (thread != NULL);
	}

	/* z_reschedule() releases the lock */
	z_reschedule(&event->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
				       events_mask);

	return previous_events;
}
/* See k_event_post(): set the given bits, leaving all others alone */
uint32_t z_impl_k_event_post(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, events, events);
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_event_post() */
uint32_t z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_post(event, events);
}
#include <zephyr/syscalls/k_event_post_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* See k_event_set(): overwrite the entire event word */
uint32_t z_impl_k_event_set(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, events, ~0);
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_event_set() */
uint32_t z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_set(event, events);
}
#include <zephyr/syscalls/k_event_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* See k_event_set_masked(): overwrite only the bits in @a events_mask */
uint32_t z_impl_k_event_set_masked(struct k_event *event, uint32_t events,
				   uint32_t events_mask)
{
	return k_event_post_internal(event, events, events_mask);
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_event_set_masked() */
uint32_t z_vrfy_k_event_set_masked(struct k_event *event, uint32_t events,
				   uint32_t events_mask)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_set_masked(event, events, events_mask);
}
#include <zephyr/syscalls/k_event_set_masked_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* See k_event_clear(): clear the given bits, leaving all others alone */
uint32_t z_impl_k_event_clear(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, 0, events);
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_event_clear() */
uint32_t z_vrfy_k_event_clear(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_clear(event, events);
}
#include <zephyr/syscalls/k_event_clear_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* Core of the wait API: wait (per @a options) for the desired
 * @a events, optionally resetting the event word first.  Returns the
 * subset of @a events that satisfied the wait, or 0 on timeout /
 * empty request.
 */
static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
				      unsigned int options, k_timeout_t timeout)
{
	uint32_t  rv = 0;
	unsigned int  wait_condition;
	struct k_thread  *thread;

	/* Blocking waits are forbidden in ISR context */
	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
					options, timeout);

	if (events == 0) {
		/* Waiting for nothing trivially succeeds with 0 */
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
		return 0;
	}

	wait_condition = options & K_EVENT_WAIT_MASK;
	thread = k_sched_current_thread_query();

	k_spinlock_key_t  key = k_spin_lock(&event->lock);

	if (options & K_EVENT_WAIT_RESET) {
		event->events = 0;
	}

	/* Test if the wait conditions have already been met. */

	if (are_wait_conditions_met(events, event->events, wait_condition)) {
		rv = event->events;

		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/* Match conditions have not been met. */

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/*
	 * The caller must pend to wait for the match. Save the desired
	 * set of events in the k_thread structure.
	 */

	thread->events = events;
	thread->event_options = options;

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
					   options, timeout);

	if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
		/* Retrieve the set of events that woke the thread */
		rv = thread->events;
	}

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
				       events, rv & events);

	/* Only report the bits the caller actually asked for */
	return rv & events;
}
/**
* Wait for any of the specified events
*/
/* See k_event_wait(): wake on ANY of the requested events */
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;

	return k_event_wait_internal(event, events, options, timeout);
}
#ifdef CONFIG_USERSPACE
/* Syscall handler for k_event_wait() */
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */
/**
* Wait for all of the specified events
*/
/* See k_event_wait_all(): wake only when ALL requested events are set */
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
				 : K_EVENT_WAIT_ALL;

	return k_event_wait_internal(event, events, options, timeout);
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_event_wait_all() */
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_all_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_EVENT
/* Register the event object-core type and link all statically defined
 * events into it.  Runs once at PRE_KERNEL_1.
 */
static int init_event_obj_core_list(void)
{
	/* Initialize event object type */
	z_obj_type_init(&obj_type_event, K_OBJ_TYPE_EVENT_ID,
			offsetof(struct k_event, obj_core));

	/* Initialize and link statically defined events */
	STRUCT_SECTION_FOREACH(k_event, event) {
		k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
	}

	return 0;
}

SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_EVENT */
``` | /content/code_sandbox/kernel/events.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,554 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <stdbool.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <wait_q.h>
static struct k_spinlock lock;
#ifdef CONFIG_OBJ_CORE_TIMER
static struct k_obj_type obj_type_timer;
#endif /* CONFIG_OBJ_CORE_TIMER */
/**
* @brief Handle expiration of a kernel timer object.
*
* @param t Timeout used by the timer.
*/
/**
 * @brief Handle expiration of a kernel timer object.
 *
 * Re-arms periodic timers, bumps the expiry counter, invokes the
 * user expiry callback (with the lock dropped), and wakes at most one
 * thread pended in k_timer_status_sync().
 *
 * @param t	Timeout used by the timer.
 */
void z_timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* In sys_clock_announce(), when a timeout expires, it is first removed
	 * from the timeout list, then its expiration handler is called (with
	 * unlocked interrupts). For kernel timers, the expiration handler is
	 * this function. Usually, the timeout structure related to the timer
	 * that is handled here will not be linked to the timeout list at this
	 * point. But it may happen that before this function is executed and
	 * interrupts are locked again, a given timer gets restarted from an
	 * interrupt context that has a priority higher than the system timer
	 * interrupt. Then, the timeout structure for this timer will turn out
	 * to be linked to the timeout list. And in such case, since the timer
	 * was restarted, its expiration handler should not be executed then,
	 * so the function exits immediately.
	 */
	if (sys_dnode_is_linked(&t->node)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
	    !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
		k_timeout_t next = timer->period;

		/* see note about z_add_timeout() in z_impl_k_timer_start() */
		next.ticks = MAX(next.ticks - 1, 0);

#ifdef CONFIG_TIMEOUT_64BIT
		/* Exploit the fact that uptime during a kernel
		 * timeout handler reflects the time of the scheduled
		 * event and not real time to get some inexpensive
		 * protection against late interrupts.  If we're
		 * delayed for any reason, we still end up calculating
		 * the next expiration as a regular stride from where
		 * we "should" have run.  Requires absolute timeouts.
		 * (Note offset by one: we're nominally at the
		 * beginning of a tick, so need to defeat the "round
		 * down" behavior on timeout addition).
		 */
		next = K_TIMEOUT_ABS_TICKS(k_uptime_ticks() + 1 + next.ticks);
#endif /* CONFIG_TIMEOUT_64BIT */
		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
			      next);
	}

	/* update timer's status */
	timer->status += 1U;

	/* invoke timer expiry function */
	if (timer->expiry_fn != NULL) {
		/* Unlock for user handler. */
		k_spin_unlock(&lock, key);
		timer->expiry_fn(timer);
		key = k_spin_lock(&lock);
	}

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/* Wake (at most) one thread blocked in k_timer_status_sync() */
	thread = z_waitq_head(&timer->wait_q);

	if (thread == NULL) {
		k_spin_unlock(&lock, key);
		return;
	}

	z_unpend_thread_no_timeout(thread);

	arch_thread_return_value_set(thread, 0);

	k_spin_unlock(&lock, key);

	z_ready_thread(thread);
}
/* See k_timer_init(): set the callbacks, clear the expiry counter and
 * user data, and initialize the wait queue and timeout.
 */
void k_timer_init(struct k_timer *timer,
		  k_timer_expiry_t expiry_fn,
		  k_timer_stop_t stop_fn)
{
	timer->expiry_fn = expiry_fn;
	timer->stop_fn = stop_fn;
	timer->status = 0U;

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		z_waitq_init(&timer->wait_q);
	}

	z_init_timeout(&timer->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_timer, timer);

	timer->user_data = NULL;

	k_object_init(timer);

#ifdef CONFIG_OBJ_CORE_TIMER
	k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
#endif /* CONFIG_OBJ_CORE_TIMER */
}
/* See k_timer_start(): (re)arm @a timer to fire after @a duration and
 * then every @a period.  A K_FOREVER duration leaves the timer
 * stopped.
 */
void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			  k_timeout_t period)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, start, timer, duration, period);

	/* Acquire spinlock to ensure safety during concurrent calls to
	 * k_timer_start for scheduling or rescheduling. This is necessary
	 * since k_timer_start can be preempted, especially for the same
	 * timer instance.
	 */
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (K_TIMEOUT_EQ(duration, K_FOREVER)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/* z_add_timeout() always adds one to the incoming tick count
	 * to round up to the next tick (by convention it waits for
	 * "at least as long as the specified timeout"), but the
	 * period interval is always guaranteed to be reset from
	 * within the timer ISR, so no round up is desired and 1 is
	 * subtracted in there.
	 *
	 * Note that the duration (!) value gets the same treatment
	 * for backwards compatibility.  This is unfortunate
	 * (i.e. k_timer_start() doesn't treat its initial sleep
	 * argument the same way k_sleep() does), but historical.  The
	 * timer_api test relies on this behavior.
	 */
	if (Z_TICK_ABS(duration.ticks) < 0) {
		duration.ticks = MAX(duration.ticks - 1, 0);
	}

	/* Cancel any pending expiration before re-arming */
	(void)z_abort_timeout(&timer->timeout);
	timer->period = period;
	timer->status = 0U;

	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
		      duration);
	k_spin_unlock(&lock, key);
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_timer_start() */
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
					k_timeout_t duration,
					k_timeout_t period)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_start(timer, duration, period);
}
#include <zephyr/syscalls/k_timer_start_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* See k_timer_stop(): cancel the pending expiration.  If the timer
 * was actually running, the stop callback is invoked and any thread
 * pended in k_timer_status_sync() is released.
 */
void z_impl_k_timer_stop(struct k_timer *timer)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, stop, timer);

	/* Non-zero from z_abort_timeout() means there was nothing to
	 * abort, i.e. the timer was not running.
	 */
	bool inactive = (z_abort_timeout(&timer->timeout) != 0);

	if (inactive) {
		return;
	}

	if (timer->stop_fn != NULL) {
		timer->stop_fn(timer);
	}

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct k_thread *pending_thread = z_unpend1_no_timeout(&timer->wait_q);

		if (pending_thread != NULL) {
			z_ready_thread(pending_thread);
			z_reschedule_unlocked();
		}
	}
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_timer_stop() */
static inline void z_vrfy_k_timer_stop(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_stop(timer);
}
#include <zephyr/syscalls/k_timer_stop_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* See k_timer_status_get(): atomically read and reset the number of
 * expirations since the status was last read.
 */
uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	return result;
}

#ifdef CONFIG_USERSPACE
/* Syscall handler for k_timer_status_get() */
static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_get(timer);
}
#include <zephyr/syscalls/k_timer_status_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
/* See k_timer_status_sync(): block until the timer next expires or is
 * stopped, then return (and reset) the expiry count.  Without
 * multithreading, busy-polls the status under the lock instead of
 * pending.
 */
uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!arch_is_in_isr(), "");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, status_sync, timer);

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		uint32_t result;

		do {
			k_spinlock_key_t key = k_spin_lock(&lock);

			if (!z_is_inactive_timeout(&timer->timeout)) {
				/* Timer still running: poll the status
				 * (volatile read, it is bumped from the
				 * expiration handler).
				 */
				result = *(volatile uint32_t *)&timer->status;
				timer->status = 0U;
				k_spin_unlock(&lock, key);
				if (result > 0) {
					break;
				}
			} else {
				/* Timer stopped: return whatever count
				 * accumulated.
				 */
				result = timer->status;
				k_spin_unlock(&lock, key);
				break;
			}
		} while (true);

		return result;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	if (result == 0U) {
		if (!z_is_inactive_timeout(&timer->timeout)) {
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_timer, status_sync, timer, K_FOREVER);

			/* wait for timer to expire or stop */
			(void)z_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);

			/* get updated timer status */
			key = k_spin_lock(&lock);
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	/**
	 * @note	New tracing hook
	 */
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, status_sync, timer, result);

	return result;
}
#ifdef CONFIG_USERSPACE
/* Syscall verifiers for the remaining timer APIs. Each one checks that
 * the caller owns a valid timer object, then forwards to the z_impl_
 * implementation; the generated *_mrsh.c stubs provide the marshalling.
 */
static inline uint32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_sync(timer);
}
#include <zephyr/syscalls/k_timer_status_sync_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_remaining_ticks(
						const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_remaining_ticks(timer);
}
#include <zephyr/syscalls/k_timer_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_expires_ticks(
						const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_expires_ticks(timer);
}
#include <zephyr/syscalls/k_timer_expires_ticks_mrsh.c>

static inline void *z_vrfy_k_timer_user_data_get(const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_user_data_get(timer);
}
#include <zephyr/syscalls/k_timer_user_data_get_mrsh.c>

static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
						void *user_data)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_user_data_set(timer, user_data);
}
#include <zephyr/syscalls/k_timer_user_data_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_TIMER
/* Register the timer object type with the object-core framework and link
 * every statically defined k_timer into its list. Runs once during boot
 * at PRE_KERNEL_1 via SYS_INIT(). Always returns 0.
 */
static int init_timer_obj_core_list(void)
{
	/* Initialize timer object type */
	z_obj_type_init(&obj_type_timer, K_OBJ_TYPE_TIMER_ID,
			offsetof(struct k_timer, obj_core));

	/* Initialize and link statically defined timers */
	STRUCT_SECTION_FOREACH(k_timer, timer) {
		k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
	}

	return 0;
}

SYS_INIT(init_timer_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_TIMER */
``` | /content/code_sandbox/kernel/timer.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,554 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_OFFSETS_SHORT_H_
#define ZEPHYR_KERNEL_INCLUDE_OFFSETS_SHORT_H_

#include <zephyr/offsets.h>
#include <offsets_short_arch.h>

/* Shorthand aliases for the generated structure-offset absolute symbols
 * (___*_OFFSET, from zephyr/offsets.h), primarily consumed by assembly
 * sources that cannot use offsetof() directly.
 */

/* kernel */

/* main */
#ifndef CONFIG_SMP
/* Relies on _kernel.cpu being the first member of _kernel and having 1 element
 */
#define _kernel_offset_to_nested \
	(___cpu_t_nested_OFFSET)

#define _kernel_offset_to_irq_stack \
	(___cpu_t_irq_stack_OFFSET)

#define _kernel_offset_to_current \
	(___cpu_t_current_OFFSET)

#if defined(CONFIG_FPU_SHARING)
#define _kernel_offset_to_fp_ctx \
	(___cpu_t_fp_ctx_OFFSET)
#endif /* CONFIG_FPU_SHARING */
#endif /* CONFIG_SMP */

#define _kernel_offset_to_idle \
	(___kernel_t_idle_OFFSET)

#define _kernel_offset_to_current_fp \
	(___kernel_t_current_fp_OFFSET)

#define _kernel_offset_to_ready_q_cache \
	(___kernel_t_ready_q_OFFSET + ___ready_q_t_cache_OFFSET)

/* end - kernel */

/* threads */

/* main */
#define _thread_offset_to_callee_saved \
	(___thread_t_callee_saved_OFFSET)

#ifdef CONFIG_THREAD_LOCAL_STORAGE
#define _thread_offset_to_tls \
	(___thread_t_tls_OFFSET)
#endif /* CONFIG_THREAD_LOCAL_STORAGE */

/* base */
#define _thread_offset_to_user_options \
	(___thread_t_base_OFFSET + ___thread_base_t_user_options_OFFSET)

/* end - threads */

#endif /* ZEPHYR_KERNEL_INCLUDE_OFFSETS_SHORT_H_ */
``` | /content/code_sandbox/kernel/include/offsets_short.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 315 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/toolchain.h>
#include <zephyr/kernel/mm/demand_paging.h>
extern struct k_mem_paging_stats_t paging_stats;
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
struct k_mem_paging_histogram_t z_paging_histogram_eviction;
struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in;
struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out;
#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
/*
* The frequency of timing functions is highly dependent on
* architecture, SoC or board. It is also not available at build time.
* Therefore, the bounds for the timing histograms needs to be defined
* externally to this file, and must be tailored to the platform
* being used.
*/
extern unsigned long
k_mem_paging_eviction_histogram_bounds[
CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
extern unsigned long
k_mem_paging_backing_store_histogram_bounds[
CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#else
/* Convert a nanosecond interval to hardware timer cycles.
 *
 * The divide-before-multiply ordering keeps the intermediate value small
 * for high cycle rates. The argument is parenthesized so that compound
 * expressions (e.g. NS_TO_CYC(a + b)) expand correctly (CERT PRE01-C);
 * all current call sites pass plain literals, so behavior is unchanged.
 *
 * NOTE(review): for clock rates below 1 MHz the division truncates to 0,
 * and non-integral-MHz rates lose precision -- confirm this is acceptable
 * for platforms relying on these default bounds.
 */
#define NS_TO_CYC(ns)	(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000000U * (ns))

/*
 * This provides the upper bounds of the bins in eviction timing histogram.
 */
__weak unsigned long
k_mem_paging_eviction_histogram_bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	NS_TO_CYC(1),
	NS_TO_CYC(5),
	NS_TO_CYC(10),
	NS_TO_CYC(50),
	NS_TO_CYC(100),
	NS_TO_CYC(200),
	NS_TO_CYC(500),
	NS_TO_CYC(1000),
	NS_TO_CYC(2000),
	ULONG_MAX
};

/*
 * This provides the upper bounds of the bins in backing store timing histogram
 * (both page-in and page-out).
 */
__weak unsigned long
k_mem_paging_backing_store_histogram_bounds[
	CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	NS_TO_CYC(10),
	NS_TO_CYC(100),
	NS_TO_CYC(125),
	NS_TO_CYC(250),
	NS_TO_CYC(500),
	NS_TO_CYC(1000),
	NS_TO_CYC(2000),
	NS_TO_CYC(5000),
	NS_TO_CYC(10000),
	ULONG_MAX
};
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
/* Return the total page fault count recorded so far.
 *
 * Interrupts are locked around the read so the counter is sampled
 * consistently with respect to fault handling.
 */
unsigned long k_mem_num_pagefaults_get(void)
{
	unsigned long count;
	unsigned int irq_key = irq_lock();

	count = paging_stats.pagefaults.cnt;
	irq_unlock(irq_key);

	return count;
}
/* Copy the global demand paging statistics into *stats.
 * A NULL destination is silently ignored.
 */
void z_impl_k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
{
	if (stats != NULL) {
		/* Snapshot the global counters */
		memcpy(stats, &paging_stats, sizeof(paging_stats));
	}
}
#ifdef CONFIG_USERSPACE
/* Syscall verifier: the output buffer must be writable by the caller
 * before the kernel fills it in.
 */
static inline
void z_vrfy_k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(stats, sizeof(*stats)));
	z_impl_k_mem_paging_stats_get(stats);
}
#include <zephyr/syscalls/k_mem_paging_stats_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
/* Copy a thread's private paging statistics into *stats.
 * Does nothing if either pointer is NULL.
 */
void z_impl_k_mem_paging_thread_stats_get(struct k_thread *thread,
					  struct k_mem_paging_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return;
	}

	/* Copy statistics */
	memcpy(stats, &thread->paging_stats, sizeof(thread->paging_stats));
}

#ifdef CONFIG_USERSPACE
/* Syscall verifier: both the thread handle and the output buffer must be
 * accessible to the caller.
 */
static inline
void z_vrfy_k_mem_paging_thread_stats_get(struct k_thread *thread,
					  struct k_mem_paging_stats_t *stats)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(stats, sizeof(*stats)));
	z_impl_k_mem_paging_thread_stats_get(thread, stats);
}
#include <zephyr/syscalls/k_mem_paging_thread_stats_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
void z_paging_histogram_init(void)
{
/*
* Zero out the histogram structs and copy the bounds.
* The copying is done as the histogram structs need
* to be pinned in memory and never swapped out, while
* the source bound array may not be pinned.
*/
memset(&z_paging_histogram_eviction, 0, sizeof(z_paging_histogram_eviction));
memcpy(z_paging_histogram_eviction.bounds,
k_mem_paging_eviction_histogram_bounds,
sizeof(z_paging_histogram_eviction.bounds));
memset(&z_paging_histogram_backing_store_page_in, 0,
sizeof(z_paging_histogram_backing_store_page_in));
memcpy(z_paging_histogram_backing_store_page_in.bounds,
k_mem_paging_backing_store_histogram_bounds,
sizeof(z_paging_histogram_backing_store_page_in.bounds));
memset(&z_paging_histogram_backing_store_page_out, 0,
sizeof(z_paging_histogram_backing_store_page_out));
memcpy(z_paging_histogram_backing_store_page_out.bounds,
k_mem_paging_backing_store_histogram_bounds,
sizeof(z_paging_histogram_backing_store_page_out.bounds));
}
/**
 * Record one timing sample in a histogram.
 *
 * Increments the first bin whose upper bound covers @a cycles. The last
 * bin's bound is expected to be ULONG_MAX so every sample lands somewhere.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in measured operation.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles)
{
	int bin = 0;

	while (bin < CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS) {
		if (cycles <= hist->bounds[bin]) {
			hist->counts[bin]++;
			break;
		}
		bin++;
	}
}
/* Copy the eviction timing histogram into *hist.
 * A NULL destination is silently ignored.
 */
void z_impl_k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist)
{
	if (hist != NULL) {
		/* Snapshot the histogram */
		memcpy(hist, &z_paging_histogram_eviction,
		       sizeof(z_paging_histogram_eviction));
	}
}
/* Copy the backing-store page-in timing histogram into *hist.
 * A NULL destination is silently ignored.
 */
void z_impl_k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist)
{
	if (hist != NULL) {
		/* Snapshot the histogram */
		memcpy(hist, &z_paging_histogram_backing_store_page_in,
		       sizeof(z_paging_histogram_backing_store_page_in));
	}
}
/* Copy the backing-store page-out timing histogram into *hist.
 * A NULL destination is silently ignored.
 */
void z_impl_k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist)
{
	if (hist != NULL) {
		/* Snapshot the histogram */
		memcpy(hist, &z_paging_histogram_backing_store_page_out,
		       sizeof(z_paging_histogram_backing_store_page_out));
	}
}
#ifdef CONFIG_USERSPACE
/* Syscall verifiers for the histogram getters: each checks that the
 * caller-supplied buffer is writable, then forwards to the z_impl_
 * implementation. Marshalling comes from the generated *_mrsh.c stubs.
 */
static inline
void z_vrfy_k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_eviction_get(hist);
}
#include <zephyr/syscalls/k_mem_paging_histogram_eviction_get_mrsh.c>

static inline
void z_vrfy_k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_backing_store_page_in_get(hist);
}
#include <zephyr/syscalls/k_mem_paging_histogram_backing_store_page_in_get_mrsh.c>

static inline
void z_vrfy_k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_backing_store_page_out_get(hist);
}
#include <zephyr/syscalls/k_mem_paging_histogram_backing_store_page_out_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/kernel/paging/statistics.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,753 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_
#define ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/dlist.h>
/* Priority comparison between two threads; declared here, defined in the
 * scheduler. Used by the "dumb" (sorted dlist) queue implementation.
 */
extern int32_t z_sched_prio_cmp(struct k_thread *thread_1,
	struct k_thread *thread_2);

/* Ordering predicate for the red/black tree based queues */
bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

/* Map the generic _priq_run_* / _priq_wait_* operation names onto the
 * backend selected by Kconfig (dumb dlist, scalable rbtree, or multi-queue).
 */

/* Dumb Scheduling */
#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add		z_priq_dumb_add
#define _priq_run_remove	z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
#  define _priq_run_best	z_priq_dumb_mask_best
# else
#  define _priq_run_best	z_priq_dumb_best
# endif /* CONFIG_SCHED_CPU_MASK */
/* Scalable Scheduling */
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add		z_priq_rb_add
#define _priq_run_remove	z_priq_rb_remove
#define _priq_run_best		z_priq_rb_best
/* Multi Queue Scheduling */
#elif defined(CONFIG_SCHED_MULTIQ)

/* Bits per word in the multi-queue occupancy bitmask */
#if defined(CONFIG_64BIT)
#define NBITS 64
#else
#define NBITS 32
#endif /* CONFIG_64BIT */

#define _priq_run_add		z_priq_mq_add
#define _priq_run_remove	z_priq_mq_remove
#define _priq_run_best		z_priq_mq_best
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread);
static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread);
#endif

/* Scalable Wait Queue */
#if defined(CONFIG_WAITQ_SCALABLE)
#define _priq_wait_add		z_priq_rb_add
#define _priq_wait_remove	z_priq_rb_remove
#define _priq_wait_best		z_priq_rb_best
/* Dumb Wait Queue */
#elif defined(CONFIG_WAITQ_DUMB)
#define _priq_wait_add		z_priq_dumb_add
#define _priq_wait_remove	z_priq_dumb_remove
#define _priq_wait_best		z_priq_dumb_best
#endif
/* Remove a thread from a dumb (dlist) priority queue.
 * The dlist node is self-locating, so the queue head is unused.
 */
static ALWAYS_INLINE void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	ARG_UNUSED(pq);

	sys_dlist_remove(&thread->base.qnode_dlist);
}
/* Return the best (head) thread of a dumb priority queue, or NULL when
 * the queue is empty. The list is kept sorted on insertion, so the head
 * is always the best candidate.
 */
static ALWAYS_INLINE struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
{
	sys_dnode_t *head = sys_dlist_peek_head(pq);

	if (head == NULL) {
		return NULL;
	}

	return CONTAINER_OF(head, struct k_thread, base.qnode_dlist);
}
/* Insert a thread into a red/black-tree priority queue.
 *
 * Each insertion stamps the thread with a monotonically increasing
 * order_key, presumably used as a stable tiebreak between equal-priority
 * threads by z_priq_rb_lessthan() (declared above; definition not in
 * this header -- TODO confirm).
 */
static ALWAYS_INLINE void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	thread->base.order_key = pq->next_order_key;
	++pq->next_order_key;

	/* Renumber at wraparound. This is tiny code, and in practice
	 * will almost never be hit on real systems. BUT on very
	 * long-running systems where a priq never completely empties
	 * AND that contains very large numbers of threads, it can be
	 * a latency glitch to loop over all the threads like this.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key;
			++pq->next_order_key;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}
static ALWAYS_INLINE void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
rb_remove(&pq->tree, &thread->base.qnode_rb);
if (!pq->tree.root) {
pq->next_order_key = 0;
}
}
/* Return the best thread in a red/black-tree priority queue (the tree's
 * minimum node), or NULL when the tree is empty.
 */
static ALWAYS_INLINE struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
{
	struct rbnode *min = rb_get_min(&pq->tree);

	if (min == NULL) {
		return NULL;
	}

	return CONTAINER_OF(min, struct k_thread, base.qnode_rb);
}
/* Return the best thread in a multi-queue priority queue, or NULL when
 * every per-priority list is empty.
 *
 * Scans the occupancy bitmask word by word; the lowest set bit in the
 * first non-zero word identifies the highest-priority non-empty list,
 * whose head thread is returned.
 */
static ALWAYS_INLINE struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
{
	struct k_thread *thread = NULL;

	for (int i = 0; i < PRIQ_BITMAP_SIZE; ++i) {
		if (!pq->bitmask[i]) {
			continue;
		}

#ifdef CONFIG_64BIT
		sys_dlist_t *l = &pq->queues[i * 64 + u64_count_trailing_zeros(pq->bitmask[i])];
#else
		sys_dlist_t *l = &pq->queues[i * 32 + u32_count_trailing_zeros(pq->bitmask[i])];
#endif
		sys_dnode_t *n = sys_dlist_peek_head(l);

		if (n != NULL) {
			thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
			break;
		}
	}
	return thread;
}
#ifdef CONFIG_SCHED_MULTIQ

/* Decomposed position of a priority within the multi-queue structure */
struct prio_info {
	uint8_t offset_prio;	/* priority rebased so the highest prio is 0 */
	uint8_t idx;		/* index of the bitmask word */
	uint8_t bit;		/* bit position within that word */
};

/* Translate a thread priority into its queue index and bitmask position */
static ALWAYS_INLINE struct prio_info get_prio_info(int8_t old_prio)
{
	struct prio_info ret;

	ret.offset_prio = old_prio - K_HIGHEST_THREAD_PRIO;
	ret.idx = ret.offset_prio / NBITS;
	ret.bit = ret.offset_prio % NBITS;

	return ret;
}

/* Append a thread to its priority's list and mark that list occupied */
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
					struct k_thread *thread)
{
	struct prio_info pos = get_prio_info(thread->base.prio);

	sys_dlist_append(&pq->queues[pos.offset_prio], &thread->base.qnode_dlist);
	pq->bitmask[pos.idx] |= BIT(pos.bit);
}

/* Unlink a thread; clear the occupancy bit when its list becomes empty */
static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
					   struct k_thread *thread)
{
	struct prio_info pos = get_prio_info(thread->base.prio);

	sys_dlist_remove(&thread->base.qnode_dlist);
	if (sys_dlist_is_empty(&pq->queues[pos.offset_prio])) {
		pq->bitmask[pos.idx] &= ~BIT(pos.bit);
	}
}
#endif /* CONFIG_SCHED_MULTIQ */
#ifdef CONFIG_SCHED_CPU_MASK
/* Return the first queued thread whose CPU mask allows it to run on the
 * current CPU, or NULL if none qualifies.
 */
static ALWAYS_INLINE struct k_thread *z_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif /* CONFIG_SCHED_CPU_MASK */
#if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
/* Insert a thread into a dumb (sorted dlist) priority queue.
 *
 * The new thread is placed before the first queued thread against which
 * z_sched_prio_cmp() returns > 0, otherwise appended, keeping the list
 * ordered best-first. O(n) in queue length.
 */
static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
					  struct k_thread *thread)
{
	struct k_thread *t;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
#endif /* CONFIG_SCHED_DUMB || CONFIG_WAITQ_DUMB */
#endif /* ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_ */
``` | /content/code_sandbox/kernel/include/priority_q.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,674 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_THREAD_H_
#define ZEPHYR_KERNEL_INCLUDE_THREAD_H_
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <timeout_q.h>
/* Human-readable labels for the thread_state bits */
#define Z_STATE_STR_DUMMY	"dummy"
#define Z_STATE_STR_PENDING	"pending"
#define Z_STATE_STR_PRESTART	"prestart"
#define Z_STATE_STR_DEAD	"dead"
#define Z_STATE_STR_SUSPENDED	"suspended"
#define Z_STATE_STR_ABORTING	"aborting"
#define Z_STATE_STR_SUSPENDING	"suspending"
#define Z_STATE_STR_QUEUED	"queued"

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
extern struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

/* Entry point of the idle thread(s); defined in the kernel core */
void idle(void *unused1, void *unused2, void *unused3);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
void z_thread_monitor_exit(struct k_thread *thread);
#else
/* No-op stub when thread monitoring is disabled */
#define z_thread_monitor_exit(thread) \
	do {/* nothing */    \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */
/* Start a newly created thread now, or defer its start by @a delay.
 * Without a system clock, a delayed start cannot be honored and the
 * thread is started immediately.
 */
static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		/* Defer: arm the thread's timeout with the requested delay */
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
}
/* True if the thread runs at a preemptible (non-cooperative) priority */
static inline int thread_is_preemptible(struct k_thread *thread)
{
	/* explanation in kernel_struct.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}

/* True if the thread's priority falls within the meta-IRQ band (the
 * CONFIG_NUM_METAIRQ_PRIORITIES highest priorities). Always false when
 * no meta-IRQ priorities are configured.
 */
static inline int thread_is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	ARG_UNUSED(thread);
	return 0;
#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
}
#ifdef CONFIG_ASSERT
/* True if this is a "dummy" placeholder thread (used only in assertions) */
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif /* CONFIG_ASSERT */

/* True if the thread has been suspended */
static inline bool z_is_thread_suspended(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
}

/* True if the thread is pended on a wait queue */
static inline bool z_is_thread_pending(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_PENDING) != 0U;
}

/* True if any state bit prevents the thread from being scheduled */
static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
{
	uint8_t state = thread->base.thread_state;

	return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD |
			 _THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
}

/* True if the thread has an armed (non-inactive) timeout */
static inline bool z_is_thread_timeout_active(struct k_thread *thread)
{
	return !z_is_inactive_timeout(&thread->base.timeout);
}

/* True if the thread can be scheduled: nothing blocks it and no timeout
 * is pending on it
 */
static inline bool z_is_thread_ready(struct k_thread *thread)
{
	return !((z_is_thread_prevented_from_running(thread)) != 0U ||
		 z_is_thread_timeout_active(thread));
}

/* True once the thread has left the PRESTART state */
static inline bool z_has_thread_started(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_PRESTART) == 0U;
}

/* True if any of the given state bits is set on the thread */
static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
{
	return (thread->base.thread_state & state) != 0U;
}

/* True if the thread is currently linked into a run queue */
static inline bool z_is_thread_queued(struct k_thread *thread)
{
	return z_is_thread_state_set(thread, _THREAD_QUEUED);
}
/* Set the SUSPENDED state bit (emits the sched_suspend tracing event) */
static inline void z_mark_thread_as_suspended(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
}

/* Clear the SUSPENDED state bit (emits the sched_resume tracing event) */
static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
}

/* Clear the PRESTART bit: the thread is now considered started */
static inline void z_mark_thread_as_started(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_PRESTART;
}

/* Set the PENDING bit: the thread is waiting on a wait queue */
static inline void z_mark_thread_as_pending(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_PENDING;
}

/* Clear the PENDING bit */
static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_PENDING;
}

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
static inline void z_thread_essential_set(struct k_thread *thread)
{
	thread->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
static inline void z_thread_essential_clear(struct k_thread *thread)
{
	thread->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if current thread is essential, false if it is not.
 */
static inline bool z_is_thread_essential(struct k_thread *thread)
{
	return (thread->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}
/* Decide whether the scheduler is allowed to switch away from _current
 * to @a thread at this point. @a preempt_ok forces permission (e.g. the
 * current thread yielded voluntarily).
 */
static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching. Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (thread_is_preemptible(_current) || thread_is_metairq(thread)) {
		return true;
	}

	return false;
}
/* True if the given entry point is the idle thread's entry function */
static inline bool z_is_idle_thread_entry(void *entry_point)
{
	return entry_point == idle;
}

/* True if the given thread object is an idle thread. Always false when
 * multithreading is disabled (there is no idle thread then).
 */
static inline bool z_is_idle_thread_object(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SMP
	return thread->base.is_idle;
#else
	return thread == &z_idle_threads[0];
#endif /* CONFIG_SMP */
#else
	return false;
#endif /* CONFIG_MULTITHREADING */
}
#endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */
``` | /content/code_sandbox/kernel/include/kthread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,487 |
```objective-c
/*
*
*/
/**
* @file
* @brief Macros to generate structure member offset definitions
*
* This header contains macros to allow a kernel implementation to generate
* absolute symbols whose values represents the member offsets for various
* kernel structures. These absolute symbols are typically utilized by
* assembly source files rather than hardcoding the values in some local header
* file.
*
* WARNING: Absolute symbols can potentially be utilized by external tools --
* for example, to locate a specific field within a data structure.
* Consequently, changes made to such symbols may require modifications to the
* associated tool(s). Typically, relocating a member of a structure merely
* requires that a tool be rebuilt; however, moving a member to another
* structure (or to a new sub-structure within an existing structure) may
* require that the tool itself be modified. Likewise, deleting, renaming, or
* changing the meaning of an absolute symbol may require modifications to a
* tool.
*
* The macro "GEN_OFFSET_SYM(structure, member)" is used to generate a single
* absolute symbol. The absolute symbol will appear in the object module
* generated from the source file that utilizes the GEN_OFFSET_SYM() macro.
* Absolute symbols representing a structure member offset have the following
* form:
*
* __<structure>_<member>_OFFSET
*
* The macro "GEN_NAMED_OFFSET_SYM(structure, member, name)" is also provided
* to create the symbol with the following form:
*
* __<structure>_<name>_OFFSET
*
* This header also defines the GEN_ABSOLUTE_SYM macro to simply define an
* absolute symbol, irrespective of whether the value represents a structure
* or offset.
*
* The following sample file illustrates the usage of the macros available
* in this file:
*
* <START of sample source file: offsets.c>
*
* #include <gen_offset.h>
* /@ include struct definitions for which offsets symbols are to be
* generated @/
*
* #include <zephyr/kernel_structs.h>
* GEN_ABS_SYM_BEGIN (_OffsetAbsSyms) /@ the name parameter is arbitrary @/
* /@ _kernel_t structure member offsets @/
*
* GEN_OFFSET_SYM (_kernel_t, nested);
* GEN_OFFSET_SYM (_kernel_t, irq_stack);
* GEN_OFFSET_SYM (_kernel_t, current);
* GEN_OFFSET_SYM (_kernel_t, idle);
*
* GEN_ABSOLUTE_SYM (___kernel_t_SIZEOF, sizeof(_kernel_t));
*
* GEN_ABS_SYM_END
* <END of sample source file: offsets.c>
*
* Compiling the sample offsets.c results in the following symbols in offsets.o:
*
* $ nm offsets.o
* 00000000 A ___kernel_t_nested_OFFSET
* 00000004 A ___kernel_t_irq_stack_OFFSET
* 00000008 A ___kernel_t_current_OFFSET
* 0000000c A ___kernel_t_idle_OFFSET
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_GEN_OFFSET_H_
#define ZEPHYR_KERNEL_INCLUDE_GEN_OFFSET_H_
#include <zephyr/toolchain.h>
#include <stddef.h>
/* definition of the GEN_OFFSET_SYM() macros is toolchain independent */

/* Emit __<S>_<M>_OFFSET for member M of typedef'd type S */
#define GEN_OFFSET_SYM(S, M) \
	GEN_ABSOLUTE_SYM(__##S##_##M##_##OFFSET, offsetof(S, M))

/* Same, for a plain struct tag: __struct_<S>_<M>_OFFSET */
#define GEN_OFFSET_STRUCT(S, M) \
	GEN_ABSOLUTE_SYM(__struct_##S##_##M##_##OFFSET, offsetof(struct S, M))

/* Emit the offset of member M under the alternate name N */
#define GEN_NAMED_OFFSET_SYM(S, M, N) \
	GEN_ABSOLUTE_SYM(__##S##_##N##_##OFFSET, offsetof(S, M))

/* Named variant for a plain struct tag */
#define GEN_NAMED_OFFSET_STRUCT(S, M, N) \
	GEN_ABSOLUTE_SYM(__struct_##S##_##N##_##OFFSET, offsetof(struct S, M))
#endif /* ZEPHYR_KERNEL_INCLUDE_GEN_OFFSET_H_ */
``` | /content/code_sandbox/kernel/include/gen_offset.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 804 |
```objective-c
/*
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_IPI_H_
#define ZEPHYR_KERNEL_INCLUDE_IPI_H_

#include <zephyr/kernel.h>
#include <stdint.h>
#include <zephyr/sys/atomic.h>

/* Bitmask selecting every CPU in the system */
#define IPI_ALL_CPUS_MASK  ((1 << CONFIG_MP_MAX_NUM_CPUS) - 1)

/* Mask targeting a single CPU when IPI optimization is enabled;
 * otherwise fall back to broadcasting to all CPUs.
 */
#define IPI_CPU_MASK(cpu_id) \
	(IS_ENABLED(CONFIG_IPI_OPTIMIZE) ? BIT(cpu_id) : IPI_ALL_CPUS_MASK)

/* defined in ipi.c when CONFIG_SMP=y */
#ifdef CONFIG_SMP
void flag_ipi(uint32_t ipi_mask);
void signal_pending_ipi(void);
atomic_val_t ipi_mask_create(struct k_thread *thread);
#else
/* Uniprocessor build: IPIs are meaningless, compile to no-ops */
#define flag_ipi(ipi_mask) do { } while (false)
#define signal_pending_ipi() do { } while (false)
#endif /* CONFIG_SMP */

#endif /* ZEPHYR_KERNEL_INCLUDE_IPI_H_ */
``` | /content/code_sandbox/kernel/include/ipi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 199 |
```objective-c
/*
*
*/
#ifndef KERNEL_INCLUDE_MMU_H
#define KERNEL_INCLUDE_MMU_H
#ifdef CONFIG_MMU
#include <stdint.h>
#include <zephyr/sys/sflist.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/linker/linker-defs.h>
/** Start address of physical memory. */
#define K_MEM_PHYS_RAM_START ((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)
/** Size of physical memory. */
#define K_MEM_PHYS_RAM_SIZE (KB(CONFIG_SRAM_SIZE))
/** End address (exclusive) of physical memory. */
#define K_MEM_PHYS_RAM_END (K_MEM_PHYS_RAM_START + K_MEM_PHYS_RAM_SIZE)
/** Start address of virtual memory. */
#define K_MEM_VIRT_RAM_START ((uint8_t *)CONFIG_KERNEL_VM_BASE)
/** Size of virtual memory. */
#define K_MEM_VIRT_RAM_SIZE ((size_t)CONFIG_KERNEL_VM_SIZE)
/** End address (exclusive) of virtual memory. */
#define K_MEM_VIRT_RAM_END (K_MEM_VIRT_RAM_START + K_MEM_VIRT_RAM_SIZE)
/** Boot-time virtual start address of the kernel image. */
#define K_MEM_KERNEL_VIRT_START ((uint8_t *)&z_mapped_start[0])
/** Boot-time virtual end address of the kernel image. */
#define K_MEM_KERNEL_VIRT_END ((uint8_t *)&z_mapped_end[0])
/** Boot-time virtual address space size of the kernel image. */
#define K_MEM_KERNEL_VIRT_SIZE (K_MEM_KERNEL_VIRT_END - K_MEM_KERNEL_VIRT_START)
/**
* @brief Offset for translating between static physical and virtual addresses.
*
* @note Do not use directly unless you know exactly what you are going.
*/
#define K_MEM_VM_OFFSET \
((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
(CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
/**
* @brief Get physical address from virtual address for boot RAM mappings.
*
* @note Only applies to boot RAM mappings within the Zephyr image that have never
* been remapped or paged out. Never use this unless you know exactly what you
* are doing.
*
* @param virt Virtual address.
*
* @return Physical address.
*/
#define K_MEM_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)(virt)) - K_MEM_VM_OFFSET))
/**
* @brief Get virtual address from physical address for boot RAM mappings.
*
* @note Only applies to boot RAM mappings within the Zephyr image that have never
* been remapped or paged out. Never use this unless you know exactly what you
* are doing.
*
* @param phys Physical address.
*
* @return Virtual address.
*/
#define K_MEM_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)(phys)) + K_MEM_VM_OFFSET))
/**
* @def K_MEM_VM_FREE_START
* @brief Start address of unused, available virtual addresses.
*
* This is the start address of the virtual memory region where
* addresses can be allocated for memory mapping. This depends on whether
* CONFIG_ARCH_MAPS_ALL_RAM is enabled:
*
* - If it is enabled, which means all physical memory are mapped in virtual
* memory address space, and it is the same as
* (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_SIZE).
*
* - If it is disabled, K_MEM_VM_FREE_START is the same K_MEM_KERNEL_VIRT_END which
* is the end of the kernel image.
*
*/
#ifdef CONFIG_ARCH_MAPS_ALL_RAM
#define K_MEM_VM_FREE_START K_MEM_BOOT_PHYS_TO_VIRT(K_MEM_PHYS_RAM_END)
#else
#define K_MEM_VM_FREE_START K_MEM_KERNEL_VIRT_END
#endif /* CONFIG_ARCH_MAPS_ALL_RAM */
/**
* @defgroup kernel_mm_page_frame_apis Kernel Memory Page Frame Management APIs
* @ingroup kernel_mm_internal_apis
* @{
*
* Macros and data structures for physical page frame accounting,
* APIs for use by eviction and backing store algorithms. This code
* is otherwise not application-facing.
*/
/**
* @brief Number of page frames.
*
* At present, page frame management is only done for main system RAM,
* and we generate paging structures based on CONFIG_SRAM_BASE_ADDRESS
* and CONFIG_SRAM_SIZE.
*
* If we have other RAM regions (DCCM, etc) these typically have special
* properties and shouldn't be used generically for demand paging or
* anonymous mappings. We don't currently maintain an ontology of these in the
* core kernel.
*/
#define K_MEM_NUM_PAGE_FRAMES (K_MEM_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
/*
* k_mem_page_frame flags bits
*
* Requirements:
* - K_MEM_PAGE_FRAME_FREE must be one of the possible sfnode flag bits
* - All bit values must be lower than CONFIG_MMU_PAGE_SIZE
*/
/** This physical page is free and part of the free list */
#define K_MEM_PAGE_FRAME_FREE BIT(0)
/** This physical page is reserved by hardware; we will never use it */
#define K_MEM_PAGE_FRAME_RESERVED BIT(1)
/** This page contains critical kernel data and will never be swapped */
#define K_MEM_PAGE_FRAME_PINNED BIT(2)
/**
* This physical page is mapped to some virtual memory address
*
* Currently, we just support one mapping per page frame. If a page frame
* is mapped to multiple virtual pages then it must be pinned.
*/
#define K_MEM_PAGE_FRAME_MAPPED BIT(3)
/**
* This page frame is currently involved in a page-in/out operation
*/
#define K_MEM_PAGE_FRAME_BUSY BIT(4)
/**
* This page frame has a clean copy in the backing store
*/
#define K_MEM_PAGE_FRAME_BACKED BIT(5)
/**
* Data structure for physical page frames
*
* An array of these is instantiated, one element per physical RAM page.
* Hence it's necessary to constrain its size as much as possible.
*/
struct k_mem_page_frame {
union {
/*
* If mapped, K_MEM_PAGE_FRAME_* flags and virtual address
* this page is mapped to.
*/
uintptr_t va_and_flags;
/*
* If unmapped and available, free pages list membership
* with the K_MEM_PAGE_FRAME_FREE flag.
*/
sys_sfnode_t node;
};
/* Backing store and eviction algorithms may both need to
* require additional per-frame custom data for accounting purposes.
* They should declare their own array with indices matching
* k_mem_page_frames[] ones whenever possible.
* They may also want additional flags bits that could be stored here
* and they shouldn't clobber each other. At all costs the total
* size of struct k_mem_page_frame must be minimized.
*/
};
/* Note: this must be false for the other flag bits to be valid */
static inline bool k_mem_page_frame_is_free(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_FREE) != 0U;
}

/* True if the frame may never be paged out */
static inline bool k_mem_page_frame_is_pinned(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_PINNED) != 0U;
}

/* True if the frame is reserved by hardware and never used by the kernel */
static inline bool k_mem_page_frame_is_reserved(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_RESERVED) != 0U;
}

/* True if the frame is currently mapped to a virtual address */
static inline bool k_mem_page_frame_is_mapped(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_MAPPED) != 0U;
}

/* True if the frame is in the middle of a page-in/out operation */
static inline bool k_mem_page_frame_is_busy(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_BUSY) != 0U;
}

/* True if a clean copy of the frame exists in the backing store */
static inline bool k_mem_page_frame_is_backed(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_BACKED) != 0U;
}

/* True if the eviction algorithm may select this frame: it must be a
 * mapped, in-use frame that is not reserved, pinned, or busy.
 */
static inline bool k_mem_page_frame_is_evictable(struct k_mem_page_frame *pf)
{
	return (!k_mem_page_frame_is_free(pf) &&
		!k_mem_page_frame_is_reserved(pf) &&
		k_mem_page_frame_is_mapped(pf) &&
		!k_mem_page_frame_is_pinned(pf) &&
		!k_mem_page_frame_is_busy(pf));
}
/* If true, page is not being used for anything, is not reserved, is not
 * a member of some free pages list, isn't busy, and is ready to be mapped
 * in memory
 */
static inline bool k_mem_page_frame_is_available(struct k_mem_page_frame *page)
{
	/* All flag bits clear and no virtual address recorded */
	return page->va_and_flags == 0U;
}
/* Set the given K_MEM_PAGE_FRAME_* flag bits on a page frame */
static inline void k_mem_page_frame_set(struct k_mem_page_frame *pf, uint8_t flags)
{
	pf->va_and_flags |= flags;
}
/* Clear the given K_MEM_PAGE_FRAME_* flag bits on a page frame */
static inline void k_mem_page_frame_clear(struct k_mem_page_frame *pf, uint8_t flags)
{
	/* ensure bit inversion to follow is done on the proper type width */
	uintptr_t wide_flags = flags;
	pf->va_and_flags &= ~wide_flags;
}
/* Assert that a physical address is aligned to a page boundary */
static inline void k_mem_assert_phys_aligned(uintptr_t phys)
{
	__ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,
		 "physical address 0x%lx is not page-aligned", phys);
	/* silence "unused parameter" when __ASSERT compiles to nothing */
	(void)phys;
}
extern struct k_mem_page_frame k_mem_page_frames[K_MEM_NUM_PAGE_FRAMES];
/* Convert a page frame struct pointer to the physical address it tracks:
 * frames are stored in array order, one entry per physical page starting
 * at K_MEM_PHYS_RAM_START.
 */
static inline uintptr_t k_mem_page_frame_to_phys(struct k_mem_page_frame *pf)
{
	return (uintptr_t)((pf - k_mem_page_frames) * CONFIG_MMU_PAGE_SIZE) +
			K_MEM_PHYS_RAM_START;
}
/* Presumes there is but one mapping in the virtual address space */
static inline void *k_mem_page_frame_to_virt(struct k_mem_page_frame *pf)
{
	/* flag bits live below the page size, so masking them off leaves
	 * the page-aligned virtual address
	 */
	uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1;
	return (void *)(pf->va_and_flags & ~flags_mask);
}
/* True if the (page-aligned) physical address falls within tracked RAM */
static inline bool k_mem_is_page_frame(uintptr_t phys)
{
	k_mem_assert_phys_aligned(phys);
	return IN_RANGE(phys, (uintptr_t)K_MEM_PHYS_RAM_START,
			(uintptr_t)(K_MEM_PHYS_RAM_END - 1));
}
/* Look up the page frame struct tracking a given physical address */
static inline struct k_mem_page_frame *k_mem_phys_to_page_frame(uintptr_t phys)
{
	__ASSERT(k_mem_is_page_frame(phys),
		 "0x%lx not an SRAM physical address", phys);
	return &k_mem_page_frames[(phys - K_MEM_PHYS_RAM_START) /
				  CONFIG_MMU_PAGE_SIZE];
}
/* Assert that [addr, addr + size) is a page-aligned, non-wrapping region
 * entirely inside the kernel's virtual RAM address space
 */
static inline void k_mem_assert_virtual_region(uint8_t *addr, size_t size)
{
	__ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned addr %p", addr);
	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned size %zu", size);
	__ASSERT(!Z_DETECT_POINTER_OVERFLOW(addr, size),
		 "region %p size %zu zero or wraps around", addr, size);
	__ASSERT(IN_RANGE((uintptr_t)addr,
			  (uintptr_t)K_MEM_VIRT_RAM_START,
			  ((uintptr_t)K_MEM_VIRT_RAM_END - 1)) &&
		 IN_RANGE(((uintptr_t)addr + size - 1),
			  (uintptr_t)K_MEM_VIRT_RAM_START,
			  ((uintptr_t)K_MEM_VIRT_RAM_END - 1)),
		 "invalid virtual address region %p (%zu)", addr, size);
}
/**
 * @brief Pretty-print page frame information for all page frames.
 *
 * Debug function, pretty-print page frame information for all frames
 * concisely to printk.
 */
void k_mem_page_frames_dump(void);
/* Convenience macro for iterating over all page frames */
#define K_MEM_PAGE_FRAME_FOREACH(_phys, _pageframe) \
	for ((_phys) = K_MEM_PHYS_RAM_START, (_pageframe) = k_mem_page_frames; \
	     (_phys) < K_MEM_PHYS_RAM_END; \
	     (_phys) += CONFIG_MMU_PAGE_SIZE, (_pageframe)++)
/** @} */
/**
* @def K_MEM_VM_RESERVED
* @brief Reserve space at the end of virtual memory.
*/
#ifdef CONFIG_DEMAND_PAGING
/* We reserve a virtual page as a scratch area for page-ins/outs at the end
* of the address space
*/
#define K_MEM_VM_RESERVED CONFIG_MMU_PAGE_SIZE
/**
* @brief Location of the scratch page used for demand paging.
*/
#define K_MEM_SCRATCH_PAGE ((void *)((uintptr_t)CONFIG_KERNEL_VM_BASE + \
(uintptr_t)CONFIG_KERNEL_VM_SIZE - \
CONFIG_MMU_PAGE_SIZE))
#else
#define K_MEM_VM_RESERVED 0
#endif /* CONFIG_DEMAND_PAGING */
#ifdef CONFIG_DEMAND_PAGING
/*
* Core kernel demand paging APIs
*/
/**
* Number of page faults since system startup
*
* Counts only those page faults that were handled successfully by the demand
* paging mechanism and were not errors.
*
* @return Number of successful page faults
*/
unsigned long k_mem_num_pagefaults_get(void);
/**
* Free a page frame physical address by evicting its contents
*
* The indicated page frame, if it contains a data page, will have that
* data page evicted to the backing store. The page frame will then be
* marked as available for mappings or page-ins.
*
* This is useful for freeing up entire memory banks so that they may be
* deactivated to save power.
*
* If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
* called by ISRs as the backing store may be in-use.
*
* @param phys Page frame physical address
* @retval 0 Success
* @retval -ENOMEM Insufficient backing store space
*/
int k_mem_page_frame_evict(uintptr_t phys);
/**
* Handle a page fault for a virtual data page
*
* This is invoked from the architecture page fault handler.
*
* If a valid page fault, the core kernel will obtain a page frame,
* populate it with the data page that was evicted to the backing store,
* update page tables, and return so that the faulting instruction may be
* re-tried.
*
* The architecture must not call this function if the page was mapped and
* not paged out at the time the exception was triggered (i.e. a protection
* violation for a mapped page).
*
* If the faulting context had interrupts disabled when the page fault was
* triggered, the entire page fault handling path must have interrupts
* disabled, including the invocation of this function.
*
* Otherwise, interrupts may be enabled and the page fault handler may be
* preemptible. Races to page-in will be appropriately handled by the kernel.
*
* @param addr Faulting virtual address
* @retval true Page fault successfully handled, or nothing needed to be done.
* The arch layer should retry the faulting instruction.
* @retval false This page fault was from an un-mapped page, should
* be treated as an error, and not re-tried.
*/
bool k_mem_page_fault(void *addr);
#endif /* CONFIG_DEMAND_PAGING */
#endif /* CONFIG_MMU */
#endif /* KERNEL_INCLUDE_MMU_H */
``` | /content/code_sandbox/kernel/include/mmu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,211 |
```objective-c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/pm/device.h>
#include "kernel_internal.h"
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_OFFSETS_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_OFFSETS_H_
#include <zephyr/syscall_list.h>
/* All of this is build time magic, but LCOV gets confused. Disable coverage
* for this whole file.
*
* LCOV_EXCL_START
*/
/*
* The final link step uses the symbol _OffsetAbsSyms to force the linkage of
* offsets.o into the ELF image.
*/
GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)
/* Per-CPU structure offsets, consumed by arch assembly code */
GEN_OFFSET_SYM(_cpu_t, current);
GEN_OFFSET_SYM(_cpu_t, nested);
GEN_OFFSET_SYM(_cpu_t, irq_stack);
GEN_OFFSET_SYM(_cpu_t, arch);
GEN_OFFSET_SYM(_kernel_t, cpus);
#if defined(CONFIG_FPU_SHARING)
GEN_OFFSET_SYM(_cpu_t, fp_ctx);
#endif /* CONFIG_FPU_SHARING */
#ifdef CONFIG_PM
GEN_OFFSET_SYM(_kernel_t, idle);
#endif /* CONFIG_PM */
#ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
GEN_OFFSET_SYM(_kernel_t, ready_q);
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
#ifndef CONFIG_SMP
GEN_OFFSET_SYM(_ready_q_t, cache);
#endif /* CONFIG_SMP */
#ifdef CONFIG_FPU_SHARING
GEN_OFFSET_SYM(_kernel_t, current_fp);
#endif /* CONFIG_FPU_SHARING */
/* Thread structure offsets, consumed by arch context-switch code */
GEN_OFFSET_SYM(_thread_base_t, user_options);
GEN_OFFSET_SYM(_thread_t, base);
GEN_OFFSET_SYM(_thread_t, callee_saved);
GEN_OFFSET_SYM(_thread_t, arch);
#ifdef CONFIG_USE_SWITCH
GEN_OFFSET_SYM(_thread_t, switch_handle);
#endif /* CONFIG_USE_SWITCH */
#ifdef CONFIG_THREAD_STACK_INFO
GEN_OFFSET_SYM(_thread_t, stack_info);
#endif /* CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
GEN_OFFSET_SYM(_thread_t, tls);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
GEN_ABSOLUTE_SYM(__z_interrupt_stack_SIZEOF, sizeof(z_interrupt_stacks[0]));
/* member offsets in the device structure. Used in image post-processing */
#ifdef CONFIG_DEVICE_DEPS
GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_HANDLES_OFFSET,
		 offsetof(struct device, deps));
#endif /* CONFIG_DEVICE_DEPS */
#ifdef CONFIG_PM_DEVICE
GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_PM_OFFSET,
		 offsetof(struct device, pm));
#endif /* CONFIG_PM_DEVICE */
/* member offsets in the pm_device structure. Used in image post-processing */
GEN_ABSOLUTE_SYM(_PM_DEVICE_STRUCT_FLAGS_OFFSET,
		 offsetof(struct pm_device_base, flags));
GEN_ABSOLUTE_SYM(_PM_DEVICE_FLAG_PD, PM_DEVICE_FLAG_PD);
/* LCOV_EXCL_STOP */
#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_OFFSETS_H_ */
``` | /content/code_sandbox/kernel/include/kernel_offsets.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 568 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_TIMEOUT_Q_H_
#define ZEPHYR_KERNEL_INCLUDE_TIMEOUT_Q_H_
/**
* @file
* @brief timeout queue for threads on kernel objects
*/
#include <zephyr/kernel.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Prepare a timeout record for use; leaves it unlinked (inactive) */
static inline void z_init_timeout(struct _timeout *to)
{
	sys_dnode_init(&to->node);
}
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout);
int z_abort_timeout(struct _timeout *to);
/* A timeout is inactive iff it is not linked into the timeout queue */
static inline bool z_is_inactive_timeout(const struct _timeout *to)
{
	return !sys_dnode_is_linked(&to->node);
}
/* Initialize the timeout record embedded in a thread's base struct */
static inline void z_init_thread_timeout(struct _thread_base *thread_base)
{
	z_init_timeout(&thread_base->timeout);
}
extern void z_thread_timeout(struct _timeout *timeout);
/* Schedule z_thread_timeout() to fire for this thread after @a ticks */
static inline void z_add_thread_timeout(struct k_thread *thread, k_timeout_t ticks)
{
	z_add_timeout(&thread->base.timeout, z_thread_timeout, ticks);
}
/* Cancel a pending thread timeout; returns z_abort_timeout()'s result */
static inline int z_abort_thread_timeout(struct k_thread *thread)
{
	return z_abort_timeout(&thread->base.timeout);
}
int32_t z_get_next_timeout_expiry(void);
k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
#else
/* Stubs when !CONFIG_SYS_CLOCK_EXISTS: with no system clock every
 * timeout is a no-op and every timeout record reads as inactive.
 */
#define z_init_thread_timeout(thread_base) do {} while (false)
#define z_abort_thread_timeout(to) (0)
#define z_is_inactive_timeout(to) 1
#define z_get_next_timeout_expiry() ((int32_t) K_TICKS_FOREVER)
/* NOTE(review): no z_set_timeout_expiry() is declared in the
 * CONFIG_SYS_CLOCK_EXISTS branch above — presumably declared elsewhere;
 * confirm this stub is still needed.
 */
#define z_set_timeout_expiry(ticks, is_idle) do {} while (false)
static inline void z_add_thread_timeout(struct k_thread *thread, k_timeout_t ticks)
{
	ARG_UNUSED(thread);
	ARG_UNUSED(ticks);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_KERNEL_INCLUDE_TIMEOUT_Q_H_ */
``` | /content/code_sandbox/kernel/include/timeout_q.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 415 |
```objective-c
/* wait queue for multiple threads on kernel objects */
/*
*
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_WAIT_Q_H_
#define ZEPHYR_KERNEL_INCLUDE_WAIT_Q_H_
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/rb.h>
#include <timeout_q.h>
#include <priority_q.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_WAITQ_SCALABLE
#define _WAIT_Q_FOR_EACH(wq, thread_ptr) \
RB_FOR_EACH_CONTAINER(&(wq)->waitq.tree, thread_ptr, base.qnode_rb)
/* Initialize an empty wait queue backed by a red/black tree ordered by
 * z_priq_rb_lessthan (CONFIG_WAITQ_SCALABLE variant)
 */
static inline void z_waitq_init(_wait_q_t *w)
{
	w->waitq = (struct _priq_rb) {
		.tree = {
			.lessthan_fn = z_priq_rb_lessthan
		}
	};
}
/* Return the first thread in tree order, or NULL if the queue is empty */
static inline struct k_thread *z_waitq_head(_wait_q_t *w)
{
	return (struct k_thread *)rb_get_min(&w->waitq.tree);
}
#else /* !CONFIG_WAITQ_SCALABLE: */
#define _WAIT_Q_FOR_EACH(wq, thread_ptr) \
SYS_DLIST_FOR_EACH_CONTAINER(&((wq)->waitq), thread_ptr, \
base.qnode_dlist)
/* Initialize an empty wait queue backed by a doubly-linked list */
static inline void z_waitq_init(_wait_q_t *w)
{
	sys_dlist_init(&w->waitq);
}
/* Return the thread at the head of the list, or NULL if the queue is empty */
static inline struct k_thread *z_waitq_head(_wait_q_t *w)
{
	return (struct k_thread *)sys_dlist_peek_head(&w->waitq);
}
#endif /* !CONFIG_WAITQ_SCALABLE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_KERNEL_INCLUDE_WAIT_Q_H_ */
``` | /content/code_sandbox/kernel/include/wait_q.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 369 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_KSCHED_H_
#define ZEPHYR_KERNEL_INCLUDE_KSCHED_H_
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <timeout_q.h>
#include <kthread.h>
#include <zephyr/tracing/tracing.h>
#include <stdbool.h>
BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
>= K_HIGHEST_APPLICATION_THREAD_PRIO);
#ifdef CONFIG_MULTITHREADING
/* A priority is valid if it is the idle priority used by an actual idle
 * thread entry point, or falls in the application range (numerically
 * lower value = higher priority).
 */
#define Z_VALID_PRIO(prio, entry_point) \
	(((prio) == K_IDLE_PRIO && z_is_idle_thread_entry(entry_point)) || \
	 ((K_LOWEST_APPLICATION_THREAD_PRIO \
	   >= K_HIGHEST_APPLICATION_THREAD_PRIO) \
	  && (prio) >= K_HIGHEST_APPLICATION_THREAD_PRIO \
	  && (prio) <= K_LOWEST_APPLICATION_THREAD_PRIO))
#define Z_ASSERT_VALID_PRIO(prio, entry_point) do { \
	__ASSERT(Z_VALID_PRIO((prio), (entry_point)), \
		 "invalid priority (%d); allowed range: %d to %d", \
		 (prio), \
		 K_LOWEST_APPLICATION_THREAD_PRIO, \
		 K_HIGHEST_APPLICATION_THREAD_PRIO); \
	} while (false)
#else
/* Single-threaded builds: only the dedicated priority -1 is valid */
#define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
#define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif /* CONFIG_MULTITHREADING */
extern struct k_thread _thread_dummy;
void z_sched_init(void);
void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
void z_unpend_thread_no_timeout(struct k_thread *thread);
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
_wait_q_t *wait_q, k_timeout_t timeout);
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
k_timeout_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void z_reschedule_irqlock(uint32_t key);
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
void z_unpend_thread(struct k_thread *thread);
int z_unpend_all(_wait_q_t *wait_q);
bool z_thread_prio_set(struct k_thread *thread, int prio);
void *z_get_next_switch_handle(void *interrupted);
void z_time_slice(void);
void z_reset_time_slice(struct k_thread *curr);
void z_sched_ipi(void);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
void z_ready_thread_locked(struct k_thread *thread);
void z_requeue_current(struct k_thread *curr);
struct k_thread *z_swap_next_thread(void);
void z_thread_abort(struct k_thread *thread);
void move_thread_to_end_of_prio_q(struct k_thread *thread);
bool thread_is_sliceable(struct k_thread *thread);
/* Lock interrupts and invoke the scheduler; a context switch happens
 * here if a higher-priority thread is ready. The irq-lock key is
 * consumed (and the lock released) by z_reschedule_irqlock().
 */
static inline void z_reschedule_unlocked(void)
{
	/* z_reschedule_irqlock() returns void (see its declaration above),
	 * so the former "(void)" discard-cast was dead and is removed.
	 */
	z_reschedule_irqlock(arch_irq_lock());
}
/* True when @a prio does not exceed the configured priority ceiling.
 * Numerically larger values are lower priorities, so any value at or
 * above CONFIG_PRIORITY_CEILING is "under" the ceiling.
 */
static inline bool z_is_under_prio_ceiling(int prio)
{
	return !(prio < CONFIG_PRIORITY_CEILING);
}
/* Clamp a requested priority to the configured priority ceiling */
static inline int z_get_new_prio_with_ceiling(int prio)
{
	if (z_is_under_prio_ceiling(prio)) {
		return prio;
	}

	return CONFIG_PRIORITY_CEILING;
}
/* Zephyr priorities are inverted: a numerically smaller value is a
 * higher priority, so prio1 is at least as high as prio2 exactly when
 * it does not exceed it.
 */
static inline bool z_is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
{
	return !(prio1 > prio2);
}
/* True when prio1 is as high as or higher than prio2 (numerically
 * smaller value = higher priority).
 */
static inline bool z_is_prio_higher_or_equal(int prio1, int prio2)
{
	return prio1 <= prio2;
}
/* True when prio1 is as low as or lower than prio2 (numerically larger
 * value = lower priority).
 */
static inline bool z_is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
{
	return !(prio1 < prio2);
}
/* True when prio1 is strictly higher priority than prio2 (numerically
 * smaller value = higher priority).
 */
static inline bool z_is_prio1_higher_than_prio2(int prio1, int prio2)
{
	return prio2 > prio1;
}
/* True when @a prio is strictly higher priority than @a test_prio
 * (numerically smaller value = higher priority).
 */
static inline bool z_is_prio_higher(int prio, int test_prio)
{
	return prio < test_prio;
}
/* True when prio1 is as low as or lower than prio2 (numerically larger
 * value = lower priority).
 */
static inline bool z_is_prio_lower_or_equal(int prio1, int prio2)
{
	return prio1 >= prio2;
}
int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct k_thread *thread_2);
/* Validate a thread priority: either the reserved idle priority used by
 * an actual idle thread entry point, or a value inside the application
 * priority range.
 */
static inline bool _is_valid_prio(int prio, void *entry_point)
{
	/* Only the idle thread may use K_IDLE_PRIO */
	if ((prio == K_IDLE_PRIO) && z_is_idle_thread_entry(entry_point)) {
		return true;
	}

	/* Otherwise it must fall within the application priority range */
	return z_is_prio_higher_or_equal(prio,
					 K_LOWEST_APPLICATION_THREAD_PRIO) &&
	       z_is_prio_lower_or_equal(prio,
					K_HIGHEST_APPLICATION_THREAD_PRIO);
}
/* Lock the scheduler for the current thread (no context switch will
 * preempt it until unlocked). Not callable from ISRs. The lock nests:
 * sched_locked is an unsigned counter that is decremented to acquire —
 * presumably incremented again on unlock; the assert guards against
 * exceeding the maximum nesting depth (TODO confirm against the unlock
 * path, which is not visible here).
 */
static inline void z_sched_lock(void)
{
	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(_current->base.sched_locked != 1U, "");
	--_current->base.sched_locked;
	/* keep the compiler from reordering work past the lock */
	compiler_barrier();
}
/*
* APIs for working with the Zephyr kernel scheduler. Intended for use in
* management of IPC objects, either in the core kernel or other IPC
* implemented by OS compatibility layers, providing basic wait/wake operations
* with spinlocks used for synchronization.
*
* These APIs are public and will be treated as contract, even if the
* underlying scheduler implementation changes.
*/
/**
* Wake up a thread pending on the provided wait queue
*
* Given a wait_q, wake up the highest priority thread on the queue. If the
* queue was empty just return false.
*
* Otherwise, do the following, in order, holding _sched_spinlock the entire
* time so that the thread state is guaranteed not to change:
* - Set the thread's swap return values to swap_retval and swap_data
* - un-pend and ready the thread, but do not invoke the scheduler.
*
* Repeated calls to this function until it returns false is a suitable
* way to wake all threads on the queue.
*
* It is up to the caller to implement locking such that the return value of
* this function (whether a thread was woken up or not) does not immediately
* become stale. Calls to wait and wake on the same wait_q object must have
* synchronization. Calling this without holding any spinlock is a sign that
* this API is not being used properly.
*
* @param wait_q Wait queue to wake up the highest prio thread
* @param swap_retval Swap return value for woken thread
* @param swap_data Data return value to supplement swap_retval. May be NULL.
* @retval true If a thread was woken up
* @retval false If the wait_q was empty
*/
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data);
/**
* Wakes the specified thread.
*
* Given a specific thread, wake it up. This routine assumes that the given
* thread is not on the timeout queue.
*
* @param thread Given thread to wake up.
* @param is_timeout True if called from the timer ISR; false otherwise.
*
*/
void z_sched_wake_thread(struct k_thread *thread, bool is_timeout);
/**
* Wake up all threads pending on the provided wait queue
*
* Convenience function to invoke z_sched_wake() on all threads in the queue
* until there are no more to wake up.
*
* @param wait_q Wait queue to wake up the highest prio thread
* @param swap_retval Swap return value for woken thread
* @param swap_data Data return value to supplement swap_retval. May be NULL.
* @retval true If any threads were woken up
* @retval false If the wait_q was empty
*/
static inline bool z_sched_wake_all(_wait_q_t *wait_q, int swap_retval,
				    void *swap_data)
{
	bool any_woken = false;

	/* Keep waking the highest-priority waiter until the queue drains */
	for (;;) {
		if (!z_sched_wake(wait_q, swap_retval, swap_data)) {
			break;
		}
		any_woken = true;
	}

	/* True if we woke at least one thread up */
	return any_woken;
}
/**
* Atomically put the current thread to sleep on a wait queue, with timeout
*
* The thread will be added to the provided waitqueue. The lock, which should
* be held by the caller with the provided key, will be released once this is
* completely done and we have swapped out.
*
* The return value and data pointer is set by whoever woke us up via
* z_sched_wake.
*
* @param lock Address of spinlock to release when we swap out
* @param key Key to the provided spinlock when it was locked
* @param wait_q Wait queue to go to sleep on
* @param timeout Waiting period to be woken up, or K_FOREVER to wait
* indefinitely.
* @param data Storage location for data pointer set when thread was woken up.
* May be NULL if not used.
* @retval Return value set by whatever woke us up, or -EAGAIN if the timeout
* expired without being woken up.
*/
int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
_wait_q_t *wait_q, k_timeout_t timeout, void **data);
/**
* @brief Walks the wait queue invoking the callback on each waiting thread
*
* This function walks the wait queue invoking the callback function on each
* waiting thread while holding _sched_spinlock. This can be useful for routines
* that need to operate on multiple waiting threads.
*
* CAUTION! As a wait queue is of indeterminate length, the scheduler will be
* locked for an indeterminate amount of time. This may impact system
* performance. As such, care must be taken when using both this function and
* the specified callback.
*
* @param wait_q Identifies the wait queue to walk
* @param func Callback to invoke on each waiting thread
* @param data Custom data passed to the callback
*
* @retval non-zero if walk is terminated by the callback; otherwise 0
*/
int z_sched_waitq_walk(_wait_q_t *wait_q,
int (*func)(struct k_thread *, void *), void *data);
/** @brief Halt thread cycle usage accounting.
*
* Halts the accumulation of thread cycle usage and adds the current
* total to the thread's counter. Called on context switch.
*
* Note that this function is idempotent. The core kernel code calls
* it at the end of interrupt handlers (because that is where we have
* a portable hook) where we are context switching, which will include
* any cycles spent in the ISR in the per-thread accounting. But
* architecture code can also call it earlier out of interrupt entry
* to improve measurement fidelity.
*
* This function assumes local interrupts are masked (so that the
* current CPU pointer and current thread are safe to modify), but
* requires no other synchronization. Architecture layers don't need
* to do anything more.
*/
void z_sched_usage_stop(void);
void z_sched_usage_start(struct k_thread *thread);
/**
* @brief Retrieves CPU cycle usage data for specified core
*/
void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats);
/**
* @brief Retrieves thread cycle usage data for specified thread
*/
void z_sched_thread_usage(struct k_thread *thread,
struct k_thread_runtime_stats *stats);
/* Roll thread usage accounting over at context switch: close out the
 * outgoing thread's interval and start timing @a thread. No-op unless
 * CONFIG_SCHED_THREAD_USAGE is enabled.
 */
static inline void z_sched_usage_switch(struct k_thread *thread)
{
	ARG_UNUSED(thread);
#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_usage_stop();
	z_sched_usage_start(thread);
#endif /* CONFIG_SCHED_THREAD_USAGE */
}
#endif /* ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ */
``` | /content/code_sandbox/kernel/include/ksched.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,551 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
#define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/barrier.h>
#include <kernel_arch_func.h>
#ifdef CONFIG_STACK_SENTINEL
extern void z_check_stack_sentinel(void);
#else
#define z_check_stack_sentinel() /**/
#endif /* CONFIG_STACK_SENTINEL */
extern struct k_spinlock _sched_spinlock;
/* In SMP, the irq_lock() is a spinlock which is implicitly released
* and reacquired on context switch to preserve the existing
* semantics. This means that whenever we are about to return to a
* thread (via either z_swap() or interrupt/exception return!) we need
* to restore the lock state to whatever the thread's counter
* expects.
*/
void z_smp_release_global_lock(struct k_thread *thread);
/* context switching and scheduling-related routines */
#ifdef CONFIG_USE_SWITCH
/* Spin, with the scheduler lock held (!), on a thread that is known
* (!!) to have released the lock and be on a path where it will
* deterministically (!!!) reach arch_switch() in very small constant
* time.
*
* This exists to treat an unavoidable SMP race when threads swap --
* their thread record is in the queue (and visible to other CPUs)
* before arch_switch() finishes saving state. We must spin for the
* switch handle before entering a new thread. See docs on
* arch_switch().
*
* Stated differently: there's a chicken and egg bug with the question
* of "is a thread running or not?". The thread needs to mark itself
* "not running" from its own context, but at that moment it obviously
* is still running until it reaches arch_switch()! Locking can't
* treat this because the scheduler lock can't be released by the
* switched-to thread, which is going to (obviously) be running its
* own code and doesn't know it was switched out.
*/
/* Spin until @a thread's switch_handle becomes non-NULL, i.e. until the
 * CPU that last ran it has finished saving its context in arch_switch().
 * No-op on uniprocessor builds. See the race description above.
 */
static inline void z_sched_switch_spin(struct k_thread *thread)
{
#ifdef CONFIG_SMP
	volatile void **shp = (void *)&thread->switch_handle;
	while (*shp == NULL) {
		arch_spin_relax();
	}
	/* Read barrier: don't allow any subsequent loads in the
	 * calling code to reorder before we saw switch_handle go
	 * non-null.
	 */
	barrier_dmem_fence_full();
#endif /* CONFIG_SMP */
}
/* New style context switching. arch_switch() is a lower level
* primitive that doesn't know about the scheduler or return value.
* Needed for SMP, where the scheduler requires spinlocking that we
* don't want to have to do in per-architecture assembly.
*
* Note that is_spinlock is a compile-time construct which will be
* optimized out when this function is expanded.
*/
/* Core of the switch-based context switch path.
 *
 * @param key Interrupt lock key to restore on return to this thread
 * @param lock Spinlock held by the caller (released here), or NULL
 * @param is_spinlock Compile-time flag: true when entered via z_swap()
 *                    holding a spinlock rather than an irq_lock
 * @return The thread's swap_retval as set by whoever woke it (defaults
 *         to -EAGAIN below)
 */
static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
					  struct k_spinlock *lock,
					  bool is_spinlock)
{
	ARG_UNUSED(lock);
	struct k_thread *new_thread, *old_thread;
#ifdef CONFIG_SPIN_VALIDATE
	/* Make sure the key acts to unmask interrupts, if it doesn't,
	 * then we are context switching out of a nested lock
	 * (i.e. breaking the lock of someone up the stack) which is
	 * forbidden! The sole exception are dummy threads used
	 * during initialization (where we start with interrupts
	 * masked and switch away to begin scheduling) and the case of
	 * a dead current thread that was just aborted (where the
	 * damage was already done by the abort anyway).
	 *
	 * (Note that this is disabled on ARM64, where system calls
	 * can sometimes run with interrupts masked in ways that don't
	 * represent lock state. See #35307)
	 */
# ifndef CONFIG_ARM64
	__ASSERT(arch_irq_unlocked(key) ||
		 _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
		 "Context switching while holding lock!");
# endif /* CONFIG_ARM64 */
#endif /* CONFIG_SPIN_VALIDATE */
	old_thread = _current;
	z_check_stack_sentinel();
	/* Default return value; overwritten by the waker via
	 * arch_thread_return_value_set() before we resume.
	 */
	old_thread->swap_retval = -EAGAIN;
	/* We always take the scheduler spinlock if we don't already
	 * have it. We "release" other spinlocks here. But we never
	 * drop the interrupt lock.
	 */
	if (is_spinlock && lock != NULL && lock != &_sched_spinlock) {
		k_spin_release(lock);
	}
	if (!is_spinlock || lock != &_sched_spinlock) {
		(void) k_spin_lock(&_sched_spinlock);
	}
	new_thread = z_swap_next_thread();
	if (new_thread != old_thread) {
		z_sched_usage_switch(new_thread);
#ifdef CONFIG_SMP
		_current_cpu->swap_ok = 0;
		new_thread->base.cpu = arch_curr_cpu()->id;
		if (!is_spinlock) {
			z_smp_release_global_lock(new_thread);
		}
#endif /* CONFIG_SMP */
		z_thread_mark_switched_out();
		/* Wait for the incoming thread's context to be fully saved
		 * by whatever CPU last ran it before using it.
		 */
		z_sched_switch_spin(new_thread);
		_current_cpu->current = new_thread;
#ifdef CONFIG_TIMESLICING
		z_reset_time_slice(new_thread);
#endif /* CONFIG_TIMESLICING */
#ifdef CONFIG_SPIN_VALIDATE
		z_spin_lock_set_owner(&_sched_spinlock);
#endif /* CONFIG_SPIN_VALIDATE */
		arch_cohere_stacks(old_thread, NULL, new_thread);
#ifdef CONFIG_SMP
		/* Now add _current back to the run queue, once we are
		 * guaranteed to reach the context switch in finite
		 * time. See z_sched_switch_spin().
		 */
		z_requeue_current(old_thread);
#endif /* CONFIG_SMP */
		void *newsh = new_thread->switch_handle;
		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads must have a null here. And
			 * it must be seen before the scheduler lock
			 * is released!
			 */
			new_thread->switch_handle = NULL;
			barrier_dmem_fence_full(); /* write barrier */
		}
		k_spin_release(&_sched_spinlock);
		arch_switch(newsh, &old_thread->switch_handle);
	} else {
		k_spin_release(&_sched_spinlock);
	}
	/* Execution resumes here when this thread is switched back in */
	if (is_spinlock) {
		arch_irq_unlock(key);
	} else {
		irq_unlock(key);
	}
	return _current->swap_retval;
}
/* Swap out while holding only an irq_lock (key restored on resume) */
static inline int z_swap_irqlock(unsigned int key)
{
	return do_swap(key, NULL, false);
}
/* Swap out while holding @a lock, which is released before switching */
static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	return do_swap(key.key, lock, true);
}
/* Swap out with no lock held; interrupts are masked for the duration */
static inline void z_swap_unlocked(void)
{
	(void) do_swap(arch_irq_lock(), NULL, true);
}
#else /* !CONFIG_USE_SWITCH */
extern int arch_swap(unsigned int key);
/* No-op: the switch-handle handshake only exists on CONFIG_USE_SWITCH */
static inline void z_sched_switch_spin(struct k_thread *thread)
{
	ARG_UNUSED(thread);
}
/* Legacy swap path: delegate the context switch to arch_swap() */
static inline int z_swap_irqlock(unsigned int key)
{
	int ret;
	z_check_stack_sentinel();
	ret = arch_swap(key);
	return ret;
}
/* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
 * can't be in SMP. The k_spin_release() call is just for validation
 * handling.
 */
static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	k_spin_release(lock);
	return z_swap_irqlock(key.key);
}
/* Swap out with no lock held; interrupts are masked for the duration */
static inline void z_swap_unlocked(void)
{
	(void) z_swap_irqlock(arch_irq_lock());
}
#endif /* !CONFIG_USE_SWITCH */
/**
* Set up a "dummy" thread, used at early initialization to launch the
* first thread on a CPU.
*
* Needs to set enough fields such that the context switching code can
* use it to properly store state, which will just be discarded.
*
* The memory of the dummy thread can be completely uninitialized.
*/
static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
{
	/* Mark as dummy so validation/abort paths know to ignore it */
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#ifdef CONFIG_SCHED_CPU_MASK
	/* runnable on every CPU */
	dummy_thread->base.cpu_mask = -1;
#endif /* CONFIG_SCHED_CPU_MASK */
	dummy_thread->base.user_options = K_ESSENTIAL;
#ifdef CONFIG_THREAD_STACK_INFO
	dummy_thread->stack_info.start = 0U;
	dummy_thread->stack_info.size = 0U;
#endif /* CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_USERSPACE
	dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
#endif /* CONFIG_USERSPACE */
#if (K_HEAP_MEM_POOL_SIZE > 0)
	k_thread_system_pool_assign(dummy_thread);
#else
	dummy_thread->resource_pool = NULL;
#endif /* K_HEAP_MEM_POOL_SIZE */
#ifdef CONFIG_TIMESLICE_PER_THREAD
	dummy_thread->base.slice_ticks = 0;
#endif /* CONFIG_TIMESLICE_PER_THREAD */
	/* Install as the CPU's current thread; its state is discarded on
	 * the first real context switch.
	 */
	_current_cpu->current = dummy_thread;
}
#endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */
``` | /content/code_sandbox/kernel/include/kswap.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,918 |
```objective-c
/*
*
*/
/**
* @file
* @brief Architecture-independent private kernel APIs
*
* This file contains private kernel APIs that are not architecture-specific.
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#include <zephyr/kernel.h>
#include <kernel_arch_interface.h>
#include <string.h>
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
/* Initialize per-CPU kernel data */
void z_init_cpu(int id);
/* Initialize a thread */
void z_init_thread_base(struct _thread_base *thread_base, int priority,
uint32_t initial_state, unsigned int options);
/* Early boot functions */
void z_early_memset(void *dst, int c, size_t n);
void z_early_memcpy(void *dst, const void *src, size_t n);
void z_bss_zero(void);
#ifdef CONFIG_XIP
void z_data_copy(void);
#else
/* Non-XIP builds: data is already in RAM, nothing to copy */
static inline void z_data_copy(void)
{
	/* Do nothing */
}
#endif /* CONFIG_XIP */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
void z_bss_zero_boot(void);
#else
/* No dedicated boot section: no separate BSS region to zero */
static inline void z_bss_zero_boot(void)
{
	/* Do nothing */
}
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
void z_bss_zero_pinned(void);
#else
/* No pinned section: no separate BSS region to zero */
static inline void z_bss_zero_pinned(void)
{
	/* Do nothing */
}
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
FUNC_NORETURN void z_cstart(void);
void z_device_state_init(void);
extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
void *p1, void *p2, void *p3);
extern char *z_setup_new_thread(struct k_thread *new_thread,
k_thread_stack_t *stack, size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, const char *name);
/**
* @brief Allocate aligned memory from the current thread's resource pool
*
* Threads may be assigned a resource pool, which will be used to allocate
* memory on behalf of certain kernel and driver APIs. Memory reserved
* in this way should be freed with k_free().
*
* If called from an ISR, the k_malloc() system heap will be used if it exists.
*
* @param align Required memory alignment
* @param size Memory allocation size
* @return A pointer to the allocated memory, or NULL if there is insufficient
* RAM in the pool or there is no pool to draw memory from
*/
void *z_thread_aligned_alloc(size_t align, size_t size);
/**
* @brief Allocate some memory from the current thread's resource pool
*
* Threads may be assigned a resource pool, which will be used to allocate
* memory on behalf of certain kernel and driver APIs. Memory reserved
* in this way should be freed with k_free().
*
* If called from an ISR, the k_malloc() system heap will be used if it exists.
*
* @param size Memory allocation size
* @return A pointer to the allocated memory, or NULL if there is insufficient
* RAM in the pool or there is no pool to draw memory from
*/
static inline void *z_thread_malloc(size_t size)
{
	/* Convenience wrapper: an alignment of 0 requests the pool's
	 * default alignment from the aligned allocator.
	 */
	void *mem = z_thread_aligned_alloc(0, size);

	return mem;
}
#ifdef CONFIG_USE_SWITCH
/* This is an arch function traditionally, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	/* Record the value z_swap() will hand back when @a thread resumes */
	thread->swap_retval = value;
}
#endif
static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				    unsigned int value,
				    void *data)
{
	/* Attach auxiliary data for the woken thread, then store the
	 * swap return value; the two stores are independent.
	 */
	thread->base.swap_data = data;
	arch_thread_return_value_set(thread, value);
}
#ifdef CONFIG_SMP
extern void z_smp_init(void);
#ifdef CONFIG_SYS_CLOCK_EXISTS
extern void smp_timer_init(void);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
#endif /* CONFIG_SMP */
extern void z_early_rand_get(uint8_t *buf, size_t length);
#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
extern int z_stack_adjust_initialized;
#endif /* CONFIG_STACK_POINTER_RANDOM */
extern struct k_thread z_main_thread;
#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
#endif /* CONFIG_MULTITHREADING */
K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
CONFIG_ISR_STACK_SIZE);
#ifdef CONFIG_GEN_PRIV_STACKS
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif /* CONFIG_GEN_PRIV_STACKS */
/* Calculate stack usage. */
int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);
#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);
/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);
/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);
/* This spinlock:
*
* - Protects the full set of active k_mem_domain objects and their contents
* - Serializes calls to arch_mem_domain_* APIs
*
* If architecture code needs to access k_mem_domain structures or the
* partitions they contain at any other point, this spinlock should be held.
* Uniprocessor systems can get away with just locking interrupts but this is
* not recommended.
*/
extern struct k_spinlock z_mem_domain_lock;
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_GDBSTUB
struct gdb_ctx;
/* Should be called by the arch layer. This is the gdbstub main loop
* and synchronously communicate with gdb on host.
*/
extern int z_gdb_main_loop(struct gdb_ctx *ctx);
#endif /* CONFIG_GDBSTUB */
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else
/**
* @brief Called after a thread has been selected to run
*/
#define z_thread_mark_switched_in()
/**
* @brief Called before a thread has been selected to run
*/
#define z_thread_mark_switched_out()
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
/* Init hook for page frame management, invoked immediately upon entry of
* main thread, before POST_KERNEL tasks
*/
void z_mem_manage_init(void);
/**
* @brief Finalize page frame management at the end of boot process.
*/
void z_mem_manage_boot_finish(void);
void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
#ifdef CONFIG_PM
/* When the kernel is about to go idle, it calls this function to notify the
* power management subsystem, that the kernel is ready to enter the idle state.
*
* At this point, the kernel has disabled interrupts and computed the maximum
* time the system can remain idle. The function passes the time that the system
* can remain idle. The SOC interface performs power operations that can be done
* in the available time. The power management operations must halt execution of
* the CPU.
*
* This function assumes that a wake up event has already been set up by the
* application.
*
* This function is entered with interrupts disabled. It should re-enable
* interrupts if it had entered a power state.
*
* @return True if the system suspended, otherwise return false
*/
bool pm_system_suspend(int32_t ticks);
#endif /* CONFIG_PM */
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
* Initialize the timing histograms for demand paging.
*/
void z_paging_histogram_init(void);
/**
* Increment the counter in the timing histogram.
*
* @param hist The timing histogram to be updated.
* @param cycles Time spent in measured operation.
*/
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
uint32_t cycles);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_thread_stats_query(struct k_obj_core *obj_core, void *stats);
int z_thread_stats_reset(struct k_obj_core *obj_core);
int z_thread_stats_disable(struct k_obj_core *obj_core);
int z_thread_stats_enable(struct k_obj_core *obj_core);
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats);
int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
#if defined(CONFIG_THREAD_ABORT_NEED_CLEANUP)
/**
* Perform cleanup at the end of k_thread_abort().
*
* This performs additional cleanup steps at the end of k_thread_abort()
* where these steps require that the thread is no longer running.
* If the target thread is not the current running thread, the cleanup
* steps will be performed immediately. However, if the target thread is
* the current running thread (e.g. k_thread_abort(_current)), it defers
* the cleanup steps to later when the work will be finished in another
* context.
*
* @param thread Pointer to thread to be cleaned up.
*/
void k_thread_abort_cleanup(struct k_thread *thread);
/**
* Check if thread is the same as the one waiting for cleanup.
*
* This is used to guard against reusing the same thread object
* before the previous cleanup has finished. This will perform
* the necessary cleanups before the thread object can be
* reused. Should mainly be used during thread creation.
*
* @param thread Pointer to thread to be checked.
*/
void k_thread_abort_cleanup_check_reuse(struct k_thread *thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */
``` | /content/code_sandbox/kernel/include/kernel_internal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,153 |
```objective-c
/*
*
*/
/**
* @file
* @brief Kernel Thread Local Storage APIs.
*
* Kernel APIs related to thread local storage.
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_TLS_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_TLS_H_
#include <zephyr/linker/linker-defs.h>
/**
* @brief Return the total size of TLS data/bss areas
*
* This returns the total size of thread local storage (TLS)
* data and bss areas as defined in the linker script.
* Note that this does not include any architecture specific
* bits required for proper functionality of TLS.
*
* @return Total size of TLS data/bss areas
*/
static inline size_t z_tls_data_size(void)
{
	/* Linker-provided symbols: their addresses encode the section
	 * sizes, hence the pointer-to-integer conversions.
	 */
	size_t tdata = (size_t)(uintptr_t)__tdata_size;
	size_t tbss = (size_t)(uintptr_t)__tbss_size;

	return tdata + tbss;
}
/**
* @brief Copy the TLS data/bss areas into destination
*
* This copies the TLS data into destination and clear the area
* of TLS bss size after the data section.
*
* @param dest Pointer to destination
*/
static inline void z_tls_copy(char *dest)
{
/* Copy initialized data (tdata) */
memcpy(dest, __tdata_start, (size_t)(uintptr_t)__tdata_size);
/* Clear BSS data (tbss) */
dest += (size_t)(uintptr_t)__tdata_size;
memset(dest, 0, (size_t)(uintptr_t)__tbss_size);
}
#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_TLS_H_ */
``` | /content/code_sandbox/kernel/include/kernel_tls.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 320 |
```unknown
config VEGA_SDK_HAL
bool "RV32M1 VEGA SDK support"
depends on SOC_OPENISA_RV32M1
config HAS_RV32M1_LPUART
bool
help
Set if the low power uart (LPUART) module is present in the SoC.
config HAS_RV32M1_LPI2C
bool
help
Set if the low power i2c (LPI2C) module is present in the SoC.
config HAS_RV32M1_LPSPI
bool
help
Set if the low power spi (LPSPI) module is present in the SoC.
config HAS_RV32M1_TPM
bool
help
Set if the Timer/PWM (TPM) module is present in the SoC.
config HAS_RV32M1_FTFX
bool
help
Set if the flash memory (FTFA, FTFE, or FTFL) module is present in
the SoC.
``` | /content/code_sandbox/modules/Kconfig.vega | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 216 |
```unknown
# MCUXpresso SDK
config HAS_MCUX
bool
depends on SOC_FAMILY_KINETIS || SOC_FAMILY_NXP_IMX || SOC_FAMILY_LPC || \
SOC_FAMILY_NXP_S32 || SOC_FAMILY_NXP_IMXRT || SOC_FAMILY_NXP_RW || \
SOC_FAMILY_NXP_MCX
if HAS_MCUX
config MCUX_CORE_SUFFIX
string
help
String describing the core identifier used by MCUX SDK when using
dual core parts
config HAS_MCUX_12B1MSPS_SAR
bool
help
Set if the 12B1MSPS SAR ADC module is present in the SoC.
config HAS_MCUX_ADC12
bool
help
Set if the 12-bit ADC (ADC12) module is present in the SoC.
config HAS_MCUX_ADC16
bool
help
Set if the 16-bit ADC (ADC16) module is present in the SoC.
config HAS_MCUX_LPADC
bool
help
Set if the LPADC module is present in the SoC.
config HAS_MCUX_CACHE
bool
help
Set if the L1 or L2 cache is present in the SoC.
config HAS_MCUX_CCM
bool
help
Set if the clock control module (CCM) module is present in the SoC.
config HAS_MCUX_CCM_REV2
bool
help
Set if the revision 2 of clock control module (CCM) module is present in the SoC.
This is present in SoC's such as RT1170
config HAS_MCUX_SYSCON
bool
help
Set if the syscon module is present in the SoC.
config HAS_MCUX_PCC
bool
help
Set if the peripheral clock controller module (PCC) module is
present in the SoC.
config HAS_MCUX_ELCDIF
bool
help
Set if the enhanced LCD interface (eLCDIF) module is present in the
SoC.
config HAS_MCUX_MIPI_DSI
bool
help
Set if the MIPI DSI module is present in the SoC.
config HAS_MCUX_ENET
bool
help
Set if the ethernet (ENET) module is present in the SoC.
config HAS_MCUX_FLEXCAN
bool
help
Set if the FlexCAN module is present in the SoC.
config HAS_MCUX_FLEXCOMM
bool
help
Set if the flexcomm (FLEXCOMM) module is present in the SoC.
config HAS_MCUX_FLEXSPI
bool
help
Set if the flexible SPI (FlexSPI) module is present in the SoC.
config HAS_MCUX_FTFX
bool
help
Set if the flash memory (FTFA, FTFE, or FTFL) module is present in
the SoC.
config HAS_MCUX_FTM
bool
help
Set if the FlexTimer (FTM) module is present in the SoC.
config HAS_MCUX_IAP
bool
help
Set if the flash memory In Application Programming is present in
the LPC55xxx family SoCs.
config HAS_MCUX_IAP_LEGACY
bool
help
Set if the flash memory In Application Programming is present in
the older LPC family SoCs (LPC54xxx, LPC11xxx).
config HAS_MCUX_IGPIO
bool
help
Set if the iMX GPIO (IGPIO) module is present in the SoC.
config HAS_MCUX_IOMUXC
bool
help
Set if the iMX I/O mux controller (IOMUXC) is present in the SoC.
config HAS_MCUX_LPI2C
bool
help
Set if the low power I2C (LPI2C) module is present in the SoC.
config HAS_MCUX_LPSCI
bool
help
Set if the low power uart (LPSCI) module is present in the SoC.
config HAS_MCUX_LPSPI
bool
help
Set if the low power SPI (LPSPI) module is present in the SoC.
config HAS_MCUX_LPUART
bool
help
Set if the low power uart (LPUART) module is present in the SoC.
config HAS_MCUX_GPT
bool
help
Set if the general purpose timer (GPT) module is present in the SoC.
config HAS_MCUX_QTMR
bool
help
Set if the quad timer (QTMR) module is present in the SoC.
config HAS_MCUX_GPC
bool
help
Set if the general power controller (GPC) module is present in the SoC.
config HAS_MCUX_PMU
bool
help
Set if the power management unit (PMU) module is present in the SoC.
config HAS_MCUX_DCDC
bool
help
Set if the DCDC converter module is present in the SoC.
config HAS_MCUX_SNVS
bool
help
Set if the SNVS module is present on the SoC.
config HAS_MCUX_RNG
bool
help
Set if the LPC specific random number generator (RNG) module is
present in the SoC.
config HAS_MCUX_RNGA
bool
help
Set if the random number generator accelerator (RNGA) module is
present in the SoC.
config HAS_MCUX_RTC
bool
help
Set if the real time clock (RTC) modules is present in the SoC.
config HAS_MCUX_LPC_RTC
bool
help
Set if the LPC real time clock (RTC) modules is present in the SoC.
config HAS_MCUX_SCG
bool
help
Set if the system clock generator (SCG) module is present in the
SoC.
config HAS_MCUX_SEMC
bool
help
Set if the smart external memory controller (SEMC) module is present
in the SoC.
config HAS_MCUX_SIM
bool
help
Set if the system integration module (SIM) module is present in the
SoC.
config HAS_MCUX_SRC
bool
help
Set if the system reset controller (SRC) module is present in the
SoC.
config HAS_MCUX_SRC_V2
bool
help
Set if version 2 of the system reset controller (SRC) module is
present in the SoC.
config HAS_MCUX_TRNG
bool
help
Set if the true random number generator (TRNG) module is present in
the SoC.
config HAS_MCUX_USB_EHCI
bool
help
Set if the USB controller EHCI module is present in the SoC.
config HAS_MCUX_USB_LPCIP3511
bool
help
Set if the USB controller LPCIP3511 module is present in the SoC.
config HAS_MCUX_USDHC1
bool
help
Set if the USDHC instance 1 module is present in the SoC.
config HAS_MCUX_USDHC2
bool
help
Set if the USDHC instance 2 module is present in the SoC.
config HAS_MCUX_WDOG32
bool
help
Set if the watchdog (WDOG32) module is present in the SoC.
config HAS_MCUX_WWDT
bool
help
Set if the watchdog (WWDT) module is present in the SoC.
config HAS_MCUX_PWM
bool
help
Set if the PWM module is present in the SoC.
config HAS_MCUX_SCTIMER
bool
help
Set if the sctimer module is present in the SoC.
config HAS_MCUX_SMC
bool
help
Set if the SMC module is present in the SoC.
config HAS_MCUX_LPTMR
bool
help
Set if the Low Power Timer (LPTMR) module is present in the SoC.
config HAS_MCUX_DAC
bool
help
Set if the Digital-to-Analog (DAC) module is present in the SoC.
config HAS_MCUX_DAC32
bool
help
Set if the Digital-to-Analog (DAC32) module is present in the SoC.
config HAS_MCUX_TPM
bool
help
Set if the Timer/PWM Module is present in the SoC
config HAS_MCUX_EDMA
bool
help
Set if the EDMA module is present on the SoC.
config HAS_MCUX_LPC_DMA
bool
help
Set if the DMA module is present on the SoC.
config HAS_MCUX_RDC
bool
help
Set if the RDC module is present in the SoC.
config HAS_MCUX_PIT
bool
help
Set if the PIT module is present on the SoC.
config HAS_MCUX_OS_TIMER
bool
help
Set if the OS timer is used as a kernel timer on the SoC.
config HAS_MCUX_PWT
bool
help
Set if the PWT module is present on the SoC.
config HAS_MCUX_RCM
bool
help
Set if the Reset Control Module (RCM) module is present in
the SoC.
config HAS_MCUX_CTIMER
bool
help
Set if the CTIMER module is present in the SoC.
config HAS_MCUX_I2S
bool
help
Set if the I2S/SAI module is present on the SoC.
config HAS_MCUX_MCAN
bool
help
Set if the MCAN module is present on the SoC.
config HAS_MCUX_ADC_ETC
bool
help
Set if the ADC External Trigger Control module is present
on the SoC.
config HAS_MCUX_XBARA
bool
help
Set if the XBARA module is present on the SoC.
config NXP_FW_LOADER
bool "Include firmware loader component"
help
The firmware loader is used to load firmware to embedded transceivers.
It is needed to enable connectivity features.
config NXP_MONOLITHIC_WIFI
bool "WiFi firmware monolithic build"
help
If enabled, the WiFi firmware used by the device will be linked with the
application directly.
config NXP_MONOLITHIC_BT
bool "BT firmware monolithic build"
help
If enabled, the BT firmware used by the device will be linked with the
application directly.
config NXP_RF_IMU
bool "Include RF_IMU adapter for intercore messaging"
select EVENTS
help
RF_IMU adapter is needed for intercore messaging.
endif # HAS_MCUX
``` | /content/code_sandbox/modules/Kconfig.mcux | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,264 |
```unknown
# Atmel SDK
config ASF
bool
select HAS_CMSIS_CORE
depends on SOC_FAMILY_ATMEL_SAM || SOC_FAMILY_ATMEL_SAM0
config ATMEL_WINC1500
bool
``` | /content/code_sandbox/modules/Kconfig.atmel | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 48 |
```unknown
# Gecko SDK
config HAS_SILABS_GECKO
bool
select HAS_CMSIS_CORE
depends on SOC_FAMILY_SILABS_S0 || SOC_FAMILY_SILABS_S1 || SOC_FAMILY_SILABS_S2
``` | /content/code_sandbox/modules/Kconfig.silabs | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 50 |
```unknown
# CC3220/CC32XX SDK HAL configuration
config HAS_CC3220SDK
bool
# Notes:
# SimpleLink drivers require types (stdint.h) from c library which is not
# provided by minimal lbc
# Selecting ERRNO lets host driver use Zephyr's __errno
# Selecting POSIX_THREADS and POSIX_API are needed to build the host driver
config SIMPLELINK_HOST_DRIVER
bool "Build the SimpleLink WiFi Host Driver"
depends on HAS_CC3220SDK
depends on MULTITHREADING
select REQUIRES_FULL_LIBC
select ERRNO
select POSIX_THREADS
select POSIX_TIMERS
help
Build the SimpleLink host driver
# MSP432 SDK HAL configuration
config HAS_MSP432P4XXSDK
bool
select HAS_CMSIS_CORE
# CC13X2 / CC26X2 SDK HAL configuration
config HAS_CC13X2_CC26X2_SDK
bool
config HAS_CC13X2X7_CC26X2X7_SDK
bool
``` | /content/code_sandbox/modules/Kconfig.simplelink | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 215 |
```unknown
config ZEPHYR_SOF_MODULE
bool
config SOF
bool "Sound Open Firmware (SOF)"
depends on ZEPHYR_SOF_MODULE
help
Build Sound Open Firmware (SOF) support.
``` | /content/code_sandbox/modules/Kconfig.sof | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 50 |
```unknown
# Würth Elektronik HAL config
config HAS_WESENSORS
bool
``` | /content/code_sandbox/modules/Kconfig.wurthelektronik | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 19 |
```objective-c
/*
*
*/
/**
* @file
* @brief Internal kernel APIs implemented at the architecture layer.
*
* Not all architecture-specific defines are here, APIs that are used
* by public functions and macros are defined in include/zephyr/arch/arch_interface.h.
*
* For all inline functions prototyped here, the implementation is expected
* to be provided by arch/ARCH/include/kernel_arch_func.h
*/
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
#include <zephyr/kernel.h>
#include <zephyr/arch/arch_interface.h>
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup arch-timing Architecture timing APIs
* @{
*/
#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
/**
* Architecture-specific implementation of busy-waiting
*
* @param usec_to_wait Wait period, in microseconds
*/
void arch_busy_wait(uint32_t usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
/** @} */
/**
* @defgroup arch-threads Architecture thread APIs
* @ingroup arch-interface
* @{
*/
/** Handle arch-specific logic for setting up new threads
*
* The stack and arch-specific thread state variables must be set up
* such that a later attempt to switch to this thread will succeed
* and we will enter z_thread_entry with the requested thread and
* arguments as its parameters.
*
* At some point in this function's implementation, z_setup_new_thread() must
* be called with the true bounds of the available stack buffer within the
* thread's stack object.
*
* The provided stack pointer is guaranteed to be properly aligned with respect
* to the CPU and ABI requirements. There may be space reserved between the
* stack pointer and the bounds of the stack buffer for initial stack pointer
* randomization and thread-local storage.
*
* Fields in thread->base will be initialized when this is called.
*
* @param thread Pointer to uninitialized struct k_thread
* @param stack Pointer to the stack object
* @param stack_ptr Aligned initial stack pointer
* @param entry Thread entry function
* @param p1 1st entry point parameter
* @param p2 2nd entry point parameter
* @param p3 3rd entry point parameter
*/
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
char *stack_ptr, k_thread_entry_t entry,
void *p1, void *p2, void *p3);
#ifdef CONFIG_USE_SWITCH
/** Cooperative context switch primitive
*
* The action of arch_switch() should be to switch to a new context
* passed in the first argument, and save a pointer to the current
* context into the address passed in the second argument.
*
* The actual type and interpretation of the switch handle is specified
* by the architecture. It is the same data structure stored in the
* "switch_handle" field of a newly-created thread in arch_new_thread(),
* and passed to the kernel as the "interrupted" argument to
* z_get_next_switch_handle().
*
* Note that on SMP systems, the kernel uses the store through the
* second pointer as a synchronization point to detect when a thread
* context is completely saved (so another CPU can know when it is
* safe to switch). This store must be done AFTER all relevant state
* is saved, and must include whatever memory barriers or cache
* management code is required to be sure another CPU will see the
* result correctly.
*
* The simplest implementation of arch_switch() is generally to push
* state onto the thread stack and use the resulting stack pointer as the
* switch handle. Some architectures may instead decide to use a pointer
* into the thread struct as the "switch handle" type. These can legally
* assume that the second argument to arch_switch() is the address of the
* switch_handle field of struct thread_base and can use an offset on
* this value to find other parts of the thread struct. For example a (C
* pseudocode) implementation of arch_switch() might look like:
*
* void arch_switch(void *switch_to, void **switched_from)
* {
* struct k_thread *new = switch_to;
* struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
* switch_handle);
*
* // save old context...
* *switched_from = old;
* // restore new context...
* }
*
* Note that the kernel manages the switch_handle field for
* synchronization as described above. So it is not legal for
* architecture code to assume that it has any particular value at any
* other time. In particular it is not legal to read the field from the
* address passed in the second argument.
*
* @param switch_to Incoming thread's switch handle
* @param switched_from Pointer to outgoing thread's switch handle storage
* location, which must be updated.
*/
static inline void arch_switch(void *switch_to, void **switched_from);
#else
/**
* Cooperatively context switch
*
* Must be called with interrupts locked with the provided key.
* This is the older-style context switching method, which is incompatible
* with SMP. New arch ports, either SMP or UP, are encouraged to implement
* arch_switch() instead.
*
* @param key Interrupt locking key
* @return If woken from blocking on some kernel object, the result of that
* blocking operation.
*/
int arch_swap(unsigned int key);
/**
* Set the return value for the specified thread.
*
* It is assumed that the specified @a thread is pending.
*
* @param thread Pointer to thread object
* @param value value to set as return value
*/
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
#endif /* CONFIG_USE_SWITCH */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
* Custom logic for entering main thread context at early boot
*
* Used by architectures where the typical trick of setting up a dummy thread
* in early boot context to "switch out" of isn't workable.
*
* @param main_thread main thread object
* @param stack_ptr Initial stack pointer
* @param _main Entry point for application main function.
*/
void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
k_thread_entry_t _main);
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
* @brief Disable floating point context preservation
*
* The function is used to disable the preservation of floating
* point context information for a particular thread.
*
* @note For ARM architecture, disabling floating point preservation may only
* be requested for the current thread and cannot be requested in ISRs.
*
* @retval 0 On success.
* @retval -EINVAL If the floating point disabling could not be performed.
* @retval -ENOTSUP If the operation is not supported
*/
int arch_float_disable(struct k_thread *thread);
/**
* @brief Enable floating point context preservation
*
* The function is used to enable the preservation of floating
* point context information for a particular thread.
* This API depends on each architecture implementation. If the architecture
* does not support enabling, this API will always be failed.
*
* The @a options parameter indicates which floating point register sets will
* be used by the specified thread. Currently it is used by x86 only.
*
* @param thread ID of thread.
* @param options architecture dependent options
*
* @retval 0 On success.
* @retval -EINVAL If the floating point enabling could not be performed.
* @retval -ENOTSUP If the operation is not supported
*/
int arch_float_enable(struct k_thread *thread, unsigned int options);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
/** @} */
/**
* @defgroup arch-pm Architecture-specific power management APIs
* @ingroup arch-interface
* @{
*/
/** Halt the system, optionally propagating a reason code */
FUNC_NORETURN void arch_system_halt(unsigned int reason);
/** @} */
/**
* @defgroup arch-irq Architecture-specific IRQ APIs
* @ingroup arch-interface
* @{
*/
/**
* Test if the current context is in interrupt context
*
* XXX: This is inconsistently handled among arches wrt exception context
* See: #17656
*
* @return true if we are in interrupt context
*/
static inline bool arch_is_in_isr(void);
/** @} */
/**
* @defgroup arch-mmu Architecture-specific memory-mapping APIs
* @ingroup arch-interface
* @{
*/
/**
* Map physical memory into the virtual address space
*
* This is a low-level interface to mapping pages into the address space.
* Behavior when providing unaligned addresses/sizes is undefined, these
* are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
*
* The core kernel handles all management of the virtual address space;
* by the time we invoke this function, we know exactly where this mapping
* will be established. If the page tables already had mappings installed
* for the virtual memory region, these will be overwritten.
*
* If the target architecture supports multiple page sizes, currently
* only the smallest page size will be used.
*
* The memory range itself is never accessed by this operation.
*
* This API must be safe to call in ISRs or exception handlers. Calls
* to this API are assumed to be serialized, and indeed all usage will
* originate from kernel/mm.c which handles virtual memory management.
*
* Architectures are expected to pre-allocate page tables for the entire
* address space, as defined by CONFIG_KERNEL_VM_BASE and
* CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
* allocation for paging structures.
*
* Validation of arguments should be done via assertions.
*
* This API is part of infrastructure still under development and may
* change.
*
* @param virt Page-aligned Destination virtual address to map
* @param phys Page-aligned Source physical address to map
* @param size Page-aligned size of the mapped memory region in bytes
* @param flags Caching, access and control flags, see K_MAP_* macros
*/
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);
/**
* Remove mappings for a provided virtual address range
*
* This is a low-level interface for un-mapping pages from the address space.
* When this completes, the relevant page table entries will be updated as
* if no mapping was ever made for that memory range. No previous context
* needs to be preserved. This function must update mappings in all active
* page tables.
*
* Behavior when providing unaligned addresses/sizes is undefined, these
* are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
*
* Behavior when providing an address range that is not already mapped is
* undefined.
*
* This function should never require memory allocations for paging structures,
* and it is not necessary to free any paging structures. Empty page tables
* due to all contained entries being un-mapped may remain in place.
*
* Implementations must invalidate TLBs as necessary.
*
* This API is part of infrastructure still under development and may change.
*
* @param addr Page-aligned base virtual address to un-map
* @param size Page-aligned region size
*/
void arch_mem_unmap(void *addr, size_t size);
/**
* Get the mapped physical memory address from virtual address.
*
* The function only needs to query the current set of page tables as
* the information it reports must be common to all of them if multiple
* page tables are in use. If multiple page tables are active it is unnecessary
* to iterate over all of them.
*
* Unless otherwise specified, virtual pages have the same mappings
* across all page tables. Calling this function on data pages that are
* exceptions to this rule (such as the scratch page) is undefined behavior.
* Just check the currently installed page tables and return the information
* in that.
*
* @param virt Page-aligned virtual address
* @param[out] phys Mapped physical address (can be NULL if only checking
* if virtual address is mapped)
*
* @retval 0 if mapping is found and valid
* @retval -EFAULT if virtual address is not mapped
*/
int arch_page_phys_get(void *virt, uintptr_t *phys);
/**
* Update page frame database with reserved pages
*
* Some page frames within system RAM may not be available for use. A good
* example of this is reserved regions in the first megabyte on PC-like systems.
*
* Implementations of this function should mark all relevant entries in
* k_mem_page_frames with K_PAGE_FRAME_RESERVED. This function is called at
* early system initialization with mm_lock held.
*/
void arch_reserved_pages_update(void);
/**
* Update all page tables for a paged-out data page
*
* This function:
* - Sets the data page virtual address to trigger a fault if accessed that
* can be distinguished from access violations or un-mapped pages.
 * - Saves the provided location value so that it can be retrieved for that
 *   data page in the page fault handler.
 *   - The location value semantics are undefined here but the value will
 *     always be page-aligned. It could be 0.
*
* If multiple page tables are in use, this must update all page tables.
* This function is called with interrupts locked.
*
* Calling this function on data pages which are already paged out is
* undefined behavior.
*
* This API is part of infrastructure still under development and may change.
*/
void arch_mem_page_out(void *addr, uintptr_t location);
/**
* Update all page tables for a paged-in data page
*
* This function:
* - Maps the specified virtual data page address to the provided physical
* page frame address, such that future memory accesses will function as
* expected. Access and caching attributes are undisturbed.
* - Clears any accounting for "accessed" and "dirty" states.
*
* If multiple page tables are in use, this must update all page tables.
* This function is called with interrupts locked.
*
* Calling this function on data pages which are already paged in is
* undefined behavior.
*
* This API is part of infrastructure still under development and may change.
*/
void arch_mem_page_in(void *addr, uintptr_t phys);
/**
* Update current page tables for a temporary mapping
*
* Map a physical page frame address to a special virtual address
* K_MEM_SCRATCH_PAGE, with read/write access to supervisor mode, such that
* when this function returns, the calling context can read/write the page
* frame's contents from the K_MEM_SCRATCH_PAGE address.
*
* This mapping only needs to be done on the current set of page tables,
* as it is only used for a short period of time exclusively by the caller.
* This function is called with interrupts locked.
*
* This API is part of infrastructure still under development and may change.
*/
void arch_mem_scratch(uintptr_t phys);
/**
* Status of a particular page location.
*/
enum arch_page_location {
	/** The page has been evicted to the backing store. */
	ARCH_PAGE_LOCATION_PAGED_OUT,

	/** The page is resident in memory. */
	ARCH_PAGE_LOCATION_PAGED_IN,

	/** The page is un-mapped or the access was otherwise invalid. */
	ARCH_PAGE_LOCATION_BAD
};
/**
* Fetch location information about a page at a particular address
*
* The function only needs to query the current set of page tables as
* the information it reports must be common to all of them if multiple
* page tables are in use. If multiple page tables are active it is unnecessary
* to iterate over all of them. This may allow certain types of optimizations
* (such as reverse page table mapping on x86).
*
* This function is called with interrupts locked, so that the reported
* information can't become stale while decisions are being made based on it.
*
* Unless otherwise specified, virtual data pages have the same mappings
* across all page tables. Calling this function on data pages that are
* exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * from them.
*
* @param addr Virtual data page address that took the page fault
* @param [out] location In the case of ARCH_PAGE_LOCATION_PAGED_OUT, the backing
* store location value used to retrieve the data page. In the case of
* ARCH_PAGE_LOCATION_PAGED_IN, the physical address the page is mapped to.
* @retval ARCH_PAGE_LOCATION_PAGED_OUT The page was evicted to the backing store.
* @retval ARCH_PAGE_LOCATION_PAGED_IN The data page is resident in memory.
* @retval ARCH_PAGE_LOCATION_BAD The page is un-mapped or otherwise has had
* invalid access
*/
enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location);
/**
* @def ARCH_DATA_PAGE_ACCESSED
*
* Bit indicating the data page was accessed since the value was last cleared.
*
* Used by marking eviction algorithms. Safe to set this if uncertain.
*
* This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
*/
/**
* @def ARCH_DATA_PAGE_DIRTY
*
* Bit indicating the data page, if evicted, will need to be paged out.
*
* Set if the data page was modified since it was last paged out, or if
* it has never been paged out before. Safe to set this if uncertain.
*
* This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
*/
/**
* @def ARCH_DATA_PAGE_LOADED
*
* Bit indicating that the data page is loaded into a physical page frame.
*
* If un-set, the data page is paged out or not mapped.
*/
/**
* @def ARCH_DATA_PAGE_NOT_MAPPED
*
* If ARCH_DATA_PAGE_LOADED is un-set, this will indicate that the page
* is not mapped at all. This bit is undefined if ARCH_DATA_PAGE_LOADED is set.
*/
/**
* Retrieve page characteristics from the page table(s)
*
* The architecture is responsible for maintaining "accessed" and "dirty"
* states of data pages to support marking eviction algorithms. This can
* either be directly supported by hardware or emulated by modifying
* protection policy to generate faults on reads or writes. In all cases
* the architecture must maintain this information in some way.
*
* For the provided virtual address, report the logical OR of the accessed
* and dirty states for the relevant entries in all active page tables in
* the system if the page is mapped and not paged out.
*
* If clear_accessed is true, the ARCH_DATA_PAGE_ACCESSED flag will be reset.
* This function will report its prior state. If multiple page tables are in
* use, this function clears accessed state in all of them.
*
* This function is called with interrupts locked, so that the reported
* information can't become stale while decisions are being made based on it.
*
* The return value may have other bits set which the caller must ignore.
*
* Clearing accessed state for data pages that are not ARCH_DATA_PAGE_LOADED
* is undefined behavior.
*
* ARCH_DATA_PAGE_DIRTY and ARCH_DATA_PAGE_ACCESSED bits in the return value
* are only significant if ARCH_DATA_PAGE_LOADED is set, otherwise ignore
* them.
*
* ARCH_DATA_PAGE_NOT_MAPPED bit in the return value is only significant
* if ARCH_DATA_PAGE_LOADED is un-set, otherwise ignore it.
*
* Unless otherwise specified, virtual data pages have the same mappings
* across all page tables. Calling this function on data pages that are
* exceptions to this rule (such as the scratch page) is undefined behavior.
*
* This API is part of infrastructure still under development and may change.
*
* @param addr Virtual address to look up in page tables
* @param [out] location If non-NULL, updated with either physical page frame
* address or backing store location depending on
* ARCH_DATA_PAGE_LOADED state. This is not touched if
* ARCH_DATA_PAGE_NOT_MAPPED.
* @param clear_accessed Whether to clear ARCH_DATA_PAGE_ACCESSED state
 * @return Value with ARCH_DATA_PAGE_* bits set reflecting the data page
 * configuration
*/
uintptr_t arch_page_info_get(void *addr, uintptr_t *location,
bool clear_accessed);
/** @} */
/**
* @defgroup arch-misc Miscellaneous architecture APIs
* @ingroup arch-interface
* @{
*/
/**
* Early boot console output hook
*
* Definition of this function is optional. If implemented, any invocation
* of printk() (or logging calls with CONFIG_LOG_MODE_MINIMAL which are backed by
* printk) will default to sending characters to this function. It is
* useful for early boot debugging before main serial or console drivers
* come up.
*
* This can be overridden at runtime with __printk_hook_install().
*
* The default __weak implementation of this does nothing.
*
* @param c Character to print
* @return The character printed
*/
int arch_printk_char_out(int c);
/**
* Architecture-specific kernel initialization hook
*
* This function is invoked near the top of z_cstart, for additional
* architecture-specific setup before the rest of the kernel is brought up.
*/
static inline void arch_kernel_init(void);
/** Do nothing and return. Yawn. */
static inline void arch_nop(void);
/** @} */
/**
* @defgroup arch-coredump Architecture-specific core dump APIs
* @ingroup arch-interface
* @{
*/
/**
* @brief Architecture-specific handling during coredump
*
* This dumps architecture-specific information during coredump.
*
* @param esf Exception Stack Frame (arch-specific)
*/
void arch_coredump_info_dump(const struct arch_esf *esf);
/**
* @brief Get the target code specified by the architecture.
*/
uint16_t arch_coredump_tgt_code_get(void);
/** @} */
/**
* @defgroup arch-tls Architecture-specific Thread Local Storage APIs
* @ingroup arch-interface
* @{
*/
/**
* @brief Setup Architecture-specific TLS area in stack
*
* This sets up the stack area for thread local storage.
* The structure inside TLS area is architecture specific.
*
* @param new_thread New thread object
* @param stack_ptr Stack pointer
* @return Number of bytes taken by the TLS area
*/
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr);
/** @} */
/* Include arch-specific inline function implementation */
#include <kernel_arch_func.h>
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_ */
``` | /content/code_sandbox/kernel/include/kernel_arch_interface.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,795 |
```unknown
config MIPI_SYST_LIB
bool "MIPI SyS-T Library Support"
help
This option enables the MIPI SyS-T Library
if MIPI_SYST_LIB
config MIPI_SYST_STP
bool "STP Transport Layer for MIPI SyS-T"
help
This option enables support for the STP
Transport Layer for MIPI SyS-T
config MIPI_SYST_RAW_DATA
bool "output MIPI SyS-T raw data packet"
help
This option outputs MIPI SyS-T raw data packet
config MIPI_SYST_NO_WHCAR
bool
default y if MINIMAL_LIBC
help
Tell the MIPI SyS-T library not to build with
wchar support.
endif
``` | /content/code_sandbox/modules/Kconfig.syst | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 158 |
```cmake
# For every in-tree module directory that ships a CMakeLists.txt, export
# ZEPHYR_<MODULE>_CMAKE_DIR pointing at that directory so the build system
# can locate the module's CMake integration.
file(GLOB module_cmake_files "${CMAKE_CURRENT_LIST_DIR}/*/CMakeLists.txt")
foreach(cmake_file ${module_cmake_files})
  get_filename_component(dir ${cmake_file} DIRECTORY)
  get_filename_component(name ${dir} NAME)
  zephyr_string(SANITIZE TOUPPER MODULE_NAME_UPPER ${name})
  set(ZEPHYR_${MODULE_NAME_UPPER}_CMAKE_DIR ${dir})
endforeach()

# Likewise, export ZEPHYR_<MODULE>_KCONFIG for every module directory that
# ships a Kconfig file.
file(GLOB module_kconfig_files "${CMAKE_CURRENT_LIST_DIR}/*/Kconfig")
foreach(kconfig_file ${module_kconfig_files})
  get_filename_component(dir ${kconfig_file} DIRECTORY)
  get_filename_component(name ${dir} NAME)
  zephyr_string(SANITIZE TOUPPER MODULE_NAME_UPPER ${name})
  set(ZEPHYR_${MODULE_NAME_UPPER}_KCONFIG ${dir}/Kconfig)
endforeach()
``` | /content/code_sandbox/modules/modules.cmake | cmake | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 177 |
```unknown
# NXP S32 MCUs family
config HAS_NXP_S32_HAL
bool
select HAS_CMSIS_CORE
depends on SOC_FAMILY_NXP_S32
``` | /content/code_sandbox/modules/Kconfig.nxp_s32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 37 |
```unknown
#
# Author: Parthiban Nallathambi <parthiban@linumiz.com>
config HAS_XMCLIB
bool
select HAS_CMSIS_CORE
depends on SOC_FAMILY_INFINEON_XMC
if HAS_XMCLIB
config HAS_XMCLIB_UART
bool
help
Enable XMCLIB Universal asynchronous receiver transmitter (UART)
config HAS_XMCLIB_FLASH
bool
help
Enable XMCLIB Flash
config HAS_XMCLIB_ERU
bool
help
Enable XMCLIB Event Request Unit (ERU) for GPIO interrupt support
config HAS_XMCLIB_VADC
bool
help
Enable XMCLIB VADC
config HAS_XMCLIB_DMA
bool
help
Enable XMCLIB DMA
config HAS_XMCLIB_SPI
bool
help
Enable XMCLIB SPI
config HAS_XMCLIB_I2C
bool
help
Enable XMCLIB I2C
config HAS_XMCLIB_CCU
bool
help
Enable XMCLIB CCU4/CCU8
config HAS_XMCLIB_WDT
bool
help
Enable XMCLIB WDT
config HAS_XMCLIB_ETH
bool
help
Enable XMCLIB Ethernet MAC
config HAS_XMCLIB_CAN
bool
help
Enable XMCLIB CAN
endif # HAS_XMCLIB
``` | /content/code_sandbox/modules/Kconfig.infineon | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 305 |
```unknown
# Renesas FSP HAL config
config HAS_RENESAS_RA_FSP
bool
help
Enable Renesas RA FSP support
config USE_RA_FSP_SCI_B_UART
bool
help
Enable RA FSP SCI-B UART driver
config USE_RA_FSP_DTC
bool
help
Enable RA FSP DTC driver
config USE_RA_FSP_I2C_IIC
bool
help
Enable Renesas RA I2C IIC Master driver
config USE_RA_FSP_SCI_UART
bool
help
Enable RA FSP SCI UART driver
config USE_RA_FSP_ADC
bool
help
Enable RA FSP ADC driver
``` | /content/code_sandbox/modules/Kconfig.renesas_fsp | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 147 |
```unknown
config HAS_ESPRESSIF_HAL
bool
depends on SOC_FAMILY_ESPRESSIF_ESP32
``` | /content/code_sandbox/modules/Kconfig.esp32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 23 |
```unknown
#
comment "Available modules."
source "$(KCONFIG_BINARY_DIR)/Kconfig.sysbuild.modules"
comment "Unavailable modules, please install those via the project manifest."
# List of comments to display when Zephyr modules are not available, please
# use the following syntax:
# ---------------------------------------------------
# comment "<module_name> module not available."
# depends on !SYSBUILD_<MODULE_NAME_UPPER>_MODULE
#
# Remember to add the following code inside the `<module>/Kconfig file:
# ---------------------------------------------------
# config SYSBUILD_<MODULE_NAME_UPPER>_MODULE
# bool
# This ensures that symbols are available in Kconfig for dependency checking
# and referencing, while keeping the settings themselves unavailable when the
# modules are not present in the workspace
if 0
osource "modules/*/Kconfig.sysbuild"
endif
``` | /content/code_sandbox/modules/Kconfig.sysbuild | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 170 |
```unknown
# Altera HAL drivers configuration
config HAS_ALTERA_HAL
bool "Altera HAL drivers support"
depends on NIOS2
``` | /content/code_sandbox/modules/Kconfig.altera | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 30 |
```unknown
config HAS_TELINK_DRIVERS
bool "Telink Drivers"
help
This option enables Telink Drivers APIs.
``` | /content/code_sandbox/modules/Kconfig.telink | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 27 |
```unknown
# Cryptography primitive options for TinyCrypt version 2.0
config ZEPHYR_TINYCRYPT_MODULE
bool
config TINYCRYPT
bool "TinyCrypt Support"
depends on ZEPHYR_TINYCRYPT_MODULE
help
This option enables the TinyCrypt cryptography library.
if TINYCRYPT
config TINYCRYPT_CTR_PRNG
bool "PRNG in counter mode"
help
This option enables support for the pseudo-random number
generator in counter mode.
config TINYCRYPT_SHA256
bool "SHA-256 Hash function support"
help
This option enables support for SHA-256
hash function primitive.
config TINYCRYPT_SHA256_HMAC
bool "HMAC (via SHA256) message auth support"
depends on TINYCRYPT_SHA256
help
This option enables support for HMAC using SHA-256
message authentication code.
config TINYCRYPT_SHA256_HMAC_PRNG
bool "PRNG (via HMAC-SHA256) support"
depends on TINYCRYPT_SHA256_HMAC
help
This option enables support for pseudo-random number
generator.
config TINYCRYPT_ECC_DH
bool "ECC_DH anonymous key agreement protocol"
help
This option enables support for the Elliptic curve
Diffie-Hellman anonymous key agreement protocol.
Enabling ECC requires a cryptographically secure random number
generator.
config TINYCRYPT_ECC_DSA
bool "ECC_DSA digital signature algorithm"
help
This option enables support for the Elliptic Curve Digital
Signature Algorithm (ECDSA).
Enabling ECC requires a cryptographically secure random number
generator.
config TINYCRYPT_AES
bool "AES-128 decrypt/encrypt"
help
This option enables support for AES-128 decrypt and encrypt.
config TINYCRYPT_AES_CBC
bool "AES-128 block cipher"
depends on TINYCRYPT_AES
help
This option enables support for AES-128 block cipher mode.
config TINYCRYPT_AES_CTR
bool "AES-128 counter mode"
depends on TINYCRYPT_AES
help
This option enables support for AES-128 counter mode.
config TINYCRYPT_AES_CCM
bool "AES-128 CCM mode"
depends on TINYCRYPT_AES
help
This option enables support for AES-128 CCM mode.
config TINYCRYPT_AES_CMAC
bool "AES-128 CMAC mode"
depends on TINYCRYPT_AES
help
This option enables support for AES-128 CMAC mode.
endif
``` | /content/code_sandbox/modules/Kconfig.tinycrypt | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 538 |
```unknown
config INTEL_HAL
bool
help
Build the Intel HAL module during build process.
This is selected by the ARCH kconfig automatically.
``` | /content/code_sandbox/modules/Kconfig.intel | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 32 |
```unknown
config ZEPHYR_LIBMETAL_MODULE
bool
menuconfig LIBMETAL
bool "libmetal Support"
depends on ZEPHYR_LIBMETAL_MODULE
help
This option enables the libmetal HAL abstraction layer
config LIBMETAL_SRC_PATH
string "libmetal library source path"
default "libmetal"
depends on LIBMETAL
help
This option specifies the path to the source for the libmetal library
``` | /content/code_sandbox/modules/Kconfig.libmetal | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 95 |
```unknown
config ZEPHYR_OPEN_AMP_MODULE
bool
config OPENAMP
bool "OpenAMP Support"
depends on ZEPHYR_OPEN_AMP_MODULE
select LIBMETAL
help
This option enables the OpenAMP IPC library
if OPENAMP
config OPENAMP_SRC_PATH
string "OpenAMP library source path"
default "open-amp"
depends on OPENAMP
help
This option specifies the path to the source for the open-amp library
config OPENAMP_MASTER
bool "OpenAMP Master Support"
default y
help
This option enables support for OpenAMP VirtIO Master
config OPENAMP_SLAVE
bool "OpenAMP Slave Support"
default y
help
This option enables support for OpenAMP VirtIO Slave
config OPENAMP_WITH_DCACHE
bool "Build OpenAMP with vrings cache operations enabled"
depends on CACHE_MANAGEMENT
help
Build OpenAMP with vrings cache operations enabled.
endif # OPENAMP
``` | /content/code_sandbox/modules/Kconfig.open-amp | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 203 |
```unknown
# QuickLogic HAL
config EOS_S3_HAL
bool
depends on SOC_EOS_S3
``` | /content/code_sandbox/modules/Kconfig.eos_s3 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 23 |
```unknown
config ZEPHYR_PICOLIBC_MODULE
bool
``` | /content/code_sandbox/modules/Kconfig.picolibc | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 15 |
```unknown
# Microchip MEC HAL drivers configuration
config HAS_MEC_HAL
bool "Microchip MEC HAL drivers support"
config HAS_MPFS_HAL
bool "Microchip MPFS HAL drivers support"
config HAS_MEC5_HAL
bool "Microchip MEC5 HAL drivers support"
``` | /content/code_sandbox/modules/Kconfig.microchip | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 60 |
```unknown
config ZEPHYR_CHRE_MODULE
bool
``` | /content/code_sandbox/modules/Kconfig.chre | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 13 |
```unknown
config XTENSA_HAL
bool
help
Build the Xtensa HAL module during build process.
This is selected by the Xtensa ARCH kconfig automatically.
``` | /content/code_sandbox/modules/Kconfig.xtensa | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 36 |
```unknown
# IMX M4 Core SDK
config HAS_IMX_HAL
bool
select HAS_CMSIS_CORE
depends on SOC_FAMILY_NXP_IMX
if HAS_IMX_HAL
config HAS_IMX_GPIO
bool
help
Set if the GPIO module is present in the SoC.
config HAS_IMX_I2C
bool
help
Set if the I2C module is present in the SoC.
config HAS_IMX_EPIT
bool
help
Set if the EPIT module is present in the SoC.
config HAS_IMX_IOMUXC
bool
help
Set if the IOMUXC module is present in the SoC.
endif # HAS_IMX_HAL
``` | /content/code_sandbox/modules/Kconfig.imx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 149 |
```unknown
config TAINT_BLOBS
bool
select TAINT
help
This option is selected when binary blobs are present locally at
build time to reflect that the build might have been tainted by them.
comment "Available modules."
osource "$(KCONFIG_BINARY_DIR)/Kconfig.modules"
source "modules/Kconfig.altera"
source "modules/Kconfig.atmel"
source "modules/Kconfig.chre"
source "modules/Kconfig.cypress"
source "modules/Kconfig.eos_s3"
source "modules/Kconfig.esp32"
source "modules/Kconfig.imx"
source "modules/Kconfig.infineon"
source "modules/Kconfig.libmetal"
source "modules/lvgl/Kconfig"
source "modules/Kconfig.mcux"
source "modules/Kconfig.microchip"
source "modules/Kconfig.nuvoton"
source "modules/Kconfig.open-amp"
source "modules/Kconfig.picolibc"
source "modules/Kconfig.nxp_s32"
source "modules/Kconfig.renesas_fsp"
source "modules/Kconfig.silabs"
source "modules/Kconfig.simplelink"
source "modules/Kconfig.sof"
source "modules/Kconfig.stm32"
source "modules/Kconfig.syst"
source "modules/Kconfig.telink"
source "modules/thrift/Kconfig"
source "modules/Kconfig.tinycrypt"
source "modules/Kconfig.vega"
source "modules/Kconfig.wurthelektronik"
source "modules/Kconfig.xtensa"
source "modules/zcbor/Kconfig"
source "modules/Kconfig.mcuboot"
source "modules/Kconfig.intel"
source "modules/hostap/Kconfig"
comment "Unavailable modules, please install those via the project manifest."
# List of comments to display when Zephyr modules are not available, please
# use the following syntax:
# ---------------------------------------------------
# comment "<module_name> module not available."
# depends on !ZEPHYR_<MODULE_NAME_UPPER>_MODULE
#
# Remember to add the following code inside the `<module>/Kconfig file:
# ---------------------------------------------------
# config ZEPHYR_<MODULE_NAME_UPPER>_MODULE
# bool
comment "hal_gigadevice module not available."
depends on !ZEPHYR_HAL_GIGADEVICE_MODULE
comment "hal_nordic module not available."
depends on !ZEPHYR_HAL_NORDIC_MODULE
comment "liblc3 module not available."
depends on !ZEPHYR_LIBLC3_MODULE
comment "LittleFS module not available."
depends on !ZEPHYR_LITTLEFS_MODULE
comment "mbedtls module not available."
depends on !ZEPHYR_MBEDTLS_MODULE
comment "Trusted-firmware-m module not available."
depends on !ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
comment "Trusted-firmware-a module not available."
depends on !ZEPHYR_TRUSTED_FIRMWARE_A_MODULE
comment "Nanopb module not available."
depends on !ZEPHYR_NANOPB_MODULE
comment "Lz4 module not available."
depends on !ZEPHYR_LZ4_MODULE
comment "loramac-node module not available."
depends on !ZEPHYR_LORAMAC_NODE_MODULE
comment "CANopenNode module not available."
depends on !ZEPHYR_CANOPENNODE_MODULE
comment "zcbor module not available."
depends on !ZEPHYR_ZCBOR_MODULE
comment "CHRE module not available."
depends on !ZEPHYR_CHRE_MODULE
comment "THRIFT module not available."
depends on !ZEPHYR_THRIFT_MODULE
comment "Segger module not available."
depends on !ZEPHYR_SEGGER_MODULE
comment "LVGL module not available."
depends on !ZEPHYR_LVGL_MODULE
comment "cmsis module not available."
depends on !ZEPHYR_CMSIS_MODULE
comment "cmsis-dsp module not available."
depends on !ZEPHYR_CMSIS_DSP_MODULE
comment "cmsis-nn module not available."
depends on !ZEPHYR_CMSIS_NN_MODULE
# This ensures that symbols are available in Kconfig for dependency checking
# and referencing, while keeping the settings themselves unavailable when the
# modules are not present in the workspace
if 0
osource "modules/*/Kconfig"
endif
``` | /content/code_sandbox/modules/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 929 |
```unknown
#
# Author: Saravanan Sekar <saravanan@linumiz.com>
config HAS_NUMICRO_HAL
bool
select HAS_CMSIS_CORE
depends on SOC_FAMILY_NUMICRO
menu "Nuvoton drivers"
depends on HAS_NUMICRO_HAL
config HAS_NUMICRO_UART
bool "NuMicro UART"
help
Enable Nuvoton Universal asynchronous receiver transmitter HAL
module driver
endmenu
config HAS_NUMAKER_HAL
bool
select HAS_CMSIS_CORE
depends on SOC_FAMILY_NUMAKER
menu "Nuvoton NuMaker drivers"
depends on HAS_NUMAKER_HAL
config HAS_NUMAKER_UART
bool "NuMaker UART"
help
Enable Nuvoton Universal asynchronous receiver transmitter HAL
module driver
config HAS_NUMAKER_GPIO
bool "NuMaker GPIO"
help
Enable Nuvoton gpio HAL module driver
config HAS_NUMAKER_FMC
bool "NuMaker FMC"
help
Enable Nuvoton FMC HAL module driver
config HAS_NUMAKER_I2C
bool "NuMaker I2C"
help
Enable Nuvoton I2C HAL module driver
config HAS_NUMAKER_SPI
bool "NuMaker SPI"
help
Enable Nuvoton SPI HAL module driver
config HAS_NUMAKER_PWM
bool "NuMaker PWM"
help
Enable Nuvoton PWM HAL module driver
config HAS_NUMAKER_USBD
bool "NuMaker USB 1.1 device controller"
help
Enable Nuvoton USB 1.1 device controller HAL module driver
config HAS_NUMAKER_HSUSBD
bool "NuMaker high-speed USB 2.0 device controller"
help
Enable Nuvoton high-speed USB 2.0 device controller HAL module driver
config HAS_NUMAKER_ETH
bool "NuMaker ETH"
help
Enable Nuvoton ETH EMAC HAL module driver
config HAS_NUMAKER_CANFD
bool "NuMaker CAN FD"
help
Enable Nuvoton CAN FD HAL module driver
config HAS_NUMAKER_ADC
bool "NuMaker ADC"
help
Enable Nuvoton ADC HAL module driver
config HAS_NUMAKER_RMC
bool "NuMaker RMC"
help
Enable Nuvoton RMC HAL module driver
config HAS_NUMAKER_RTC
bool "NuMaker RTC"
help
Enable Nuvoton RTC HAL module driver
config HAS_NUMAKER_TMR
bool "NuMaker Timer"
help
Enable Nuvoton Timer HAL module driver
endmenu
``` | /content/code_sandbox/modules/Kconfig.nuvoton | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 555 |
```unknown
config HAS_CYPRESS_DRIVERS
bool
select HAS_CMSIS_CORE
``` | /content/code_sandbox/modules/Kconfig.cypress | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 19 |
```unknown
# STM32CUBE HAL config
config HAS_STM32LIB
bool
config HAS_STM32CUBE
bool
select HAS_CMSIS_CORE
depends on SOC_FAMILY_STM32
if HAS_STM32CUBE
config USE_STM32_HAL_ADC
bool
help
Enable STM32Cube Analog-to-Digital Converter (ADC) HAL module driver
config USE_STM32_HAL_ADC_EX
bool
help
Enable STM32Cube Extended Analog-to-Digital Converter (ADC) HAL
module driver
config USE_STM32_HAL_CAN
bool
help
Enable STM32Cube Controller Area Network (CAN) HAL module driver
config USE_STM32_HAL_CEC
bool
help
Enable STM32Cube HDMI-CEC controller (CEC) HAL module driver
config USE_STM32_HAL_COMP
bool
help
Enable STM32Cube Ultra Low Power Comparator channels (COMP) HAL module
driver
config USE_STM32_HAL_CORDIC
bool
help
Enable STM32Cube CORDIC co-processor (CORDIC) functions HAL module
driver
config USE_STM32_HAL_CORTEX
bool
help
Enable STM32Cube CORTEX HAL module driver
config USE_STM32_HAL_CRC
bool
help
Enable STM32Cube Cyclic redundancy check calculation unit (CRC) HAL
module driver
config USE_STM32_HAL_CRC_EX
bool
help
Enable STM32Cube Extended Cyclic redundancy check calculation unit
(CRC) HAL module driver
config USE_STM32_HAL_CRYP
bool
help
Enable STM32Cube Cryptographic processor (CRYP) HAL module driver
config USE_STM32_HAL_CRYP_EX
bool
help
Enable STM32Cube Extended Cryptographic processor (CRYP) HAL module
driver
config USE_STM32_HAL_DAC
bool
help
Enable STM32Cube Digital-to-analog converter (DAC) HAL module driver
config USE_STM32_HAL_DAC_EX
bool
help
Enable STM32Cube Extended Digital-to-analog converter (DAC) HAL module
driver
config USE_STM32_HAL_DCACHE
bool
help
Enable STM32Cube data cache (DCACHE) HAL module driver
config USE_STM32_HAL_DCMI
bool
help
Enable STM32Cube Digital camera interface (DCM) HAL module driver
config USE_STM32_HAL_DCMI_EX
bool
help
Enable STM32Cube Extended Digital camera interface (DCM) HAL module
driver
config USE_STM32_HAL_DFSDM
bool
help
Enable STM32Cube Digital filter for sigma delta modulators (DFSDM) HAL
module driver
config USE_STM32_HAL_DFSDM_EX
bool
help
Enable STM32Cube Extended Digital filter for sigma delta modulators
(DFSDM) HAL module driver
config USE_STM32_HAL_DMA
bool
help
Enable STM32Cube Direct Memory Access controller (DMA) HAL module
driver
config USE_STM32_HAL_DMA2D
bool
help
Enable STM32Cube Chrom-Art Accelerator controller (DMA2D) HAL module
driver
config USE_STM32_HAL_DMA_EX
bool
help
Enable STM32Cube Extended Direct Memory Access controller (DMA) HAL
module driver
config USE_STM32_HAL_DSI
bool
help
Enable STM32Cube Display Serial Interface Host (DSI) HAL module driver
config USE_STM32_HAL_DTS
bool
help
Enable STM32Cube Digital temperature sensor (DTS) HAL module driver
config USE_STM32_HAL_ETH
bool
help
Enable STM32Cube Ethernet (ETH) HAL module driver
config USE_STM32_HAL_ETH_EX
bool
help
Enable STM32Cube Extended Ethernet (ETH) HAL module driver
config USE_STM32_HAL_EXTI
bool
help
Enable STM32Cube Extended interrupt and event controller (EXTI) HAL
module driver
config USE_STM32_HAL_FDCAN
bool
help
Enable STM32Cube Controller area network with flexible data rate
(FDCAN) HAL module driver
config USE_STM32_HAL_FIREWALL
bool
help
Enable STM32Cube Firewall HAL module driver
config USE_STM32_HAL_FLASH
bool
help
Enable STM32Cube Embedded Flash Memory (FLASH) HAL module driver
config USE_STM32_HAL_FLASH_EX
bool
help
Enable STM32Cube Extended Embedded Flash Memory (FLASH) HAL module
driver
config USE_STM32_HAL_FLASH_RAMFUNC
bool
help
Enable STM32Cube Embedded Flash Memory RAM functions (FLASH_RAMFUNC)
HAL module driver
config USE_STM32_HAL_FMAC
bool
help
Enable STM32Cube Filter Math Accelerator (FMAC) HAL module driver
config USE_STM32_HAL_FMPI2C
bool
help
Enable STM32Cube Fast-mode Plus Inter-integrated circuit (FMPI2C)
HAL module driver
config USE_STM32_HAL_FMPI2C_EX
bool
help
Enable STM32Cube Extended Fast-mode Plus Inter-integrated circuit
(FMPI2C) HAL module driver
config USE_STM32_HAL_GFXMMU
bool
help
Enable STM32Cube Chrom-GRCTM (GFXMMU) HAL module driver
config USE_STM32_HAL_GPIO
bool
help
Enable STM32Cube General-purpose I/Os (GPIO) HAL module driver
config USE_STM32_HAL_GPIO_EX
bool
help
Enable STM32Cube Extended General-purpose I/Os (GPIO) HAL module
driver
config USE_STM32_HAL_GPU2D
bool
help
Enable STM32Cube Neo-Chrom graphic processor (GPU2D) HAL module
driver
config USE_STM32_HAL_GTZC
bool
help
Enable STM32Cube Global TrustZone controller (GTZC) HAL module
driver
config USE_STM32_HAL_HASH
bool
help
Enable STM32Cube Hash processor (HASH) HAL module driver
config USE_STM32_HAL_HASH_EX
bool
help
Enable STM32Cube Extended Hash processor (HASH) HAL module driver
config USE_STM32_HAL_HCD
bool
help
Enable STM32Cube Host Controller device (HCD) HAL module driver
config USE_STM32_HAL_HRTIM
bool
help
Enable STM32Cube High-Resolution Timer (HRTIM) HAL module driver
config USE_STM32_HAL_HSEM
bool
help
Enable STM32Cube Hardware Semaphore (HSEM) HAL module driver
config USE_STM32_HAL_I2C
bool
help
Enable STM32Cube Inter-integrated circuit (I2C) interface HAL module
driver
config USE_STM32_HAL_I2C_EX
bool
help
Enable STM32Cube Extended Inter-integrated circuit (I2C) interface HAL
module driver
config USE_STM32_HAL_I2S
bool
help
Enable STM32Cube Inter-IC sound (I2S) HAL module driver
config USE_STM32_HAL_I2S_EX
bool
help
Enable STM32Cube Extended Inter-IC sound (I2S) HAL module driver
config USE_STM32_HAL_I3C
bool
help
Enable STM32Cube Improved inter-integrated circuit (I3C) HAL module
driver
config USE_STM32_HAL_ICACHE
bool
help
Enable STM32Cube Instruction cache (ICACHE) HAL module driver
config USE_STM32_HAL_IPCC
bool
help
Enable STM32Cube Inter-Processor communication controller (IPCC) HAL
module driver
config USE_STM32_HAL_IRDA
bool
help
Enable STM32Cube Infrared Data Association (IRDA) HAL module driver
config USE_STM32_HAL_IWDG
bool
help
Enable STM32Cube Independent watchdog (IWDG) HAL module driver
config USE_STM32_HAL_JPEG
bool
help
Enable STM32Cube Jpeg codec (JPEG) HAL module driver
config USE_STM32_HAL_LCD
bool
help
Enable STM32Cube LCD controller (LCD) HAL module driver
config USE_STM32_HAL_LPTIM
bool
help
Enable STM32Cube Low Power Timer (LPTIM) HAL module driver
config USE_STM32_HAL_LTDC
bool
help
Enable STM32Cube LCD-TFT controller (LTDC) HAL module driver
config USE_STM32_HAL_LTDC_EX
bool
help
Enable STM32Cube Extended LCD-TFT controller (LTDC) HAL module driver
config USE_STM32_HAL_MDF
bool
help
Enable STM32Cube Multi-function digital filter (MDF) HAL module driver
config USE_STM32_HAL_MDIOS
bool
help
Enable STM32Cube Management data input/output (MDIOS) HAL module
driver
config USE_STM32_HAL_MDMA
bool
help
Enable STM32Cube Master Direct Memory Access controller (MDMA) HAL
module driver
config USE_STM32_HAL_MMC
bool
help
Enable STM32Cube MultiMediaCard interface (SDMMC) HAL module driver
config USE_STM32_HAL_MMC_EX
bool
help
Enable STM32Cube Extended MultiMediaCard interface (SDMMC) HAL module
driver
config USE_STM32_HAL_MSP
bool
help
Enable STM32Cube MCU Support Package (MSP) HAL module driver
config USE_STM32_HAL_NAND
bool
help
Enable STM32Cube NAND Controller (NAND) HAL module driver
config USE_STM32_HAL_NOR
bool
help
Enable STM32Cube NOR Controller (NOR) HAL module driver
config USE_STM32_HAL_OPAMP
bool
help
Enable STM32Cube Operational amplifiers (OPAMP) HAL module driver
config USE_STM32_HAL_OPAMP_EX
bool
help
Enable STM32Cube Extended Operational amplifiers (OPAMP) HAL module
driver
config USE_STM32_HAL_OSPI
bool
help
Enable STM32Cube Octo-SPI interface (OSPI) HAL module driver
config USE_STM32_HAL_OTFDEC
bool
help
Enable STM32Cube On-the-fly decryption engine (OTFDEC) HAL module
driver
config USE_STM32_HAL_PCCARD
bool
help
Enable STM32Cube PCCard memories (PCCARD) HAL module driver
config USE_STM32_HAL_PCD
bool
help
Enable STM32Cube USB Peripheral Controller (PCD) HAL module driver
config USE_STM32_HAL_PCD_EX
bool
help
Enable STM32Cube Extended USB Peripheral Controller (PCD) HAL module
driver
config USE_STM32_HAL_PKA
bool
help
Enable STM32Cube Public key accelerator (PKA) HAL module driver
config USE_STM32_HAL_PSSI
bool
help
Enable STM32Cube Parallel Synchronous Slave Interface (PSSI)
HAL module driver
config USE_STM32_HAL_PWR
bool
help
Enable STM32Cube Power control (PWR) HAL module driver
config USE_STM32_HAL_PWR_EX
bool
help
Enable STM32Cube Extended Power control (PWR) HAL module driver
config USE_STM32_HAL_QSPI
bool
help
Enable STM32Cube Quad-SPI interface (QSPI) HAL module driver
config USE_STM32_HAL_RAMCFG
bool
help
Enable STM32Cube RAMs configuration controller (RAMCFG) HAL module
driver
config USE_STM32_HAL_RAMECC
bool
help
Enable STM32Cube RAM ECC monitoring (RAMECC) HAL module driver
config USE_STM32_HAL_RNG
bool
help
Enable STM32Cube True random number generator (RNG) HAL module driver
config USE_STM32_HAL_RTC
bool
help
Enable STM32Cube Real-time clock (RTC) HAL module driver
config USE_STM32_HAL_RTC_EX
bool
help
Enable STM32Cube Extended Real-time clock (RTC) HAL module driver
config USE_STM32_HAL_SAI
bool
help
Enable STM32Cube Serial audio interface (SAI) HAL module driver
config USE_STM32_HAL_SAI_EX
bool
help
Enable STM32Cube Extended Serial audio interface (SAI) HAL module
driver
config USE_STM32_HAL_SD
bool
help
Enable STM32Cube Secure digital input/output MultiMediaCard interface
(SDMMC) HAL module driver
config USE_STM32_HAL_SD_EX
bool
help
Enable STM32Cube Extended Secure digital input/output MultiMediaCard
interface (SDMMC) HAL module driver
config USE_STM32_HAL_SDADC
bool
help
Enable STM32Cube Sigma-delta analog-to-digital converter (SDADC) HAL
module driver
config USE_STM32_HAL_SDRAM
bool
help
Enable STM32Cube SDRAM controller (SDRAM) HAL module driver
config USE_STM32_HAL_SMARTCARD
bool
help
Enable STM32Cube Smartcard controller (SMARTCARD) HAL module driver
config USE_STM32_HAL_SMARTCARD_EX
bool
help
Enable STM32Cube Extended Smartcard controller (SMARTCARD) HAL module
driver
config USE_STM32_HAL_SMBUS
bool
help
Enable STM32Cube System Management Bus (SMBus) HAL module driver
config USE_STM32_HAL_SPDIFRX
bool
help
Enable STM32Cube SPDIF receiver interface (SPDIFRX) HAL module driver
config USE_STM32_HAL_SPI
bool
help
Enable STM32Cube Serial peripheral interface (SPI) HAL module driver
config USE_STM32_HAL_SPI_EX
bool
help
Enable STM32Cube Extended Serial peripheral interface (SPI) HAL module
driver
config USE_STM32_HAL_SRAM
bool
help
Enable STM32Cube SRAM controller (SRAM) HAL module driver
config USE_STM32_HAL_SWPMI
bool
help
Enable STM32Cube Single Wire Protocol Master Interface (SWPMI) HAL
module driver
config USE_STM32_HAL_TIM
bool
help
Enable STM32Cube Timer (TIM) HAL module driver
config USE_STM32_HAL_TIM_EX
bool
help
Enable STM32Cube Extended Timer (TIM) HAL module driver
config USE_STM32_HAL_TSC
bool
help
Enable STM32Cube Touch sensing controller (TSC) HAL module driver
config USE_STM32_HAL_UART
bool
help
Enable STM32Cube Universal asynchronous receiver transmitter (USART)
HAL module driver
config USE_STM32_HAL_UART_EX
bool
help
Enable STM32Cube Extended Universal asynchronous receiver transmitter
(USART) HAL module driver
config USE_STM32_HAL_USART
bool
help
Enable STM32Cube Universal synchronous asynchronous receiver
transmitter (USART) HAL module driver
config USE_STM32_HAL_USART_EX
bool
help
Enable STM32Cube Extended Universal synchronous asynchronous receiver
transmitter (USART) HAL module driver
config USE_STM32_HAL_WWDG
bool
help
Enable STM32Cube System window watchdog (WWDG) HAL module driver
config USE_STM32_HAL_XSPI
bool
help
Enable STM32Cube OctoSPI (XSPI) HAL module driver
config USE_STM32_LL_ADC
bool
help
Enable STM32Cube Analog-to-Digital Converter (ADC) LL module driver
config USE_STM32_LL_BDMA
bool
help
Enable STM32Cube Basic direct memory access controller (BDMA) LL
module driver
config USE_STM32_LL_COMP
bool
help
Enable STM32Cube Ultra Low Power Comparator channels (COMP) LL module
driver
config USE_STM32_LL_CORDIC
bool
help
Enable STM32Cube CORDIC co-processor (CORDIC) functions LL module
driver
config USE_STM32_LL_CRC
bool
help
Enable STM32Cube Cyclic redundancy check calculation unit (CRC) LL
module driver
config USE_STM32_LL_CRS
bool
help
Enable STM32Cube Clock recovery system (CRS) LL module driver
config USE_STM32_LL_DAC
bool
help
Enable STM32Cube Digital-to-analog converter (DAC) LL module driver
config USE_STM32_LL_DELAYBLOCK
bool
help
Enable STM32Cube DelayBlock (DELAYBLOCK) LL module driver (stm32H7 or stm32MP1)
config USE_STM32_LL_DLYB
bool
help
Enable STM32Cube DelayBlock (DELAYBLOCK) LL module driver (stm32U5)
config USE_STM32_LL_DMA
bool
help
Enable STM32Cube Direct Memory Access controller (DMA) LL module
driver
config USE_STM32_LL_DMA2D
bool
help
Enable STM32Cube Chrom-Art Accelerator controller (DMA2D) LL module
driver
config USE_STM32_LL_EXTI
bool
help
Enable STM32Cube Extended interrupt and event controller (EXTI) LL
module driver
config USE_STM32_LL_FMAC
bool
help
Enable STM32Cube Filter Math Accelerator (FMAC) LL module driver
config USE_STM32_LL_FMC
bool
help
Enable STM32Cube Flexible memory controller (FMC) LL module driver
config USE_STM32_LL_FSMC
bool
help
Enable STM32Cube Flexible static memory controller (FSMC) LL module
driver
config USE_STM32_LL_GPIO
bool
help
Enable STM32Cube Extended General-purpose I/Os (GPIO) LL module driver
config USE_STM32_LL_HRTIM
bool
help
Enable STM32Cube High-Resolution Timer (HRTIM) LL module driver
config USE_STM32_LL_I2C
bool
help
Enable STM32Cube Inter-integrated circuit (I2C) interface LL module
driver
config USE_STM32_LL_I3C
bool
help
Enable STM32Cube Improved inter-integrated circuit (I3C) LL module
driver
config USE_STM32_LL_ICACHE
bool
help
Enable STM32Cube Instruction cache (ICACHE) LL module driver
config USE_STM32_LL_IPCC
bool
help
Enable STM32Cube Inter-Processor communication controller (IPCC) LL
module driver
config USE_STM32_LL_LPGPIO
bool
help
Enable STM32Cube Low-power general-purpose I/Os (LPGPIO) LL
module driver
config USE_STM32_LL_LPTIM
bool
help
Enable STM32Cube Low Power Timer (LPTIM) LL module driver
config USE_STM32_LL_LPUART
bool
help
Enable STM32Cube Low-power universal asynchronous receiver
transmitter (LPUART) LL module driver
config USE_STM32_LL_MDMA
bool
help
Enable STM32Cube Master Direct Memory Access controller (MDMA) LL
module driver
config USE_STM32_LL_OPAMP
bool
help
Enable STM32Cube Operational amplifiers (OPAMP) LL module driver
config USE_STM32_LL_PKA
bool
help
Enable STM32Cube Public key accelerator (PKA) LL module driver
config USE_STM32_LL_PWR
bool
help
Enable STM32Cube Power control (PWR) LL module driver
config USE_STM32_LL_RCC
bool
help
Enable STM32Cube Reset and Clock Control (RCC) LL module driver
config USE_STM32_LL_RNG
bool
help
Enable STM32Cube True random number generator (RNG) LL module driver
config USE_STM32_LL_RTC
bool
help
Enable STM32Cube Real-time clock (RTC) LL module driver
config USE_STM32_LL_SDMMC
bool
help
Enable STM32Cube SD/SDIO/MMC card host interface (SDMMC) LL module
driver
config USE_STM32_LL_SPI
bool
help
Enable STM32Cube Serial peripheral interface (SPI) LL module driver
config USE_STM32_LL_SWPMI
bool
help
Enable STM32Cube Single Wire Protocol Master Interface (SWPMI) LL
module driver
config USE_STM32_LL_TIM
bool
help
Enable STM32Cube Timer (TIM) LL module driver
config USE_STM32_LL_UCPD
bool
help
Enable STM32Cube USB Power Delivery device interface
(UCPD) LL module driver
config USE_STM32_LL_USART
bool
help
Enable STM32Cube Universal synchronous asynchronous receiver
transmitter (USART) LL module driver
config USE_STM32_LL_USB
bool
help
Enable STM32Cube Universal serial bus full-speed device interface
(USB) LL module driver
config USE_STM32_LL_UTILS
bool
help
Enable STM32Cube Utility functions (UTILS) LL module driver
endif # HAS_STM32CUBE
``` | /content/code_sandbox/modules/Kconfig.stm32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,579 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/irq.h>
#include <zephyr/init.h>
#include "SEGGER_RTT.h"
/*
* Common mutex for locking access to terminal buffer.
 * Note that SEGGER uses the same lock macros for both the SEGGER_RTT_Write
 * and SEGGER_RTT_Read functions. Because of this we are generally not able
 * to separate up and down access using two mutexes until the SEGGER library
 * fixes this.
 *
 * If shared access causes performance problems, consider using other,
 * non-terminal buffers.
*/
/*
 * Mutex guarding access to the shared RTT terminal buffer (buffer 0).
 * NOTE(review): defined even when CONFIG_MULTITHREADING is disabled,
 * although it is only used by the lock hooks below — confirm whether it
 * can be moved under the #ifdef.
 */
K_MUTEX_DEFINE(rtt_term_mutex);

/**
 * @brief Initialize the SEGGER RTT control block.
 *
 * Registered with SYS_INIT() below so it runs at PRE_KERNEL_1, before any
 * subsystem that may write to RTT.
 *
 * @return Always 0 (initialization cannot fail).
 */
static int rtt_init(void)
{
	SEGGER_RTT_Init();
	return 0;
}
#ifdef CONFIG_MULTITHREADING
/**
 * @brief Lock the RTT terminal-buffer mutex.
 *
 * Lock hook for the SEGGER RTT library when custom locking is enabled
 * (see SEGGER_RTT_CUSTOM_LOCKING). Blocks the calling thread until the
 * mutex becomes available.
 */
void zephyr_rtt_mutex_lock(void)
{
	k_mutex_lock(&rtt_term_mutex, K_FOREVER);
}

/**
 * @brief Release the RTT terminal-buffer mutex.
 *
 * Counterpart of zephyr_rtt_mutex_lock().
 */
void zephyr_rtt_mutex_unlock(void)
{
	k_mutex_unlock(&rtt_term_mutex);
}
#endif /* CONFIG_MULTITHREADING */
/**
 * @brief Disable interrupts for an RTT critical section.
 *
 * @return Architecture-specific lock-out key to be passed to
 *         zephyr_rtt_irq_unlock().
 */
unsigned int zephyr_rtt_irq_lock(void)
{
	return irq_lock();
}

/**
 * @brief Restore the interrupt state saved by zephyr_rtt_irq_lock().
 *
 * @param key Lock-out key returned by the matching zephyr_rtt_irq_lock()
 *            call.
 */
void zephyr_rtt_irq_unlock(unsigned int key)
{
	irq_unlock(key);
}

/* Initialize the RTT control block early, before kernel objects start. */
SYS_INIT(rtt_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
``` | /content/code_sandbox/modules/segger/SEGGER_RTT_zephyr.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 262 |
```linker script
/*
 * NOLOAD RAM section collecting the input sections named by
 * CONFIG_SEGGER_RTT_SECTION_CUSTOM_NAME, i.e. the RTT buffer data.
 */
SECTION_DATA_PROLOGUE(_RTT_SECTION_NAME,(NOLOAD),)
{
	__rtt_buff_data_start = .;
	*(CONFIG_SEGGER_RTT_SECTION_CUSTOM_NAME)
	/* End symbol is aligned up to 4 bytes. */
	__rtt_buff_data_end = ALIGN(4);
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
/* Total size of the RTT buffer section (end is 4-byte aligned). */
__rtt_buff_data_size = __rtt_buff_data_end - __rtt_buff_data_start;
``` | /content/code_sandbox/modules/segger/segger_rtt.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 82 |
```unknown
config ZEPHYR_SEGGER_MODULE
bool
config HAS_SEGGER_RTT
bool
help
Indicates that the platform supports SEGGER J-Link RTT.
config USE_SEGGER_RTT
bool "SEGGER RTT libraries."
depends on HAS_SEGGER_RTT
select STM32_ENABLE_DEBUG_SLEEP_STOP if SOC_FAMILY_STM32
help
Enable Segger J-Link RTT libraries for platforms that support it.
Selection of this option enables use of RTT for various subsystems.
Note that by enabling this option, RTT buffers consume more RAM.
if USE_SEGGER_RTT
config SEGGER_RTT_CUSTOM_LOCKING
bool "Custom locking"
help
Enable custom locking using a mutex.
config SEGGER_RTT_MAX_NUM_UP_BUFFERS
int "Maximum number of up-buffers"
default 3
config SEGGER_RTT_MAX_NUM_DOWN_BUFFERS
int "Maximum number of down-buffers"
default 3
config SEGGER_RTT_BUFFER_SIZE_UP
int "Size of the buffer for terminal output of target, up to host"
default 1024
config SEGGER_RTT_BUFFER_SIZE_DOWN
int "Size of the buffer for terminal input of target, from host"
default 32 if SHELL_BACKEND_RTT
default 16
config SEGGER_RTT_PRINTF_BUFFER_SIZE
int "Size of buffer for RTT printf to bulk-send chars via RTT"
default 64
choice SEGGER_RTT_MODE
prompt "Mode for pre-initialized terminal channel (buffer 0)"
default SEGGER_RTT_MODE_NO_BLOCK_SKIP
config SEGGER_RTT_MODE_NO_BLOCK_SKIP
bool "Skip. Do not block, output nothing."
config SEGGER_RTT_MODE_NO_BLOCK_TRIM
bool "Trim: Do not block, output as much as fits."
config SEGGER_RTT_MODE_BLOCK_IF_FIFO_FULL
bool "Block: Wait until there is space in the buffer."
endchoice
config SEGGER_RTT_MODE
int
default 2 if SEGGER_RTT_MODE_BLOCK_IF_FIFO_FULL
default 1 if SEGGER_RTT_MODE_NO_BLOCK_TRIM
default 0
config SEGGER_RTT_MEMCPY_USE_BYTELOOP
bool "Use a simple byte-loop instead of standard memcpy"
choice SEGGER_RTT_SECTION
prompt "Choose RTT data linker section"
default SEGGER_RTT_SECTION_CUSTOM
config SEGGER_RTT_SECTION_NONE
bool "Place RTT data in the default linker section"
config SEGGER_RTT_SECTION_DTCM
bool "Place RTT data in the DTCM linker section"
config SEGGER_RTT_SECTION_CCM
bool "Place RTT data in the CCM linker section"
if CPU_CORTEX_M
config SEGGER_RTT_SECTION_CUSTOM
bool "Place RTT data in custom linker section at RAM start"
config SEGGER_RTT_SECTION_CUSTOM_DTS_REGION
bool "Place RTT data in custom linker section defined by a memory region in DTS"
endif
endchoice
if SEGGER_RTT_SECTION_CUSTOM || SEGGER_RTT_SECTION_CUSTOM_DTS_REGION
config SEGGER_RTT_SECTION_CUSTOM_NAME
string "Name of RTT data custom linker section"
default ".rtt_buff_data"
endif
choice SEGGER_RTT_INIT_MODE
prompt "RTT Initialization mode"
help
The RTT initialization function can avoid re-initializing the Control
Block if another program (e.g. a bootloader) has already initialized it.
default SEGGER_RTT_INIT_MODE_STRONG_CHECK if SEGGER_RTT_SECTION_CUSTOM
default SEGGER_RTT_INIT_MODE_STRONG_CHECK
config SEGGER_RTT_INIT_MODE_ALWAYS
bool "RTT Initialization done without conditions"
config SEGGER_RTT_INIT_MODE_STRONG_CHECK
bool "RTT Initialization done if full check on Control Block ID fails"
config SEGGER_RTT_INIT_MODE_WEAK_CHECK
bool "RTT Initialization done if partial check on Control Block ID fails"
endchoice
endif
``` | /content/code_sandbox/modules/segger/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 826 |
```unknown
config ZEPHYR_LIBLC3_MODULE
bool
config LIBLC3
bool "liblc3 Support"
depends on FPU
select REQUIRES_FULL_LIBC
help
This option enables the Android liblc3 library for Bluetooth LE Audio
``` | /content/code_sandbox/modules/liblc3/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 56 |
```unknown
#
#
#
config ZEPHYR_LORAMAC_NODE_MODULE
bool
config HAS_SEMTECH_RADIO_DRIVERS
bool "Semtech LoRa Radio Drivers"
help
This option enables the use of Semtech's Radio drivers
config HAS_SEMTECH_SX1272
bool
select HAS_SEMTECH_RADIO_DRIVERS
config HAS_SEMTECH_SX1276
bool
select HAS_SEMTECH_RADIO_DRIVERS
config HAS_SEMTECH_SX126X
bool
select HAS_SEMTECH_RADIO_DRIVERS
config HAS_SEMTECH_LORAMAC
bool "Semtech LoRaMac Stack"
depends on HAS_SEMTECH_RADIO_DRIVERS
help
This option enables the use of Semtech's LoRaMac stack
config HAS_SEMTECH_SOFT_SE
bool "Semtech Secure Element software implementation"
depends on HAS_SEMTECH_LORAMAC
help
This option enables the use of Semtech's Secure Element
software implementation
``` | /content/code_sandbox/modules/loramac-node/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 220 |
```unknown
# General configuration options
config MCUBOOT
bool
help
Hidden option used to indicate that the current image is MCUBoot
config BOOTLOADER_MCUBOOT
bool "MCUboot bootloader support"
select USE_DT_CODE_PARTITION
imply INIT_ARCH_HW_AT_BOOT if ARCH_SUPPORTS_ARCH_HW_INIT
depends on !MCUBOOT
help
This option signifies that the target uses MCUboot as a bootloader,
or in other words that the image is to be chain-loaded by MCUboot.
This sets several required build system and Device Tree options in
order for the image generated to be bootable using the MCUboot open
source bootloader. Currently this includes:
* Setting ROM_START_OFFSET to a default value that allows space
for the MCUboot image header
* Activating SW_VECTOR_RELAY_CLIENT on Cortex-M0
(or Armv8-M baseline) targets with no built-in vector relocation
mechanisms
By default, this option instructs Zephyr to initialize the core
architecture HW registers during boot, when this is supported by
the application. This removes the need by MCUboot to reset
the core registers' state itself.
if BOOTLOADER_MCUBOOT
config MCUBOOT_CMAKE_WEST_SIGN_PARAMS
string "Extra parameters to west sign"
default "--quiet"
help
Parameters that are passed by cmake to west sign, just after
the command, before all other parameters needed for image
signing.
By default this is set to "--quiet" to prevent extra, non-error,
diagnostic messages from west sign. This does not affect signing
tool for which extra parameters are passed with
MCUBOOT_EXTRA_IMGTOOL_ARGS.
config MCUBOOT_SIGNATURE_KEY_FILE
string "Path to the mcuboot signing key file"
default ""
depends on !MCUBOOT_GENERATE_UNSIGNED_IMAGE
help
The file contains a key pair whose public half is verified
by your target's MCUboot image. The file is in PEM format.
If set to a non-empty value, the build system tries to
sign the final binaries using a 'west sign -t imgtool' command.
The signed binaries are placed in the build directory
at zephyr/zephyr.signed.bin and zephyr/zephyr.signed.hex.
The file names can be customized with CONFIG_KERNEL_BIN_NAME.
The existence of bin and hex files depends on CONFIG_BUILD_OUTPUT_BIN
and CONFIG_BUILD_OUTPUT_HEX.
This option should contain a path to the same file as the
BOOT_SIGNATURE_KEY_FILE option in your MCUboot .config. The path
may be absolute or relative to the west workspace topdir. (The MCUboot
config option is used for the MCUboot bootloader image; this option is
for your application which is to be loaded by MCUboot. The MCUboot
config option can be a relative path from the MCUboot repository
root.)
If left empty, you must sign the Zephyr binaries manually.
config MCUBOOT_ENCRYPTION_KEY_FILE
string "Path to the mcuboot encryption key file"
default ""
depends on MCUBOOT_SIGNATURE_KEY_FILE != ""
help
The file contains the public key that is used to encrypt the
ephemeral key that encrypts the image. The corresponding
private key is hard coded in the MCUboot source code and is
used to decrypt the ephemeral key that is embedded in the
image. The file is in PEM format.
If set to a non-empty value, the build system tries to
sign and encrypt the final binaries using a 'west sign -t imgtool'
command. The binaries are placed in the build directory at
zephyr/zephyr.signed.encrypted.bin and
zephyr/zephyr.signed.encrypted.hex.
The file names can be customized with CONFIG_KERNEL_BIN_NAME.
The existence of bin and hex files depends on CONFIG_BUILD_OUTPUT_BIN
and CONFIG_BUILD_OUTPUT_HEX.
This option should either be an absolute path or a path relative to
the west workspace topdir.
Example: './bootloader/mcuboot/enc-rsa2048-pub.pem'
If left empty, you must encrypt the Zephyr binaries manually.
config MCUBOOT_IMGTOOL_SIGN_VERSION
string "Version to pass to imgtool when signing"
default "$(APP_VERSION_TWEAK_STRING)" if "$(VERSION_MAJOR)" != ""
default "0.0.0+0"
help
When signing with imgtool then this setting will be passed as version
argument to the tool.
The format is major.minor.revision+build.
config MCUBOOT_IMGTOOL_OVERWRITE_ONLY
bool "Use overwrite-only instead of swap upgrades"
help
If enabled, --overwrite-only option passed to imgtool to avoid
adding the swap status area size when calculating overflow.
config MCUBOOT_EXTRA_IMGTOOL_ARGS
string "Extra arguments to pass to imgtool when signing"
default ""
help
When signing (CONFIG_MCUBOOT_SIGNATURE_KEY_FILE is a non-empty
string) you can use this option to pass extra options to
imgtool. For example, you could set this to "--version 1.2".
config MCUBOOT_GENERATE_UNSIGNED_IMAGE
bool "Generate unsigned binary image bootable with MCUboot"
help
Enabling this configuration allows automatic unsigned binary image
generation when MCUboot signing key is not provided,
i.e., MCUBOOT_SIGNATURE_KEY_FILE is left empty.
config MCUBOOT_GENERATE_CONFIRMED_IMAGE
bool "Also generate a padded, confirmed image"
help
The signed, padded, and confirmed binaries are placed in the build
directory at zephyr/zephyr.signed.confirmed.bin and
zephyr/zephyr.signed.confirmed.hex.
The file names can be customized with CONFIG_KERNEL_BIN_NAME.
The existence of bin and hex files depends on CONFIG_BUILD_OUTPUT_BIN
and CONFIG_BUILD_OUTPUT_HEX.
menu "On board MCUboot operation mode"
choice MCUBOOT_BOOTLOADER_MODE
prompt "Application assumed MCUboot mode of operation"
default MCUBOOT_BOOTLOADER_MODE_SWAP_WITHOUT_SCRATCH # MCUBOOT_BOOTLOADER_MODE
help
Informs application build on assumed MCUboot mode of operation.
This is important for validating the application against the DT configuration,
which is done by west sign.
config MCUBOOT_BOOTLOADER_MODE_SINGLE_APP
bool "MCUboot has been configured for single slot execution"
select MCUBOOT_IMGTOOL_OVERWRITE_ONLY
help
MCUboot will only boot slot0_partition placed application and does
not care about other slots. In this mode application is not able
to DFU its own update to secondary slot and all updates need to
be performed using MCUboot serial recovery.
config MCUBOOT_BOOTLOADER_MODE_SWAP_WITHOUT_SCRATCH
bool "MCUboot has been configured for swap without scratch operation"
select MCUBOOT_BOOTLOADER_MODE_HAS_NO_DOWNGRADE
help
MCUboot expects slot0_partition and slot1_partition to be present
in DT and application will boot from slot0_partition.
MCUBOOT_BOOTLOADER_NO_DOWNGRADE should also be selected
if MCUboot has been built with MCUBOOT_DOWNGRADE_PREVENTION.
config MCUBOOT_BOOTLOADER_MODE_SWAP_SCRATCH
bool "MCUboot has been configured for swap using scratch operation"
select MCUBOOT_BOOTLOADER_MODE_HAS_NO_DOWNGRADE
help
MCUboot expects slot0_partition, slot1_partition and scratch_partition
to be present in DT, and application will boot from slot0_partition.
In this mode scratch_partition is used as temporary storage when
MCUboot swaps application from the secondary slot to the primary
slot.
MCUBOOT_BOOTLOADER_NO_DOWNGRADE should also be selected
if MCUboot has been built with MCUBOOT_DOWNGRADE_PREVENTION.
config MCUBOOT_BOOTLOADER_MODE_OVERWRITE_ONLY
bool "MCUboot has been configured to just overwrite primary slot"
select MCUBOOT_BOOTLOADER_MODE_HAS_NO_DOWNGRADE
select MCUBOOT_IMGTOOL_OVERWRITE_ONLY
help
MCUboot will take contents of secondary slot of an image and will
overwrite primary slot with it.
In this mode it is not possible to revert back to previous version
as it is not stored in the secondary slot.
This mode supports MCUBOOT_BOOTLOADER_NO_DOWNGRADE which means
that the overwrite will not happen unless the version of secondary
slot is higher than the version in primary slot.
config MCUBOOT_BOOTLOADER_MODE_DIRECT_XIP
bool "MCUboot has been configured for DirectXIP operation"
select MCUBOOT_BOOTLOADER_MODE_HAS_NO_DOWNGRADE
select MCUBOOT_BOOTLOADER_NO_DOWNGRADE
help
MCUboot expects slot0_partition and slot1_partition to exist in DT.
In this mode MCUboot can boot from either partition and will
select one with higher application image version, which usually
means major.minor.patch triple, unless BOOT_VERSION_CMP_USE_BUILD_NUMBER
is also selected that enables comparison of build number.
This option automatically selects
MCUBOOT_BOOTLOADER_NO_DOWNGRADE as it is not possible
to swap back to an older version of the application.
config MCUBOOT_BOOTLOADER_MODE_DIRECT_XIP_WITH_REVERT
bool "MCUboot has been configured for DirectXIP with revert"
select MCUBOOT_BOOTUTIL_LIB_FOR_DIRECT_XIP
select MCUBOOT_BOOTLOADER_MODE_HAS_NO_DOWNGRADE
select MCUBOOT_BOOTLOADER_NO_DOWNGRADE
help
MCUboot expects slot0_partition and slot1_partition to exist in DT.
In this mode MCUboot will boot the application with the higher version
from either slot, as long as it has been marked to be boot
next time for test or permanently. In case when application is marked
for test it needs to confirm itself, on the first boot, or it will
be removed and MCUboot will revert to booting previously approved
application.
This mode does not allow freely switching between application
versions, as, once higher version application is approved, it is
not possible to select lower version for boot.
This mode selects MCUBOOT_BOOTLOADER_NO_DOWNGRADE as it is not possible
to downgrade running application, but note that MCUboot may do that
if application with higher version will not get confirmed.
config MCUBOOT_BOOTLOADER_MODE_FIRMWARE_UPDATER
bool "MCUboot has been configured in firmware updater mode"
select MCUBOOT_IMGTOOL_OVERWRITE_ONLY
help
MCUboot will only boot slot0_partition for the main application but has
an entrance mechanism defined for entering the slot1_partition which is
a dedicated firmware updater application used to update the slot0_partition
application.
endchoice # MCUBOOT_BOOTLOADER_MODE
config MCUBOOT_BOOTLOADER_MODE_HAS_NO_DOWNGRADE
bool
help
Selected mode supports downgrade prevention, where you cannot switch to
an application with lower version than the currently running application.
if MCUBOOT_BOOTLOADER_MODE_HAS_NO_DOWNGRADE
config MCUBOOT_BOOTLOADER_NO_DOWNGRADE
bool "MCUboot mode has downgrade prevention enabled"
help
Selected MCUboot mode has downgrade prevention enabled, where you are not
able to change back to image with lower version number.
This options should be selected when MCUboot has been built with
MCUBOOT_DOWNGRADE_PREVENTION option enabled.
endif
endmenu # On board MCUboot operation mode
endif # BOOTLOADER_MCUBOOT
menuconfig MCUBOOT_BOOTUTIL_LIB
bool "MCUboot utility library"
help
Enable MCUboot utility library which implements functions
required by the chain-loaded application and the MCUboot.
if MCUBOOT_BOOTUTIL_LIB
# hidden option for disabling module-own log configuration
# while building MCUboot bootloader
config MCUBOOT_BOOTUTIL_LIB_OWN_LOG
bool
default y
if MCUBOOT_BOOTUTIL_LIB_OWN_LOG
module = MCUBOOT_UTIL
module-str = MCUboot bootutil
source "subsys/logging/Kconfig.template.log_config"
endif
config BOOT_IMAGE_ACCESS_HOOKS
bool "Hooks for overriding MCUboot's bootutil native routines"
help
Allow to provide procedures for override or extend native
MCUboot's routines required for access the image data.
It is up to the application project to add source file which
implements hooks to the build.
if MCUBOOT_BOOTLOADER_MODE_DIRECT_XIP_WITH_REVERT
config MCUBOOT_BOOTUTIL_LIB_FOR_DIRECT_XIP
bool
help
Adds support for setting for test and confirming images
when bootloader is in DirectXIP-revert mode.
endif
endif # MCUBOOT_BOOTUTIL_LIB
``` | /content/code_sandbox/modules/Kconfig.mcuboot | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,782 |
```unknown
# Zephyr module config for percepio.
# The real Kconfig for the module is located in the module repository,
# this file is to ensure ZEPHYR_PERCEPIO_MODULE is defined also when the
# module is unavailable.
config ZEPHYR_PERCEPIO_MODULE
bool
``` | /content/code_sandbox/modules/percepio/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 63 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_MODULES_CANOPENNODE_CO_DRIVER_H
#define ZEPHYR_MODULES_CANOPENNODE_CO_DRIVER_H
/*
* Zephyr RTOS CAN driver interface and configuration for CANopenNode
* CANopen protocol stack.
*
* See CANopenNode/stack/drvTemplate/CO_driver.h for API description.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/device.h>
#include <zephyr/toolchain.h>
#include <zephyr/dsp/types.h> /* float32_t, float64_t */
/* Use static variables instead of calloc() */
#define CO_USE_GLOBALS

/* Use Zephyr provided crc16 implementation */
#define CO_USE_OWN_CRC16

/* Use SDO buffer size from Kconfig */
#define CO_SDO_BUFFER_SIZE CONFIG_CANOPENNODE_SDO_BUFFER_SIZE

/* Use trace buffer size from Kconfig */
#define CO_TRACE_BUFFER_SIZE_FIXED CONFIG_CANOPENNODE_TRACE_BUFFER_SIZE

/* Enable LED indication support in the stack when configured */
#ifdef CONFIG_CANOPENNODE_LEDS
#define CO_USE_LEDS 1
#endif

/* Map the target byte order onto the CANopenNode endianness defines */
#ifdef CONFIG_LITTLE_ENDIAN
#define CO_LITTLE_ENDIAN
#else
#define CO_BIG_ENDIAN
#endif

/* Basic scalar type aliases expected by the CANopenNode stack */
typedef bool bool_t;
typedef char char_t;
typedef unsigned char oChar_t;
typedef unsigned char domain_t;

/* Sanity-check that the DSP float types provide at least 32/64 bits */
BUILD_ASSERT(sizeof(float32_t) >= 4);
BUILD_ASSERT(sizeof(float64_t) >= 8);
/* Received CAN frame in the layout consumed by the CANopenNode stack */
typedef struct canopen_rx_msg {
	uint8_t data[8];  /* Frame payload */
	uint16_t ident;   /* CAN identifier */
	uint8_t DLC;      /* Data length code (payload length in bytes) */
} CO_CANrxMsg_t;

/*
 * Callback invoked when a frame matching an RX buffer is received.
 * Per the note further below, RX callbacks run in interrupt context.
 */
typedef void (*CO_CANrxBufferCallback_t)(void *object,
					 const CO_CANrxMsg_t *message);

/* Per-filter receive buffer descriptor */
typedef struct canopen_rx {
	int filter_id;                   /* CAN filter id — presumably the id
					  * returned when the RX filter was
					  * added; verify against CO_driver.c
					  */
	void *object;                    /* Opaque argument passed to pFunct */
	CO_CANrxBufferCallback_t pFunct; /* Callback for matching frames */
	uint16_t ident;                  /* CAN identifier to match */
	uint16_t mask;                   /* Identifier match mask */
#ifdef CONFIG_CAN_ACCEPT_RTR
	bool rtr;                        /* Also match RTR frames */
#endif /* CONFIG_CAN_ACCEPT_RTR */
} CO_CANrx_t;

/* Transmit buffer descriptor */
typedef struct canopen_tx {
	uint8_t data[8];       /* Frame payload */
	uint16_t ident;        /* CAN identifier */
	uint8_t DLC;           /* Data length code */
	bool_t rtr : 1;        /* Send as remote transmission request */
	bool_t bufferFull : 1; /* A message is pending in this buffer */
	bool_t syncFlag : 1;   /* CANopen SYNC-related flag — semantics
				* defined by the CANopenNode stack
				*/
} CO_CANtx_t;

/* Driver state for one CAN interface used by CANopenNode */
typedef struct canopen_module {
	const struct device *dev;  /* Zephyr CAN controller device */
	CO_CANrx_t *rx_array;      /* Receive buffer array */
	CO_CANtx_t *tx_array;      /* Transmit buffer array */
	uint16_t rx_size;          /* Number of entries in rx_array */
	uint16_t tx_size;          /* Number of entries in tx_array */
	uint32_t errors;           /* Accumulated error state */
	void *em;                  /* CANopenNode emergency object —
				    * TODO confirm exact type
				    */
	bool_t configured : 1;     /* Module has been configured */
	bool_t CANnormal : 1;      /* Interface is in normal operation */
	bool_t first_tx_msg : 1;   /* No frame transmitted yet since start */
} CO_CANmodule_t;
void canopen_send_lock(void);
void canopen_send_unlock(void);
#define CO_LOCK_CAN_SEND() canopen_send_lock()
#define CO_UNLOCK_CAN_SEND() canopen_send_unlock()
void canopen_emcy_lock(void);
void canopen_emcy_unlock(void);
#define CO_LOCK_EMCY() canopen_emcy_lock()
#define CO_UNLOCK_EMCY() canopen_emcy_unlock()
void canopen_od_lock(void);
void canopen_od_unlock(void);
#define CO_LOCK_OD() canopen_od_lock()
#define CO_UNLOCK_OD() canopen_od_unlock()
/*
* CANopenNode RX callbacks run in interrupt context, no memory
* barrier needed.
*/
#define CANrxMemoryBarrier()
#define IS_CANrxNew(rxNew) ((uintptr_t)rxNew)
#define SET_CANrxNew(rxNew) { CANrxMemoryBarrier(); rxNew = (void *)1L; }
#define CLEAR_CANrxNew(rxNew) { CANrxMemoryBarrier(); rxNew = (void *)0L; }
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_MODULES_CANOPENNODE_CO_DRIVER_H */
``` | /content/code_sandbox/modules/canopennode/CO_driver_target.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 777 |
```unknown
config ZEPHYR_CMSIS_DSP_MODULE
bool
menuconfig CMSIS_DSP
bool "CMSIS-DSP Library Support"
help
This option enables the CMSIS-DSP library.
if CMSIS_DSP
comment "Components"
config CMSIS_DSP_BASICMATH
bool "Basic Math Functions"
help
This option enables the Basic Math Functions, which support the
following operations:
* Elementwise Clipping
* Vector Absolute Value
* Vector Addition
* Vector Subtraction
* Vector Multiplication
* Vector Dot Product
* Vector Absolute Value
* Vector Negate
* Vector Offset
* Vector Scale
* Vector Shift
* Vector Bitwise AND
* Vector Bitwise OR
* Vector Bitwise Exclusive OR
* Vector Bitwise NOT
config CMSIS_DSP_COMPLEXMATH
bool "Complex Math Functions"
imply CMSIS_DSP_FASTMATH
help
This option enables the Complex Math Functions, which support the
following operations:
* Complex-by-Complex Multiplication
* Complex-by-Real Multiplication
* Complex Dot Product
* Complex Magnitude
* Complex Magnitude Squared
* Complex Conjugate
config CMSIS_DSP_CONTROLLER
bool "Controller Functions"
help
This option enables the Controller Functions, which support the
following operations:
* PID Control
* Vector Clarke Transform
* Vector Inverse Clarke Transform
* Vector Park Transform
* Vector Inverse Park Transform
* Sine-Cosine
These functions can be used to implement a generic PID controller, as
well as field oriented motor control using Space Vector Modulation
algorithm.
config CMSIS_DSP_FASTMATH
bool "Fast Math Functions"
imply CMSIS_DSP_BASICMATH
help
This option enables the Fast Math Functions, which support the
following operations:
* Fixed-Point Division
* Sine
* Cosine
* Square Root
config CMSIS_DSP_FILTERING
bool "Filtering Functions"
imply CMSIS_DSP_BASICMATH
imply CMSIS_DSP_FASTMATH
imply CMSIS_DSP_SUPPORT
help
This option enables the Filtering Functions, which support the
following operations:
* Convolution
* Partial Convolution
* Correlation
* Levinson-Durbin Algorithm
The following filter types are supported:
* FIR (finite impulse response) Filter
* FIR Lattice Filter
* FIR Sparse Filter
* FIR Filter with Decimator
* FIR Filter with Interpolator
* IIR (infinite impulse response) Lattice Filter
* Biquad Cascade IIR Filter, Direct Form I Structure
* Biquad Cascade IIR Filter, Direct Form II Transposed Structure
* High Precision Q31 Biquad Cascade Filter
* LMS (least mean square) Filter
* Normalized LMS Filter
config CMSIS_DSP_INTERPOLATION
bool "Interpolation Functions"
help
This option enables the Interpolation Functions, which support the
following operations:
* Bilinear Interpolation
* Linear Interpolation
* Cubic Spline Interpolation
config CMSIS_DSP_MATRIX
bool "Matrix Functions"
help
This option enables the Matrix Functions, which support the following
operations:
* Matrix Initialization
* Matrix Addition
* Matrix Subtraction
* Matrix Multiplication
* Complex Matrix Multiplication
* Matrix Vector Multiplication
* Matrix Inverse
* Matrix Scale
* Matrix Transpose
* Complex Matrix Transpose
* Cholesky and LDLT Decompositions
config CMSIS_DSP_QUATERNIONMATH
bool "Quaternion Math Functions"
help
This option enables the Quaternion Math Functions, which support the
following operations:
* Quaternion Conversions
* Quaternion Conjugate
* Quaternion Inverse
* Quaternion Norm
* Quaternion Normalization
* Quaternion Product
config CMSIS_DSP_STATISTICS
bool "Statistics Functions"
imply CMSIS_DSP_BASICMATH
imply CMSIS_DSP_FASTMATH
help
This option enables the Statistics Functions, which support the
following operations:
* Minimum
* Absolute Minimum
* Maximum
* Absolute Maximum
* Mean
* Root Mean Square (RMS)
* Variance
* Standard Deviation
* Power
* Entropy
* Kullback-Leibler Divergence
* LogSumExp (LSE)
config CMSIS_DSP_SUPPORT
bool "Support Functions"
help
This option enables the Support Functions, which support the
following operations:
* Vector 8-bit Integer Value Conversion
* Vector 16-bit Integer Value Conversion
* Vector 32-bit Integer Value Conversion
* Vector 16-bit Floating-Point Value Conversion
* Vector 32-bit Floating-Point Value Conversion
* Vector Copy
* Vector Fill
* Vector Sorting
* Weighted Sum
* Barycenter
config CMSIS_DSP_TRANSFORM
bool "Transform Functions"
imply CMSIS_DSP_BASICMATH
help
This option enables the Transform Functions, which support the
following transformations:
* Real Fast Fourier Transform (RFFT)
* Complex Fast Fourier Transform (CFFT)
* Type IV Discrete Cosine Transform (DCT4)
config CMSIS_DSP_SVM
bool "Support Vector Machine Functions"
help
This option enables the Support Vector Machine Functions, which
support the following algorithms:
* Linear
* Polynomial
* Sigmoid
* Radial Basis Function (RBF)
config CMSIS_DSP_BAYES
bool "Bayesian Estimators"
imply CMSIS_DSP_STATISTICS
help
This option enables the Bayesian Estimator Functions, which
implements the naive gaussian Bayes estimator.
config CMSIS_DSP_DISTANCE
bool "Distance Functions"
imply CMSIS_DSP_STATISTICS
help
This option enables the Distance Functions, which support the
following distance computation algorithms:
* Boolean Vectors
* Hamming
* Jaccard
* Kulsinski
* Rogers-Tanimoto
* Russell-Rao
* Sokal-Michener
* Sokal-Sneath
* Yule
* Dice
* Floating-Point Vectors
* Canberra
* Chebyshev
* Cityblock
* Correlation
* Cosine
* Euclidean
* Jensen-Shannon
* Minkowski
* Bray-Curtis
config CMSIS_DSP_WINDOW
bool "Windowing Functions"
help
	  This option enables the Window Functions, which support the
following windowing functions:
* Bartlett
* Hamming
* Hanning
* Nuttall
* Blackman Harris
* HFT
comment "Instruction Set"
# NOTE: These configurations should eventually be derived from the arch ISA and
# FP support configurations.
config CMSIS_DSP_NEON
bool "Neon Instruction Set"
default y
depends on CPU_CORTEX_A
help
This option enables the NEON Advanced SIMD instruction set, which is
available on most Cortex-A and some Cortex-R processors.
config CMSIS_DSP_NEON_EXPERIMENTAL
	bool "Neon Instruction Set (experimental)"
depends on CPU_CORTEX_A
help
This option enables the NEON Advanced SIMD instruction set, which is
available on most Cortex-A and some Cortex-R processors.
config CMSIS_DSP_HELIUM_EXPERIMENTAL
bool "Helium Instruction Set"
depends on FP_HARDABI && (ARMV8_1_M_MVEI || ARMV8_1_M_MVEF)
help
This option enables the Helium Advanced SIMD instruction set, which is
available on some Cortex-M processors.
comment "Features"
config CMSIS_DSP_LOOPUNROLL
bool "Loop Unrolling"
help
This option enables manual loop unrolling in the DSP functions.
config CMSIS_DSP_ROUNDING
bool "Rounding"
help
This option enables rounding on the support functions.
config CMSIS_DSP_MATRIXCHECK
bool "Matrix Check"
help
This option enables validation of the input and output sizes of
matrices.
config CMSIS_DSP_AUTOVECTORIZE
bool "Auto Vectorize"
help
This option prefers autovectorizable code to one using C intrinsics
in the DSP functions.
config CMSIS_DSP_FLOAT16
bool "Half-Precision (16-bit Float) Support"
default y
depends on FP16
help
This option enables the half-precision (16-bit) floating-point
operations support.
config CMSIS_DSP_LAX_VECTOR_CONVERSIONS
bool "Lax Vector Conversions"
default y
depends on FP_HARDABI && (ARMV8_1_M_MVEI || ARMV8_1_M_MVEF)
help
This option enables lax vector conversions
endif #CMSIS_DSP
``` | /content/code_sandbox/modules/cmsis-dsp/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,933 |
```c
/*
*
*/
#include <CANopen.h>
/**
 * @brief CANopen sync thread.
 *
 * The CANopen real-time sync thread processes SYNC RPDOs and TPDOs
 * through the CANopenNode stack with an interval of 1 millisecond.
 *
 * @param p1 Unused
 * @param p2 Unused
 * @param p3 Unused
 */
static void canopen_sync_thread(void *p1, void *p2, void *p3)
{
	uint32_t start; /* cycles */
	uint32_t stop; /* cycles */
	uint32_t delta; /* cycles */
	uint32_t elapsed = 0; /* microseconds */
	bool sync;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (true) {
		start = k_cycle_get_32();
		/* Only process once the stack is initialized and the CAN
		 * interface is in normal (operational) mode.
		 */
		if (CO && CO->CANmodule[0] && CO->CANmodule[0]->CANnormal) {
			CO_LOCK_OD();
			sync = CO_process_SYNC(CO, elapsed);
			CO_process_RPDO(CO, sync);
			CO_process_TPDO(CO, sync, elapsed);
			CO_UNLOCK_OD();
		}
		k_sleep(K_MSEC(1));
		/* elapsed spans processing plus the 1 ms sleep, i.e. the
		 * wall-clock time since the previous iteration started.
		 */
		stop = k_cycle_get_32();
		delta = stop - start;
		elapsed = (uint32_t)k_cyc_to_ns_floor64(delta) / NSEC_PER_USEC;
	}
}

/* Started 1 ms after boot (last argument is the start delay in ms). */
K_THREAD_DEFINE(canopen_sync, CONFIG_CANOPENNODE_SYNC_THREAD_STACK_SIZE,
		canopen_sync_thread, NULL, NULL, NULL,
		CONFIG_CANOPENNODE_SYNC_THREAD_PRIORITY, 0, 1);
``` | /content/code_sandbox/modules/canopennode/canopen_sync.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 342 |
```c
/*
*
*/
#include <zephyr/settings/settings.h>
#include <CANopen.h>
#include <CO_Emergency.h>
#include <CO_SDO.h>
#include <canopennode.h>
#define LOG_LEVEL CONFIG_CANOPEN_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(canopen_storage);
/* 's', 'a', 'v', 'e' from LSB to MSB */
#define STORE_PARAM_MAGIC 0x65766173UL
/* 'l', 'o', 'a', 'd' from LSB to MSB */
#define RESTORE_PARAM_MAGIC 0x64616F6CUL
/* Variables for reporting errors through CANopen once the stack is up */
static int canopen_storage_rom_error;
static int canopen_storage_eeprom_error;
/*
 * Object dictionary function for index 0x1010 ("Store parameters").
 *
 * Writing the magic value "save" (STORE_PARAM_MAGIC) to sub-index 1
 * persists the ROM portion of the object dictionary to non-volatile
 * storage. Reads pass through; other writes keep the stored value.
 */
static CO_SDO_abortCode_t canopen_odf_1010(CO_ODF_arg_t *odf_arg)
{
	CO_EM_t *em = odf_arg->object;
	uint32_t value;
	int err;

	value = CO_getUint32(odf_arg->data);

	if (odf_arg->reading) {
		return CO_SDO_AB_NONE;
	}

	/* Preserve old value */
	memcpy(odf_arg->data, odf_arg->ODdataStorage, sizeof(uint32_t));

	if (odf_arg->subIndex != 1U) {
		return CO_SDO_AB_NONE;
	}

	if (value != STORE_PARAM_MAGIC) {
		/* Only the exact "save" signature triggers a store */
		return CO_SDO_AB_DATA_TRANSF;
	}

	err = canopen_storage_save(CANOPEN_STORAGE_ROM);
	if (err) {
		LOG_ERR("failed to save object dictionary ROM entries (err %d)",
			err);
		CO_errorReport(em, CO_EM_NON_VOLATILE_MEMORY, CO_EMC_HARDWARE,
			       err);
		return CO_SDO_AB_HW;
	} else {
		LOG_DBG("saved object dictionary ROM entries");
	}

	return CO_SDO_AB_NONE;
}
/*
 * Object dictionary function for index 0x1011 ("Restore default
 * parameters").
 *
 * Writing the magic value "load" (RESTORE_PARAM_MAGIC) to sub-index 1
 * (or higher) erases the stored ROM entries — and optionally the
 * EEPROM entries — so defaults apply on the next boot.
 */
static CO_SDO_abortCode_t canopen_odf_1011(CO_ODF_arg_t *odf_arg)
{
	CO_EM_t *em = odf_arg->object;
	bool failed = false;
	uint32_t value;
	int err;

	value = CO_getUint32(odf_arg->data);

	if (odf_arg->reading) {
		return CO_SDO_AB_NONE;
	}

	/* Preserve old value */
	memcpy(odf_arg->data, odf_arg->ODdataStorage, sizeof(uint32_t));

	/* Sub-index 0 holds the entry count; only sub-indexes >= 1 restore */
	if (odf_arg->subIndex < 1U) {
		return CO_SDO_AB_NONE;
	}

	if (value != RESTORE_PARAM_MAGIC) {
		return CO_SDO_AB_DATA_TRANSF;
	}

	err = canopen_storage_erase(CANOPEN_STORAGE_ROM);
	if (err == -ENOENT) {
		/* Nothing stored yet is not an error */
		LOG_DBG("no object dictionary ROM entries to delete");
	} else if (err) {
		LOG_ERR("failed to delete object dictionary ROM entries"
			" (err %d)", err);
		CO_errorReport(em, CO_EM_NON_VOLATILE_MEMORY, CO_EMC_HARDWARE,
			       err);
		failed = true;
	} else {
		LOG_DBG("deleted object dictionary ROM entries");
	}

#ifdef CONFIG_CANOPENNODE_STORAGE_HANDLER_ERASES_EEPROM
	err = canopen_storage_erase(CANOPEN_STORAGE_EEPROM);
	if (err == -ENOENT) {
		LOG_DBG("no object dictionary EEPROM entries to delete");
	} else if (err) {
		LOG_ERR("failed to delete object dictionary EEPROM entries"
			" (err %d)", err);
		CO_errorReport(em, CO_EM_NON_VOLATILE_MEMORY, CO_EMC_HARDWARE,
			       err);
		failed = true;
	} else {
		LOG_DBG("deleted object dictionary EEPROM entries");
	}
#endif

	/* Both erases are attempted; report failure if either failed */
	if (failed) {
		return CO_SDO_AB_HW;
	}

	return CO_SDO_AB_NONE;
}
/*
 * Settings "set" handler restoring CANopen object dictionary entries
 * ("canopen/eeprom" and "canopen/rom") from non-volatile storage.
 *
 * Stored blobs are only copied into the live object dictionary when
 * their FirstWord/LastWord signatures match CO_OD_FIRST_LAST_WORD,
 * guarding against restoring data from an incompatible layout.
 * Restore errors are recorded and reported via CANopen EMCY later,
 * in canopen_storage_attach().
 *
 * @param key     settings key relative to the "canopen" root
 * @param len_rd  length of the stored value (unused here)
 * @param read_cb callback for reading the stored value
 * @param cb_arg  argument for read_cb
 *
 * @return always 0 (unknown keys are ignored)
 */
static int canopen_settings_set(const char *key, size_t len_rd,
				settings_read_cb read_cb, void *cb_arg)
{
	const char *next;
	int nlen;
	ssize_t len;

	nlen = settings_name_next(key, &next);

	if (!strncmp(key, "eeprom", nlen)) {
		struct sCO_OD_EEPROM eeprom;

		len = read_cb(cb_arg, &eeprom, sizeof(eeprom));
		if (len < 0) {
			/* %zd: len is a negative ssize_t error code */
			LOG_ERR("failed to restore object dictionary EEPROM"
				" entries (err %zd)", len);
			canopen_storage_eeprom_error = len;
		} else {
			if ((eeprom.FirstWord == CO_OD_FIRST_LAST_WORD) &&
			    (eeprom.LastWord == CO_OD_FIRST_LAST_WORD)) {
				memcpy(&CO_OD_EEPROM, &eeprom,
				       sizeof(CO_OD_EEPROM));
				LOG_DBG("restored object dictionary EEPROM"
					" entries");
			} else {
				LOG_WRN("object dictionary EEPROM entries"
					" signature mismatch, skipping"
					" restore");
			}
		}

		return 0;
	} else if (!strncmp(key, "rom", nlen)) {
		struct sCO_OD_ROM rom;

		len = read_cb(cb_arg, &rom, sizeof(rom));
		if (len < 0) {
			/* %zd: len is a negative ssize_t error code */
			LOG_ERR("failed to restore object dictionary ROM"
				" entries (err %zd)", len);
			canopen_storage_rom_error = len;
		} else {
			if ((rom.FirstWord == CO_OD_FIRST_LAST_WORD) &&
			    (rom.LastWord == CO_OD_FIRST_LAST_WORD)) {
				memcpy(&CO_OD_ROM, &rom, sizeof(CO_OD_ROM));
				LOG_DBG("restored object dictionary ROM"
					" entries");
			} else {
				LOG_WRN("object dictionary ROM entries"
					" signature mismatch, skipping"
					" restore");
			}
		}

		return 0;
	}

	return 0;
}
SETTINGS_STATIC_HANDLER_DEFINE(canopen, "canopen", NULL,
canopen_settings_set, NULL, NULL);
/* Attach the 0x1010/0x1011 storage handlers and report any restore
 * errors recorded before the CANopen stack was up (see
 * canopen_settings_set()).
 */
void canopen_storage_attach(CO_SDO_t *sdo, CO_EM_t *em)
{
	CO_OD_configure(sdo, OD_H1010_STORE_PARAM_FUNC, canopen_odf_1010,
			em, 0U, 0U);
	CO_OD_configure(sdo, OD_H1011_REST_PARAM_FUNC, canopen_odf_1011,
			em, 0U, 0U);

	/* Deferred EMCY reporting of early restore failures */
	if (canopen_storage_eeprom_error) {
		CO_errorReport(em, CO_EM_NON_VOLATILE_MEMORY, CO_EMC_HARDWARE,
			       canopen_storage_eeprom_error);
	}

	if (canopen_storage_rom_error) {
		CO_errorReport(em, CO_EM_NON_VOLATILE_MEMORY, CO_EMC_HARDWARE,
			       canopen_storage_rom_error);
	}
}
/* Persist object dictionary entries of the given storage type.
 * RAM entries are never persisted, so they succeed as a no-op.
 */
int canopen_storage_save(enum canopen_storage storage)
{
	switch (storage) {
	case CANOPEN_STORAGE_ROM:
		return settings_save_one("canopen/rom", &CO_OD_ROM,
					 sizeof(CO_OD_ROM));
	case CANOPEN_STORAGE_EEPROM:
		return settings_save_one("canopen/eeprom", &CO_OD_EEPROM,
					 sizeof(CO_OD_EEPROM));
	default:
		return 0;
	}
}
/* Delete persisted object dictionary entries of the given storage type.
 * RAM entries have no persisted counterpart; this is a no-op for them.
 */
int canopen_storage_erase(enum canopen_storage storage)
{
	switch (storage) {
	case CANOPEN_STORAGE_ROM:
		return settings_delete("canopen/rom");
	case CANOPEN_STORAGE_EEPROM:
		return settings_delete("canopen/eeprom");
	default:
		return 0;
	}
}
``` | /content/code_sandbox/modules/canopennode/canopen_storage.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,606 |
```c
/*
*
*/
#include <CANopen.h>
#include <canopennode.h>
#include <zephyr/dfu/flash_img.h>
#include <zephyr/dfu/mcuboot.h>
#include <zephyr/storage/flash_map.h>
#include <zephyr/sys/crc.h>
#define LOG_LEVEL CONFIG_CANOPEN_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(canopen_program);
/* Object dictionary indexes */
#define OD_H1F50_PROGRAM_DATA 0x1F50
#define OD_H1F51_PROGRAM_CTRL 0x1F51
#define OD_H1F56_PROGRAM_SWID 0x1F56
#define OD_H1F57_FLASH_STATUS 0x1F57
/* Common program control commands and status */
#define PROGRAM_CTRL_STOP 0x00
#define PROGRAM_CTRL_START 0x01
#define PROGRAM_CTRL_RESET 0x02
#define PROGRAM_CTRL_CLEAR 0x03
/* Zephyr specific program control and status */
#define PROGRAM_CTRL_ZEPHYR_CONFIRM 0x80
/* Flash status bits */
#define FLASH_STATUS_IN_PROGRESS BIT(0)
/* Flash common error bits values */
#define FLASH_STATUS_NO_ERROR (0U << 1U)
#define FLASH_STATUS_NO_VALID_PROGRAM (1U << 1U)
#define FLASH_STATUS_DATA_FORMAT_UNKNOWN (2U << 1U)
#define FLASH_STATUS_DATA_FORMAT_ERROR (3U << 1U)
#define FLASH_STATUS_FLASH_NOT_CLEARED (4U << 1U)
#define FLASH_STATUS_FLASH_WRITE_ERROR (5U << 1U)
#define FLASH_STATUS_GENERAL_ADDR_ERROR (6U << 1U)
#define FLASH_STATUS_FLASH_SECURED (7U << 1U)
#define FLASH_STATUS_UNSPECIFIED_ERROR (63U << 1)
/* Shared state for the program download object dictionary handlers. */
struct canopen_program_context {
	uint32_t flash_status;	/* object 0x1F57 flash status bits */
	size_t total;		/* expected download size, 0 if unknown */
	CO_NMT_t *nmt;		/* CANopenNode NMT object */
	CO_EM_t *em;		/* CANopenNode emergency object */
	struct flash_img_context flash_img_ctx;
	uint8_t program_status;	/* object 0x1F51 program control state */
	bool flash_written;	/* full image written but not yet started */
};
static struct canopen_program_context ctx;
/* Cache the program control status; all PROGRAM_CTRL_* values fit in
 * the uint8_t field, so the narrowing is lossless.
 */
static void canopen_program_set_status(uint32_t status)
{
	ctx.program_status = status;
}
/* Return the effective program control status for object 0x1F51. */
static uint32_t canopen_program_get_status(void)
{
	/*
	 * Non-confirmed boot image takes precedence over other
	 * status. This must be checked on every invocation since the
	 * app may be using other means of confirming the image.
	 */
	if (!boot_is_img_confirmed()) {
		return PROGRAM_CTRL_ZEPHYR_CONFIRM;
	}

	return ctx.program_status;
}
/*
 * Object dictionary function for index 0x1F50 ("Program data").
 *
 * Streams downloaded program segments into the secondary flash image
 * slot via the flash_img API. The program must be in the CLEAR state
 * before a download may start; on the last segment the total length
 * (when known) is validated against the number of bytes written.
 */
static CO_SDO_abortCode_t canopen_odf_1f50(CO_ODF_arg_t *odf_arg)
{
	int err;

	if (odf_arg->subIndex != 1U) {
		return CO_SDO_AB_NONE;
	}

	if (odf_arg->reading) {
		return CO_SDO_AB_WRITEONLY;
	}

	if (canopen_program_get_status() != PROGRAM_CTRL_CLEAR) {
		ctx.flash_status = FLASH_STATUS_FLASH_NOT_CLEARED;
		return CO_SDO_AB_DATA_DEV_STATE;
	}

	if (odf_arg->firstSegment) {
		err = flash_img_init(&ctx.flash_img_ctx);
		if (err) {
			LOG_ERR("failed to initialize flash img (err %d)", err);
			CO_errorReport(ctx.em, CO_EM_NON_VOLATILE_MEMORY,
				       CO_EMC_HARDWARE, err);
			ctx.flash_status = FLASH_STATUS_FLASH_WRITE_ERROR;
			return CO_SDO_AB_HW;
		}

		ctx.flash_status = FLASH_STATUS_IN_PROGRESS;
		if (IS_ENABLED(CONFIG_CANOPENNODE_LEDS)) {
			canopen_leds_program_download(true);
		}

		ctx.total = odf_arg->dataLengthTotal;
		/* %zu: ctx.total is a size_t */
		LOG_DBG("total = %zu", ctx.total);
	}

	err = flash_img_buffered_write(&ctx.flash_img_ctx, odf_arg->data,
				       odf_arg->dataLength,
				       odf_arg->lastSegment);
	if (err) {
		CO_errorReport(ctx.em, CO_EM_NON_VOLATILE_MEMORY,
			       CO_EMC_HARDWARE, err);
		ctx.flash_status = FLASH_STATUS_FLASH_WRITE_ERROR;
		/* Guarded like the call above so the LED code can be
		 * compiled out when CONFIG_CANOPENNODE_LEDS=n.
		 */
		if (IS_ENABLED(CONFIG_CANOPENNODE_LEDS)) {
			canopen_leds_program_download(false);
		}
		return CO_SDO_AB_HW;
	}

	if (odf_arg->lastSegment) {
		/* ctx.total is zero if not provided by download process */
		if (ctx.total != 0 &&
		    ctx.total != flash_img_bytes_written(&ctx.flash_img_ctx)) {
			LOG_WRN("premature end of program download");
			ctx.flash_status = FLASH_STATUS_DATA_FORMAT_ERROR;
		} else {
			LOG_DBG("program downloaded");
			ctx.flash_written = true;
			ctx.flash_status = FLASH_STATUS_NO_ERROR;
		}

		canopen_program_set_status(PROGRAM_CTRL_STOP);
		if (IS_ENABLED(CONFIG_CANOPENNODE_LEDS)) {
			canopen_leds_program_download(false);
		}
	}

	return CO_SDO_AB_NONE;
}
/* Handle the "stop program" command (0x1F51 value 0x00). Rejected
 * while an image confirmation is pending.
 */
static inline CO_SDO_abortCode_t canopen_program_cmd_stop(void)
{
	if (canopen_program_get_status() == PROGRAM_CTRL_ZEPHYR_CONFIRM) {
		return CO_SDO_AB_DATA_DEV_STATE;
	}

	canopen_program_set_status(PROGRAM_CTRL_STOP);
	LOG_DBG("program stopped");

	return CO_SDO_AB_NONE;
}
/* Handle the "start program" command (0x1F51 value 0x01).
 *
 * If a new image has been fully downloaded, request an MCUboot test
 * upgrade and schedule a CANopenNode application reset; otherwise just
 * mark the program as started.
 */
static inline CO_SDO_abortCode_t canopen_program_cmd_start(void)
{
	int err;

	if (canopen_program_get_status() == PROGRAM_CTRL_ZEPHYR_CONFIRM) {
		return CO_SDO_AB_DATA_DEV_STATE;
	}

	if (ctx.flash_written) {
		LOG_DBG("requesting upgrade and reset");
		err = boot_request_upgrade(BOOT_UPGRADE_TEST);
		if (err) {
			LOG_ERR("failed to request upgrade (err %d)", err);
			CO_errorReport(ctx.em, CO_EM_NON_VOLATILE_MEMORY,
				       CO_EMC_HARDWARE, err);
			return CO_SDO_AB_HW;
		}

		/* Picked up by the NMT processing loop */
		ctx.nmt->resetCommand = CO_RESET_APP;
	} else {
		LOG_DBG("program started");
		canopen_program_set_status(PROGRAM_CTRL_START);
	}

	return CO_SDO_AB_NONE;
}
/* Handle the "clear program" command (0x1F51 value 0x03).
 *
 * Erases the secondary image slot (unless progressive erase is
 * enabled, in which case flash_img erases on the fly) and transitions
 * to the CLEAR state, which is required before a new download.
 */
static inline CO_SDO_abortCode_t canopen_program_cmd_clear(void)
{
	int err;

	if (canopen_program_get_status() != PROGRAM_CTRL_STOP) {
		return CO_SDO_AB_DATA_DEV_STATE;
	}

	if (!IS_ENABLED(CONFIG_IMG_ERASE_PROGRESSIVELY)) {
		LOG_DBG("erasing flash area");
		err = boot_erase_img_bank(FIXED_PARTITION_ID(slot1_partition));
		if (err) {
			LOG_ERR("failed to erase image bank (err %d)", err);
			CO_errorReport(ctx.em, CO_EM_NON_VOLATILE_MEMORY,
				       CO_EMC_HARDWARE, err);
			return CO_SDO_AB_HW;
		}
	}

	LOG_DBG("program cleared");
	canopen_program_set_status(PROGRAM_CTRL_CLEAR);
	ctx.flash_status = FLASH_STATUS_NO_ERROR;
	ctx.flash_written = false;

	return CO_SDO_AB_NONE;
}
/* Handle the Zephyr-specific "confirm image" command (0x1F51 value
 * 0x80). Marks the running MCUboot image as confirmed so it will not
 * be reverted on the next reboot. A no-op if already confirmed.
 */
static inline CO_SDO_abortCode_t canopen_program_cmd_confirm(void)
{
	int err;

	if (canopen_program_get_status() == PROGRAM_CTRL_ZEPHYR_CONFIRM) {
		err = boot_write_img_confirmed();
		if (err) {
			LOG_ERR("failed to confirm image (err %d)", err);
			CO_errorReport(ctx.em, CO_EM_NON_VOLATILE_MEMORY,
				       CO_EMC_HARDWARE, err);
			return CO_SDO_AB_HW;
		}

		LOG_DBG("program confirmed");
		canopen_program_set_status(PROGRAM_CTRL_START);
	}

	return CO_SDO_AB_NONE;
}
/*
 * Object dictionary function for index 0x1F51 ("Program control").
 *
 * Reads return the current program status; writes dispatch the
 * requested command. Commands are only accepted in the NMT
 * pre-operational state. PROGRAM_CTRL_RESET is not supported.
 */
static CO_SDO_abortCode_t canopen_odf_1f51(CO_ODF_arg_t *odf_arg)
{
	CO_SDO_abortCode_t ab;
	uint8_t cmd;

	if (odf_arg->subIndex != 1U) {
		return CO_SDO_AB_NONE;
	}

	if (odf_arg->reading) {
		odf_arg->data[0] = canopen_program_get_status();
		return CO_SDO_AB_NONE;
	}

	if (CO_NMT_getInternalState(ctx.nmt) != CO_NMT_PRE_OPERATIONAL) {
		LOG_DBG("not in pre-operational state");
		return CO_SDO_AB_DATA_DEV_STATE;
	}

	/* Preserve old value */
	cmd = odf_arg->data[0];
	memcpy(odf_arg->data, odf_arg->ODdataStorage, sizeof(uint8_t));

	LOG_DBG("program status = %d, cmd = %d", canopen_program_get_status(),
		cmd);

	switch (cmd) {
	case PROGRAM_CTRL_STOP:
		ab = canopen_program_cmd_stop();
		break;
	case PROGRAM_CTRL_START:
		ab = canopen_program_cmd_start();
		break;
	case PROGRAM_CTRL_CLEAR:
		ab = canopen_program_cmd_clear();
		break;
	case PROGRAM_CTRL_ZEPHYR_CONFIRM:
		ab = canopen_program_cmd_confirm();
		break;
	case PROGRAM_CTRL_RESET:
		__fallthrough;
	default:
		LOG_DBG("unsupported command '%d'", cmd);
		ab = CO_SDO_AB_INVALID_VALUE;
	}

	return ab;
}
#ifdef CONFIG_BOOTLOADER_MCUBOOT
/** @brief Calculate crc for region in flash
 *
 * Reads the region in 32-byte chunks so only a small stack buffer is
 * needed regardless of image size.
 *
 * @param flash_area Flash area to read from, must be open
 * @param offset Offset to read from
 * @param size Number of bytes to include in calculation
 * @param pcrc Pointer to uint32_t where crc will be written if return value is 0
 *
 * @return 0 if successful, negative errno on failure
 */
static int flash_crc(const struct flash_area *flash_area,
		     off_t offset, size_t size, uint32_t *pcrc)
{
	uint32_t crc = 0;
	uint8_t buffer[32];

	while (size > 0) {
		size_t len = MIN(size, sizeof(buffer));

		int err = flash_area_read(flash_area, offset, buffer, len);

		if (err) {
			return err;
		}

		crc = crc32_ieee_update(crc, buffer, len);

		offset += len;
		size -= len;
	}

	*pcrc = crc;

	return 0;
}
/*
 * Object dictionary function for index 0x1F56 ("Program software
 * identification").
 *
 * Reads return the CRC32 of the relevant MCUboot image: the freshly
 * downloaded image in slot 1 if one is pending, otherwise the running
 * image in slot 0. Returns 0 when the image header cannot be read or
 * has an unsupported version. Writes are rejected.
 */
static CO_SDO_abortCode_t canopen_odf_1f56(CO_ODF_arg_t *odf_arg)
{
	const struct flash_area *flash_area;
	struct mcuboot_img_header header;
	off_t offset = 0;
	uint32_t crc = 0;
	uint8_t fa_id;
	uint32_t len;
	int err;

	if (odf_arg->subIndex != 1U) {
		return CO_SDO_AB_NONE;
	}

	if (!odf_arg->reading) {
		/* Preserve old value */
		memcpy(odf_arg->data, odf_arg->ODdataStorage, sizeof(uint32_t));
		return CO_SDO_AB_READONLY;
	}

	/* Reading from flash and calculating crc can take 100ms or more, and
	 * this function is called with the can od lock taken.
	 *
	 * Release the lock before performing time consuming work, and reacquire
	 * before return.
	 */
	CO_UNLOCK_OD();

	/*
	 * Calculate the CRC32 of the image that is running or will be
	 * started upon receiveing the next 'start' command.
	 */
	if (ctx.flash_written) {
		fa_id = FIXED_PARTITION_ID(slot1_partition);
	} else {
		fa_id = FIXED_PARTITION_ID(slot0_partition);
	}

	err = boot_read_bank_header(fa_id, &header, sizeof(header));
	if (err) {
		LOG_WRN("failed to read bank header (err %d)", err);
		CO_setUint32(odf_arg->data, 0U);
		/* Every exit path must reacquire the OD lock */
		CO_LOCK_OD();
		return CO_SDO_AB_NONE;
	}

	if (header.mcuboot_version != 1) {
		LOG_WRN("unsupported mcuboot header version %d",
			header.mcuboot_version);
		CO_setUint32(odf_arg->data, 0U);
		CO_LOCK_OD();
		return CO_SDO_AB_NONE;
	}
	len = header.h.v1.image_size;

	err = flash_area_open(fa_id, &flash_area);
	if (err) {
		LOG_ERR("failed to open flash area (err %d)", err);
		CO_errorReport(ctx.em, CO_EM_NON_VOLATILE_MEMORY,
			       CO_EMC_HARDWARE, err);
		CO_LOCK_OD();
		return CO_SDO_AB_HW;
	}

	err = flash_crc(flash_area, offset, len, &crc);
	flash_area_close(flash_area);
	if (err) {
		LOG_ERR("failed to read flash (err %d)", err);
		CO_errorReport(ctx.em, CO_EM_NON_VOLATILE_MEMORY,
			       CO_EMC_HARDWARE, err);
		CO_LOCK_OD();
		return CO_SDO_AB_HW;
	}

	CO_setUint32(odf_arg->data, crc);

	CO_LOCK_OD();
	return CO_SDO_AB_NONE;
}
#endif /* CONFIG_BOOTLOADER_MCUBOOT */
/*
 * Object dictionary function for index 0x1F57 ("Flash status
 * identification"). Reads return the current flash status bits;
 * writes are rejected as read-only.
 */
static CO_SDO_abortCode_t canopen_odf_1f57(CO_ODF_arg_t *odf_arg)
{
	if (odf_arg->subIndex != 1U) {
		return CO_SDO_AB_NONE;
	}

	if (odf_arg->reading) {
		CO_setUint32(odf_arg->data, ctx.flash_status);
		return CO_SDO_AB_NONE;
	}

	/* Write attempt: restore the stored value and reject */
	memcpy(odf_arg->data, odf_arg->ODdataStorage, sizeof(uint32_t));
	return CO_SDO_AB_READONLY;
}
/* Attach program download handlers to objects 0x1F50/0x1F51/0x1F57
 * (and 0x1F56 when MCUboot is the bootloader) and reset the download
 * state.
 */
void canopen_program_download_attach(CO_NMT_t *nmt, CO_SDO_t *sdo, CO_EM_t *em)
{
	canopen_program_set_status(PROGRAM_CTRL_START);
	ctx.flash_status = FLASH_STATUS_NO_ERROR;
	ctx.flash_written = false;
	ctx.nmt = nmt;
	ctx.em = em;

	CO_OD_configure(sdo, OD_H1F50_PROGRAM_DATA, canopen_odf_1f50,
			NULL, 0U, 0U);
	CO_OD_configure(sdo, OD_H1F51_PROGRAM_CTRL, canopen_odf_1f51,
			NULL, 0U, 0U);
	if (IS_ENABLED(CONFIG_BOOTLOADER_MCUBOOT)) {
		CO_OD_configure(sdo, OD_H1F56_PROGRAM_SWID, canopen_odf_1f56,
				NULL, 0U, 0U);
	}
	CO_OD_configure(sdo, OD_H1F57_FLASH_STATUS, canopen_odf_1f57,
			NULL, 0U, 0U);
}
``` | /content/code_sandbox/modules/canopennode/canopen_program.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,110 |
```unknown
# CANopenNode CANopen protocol stack configuration options
config ZEPHYR_CANOPENNODE_MODULE
bool
config CANOPENNODE
bool "CANopenNode support"
select CRC
depends on CAN && !CAN_FD_MODE
help
This option enables the CANopenNode library.
if CANOPENNODE
config CANOPENNODE_SDO_BUFFER_SIZE
int "CANopen SDO buffer size"
default 32
range 7 889
help
Size of the internal CANopen SDO buffer in bytes. Size must
be at least equal to the size of the largest variable in the
object dictionary. If data type is DOMAIN, data length is
not limited to the SDO buffer size. If block transfer is
implemented, value should be set to 889.
config CANOPENNODE_TRACE_BUFFER_SIZE
int "CANopen trace buffer size"
default 100
help
Size of the CANopen trace buffer in bytes.
config CANOPENNODE_TX_WORKQUEUE_STACK_SIZE
int "Stack size for the CANopen transmit workqueue"
default 512
help
Size of the stack used for the internal CANopen transmit
workqueue.
config CANOPENNODE_TX_WORKQUEUE_PRIORITY
int "Priority for CANopen transmit workqueue"
default 0 if !COOP_ENABLED
default -1
help
Priority level of the internal CANopen transmit workqueue.
config CANOPENNODE_STORAGE
bool "CANopen object dictionary storage"
depends on SETTINGS
default y
help
Enable support for storing the CANopen object dictionary to
non-volatile storage.
config CANOPENNODE_STORAGE_HANDLER_ERASES_EEPROM
bool "Erase CANopen object dictionary EEPROM entries in storage handler"
depends on CANOPENNODE_STORAGE
help
Erase CANopen object dictionary EEPROM entries upon write to
object dictionary index 0x1011 subindex 1.
config CANOPENNODE_LEDS
bool "CANopen LED indicators"
default y
help
Enable support for CANopen LED indicators according to the CiA
303-3 specification.
config CANOPENNODE_LEDS_BICOLOR
bool "CANopen bicolor LED indicator"
depends on CANOPENNODE_LEDS
help
Handle CANopen LEDs as one bicolor LED, favoring the red LED
over the green LED in accordance with the CiA 303-3
specification.
config CANOPENNODE_SYNC_THREAD
bool "CANopen SYNC thread"
default y
help
Enable internal thread for processing CANopen SYNC RPDOs and
TPDOs. Application layer must take care of SYNC RPDO and
TPDO processing on its own if this is disabled.
config CANOPENNODE_SYNC_THREAD_STACK_SIZE
int "Stack size for the CANopen SYNC thread"
depends on CANOPENNODE_SYNC_THREAD
default 512
help
Size of the stack used for the internal thread which
processes CANopen SYNC RPDOs and TPDOs.
config CANOPENNODE_SYNC_THREAD_PRIORITY
int "Priority for CANopen SYNC thread"
depends on CANOPENNODE_SYNC_THREAD
default 0 if !COOP_ENABLED
default -5
help
Priority level of the internal thread which processes
CANopen SYNC RPDOs and TPDOs.
config CANOPENNODE_PROGRAM_DOWNLOAD
bool "CANopen program download"
depends on BOOTLOADER_MCUBOOT
select IMG_MANAGER
default y
help
Enable support for program download over CANopen according
to the CiA 302-3 (draft) specification.
endif # CANOPENNODE
``` | /content/code_sandbox/modules/canopennode/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 750 |
```c
/*
*
*/
#include <CANopen.h>
#include <canopennode.h>
/* State for the CiA 303-3 LED indicators. */
struct canopen_leds_state {
	CO_NMT_t *nmt;			/* NMT object driving the blink patterns */
	canopen_led_callback_t green_cb;	/* green LED set-state callback */
	void *green_arg;		/* argument for green_cb */
	canopen_led_callback_t red_cb;	/* red LED set-state callback */
	void *red_arg;			/* argument for red_cb */
	bool green : 1;			/* last state passed to green_cb */
	bool red : 1;			/* last state passed to red_cb */
	bool program_download : 1;	/* program download indication active */
};

static struct canopen_leds_state canopen_leds;
/* Periodic (50 ms) timer handler: advance the CANopenNode blinking
 * state machine and propagate LED state changes to the registered
 * callbacks. Callbacks are only invoked when the state changes.
 */
static void canopen_leds_update(struct k_timer *timer_id)
{
	bool green = false;
	bool red = false;

	ARG_UNUSED(timer_id);

	CO_NMT_blinkingProcess50ms(canopen_leds.nmt);

	/* Triple flash signals an ongoing program download (CiA 303-3) */
	if (canopen_leds.program_download) {
		green = LED_TRIPLE_FLASH(canopen_leds.nmt);
	} else {
		green = LED_GREEN_RUN(canopen_leds.nmt);
	}

	red = LED_RED_ERROR(canopen_leds.nmt);

#ifdef CONFIG_CANOPENNODE_LEDS_BICOLOR
	/* Bicolor LED: red takes precedence over green */
	if (red && canopen_leds.red_cb) {
		green = false;
	}
#endif

	if (canopen_leds.green_cb) {
		if (green != canopen_leds.green) {
			canopen_leds.green_cb(green, canopen_leds.green_arg);
			canopen_leds.green = green;
		}
	}

	if (canopen_leds.red_cb) {
		if (red != canopen_leds.red) {
			canopen_leds.red_cb(red, canopen_leds.red_arg);
			canopen_leds.red = red;
		}
	}
}
K_TIMER_DEFINE(canopen_leds_timer, canopen_leds_update, NULL);
/* Register LED callbacks and (re)start the 50 ms update timer.
 * Both the outgoing and incoming callbacks are called with "off" so
 * the physical LEDs match the reset internal state.
 */
void canopen_leds_init(CO_NMT_t *nmt,
		       canopen_led_callback_t green_cb, void *green_arg,
		       canopen_led_callback_t red_cb, void *red_arg)
{
	/* Stop updates while swapping callbacks */
	k_timer_stop(&canopen_leds_timer);

	canopen_leds.nmt = nmt;

	/* Call existing callbacks to turn off LEDs */
	if (canopen_leds.green_cb) {
		canopen_leds.green_cb(false, canopen_leds.green_arg);
	}
	if (canopen_leds.red_cb) {
		canopen_leds.red_cb(false, canopen_leds.red_arg);
	}

	canopen_leds.green_cb = green_cb;
	canopen_leds.green_arg = green_arg;
	canopen_leds.green = false;
	canopen_leds.red_cb = red_cb;
	canopen_leds.red_arg = red_arg;
	canopen_leds.red = false;

	/* Call new callbacks to turn off LEDs */
	if (canopen_leds.green_cb) {
		canopen_leds.green_cb(false, canopen_leds.green_arg);
	}
	if (canopen_leds.red_cb) {
		canopen_leds.red_cb(false, canopen_leds.red_arg);
	}

	/* Only run the timer when there is something to update */
	if (nmt && (green_cb || red_cb)) {
		k_timer_start(&canopen_leds_timer, K_MSEC(50), K_MSEC(50));
	}
}
/* Toggle the program download LED indication (picked up by the next
 * canopen_leds_update() tick).
 */
void canopen_leds_program_download(bool in_progress)
{
	canopen_leds.program_download = in_progress;
}
``` | /content/code_sandbox/modules/canopennode/canopen_leds.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 667 |
```objective-c
/*
*
*/
/**
* @defgroup CAN CAN BUS
* @{
* @}
*/
/**
* @brief CANopen Network Stack
* @defgroup canopen CANopen Network Stack
* @ingroup CAN
* @{
*/
#ifndef ZEPHYR_MODULES_CANOPENNODE_CANOPENNODE_H_
#define ZEPHYR_MODULES_CANOPENNODE_CANOPENNODE_H_
#include <CANopen.h>
#include <CO_Emergency.h>
#include <CO_SDO.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief CANopen object dictionary storage types.
*/
enum canopen_storage {
CANOPEN_STORAGE_RAM,
CANOPEN_STORAGE_ROM,
CANOPEN_STORAGE_EEPROM,
};
/**
 * @brief CANopen driver context.
 */
struct canopen_context {
	const struct device *dev; /* CAN interface device used by the driver */
};
/**
* @brief Attach CANopen object dictionary storage handlers.
*
* Attach CANopen storage handler functions to object dictionary
* indexes 0x1010 (Store parameters) and 0x1011 (Restore default
* parameters). This function must be called after calling CANopenNode
* `CO_init()`.
*
* The handlers will save object dictionary entries of type @ref
* CANOPEN_STORAGE_ROM to non-volatile storage when a CANopen SDO
* client writes 0x65766173 ('s', 'a', 'v', 'e' from LSB to MSB) to
* object dictionary index 0x1010 sub-index 1.
*
* Object dictionary entries of types @ref CANOPEN_STORAGE_ROM (and
* optionally @ref CANOPEN_STORAGE_EEPROM) will be deleted from
* non-volatile storage when a CANopen SDO client writes 0x64616F6C
* ('l', 'o', 'a', 'd' from LSB to MSB) to object dictionary index
* 0x1011 sub-index 1.
*
* Object dictionary entries of type @ref CANOPEN_STORAGE_EEPROM may be
* saved by the application by periodically calling @ref
* canopen_storage_save().
*
* Object dictionary entries of type @ref CANOPEN_STORAGE_RAM are
* never saved to non-volatile storage.
*
* @param sdo CANopenNode SDO server object
* @param em CANopenNode Emergency object
*/
void canopen_storage_attach(CO_SDO_t *sdo, CO_EM_t *em);
/**
 * @brief Save CANopen object dictionary entries to non-volatile storage.
 *
 * Save object dictionary entries of a given type to non-volatile
 * storage.
 *
 * @param storage CANopen object dictionary entry type
 *
 * @return 0 if successful, negative errno code if failure
 *
 * @see canopen_storage_erase()
 */
int canopen_storage_save(enum canopen_storage storage);
/**
 * @brief Erase CANopen object dictionary entries from non-volatile storage.
 *
 * Erase object dictionary entries of a given type from non-volatile
 * storage.
 *
 * @param storage CANopen object dictionary entry type
 *
 * @return 0 if successful, negative errno code if failure
 *
 * @see canopen_storage_save()
 */
int canopen_storage_erase(enum canopen_storage storage);
/**
 * @brief Attach CANopen object dictionary program download handlers.
 *
 * Attach CANopen program download functions to object dictionary
 * indexes 0x1F50, 0x1F51, 0x1F56, and 0x1F57. This function must be
 * called after calling CANopenNode `CO_init()`.
 *
 * @param nmt CANopenNode NMT object
 * @param sdo CANopenNode SDO server object
 * @param em CANopenNode Emergency object
 */
void canopen_program_download_attach(CO_NMT_t *nmt, CO_SDO_t *sdo, CO_EM_t *em);
/**
 * @typedef canopen_led_callback_t
 * @brief CANopen LED indicator callback function signature.
 *
 * @param value true if the LED indicator shall be turned on, false otherwise.
 * @param arg argument that was passed when LEDs were initialized.
 */
typedef void (*canopen_led_callback_t)(bool value, void *arg);
/**
 * @brief Initialize CANopen LED indicators.
 *
 * Initialize CANopen LED indicators and attach callbacks for setting
 * their state. Two LED indicators, a red and a green, are supported
 * according to CiA 303-3.
 *
 * NOTE(review): whether a NULL callback is accepted is not visible from
 * this header — confirm against the implementation before relying on it.
 *
 * @param nmt CANopenNode NMT object.
 * @param green_cb callback for changing state on the green LED indicator.
 * @param green_arg argument to pass to the green LED indicator callback.
 * @param red_cb callback for changing state on the red LED indicator.
 * @param red_arg argument to pass to the red LED indicator callback.
 */
void canopen_leds_init(CO_NMT_t *nmt,
		       canopen_led_callback_t green_cb, void *green_arg,
		       canopen_led_callback_t red_cb, void *red_arg);
/**
 * @brief Indicate CANopen program download in progress
 *
 * Indicate that a CANopen program download is in progress.
 *
 * @param in_progress true if program download is in progress, false otherwise
 */
void canopen_leds_program_download(bool in_progress);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_MODULES_CANOPENNODE_CANOPENNODE_H_ */
``` | /content/code_sandbox/modules/canopennode/canopennode.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,093 |
```c
/*
*
*/
#include <zephyr/shell/shell.h>
#include <mbedtls/memory_buffer_alloc.h>
#if defined(MBEDTLS_MEMORY_DEBUG)
/* Shell handler for "mbedtls heap details": print the mbed TLS heap
 * allocator state via mbedtls_memory_buffer_alloc_status().
 * argc/argv are unused (fixed argument count enforced by SHELL_CMD_ARG).
 */
static int cmd_mbedtls_heap_details(const struct shell *sh, size_t argc,
				    char **argv)
{
	mbedtls_memory_buffer_alloc_status();
	return 0;
}
/* Shell handler for "mbedtls heap max_reset": reset the recorded peak
 * (maximum) heap usage statistics.
 */
static int cmd_mbedtls_heap_max_reset(const struct shell *sh, size_t argc,
				      char **argv)
{
	mbedtls_memory_buffer_alloc_max_reset();
	return 0;
}
/* Shell handler for "mbedtls heap": print peak and current mbed TLS heap
 * usage in bytes and allocated blocks.
 */
static int cmd_mbedtls_heap(const struct shell *sh, size_t argc, char **argv)
{
	size_t peak_bytes;
	size_t peak_blocks;
	size_t used_bytes;
	size_t used_blocks;

	mbedtls_memory_buffer_alloc_max_get(&peak_bytes, &peak_blocks);
	mbedtls_memory_buffer_alloc_cur_get(&used_bytes, &used_blocks);

	shell_print(sh, "Maximum (peak): %zu bytes, %zu blocks",
		    peak_bytes, peak_blocks);
	shell_print(sh, "Current: %zu bytes, %zu blocks",
		    used_bytes, used_blocks);

	return 0;
}
/* "mbedtls heap" subcommands; only built when MBEDTLS_MEMORY_DEBUG is on. */
SHELL_STATIC_SUBCMD_SET_CREATE(mbedtls_heap_cmds,
	SHELL_CMD_ARG(details, NULL, "Print heap details",
		      cmd_mbedtls_heap_details, 1, 0),
	SHELL_CMD_ARG(max_reset, NULL, "Reset max heap statistics",
		      cmd_mbedtls_heap_max_reset, 1, 0),
	SHELL_SUBCMD_SET_END /* Array terminated. */
);
#endif
/* Top-level "mbedtls" command set. */
SHELL_STATIC_SUBCMD_SET_CREATE(mbedtls_cmds,
#if defined(MBEDTLS_MEMORY_DEBUG)
	SHELL_CMD_ARG(heap, &mbedtls_heap_cmds, "Show heap status",
		      cmd_mbedtls_heap, 1, 0),
#endif
	SHELL_SUBCMD_SET_END /* Array terminated. */
);
SHELL_CMD_REGISTER(mbedtls, &mbedtls_cmds, "mbed TLS commands", NULL);
``` | /content/code_sandbox/modules/mbedtls/shell.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 387 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/drivers/can.h>
#include <zephyr/init.h>
#include <zephyr/sys/util.h>
#include <canopennode.h>
#define LOG_LEVEL CONFIG_CANOPEN_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(canopen_driver);
/* Stack for the dedicated workqueue that retries deferred CAN TX frames. */
K_KERNEL_STACK_DEFINE(canopen_tx_workq_stack,
		      CONFIG_CANOPENNODE_TX_WORKQUEUE_STACK_SIZE);
/* Workqueue on which canopen_tx_retry() runs (started in canopen_init()). */
struct k_work_q canopen_tx_workq;
/* Bundles the retry work item with the CANopen module it services. */
struct canopen_tx_work_container {
	struct k_work work;
	CO_CANmodule_t *CANmodule;
};
struct canopen_tx_work_container canopen_tx_queue;
/* Mutexes backing the canopen_*_lock()/_unlock() helpers below. */
K_MUTEX_DEFINE(canopen_send_mutex);
K_MUTEX_DEFINE(canopen_emcy_mutex);
K_MUTEX_DEFINE(canopen_co_mutex);
/* Serialize access to the CAN TX buffers. Presumably invoked through the
 * CANopenNode CO_LOCK_CAN_SEND()/CO_UNLOCK_CAN_SEND() macros — confirm in
 * the CO_driver header.
 */
inline void canopen_send_lock(void)
{
	k_mutex_lock(&canopen_send_mutex, K_FOREVER);
}
inline void canopen_send_unlock(void)
{
	k_mutex_unlock(&canopen_send_mutex);
}
/* Serialize access to the emergency (EMCY) object. */
inline void canopen_emcy_lock(void)
{
	k_mutex_lock(&canopen_emcy_mutex, K_FOREVER);
}
inline void canopen_emcy_unlock(void)
{
	k_mutex_unlock(&canopen_emcy_mutex);
}
/* Serialize access to the object dictionary. */
inline void canopen_od_lock(void)
{
	k_mutex_lock(&canopen_co_mutex, K_FOREVER);
}
inline void canopen_od_unlock(void)
{
	k_mutex_unlock(&canopen_co_mutex);
}
/* Remove every CAN RX filter previously registered for this module and
 * mark each RX slot as unconfigured (filter_id == -ENOSPC). No-op if the
 * module was never configured.
 */
static void canopen_detach_all_rx_filters(CO_CANmodule_t *CANmodule)
{
	uint16_t idx;

	if (CANmodule == NULL || CANmodule->rx_array == NULL ||
	    !CANmodule->configured) {
		return;
	}

	for (idx = 0U; idx < CANmodule->rx_size; idx++) {
		CO_CANrx_t *rx = &CANmodule->rx_array[idx];

		if (rx->filter_id == -ENOSPC) {
			continue;
		}

		can_remove_rx_filter(CANmodule->dev, rx->filter_id);
		rx->filter_id = -ENOSPC;
	}
}
/* can_add_rx_filter() callback: copy the received frame into a
 * CO_CANrxMsg_t and dispatch it to the first registered RX buffer whose
 * ident/mask matches. Runs in the driver's RX context.
 */
static void canopen_rx_callback(const struct device *dev, struct can_frame *frame, void *user_data)
{
	CO_CANmodule_t *CANmodule = (CO_CANmodule_t *)user_data;
	CO_CANrxMsg_t rxMsg;
	CO_CANrx_t *buffer;
	int i;
	ARG_UNUSED(dev);
	/* Loop through registered rx buffers in priority order */
	for (i = 0; i < CANmodule->rx_size; i++) {
		buffer = &CANmodule->rx_array[i];
		if (buffer->filter_id == -ENOSPC || buffer->pFunct == NULL) {
			/* Slot not configured or no handler attached */
			continue;
		}
		if (((frame->id ^ buffer->ident) & buffer->mask) == 0U) {
#ifdef CONFIG_CAN_ACCEPT_RTR
			/* NOTE(review): this only skips non-RTR frames for
			 * buffers expecting RTR; an RTR frame can still be
			 * delivered to a non-RTR buffer — confirm intended.
			 */
			if (buffer->rtr && ((frame->flags & CAN_FRAME_RTR) == 0U)) {
				continue;
			}
#endif /* CONFIG_CAN_ACCEPT_RTR */
			rxMsg.ident = frame->id;
			rxMsg.DLC = frame->dlc;
			memcpy(rxMsg.data, frame->data, frame->dlc);
			/* Hand the copy to the CANopenNode receive handler */
			buffer->pFunct(buffer->object, &rxMsg);
			break;
		}
	}
}
static void canopen_tx_callback(const struct device *dev, int error, void *arg)
{
CO_CANmodule_t *CANmodule = arg;
ARG_UNUSED(dev);
if (!CANmodule) {
LOG_ERR("failed to process CAN tx callback");
return;
}
if (error == 0) {
CANmodule->first_tx_msg = false;
}
k_work_submit_to_queue(&canopen_tx_workq, &canopen_tx_queue.work);
}
/* Work handler: retransmit any frames parked in the TX buffer array
 * because an earlier can_send() returned -EAGAIN. Runs on the dedicated
 * CANopen TX workqueue, triggered from canopen_tx_callback().
 */
static void canopen_tx_retry(struct k_work *item)
{
	struct canopen_tx_work_container *container =
		CONTAINER_OF(item, struct canopen_tx_work_container, work);
	CO_CANmodule_t *CANmodule = container->CANmodule;
	struct can_frame frame;
	CO_CANtx_t *buffer;
	int err;
	uint16_t i;

	memset(&frame, 0, sizeof(frame));
	CO_LOCK_CAN_SEND();

	for (i = 0; i < CANmodule->tx_size; i++) {
		buffer = &CANmodule->tx_array[i];
		if (buffer->bufferFull) {
			frame.id = buffer->ident;
			frame.dlc = buffer->DLC;
			/* Assign (was |=): since `frame` is reused across
			 * iterations, OR-ing let an RTR flag from a previous
			 * buffer leak into every later frame in this pass.
			 */
			frame.flags = (buffer->rtr ? CAN_FRAME_RTR : 0);
			memcpy(frame.data, buffer->data, buffer->DLC);
			err = can_send(CANmodule->dev, &frame, K_NO_WAIT,
				       canopen_tx_callback, CANmodule);
			if (err == -EAGAIN) {
				/* Still no free mailbox: keep this and the
				 * remaining buffers full; the next TX
				 * completion re-submits this work item.
				 */
				break;
			} else if (err != 0) {
				LOG_ERR("failed to send CAN frame (err %d)",
					err);
				CO_errorReport(CANmodule->em,
					       CO_EM_GENERIC_SOFTWARE_ERROR,
					       CO_EMC_COMMUNICATION, 0);
			}
			buffer->bufferFull = false;
		}
	}

	CO_UNLOCK_CAN_SEND();
}
/* CANopenNode hook: enter configuration mode by stopping the CAN
 * interface. -EALREADY (already stopped) is not treated as an error.
 */
void CO_CANsetConfigurationMode(void *CANdriverState)
{
	struct canopen_context *ctx = CANdriverState;
	int ret = can_stop(ctx->dev);

	if (ret != 0 && ret != -EALREADY) {
		LOG_ERR("failed to stop CAN interface (err %d)", ret);
	}
}
/* CANopenNode hook: start the CAN interface and flag the module as
 * operating normally. -EALREADY (already started) is tolerated; any
 * other failure leaves CANnormal unset.
 */
void CO_CANsetNormalMode(CO_CANmodule_t *CANmodule)
{
	int ret = can_start(CANmodule->dev);

	if (ret != 0 && ret != -EALREADY) {
		LOG_ERR("failed to start CAN interface (err %d)", ret);
		return;
	}

	CANmodule->CANnormal = true;
}
/* Initialize the CANopenNode CAN module: validate that enough RX filters
 * are available, reset the RX/TX buffer arrays, and configure the CAN
 * controller bitrate and mode. Returns a CO_ReturnError_t status.
 */
CO_ReturnError_t CO_CANmodule_init(CO_CANmodule_t *CANmodule,
				   void *CANdriverState,
				   CO_CANrx_t rxArray[], uint16_t rxSize,
				   CO_CANtx_t txArray[], uint16_t txSize,
				   uint16_t CANbitRate)
{
	struct canopen_context *ctx = (struct canopen_context *)CANdriverState;
	uint16_t i;
	int err;
	int max_filters;
	LOG_DBG("rxSize = %d, txSize = %d", rxSize, txSize);
	if (!CANmodule || !rxArray || !txArray || !CANdriverState) {
		LOG_ERR("failed to initialize CAN module");
		return CO_ERROR_ILLEGAL_ARGUMENT;
	}
	/* -ENOSYS: driver cannot report a filter count, skip the check */
	max_filters = can_get_max_filters(ctx->dev, false);
	if (max_filters != -ENOSYS) {
		if (max_filters < 0) {
			LOG_ERR("unable to determine number of CAN RX filters");
			return CO_ERROR_SYSCALL;
		}
		if (rxSize > max_filters) {
			LOG_ERR("insufficient number of concurrent CAN RX filters"
				" (needs %d, %d available)", rxSize, max_filters);
			return CO_ERROR_OUT_OF_MEMORY;
		} else if (rxSize < max_filters) {
			/* NOTE(review): logged when fewer filters are needed
			 * than available; "excessive" reads backwards here —
			 * confirm the intended meaning.
			 */
			LOG_DBG("excessive number of concurrent CAN RX filters enabled"
				" (needs %d, %d available)", rxSize, max_filters);
		}
	}
	/* Drop filters from any previous configuration of this module */
	canopen_detach_all_rx_filters(CANmodule);
	canopen_tx_queue.CANmodule = CANmodule;
	CANmodule->dev = ctx->dev;
	CANmodule->rx_array = rxArray;
	CANmodule->rx_size = rxSize;
	CANmodule->tx_array = txArray;
	CANmodule->tx_size = txSize;
	CANmodule->CANnormal = false;
	CANmodule->first_tx_msg = true;
	CANmodule->errors = 0;
	CANmodule->em = NULL;
	/* Mark all RX slots unconfigured (-ENOSPC = no filter attached) */
	for (i = 0U; i < rxSize; i++) {
		rxArray[i].ident = 0U;
		rxArray[i].pFunct = NULL;
		rxArray[i].filter_id = -ENOSPC;
	}
	for (i = 0U; i < txSize; i++) {
		txArray[i].bufferFull = false;
	}
	/* CANbitRate is given in kbit/s; the Zephyr API takes bit/s */
	err = can_set_bitrate(CANmodule->dev, KHZ(CANbitRate));
	if (err) {
		LOG_ERR("failed to configure CAN bitrate (err %d)", err);
		return CO_ERROR_ILLEGAL_ARGUMENT;
	}
	err = can_set_mode(CANmodule->dev, CAN_MODE_NORMAL);
	if (err) {
		LOG_ERR("failed to configure CAN interface (err %d)", err);
		return CO_ERROR_ILLEGAL_ARGUMENT;
	}
	CANmodule->configured = true;
	return CO_ERROR_NO;
}
/* Tear down the CAN module: detach all RX filters and stop the
 * controller. Tolerates an already-stopped interface (-EALREADY).
 */
void CO_CANmodule_disable(CO_CANmodule_t *CANmodule)
{
	int ret;

	if (CANmodule == NULL || CANmodule->dev == NULL) {
		return;
	}

	canopen_detach_all_rx_filters(CANmodule);

	ret = can_stop(CANmodule->dev);
	if (ret != 0 && ret != -EALREADY) {
		LOG_ERR("failed to disable CAN interface (err %d)", ret);
	}
}
/* CANopenNode accessor: return the CAN identifier of a received message. */
uint16_t CO_CANrxMsg_readIdent(const CO_CANrxMsg_t *rxMsg)
{
	return rxMsg->ident;
}
/* Configure RX buffer slot @a index to receive frames matching
 * ident/mask, registering an RX filter with the CAN controller and
 * replacing any filter previously attached to that slot. @a pFunct is
 * invoked from canopen_rx_callback() for each matching frame.
 */
CO_ReturnError_t CO_CANrxBufferInit(CO_CANmodule_t *CANmodule, uint16_t index,
				    uint16_t ident, uint16_t mask, bool_t rtr,
				    void *object,
				    CO_CANrxBufferCallback_t pFunct)
{
	struct can_filter filter;
	CO_CANrx_t *buffer;
	if (CANmodule == NULL) {
		return CO_ERROR_ILLEGAL_ARGUMENT;
	}
	if (!pFunct || (index >= CANmodule->rx_size)) {
		LOG_ERR("failed to initialize CAN rx buffer, illegal argument");
		CO_errorReport(CANmodule->em, CO_EM_GENERIC_SOFTWARE_ERROR,
			       CO_EMC_SOFTWARE_INTERNAL, 0);
		return CO_ERROR_ILLEGAL_ARGUMENT;
	}
	buffer = &CANmodule->rx_array[index];
	buffer->object = object;
	buffer->pFunct = pFunct;
	buffer->ident = ident;
	buffer->mask = mask;
#ifndef CONFIG_CAN_ACCEPT_RTR
	/* Controller configuration rejects RTR frames; refuse the request */
	if (rtr) {
		LOG_ERR("request for RTR frames, but RTR frames are rejected");
		CO_errorReport(CANmodule->em, CO_EM_GENERIC_SOFTWARE_ERROR,
			       CO_EMC_SOFTWARE_INTERNAL, 0);
		return CO_ERROR_ILLEGAL_ARGUMENT;
	}
#else /* !CONFIG_CAN_ACCEPT_RTR */
	buffer->rtr = rtr;
#endif /* CONFIG_CAN_ACCEPT_RTR */
	filter.flags = 0U;
	filter.id = ident;
	filter.mask = mask;
	/* Replace any filter already attached to this slot */
	if (buffer->filter_id != -ENOSPC) {
		can_remove_rx_filter(CANmodule->dev, buffer->filter_id);
	}
	buffer->filter_id = can_add_rx_filter(CANmodule->dev,
					      canopen_rx_callback,
					      CANmodule, &filter);
	if (buffer->filter_id == -ENOSPC) {
		LOG_ERR("failed to add CAN rx callback, no free filter");
		CO_errorReport(CANmodule->em, CO_EM_MEMORY_ALLOCATION_ERROR,
			       CO_EMC_SOFTWARE_INTERNAL, 0);
		return CO_ERROR_OUT_OF_MEMORY;
	}
	return CO_ERROR_NO;
}
/* Configure TX buffer slot @a index with the given identifier, RTR flag,
 * data length and SYNC window flag. Returns a pointer to the buffer for
 * later use with CO_CANsend(), or NULL on invalid arguments.
 */
CO_CANtx_t *CO_CANtxBufferInit(CO_CANmodule_t *CANmodule, uint16_t index,
			       uint16_t ident, bool_t rtr, uint8_t noOfBytes,
			       bool_t syncFlag)
{
	CO_CANtx_t *buffer;

	if (CANmodule == NULL) {
		return NULL;
	}

	if (index >= CANmodule->tx_size) {
		/* Fixed copy/paste from the RX variant: this is the TX init */
		LOG_ERR("failed to initialize CAN tx buffer, illegal argument");
		CO_errorReport(CANmodule->em, CO_EM_GENERIC_SOFTWARE_ERROR,
			       CO_EMC_SOFTWARE_INTERNAL, 0);
		return NULL;
	}

	buffer = &CANmodule->tx_array[index];
	buffer->ident = ident;
	buffer->rtr = rtr;
	buffer->DLC = noOfBytes;
	buffer->bufferFull = false;
	buffer->syncFlag = syncFlag;

	return buffer;
}
/* Queue a single CANopen frame for transmission. If the controller has
 * no free TX mailbox (-EAGAIN), the frame stays parked in @a buffer
 * (bufferFull) and is resent by canopen_tx_retry() after the next TX
 * completion.
 */
CO_ReturnError_t CO_CANsend(CO_CANmodule_t *CANmodule, CO_CANtx_t *buffer)
{
	CO_ReturnError_t ret = CO_ERROR_NO;
	struct can_frame frame;
	int err;
	if (!CANmodule || !CANmodule->dev || !buffer) {
		return CO_ERROR_ILLEGAL_ARGUMENT;
	}
	memset(&frame, 0, sizeof(frame));
	CO_LOCK_CAN_SEND();
	if (buffer->bufferFull) {
		/* A previous frame in this buffer was never transmitted:
		 * report TX overflow, but suppress it until the first
		 * successful transmission has been seen.
		 */
		if (!CANmodule->first_tx_msg) {
			CO_errorReport(CANmodule->em, CO_EM_CAN_TX_OVERFLOW,
				       CO_EMC_CAN_OVERRUN, buffer->ident);
		}
		buffer->bufferFull = false;
		ret = CO_ERROR_TX_OVERFLOW;
	}
	frame.id = buffer->ident;
	frame.dlc = buffer->DLC;
	frame.flags = (buffer->rtr ? CAN_FRAME_RTR : 0);
	memcpy(frame.data, buffer->data, buffer->DLC);
	err = can_send(CANmodule->dev, &frame, K_NO_WAIT, canopen_tx_callback,
		       CANmodule);
	if (err == -EAGAIN) {
		/* Park the frame for the retry work item */
		buffer->bufferFull = true;
	} else if (err != 0) {
		LOG_ERR("failed to send CAN frame (err %d)", err);
		CO_errorReport(CANmodule->em, CO_EM_GENERIC_SOFTWARE_ERROR,
			       CO_EMC_COMMUNICATION, 0);
		ret = CO_ERROR_TX_UNCONFIGURED;
	}
	CO_UNLOCK_CAN_SEND();
	return ret;
}
/* Drop every synchronous TPDO still parked in the TX buffers (frames
 * flagged syncFlag that missed their SYNC window) and report
 * CO_EM_TPDO_OUTSIDE_WINDOW if anything was discarded.
 */
void CO_CANclearPendingSyncPDOs(CO_CANmodule_t *CANmodule)
{
	bool_t dropped = false;
	uint16_t idx;

	if (CANmodule == NULL) {
		return;
	}

	CO_LOCK_CAN_SEND();
	for (idx = 0U; idx < CANmodule->tx_size; idx++) {
		CO_CANtx_t *tx = &CANmodule->tx_array[idx];

		if (tx->bufferFull && tx->syncFlag) {
			tx->bufferFull = false;
			dropped = true;
		}
	}
	CO_UNLOCK_CAN_SEND();

	if (dropped) {
		CO_errorReport(CANmodule->em, CO_EM_TPDO_OUTSIDE_WINDOW,
			       CO_EMC_COMMUNICATION, 0);
	}
}
/* Poll the CAN controller state and error counters and translate any
 * change into CANopenNode emergency (EMCY) reports. The thresholds used
 * (96 = warning, 128 = error-passive) match the classic CAN error
 * counter limits.
 */
void CO_CANverifyErrors(CO_CANmodule_t *CANmodule)
{
	CO_EM_t *em = (CO_EM_t *)CANmodule->em;
	struct can_bus_err_cnt err_cnt;
	enum can_state state;
	uint8_t rx_overflows;
	uint32_t errors;
	int err;
	/*
	 * TODO: Zephyr lacks an API for reading the rx mailbox
	 * overflow counter.
	 */
	rx_overflows = 0;
	err = can_get_state(CANmodule->dev, &state, &err_cnt);
	if (err != 0) {
		LOG_ERR("failed to get CAN controller state (err %d)", err);
		return;
	}
	/* Pack both counters and the overflow count into one word so a
	 * change in any of them is detected with a single comparison.
	 */
	errors = ((uint32_t)err_cnt.tx_err_cnt << 16) |
		 ((uint32_t)err_cnt.rx_err_cnt << 8) |
		 rx_overflows;
	if (errors != CANmodule->errors) {
		CANmodule->errors = errors;
		if (state == CAN_STATE_BUS_OFF) {
			/* Bus off */
			CO_errorReport(em, CO_EM_CAN_TX_BUS_OFF,
				       CO_EMC_BUS_OFF_RECOVERED, errors);
		} else {
			/* Bus not off */
			CO_errorReset(em, CO_EM_CAN_TX_BUS_OFF, errors);
			if ((err_cnt.rx_err_cnt >= 96U) ||
			    (err_cnt.tx_err_cnt >= 96U)) {
				/* Bus warning */
				CO_errorReport(em, CO_EM_CAN_BUS_WARNING,
					       CO_EMC_NO_ERROR, errors);
			} else {
				/* Bus not warning */
				CO_errorReset(em, CO_EM_CAN_BUS_WARNING,
					      errors);
			}
			if (err_cnt.rx_err_cnt >= 128U) {
				/* Bus rx passive */
				CO_errorReport(em, CO_EM_CAN_RX_BUS_PASSIVE,
					       CO_EMC_CAN_PASSIVE, errors);
			} else {
				/* Bus not rx passive */
				CO_errorReset(em, CO_EM_CAN_RX_BUS_PASSIVE,
					      errors);
			}
			/* TX passive is ignored until the first successful
			 * transmission to avoid a false alarm at startup.
			 */
			if (err_cnt.tx_err_cnt >= 128U &&
			    !CANmodule->first_tx_msg) {
				/* Bus tx passive */
				CO_errorReport(em, CO_EM_CAN_TX_BUS_PASSIVE,
					       CO_EMC_CAN_PASSIVE, errors);
			} else if (CO_isError(em, CO_EM_CAN_TX_BUS_PASSIVE)) {
				/* Bus not tx passive */
				CO_errorReset(em, CO_EM_CAN_TX_BUS_PASSIVE,
					      errors);
				CO_errorReset(em, CO_EM_CAN_TX_OVERFLOW,
					      errors);
			}
		}
		/* This code can be activated if we can read the overflows*/
		if (false && rx_overflows != 0U) {
			CO_errorReport(em, CO_EM_CAN_RXB_OVERFLOW,
				       CO_EMC_CAN_OVERRUN, errors);
		}
	}
}
/* Boot-time hook (SYS_INIT, APPLICATION level): start the CANopen TX
 * retry workqueue, name its thread, and initialize the retry work item.
 */
static int canopen_init(void)
{
	k_work_queue_start(&canopen_tx_workq, canopen_tx_workq_stack,
			   K_KERNEL_STACK_SIZEOF(canopen_tx_workq_stack),
			   CONFIG_CANOPENNODE_TX_WORKQUEUE_PRIORITY, NULL);
	k_thread_name_set(&canopen_tx_workq.thread, "canopen_tx_workq");
	k_work_init(&canopen_tx_queue.work, canopen_tx_retry);
	return 0;
}
SYS_INIT(canopen_init, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
``` | /content/code_sandbox/modules/canopennode/CO_driver.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,782 |
```python
#!/bin/python3
import re
import os
import sys
import argparse
from typing import List
SCRIPT_PATH = os.path.dirname(__file__)
INPUT_REL_PATH = os.path.join("..", "..", "..", "modules", "crypto", "mbedtls",
"include", "psa", "crypto_config.h")
INPUT_FILE = os.path.normpath(os.path.join(SCRIPT_PATH, INPUT_REL_PATH))
KCONFIG_PATH=os.path.join(SCRIPT_PATH, "Kconfig.psa")
HEADER_PATH=os.path.join(SCRIPT_PATH, "configs", "config-psa.h")
KCONFIG_HEADER="""\
# This file was automatically generated by {}
# from: {}.
# Do not edit it manually.
config PSA_CRYPTO_CLIENT
bool
help
Promptless symbol to state that there is a PSA crypto API provider
enabled in the system. This allows to select desired PSA_WANT features.
if PSA_CRYPTO_CLIENT
config PSA_CRYPTO_ENABLE_ALL
bool "All PSA crypto features"
""".format(os.path.basename(__file__), INPUT_REL_PATH)
KCONFIG_FOOTER="\nendif # PSA_CRYPTO_CLIENT\n"
H_HEADER="""\
/*
*
*/
/* This file was automatically generated by {}
* from: {}
* Do not edit it manually.
*/
#ifndef CONFIG_PSA_H
#define CONFIG_PSA_H
""".format(os.path.basename(__file__), INPUT_REL_PATH)
H_FOOTER="\n#endif /* CONFIG_PSA_H */\n"
def parse_psa_symbols(input_file: str):
    """Collect every PSA_WANT_* symbol #define'd in *input_file*, in file order."""
    pattern = re.compile(r"^#define *(PSA_WANT_\w+)")
    found = []
    with open(input_file) as src:
        for line in src:
            match = pattern.match(line)
            if match:
                found.append(match.group(1))
    return found
def generate_kconfig_content(symbols: List[str]) -> str:
    """Render the Kconfig.psa text: fixed header, one entry per symbol, footer."""
    entry = """
config {0}
\tbool "{0}" if !MBEDTLS_PROMPTLESS
\tdefault y if PSA_CRYPTO_ENABLE_ALL
"""
    body = "".join(entry.format(sym) for sym in symbols)
    return KCONFIG_HEADER + body + KCONFIG_FOOTER
def generate_header_content(symbols: List[str]) -> str:
    """Render the config-psa.h text: one CONFIG_-guarded #define per symbol."""
    entry = """
#if defined(CONFIG_{0})
#define {0} 1
#endif
"""
    body = "".join(entry.format(sym) for sym in symbols)
    return H_HEADER + body + H_FOOTER
def generate_output_file(content: str, file_name: str):
    """Write *content* to *file_name*, replacing any existing file."""
    with open(file_name, "w") as out:
        out.write(content)
def check_file(content: str, file_name: str):
    """Compare *file_name*'s current contents against *content*.

    Returns True when they match. On mismatch, prints which file is out
    of date (the original printed only a blank line) and returns False
    so the caller can fail the --check run.
    """
    with open(file_name) as input_file:
        file_content = input_file.read()
    if file_content != content:
        # Name the stale file so a failing check is actionable.
        print("Error: {} does not match the generated content".format(file_name))
        return False
    return True
def main():
    """Entry point: regenerate Kconfig.psa and config-psa.h from the Mbed TLS
    PSA crypto_config.h, or verify them when --check is given (exit 1 on
    mismatch, 0 otherwise)."""
    arg_parser = argparse.ArgumentParser(allow_abbrev = False)
    arg_parser.add_argument("--check", action = "store_true", default = False)
    args = arg_parser.parse_args()
    check_files = args.check
    psa_symbols = parse_psa_symbols(INPUT_FILE)
    kconfig_content = generate_kconfig_content(psa_symbols)
    header_content = generate_header_content(psa_symbols)
    if check_files:
        if ((not check_file(kconfig_content, KCONFIG_PATH)) or
            (not check_file(header_content, HEADER_PATH))):
            # Fixed: the original implicit string concatenation lacked a
            # space and printed "currentversion".
            print("Error: PSA Kconfig and header files do not match with the current "
                "version of MbedTLS. Please update them.")
            sys.exit(1)
    else:
        generate_output_file(kconfig_content, KCONFIG_PATH)
        generate_output_file(header_content, HEADER_PATH)
    sys.exit(0)
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/modules/mbedtls/create_psa_files.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 789 |
```unknown
# This file was automatically generated by create_psa_files.py
# from: ../../../modules/crypto/mbedtls/include/psa/crypto_config.h.
# Do not edit it manually.
config PSA_CRYPTO_CLIENT
bool
help
Promptless symbol to state that there is a PSA crypto API provider
enabled in the system. This allows to select desired PSA_WANT features.
if PSA_CRYPTO_CLIENT
config PSA_CRYPTO_ENABLE_ALL
bool "All PSA crypto features"
config PSA_WANT_ALG_CBC_NO_PADDING
bool "PSA_WANT_ALG_CBC_NO_PADDING" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_CBC_PKCS7
bool "PSA_WANT_ALG_CBC_PKCS7" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_CCM
bool "PSA_WANT_ALG_CCM" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_CCM_STAR_NO_TAG
bool "PSA_WANT_ALG_CCM_STAR_NO_TAG" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_CMAC
bool "PSA_WANT_ALG_CMAC" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_CFB
bool "PSA_WANT_ALG_CFB" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_CHACHA20_POLY1305
bool "PSA_WANT_ALG_CHACHA20_POLY1305" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_CTR
bool "PSA_WANT_ALG_CTR" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_DETERMINISTIC_ECDSA
bool "PSA_WANT_ALG_DETERMINISTIC_ECDSA" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_ECB_NO_PADDING
bool "PSA_WANT_ALG_ECB_NO_PADDING" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_ECDH
bool "PSA_WANT_ALG_ECDH" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_FFDH
bool "PSA_WANT_ALG_FFDH" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_ECDSA
bool "PSA_WANT_ALG_ECDSA" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_JPAKE
bool "PSA_WANT_ALG_JPAKE" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_GCM
bool "PSA_WANT_ALG_GCM" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_HKDF
bool "PSA_WANT_ALG_HKDF" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_HKDF_EXTRACT
bool "PSA_WANT_ALG_HKDF_EXTRACT" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_HKDF_EXPAND
bool "PSA_WANT_ALG_HKDF_EXPAND" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_HMAC
bool "PSA_WANT_ALG_HMAC" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_MD5
bool "PSA_WANT_ALG_MD5" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_OFB
bool "PSA_WANT_ALG_OFB" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_PBKDF2_HMAC
bool "PSA_WANT_ALG_PBKDF2_HMAC" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_PBKDF2_AES_CMAC_PRF_128
bool "PSA_WANT_ALG_PBKDF2_AES_CMAC_PRF_128" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_RIPEMD160
bool "PSA_WANT_ALG_RIPEMD160" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_RSA_OAEP
bool "PSA_WANT_ALG_RSA_OAEP" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_RSA_PKCS1V15_CRYPT
bool "PSA_WANT_ALG_RSA_PKCS1V15_CRYPT" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_RSA_PKCS1V15_SIGN
bool "PSA_WANT_ALG_RSA_PKCS1V15_SIGN" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_RSA_PSS
bool "PSA_WANT_ALG_RSA_PSS" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA_1
bool "PSA_WANT_ALG_SHA_1" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA_224
bool "PSA_WANT_ALG_SHA_224" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA_256
bool "PSA_WANT_ALG_SHA_256" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA_384
bool "PSA_WANT_ALG_SHA_384" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA_512
bool "PSA_WANT_ALG_SHA_512" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA3_224
bool "PSA_WANT_ALG_SHA3_224" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA3_256
bool "PSA_WANT_ALG_SHA3_256" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA3_384
bool "PSA_WANT_ALG_SHA3_384" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_SHA3_512
bool "PSA_WANT_ALG_SHA3_512" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_STREAM_CIPHER
bool "PSA_WANT_ALG_STREAM_CIPHER" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_TLS12_PRF
bool "PSA_WANT_ALG_TLS12_PRF" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_TLS12_PSK_TO_MS
bool "PSA_WANT_ALG_TLS12_PSK_TO_MS" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ALG_TLS12_ECJPAKE_TO_PMS
bool "PSA_WANT_ALG_TLS12_ECJPAKE_TO_PMS" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_BRAINPOOL_P_R1_256
bool "PSA_WANT_ECC_BRAINPOOL_P_R1_256" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_BRAINPOOL_P_R1_384
bool "PSA_WANT_ECC_BRAINPOOL_P_R1_384" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_BRAINPOOL_P_R1_512
bool "PSA_WANT_ECC_BRAINPOOL_P_R1_512" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_MONTGOMERY_255
bool "PSA_WANT_ECC_MONTGOMERY_255" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_MONTGOMERY_448
bool "PSA_WANT_ECC_MONTGOMERY_448" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_SECP_K1_192
bool "PSA_WANT_ECC_SECP_K1_192" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_SECP_K1_256
bool "PSA_WANT_ECC_SECP_K1_256" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_SECP_R1_192
bool "PSA_WANT_ECC_SECP_R1_192" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_SECP_R1_224
bool "PSA_WANT_ECC_SECP_R1_224" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_SECP_R1_256
bool "PSA_WANT_ECC_SECP_R1_256" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_SECP_R1_384
bool "PSA_WANT_ECC_SECP_R1_384" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_ECC_SECP_R1_521
bool "PSA_WANT_ECC_SECP_R1_521" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_DH_RFC7919_2048
bool "PSA_WANT_DH_RFC7919_2048" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_DH_RFC7919_3072
bool "PSA_WANT_DH_RFC7919_3072" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_DH_RFC7919_4096
bool "PSA_WANT_DH_RFC7919_4096" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_DH_RFC7919_6144
bool "PSA_WANT_DH_RFC7919_6144" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_DH_RFC7919_8192
bool "PSA_WANT_DH_RFC7919_8192" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_DERIVE
bool "PSA_WANT_KEY_TYPE_DERIVE" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_PASSWORD
bool "PSA_WANT_KEY_TYPE_PASSWORD" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_PASSWORD_HASH
bool "PSA_WANT_KEY_TYPE_PASSWORD_HASH" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_HMAC
bool "PSA_WANT_KEY_TYPE_HMAC" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_AES
bool "PSA_WANT_KEY_TYPE_AES" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_ARIA
bool "PSA_WANT_KEY_TYPE_ARIA" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_CAMELLIA
bool "PSA_WANT_KEY_TYPE_CAMELLIA" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_CHACHA20
bool "PSA_WANT_KEY_TYPE_CHACHA20" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_DES
bool "PSA_WANT_KEY_TYPE_DES" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_ECC_PUBLIC_KEY
bool "PSA_WANT_KEY_TYPE_ECC_PUBLIC_KEY" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_DH_PUBLIC_KEY
bool "PSA_WANT_KEY_TYPE_DH_PUBLIC_KEY" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_RAW_DATA
bool "PSA_WANT_KEY_TYPE_RAW_DATA" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY
bool "PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_BASIC
bool "PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_BASIC" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_IMPORT
bool "PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_IMPORT" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_EXPORT
bool "PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_EXPORT" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_GENERATE
bool "PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_GENERATE" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_DERIVE
bool "PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_DERIVE" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_BASIC
bool "PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_BASIC" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_IMPORT
bool "PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_IMPORT" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_EXPORT
bool "PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_EXPORT" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_GENERATE
bool "PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_GENERATE" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_DH_KEY_PAIR_BASIC
bool "PSA_WANT_KEY_TYPE_DH_KEY_PAIR_BASIC" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_DH_KEY_PAIR_IMPORT
bool "PSA_WANT_KEY_TYPE_DH_KEY_PAIR_IMPORT" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_DH_KEY_PAIR_EXPORT
bool "PSA_WANT_KEY_TYPE_DH_KEY_PAIR_EXPORT" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
config PSA_WANT_KEY_TYPE_DH_KEY_PAIR_GENERATE
bool "PSA_WANT_KEY_TYPE_DH_KEY_PAIR_GENERATE" if !MBEDTLS_PROMPTLESS
default y if PSA_CRYPTO_ENABLE_ALL
endif # PSA_CRYPTO_CLIENT
``` | /content/code_sandbox/modules/mbedtls/Kconfig.psa | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,528 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.