text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```c
/*
*
*/
#include "nsi_cpu_if.h"
#include "nsi_tracing.h"
/*
* Stubbed embedded CPU images, which do nothing:
* The CPU does not boot, and interrupts are just ignored
* These are all defined as weak, so if an actual image is present for that CPU,
* that will be linked against.
*
* This exists in case the total device image is assembled lacking some of the embedded CPU images
*/
/*
 * Trace that a CPU was asked to boot but no image was linked for it.
 * <func> is the name of the calling weak stub (one of the nsif_cpu<n>_boot
 * trampolines below), so the message identifies which CPU it was.
 */
static void nsi_boot_warning(const char *func)
{
nsi_print_trace("%s: Attempted boot of CPU without image. "
"CPU shut down permanently\n", func);
}
/*
* These will define N functions like
* int nsif_cpu<n>_cleanup(void) { return 0; }
*/
/*
 * Weak do-nothing stubs for every per-CPU embedded interface hook.
 * Each F_TRAMP_BODY_LIST() line expands into 16 function definitions,
 * one per possible CPU, e.g. void nsif_cpu<n>_pre_cmdline_hooks(void) { }
 */
F_TRAMP_BODY_LIST(__attribute__((weak)) void nsif_cpu, _pre_cmdline_hooks(void) { })
F_TRAMP_BODY_LIST(__attribute__((weak)) void nsif_cpu, _pre_hw_init_hooks(void) { })
/* A CPU with no image cannot boot: warn and leave it down permanently */
F_TRAMP_BODY_LIST(__attribute__((weak)) void nsif_cpu,
_boot(void) { nsi_boot_warning(__func__); })
F_TRAMP_BODY_LIST(__attribute__((weak)) int nsif_cpu, _cleanup(void) { return 0; })
/* Interrupts raised towards an image-less CPU are just ignored */
F_TRAMP_BODY_LIST(__attribute__((weak)) void nsif_cpu, _irq_raised(void) { })
F_TRAMP_BODY_LIST(__attribute__((weak)) void nsif_cpu, _irq_raised_from_sw(void) { })
F_TRAMP_BODY_LIST(__attribute__((weak)) int nsif_cpu, _test_hook(void *p) { return 0; })
``` | /content/code_sandbox/scripts/native_simulator/common/src/nsi_weak_stubs.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 337 |
```objective-c
/*
*
*
* The native simulator provides a set of trampolines to some of the simplest
* host C library symbols.
* These are intended to facilitate test embedded code interacting with the host.
*
* We should never include here symbols which require host headers be exposed
* to the embedded side, for example due to non-basic types being used in
* function calls, as that would break the include path isolation
*
* Naming convention: nsi_host_<func>() where <func> is the name of the equivalent
* C library function we call through
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_HOST_TRAMPOLINES_H
#define NSI_COMMON_SRC_INCL_NSI_HOST_TRAMPOLINES_H
#ifdef __cplusplus
extern "C" {
#endif
void *nsi_host_calloc(unsigned long nmemb, unsigned long size);
int nsi_host_close(int fd);
/* void nsi_host_exit (int status); Use nsi_exit() instead */
void nsi_host_free(void *ptr);
char *nsi_host_getcwd(char *buf, unsigned long size);
int nsi_host_isatty(int fd);
void *nsi_host_malloc(unsigned long size);
int nsi_host_open(const char *pathname, int flags);
/* int nsi_host_printf (const char *fmt, ...); Use the nsi_tracing.h equivalents */
long nsi_host_random(void);
long nsi_host_read(int fd, void *buffer, unsigned long size);
void *nsi_host_realloc(void *ptr, unsigned long size);
void nsi_host_srandom(unsigned int seed);
char *nsi_host_strdup(const char *s);
long nsi_host_write(int fd, const void *buffer, unsigned long size);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NSI_HOST_TRAMPOLINES_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_host_trampolines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 381 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_MAIN_H
#define NSI_COMMON_SRC_INCL_NSI_MAIN_H
#include "nsi_utils.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Like nsi_exit(), do all cleanup required to terminate the
* execution of the native_simulator, but instead of exiting,
* return to the caller what would have been passed to exit()
*
* @param[in] exit_code: Requested exit code to the shell
* Note that other components may have requested a different
* exit code which may have precedence if it was !=0
*
* @returns Code which would have been passed to exit()
*/
int nsi_exit_inner(int exit_code);
/**
* @brief Terminate the execution of the native simulator
*
* @param[in] exit_code: Requested exit code to the shell
* Note that other components may have requested a different
* exit code which may have precedence if it was !=0
*/
NSI_FUNC_NORETURN void nsi_exit(int exit_code);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NSI_MAIN_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_main.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 248 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_CPU_IF_INTERNAL_H
#define NSI_COMMON_SRC_INCL_NSI_CPU_IF_INTERNAL_H
#include "nsi_utils.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * FUNCT(i, pre, post) token-pastes its arguments together:
 * FUNCT(3, nsif_cpu, _boot) -> nsif_cpu3_boot
 */
#define FUNCT(i, pre, post) \
	pre##i##post

/*
 * Expand FUNCT(i, pre, post) for every possible embedded CPU (i = 0..15),
 * separating the expansions with <sep>: a bracketed token list like (,) or
 * (;) which NSI_DEBRACKET unwraps.
 *
 * Note: the final entry must NOT end with a line continuation, otherwise
 * this macro definition would swallow whatever source line follows it.
 */
#define FUNCT_LIST(pre, post, sep) \
	FUNCT(0, pre, post) NSI_DEBRACKET sep \
	FUNCT(1, pre, post) NSI_DEBRACKET sep \
	FUNCT(2, pre, post) NSI_DEBRACKET sep \
	FUNCT(3, pre, post) NSI_DEBRACKET sep \
	FUNCT(4, pre, post) NSI_DEBRACKET sep \
	FUNCT(5, pre, post) NSI_DEBRACKET sep \
	FUNCT(6, pre, post) NSI_DEBRACKET sep \
	FUNCT(7, pre, post) NSI_DEBRACKET sep \
	FUNCT(8, pre, post) NSI_DEBRACKET sep \
	FUNCT(9, pre, post) NSI_DEBRACKET sep \
	FUNCT(10, pre, post) NSI_DEBRACKET sep \
	FUNCT(11, pre, post) NSI_DEBRACKET sep \
	FUNCT(12, pre, post) NSI_DEBRACKET sep \
	FUNCT(13, pre, post) NSI_DEBRACKET sep \
	FUNCT(14, pre, post) NSI_DEBRACKET sep \
	FUNCT(15, pre, post) NSI_DEBRACKET sep

/* Comma separated table of the 16 per-CPU symbols (for initializer lists) */
#define F_TRAMP_TABLE(pre, post) FUNCT_LIST(pre, post, (,))
/* Semicolon separated list (for prototypes/declarations) */
#define F_TRAMP_LIST(pre, post) FUNCT_LIST(pre, post, (;))
/* Plain juxtaposition (for function definitions with bodies) */
#define F_TRAMP_BODY_LIST(pre, post) FUNCT_LIST(pre, post, ())
/*
 * Define a runtime dispatcher "void pre<literal n>post(int n)" which calls
 * the n-th generated function "void pre<n>post(void)".
 * n must be in 0..15; no bounds checking is performed here.
 */
#define TRAMPOLINES(pre, post) \
void pre ## n ## post(int n) \
{ \
void(*fptrs[])(void) = { \
F_TRAMP_TABLE(pre, post) \
}; \
fptrs[n](); \
}
/*
 * As TRAMPOLINES(), but for hooks of type int (*)(void *):
 * forwards <p> and returns the dispatched call's result
 */
#define TRAMPOLINES_i_vp(pre, post) \
int pre ## n ## post(int n, void *p) \
{ \
int(*fptrs[])(void *p) = { \
F_TRAMP_TABLE(pre, post) \
}; \
return fptrs[n](p); \
}
/*
 * As TRAMPOLINES(), but for hooks of type int (*)(void):
 * returns the dispatched call's result
 */
#define TRAMPOLINES_i_(pre, post) \
int pre ## n ## post(int n) \
{ \
int(*fptrs[])(void) = { \
F_TRAMP_TABLE(pre, post) \
}; \
return fptrs[n](); \
}
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NSI_CPU_IF_INTERNAL_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_cpu_if_internal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 616 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_MAIN_SEMIPUBLIC_H
#define NSI_COMMON_SRC_INCL_NSI_MAIN_SEMIPUBLIC_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* These APIs are exposed for special use cases in which a developer needs to
* replace the native simulator main loop.
* An example of such a case is LLVMs fuzzing support. For this one sets
* NSI_NO_MAIN, and provides a specialized main() or hooks into the tooling
* provided main().
*
* These APIs should be used with care, and not be used when the native
* simulator main() is built in.
*
* Check nsi_main.c for more information.
*/
void nsi_init(int argc, char *argv[]);
void nsi_exec_for(uint64_t us);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NSI_MAIN_SEMIPUBLIC_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_main_semipublic.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 202 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_HWS_MODELS_IF_H
#define NSI_COMMON_SRC_INCL_HWS_MODELS_IF_H
#include <stdint.h>
#include "nsi_utils.h"
#include "nsi_hw_scheduler.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Internal structure used to link HW events */
struct nsi_hw_event_st {
/* Called by the HW scheduler when *timer is reached */
void (*const callback)(void);
/* Points at the event's (next) trigger time */
uint64_t *timer;
};
/**
 * Register an event timer and event callback
 *
 * The HW scheduler will keep track of this event, and call its callback whenever its
 * timer is reached.
 * The ordering of events in the same microsecond is given by prio (lowest first).
 * (Normally HW models will not care about the event ordering, and will simply set a prio like 100)
 *
 * Only very particular models will need to execute before or after others.
 *
 * Priority can be a number between 0 and 999.
 */
/* The prio is encoded in the section name so a linker script can order events */
#define NSI_HW_EVENT(t, fn, prio) \
static const struct nsi_hw_event_st NSI_CONCAT(NSI_CONCAT(__nsi_hw_event_, fn), t) \
__attribute__((__used__)) NSI_NOASAN \
__attribute__((__section__(".nsi_hw_event_" NSI_STRINGIFY(prio)))) \
= { \
.callback = fn, \
.timer = &t, \
}
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_HWS_MODELS_IF_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_hws_models_if.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 327 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_HW_SCHEDULER_H
#define NSI_COMMON_SRC_INCL_HW_SCHEDULER_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define NSI_NEVER UINT64_MAX
/* API intended for the native simulator specific embedded drivers: */
/* Return the current simulated time (in microseconds) */
static inline uint64_t nsi_hws_get_time(void)
{
/* Owned/updated by the HW scheduler; declared here to avoid exposing it */
extern uint64_t nsi_simu_time;
return nsi_simu_time;
}
/* Internal APIs to the native_simulator and its HW models: */
void nsi_hws_init(void);
void nsi_hws_cleanup(void);
void nsi_hws_one_event(void);
void nsi_hws_set_end_of_time(uint64_t new_end_of_time);
void nsi_hws_find_next_event(void);
uint64_t nsi_hws_get_next_event_time(void);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_HW_SCHEDULER_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_hw_scheduler.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 196 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_UTILS_H
#define NSI_COMMON_SRC_INCL_NSI_UTILS_H
/* Remove brackets from around a single argument: */
#define NSI_DEBRACKET(...) __VA_ARGS__
#define _NSI_STRINGIFY(x) #x
#define NSI_STRINGIFY(s) _NSI_STRINGIFY(s)
/* concatenate the values of the arguments into one */
#define NSI_DO_CONCAT(x, y) x ## y
#define NSI_CONCAT(x, y) NSI_DO_CONCAT(x, y)
#define NSI_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define NSI_MIN(a, b) (((a) < (b)) ? (a) : (b))
#ifndef NSI_ARG_UNUSED
#define NSI_ARG_UNUSED(x) (void)(x)
#endif
#define NSI_CODE_UNREACHABLE __builtin_unreachable()
#define NSI_FUNC_NORETURN __attribute__((__noreturn__))
#if defined(__clang__)
/* The address sanitizer in llvm adds padding (redzones) after data
* But for those we are re-grouping using the linker script
* we cannot have that extra padding as we intend to iterate over them
*/
#define NSI_NOASAN __attribute__((no_sanitize("address")))
#else
#define NSI_NOASAN
#endif
#endif /* NSI_COMMON_SRC_INCL_NSI_UTILS_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_utils.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 304 |
```objective-c
/*
*
*/
/*
* Interfaces the Native Simulator provides to
* the embedded CPU SW
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_CPU_ES_IF_H
#define NSI_COMMON_SRC_INCL_NSI_CPU_ES_IF_H
#include "nsi_tracing.h"
#include "nsi_main.h"
#include "nsi_hw_scheduler.h"
#endif /* NSI_COMMON_SRC_INCL_NSI_CPU_ES_IF_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_cpu_es_if.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 86 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_CPU_CTRL_H
#define NSI_COMMON_SRC_INCL_NSI_CPU_CTRL_H
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Define if a CPU should automatically start at boot or not
*
* @param[in] cpu_n: Which CPU
* @param[in] auto_start: If true, it will autostart on its own,
* if 0, it won't
*/
void nsi_cpu_set_auto_start(int cpu_n, bool auto_start);
bool nsi_cpu_get_auto_start(int cpu_n);
/**
* @brief Boot CPU <cpu_n>
*
* Note: This API may only be called if that CPU was not started before
*
* @param[in] cpu_n: Which CPU
*/
void nsi_cpu_boot(int cpu_n);
/*
* Internal native simulator runner API.
* Boot all CPUs which are configured to boot automatically
*/
void nsi_cpu_auto_boot(void);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NSI_CPU_CTRL_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_cpu_ctrl.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 232 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NCE_IF_H
#define NSI_COMMON_SRC_INCL_NCE_IF_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* Native simulator CPU start/stop emulation module interface
*
* Check docs/NCE.md for an overview.
*
* A descriptions of each function can be found in the .c file
*/
void *nce_init(void);
void nce_terminate(void *this);
void nce_boot_cpu(void *this, void (*start_routine)(void));
void nce_halt_cpu(void *this);
void nce_wake_cpu(void *this);
int nce_is_cpu_running(void *this);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NCE_IF_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nce_if.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 161 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_TRACING_H
#define NSI_COMMON_SRC_INCL_NSI_TRACING_H
#include <stdarg.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Native simulator tracing API:
* Print a message/warning/error to the tracing backend
* and in case of nsi_print_error_and_exit() also call nsi_exit()
*
* All print()/vprint() APIs take the same arguments as printf()/vprintf().
*/
void nsi_print_error_and_exit(const char *format, ...);
void nsi_print_warning(const char *format, ...);
void nsi_print_trace(const char *format, ...);
void nsi_vprint_error_and_exit(const char *format, va_list vargs);
void nsi_vprint_warning(const char *format, va_list vargs);
void nsi_vprint_trace(const char *format, va_list vargs);
/*
* @brief Is the tracing backend connected to a ptty/terminal or not
*
* @param nbr: Which output. Options are: 0 trace output, 1: warning and error output
*
* @return
* 0 : Not a ptty (i.e. probably a pipe to another program)
* 1 : Connected to a ptty (for ex. stdout/err to the invoking terminal)
* -1: Unknown at this point
*/
int nsi_trace_over_tty(int nbr);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NSI_TRACING_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_tracing.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 325 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NSI_CPU_IF_H
#define NSI_COMMON_SRC_INCL_NSI_CPU_IF_H
#ifdef __cplusplus
extern "C" {
#endif
#include "nsi_cpu_if_internal.h"
/*
* Any symbol annotated by this macro will be visible outside of the
* embedded SW library, both by the native simulator runner,
* and other possible embedded CPU's SW.
*/
#define NATIVE_SIMULATOR_IF __attribute__((visibility("default"))) \
__attribute__((__section__(".native_sim_if")))
/*
* Implementation note:
* The interface between the embedded SW and the native simulator is allocated in its
* own section to allow the embedded software developers to, using a linker script,
* direct the linker to keep those symbols even when doing its linking with garbage collection.
* It is also possible for the embedded SW to require the linker to keep those
* symbols by requiring each of them to be kept explicitly by name (either by defining them
* as entry points, or as required in the output).
* It is also possible for the embedded SW developers to not use garbage collection
* during their SW linking.
*/
/*
* Interfaces the Native Simulator _expects_ from the embedded CPUs:
*/
/*
* Called during the earliest initialization (before command line parsing)
*
* The embedded SW library may provide this function to perform any
* early initialization, including registering its own command line arguments
* in the runner.
*/
NATIVE_SIMULATOR_IF void nsif_cpu0_pre_cmdline_hooks(void);
/*
* Called during initialization (before the HW models are initialized)
*
* The embedded SW library may provide this function to perform any
* early initialization, after the command line arguments have been parsed.
*/
NATIVE_SIMULATOR_IF void nsif_cpu0_pre_hw_init_hooks(void);
/*
* Called by the runner to boot the CPU.
*
* The embedded SW library must provide this function.
* This function is expected to return after the embedded CPU
* has gone to sleep for the first time.
*
* The expectation is that the embedded CPU SW will spawn a
* new pthread while in this call, and run the embedded SW
* initialization in that pthread.
*
* It is recommended for the embedded SW to use the NCE (CPU start/stop emulation)
* component to achieve this.
*/
NATIVE_SIMULATOR_IF void nsif_cpu0_boot(void);
/*
* Called by the runner when the simulation is ending/exiting
*
* The embedded SW library may provide this function.
* to do any cleanup it needs.
*/
NATIVE_SIMULATOR_IF int nsif_cpu0_cleanup(void);
/*
* Called by the runner each time an interrupt is raised by the HW
*
* The embedded SW library must provide this function.
* This function is expected to return after the embedded CPU
* has gone back to sleep.
*/
NATIVE_SIMULATOR_IF void nsif_cpu0_irq_raised(void);
/*
* Called by the runner each time an interrupt is raised in SW context itself.
* That is, when an embedded SW action in the HW models, causes an immediate
* interrupt to be raised (while the execution is still in the
* context of the calling SW thread).
*/
NATIVE_SIMULATOR_IF void nsif_cpu0_irq_raised_from_sw(void);
/*
* Optional hook which may be used for test functionality.
* When the runner HW models use them and for what is up to those
* specific models.
*/
NATIVE_SIMULATOR_IF int nsif_cpu0_test_hook(void *p);
/* Provide prototypes for all n instances of these hooks */
F_TRAMP_LIST(NATIVE_SIMULATOR_IF void nsif_cpu, _pre_cmdline_hooks(void))
F_TRAMP_LIST(NATIVE_SIMULATOR_IF void nsif_cpu, _pre_hw_init_hooks(void))
F_TRAMP_LIST(NATIVE_SIMULATOR_IF void nsif_cpu, _boot(void))
F_TRAMP_LIST(NATIVE_SIMULATOR_IF int nsif_cpu, _cleanup(void))
F_TRAMP_LIST(NATIVE_SIMULATOR_IF void nsif_cpu, _irq_raised(void))
F_TRAMP_LIST(NATIVE_SIMULATOR_IF void nsif_cpu, _irq_raised_from_sw(void))
F_TRAMP_LIST(NATIVE_SIMULATOR_IF int nsif_cpu, _test_hook(void *p))
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NSI_CPU_IF_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_cpu_if.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 897 |
```objective-c
/*
*
*/
/**
* @file
* @brief API to the native simulator expects any integrating program has for handling
* command line argument parsing
*/
#ifndef NATIVE_SIMULATOR_COMMON_SRC_NSI_CMDLINE_MAIN_IF_H
#define NATIVE_SIMULATOR_COMMON_SRC_NSI_CMDLINE_MAIN_IF_H
#ifdef __cplusplus
extern "C" {
#endif
void nsi_handle_cmd_line(int argc, char *argv[]);
void nsi_register_extra_args(int argc, char *argv[]);
#ifdef __cplusplus
}
#endif
#endif /* NATIVE_SIMULATOR_COMMON_SRC_NSI_CMDLINE_MAIN_IF_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nsi_cmdline_main_if.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 124 |
```objective-c
/*
*
*/
#ifndef NSI_COMMON_SRC_INCL_NCT_IF_H
#define NSI_COMMON_SRC_INCL_NCT_IF_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* Interface provided by the Native simulator CPU threading emulation
*
* A description of each function can be found in the C file
*
* In docs/NCT.md you can find more information
*/
void *nct_init(void (*fptr)(void *));
void nct_clean_up(void *this);
void nct_swap_threads(void *this, int next_allowed_thread_nbr);
void nct_first_thread_start(void *this, int next_allowed_thread_nbr);
int nct_new_thread(void *this, void *payload);
void nct_abort_thread(void *this, int thread_idx);
int nct_get_unique_thread_id(void *this, int thread_idx);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NCT_IF_H */
``` | /content/code_sandbox/scripts/native_simulator/common/src/include/nct_if.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 193 |
```c
/*
*
*/
#include <stdint.h>
#include "nsi_tracing.h"
#include "native_rtc.h"
#include "nsi_hw_scheduler.h"
#include "nsi_timer_model.h"
/**
* Return the (simulation) time in microseconds
* where clock_type is one of RTC_CLOCK_*
*/
uint64_t native_rtc_gettime_us(int clock_type)
{
if (clock_type == RTC_CLOCK_BOOT) {
return nsi_hws_get_time();
} else if (clock_type == RTC_CLOCK_REALTIME) {
/* Simulated RTC, which may be offset relative to the boot clock */
return hwtimer_get_simu_rtc_time();
} else if (clock_type == RTC_CLOCK_PSEUDOHOSTREALTIME) {
uint32_t nsec;
uint64_t sec;
hwtimer_get_pseudohost_rtc_time(&nsec, &sec);
/* Fold {sec, nsec} into a single microseconds value */
return sec * 1000000UL + nsec / 1000U;
}
nsi_print_error_and_exit("Unknown clock source %i\n",
clock_type);
return 0; /* Unreachable: nsi_print_error_and_exit() exits */
}
/**
* Similar to POSIX clock_gettime()
* get the simulation time split in nsec and seconds
* where clock_type is one of RTC_CLOCK_*
*/
void native_rtc_gettime(int clock_type, uint32_t *nsec, uint64_t *sec)
{
	/* Any clock other than BOOT/REALTIME is treated as PSEUDOHOSTREALTIME */
	if (clock_type != RTC_CLOCK_BOOT && clock_type != RTC_CLOCK_REALTIME) {
		hwtimer_get_pseudohost_rtc_time(nsec, sec);
		return;
	}

	/* Split the microseconds value into {sec, nsec} */
	uint64_t now_us = native_rtc_gettime_us(clock_type);

	*sec = now_us / 1000000UL;
	*nsec = (now_us % 1000000UL) * 1000U;
}
/**
 * Offset the real time clock by a number of microseconds.
 * Note that this only affects the RTC_CLOCK_REALTIME and
 * RTC_CLOCK_PSEUDOHOSTREALTIME clocks.
 */
/* delta_us is signed — presumably a negative value moves the RTC backwards;
 * confirm against hwtimer_adjust_rtc_offset() */
void native_rtc_offset(int64_t delta_us)
{
hwtimer_adjust_rtc_offset(delta_us);
}
/**
 * Adjust the speed of the clock source by a multiplicative factor
 */
/* Thin forwarder to the HW timer model; the factor semantics (e.g. >1 means
 * faster) are defined by hwtimer_adjust_rt_ratio() */
void native_rtc_adjust_clock(double clock_correction)
{
hwtimer_adjust_rt_ratio(clock_correction);
}
``` | /content/code_sandbox/scripts/native_simulator/native/src/native_rtc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 473 |
```c
/*
*
*/
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "nsi_cmdline.h"
#include "nsi_cmdline_internal.h"
#include "nsi_tracing.h"
#include "nsi_timer_model.h"
#include "nsi_hw_scheduler.h"
#include "nsi_tasks.h"
static int s_argc, test_argc;
static char **s_argv, **test_argv;
/* Extra "command line options" provided programmatically: */
static int extra_argc;
static char **extra_argv;
static struct args_struct_t *args_struct;
static int used_args;
static int args_aval;
#define ARGS_ALLOC_CHUNK_SIZE 20
/*
 * Free the dynamically grown options table.
 * Registered below as an exit task so it runs during program cleanup.
 */
static void nsi_cleanup_cmd_line(void)
{
	free(args_struct); /* free(NULL) is a no-op, no guard needed */
	args_struct = NULL;
}

NSI_TASK(nsi_cleanup_cmd_line, ON_EXIT_POST, 0);
/**
* Add a set of command line options to the program.
*
* Each option to be added is described in one entry of the input <args>
* This input must be terminated with an entry containing ARG_TABLE_ENDMARKER.
*/
void nsi_add_command_line_opts(struct args_struct_t *args)
{
int count = 0;
/* Count the caller's entries, up to (excluding) its end marker */
while (args[count].option != NULL) {
count++;
}
count++; /*for the end marker*/
/* Grow the global table if the new entries (incl. marker) do not fit */
if (used_args + count >= args_aval) {
int growby = count;
/* reallocs are expensive let's do them only in big chunks */
if (growby < ARGS_ALLOC_CHUNK_SIZE) {
growby = ARGS_ALLOC_CHUNK_SIZE;
}
struct args_struct_t *new_args_struct = realloc(args_struct,
(args_aval + growby)*
sizeof(struct args_struct_t));
args_aval += growby;
/* LCOV_EXCL_START */
if (new_args_struct == NULL) {
nsi_print_error_and_exit("Could not allocate memory");
} else {
args_struct = new_args_struct;
}
/* LCOV_EXCL_STOP */
}
/* Append the new entries, including the caller's end marker */
memcpy(&args_struct[used_args], args,
count*sizeof(struct args_struct_t));
used_args += count - 1;
/*
* -1 as the end marker should be overwritten next time something
* is added
*/
}
/*
 * Register the special "testargs" option: everything that follows it on the
 * command line is left unparsed by the top level, for test applications.
 */
void nsi_add_testargs_option(void)
{
	static struct args_struct_t testargs_options[] = {
		{
			.option = "testargs",
			.name = "arg",
			.type = 'l',
			.manual = true,
			.descript = "Any argument that follows will be ignored by the top level, "
				"and made available for possible tests"
		},
		ARG_TABLE_ENDMARKER
	};

	nsi_add_command_line_opts(testargs_options);
}
/* Report an unrecognized command line option and exit (does not return) */
static void print_invalid_opt_error(char *argv)
{
nsi_print_error_and_exit("Incorrect option '%s'. Did you misspell it?"
" Is that feature supported in this build?\n",
argv);
}
/**
* Handle possible command line arguments.
*
* We also store them for later use by possible test applications
*/
void nsi_handle_cmd_line(int argc, char *argv[])
{
	/* Fix: the original declared a function-scope `int i` which was
	 * shadowed by the first loop's own `i`; both loops now declare
	 * their index locally, removing the confusing shadowing. */
	nsi_add_testargs_option();

	/* Keep a copy for later retrieval thru nsi_get_cmd_line_args() */
	s_argv = argv;
	s_argc = argc;

	nsi_cmd_args_set_defaults(args_struct);

	/* First parse the programmatically registered "extra" arguments */
	for (int i = 0; i < extra_argc; i++) {
		if (!nsi_cmd_parse_one_arg(extra_argv[i], args_struct)) {
			nsi_cmd_print_switches_help(args_struct);
			print_invalid_opt_error(extra_argv[i]);
		}
	}

	/*
	 * Then the real command line. Everything after "testargs" is left
	 * unparsed, for retrieval thru nsi_get_test_cmd_line_args()
	 */
	for (int i = 1; i < argc; i++) {
		if (nsi_cmd_is_option(argv[i], "testargs", 0)) {
			test_argc = argc - i - 1;
			test_argv = &argv[i + 1];
			break;
		}
		if (!nsi_cmd_parse_one_arg(argv[i], args_struct)) {
			nsi_cmd_print_switches_help(args_struct);
			print_invalid_opt_error(argv[i]);
		}
	}
}
/*
 * Store <argc> extra "command line options" provided programmatically, to be
 * parsed during nsi_handle_cmd_line() before the real command line.
 * Only the pointers are stored (not string copies), so the caller must keep
 * the strings valid.
 */
void nsi_register_extra_args(int argc, char *argv[])
{
	int new_size = extra_argc + argc;
	/* Fix: do not overwrite extra_argv before checking realloc's result */
	char **new_argv = realloc(extra_argv, new_size * sizeof(char *));

	if (new_argv == NULL) { /* LCOV_EXCL_BR_LINE */
		return; /* LCOV_EXCL_LINE: OOM; keep previously stored args */
	}
	extra_argv = new_argv;
	/*
	 * Fix: one single copy of the new pointers. The original repeated the
	 * identical full-range memcpy <argc> times inside a loop.
	 */
	memcpy(&extra_argv[extra_argc], argv, argc * sizeof(char *));
	extra_argc = new_size;
}
/* Release the storage for the programmatically registered (extra) options.
 * Registered as an exit task; the pointed-to strings are owned by callers. */
static void clear_extra_args(void)
{
free(extra_argv);
}
NSI_TASK(clear_extra_args, ON_EXIT_PRE, 100);
/**
 * The application/test can use this function to inspect all the command line
 * arguments
 */
/* Returns (by reference) the argc/argv stored by nsi_handle_cmd_line() */
void nsi_get_cmd_line_args(int *argc, char ***argv)
{
*argc = s_argc;
*argv = s_argv;
}
/**
 * The application/test can use this function to inspect the command line
 * arguments received after --testargs
 */
/* Both outputs are 0/NULL if no "--testargs" was present on the command line */
void nsi_get_test_cmd_line_args(int *argc, char ***argv)
{
*argc = test_argc;
*argv = test_argv;
}
``` | /content/code_sandbox/scripts/native_simulator/native/src/nsi_cmdline.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,092 |
```objective-c
/*
*
*/
#ifndef NATIVE_SIMULATOR_NATIVE_SRC_NSI_CMDLINE_INTERNAL_H
#define NATIVE_SIMULATOR_NATIVE_SRC_NSI_CMDLINE_INTERNAL_H
#include <stdbool.h>
#include <stddef.h>
#include "nsi_cmdline.h"
#ifdef __cplusplus
extern "C" {
#endif
#define _MAX_LINE_WIDTH 100 /*Total width of the help message*/
/* Horizontal alignment of the 2nd column of the help message */
#define _LONG_HELP_ALIGN 30
#define _MAXOPT_SWITCH_LEN 32 /* Maximum allowed length for a switch name */
#define _MAXOPT_NAME_LEN 32 /* Maximum allowed length for a variable name */
#define _HELP_SWITCH "[-h] [--h] [--help] [-?]"
#define _HELP_DESCR "Display this help"
#define _MAX_STRINGY_LEN (_MAXOPT_SWITCH_LEN + _MAXOPT_NAME_LEN + 2 + 1 + 2 + 1)
int nsi_cmd_is_option(const char *arg, const char *option, int with_value);
int nsi_cmd_is_help_option(const char *arg);
void nsi_cmd_read_option_value(const char *str, void *dest, const char type,
const char *option);
void nsi_cmd_args_set_defaults(struct args_struct_t args_struct[]);
bool nsi_cmd_parse_one_arg(char *argv, struct args_struct_t args_struct[]);
void nsi_cmd_print_switches_help(struct args_struct_t args_struct[]);
#ifdef __cplusplus
}
#endif
#endif /* NATIVE_SIMULATOR_NATIVE_SRC_NSI_CMDLINE_INTERNAL_H */
``` | /content/code_sandbox/scripts/native_simulator/native/src/nsi_cmdline_internal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 321 |
```c
/*
*
*/
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "nsi_tracing.h"
#include "nsi_cmdline.h"
#include "nsi_cmdline_internal.h"
#include "nsi_cpu_es_if.h"
/**
* Check if <arg> is the option <option>
* The accepted syntax is:
* * For options without a value following:
* [-[-]]<option>
* * For options with value:
* [-[-]]<option>{:|=}<value>
*
* Returns 0 if it is not, or a number > 0 if it is.
* The returned number is the number of characters it went through
* to find the end of the option including the ':' or '=' character in case of
* options with value
*/
int nsi_cmd_is_option(const char *arg, const char *option, int with_value)
{
int of = 0;
size_t to_match_len = strlen(option);
/* Accept zero, one or two leading '-' characters */
if (arg[of] == '-') {
of++;
}
if (arg[of] == '-') {
of++;
}
/* Options without a value must match exactly to the end of <arg> */
if (!with_value) {
if (strcmp(&arg[of], option) != 0) {
return 0;
} else {
return of + to_match_len;
}
}
/* Options with value: consume <option>, then require ':' or '=' */
while (!(arg[of] == 0 && *option == 0)) {
if (*option == 0) {
if ((arg[of] == ':') || (arg[of] == '=')) {
of++;
break;
}
return 0;
}
if (arg[of] != *option) {
return 0;
}
of++;
option++;
}
if (arg[of] == 0) { /* we need a value to follow */
nsi_print_error_and_exit("Incorrect option syntax '%s'. The "
"value should follow the options. "
"For example --ratio=3\n",
arg);
}
return of;
}
/**
* Return 1 if <arg> matches an accepted help option.
* 0 otherwise
*
* Valid help options are [-[-]]{?|h|help}
* with the h or help in any case combination
*/
/*
 * Return 1 when <arg> is one of the accepted help switches
 * ([-[-]]{?|h|help}, case-insensitive for h/help), 0 otherwise.
 */
int nsi_cmd_is_help_option(const char *arg)
{
	/* Skip up to two leading '-' characters */
	for (int dashes = 0; dashes < 2 && arg[0] == '-'; dashes++) {
		arg++;
	}

	return (strcasecmp(arg, "?") == 0)
	       || (strcasecmp(arg, "h") == 0)
	       || (strcasecmp(arg, "help") == 0);
}
#define CMD_TYPE_ERROR "Coding error: type %c not understood"
#define CMD_ERR_BOOL_SWI "Programming error: I only know how to "\
"automatically read boolean switches\n"
/**
* Read out a the value following an option from str, and store it into
* <dest>
* <type> indicates the type of parameter (and type of dest pointer)
* 'b' : boolean
* 's' : string (char *)
* 'u' : 32 bit unsigned integer
* 'U' : 64 bit unsigned integer
* 'i' : 32 bit signed integer
* 'I' : 64 bit signed integer
* 'd' : *double* float
*
* Note: list type ('l') cannot be handled by this function and must always be
* manual
*
* <long_d> is the long name of the option
*/
void nsi_cmd_read_option_value(const char *str, void *dest, const char type,
const char *option)
{
int error = 0;
char *endptr = NULL;
/* For each type: store the parsed value and leave endptr just past it */
switch (type) {
case 'b':
/* Accepted booleans: true/false (any case) or 1/0 */
if (strcasecmp(str, "false") == 0) {
*(bool *)dest = false;
endptr = (char *)str + 5;
} else if (strcmp(str, "0") == 0) {
*(bool *)dest = false;
endptr = (char *)str + 1;
} else if (strcasecmp(str, "true") == 0) {
*(bool *)dest = true;
endptr = (char *)str + 4;
} else if (strcmp(str, "1") == 0) {
*(bool *)dest = true;
endptr = (char *)str + 1;
} else {
error = 1;
}
break;
case 's':
/* Strings are not copied: dest points into the original argv storage */
*(char **)dest = (char *)str;
endptr = (char *)str + strlen(str);
break;
case 'u':
*(uint32_t *)dest = strtoul(str, &endptr, 0);
break;
case 'U':
*(uint64_t *)dest = strtoull(str, &endptr, 0);
break;
case 'i':
*(int32_t *)dest = strtol(str, &endptr, 0);
break;
case 'I':
*(int64_t *)dest = strtoll(str, &endptr, 0);
break;
case 'd':
*(double *)dest = strtod(str, &endptr);
break;
default:
nsi_print_error_and_exit(CMD_TYPE_ERROR, type);
/* Unreachable */
break;
}
/* Any trailing characters after the parsed value make the input invalid */
if (!error && endptr && *endptr != 0) {
error = 1;
}
if (error) {
nsi_print_error_and_exit("Error reading value of %s '%s'. Use"
" --help for usage information\n",
option, str);
}
}
/**
* Initialize existing dest* to defaults based on type
*/
void nsi_cmd_args_set_defaults(struct args_struct_t args_struct[])
{
int count = 0;
/* Walk the table up to its end marker (option == NULL) */
while (args_struct[count].option != NULL) {
if (args_struct[count].dest == NULL) {
count++;
continue;
}
/* Sentinel defaults: numeric types get their MAX, doubles NAN,
 * so "not set by the user" is distinguishable later */
switch (args_struct[count].type) {
case 0: /* does not have storage */
break;
case 'b':
*(bool *)args_struct[count].dest = false;
break;
case 's':
*(char **)args_struct[count].dest = NULL;
break;
case 'u':
*(uint32_t *)args_struct[count].dest = UINT32_MAX;
break;
case 'U':
*(uint64_t *)args_struct[count].dest = UINT64_MAX;
break;
case 'i':
*(int32_t *)args_struct[count].dest = INT32_MAX;
break;
case 'I':
*(int64_t *)args_struct[count].dest = INT64_MAX;
break;
case 'd':
*(double *)args_struct[count].dest = (double)NAN;
break;
default:
nsi_print_error_and_exit(CMD_TYPE_ERROR,
args_struct[count].type);
break;
}
count++;
}
}
/**
* For the help messages:
* Generate a string containing how the option described by <args_s_el>
* should be used
*
* The string is saved in <buf> which has been allocated <size> bytes by the
* caller
*/
static void nsi_cmd_gen_switch_syntax(char *buf, int size,
				struct args_struct_t *args_s_el)
{
	int ret = 0;

	if (size <= 0) {
		return;
	}

	/* Optional options are rendered surrounded by square brackets */
	if (args_s_el->is_mandatory == false) {
		*buf++ = '[';
		size--;
	}

	if (args_s_el->is_switch == true) {
		ret = snprintf(buf, size, "-%s", args_s_el->option);
	} else {
		if (args_s_el->type != 'l') {
			ret = snprintf(buf, size, "-%s=<%s>",
					args_s_el->option, args_s_el->name);
		} else {
			/* 'l' (list) options take their value(s) as separate
			 * command line arguments */
			ret = snprintf(buf, size, "-%s <%s>...",
					args_s_el->option, args_s_el->name);
		}
	}

	if (ret < 0) {
		nsi_print_error_and_exit("Unexpected error in %s %i\n",
					__FILE__, __LINE__);
	}
	/* ret is the would-be length: if it does not fit, the output was
	 * truncated by snprintf */
	if (size - ret < 0) {
		/*
		 * If we run out of space we can just stop,
		 * this is not critical
		 */
		return;
	}

	buf += ret;
	size -= ret;

	/* Close the bracket for optional options, and append a separator */
	if (args_s_el->is_mandatory == false) {
		snprintf(buf, size, "] ");
	} else {
		snprintf(buf, size, " ");
	}
}
/**
 * Print a terse list of all available command line switches,
 * wrapping the output at _MAX_LINE_WIDTH columns.
 */
void nsi_cmd_print_switches_help(struct args_struct_t args_struct[])
{
	int line_len = strlen(_HELP_SWITCH) + 1;

	fprintf(stdout, "%s ", _HELP_SWITCH);

	for (int i = 0; args_struct[i].option != NULL; i++) {
		char syntax[_MAX_STRINGY_LEN];

		nsi_cmd_gen_switch_syntax(syntax, _MAX_STRINGY_LEN,
					  &args_struct[i]);

		if (line_len + strlen(syntax) > _MAX_LINE_WIDTH) {
			fprintf(stdout, "\n");
			line_len = 0;
		}
		fprintf(stdout, "%s", syntax);
		line_len += strlen(syntax);
	}

	fprintf(stdout, "\n");
}
/**
* Print the long help message of the program
*/
void nsi_cmd_print_long_help(struct args_struct_t args_struct[])
{
	int ret;
	int count = 0;
	int printed_in_line = 0;
	char stringy[_MAX_STRINGY_LEN];

	nsi_cmd_print_switches_help(args_struct);

	fprintf(stdout, "\n %-*s:%s\n", _LONG_HELP_ALIGN-1,
		_HELP_SWITCH, _HELP_DESCR);

	while (args_struct[count].option != NULL) {
		int printed_right;
		char *toprint;
		int total_to_print;

		nsi_cmd_gen_switch_syntax(stringy, _MAX_STRINGY_LEN,
					&args_struct[count]);

		/* Left column: the option syntax, padded to _LONG_HELP_ALIGN */
		ret = fprintf(stdout, " %-*s:", _LONG_HELP_ALIGN-1, stringy);
		printed_in_line = ret;
		printed_right = 0;
		toprint = args_struct[count].descript;
		total_to_print = strlen(toprint);
		/* Right column: print the description wrapped at
		 * _MAX_LINE_WIDTH. fprintf returns the number of characters
		 * printed including the '\n', so <ret - 1> is how much of the
		 * description was consumed */
		ret = fprintf(stdout, "%.*s\n",
				_MAX_LINE_WIDTH - printed_in_line,
				&toprint[printed_right]);
		printed_right += ret - 1;

		/* Continuation lines, indented to the right column.
		 * NOTE(review): assumes _MAX_LINE_WIDTH > _LONG_HELP_ALIGN;
		 * otherwise this loop would not make progress */
		while (printed_right < total_to_print) {
			fprintf(stdout, "%*s", _LONG_HELP_ALIGN, "");
			ret = fprintf(stdout, "%.*s\n",
					_MAX_LINE_WIDTH - _LONG_HELP_ALIGN,
					&toprint[printed_right]);
			printed_right += ret - 1;
		}
		count++;
	}

	fprintf(stdout, "\n");
	fprintf(stdout, "Note that which options are available depends on the "
		"enabled features/drivers\n\n");
}
/*
 * <argv> matched the option described in <arg_element>
 * (its value, if any, starts at &argv[offset])
 *
 * If arg_element->dest points to storage, fill it in;
 * afterwards call the registered callback, if there is one.
 */
static void nsi_cmd_handle_this_matched_arg(char *argv, int offset,
					struct args_struct_t *arg_element)
{
	struct args_struct_t *arg = arg_element;

	if (arg->dest != NULL) {
		if (!arg->is_switch) {
			/* Non-switch options carry a value we need to parse */
			nsi_cmd_read_option_value(&argv[offset],
						  arg->dest,
						  arg->type,
						  arg->option);
		} else if (arg->type == 'b') {
			*(bool *)arg->dest = true;
		} else {
			/* Switches may only have boolean storage */
			nsi_print_error_and_exit(CMD_ERR_BOOL_SWI);
		}
	}

	if (arg->call_when_found) {
		arg->call_when_found(argv, offset);
	}
}
/**
 * Try to find <argv> in the (non-manual entries of the) options table.
 * If found, parse it, set its destination accordingly, and return true.
 * If not found, return false.
 *
 * A help option prints the long help message and exits.
 */
bool nsi_cmd_parse_one_arg(char *argv, struct args_struct_t args_struct[])
{
	if (nsi_cmd_is_help_option(argv)) {
		nsi_cmd_print_long_help(args_struct);
		nsi_exit(0);
	}

	for (int i = 0; args_struct[i].option != NULL; i++) {
		int offset;

		if (args_struct[i].manual) {
			continue;
		}

		offset = nsi_cmd_is_option(argv, args_struct[i].option,
					   !args_struct[i].is_switch);
		if (offset) {
			nsi_cmd_handle_this_matched_arg(argv, offset,
							&args_struct[i]);
			return true;
		}
	}

	return false;
}
``` | /content/code_sandbox/scripts/native_simulator/native/src/nsi_cmdline_common.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,820 |
```c
/*
*
*/
#include <stdlib.h>
#include <stdio.h> /* for printfs */
#include <stdarg.h> /* for va args */
#include <unistd.h>
#include "nsi_tasks.h"
#include "nsi_cmdline.h"
#include "nsi_tracing.h"
#include "nsi_main.h"
/**
* Are stdout and stderr connected to a tty
* 0 = no
* 1 = yes
* -1 = we do not know yet
* Indexed 0:stdout, 1:stderr
*/
static int is_a_tty[2] = {-1, -1};
#define ST_OUT 0
#define ST_ERR 1
static void decide_about_color(void)
{
if (is_a_tty[0] == -1) {
is_a_tty[0] = isatty(STDOUT_FILENO);
}
if (is_a_tty[1] == -1) {
is_a_tty[1] = isatty(STDERR_FILENO);
}
}
#define ERROR 0
#define WARN 1
#define TRACE 2
static const char * const trace_type_esc_start[] = {
"\x1b[1;31m", /* ERROR - Foreground color = red, bold */
"\x1b[95m", /* WARNING - Foreground color = magenta */
"\x1b[0;39m", /* TRACE - reset all styles */
};
static const char trace_esc_end[] = "\x1b[0;39m"; /* Reset all styles */
/* Print a warning to stderr (in the WARN color when connected to a tty) */
void nsi_vprint_warning(const char *format, va_list vargs)
{
	if (is_a_tty[ST_ERR] == -1) {
		decide_about_color();
	}

	int use_color = is_a_tty[ST_ERR];

	if (use_color) {
		fprintf(stderr, "%s", trace_type_esc_start[WARN]);
	}

	vfprintf(stderr, format, vargs);

	if (use_color) {
		fprintf(stderr, "%s", trace_esc_end);
	}
}
/*
 * Print an error message to stderr (in the ERROR color when connected to
 * a tty) and terminate the execution with exit code 1
 */
void nsi_vprint_error_and_exit(const char *format, va_list vargs)
{
	if (is_a_tty[ST_ERR] == -1) {
		decide_about_color();
	}

	int use_color = is_a_tty[ST_ERR];

	if (use_color) {
		fprintf(stderr, "%s", trace_type_esc_start[ERROR]);
	}

	vfprintf(stderr, format, vargs);

	if (use_color) {
		fprintf(stderr, "%s\n", trace_esc_end);
	}

	nsi_exit(1);
}
/* Print a trace to stdout (resetting styles first when connected to a tty) */
void nsi_vprint_trace(const char *format, va_list vargs)
{
	if (is_a_tty[ST_OUT] == -1) {
		decide_about_color();
	}

	int use_color = is_a_tty[ST_OUT];

	if (use_color) {
		fprintf(stdout, "%s", trace_type_esc_start[TRACE]);
	}

	vfprintf(stdout, format, vargs);

	if (use_color) {
		fprintf(stdout, "%s", trace_esc_end);
	}
}
static void trace_disable_color(char *argv, int offset)
{
is_a_tty[0] = 0;
is_a_tty[1] = 0;
}
static void trace_enable_color(char *argv, int offset)
{
is_a_tty[0] = -1;
is_a_tty[1] = -1;
}
static void trace_force_color(char *argv, int offset)
{
is_a_tty[0] = 1;
is_a_tty[1] = 1;
}
/*
 * Return whether traces to <file_number> (0: stdout, 1: stderr) go to a
 * tty (1), do not (0), or it has not been determined yet (-1)
 */
int nsi_trace_over_tty(int file_number)
{
	return is_a_tty[file_number];
}
NSI_TASK(decide_about_color, PRE_BOOT_2, 0);
/*
 * Register the tracing related command line options (color / no-color /
 * force-color) with the command line parser.
 * Run automatically during PRE_BOOT_1 via the NSI_TASK hook below.
 */
static void nsi_add_tracing_options(void)
{
	static struct args_struct_t trace_options[] = {
		{
			.is_switch = true,
			.option = "color",
			.type = 'b',
			.call_when_found = trace_enable_color,
			.descript = "(default) Enable color in traces if printing to console"
		},
		{
			.is_switch = true,
			.option = "no-color",
			.type = 'b',
			.call_when_found = trace_disable_color,
			.descript = "Disable color in traces even if printing to console"
		},
		{
			.is_switch = true,
			.option = "force-color",
			.type = 'b',
			.call_when_found = trace_force_color,
			.descript = "Enable color in traces even if printing to files/pipes"
		},
		ARG_TABLE_ENDMARKER
	};

	nsi_add_command_line_opts(trace_options);
}
NSI_TASK(nsi_add_tracing_options, PRE_BOOT_1, 0);
``` | /content/code_sandbox/scripts/native_simulator/native/src/nsi_trace.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,008 |
```c
/*
*
*
* HW IRQ controller model
*/
#include <stdint.h>
#include <stdbool.h>
#include "nsi_internal.h"
#include "nsi_cpu_if.h"
#include "nsi_cpu0_interrupts.h"
#include "irq_ctrl.h"
#include "nsi_tasks.h"
#include "nsi_hws_models_if.h"
static uint64_t irq_ctrl_timer = NSI_NEVER;
static uint64_t irq_status; /* pending interrupts */
static uint64_t irq_premask; /* interrupts before the mask */
/*
* Mask of which interrupts will actually cause the cpu to vector into its
* irq handler
* If an interrupt is masked in this way, it will be pending in the premask in
* case it is enabled later before clearing it.
 * If the irq_mask enables an interrupt pending in irq_premask, it will cause
* the controller to raise the interrupt immediately
*/
static uint64_t irq_mask;
/*
* Interrupts lock/disable. When set, interrupts are registered
 * (in the irq_status) but do not awake the cpu. If, when unlocked,
* irq_status != 0 an interrupt will be raised immediately
*/
static bool irqs_locked;
static bool lock_ignore; /* For the hard fake IRQ, temporarily ignore lock */
static uint8_t irq_prio[N_IRQS]; /* Priority of each interrupt */
/* note that prio = 0 == highest, prio=255 == lowest */
static int currently_running_prio = 256; /* 255 is the lowest prio interrupt */
/* Reset the IRQ controller model to its boot state */
static void hw_irq_ctrl_init(void)
{
	/* Assume all interrupts are disabled and none pending at boot */
	irq_mask = 0U;
	irq_premask = 0U;
	irqs_locked = false;
	lock_ignore = false;

	/* All interrupts start at the lowest priority (255) */
	int i = N_IRQS;

	while (i-- > 0) {
		irq_prio[i] = 255U;
	}
}
NSI_TASK(hw_irq_ctrl_init, HW_INIT, 10);
/* Set the currently running interrupt priority (256 == lower than any
 * real interrupt, i.e. none running) */
void hw_irq_ctrl_set_cur_prio(int new)
{
	currently_running_prio = new;
}
/* Get the currently running interrupt priority
 * (see hw_irq_ctrl_set_cur_prio()) */
int hw_irq_ctrl_get_cur_prio(void)
{
	return currently_running_prio;
}
/* Set the priority of interrupt <irq> (0 == highest, 255 == lowest) */
void hw_irq_ctrl_prio_set(unsigned int irq, unsigned int prio)
{
	irq_prio[irq] = prio;
}
/* Get the priority of interrupt <irq> (0 == highest, 255 == lowest) */
uint8_t hw_irq_ctrl_get_prio(unsigned int irq)
{
	return irq_prio[irq];
}
/**
 * Get the pending interrupt with the highest priority, provided its
 * priority is better (lower value) than the one currently being serviced.
 *
 * Returns -1 if there is none (or if interrupts are locked).
 */
int hw_irq_ctrl_get_highest_prio_irq(void)
{
	if (irqs_locked) {
		return -1;
	}

	/* Local copy, so we can clear bits as we scan
	 * (also avoids shadowing the file-scope irq_status) */
	uint64_t pending = hw_irq_ctrl_get_irq_status();
	int best_irq = -1;
	int best_prio = 256; /* Worse than the lowest real priority (255) */

	while (pending != 0U) {
		int irq_nbr = nsi_find_lsb_set64(pending) - 1;

		pending &= ~((uint64_t)1 << irq_nbr);
		if (((int)irq_prio[irq_nbr] < best_prio)
		    && ((int)irq_prio[irq_nbr] < currently_running_prio)) {
			best_irq = irq_nbr;
			best_prio = irq_prio[irq_nbr];
		}
	}
	return best_irq;
}
/* Return the current interrupt lock state (1: locked, 0: unlocked) */
uint32_t hw_irq_ctrl_get_current_lock(void)
{
	return irqs_locked;
}
/*
 * Change the overall interrupt controller "interrupt lock".
 * The interrupt lock is a flag that provisionally disables all interrupts
 * without affecting their status or their ability to be pended in the
 * meanwhile.
 *
 * Returns the previous lock state.
 */
uint32_t hw_irq_ctrl_change_lock(uint32_t new_lock)
{
	uint32_t previous_lock = irqs_locked;

	irqs_locked = new_lock;

	if (previous_lock && !new_lock && (irq_status != 0U)) {
		/* Interrupts were pended while locked: awake the CPU now */
		nsif_cpu0_irq_raised_from_sw();
	}

	return previous_lock;
}
/* Return the mask of pending interrupts (bit N set == interrupt N pending) */
uint64_t hw_irq_ctrl_get_irq_status(void)
{
	return irq_status;
}
/* Un-pend all currently enabled interrupts
 * (disabled interrupts pending in the premask are left pending) */
void hw_irq_ctrl_clear_all_enabled_irqs(void)
{
	irq_status = 0U;
	irq_premask &= ~irq_mask;
}
/* Un-pend all interrupts, both enabled and disabled ones */
void hw_irq_ctrl_clear_all_irqs(void)
{
	irq_status = 0U;
	irq_premask = 0U;
}
/* Disable (mask) interrupt <irq> */
void hw_irq_ctrl_disable_irq(unsigned int irq)
{
	irq_mask &= ~((uint64_t)1<<irq);
}
/* Return 1 if interrupt <irq> is enabled (unmasked), 0 otherwise */
int hw_irq_ctrl_is_irq_enabled(unsigned int irq)
{
	return (irq_mask & ((uint64_t)1 << irq)) != 0;
}
/**
 * Get the current interrupt enable mask
 * (bit N set == interrupt N enabled)
 */
uint64_t hw_irq_ctrl_get_irq_mask(void)
{
	return irq_mask;
}
/*
 * Un-pend an interrupt from the interrupt controller.
 *
 * This is an API between the MCU model/IRQ handling side and the IRQ controller
 * model
 */
void hw_irq_ctrl_clear_irq(unsigned int irq)
{
	/* Clear it both from the status and the premask */
	irq_status &= ~((uint64_t)1<<irq);
	irq_premask &= ~((uint64_t)1<<irq);
}
/**
 * Enable (unmask) interrupt <irq>
 *
 * This function may only be called from SW threads
 *
 * If the enabled interrupt was already pending (in the premask), it will
 * immediately vector to its interrupt handler and continue (maybe with
 * some swap() before)
 */
void hw_irq_ctrl_enable_irq(unsigned int irq)
{
	uint64_t bit = (uint64_t)1 << irq;

	irq_mask |= bit;
	if (irq_premask & bit) { /* If the interrupt was pending */
		hw_irq_ctrl_raise_im_from_sw(irq);
	}
}
/*
 * Common first step of raising an interrupt: pend it in the premask and,
 * if enabled, in the status. The phony hard IRQ instead just sets
 * lock_ignore so the following wake-up bypasses the interrupt lock.
 */
static inline void hw_irq_ctrl_irq_raise_prefix(unsigned int irq)
{
	if (irq == PHONY_HARD_IRQ) {
		lock_ignore = true;
		return;
	}

	if (irq < N_IRQS) {
		uint64_t bit = (uint64_t)1 << irq;

		irq_premask |= bit;
		if (irq_mask & bit) {
			irq_status |= bit;
		}
	}
}
/**
 * Set/Raise/Pend an interrupt
 *
 * This function is meant to be used by either the SW manual IRQ raising
 * or by HW which wants the IRQ to be raised in one delta cycle from now
 */
void hw_irq_ctrl_set_irq(unsigned int irq)
{
	/* Pend it (or set lock_ignore for the phony hard IRQ) */
	hw_irq_ctrl_irq_raise_prefix(irq);
	if ((irqs_locked == false) || (lock_ignore)) {
		/*
		 * Awake CPU in 1 delta
		 * Note that we awake the CPU even if the IRQ is disabled
		 * => we assume the CPU is always idling in a WFE() like
		 * instruction and the CPU is allowed to awake just with the irq
		 * being marked as pending
		 */
		irq_ctrl_timer = nsi_hws_get_time();
		nsi_hws_find_next_event();
	}
}
static void irq_raising_from_hw_now(void)
{
	/*
	 * We always awake the CPU even if the IRQ was masked,
	 * but not if irqs are locked unless this is due to a
	 * PHONY_HARD_IRQ
	 */
	if (irqs_locked && !lock_ignore) {
		return;
	}

	lock_ignore = false;
	nsif_cpu0_irq_raised();
}
/**
 * Set/Raise/Pend an interrupt immediately.
 * Like hw_irq_ctrl_set_irq() but awake immediately the CPU instead of in
 * 1 delta cycle
 *
 * Call only from HW threads; Should be used with care
 */
void hw_irq_ctrl_raise_im(unsigned int irq)
{
	hw_irq_ctrl_irq_raise_prefix(irq);
	irq_raising_from_hw_now(); /* Awake the CPU right now */
}
/**
 * Like hw_irq_ctrl_raise_im() but for SW threads
 *
 * Call only from SW threads; Should be used with care
 */
void hw_irq_ctrl_raise_im_from_sw(unsigned int irq)
{
	hw_irq_ctrl_irq_raise_prefix(irq);

	if (!irqs_locked) {
		nsif_cpu0_irq_raised_from_sw();
	}
}
/* HW scheduler event callback: disarm the IRQ controller timer and
 * awake the CPU (unless interrupts are locked) */
static void hw_irq_ctrl_timer_triggered(void)
{
	irq_ctrl_timer = NSI_NEVER;
	irq_raising_from_hw_now();
	nsi_hws_find_next_event();
}
NSI_HW_EVENT(irq_ctrl_timer, hw_irq_ctrl_timer_triggered, 900);
``` | /content/code_sandbox/scripts/native_simulator/native/src/irq_ctrl.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,753 |
```c
/*
*
*/
#include <stdbool.h>
#include <stdint.h>
#include "nsi_cpu0_interrupts.h"
#include "irq_ctrl.h"
#include "nsi_tasks.h"
#include "nsi_hws_models_if.h"
static uint64_t hw_counter_timer;
static bool counter_running;
static uint64_t counter_value;
static uint64_t counter_target;
static uint64_t counter_period;
static uint64_t counter_wrap;
/**
 * Reset the counter HW model to its boot state:
 * stopped, count at 0, and no period, target or wrap value configured
 */
static void hw_counter_init(void)
{
	counter_running = false;
	counter_value = 0;
	counter_period = NSI_NEVER;
	counter_target = NSI_NEVER;
	counter_wrap = NSI_NEVER;
	hw_counter_timer = NSI_NEVER; /* No count event scheduled */
}
NSI_TASK(hw_counter_init, HW_INIT, 10);
/*
 * HW scheduler event callback: increment (and wrap) the count value,
 * schedule the next increment, and raise the counter IRQ when the
 * target value is reached
 */
static void hw_counter_triggered(void)
{
	if (!counter_running) {
		hw_counter_timer = NSI_NEVER;
		return;
	}

	uint64_t next_value = (counter_value + 1) % counter_wrap;

	counter_value = next_value;
	hw_counter_timer = nsi_hws_get_time() + counter_period;

	if (next_value == counter_target) {
		hw_irq_ctrl_set_irq(COUNTER_EVENT_IRQ);
	}
}
NSI_HW_EVENT(hw_counter_timer, hw_counter_triggered, 20);
/**
 * Configures the counter period.
 * The counter will be incremented every 'period' microseconds.
 * Note: only taken into account the next time the count event is scheduled
 * (on hw_counter_start() or on the next count increment).
 */
void hw_counter_set_period(uint64_t period)
{
	counter_period = period;
}
/*
 * Set the count value at which the counter will wrap
 * The counter will count up to (counter_wrap-1), i.e.:
 * 0, 1, 2,.., (counter_wrap - 1), 0
 *
 * NOTE(review): a wrap value of 0 would cause a division by zero in the
 * count update — callers should avoid it
 */
void hw_counter_set_wrap_value(uint64_t wrap_value)
{
	counter_wrap = wrap_value;
}
/**
 * Starts the counter. It must be previously configured with
 * hw_counter_set_period() and hw_counter_set_target().
 */
void hw_counter_start(void)
{
	if (counter_running) {
		return; /* Nothing to do, it is already counting */
	}

	counter_running = true;
	/* Schedule the first count increment one period from now */
	hw_counter_timer = nsi_hws_get_time() + counter_period;
	nsi_hws_find_next_event();
}
/**
 * Stops the counter at its current value.
 * On the next call to hw_counter_start(), counting resumes
 * from the value at which it was stopped.
 */
void hw_counter_stop(void)
{
	counter_running = false;
	hw_counter_timer = NSI_NEVER; /* Cancel the pending count event */
	nsi_hws_find_next_event();
}
/* Return true if the counter is currently counting */
bool hw_counter_is_started(void)
{
	return counter_running;
}
/**
 * Returns the current counter value
 * (number of wrapped count increments, not a time).
 */
uint64_t hw_counter_get_value(void)
{
	return counter_value;
}
/**
 * Resets the counter value to 0.
 * Note: it does not stop the counter or change its schedule.
 */
void hw_counter_reset(void)
{
	counter_value = 0;
}
/**
 * Configures the counter to generate an interrupt
 * (COUNTER_EVENT_IRQ) when its count value reaches target exactly.
 */
void hw_counter_set_target(uint64_t target)
{
	counter_target = target;
}
``` | /content/code_sandbox/scripts/native_simulator/native/src/hw_counter.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 614 |
```objective-c
/*
*
*/
#ifndef NATIVE_SIMULATOR_NATIVE_SRC_NSI_CPU0_INTERRUPTS_H
#define NATIVE_SIMULATOR_NATIVE_SRC_NSI_CPU0_INTERRUPTS_H
#define TIMER_TICK_IRQ 0
#define OFFLOAD_SW_IRQ 1
#define COUNTER_EVENT_IRQ 2
/*
* This interrupt will awake the CPU if IRQs are not locked,
* This interrupt does not have an associated status bit or handler
*/
#define PHONY_WEAK_IRQ 0xFFFE
/*
* This interrupt will awake the CPU even if IRQs are locked,
* This interrupt does not have an associated status bit or handler
* (the lock is only ignored when the interrupt is raised from the HW models,
* SW threads should not try to use this)
*/
#define PHONY_HARD_IRQ 0xFFFF
#endif /* NATIVE_SIMULATOR_NATIVE_SRC_NSI_CPU0_INTERRUPTS_H */
``` | /content/code_sandbox/scripts/native_simulator/native/src/include/nsi_cpu0_interrupts.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 178 |
```objective-c
/*
*
*/
#ifndef NATIVE_SIMULATOR_NATIVE_SRC_NSI_TIMER_MODEL_H
#define NATIVE_SIMULATOR_NATIVE_SRC_NSI_TIMER_MODEL_H
#include <stdbool.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
void hwtimer_set_real_time_mode(bool new_rt);
void hwtimer_timer_reached(void);
void hwtimer_wake_in_time(uint64_t time);
void hwtimer_set_silent_ticks(int64_t sys_ticks);
void hwtimer_enable(uint64_t period);
int64_t hwtimer_get_pending_silent_ticks(void);
void hwtimer_reset_rtc(void);
void hwtimer_set_rtc_offset(int64_t offset);
void hwtimer_set_rt_ratio(double ratio);
void hwtimer_adjust_rtc_offset(int64_t offset_delta);
void hwtimer_adjust_rt_ratio(double ratio_correction);
int64_t hwtimer_get_simu_rtc_time(void);
void hwtimer_get_pseudohost_rtc_time(uint32_t *nsec, uint64_t *sec);
#ifdef __cplusplus
}
#endif
#endif /* NATIVE_SIMULATOR_NATIVE_SRC_NSI_TIMER_MODEL_H */
``` | /content/code_sandbox/scripts/native_simulator/native/src/include/nsi_timer_model.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 239 |
```objective-c
/*
*
*/
/**
* @file
* @brief API to the native simulator - native (Real) Time Clock
*/
#ifndef NATIVE_SIMULATOR_NATIVE_SRC_NATIVE_RTC_H
#define NATIVE_SIMULATOR_NATIVE_SRC_NATIVE_RTC_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Types of clocks this RTC provides:
*/
/** Time since boot, cannot be offset. Microsecond resolution */
#define RTC_CLOCK_BOOT 0
/** Persistent clock, can be offset. Microsecond resolution */
#define RTC_CLOCK_REALTIME 1
/**
* Pseudo-host real time clock (Please see documentation).
* Nanosecond resolution
*/
#define RTC_CLOCK_PSEUDOHOSTREALTIME 2
/**
* @brief Get the value of a clock in microseconds
*
* @param clock_type Which clock to measure from
*
* @return Number of microseconds
*/
uint64_t native_rtc_gettime_us(int clock_type);
/**
* @brief Get the value of a clock split in nsec and seconds
*
* @param clock_type Which clock to measure from
* @param nsec Pointer to store the nanoseconds
 * @param sec Pointer to store the seconds
*/
void native_rtc_gettime(int clock_type, uint32_t *nsec, uint64_t *sec);
/**
* @brief Offset the real time clock by a number of microseconds.
* Note that this only affects the RTC_CLOCK_REALTIME and
* RTC_CLOCK_PSEUDOHOSTREALTIME clocks.
*
* @param delta_us Number of microseconds to offset. The value is added to all
* offsetable clocks.
*/
void native_rtc_offset(int64_t delta_us);
/**
* @brief Adjust the speed of the clock source by a multiplicative factor
*
* @param clock_correction Factor by which to correct the clock speed
*/
void native_rtc_adjust_clock(double clock_correction);
#ifdef __cplusplus
}
#endif
#endif /* NATIVE_SIMULATOR_NATIVE_SRC_NATIVE_RTC_H */
``` | /content/code_sandbox/scripts/native_simulator/native/src/include/native_rtc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 406 |
```objective-c
/*
*
*/
/**
* @file
* @brief API to the native simulator - native interrupt controller
*/
#ifndef NATIVE_SIMULATOR_NATIVE_SRC_IRQ_CTRL_H
#define NATIVE_SIMULATOR_NATIVE_SRC_IRQ_CTRL_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
void hw_irq_ctrl_set_cur_prio(int new);
int hw_irq_ctrl_get_cur_prio(void);
void hw_irq_ctrl_prio_set(unsigned int irq, unsigned int prio);
uint8_t hw_irq_ctrl_get_prio(unsigned int irq);
int hw_irq_ctrl_get_highest_prio_irq(void);
uint32_t hw_irq_ctrl_get_current_lock(void);
uint32_t hw_irq_ctrl_change_lock(uint32_t new_lock);
uint64_t hw_irq_ctrl_get_irq_status(void);
void hw_irq_ctrl_disable_irq(unsigned int irq);
int hw_irq_ctrl_is_irq_enabled(unsigned int irq);
void hw_irq_ctrl_clear_irq(unsigned int irq);
void hw_irq_ctrl_enable_irq(unsigned int irq);
void hw_irq_ctrl_set_irq(unsigned int irq);
void hw_irq_ctrl_raise_im(unsigned int irq);
void hw_irq_ctrl_raise_im_from_sw(unsigned int irq);
#define N_IRQS 32
#ifdef __cplusplus
}
#endif
#endif /* NATIVE_SIMULATOR_NATIVE_SRC_IRQ_CTRL_H */
``` | /content/code_sandbox/scripts/native_simulator/native/src/include/irq_ctrl.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 257 |
```objective-c
/*
*
*/
/**
* @file
* @brief API to the native simulator - native HW counter
*/
#ifndef NATIVE_SIMULATOR_NATIVE_SRC_HW_COUNTER_H
#define NATIVE_SIMULATOR_NATIVE_SRC_HW_COUNTER_H
#include <stdbool.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
void hw_counter_triggered(void);
void hw_counter_set_period(uint64_t period);
void hw_counter_set_target(uint64_t counter_target);
void hw_counter_set_wrap_value(uint64_t wrap_value);
void hw_counter_start(void);
void hw_counter_stop(void);
bool hw_counter_is_started(void);
uint64_t hw_counter_get_value(void);
void hw_counter_reset(void);
#ifdef __cplusplus
}
#endif
#endif /* NATIVE_SIMULATOR_NATIVE_SRC_HW_COUNTER_H */
``` | /content/code_sandbox/scripts/native_simulator/native/src/include/hw_counter.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 153 |
```c
/*
*
*/
/**
* This provides a model of:
* - A system tick timer
* - A real time clock
* - A one shot HW timer which can be used to awake the CPU at a given time
* - The clock source for all of this, and therefore for the native simulator
* in the native configuration
*/
#include <stdint.h>
#include <time.h>
#include <stdbool.h>
#include <math.h>
#include "nsi_utils.h"
#include "nsi_cmdline.h"
#include "nsi_tracing.h"
#include "nsi_cpu0_interrupts.h"
#include "irq_ctrl.h"
#include "nsi_tasks.h"
#include "nsi_hws_models_if.h"
#define DEBUG_NP_TIMER 0
#if DEBUG_NP_TIMER
/**
* Helper function to convert a 64 bit time in microseconds into a string.
* The format will always be: hh:mm:ss.ssssss\0
*
* Note: the caller has to allocate the destination buffer (at least 17 chars)
*/
#include <stdio.h>
static char *us_time_to_str(char *dest, uint64_t time)
{
	if (time == NSI_NEVER) {
		sprintf(dest, " NEVER/UNKNOWN ");
		return dest;
	}

	/* Split <time> (us) into hh:mm:ss.uuuuuu fields */
	unsigned int us = time % 1000000;
	uint64_t secs = time / 1000000U;
	unsigned int second = secs % 60;
	unsigned int minute = (secs / 60U) % 60;
	unsigned int hour = (secs / 3600U) % 24;

	sprintf(dest, "%02u:%02u:%02u.%06u", hour, minute, second, us);

	return dest;
}
#endif
static uint64_t hw_timer_timer; /* Event timer exposed to the HW scheduler */
static uint64_t hw_timer_tick_timer;
static uint64_t hw_timer_awake_timer;
static uint64_t tick_p; /* Period of the ticker */
static int64_t silent_ticks;
static bool real_time_mode;
static bool reset_rtc; /*"Reset" the RTC on boot*/
/*
* When this executable started running, this value shall not be changed after
* boot
*/
static uint64_t boot_time;
/*
* Ratio of the simulated clock to the real host time
* For ex. a clock_ratio = 1+100e-6 means the simulated time is 100ppm faster
* than real time
*/
static double clock_ratio = 1.0;
#if DEBUG_NP_TIMER
/*
* Offset of the simulated time vs the real host time due to drift/clock ratio
* until "last_radj_*time"
*
* A positive value means simulated time is ahead of the host time
*
* This variable is only kept for debugging purposes
*/
static int64_t last_drift_offset;
#endif
/*
* Offsets of the RTC relative to the hardware models simu_time
* "simu_time" == simulated time which starts at 0 on boot
*/
static int64_t rtc_offset;
/* Last host/real time when the ratio was adjusted */
static uint64_t last_radj_rtime;
/* Last simulated time when the ratio was adjusted */
static uint64_t last_radj_stime;
/* Enable/disable real time mode (execution throttled to the host time) */
void hwtimer_set_real_time_mode(bool new_rt)
{
	real_time_mode = new_rt;
}
/* Recompute the overall timer event time as the earliest of the
 * periodic tick timer and the one-shot awake timer */
static void hwtimer_update_timer(void)
{
	hw_timer_timer = NSI_MIN(hw_timer_tick_timer, hw_timer_awake_timer);
}
/* Read the host monotonic clock (the raw variant where available) */
static inline void host_clock_gettime(struct timespec *tv)
{
#if defined(CLOCK_MONOTONIC_RAW)
	clock_gettime(CLOCK_MONOTONIC_RAW, tv);
#else
	clock_gettime(CLOCK_MONOTONIC, tv);
#endif
}
/*
* This function is globally available only for tests purposes
* It should not be used for any functional purposes,
* and as such is not present in this component header.
*/
/* Return the host monotonic time in microseconds */
uint64_t get_host_us_time(void)
{
	struct timespec tv;

	host_clock_gettime(&tv);
	/* Pure integer arithmetic: the previous double multiplication by 1e6
	 * would lose precision once tv_sec * 1e6 exceeds 2^53, and the rest
	 * of this file (e.g. hwtimer_get_pseudohost_rtc_time()) already uses
	 * integer math for the same conversion */
	return (uint64_t)tv.tv_sec * 1000000ULL + (uint64_t)(tv.tv_nsec / 1000);
}
/*
 * Boot-time initialization of the timer model: clear the tick/awake timers,
 * anchor the real-time reference (in real time mode), and unless the RTC was
 * requested to be reset, start the RTC from the current host realtime clock.
 */
static void hwtimer_init(void)
{
	silent_ticks = 0;
	hw_timer_tick_timer = NSI_NEVER;
	hw_timer_awake_timer = NSI_NEVER;
	hwtimer_update_timer();
	if (real_time_mode) {
		boot_time = get_host_us_time();
		last_radj_rtime = boot_time;
		last_radj_stime = 0U;
	}
	if (!reset_rtc) {
		struct timespec tv;
		uint64_t realhosttime;

		clock_gettime(CLOCK_REALTIME, &tv);
		/* Integer us conversion, avoiding double precision loss
		 * (consistent with hwtimer_get_pseudohost_rtc_time()) */
		realhosttime = (uint64_t)tv.tv_sec * 1000000ULL
			       + (uint64_t)(tv.tv_nsec / 1000);
		rtc_offset += realhosttime;
	}
}
NSI_TASK(hwtimer_init, HW_INIT, 10);
/**
* Enable the HW timer tick interrupts with a period <period> in microseconds
*/
void hwtimer_enable(uint64_t period)
{
	tick_p = period;
	/* (Re)arm the tick timer <period> us from the current simulated time */
	hw_timer_tick_timer = nsi_hws_get_time() + tick_p;
	hwtimer_update_timer();
	nsi_hws_find_next_event();
}
/*
 * The periodic tick timer fired: in real time mode, first sleep as long as
 * needed so the host time catches up with the (ratio-corrected) simulated
 * time; then schedule the next tick, and either consume a "silent" tick or
 * raise the tick IRQ.
 */
static void hwtimer_tick_timer_reached(void)
{
	if (real_time_mode) {
		/* Host time at which this tick is expected, given the last
		 * ratio-adjustment anchor point */
		uint64_t expected_rt = (hw_timer_tick_timer - last_radj_stime)
				       / clock_ratio
				       + last_radj_rtime;
		uint64_t real_time = get_host_us_time();

		int64_t diff = expected_rt - real_time;

#if DEBUG_NP_TIMER
		char es[30];
		char rs[30];

		us_time_to_str(es, expected_rt - boot_time);
		us_time_to_str(rs, real_time - boot_time);
		printf("tick @%5llims: diff = expected_rt - real_time = "
			"%5lli = %s - %s\n",
			hw_timer_tick_timer/1000U, diff, es, rs);
#endif

		if (diff > 0) { /* we need to slow down */
			struct timespec requested_time;
			struct timespec remaining;

			/* Integer split of <diff> us into s + ns (the previous
			 * round-trip through doubles was lossy for huge
			 * offsets and needlessly imprecise) */
			requested_time.tv_sec = diff / 1000000;
			requested_time.tv_nsec = (diff % 1000000) * 1000;

			(void) nanosleep(&requested_time, &remaining);
		}
	}

	hw_timer_tick_timer += tick_p;
	hwtimer_update_timer();

	if (silent_ticks > 0) {
		silent_ticks -= 1;
	} else {
		hw_irq_ctrl_set_irq(TIMER_TICK_IRQ);
	}
}
/* The one-shot awake timer fired: disarm it and wake the CPU via the
 * phony hard IRQ (which has no handler and bypasses the IRQ lock) */
static void hwtimer_awake_timer_reached(void)
{
	hw_timer_awake_timer = NSI_NEVER;
	hwtimer_update_timer();
	hw_irq_ctrl_set_irq(PHONY_HARD_IRQ);
}
/* Dispatch the overall timer event to whichever sub-timer(s) expired now */
static void hwtimer_timer_reached(void)
{
	uint64_t now = hw_timer_timer;

	if (hw_timer_awake_timer == now) {
		hwtimer_awake_timer_reached();
	}
	if (hw_timer_tick_timer == now) {
		hwtimer_tick_timer_reached();
	}
}
NSI_HW_EVENT(hw_timer_timer, hwtimer_timer_reached, 0);
/**
 * The timer HW will awake the CPU (without an interrupt) at least when <time>
 * comes (it may awake it earlier)
 *
 * If there was a previous request for an earlier time, the old one will prevail
 *
 * This is meant for busy_wait() like functionality
 */
void hwtimer_wake_in_time(uint64_t time)
{
	if (time >= hw_timer_awake_timer) {
		return; /* An earlier (or equal) wake is already scheduled */
	}

	hw_timer_awake_timer = time;
	hwtimer_update_timer();
	nsi_hws_find_next_event();
}
/**
 * The kernel wants to skip the next sys_ticks tick interrupts
 * If sys_ticks == 0, the next interrupt will be raised.
 */
void hwtimer_set_silent_ticks(int64_t sys_ticks)
{
	silent_ticks = sys_ticks; /* Consumed one by one by the tick handler */
}
/* Return how many tick interrupts are still to be silently skipped */
int64_t hwtimer_get_pending_silent_ticks(void)
{
	return silent_ticks;
}
/**
 * During boot, set the simulated real time clock to not start
 * from the real host time
*/
void hwtimer_reset_rtc(void)
{
	reset_rtc = true; /* Checked during hwtimer_init() */
}
/**
 * Set a time offset (microseconds) of the RTC simulated time
 * (overwriting any previously set offset)
 * Note: This should not be used after starting
 */
void hwtimer_set_rtc_offset(int64_t offset)
{
	rtc_offset = offset;
}
/**
 * Set the ratio of the simulated time to host (real) time
 * (1.0 == run at host real time speed).
 * Note: This should not be used after starting
 */
void hwtimer_set_rt_ratio(double ratio)
{
	clock_ratio = ratio;
}
/**
 * Increase or decrease the RTC simulated time by offset_delta
 * (a positive delta moves the RTC forward)
 */
void hwtimer_adjust_rtc_offset(int64_t offset_delta)
{
	rtc_offset += offset_delta;
}
/**
 * Adjust the ratio of the simulated time by a factor
 * (clock_ratio *= ratio_correction). The (real, simulated) anchor point
 * is first advanced to now, so time already elapsed is unaffected by the
 * new ratio.
 */
void hwtimer_adjust_rt_ratio(double ratio_correction)
{
	uint64_t current_stime = nsi_hws_get_time();
	int64_t s_diff = current_stime - last_radj_stime;
	/* Accumulated real time drift time since last adjustment: */

	last_radj_rtime += s_diff / clock_ratio;
	last_radj_stime = current_stime;

#if DEBUG_NP_TIMER
	char ct[30];
	int64_t r_drift = (long double)(clock_ratio-1.0)/(clock_ratio)*s_diff;

	last_drift_offset += r_drift;
	us_time_to_str(ct, current_stime);

	printf("%s(): @%s, s_diff= %llius after last adjust\n"
		" during which we drifted %.3fms\n"
		" total acc drift (last_drift_offset) = %.3fms\n"
		" last_radj_rtime = %.3fms (+%.3fms )\n"
		" Ratio adjusted to %f\n",
		__func__, ct, s_diff,
		r_drift/1000.0,
		last_drift_offset/1000.0,
		last_radj_rtime/1000.0,
		s_diff/clock_ratio/1000.0,
		clock_ratio*ratio_correction);
#endif

	clock_ratio *= ratio_correction;
}
/**
 * Return the current simulated RTC time in microseconds
 * (simulated time since boot plus the RTC offset)
 */
int64_t hwtimer_get_simu_rtc_time(void)
{
	return nsi_hws_get_time() + rtc_offset;
}
/**
 * Return a version of the host time which would have drifted as if the host
 * real time clock had been running from the simulated clock, and adjusted
 * both in rate and in offsets as the simulated one has been.
 *
 * Note that this time may be significantly ahead of the simulated time
 * (the time the embedded kernel thinks it is).
 * This will be the case in general if the linux runner is not able to run at or
 * faster than real time.
 */
void hwtimer_get_pseudohost_rtc_time(uint32_t *nsec, uint64_t *sec)
{
	/*
	 * Note: long double has a 64bits mantissa in x86.
	 * Therefore to avoid loss of precision after 500 odd years into
	 * the epoch, we first calculate the offset from the last adjustment
	 * time split in us and ns. So we keep the full precision for 500 odd
	 * years after the last clock ratio adjustment (or boot,
	 * whichever is latest).
	 * Meaning, we will still start to loose precision after 500 odd
	 * years of runtime without a clock ratio adjustment, but that really
	 * should not be much of a problem, given that the ns lower digits are
	 * pretty much noise anyhow.
	 * (So, all this is a huge overkill)
	 *
	 * The operation below in plain is just:
	 *   st = (rt - last_rt_adj_time)*ratio + last_dt_adj_time
	 * where st = simulated time
	 *       rt = real time
	 *       last_rt_adj_time = time (real) when the last ratio
	 *			    adjustment took place
	 *       last_st_adj_time = time (simulated) when the last ratio
	 *			    adjustment took place
	 *       ratio = ratio between simulated time and real time
	 */
	struct timespec tv;

	host_clock_gettime(&tv);
	uint64_t rt_us = (uint64_t)tv.tv_sec * 1000000ULL + tv.tv_nsec / 1000;
	uint32_t rt_ns = tv.tv_nsec % 1000; /* sub-microsecond remainder */

	long double drt_us = (long double)rt_us - last_radj_rtime;
	long double drt_ns = drt_us * 1000.0L + (long double)rt_ns;

	long double st = drt_ns * (long double)clock_ratio +
			 (long double)(last_radj_stime + rtc_offset) * 1000.0L;

	*nsec = fmodl(st, 1e9L);
	*sec = st / 1e9L;
}
static struct {
double stop_at;
double rtc_offset;
double rt_drift;
double rt_ratio;
} args;
/* Handler for the stop-at option: end the simulation at
 * args.stop_at (simulated) seconds */
static void cmd_stop_at_found(char *argv, int offset)
{
	NSI_ARG_UNUSED(offset);
	if (args.stop_at < 0) {
		nsi_print_error_and_exit("Error: stop-at must be positive "
					"(%s)\n", argv);
	}
	nsi_hws_set_end_of_time(args.stop_at*1e6); /* s -> us */
}
/* Handler for the --rt switch: slow execution down to (a ratio of) real time */
static void cmd_realtime_found(char *argv, int offset)
{
	NSI_ARG_UNUSED(argv);
	NSI_ARG_UNUSED(offset);
	hwtimer_set_real_time_mode(true);
}
/* Handler for the --no-rt switch: run decoupled from host real time */
static void cmd_no_realtime_found(char *argv, int offset)
{
	NSI_ARG_UNUSED(argv);
	NSI_ARG_UNUSED(offset);
	hwtimer_set_real_time_mode(false);
}
/* Handler for --rtc-offset=<offset>: offset the RTC by <offset> seconds at boot */
static void cmd_rtcoffset_found(char *argv, int offset)
{
	NSI_ARG_UNUSED(argv);
	NSI_ARG_UNUSED(offset);
	/* Option value is in s; scaled by 1e6 (presumably to us) for the timer */
	hwtimer_set_rtc_offset(args.rtc_offset*1e6);
}
/*
 * Handler for the --rt-drift=<dratio> option:
 * Apply a clock drift relative to real time (ratio = 1 + drift)
 */
static void cmd_rt_drift_found(char *argv, int offset)
{
	NSI_ARG_UNUSED(offset);
	NSI_ARG_UNUSED(argv);

	/* The !(x > -1) form (instead of x <= -1) also rejects NaN input */
	if (!(args.rt_drift > -1)) {
		nsi_print_error_and_exit("The drift needs to be > -1. "
					"Please use --help for more info\n");
	}

	args.rt_ratio = args.rt_drift + 1;
	hwtimer_set_rt_ratio(args.rt_ratio);
}
/*
 * Handler for the --rt-ratio=<ratio> option:
 * Set the relative speed of simulated time vs real time
 */
static void cmd_rt_ratio_found(char *argv, int offset)
{
	NSI_ARG_UNUSED(offset);
	NSI_ARG_UNUSED(argv);

	if (args.rt_ratio <= 0) {
		nsi_print_error_and_exit("The ratio needs to be > 0. "
					"Please use --help for more info\n");
	}

	hwtimer_set_rt_ratio(args.rt_ratio);
}
/*
 * Handler for the --rtc-reset switch: start the simulated RTC at 0.
 *
 * Consistency fix: use NSI_ARG_UNUSED() for the unused parameters like
 * every other option handler in this file, instead of ad-hoc (void) casts.
 */
static void cmd_rtcreset_found(char *argv, int offset)
{
	NSI_ARG_UNUSED(argv);
	NSI_ARG_UNUSED(offset);
	hwtimer_reset_rtc();
}
/*
 * Register this module's command line options (timing / real-time control)
 * with the overall command line parser
 */
static void nsi_add_time_options(void)
{
	static struct args_struct_t timer_options[] = {
	{
		.is_switch = true,
		.option = "rt",
		.type = 'b',
		.call_when_found = cmd_realtime_found,
		.descript = "Slow down the execution to the host real time, "
			    "or a ratio of it (see --rt-ratio below)"
	},
	{
		.is_switch = true,
		.option = "no-rt",
		.type = 'b',
		.call_when_found = cmd_no_realtime_found,
		.descript = "Do NOT slow down the execution to real time, but advance "
			    "the simulated time as fast as possible and decoupled from "
			    "the host time"
	},
	{
		.option = "rt-drift",
		.name = "dratio",
		.type = 'd',
		.dest = (void *)&args.rt_drift,
		.call_when_found = cmd_rt_drift_found,
		.descript = "Drift of the simulated clock relative to the host real time. "
			    "Normally this would be set to a value of a few ppm (e.g. 50e-6"
			    ") This option has no effect in non real time mode"
	},
	{
		.option = "rt-ratio",
		.name = "ratio",
		.type = 'd',
		.dest = (void *)&args.rt_ratio,
		.call_when_found = cmd_rt_ratio_found,
		.descript = "Relative speed of the simulated time vs real time. "
			    "For ex. set to 2 to have simulated time pass at double the "
			    "speed of real time. "
			    "Note that both rt-drift & rt-ratio adjust the same clock "
			    "speed, and therefore it does not make sense to use them "
			    "simultaneously. "
			    "This option has no effect in non real time mode"
	},
	{
		.option = "rtc-offset",
		.name = "time_offset",
		.type = 'd',
		.dest = (void *)&args.rtc_offset,
		.call_when_found = cmd_rtcoffset_found,
		.descript = "At boot, offset the RTC clock by this amount of seconds"
	},
	{
		.is_switch = true,
		.option = "rtc-reset",
		.type = 'b',
		.call_when_found = cmd_rtcreset_found,
		.descript = "Start the simulated real time clock at 0. Otherwise it starts "
			    "matching the value provided by the host real time clock"
	},
	{
		.option = "stop_at",
		.name = "time",
		.type = 'd',
		.dest = (void *)&args.stop_at,
		.call_when_found = cmd_stop_at_found,
		.descript = "In simulated seconds, when to stop automatically"
	},
	ARG_TABLE_ENDMARKER};

	nsi_add_command_line_opts(timer_options);
}

/* Hook the option registration into the PRE_BOOT_1 init level (priority 1) */
NSI_TASK(nsi_add_time_options, PRE_BOOT_1, 1);
``` | /content/code_sandbox/scripts/native_simulator/native/src/timer_model.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,894 |
```objective-c
/*
*
*/
/**
* @file
* @brief API to the native simulator - native command line parsing utilities
*
* Note: The arguments structure definitions is kept fully compatible with Zephyr's native_posix
* and BabbleSim's command line options to enable to reuse components between them.
* And for APIs to be accessible thru a trivial shim.
*/
#ifndef NATIVE_SIMULATOR_NATIVE_SRC_NSI_CMDLINE_H
#define NATIVE_SIMULATOR_NATIVE_SRC_NSI_CMDLINE_H
#include <stdbool.h>
#include <stddef.h>
#include "nsi_cmdline_main_if.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Prototype for a callback function when an option is found:
* inputs:
* argv: Whole argv[i] option as received in main
* offset: Offset to the end of the option string
* (including a possible ':' or '=')
* If the option had a value, it would be placed in &argv[offset]
*/
typedef void (*option_found_callback_f)(char *argv, int offset);
/*
* Structure defining each command line option
*/
struct args_struct_t {
/*
* if manual is set nsi_cmd_args_parse*() will ignore it except for
* displaying it the help messages and initializing <dest> to its
* default
*/
bool manual;
/* For help messages, should it be wrapped in "[]" */
bool is_mandatory;
/* It is just a switch: it does not have something to store after */
bool is_switch;
/* Option name we search for: --<option> */
char *option;
/*
* Name of the option destination in the help messages:
* "--<option>=<name>"
*/
char *name;
/* Type of option (see nsi_cmd_read_option_value()) */
char type;
/* Pointer to where the read value will be stored (may be NULL) */
void *dest;
/* Optional callback to be called when the switch is found */
option_found_callback_f call_when_found;
/* Long description for the help messages */
char *descript;
};
/* Sentinel entry which must terminate every args_struct_t table */
#define ARG_TABLE_ENDMARKER \
	{false, false, false, NULL, NULL, 0, NULL, NULL, NULL}

/* Retrieve the command line arguments the executable was started with */
void nsi_get_cmd_line_args(int *argc, char ***argv);
/* Retrieve the command line arguments reserved for the embedded test code */
void nsi_get_test_cmd_line_args(int *argc, char ***argv);
/* Register a table of command line options to be parsed */
void nsi_add_command_line_opts(struct args_struct_t *args);
#ifdef __cplusplus
}
#endif
#endif /* NATIVE_SIMULATOR_NATIVE_SRC_NSI_CMDLINE_H */
``` | /content/code_sandbox/scripts/native_simulator/native/src/include/nsi_cmdline.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 534 |
```python
#!/usr/bin/env python3
#
#
import argparse
import binascii
import sys
# Markers delimiting the hex-encoded coredump inside the serial console log
COREDUMP_PREFIX_STR = "#CD:"
COREDUMP_BEGIN_STR = COREDUMP_PREFIX_STR + "BEGIN#"
COREDUMP_END_STR = COREDUMP_PREFIX_STR + "END#"
COREDUMP_ERROR_STR = COREDUMP_PREFIX_STR + "ERROR CANNOT DUMP#"
def parse_args():
    """Parse the command line: input serial log and output coredump file."""
    argparser = argparse.ArgumentParser(allow_abbrev=False)
    argparser.add_argument("infile", help="Serial Log File")
    argparser.add_argument(
        "outfile",
        help="Output file for use with coredump GDB server")
    return argparser.parse_args()
def main():
    """Extract the hex coredump payload between the BEGIN/END markers of a
    serial console log and write the decoded bytes to the output file.

    Fixes vs original:
    - `if not infile:` after open() was dead code (open() raises on failure,
      it never returns a falsy object); real failures now print the same
      message via try/except.
    - has_error is checked before has_end: an error marker breaks out of the
      loop before END is seen, so the original ordering made the
      "log has error" branch unreachable.
    - Files are closed on all paths via context managers.
    """
    args = parse_args()

    try:
        infile = open(args.infile, "r")
    except OSError:
        print(f"ERROR: Cannot open input file: {args.infile}, exiting...")
        sys.exit(1)

    try:
        outfile = open(args.outfile, "wb")
    except OSError:
        infile.close()
        print(f"ERROR: Cannot open output file for write: {args.outfile}, exiting...")
        sys.exit(1)

    print(f"Input file {args.infile}")
    print(f"Output file {args.outfile}")

    has_begin = False
    has_end = False
    has_error = False
    go_parse_line = False
    bytes_written = 0

    with infile, outfile:
        for line in infile:
            if line.find(COREDUMP_BEGIN_STR) >= 0:
                # Found "BEGIN#" - beginning of log
                has_begin = True
                go_parse_line = True
                continue

            if line.find(COREDUMP_END_STR) >= 0:
                # Found "END#" - end of log
                has_end = True
                go_parse_line = False
                break

            if line.find(COREDUMP_ERROR_STR) >= 0:
                # Error was encountered during dumping: log is not usable
                has_error = True
                go_parse_line = False
                break

            if not go_parse_line:
                continue

            prefix_idx = line.find(COREDUMP_PREFIX_STR)
            if prefix_idx < 0:
                continue

            # Hex payload follows the "#CD:" prefix on each line
            prefix_idx += len(COREDUMP_PREFIX_STR)
            hex_str = line[prefix_idx:].strip()
            binary_data = binascii.unhexlify(hex_str)
            outfile.write(binary_data)
            bytes_written += len(binary_data)

    if not has_begin:
        print("ERROR: Beginning of log not found!")
    elif has_error:
        print("ERROR: log has error.")
    elif not has_end:
        print("WARN: End of log not found! Is log complete?")
    else:
        print(f"Bytes written {bytes_written}")
if __name__ == "__main__":
main()
``` | /content/code_sandbox/scripts/coredump/coredump_serial_log_parser.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 582 |
```python
#!/usr/bin/env python3
#
#
``` | /content/code_sandbox/scripts/coredump/coredump_parser/__init__.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10 |
```python
#!/usr/bin/env python3
#
#
import argparse
import logging
import os
import socket
import sys
from coredump_parser.log_parser import CoredumpLogFile
from coredump_parser.elf_parser import CoredumpElfFile
import gdbstubs
LOGGING_FORMAT = "[%(levelname)s][%(name)s] %(message)s"
# NOTE(review): "" binds to ALL interfaces (INADDR_ANY), not only localhost,
# despite the original "Only bind to local host" comment; use "localhost"
# if the server must be restricted to the loopback interface.
GDBSERVER_HOST = ""
class FakeSocket:
    """Socket-like adapter exposing stdin/stdout, used in --pipe mode."""

    def __init__(self) -> None:
        self.in_stream = sys.stdin.buffer
        self.out_stream = sys.stdout.buffer

    def recv(self, bufsize):
        """Read up to bufsize bytes from standard input."""
        return self.in_stream.read(bufsize)

    def send(self, data):
        """Write data to standard output, flushing immediately; return the
        number of bytes written."""
        written = self.out_stream.write(data)
        self.out_stream.flush()
        return written

    def close(self):
        """Nothing to close for the standard streams."""
        return None
def parse_args():
    """Build and run the command line parser for the coredump GDB server."""
    cmdline = argparse.ArgumentParser(allow_abbrev=False)

    cmdline.add_argument("elffile", help="Zephyr ELF binary")
    cmdline.add_argument("logfile", help="Coredump binary log file")
    cmdline.add_argument("--debug", action="store_true",
                         help="Print extra debugging information")
    cmdline.add_argument("--port", type=int, default=1234,
                         help="GDB server port")
    cmdline.add_argument("--pipe", action="store_true",
                         help="Use stdio to communicate with gdb")
    cmdline.add_argument("-v", "--verbose", action="store_true",
                         help="Print more information")

    return cmdline.parse_args()
def main():
    """Entry point: parse the coredump log + ELF, then serve GDB over TCP
    (default) or stdio (--pipe).

    Fix: only close the listening server socket when one was created.
    In --pipe mode `gdbserver` was never bound, so the unconditional
    `gdbserver.close()` raised NameError at session end.
    """
    args = parse_args()

    # Setup logging
    logging.basicConfig(format=LOGGING_FORMAT)

    # Setup logging for "parser"
    logger = logging.getLogger("parser")
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    # Setup logging for follow code
    logger = logging.getLogger("gdbserver")
    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        # Use INFO as default since we need to let user
        # know what is going on
        logger.setLevel(logging.INFO)

    # Setup logging for "gdbstub"
    logger = logging.getLogger("gdbstub")
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    if not os.path.isfile(args.elffile):
        logger.error(f"Cannot find file {args.elffile}, exiting...")
        sys.exit(1)

    if not os.path.isfile(args.logfile):
        logger.error(f"Cannot find file {args.logfile}, exiting...")
        sys.exit(1)

    logger.info(f"Log file: {args.logfile}")
    logger.info(f"ELF file: {args.elffile}")

    # Parse the coredump binary log file
    logf = CoredumpLogFile(args.logfile)
    logf.open()
    if not logf.parse():
        logger.error("Cannot parse log file, exiting...")
        logf.close()
        sys.exit(1)

    # Parse ELF file for code and read-only data
    elff = CoredumpElfFile(args.elffile)
    elff.open()
    if not elff.parse():
        logger.error("Cannot parse ELF file, exiting...")
        elff.close()
        logf.close()
        sys.exit(1)

    gdbstub = gdbstubs.get_gdbstub(logf, elff)

    gdbserver = None
    if not args.pipe:
        # Start a GDB server
        gdbserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        # Reuse address so we don't have to wait for socket to be
        # close before we can bind to the port again
        gdbserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        gdbserver.bind((GDBSERVER_HOST, args.port))
        gdbserver.listen(1)

        logger.info(f"Waiting GDB connection on port {args.port}...")

        conn, remote = gdbserver.accept()
    else:
        conn = FakeSocket()
        remote = "pipe"

    if conn:
        logger.info(f"Accepted GDB connection from {remote}")

        gdbstub.run(conn)

        conn.close()
        if gdbserver is not None:
            # Only the TCP path created a listening socket (bug fix: this
            # call used to raise NameError in --pipe mode)
            gdbserver.close()

        logger.info("GDB session finished.")

    elff.close()
    logf.close()
if __name__ == "__main__":
main()
``` | /content/code_sandbox/scripts/coredump/coredump_gdbserver.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 921 |
```python
#!/usr/bin/env python3
#
#
import logging
import struct
# Note: keep sync with C code
COREDUMP_HDR_ID = b'ZE'
COREDUMP_HDR_VER = 2
LOG_HDR_STRUCT = "<ccHHBBI"
LOG_HDR_SIZE = struct.calcsize(LOG_HDR_STRUCT)
COREDUMP_ARCH_HDR_ID = b'A'
LOG_ARCH_HDR_STRUCT = "<cHH"
LOG_ARCH_HDR_SIZE = struct.calcsize(LOG_ARCH_HDR_STRUCT)
COREDUMP_THREADS_META_HDR_ID = b'T'
LOG_THREADS_META_HDR_STRUCT = "<cHH"
LOG_THREADS_META_HDR_SIZE = struct.calcsize(LOG_THREADS_META_HDR_STRUCT)
COREDUMP_MEM_HDR_ID = b'M'
COREDUMP_MEM_HDR_VER = 1
LOG_MEM_HDR_STRUCT = "<cH"
LOG_MEM_HDR_SIZE = struct.calcsize(LOG_MEM_HDR_STRUCT)
logger = logging.getLogger("parser")
def reason_string(reason):
    """Map a fatal error reason code to its symbolic name.

    Keep sync with "enum k_fatal_error_reason"; unknown codes map to
    "(Unknown)".
    """
    reasons = {
        0: "K_ERR_CPU_EXCEPTION",
        1: "K_ERR_SPURIOUS_IRQ",
        2: "K_ERR_STACK_CHK_FAIL",
        3: "K_ERR_KERNEL_OOPS",
        4: "K_ERR_KERNEL_PANIC",
    }
    return reasons.get(reason, "(Unknown)")
class CoredumpLogFile:
    """
    Process the binary coredump file for register block
    and memory blocks.

    Fix: threads_metadata is initialized in __init__ so that
    get_threads_metadata() returns None instead of raising AttributeError
    for logs without a threads metadata ('T') section.
    """

    def __init__(self, logfile):
        self.logfile = logfile
        self.fd = None

        self.log_hdr = None
        self.arch_data = list()
        self.memory_regions = list()
        # None until a 'T' section is parsed (bug fix: previously unset)
        self.threads_metadata = None

    def open(self):
        self.fd = open(self.logfile, "rb")

    def close(self):
        self.fd.close()

    def get_arch_data(self):
        return self.arch_data

    def get_memory_regions(self):
        return self.memory_regions

    def get_threads_metadata(self):
        return self.threads_metadata

    def parse_arch_section(self):
        """Read the architecture register block ('A' section)."""
        hdr = self.fd.read(LOG_ARCH_HDR_SIZE)
        _, hdr_ver, num_bytes = struct.unpack(LOG_ARCH_HDR_STRUCT, hdr)

        arch_data = self.fd.read(num_bytes)
        self.arch_data = {"hdr_ver" : hdr_ver, "data" : arch_data}

        return True

    def parse_threads_metadata_section(self):
        """Read the threads metadata block ('T' section)."""
        hdr = self.fd.read(LOG_THREADS_META_HDR_SIZE)
        _, hdr_ver, num_bytes = struct.unpack(LOG_THREADS_META_HDR_STRUCT, hdr)

        data = self.fd.read(num_bytes)
        self.threads_metadata = {"hdr_ver" : hdr_ver, "data" : data}

        return True

    def parse_memory_section(self):
        """Read one memory dump block ('M' section) into memory_regions."""
        hdr = self.fd.read(LOG_MEM_HDR_SIZE)
        _, hdr_ver = struct.unpack(LOG_MEM_HDR_STRUCT, hdr)

        if hdr_ver != COREDUMP_MEM_HDR_VER:
            logger.error(f"Memory block version: {hdr_ver}, expected {COREDUMP_MEM_HDR_VER}!")
            return False

        # Figure out how to read the start and end addresses
        ptr_fmt = None
        if self.log_hdr["ptr_size"] == 64:
            ptr_fmt = "QQ"
        elif self.log_hdr["ptr_size"] == 32:
            ptr_fmt = "II"
        else:
            return False

        data = self.fd.read(struct.calcsize(ptr_fmt))
        saddr, eaddr = struct.unpack(ptr_fmt, data)

        size = eaddr - saddr

        data = self.fd.read(size)
        mem = {"start": saddr, "end": eaddr, "data": data}
        self.memory_regions.append(mem)

        logger.info("Memory: 0x%x to 0x%x of size %d" %
                    (saddr, eaddr, size))

        return True

    def parse(self):
        """Parse the whole log: global header first, then tagged sections.

        Returns True on success, False on any malformed/unknown content.
        """
        if self.fd is None:
            self.open()

        hdr = self.fd.read(LOG_HDR_SIZE)
        id1, id2, hdr_ver, tgt_code, ptr_size, flags, reason = struct.unpack(LOG_HDR_STRUCT, hdr)

        if (id1 + id2) != COREDUMP_HDR_ID:
            # ID in header does not match
            logger.error("Log header ID not found...")
            return False

        if hdr_ver > COREDUMP_HDR_VER:
            logger.error(f"Log version: {hdr_ver}, expected: {COREDUMP_HDR_VER}!")
            return False

        # Header stores log2 of the pointer size in bits
        ptr_size = 2 ** ptr_size

        self.log_hdr = {
            "hdr_version": hdr_ver,
            "tgt_code": tgt_code,
            "ptr_size": ptr_size,
            "flags": flags,
            "reason": reason,
        }

        logger.info("Reason: {0}".format(reason_string(reason)))
        logger.info(f"Pointer size {ptr_size}")

        del id1, id2, hdr_ver, tgt_code, ptr_size, flags, reason

        while True:
            # Peek at the one-byte section tag; each parser re-reads it
            # as part of its own header struct
            section_id = self.fd.read(1)
            if not section_id:
                # no more data to read
                break

            self.fd.seek(-1, 1)  # go back 1 byte

            if section_id == COREDUMP_ARCH_HDR_ID:
                if not self.parse_arch_section():
                    logger.error("Cannot parse architecture section")
                    return False
            elif section_id == COREDUMP_THREADS_META_HDR_ID:
                if not self.parse_threads_metadata_section():
                    logger.error("Cannot parse threads metadata section")
                    return False
            elif section_id == COREDUMP_MEM_HDR_ID:
                if not self.parse_memory_section():
                    logger.error("Cannot parse memory section")
                    return False
            else:
                # Unknown section in log file
                logger.error(f"Unknown section in log file with ID {section_id}")
                return False

        return True
``` | /content/code_sandbox/scripts/coredump/coredump_parser/log_parser.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,229 |
```python
#!/usr/bin/env python3
#
#
from gdbstubs.arch.x86 import GdbStub_x86
from gdbstubs.arch.x86_64 import GdbStub_x86_64
from gdbstubs.arch.arm_cortex_m import GdbStub_ARM_CortexM
from gdbstubs.arch.risc_v import GdbStub_RISC_V
from gdbstubs.arch.xtensa import GdbStub_Xtensa
from gdbstubs.arch.arm64 import GdbStub_ARM64
class TgtCode:
    # Target architecture codes as stored in the coredump log header
    # (tgt_code field); used to pick the matching GDB stub below.
    UNKNOWN = 0
    X86 = 1
    X86_64 = 2
    ARM_CORTEX_M = 3
    RISC_V = 4
    XTENSA = 5
    ARM64 = 6
def get_gdbstub(logfile, elffile):
    """Instantiate the architecture-specific GDB stub matching the target
    code recorded in the coredump header; return None for unknown targets."""
    stub_classes = {
        TgtCode.X86: GdbStub_x86,
        TgtCode.X86_64: GdbStub_x86_64,
        TgtCode.ARM_CORTEX_M: GdbStub_ARM_CortexM,
        TgtCode.RISC_V: GdbStub_RISC_V,
        TgtCode.XTENSA: GdbStub_Xtensa,
        TgtCode.ARM64: GdbStub_ARM64,
    }

    tgt_code = logfile.log_hdr['tgt_code']
    stub_class = stub_classes.get(tgt_code)
    if stub_class is None:
        return None

    return stub_class(logfile=logfile, elffile=elffile)
``` | /content/code_sandbox/scripts/coredump/gdbstubs/__init__.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 391 |
```python
#!/usr/bin/env python3
#
#
import logging
import struct
import elftools
from elftools.elf.elffile import ELFFile
from enum import IntEnum
# ELF section flags (sh_flags bits) and the combinations checked below
SHF_WRITE = 0x1
SHF_ALLOC = 0x2
SHF_EXEC = 0x4
SHF_WRITE_ALLOC = SHF_WRITE | SHF_ALLOC   # writable data sections
SHF_ALLOC_EXEC = SHF_ALLOC | SHF_EXEC     # executable (text) sections
# Must match enum in thread_info.c
class ThreadInfoOffset(IntEnum):
    """Indices into the _kernel_thread_info_offsets array found in the ELF
    (must match the enum in thread_info.c, per the note above)."""
    THREAD_INFO_OFFSET_VERSION = 0
    THREAD_INFO_OFFSET_K_CURR_THREAD = 1
    THREAD_INFO_OFFSET_K_THREADS = 2
    THREAD_INFO_OFFSET_T_ENTRY = 3
    THREAD_INFO_OFFSET_T_NEXT_THREAD = 4
    THREAD_INFO_OFFSET_T_STATE = 5
    THREAD_INFO_OFFSET_T_USER_OPTIONS = 6
    THREAD_INFO_OFFSET_T_PRIO = 7
    THREAD_INFO_OFFSET_T_STACK_PTR = 8
    THREAD_INFO_OFFSET_T_NAME = 9
    THREAD_INFO_OFFSET_T_ARCH = 10
    THREAD_INFO_OFFSET_T_PREEMPT_FLOAT = 11
    THREAD_INFO_OFFSET_T_COOP_FLOAT = 12
    THREAD_INFO_OFFSET_T_ARM_EXC_RETURN = 13
    THREAD_INFO_OFFSET_T_ARC_RELINQUISH_CAUSE = 14

    def __int__(self):
        return self.value
logger = logging.getLogger("parser")
class CoredumpElfFile():
    """
    Class to parse ELF file for memory content in various sections.
    There are read-only sections (e.g. text and rodata) where
    the memory content does not need to be dumped via coredump
    and can be retrieved from the ELF file.
    """

    def __init__(self, elffile):
        self.elffile = elffile
        self.fd = None
        self.elf = None
        self.memory_regions = list()
        # Thread-info metadata extracted from the ELF symbols; stay None
        # when the _kernel_thread_info_* symbols are absent
        self.kernel_thread_info_offsets = None
        self.kernel_thread_info_num_offsets = None
        self.kernel_thread_info_size_t_size = None

    def open(self):
        self.fd = open(self.elffile, "rb")
        self.elf = ELFFile(self.fd)

    def close(self):
        self.fd.close()

    def get_memory_regions(self):
        return self.memory_regions

    def get_kernel_thread_info_size_t_size(self):
        return self.kernel_thread_info_size_t_size

    def has_kernel_thread_info(self):
        return self.kernel_thread_info_offsets is not None

    def get_kernel_thread_info_offset(self, thread_info_offset_index):
        # Return the requested offset value, or None when thread info is
        # unavailable or the index is out of the known range
        if self.has_kernel_thread_info() and thread_info_offset_index <= ThreadInfoOffset.THREAD_INFO_OFFSET_T_ARC_RELINQUISH_CAUSE:
            return self.kernel_thread_info_offsets[thread_info_offset_index]
        else:
            return None

    def parse(self):
        """Walk the ELF sections collecting text/rodata memory regions, and
        extract the _kernel_thread_info_* data if the symbols exist.

        NOTE(review): when the ELF has several symbol tables (e.g. .symtab
        and .dynsym), a later table without the symbols overwrites the
        lookup results from an earlier one -- confirm this cannot happen
        for Zephyr ELFs.
        """
        if self.fd is None:
            self.open()

        kernel_thread_info_offsets_segment = None
        kernel_thread_info_num_offsets_segment = None
        _kernel_thread_info_offsets = None
        _kernel_thread_info_num_offsets = None
        _kernel_thread_info_size_t_size = None

        for section in self.elf.iter_sections():
            # Find symbols for _kernel_thread_info data
            if isinstance(section, elftools.elf.sections.SymbolTableSection):
                _kernel_thread_info_offsets = section.get_symbol_by_name("_kernel_thread_info_offsets")
                _kernel_thread_info_num_offsets = section.get_symbol_by_name("_kernel_thread_info_num_offsets")
                _kernel_thread_info_size_t_size = section.get_symbol_by_name("_kernel_thread_info_size_t_size")

            # REALLY NEED to match exact type as all other sections
            # (debug, text, etc.) are descendants where
            # isinstance() would match.
            if type(section) is not elftools.elf.sections.Section: # pylint: disable=unidiomatic-typecheck
                continue

            size = section['sh_size']
            flags = section['sh_flags']
            sec_start = section['sh_addr']
            # NOTE: 'end' is inclusive here (start + size - 1), unlike the
            # exclusive 'end' convention of the coredump log regions
            sec_end = sec_start + size - 1

            store = False
            sect_desc = "?"

            if section['sh_type'] == 'SHT_PROGBITS':
                if (flags & SHF_ALLOC_EXEC) == SHF_ALLOC_EXEC:
                    # Text section
                    store = True
                    sect_desc = "text"
                elif (flags & SHF_WRITE_ALLOC) == SHF_WRITE_ALLOC:
                    # Data section
                    #
                    # Running app changes the content so no need
                    # to store
                    pass
                elif (flags & SHF_ALLOC) == SHF_ALLOC:
                    # Read only data section
                    store = True
                    sect_desc = "read-only data"

            if store:
                mem_region = {"start": sec_start, "end": sec_end, "data": section.data()}
                logger.info("ELF Section: 0x%x to 0x%x of size %d (%s)" %
                            (mem_region["start"],
                             mem_region["end"],
                             len(mem_region["data"]),
                             sect_desc))
                self.memory_regions.append(mem_region)

        if _kernel_thread_info_size_t_size is not None and \
           _kernel_thread_info_num_offsets is not None and \
           _kernel_thread_info_offsets is not None:
            # Locate the PT_LOAD segments containing each symbol so the
            # file offsets of their values can be computed
            for seg in self.elf.iter_segments():
                if seg.header['p_type'] != 'PT_LOAD':
                    continue

                # Store segment of kernel_thread_info_offsets
                info_offsets_symbol = _kernel_thread_info_offsets[0]
                if info_offsets_symbol['st_value'] >= seg['p_vaddr'] and info_offsets_symbol['st_value'] < seg['p_vaddr'] + seg['p_filesz']:
                    kernel_thread_info_offsets_segment = seg

                # Store segment of kernel_thread_info_num_offsets
                num_offsets_symbol = _kernel_thread_info_num_offsets[0]
                if num_offsets_symbol['st_value'] >= seg['p_vaddr'] and num_offsets_symbol['st_value'] < seg['p_vaddr'] + seg['p_filesz']:
                    kernel_thread_info_num_offsets_segment = seg

                # Read and store size_t size
                size_t_size_symbol = _kernel_thread_info_size_t_size[0]
                if size_t_size_symbol['st_value'] >= seg['p_vaddr'] and size_t_size_symbol['st_value'] < seg['p_vaddr'] + seg['p_filesz']:
                    offset = size_t_size_symbol['st_value'] - seg['p_vaddr'] + seg['p_offset']
                    self.elf.stream.seek(offset)
                    self.kernel_thread_info_size_t_size = struct.unpack('B', self.elf.stream.read(size_t_size_symbol['st_size']))[0]

            # size_t width decides how the offset values are decoded
            struct_format = "I"
            if self.kernel_thread_info_size_t_size == 8:
                struct_format = "Q"

            # Read and store count of offset values
            num_offsets_symbol = _kernel_thread_info_num_offsets[0]
            offset = num_offsets_symbol['st_value'] - kernel_thread_info_num_offsets_segment['p_vaddr'] + kernel_thread_info_num_offsets_segment['p_offset']
            self.elf.stream.seek(offset)
            self.kernel_thread_info_num_offsets = struct.unpack(struct_format, self.elf.stream.read(num_offsets_symbol['st_size']))[0]

            array_format = ""
            for _ in range(self.kernel_thread_info_num_offsets):
                array_format = array_format + struct_format

            # Read and store array of offset values
            info_offsets_symbol = _kernel_thread_info_offsets[0]
            offset = info_offsets_symbol['st_value'] - kernel_thread_info_offsets_segment['p_vaddr'] + kernel_thread_info_offsets_segment['p_offset']
            self.elf.stream.seek(offset)
            self.kernel_thread_info_offsets = struct.unpack(array_format, self.elf.stream.read(info_offsets_symbol['st_size']))

        return True
``` | /content/code_sandbox/scripts/coredump/coredump_parser/elf_parser.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,637 |
```python
#!/usr/bin/env python3
#
#
``` | /content/code_sandbox/scripts/coredump/gdbstubs/arch/__init__.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10 |
```python
#!/usr/bin/env python3
#
#
import binascii
import logging
import struct
from gdbstubs.gdbstub import GdbStub
logger = logging.getLogger("gdbstub")
class RegNum():
    """GDB register numbers for the AArch64 'g' packet layout."""
    X0 = 0    # X0-X29 - 30 GP registers
    X1 = 1
    X2 = 2
    X3 = 3
    X4 = 4
    X5 = 5
    X6 = 6
    X7 = 7
    X8 = 8
    X9 = 9
    X10 = 10
    X11 = 11
    X12 = 12
    X13 = 13
    X14 = 14
    X15 = 15
    X16 = 16
    X17 = 17
    X18 = 18
    X19 = 19
    X20 = 20
    X21 = 21
    X22 = 22
    X23 = 23
    X24 = 24
    X25 = 25
    X26 = 26
    X27 = 27
    X28 = 28
    X29 = 29  # Frame pointer register
    LR = 30       # X30 Link Register(LR)
    SP_EL0 = 31   # Stack pointer EL0 (SP_EL0)
    PC = 32       # Program Counter (PC)
class GdbStub_ARM64(GdbStub):
    """GDB stub for AArch64 coredumps."""

    # 22 little-endian 64-bit values: X0..X18, LR, SP_EL0, PC
    ARCH_DATA_BLK_STRUCT = "<QQQQQQQQQQQQQQQQQQQQQQ"

    # Default signal used by all other script, just using the same
    GDB_SIGNAL_DEFAULT = 7

    # The number of registers expected by GDB
    GDB_G_PKT_NUM_REGS = 33

    def __init__(self, logfile, elffile):
        super().__init__(logfile=logfile, elffile=elffile)
        self.registers = None
        self.gdb_signal = self.GDB_SIGNAL_DEFAULT

        self.parse_arch_data_block()

    def parse_arch_data_block(self):
        """Decode the arch register block into the registers dict."""
        blk = self.logfile.get_arch_data()['data']
        values = struct.unpack(self.ARCH_DATA_BLK_STRUCT, blk)

        # X0..X18 occupy the first 19 slots of the dump
        self.registers = {reg: values[reg]
                          for reg in range(RegNum.X0, RegNum.X18 + 1)}

        # Callee saved registers (X19..X29) are not provided in the
        # arch_esf structure, so they stay absent from the dict and are
        # reported as unknown in handle_register_group_read_packet.
        self.registers[RegNum.LR] = values[19]
        self.registers[RegNum.SP_EL0] = values[20]
        self.registers[RegNum.PC] = values[21]

    def handle_register_group_read_packet(self):
        """Reply to 'g' with all registers; unknown ones are sent as 'x'."""
        reply = b''
        for reg in range(self.GDB_G_PKT_NUM_REGS):
            if reg in self.registers:
                reply += binascii.hexlify(struct.pack("<Q", self.registers[reg]))
            else:
                # Register not in coredump -> unknown value,
                # encoded as 16 'x' characters (8 bytes)
                reply += b'x' * 16

        self.put_gdb_packet(reply)

    def handle_register_single_read_packet(self, pkt):
        """Reply to 'p': registers outside the general set are reported
        as unavailable."""
        self.put_gdb_packet(b'x' * 16)
``` | /content/code_sandbox/scripts/coredump/gdbstubs/arch/arm64.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,080 |
```python
#!/usr/bin/env python3
#
#
import abc
import binascii
import logging
from coredump_parser.elf_parser import ThreadInfoOffset
logger = logging.getLogger("gdbstub")
class GdbStub(abc.ABC):
def __init__(self, logfile, elffile):
self.logfile = logfile
self.elffile = elffile
self.socket = None
self.gdb_signal = None
self.thread_ptrs = list()
self.selected_thread = 0
mem_regions = list()
for r in logfile.get_memory_regions():
mem_regions.append(r)
for r in elffile.get_memory_regions():
mem_regions.append(r)
self.mem_regions = mem_regions
def get_gdb_packet(self):
socket = self.socket
if socket is None:
return None
data = b''
checksum = 0
# Wait for '$'
while True:
ch = socket.recv(1)
if ch == b'$':
break
# Get a full packet
while True:
ch = socket.recv(1)
if ch == b'#':
# End of packet
break
checksum += ord(ch)
data += ch
# Get checksum (2-bytes)
ch = socket.recv(2)
in_chksum = ord(binascii.unhexlify(ch))
logger.debug(f"Received GDB packet: {data}")
if (checksum % 256) == in_chksum:
# ACK
logger.debug("ACK")
socket.send(b'+')
return data
else:
# NACK
logger.debug(f"NACK (checksum {in_chksum} != {checksum}")
socket.send(b'-')
return None
def put_gdb_packet(self, data):
socket = self.socket
if socket is None:
return
checksum = 0
for d in data:
checksum += d
pkt = b'$' + data + b'#'
checksum = checksum % 256
pkt += format(checksum, "02X").encode()
logger.debug(f"Sending GDB packet: {pkt}")
socket.send(pkt)
    def get_memory(self, start_address, length):
        """Extract `length` bytes starting at `start_address` from the merged
        memory regions, or None if any address is not covered.

        NOTE(review): region lookup treats r['end'] as exclusive, but ELF
        regions set end = start + size - 1 (inclusive), so the last byte of
        an ELF region looks unreachable; also, when addr == r['end'] for a
        log region the slice below is empty and a byte is silently skipped
        while `remaining` still decrements -- confirm both edge cases.
        """
        def get_mem_region(addr):
            # Linear scan of all known regions for one covering addr
            for r in self.mem_regions:
                if r['start'] <= addr < r['end']:
                    return r

            return None

        # FIXME: Need more efficient way of extracting memory content
        remaining = length
        addr = start_address
        barray = b''

        r = get_mem_region(addr)
        while remaining > 0:
            if r is None:
                # Address not covered by any region: whole read fails
                barray = None
                break

            if addr > r['end']:
                # Walked past the current region; find the next one
                r = get_mem_region(addr)
                continue

            offset = addr - r['start']
            barray += r['data'][offset:offset+1]

            addr += 1
            remaining -= 1

        return barray
def handle_signal_query_packet(self):
# the '?' packet
pkt = b'S'
pkt += format(self.gdb_signal, "02X").encode()
self.put_gdb_packet(pkt)
    @abc.abstractmethod
    def handle_register_group_read_packet(self):
        """Reply to 'g' (read all registers); must be implemented per-arch."""
        # the 'g' packet for reading a group of registers
        pass
def handle_register_group_write_packet(self):
# the 'G' packet for writing to a group of registers
#
# We don't support writing so return error
self.put_gdb_packet(b"E01")
def handle_register_single_read_packet(self, pkt):
# the 'p' packet for reading a single register
self.put_gdb_packet(b"E01")
def handle_register_single_write_packet(self, pkt):
# the 'P' packet for writing to registers
#
# We don't support writing so return error
self.put_gdb_packet(b"E01")
def handle_memory_read_packet(self, pkt):
# the 'm' packet for reading memory: m<addr>,<len>
# extract address and length from packet
# and convert them into usable integer values
str_addr, str_length = pkt[1:].split(b',')
s_addr = int(b'0x' + str_addr, 16)
length = int(b'0x' + str_length, 16)
barray = self.get_memory(s_addr, length)
if barray is not None:
pkt = binascii.hexlify(barray)
self.put_gdb_packet(pkt)
else:
self.put_gdb_packet(b"E01")
def handle_memory_write_packet(self, pkt):
# the 'M' packet for writing to memory
#
# We don't support writing so return error
self.put_gdb_packet(b"E02")
    def handle_general_query_packet(self, pkt):
        """Handle 'q*' packets: qfThreadInfo/qsThreadInfo (thread ID list)
        and qThreadExtraInfo (thread description). Anything else, or any
        query without arch thread support + ELF thread info, gets an empty
        reply.
        """
        if self.arch_supports_thread_operations() and self.elffile.has_kernel_thread_info():
            # For packets qfThreadInfo/qsThreadInfo, obtain a list of all active thread IDs
            if pkt[0:12] == b"qfThreadInfo":
                threads_metadata_data = self.logfile.get_threads_metadata()["data"]
                size_t_size = self.elffile.get_kernel_thread_info_size_t_size()

                # First, find and store the thread that _kernel considers current
                k_curr_thread_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_K_CURR_THREAD)
                curr_thread_ptr_bytes = threads_metadata_data[k_curr_thread_offset:(k_curr_thread_offset + size_t_size)]
                curr_thread_ptr = int.from_bytes(curr_thread_ptr_bytes, "little")
                self.thread_ptrs.append(curr_thread_ptr)
                thread_count = 1
                response = b"m1"

                # Next, find the pointer to the linked list of threads in the _kernel struct
                k_threads_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_K_THREADS)
                thread_ptr_bytes = threads_metadata_data[k_threads_offset:(k_threads_offset + size_t_size)]
                thread_ptr = int.from_bytes(thread_ptr_bytes, "little")
                if thread_ptr != curr_thread_ptr:
                    self.thread_ptrs.append(thread_ptr)
                    thread_count += 1
                    response += b"," + bytes(str(thread_count), 'ascii')

                # Next walk the linked list, counting the number of threads and construct the response for qfThreadInfo along the way
                t_next_thread_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_T_NEXT_THREAD)
                while thread_ptr is not None:
                    thread_ptr_bytes = self.get_memory(thread_ptr + t_next_thread_offset, size_t_size)
                    if thread_ptr_bytes is not None:
                        thread_ptr = int.from_bytes(thread_ptr_bytes, "little")
                        if thread_ptr == 0:
                            # NULL next pointer: end of the thread list
                            thread_ptr = None
                            continue
                        if thread_ptr != curr_thread_ptr:
                            self.thread_ptrs.append(thread_ptr)
                            thread_count += 1
                            response += b"," + bytes(f'{thread_count:x}', 'ascii')
                    else:
                        thread_ptr = None
                self.put_gdb_packet(response)
            elif pkt[0:12] == b"qsThreadInfo":
                # All IDs were sent in the qfThreadInfo reply; 'l' ends the list
                self.put_gdb_packet(b"l")
            # For qThreadExtraInfo, obtain a printable string description of thread attributes for the provided thread
            elif pkt[0:16] == b"qThreadExtraInfo":
                thread_info_bytes = b''
                thread_index_str = ''
                # Packet shape: qThreadExtraInfo,<hex id>; index 16 is the comma
                for n in range(17, len(pkt)):
                    thread_index_str += chr(pkt[n])

                thread_id = int(thread_index_str, 16)
                # NOTE(review): this guard rejects thread_id == len(thread_ptrs)
                # although thread_ptrs[thread_id - 1] would be a valid last
                # entry for the 1-based IDs issued above -- looks like an
                # off-by-one; confirm against the qfThreadInfo ID scheme.
                if len(self.thread_ptrs) > thread_id:
                    thread_info_bytes += b'name: '

                    # Thread name: NUL-terminated string read byte by byte
                    thread_ptr = self.thread_ptrs[thread_id - 1]
                    t_name_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_T_NAME)
                    thread_name_next_byte = self.get_memory(thread_ptr + t_name_offset, 1)
                    index = 0
                    while (thread_name_next_byte is not None) and (thread_name_next_byte != b'\x00'):
                        thread_info_bytes += thread_name_next_byte
                        index += 1
                        thread_name_next_byte = self.get_memory(thread_ptr + t_name_offset + index, 1)

                    t_state_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_T_STATE)
                    thread_state_byte = self.get_memory(thread_ptr + t_state_offset, 1)
                    if thread_state_byte is not None:
                        thread_state = int.from_bytes(thread_state_byte, "little")
                        thread_info_bytes += b', state: ' + bytes(hex(thread_state), 'ascii')

                    t_user_options_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_T_USER_OPTIONS)
                    thread_user_options_byte = self.get_memory(thread_ptr + t_user_options_offset, 1)
                    if thread_user_options_byte is not None:
                        thread_user_options = int.from_bytes(thread_user_options_byte, "little")
                        thread_info_bytes += b', user_options: ' + bytes(hex(thread_user_options), 'ascii')

                    t_prio_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_T_PRIO)
                    thread_prio_byte = self.get_memory(thread_ptr + t_prio_offset, 1)
                    if thread_prio_byte is not None:
                        thread_prio = int.from_bytes(thread_prio_byte, "little")
                        thread_info_bytes += b', prio: ' + bytes(hex(thread_prio), 'ascii')

                self.put_gdb_packet(binascii.hexlify(thread_info_bytes))
            else:
                self.put_gdb_packet(b'')
        else:
            self.put_gdb_packet(b'')
    def arch_supports_thread_operations(self):
        """Whether this architecture stub implements thread operations.

        Base-class default is False; architecture subclasses (e.g. ARM
        Cortex-M) override this to enable the 'T'/'H' packet handling.
        """
        return False
def handle_thread_alive_packet(self, pkt):
# the 'T' packet for finding out if a thread is alive.
if self.arch_supports_thread_operations() and self.elffile.has_kernel_thread_info():
# Reply OK to report thread alive, allowing GDB to perform other thread operations
self.put_gdb_packet(b'OK')
else:
self.put_gdb_packet(b'')
    def handle_thread_register_group_read_packet(self):
        """Read registers for the currently selected thread.

        Base-class default replies empty (unsupported); architecture
        subclasses override this to reconstruct registers from the
        thread's saved stack frame.
        """
        self.put_gdb_packet(b'')
def handle_thread_op_packet(self, pkt):
# the 'H' packet for setting thread for subsequent operations.
if self.arch_supports_thread_operations() and self.elffile.has_kernel_thread_info():
if pkt[0:2] == b"Hg":
thread_index_str = ''
for n in range(2, len(pkt)):
thread_index_str += chr(pkt[n])
# Thread-id of '0' indicates an arbitrary process or thread
if thread_index_str in ('0', ''):
self.selected_thread = 0
self.handle_register_group_read_packet()
return
self.selected_thread = int(thread_index_str, 16) - 1
self.handle_thread_register_group_read_packet()
else:
self.put_gdb_packet(b'')
else:
self.put_gdb_packet(b'')
def run(self, socket):
self.socket = socket
while True:
pkt = self.get_gdb_packet()
if pkt is None:
continue
pkt_type = pkt[0:1]
logger.debug(f"Got packet type: {pkt_type}")
if pkt_type == b'?':
self.handle_signal_query_packet()
elif pkt_type in (b'C', b'S'):
# Continue/stepping execution, which is not supported.
# So signal exception again
self.handle_signal_query_packet()
elif pkt_type == b'g':
self.handle_register_group_read_packet()
elif pkt_type == b'G':
self.handle_register_group_write_packet()
elif pkt_type == b'p':
self.handle_register_single_read_packet(pkt)
elif pkt_type == b'P':
self.handle_register_single_write_packet(pkt)
elif pkt_type == b'm':
self.handle_memory_read_packet(pkt)
elif pkt_type == b'M':
self.handle_memory_write_packet(pkt)
elif pkt_type == b'q':
self.handle_general_query_packet(pkt)
elif pkt_type == b'T':
self.handle_thread_alive_packet(pkt)
elif pkt_type == b'H':
self.handle_thread_op_packet(pkt)
elif pkt_type == b'k':
# GDB quits
break
else:
self.put_gdb_packet(b'')
``` | /content/code_sandbox/scripts/coredump/gdbstubs/gdbstub.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,664 |
```python
#!/usr/bin/env python3
#
#
import binascii
import logging
import struct
from gdbstubs.gdbstub import GdbStub
logger = logging.getLogger("gdbstub")
class RegNum():
    """GDB register numbers for amd64.

    Values must match the enum amd64_regnum in GDB; do not reorder.
    """
    # Matches the enum amd64_regnum in GDB
    RAX = 0
    RBX = 1
    RCX = 2
    RDX = 3
    RSI = 4
    RDI = 5
    RBP = 6
    RSP = 7
    R8 = 8
    R9 = 9
    R10 = 10
    R11 = 11
    R12 = 12
    R13 = 13
    R14 = 14
    R15 = 15
    RIP = 16
    EFLAGS = 17
    CS = 18
    SS = 19
    DS = 20
    ES = 21
    FS = 22
    GS = 23
    FS_BASE = 24
    GS_BASE = 25
    K_GS_BASE = 26
class ExceptionVectors():
    """x86 hardware exception vector numbers.

    Values must match arch/x86/include/kernel_arch_data.h.
    """
    # Matches arch/x86/include/kernel_arch_data.h
    IV_DIVIDE_ERROR = 0
    IV_DEBUG = 1
    IV_NON_MASKABLE_INTERRUPT = 2
    IV_BREAKPOINT = 3
    IV_OVERFLOW = 4
    IV_BOUND_RANGE = 5
    IV_INVALID_OPCODE = 6
    IV_DEVICE_NOT_AVAILABLE = 7
    IV_DOUBLE_FAULT = 8
    IV_COPROC_SEGMENT_OVERRUN = 9
    IV_INVALID_TSS = 10
    IV_SEGMENT_NOT_PRESENT = 11
    IV_STACK_FAULT = 12
    IV_GENERAL_PROTECTION = 13
    IV_PAGE_FAULT = 14
    IV_RESERVED = 15
    IV_X87_FPU_FP_ERROR = 16
    IV_ALIGNMENT_CHECK = 17
    IV_MACHINE_CHECK = 18
    IV_SIMD_FP = 19
    IV_VIRT_EXCEPTION = 20
    IV_SECURITY_EXCEPTION = 30
class GdbStub_x86_64(GdbStub):
    """GDB stub for x86-64 (amd64) coredumps.

    Decodes the architecture-specific block of the coredump into a
    register dictionary and answers GDB register-read requests.
    """

    # Default GDB signal used when the exception vector has no
    # explicit entry in GDB_SIGNAL_MAPPING.
    GDB_SIGNAL_DEFAULT = 7

    # Mapping is from GDB's gdb/i386-stubs.c
    GDB_SIGNAL_MAPPING = {
        ExceptionVectors.IV_DIVIDE_ERROR: 8,
        ExceptionVectors.IV_DEBUG: 5,
        ExceptionVectors.IV_BREAKPOINT: 5,
        ExceptionVectors.IV_OVERFLOW: 16,
        ExceptionVectors.IV_BOUND_RANGE: 16,
        ExceptionVectors.IV_INVALID_OPCODE: 4,
        ExceptionVectors.IV_DEVICE_NOT_AVAILABLE: 8,
        ExceptionVectors.IV_DOUBLE_FAULT: 7,
        ExceptionVectors.IV_COPROC_SEGMENT_OVERRUN: 11,
        ExceptionVectors.IV_INVALID_TSS: 11,
        ExceptionVectors.IV_SEGMENT_NOT_PRESENT: 11,
        ExceptionVectors.IV_STACK_FAULT: 11,
        ExceptionVectors.IV_GENERAL_PROTECTION: 11,
        ExceptionVectors.IV_PAGE_FAULT: 11,
        ExceptionVectors.IV_X87_FPU_FP_ERROR: 7,
    }

    # Number of registers sent in a 'g' packet reply.
    GDB_G_PKT_NUM_REGS = 34

    # Registers transferred as 32-bit values in the 'g' packet;
    # all others are 64-bit.
    GDB_32BIT_REGS = {
        RegNum.EFLAGS,
        RegNum.CS,
        RegNum.SS,
        RegNum.DS,
        RegNum.ES,
        RegNum.FS,
        RegNum.GS,
    }

    def __init__(self, logfile, elffile):
        super().__init__(logfile=logfile, elffile=elffile)
        self.registers = None
        self.exception_vector = None
        self.exception_code = None
        self.gdb_signal = self.GDB_SIGNAL_DEFAULT

        self.parse_arch_data_block()
        self.compute_signal()

    def parse_arch_data_block(self):
        """Unpack the dumped register block into self.registers.

        The block is a sequence of little-endian 64-bit values; a
        shorter block means the callee-saved registers (RBX, R12-R15)
        were not dumped (CONFIG_EXCEPTION_DEBUG=n).
        """
        arch_data_blk = self.logfile.get_arch_data()['data']
        arch_data_blk_struct = "<QQQQQQQQQQQQQQQQQQQQQQ"
        cfg_exception_debug = True
        if len(arch_data_blk) != struct.calcsize(arch_data_blk_struct):
            # There are fewer registers dumped
            # when CONFIG_EXCEPTION_DEBUG=n
            arch_data_blk_struct = "<QQQQQQQQQQQQQQQQQ"
            cfg_exception_debug = False

        tu = struct.unpack(arch_data_blk_struct, arch_data_blk)

        self.registers = dict()

        # First two values are the exception vector and error code;
        # the rest are registers in the order coredump.c writes them.
        self.exception_vector = tu[0]
        self.exception_code = tu[1]
        self.registers[RegNum.RAX] = tu[2]
        self.registers[RegNum.RCX] = tu[3]
        self.registers[RegNum.RDX] = tu[4]
        self.registers[RegNum.RSI] = tu[5]
        self.registers[RegNum.RDI] = tu[6]
        self.registers[RegNum.RSP] = tu[7]
        self.registers[RegNum.R8 ] = tu[8]
        self.registers[RegNum.R9 ] = tu[9]
        self.registers[RegNum.R10] = tu[10]
        self.registers[RegNum.R11] = tu[11]
        self.registers[RegNum.RIP] = tu[12]
        self.registers[RegNum.EFLAGS] = tu[13]
        self.registers[RegNum.CS] = tu[14]
        self.registers[RegNum.SS] = tu[15]
        self.registers[RegNum.RBP] = tu[16]
        if cfg_exception_debug:
            self.registers[RegNum.RBX] = tu[17]
            self.registers[RegNum.R12] = tu[18]
            self.registers[RegNum.R13] = tu[19]
            self.registers[RegNum.R14] = tu[20]
            self.registers[RegNum.R15] = tu[21]

    def compute_signal(self):
        """Translate the exception vector into a GDB signal number."""
        sig = self.GDB_SIGNAL_DEFAULT
        vector = self.exception_vector

        # Defensive: keep the default signal if no vector was parsed.
        if vector is None:
            sig = self.GDB_SIGNAL_DEFAULT

        # Map vector number to GDB signal number
        if vector in self.GDB_SIGNAL_MAPPING:
            sig = self.GDB_SIGNAL_MAPPING[vector]

        self.gdb_signal = sig

    def handle_register_group_read_packet(self):
        """Build the 'g' packet reply: all registers hex-encoded in
        index order, with 'x' filler for values not in the coredump.
        """
        idx = 0
        pkt = b''

        while idx < self.GDB_G_PKT_NUM_REGS:
            if idx in self.GDB_32BIT_REGS:
                reg_fmt = "<I"
                reg_bytes = 4
            else:
                reg_fmt = "<Q"
                reg_bytes = 8

            if idx in self.registers:
                bval = struct.pack(reg_fmt, self.registers[idx])
                pkt += binascii.hexlify(bval)
            else:
                # Register not in coredump -> unknown value
                # Send in "xxxxxxxx"
                pkt += b'x' * (reg_bytes * 2)

            idx += 1

        self.put_gdb_packet(pkt)

    def handle_register_single_read_packet(self, pkt):
        # Mark registers as "<unavailable>".
        # 'p' packets are usually used for registers
        # other than the general ones (e.g. eax, ebx)
        # so we can safely reply "xxxxxxxx" here.
        self.put_gdb_packet(b'x' * 16)
``` | /content/code_sandbox/scripts/coredump/gdbstubs/arch/x86_64.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,571 |
```python
#!/usr/bin/env python3
#
#
import binascii
import logging
import struct
from gdbstubs.gdbstub import GdbStub
logger = logging.getLogger("gdbstub")
class RegNum():
    """RISC-V register numbers in GDB 'g'-packet order (x0-x31, PC)."""
    ZERO = 0
    RA = 1
    SP = 2
    GP = 3
    TP = 4
    T0 = 5
    T1 = 6
    T2 = 7
    FP = 8
    S1 = 9
    A0 = 10
    A1 = 11
    A2 = 12
    A3 = 13
    A4 = 14
    A5 = 15
    A6 = 16
    A7 = 17
    S2 = 18
    S3 = 19
    S4 = 20
    S5 = 21
    S6 = 22
    S7 = 23
    S8 = 24
    S9 = 25
    S10 = 26
    S11 = 27
    T3 = 28
    T4 = 29
    T5 = 30
    T6 = 31
    PC = 32
class GdbStub_RISC_V(GdbStub):
    """GDB stub for RISC-V coredumps.

    Supports two arch-block versions: v1 dumps 32-bit registers,
    v2 dumps 64-bit registers (RV64).
    """

    # v1 layout: 18 little-endian 32-bit values.
    ARCH_DATA_BLK_STRUCT = "<IIIIIIIIIIIIIIIIII"
    # v2 layout: 18 little-endian 64-bit values.
    ARCH_DATA_BLK_STRUCT_2 = "<QQQQQQQQQQQQQQQQQQ"

    GDB_SIGNAL_DEFAULT = 7

    # Number of registers sent in a 'g' packet reply (x0-x31 + PC).
    GDB_G_PKT_NUM_REGS = 33

    def __init__(self, logfile, elffile):
        super().__init__(logfile=logfile, elffile=elffile)
        self.registers = None
        self.gdb_signal = self.GDB_SIGNAL_DEFAULT

        self.parse_arch_data_block()

    def parse_arch_data_block(self):
        """Unpack the dumped register block into self.registers.

        Only caller-saved registers and PC are dumped; callee-saved
        registers are reported as unavailable in 'g' replies.

        Raises:
            ValueError: if the arch block header version is unknown.
        """
        arch_data_blk = self.logfile.get_arch_data()['data']
        self.arch_data_ver = self.logfile.get_arch_data()['hdr_ver']

        if self.arch_data_ver == 1:
            tu = struct.unpack(self.ARCH_DATA_BLK_STRUCT, arch_data_blk)
        elif self.arch_data_ver == 2:
            tu = struct.unpack(self.ARCH_DATA_BLK_STRUCT_2, arch_data_blk)
        else:
            # Fail loudly; previously an unknown version produced a
            # confusing NameError on 'tu' below.
            raise ValueError(
                f"Unknown RISC-V arch data block version: {self.arch_data_ver}")

        self.registers = dict()

        self.registers[RegNum.RA] = tu[0]
        self.registers[RegNum.TP] = tu[1]
        self.registers[RegNum.T0] = tu[2]
        self.registers[RegNum.T1] = tu[3]
        self.registers[RegNum.T2] = tu[4]
        self.registers[RegNum.A0] = tu[5]
        self.registers[RegNum.A1] = tu[6]
        self.registers[RegNum.A2] = tu[7]
        self.registers[RegNum.A3] = tu[8]
        self.registers[RegNum.A4] = tu[9]
        self.registers[RegNum.A5] = tu[10]
        self.registers[RegNum.A6] = tu[11]
        self.registers[RegNum.A7] = tu[12]
        self.registers[RegNum.T3] = tu[13]
        self.registers[RegNum.T4] = tu[14]
        self.registers[RegNum.T5] = tu[15]
        self.registers[RegNum.T6] = tu[16]
        self.registers[RegNum.PC] = tu[17]

    def handle_register_group_read_packet(self):
        """Build the 'g' packet reply: registers hex-encoded in index
        order, with 'x' filler for values not in the coredump.
        Register width follows the arch block version (4 or 8 bytes).
        """
        reg_fmt = "<I" if self.arch_data_ver == 1 else "<Q"
        idx = 0
        pkt = b''

        while idx < self.GDB_G_PKT_NUM_REGS:
            if idx in self.registers:
                bval = struct.pack(reg_fmt, self.registers[idx])
                pkt += binascii.hexlify(bval)
            else:
                # Register not in coredump -> unknown value
                # Send in "xxxxxxxx"
                length = 8 if self.arch_data_ver == 1 else 16
                pkt += b'x' * length
            idx += 1

        self.put_gdb_packet(pkt)

    def handle_register_single_read_packet(self, pkt):
        # Mark registers as "<unavailable>". 'p' packets are not sent for the registers
        # currently handled in this file so we can safely reply "xxxxxxxx" here.
        length = 8 if self.arch_data_ver == 1 else 16
        self.put_gdb_packet(b'x' * length)
``` | /content/code_sandbox/scripts/coredump/gdbstubs/arch/risc_v.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 985 |
```python
#!/usr/bin/env python3
#
#
import binascii
import logging
import struct
import sys
from enum import Enum
from gdbstubs.gdbstub import GdbStub
logger = logging.getLogger("gdbstub")
# Matches same in coredump.c
XTENSA_BLOCK_HDR_DUMMY_SOC = 255
# Must match --soc arg options; see get_soc
class XtensaSoc(Enum):
    """SOC identifier stored as the first byte of the Xtensa arch block.

    Values must stay in sync with the coredump writer's --soc options.
    """
    UNKNOWN = 0
    SAMPLE_CONTROLLER = 1
    ESP32 = 2
    INTEL_ADSP_CAVS = 3
    ESP32S2 = 4
    ESP32S3 = 5
    DC233C = 6
# The previous version of this script didn't need to know
# what toolchain Zephyr was built with; it assumed sample_controller
# was built with the Zephyr SDK and ESP32 with Espressif's.
# However, if a SOC can be built by two separate toolchains,
# there is a chance that the GDBs provided by the toolchains will
# assign different indices to the same registers. For example, the
# Intel ADSP family of SOCs can be built with both Zephyr's
# SDK and Cadence's XCC toolchain. With the same SOC APL,
# the SDK's GDB assigns PC the index 0, while XCC's GDB assigns
# it the index 32.
#
# (The Espressif value isn't really required, since ESP32 can
# only be built with Espressif's toolchain, but it's included for
# completeness.)
class XtensaToolchain(Enum):
    """Toolchain identifier stored in the v2+ Xtensa arch block.

    Needed because different toolchains' GDBs assign different
    indices to the same registers (see comment above).
    """
    UNKNOWN = 0
    ZEPHYR = 1
    XCC = 2
    ESPRESSIF = 3
def get_gdb_reg_definition(soc, toolchain):
    """Return the GdbRegDef_* class matching a SOC/toolchain pair.

    Exits with an error for the unsupported ESPRESSIF+CAVS combination
    and raises NotImplementedError for unknown SOCs/toolchains.
    """
    # CAVS is the only SOC whose register numbering depends on the
    # toolchain, so it needs special handling.
    if soc == XtensaSoc.INTEL_ADSP_CAVS:
        if toolchain == XtensaToolchain.ZEPHYR:
            return GdbRegDef_Intel_Adsp_CAVS_Zephyr
        if toolchain == XtensaToolchain.XCC:
            return GdbRegDef_Intel_Adsp_CAVS_XCC
        if toolchain == XtensaToolchain.ESPRESSIF:
            logger.error("Can't use espressif toolchain with CAVS. " +
                         "Use zephyr or xcc instead. Exiting...")
            sys.exit(1)
        raise NotImplementedError

    # All other SOCs build with a single toolchain each.
    soc_defs = {
        XtensaSoc.SAMPLE_CONTROLLER: GdbRegDef_Sample_Controller,
        XtensaSoc.ESP32: GdbRegDef_ESP32,
        XtensaSoc.ESP32S2: GdbRegDef_ESP32S2,
        XtensaSoc.ESP32S3: GdbRegDef_ESP32S3,
        XtensaSoc.DC233C: GdbRegDef_DC233C,
    }
    if soc in soc_defs:
        return soc_defs[soc]
    raise NotImplementedError
class ExceptionCodes(Enum):
    """Xtensa EXCCAUSE values.

    Matches arch/xtensa/core/fatal.c->xtensa_exccause. Causes not
    listed here are reserved/unknown and map to the default signal.
    """
    # Matches arch/xtensa/core/fatal.c->xtensa_exccause
    ILLEGAL_INSTRUCTION = 0
    # Syscall not fatal
    INSTR_FETCH_ERROR = 2
    LOAD_STORE_ERROR = 3
    # Level-1 interrupt not fatal
    ALLOCA = 5
    DIVIDE_BY_ZERO = 6
    PRIVILEGED = 8
    LOAD_STORE_ALIGNMENT = 9
    INSTR_PIF_DATA_ERROR = 12
    LOAD_STORE_PIF_DATA_ERROR = 13
    INSTR_PIF_ADDR_ERROR = 14
    LOAD_STORE_PIF_ADDR_ERROR = 15
    INSTR_TLB_MISS = 16
    INSTR_TLB_MULTI_HIT = 17
    INSTR_FETCH_PRIVILEGE = 18
    INST_FETCH_PROHIBITED = 20
    LOAD_STORE_TLB_MISS = 24
    LOAD_STORE_TLB_MULTI_HIT = 25
    LOAD_STORE_PRIVILEGE = 26
    LOAD_PROHIBITED = 28
    STORE_PROHIBITED = 29
    # Coprocessor disabled spans 32 - 39
    COPROCESSOR_DISABLED_START = 32
    COPROCESSOR_DISABLED_END = 39
    Z_EXCEPT_REASON = 63
    # Others (reserved / unknown) map to default signal
class GdbStub_Xtensa(GdbStub):
    """GDB stub for Xtensa coredumps.

    Register numbering varies per SOC and toolchain; the concrete
    layout is selected via get_gdb_reg_definition().
    """

    GDB_SIGNAL_DEFAULT = 7

    # Mapping based on section 4.4.1.5 of the
    # Xtensa ISA Reference Manual (Table 464. Exception Causes)
    # Somewhat arbitrary; included only because GDB requests it
    GDB_SIGNAL_MAPPING = {
        ExceptionCodes.ILLEGAL_INSTRUCTION: 4,
        ExceptionCodes.INSTR_FETCH_ERROR: 7,
        ExceptionCodes.LOAD_STORE_ERROR: 11,
        ExceptionCodes.ALLOCA: 7,
        ExceptionCodes.DIVIDE_BY_ZERO: 8,
        ExceptionCodes.PRIVILEGED: 11,
        ExceptionCodes.LOAD_STORE_ALIGNMENT: 7,
        ExceptionCodes.INSTR_PIF_DATA_ERROR: 7,
        ExceptionCodes.LOAD_STORE_PIF_DATA_ERROR: 7,
        ExceptionCodes.INSTR_PIF_ADDR_ERROR: 11,
        ExceptionCodes.LOAD_STORE_PIF_ADDR_ERROR: 11,
        ExceptionCodes.INSTR_TLB_MISS: 11,
        ExceptionCodes.INSTR_TLB_MULTI_HIT: 11,
        ExceptionCodes.INSTR_FETCH_PRIVILEGE: 11,
        ExceptionCodes.INST_FETCH_PROHIBITED: 11,
        ExceptionCodes.LOAD_STORE_TLB_MISS: 11,
        ExceptionCodes.LOAD_STORE_TLB_MULTI_HIT: 11,
        ExceptionCodes.LOAD_STORE_PRIVILEGE: 11,
        ExceptionCodes.LOAD_PROHIBITED: 11,
        ExceptionCodes.STORE_PROHIBITED: 11,
        ExceptionCodes.Z_EXCEPT_REASON: 6,
    }

    # All Xtensa registers are transferred as 32-bit values.
    reg_fmt = "<I"

    def __init__(self, logfile, elffile):
        super().__init__(logfile=logfile, elffile=elffile)
        self.registers = None
        self.exception_code = None
        self.gdb_signal = self.GDB_SIGNAL_DEFAULT

        self.parse_arch_data_block()
        self.compute_signal()

    def parse_arch_data_block(self):
        """Decode the arch block header (SOC, version, toolchain) and
        unpack the register values using the matching layout.
        """
        arch_data_blk = self.logfile.get_arch_data()['data']

        self.version = struct.unpack('H', arch_data_blk[1:3])[0]
        logger.debug("Xtensa GDB stub version: %d" % self.version)

        # Get SOC and toolchain to get correct format for unpack
        self.soc = XtensaSoc(bytearray(arch_data_blk)[0])
        logger.debug("Xtensa SOC: %s" % self.soc.name)

        if self.version >= 2:
            self.toolchain = XtensaToolchain(bytearray(arch_data_blk)[3])
            arch_data_blk_regs = arch_data_blk[4:]
        else:
            # v1 only supported ESP32 and sample_controller, each of which
            # only build with one toolchain
            if self.soc == XtensaSoc.ESP32:
                self.toolchain = XtensaToolchain.ESPRESSIF
            else:
                self.toolchain = XtensaToolchain.ZEPHYR
            arch_data_blk_regs = arch_data_blk[3:]

        logger.debug("Xtensa toolchain: %s" % self.toolchain.name)

        self.gdb_reg_def = get_gdb_reg_definition(self.soc, self.toolchain)

        tu = struct.unpack(self.gdb_reg_def.ARCH_DATA_BLK_STRUCT_REGS,
                           arch_data_blk_regs)

        self.registers = dict()
        self.map_registers(tu)

    def map_registers(self, tu):
        """Assign the unpacked values to the toolchain-specific GDB
        register indices, in the RegNum declaration order.
        """
        i = 0
        for r in self.gdb_reg_def.RegNum:
            reg_num = r.value
            # Dummy WINDOWBASE and WINDOWSTART to enable GDB
            # without dumping them and all AR registers;
            # avoids interfering with interrupts / exceptions
            if r == self.gdb_reg_def.RegNum.WINDOWBASE:
                self.registers[reg_num] = 0
            elif r == self.gdb_reg_def.RegNum.WINDOWSTART:
                self.registers[reg_num] = 1
            else:
                if r == self.gdb_reg_def.RegNum.EXCCAUSE:
                    self.exception_code = tu[i]
                self.registers[reg_num] = tu[i]
                i += 1

    def compute_signal(self):
        """Map the dumped EXCCAUSE value to a GDB signal number.

        Fixes over the previous revision:
        - reserved/unknown causes no longer raise ValueError from the
          ExceptionCodes() lookup; they get the default signal;
        - the coprocessor-disabled range check compares raw integers
          (an Enum member cannot be compared to an int with <=).
        """
        sig = self.GDB_SIGNAL_DEFAULT
        raw_code = self.exception_code

        if ExceptionCodes.COPROCESSOR_DISABLED_START.value <= raw_code <= \
           ExceptionCodes.COPROCESSOR_DISABLED_END.value:
            # Coprocessor disabled -> arithmetic exception signal
            sig = 8
        else:
            try:
                code = ExceptionCodes(raw_code)
            except ValueError:
                # Reserved / unknown cause: keep the default signal
                code = None
            if code in self.GDB_SIGNAL_MAPPING:
                sig = self.GDB_SIGNAL_MAPPING[code]

        self.gdb_signal = sig

    def handle_register_group_read_packet(self):
        """Build the 'g' packet reply, bounded by the SOC-specific
        maximum packet size.
        """
        idx = 0
        pkt = b''

        GDB_G_PKT_MAX_REG = \
            max([reg_num.value for reg_num in self.gdb_reg_def.RegNum])

        # We try to send as many of the registers listed
        # as possible, but we are constrained by the
        # maximum length of the g packet
        while idx <= GDB_G_PKT_MAX_REG and idx * 4 < self.gdb_reg_def.SOC_GDB_GPKT_BIN_SIZE:
            if idx in self.registers:
                bval = struct.pack(self.reg_fmt, self.registers[idx])
                pkt += binascii.hexlify(bval)
            else:
                pkt += b'x' * 8

            idx += 1

        self.put_gdb_packet(pkt)

    def handle_register_single_read_packet(self, pkt):
        """Reply to a 'p' packet; unknown registers are marked
        unavailable with 'x' filler.
        """
        # format is pXX, where XX is the hex representation of the idx
        regIdx = int('0x' + pkt[1:].decode('utf8'), 16)
        try:
            bval = struct.pack(self.reg_fmt, self.registers[regIdx])
            self.put_gdb_packet(binascii.hexlify(bval))
        except KeyError:
            self.put_gdb_packet(b'x' * 8)
# The following classes map registers to their index used by
# the GDB of a specific SOC and toolchain. See xtensa_config.c.
# WARNING: IF YOU CHANGE THE ORDER OF THE REGISTERS IN ONE
# MAPPING, YOU MUST CHANGE THE ORDER TO MATCH IN THE OTHERS
# AND IN arch/xtensa/core/coredump.c's xtensa_arch_block.r.
# See map_registers.
# For the same reason, even though the WINDOWBASE and WINDOWSTART
# values are dummied by this script, they have to be last in the
# mapping below.
# sample_controller is unique to Zephyr SDK
# sdk-ng -> overlays/xtensa_sample_controller/gdb/gdb/xtensa-config.c
class GdbRegDef_Sample_Controller:
    """Register indices for sample_controller as assigned by the
    Zephyr SDK's GDB. Order of RegNum members must match the dump
    order in arch/xtensa/core/coredump.c (see warning above).
    """
    ARCH_DATA_BLK_STRUCT_REGS = '<IIIIIIIIIIIIIIIIIIIIII'
    # This fits the maximum possible register index (110).
    # Unlike on ESP32 GDB, there doesn't seem to be an
    # actual hard limit to how big the g packet can be.
    SOC_GDB_GPKT_BIN_SIZE = 444

    class RegNum(Enum):
        PC = 0
        EXCCAUSE = 77
        EXCVADDR = 83
        SAR = 33
        PS = 38
        SCOMPARE1 = 39
        A0 = 89
        A1 = 90
        A2 = 91
        A3 = 92
        A4 = 93
        A5 = 94
        A6 = 95
        A7 = 96
        A8 = 97
        A9 = 98
        A10 = 99
        A11 = 100
        A12 = 101
        A13 = 102
        A14 = 103
        A15 = 104
        # LBEG, LEND, and LCOUNT not on sample_controller
        WINDOWBASE = 34
        WINDOWSTART = 35
# ESP32 is unique to espressif toolchain
# espressif xtensa-overlays -> xtensa_esp32/gdb/gdb/xtensa-config.c
class GdbRegDef_ESP32:
    """Register indices for ESP32 as assigned by Espressif's GDB.
    Order of RegNum members must match the dump order in
    arch/xtensa/core/coredump.c (see warning above).
    """
    ARCH_DATA_BLK_STRUCT_REGS = '<IIIIIIIIIIIIIIIIIIIIIIIII'
    SOC_GDB_GPKT_BIN_SIZE = 420

    class RegNum(Enum):
        PC = 0
        EXCCAUSE = 143
        EXCVADDR = 149
        SAR = 68
        PS = 73
        SCOMPARE1 = 76
        A0 = 157
        A1 = 158
        A2 = 159
        A3 = 160
        A4 = 161
        A5 = 162
        A6 = 163
        A7 = 164
        A8 = 165
        A9 = 166
        A10 = 167
        A11 = 168
        A12 = 169
        A13 = 170
        A14 = 171
        A15 = 172
        LBEG = 65
        LEND = 66
        LCOUNT = 67
        WINDOWBASE = 69
        WINDOWSTART = 70
class GdbRegDef_ESP32S2:
    """Register indices for ESP32-S2 (Espressif GDB). Order of RegNum
    members must match the dump order in arch/xtensa/core/coredump.c.
    Note: ESP32-S2 has no loop registers and no SCOMPARE1.
    """
    ARCH_DATA_BLK_STRUCT_REGS = '<IIIIIIIIIIIIIIIIIIIII'
    SOC_GDB_GPKT_BIN_SIZE = 420

    class RegNum(Enum):
        PC = 0
        EXCCAUSE = 99
        EXCVADDR = 115
        SAR = 65
        PS = 70
        A0 = 155
        A1 = 156
        A2 = 157
        A3 = 158
        A4 = 159
        A5 = 160
        A6 = 161
        A7 = 162
        A8 = 163
        A9 = 164
        A10 = 165
        A11 = 166
        A12 = 167
        A13 = 168
        A14 = 169
        A15 = 170
        WINDOWBASE = 66
        WINDOWSTART = 67
class GdbRegDef_ESP32S3:
    """Register indices for ESP32-S3 (Espressif GDB). Order of RegNum
    members must match the dump order in arch/xtensa/core/coredump.c.
    """
    ARCH_DATA_BLK_STRUCT_REGS = '<IIIIIIIIIIIIIIIIIIIIIIIII'
    SOC_GDB_GPKT_BIN_SIZE = 420

    class RegNum(Enum):
        PC = 0
        EXCCAUSE = 166
        EXCVADDR = 172
        SAR = 68
        PS = 73
        SCOMPARE1 = 76
        A0 = 212
        A1 = 213
        A2 = 214
        A3 = 215
        A4 = 216
        A5 = 217
        A6 = 218
        A7 = 219
        A8 = 220
        A9 = 221
        A10 = 222
        A11 = 223
        A12 = 224
        A13 = 225
        A14 = 226
        A15 = 227
        LBEG = 65
        LEND = 66
        LCOUNT = 67
        WINDOWBASE = 69
        WINDOWSTART = 70
# sdk-ng -> overlays/xtensa_intel_apl/gdb/gdb/xtensa-config.c
class GdbRegDef_Intel_Adsp_CAVS_Zephyr:
    """Register indices for Intel ADSP CAVS when built with the Zephyr
    SDK toolchain. Order of RegNum members must match the dump order
    in arch/xtensa/core/coredump.c (see warning above).
    """
    ARCH_DATA_BLK_STRUCT_REGS = '<IIIIIIIIIIIIIIIIIIIIIIIII'

    # If you send all the registers below (up to index 173)
    # GDB incorrectly assigns 0 to EXCCAUSE / EXCVADDR... for some
    # reason. Since APL GDB sends p packets for every An register
    # even if it was sent in the g packet, I arbitrarily shrunk the
    # G packet to include up to A1, which fixed the issue.
    SOC_GDB_GPKT_BIN_SIZE = 640

    class RegNum(Enum):
        PC = 0
        EXCCAUSE = 148
        EXCVADDR = 154
        SAR = 68
        PS = 74
        SCOMPARE1 = 77
        A0 = 158
        A1 = 159
        A2 = 160
        A3 = 161
        A4 = 162
        A5 = 163
        A6 = 164
        A7 = 165
        A8 = 166
        A9 = 167
        A10 = 168
        A11 = 169
        A12 = 170
        A13 = 171
        A14 = 172
        A15 = 173
        LBEG = 65
        LEND = 66
        LCOUNT = 67
        WINDOWBASE = 70
        WINDOWSTART = 71
# Reverse-engineered from:
# sof -> src/debug/gdb/gdb.c
# sof -> src/arch/xtensa/include/xtensa/specreg.h
class GdbRegDef_Intel_Adsp_CAVS_XCC:
    """Register indices for Intel ADSP CAVS when built with Cadence's
    XCC toolchain (xt-gdb). Order of RegNum members must match the
    dump order in arch/xtensa/core/coredump.c (see warning above).
    """
    ARCH_DATA_BLK_STRUCT_REGS = '<IIIIIIIIIIIIIIIIIIIIIIIII'

    # xt-gdb doesn't use the g packet at all
    SOC_GDB_GPKT_BIN_SIZE = 0

    class RegNum(Enum):
        PC = 32
        EXCCAUSE = 744
        EXCVADDR = 750
        SAR = 515
        PS = 742
        SCOMPARE1 = 524
        A0 = 256
        A1 = 257
        A2 = 258
        A3 = 259
        A4 = 260
        A5 = 261
        A6 = 262
        A7 = 263
        A8 = 264
        A9 = 265
        A10 = 266
        A11 = 267
        A12 = 268
        A13 = 269
        A14 = 270
        A15 = 271
        LBEG = 512
        LEND = 513
        LCOUNT = 514
        WINDOWBASE = 584
        WINDOWSTART = 585
# sdk-ng -> overlays/xtensa_dc233c/gdb/gdb/xtensa-config.c
class GdbRegDef_DC233C:
    """Register indices for DC233C as assigned by the Zephyr SDK's GDB.
    Order of RegNum members must match the dump order in
    arch/xtensa/core/coredump.c (see warning above).
    """
    ARCH_DATA_BLK_STRUCT_REGS = '<IIIIIIIIIIIIIIIIIIIIIIIII'
    SOC_GDB_GPKT_BIN_SIZE = 568

    class RegNum(Enum):
        PC = 0
        EXCCAUSE = 93
        EXCVADDR = 99
        SAR = 36
        PS = 42
        SCOMPARE1 = 44
        A0 = 105
        A1 = 106
        A2 = 107
        A3 = 108
        A4 = 109
        A5 = 110
        A6 = 111
        A7 = 112
        A8 = 113
        A9 = 114
        A10 = 115
        A11 = 116
        A12 = 117
        A13 = 118
        A14 = 119
        A15 = 120
        LBEG = 33
        LEND = 34
        LCOUNT = 35
        WINDOWBASE = 38
        WINDOWSTART = 39
``` | /content/code_sandbox/scripts/coredump/gdbstubs/arch/xtensa.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,264 |
```python
#!/usr/bin/env python3
#
#
import binascii
import logging
import struct
from coredump_parser.elf_parser import ThreadInfoOffset
from gdbstubs.gdbstub import GdbStub
logger = logging.getLogger("gdbstub")
class RegNum():
    """ARM core register numbers in GDB 'g'-packet order
    (R0-R12, SP, LR, PC, xPSR).
    """
    R0 = 0
    R1 = 1
    R2 = 2
    R3 = 3
    R4 = 4
    R5 = 5
    R6 = 6
    R7 = 7
    R8 = 8
    R9 = 9
    R10 = 10
    R11 = 11
    R12 = 12
    SP = 13
    LR = 14
    PC = 15
    XPSR = 16
class GdbStub_ARM_CortexM(GdbStub):
    """GDB stub for ARM Cortex-M coredumps.

    Supports per-thread register reads by reconstructing the
    exception stack frame from each thread's saved stack pointer.
    """

    # v1 arch block: caller-saved regs + xPSR + SP only.
    ARCH_DATA_BLK_STRUCT = "<IIIIIIIII"
    # v2 arch block: v1 plus callee-saved R4-R11.
    ARCH_DATA_BLK_STRUCT_V2 = "<IIIIIIIIIIIIIIIII"

    GDB_SIGNAL_DEFAULT = 7

    # Number of registers sent in a 'g' packet reply.
    GDB_G_PKT_NUM_REGS = 17

    def __init__(self, logfile, elffile):
        super().__init__(logfile=logfile, elffile=elffile)
        self.registers = None
        self.gdb_signal = self.GDB_SIGNAL_DEFAULT

        self.parse_arch_data_block()

    def parse_arch_data_block(self):
        """Unpack the dumped register block into self.registers.

        Version 2 additionally carries R4-R11.
        """
        arch_data_blk = self.logfile.get_arch_data()['data']
        arch_data_ver = self.logfile.get_arch_data()['hdr_ver']

        # NOTE(review): an unknown version leaves 'tu' unbound and
        # fails below with a NameError — verify callers only pass
        # versions 1 and 2.
        if arch_data_ver == 1:
            tu = struct.unpack(self.ARCH_DATA_BLK_STRUCT, arch_data_blk)
        elif arch_data_ver == 2:
            tu = struct.unpack(self.ARCH_DATA_BLK_STRUCT_V2, arch_data_blk)

        self.registers = dict()

        self.registers[RegNum.R0] = tu[0]
        self.registers[RegNum.R1] = tu[1]
        self.registers[RegNum.R2] = tu[2]
        self.registers[RegNum.R3] = tu[3]
        self.registers[RegNum.R12] = tu[4]
        self.registers[RegNum.LR] = tu[5]
        self.registers[RegNum.PC] = tu[6]
        self.registers[RegNum.XPSR] = tu[7]
        self.registers[RegNum.SP] = tu[8]
        if arch_data_ver > 1:
            self.registers[RegNum.R4] = tu[9]
            self.registers[RegNum.R5] = tu[10]
            self.registers[RegNum.R6] = tu[11]
            self.registers[RegNum.R7] = tu[12]
            self.registers[RegNum.R8] = tu[13]
            self.registers[RegNum.R9] = tu[14]
            self.registers[RegNum.R10] = tu[15]
            self.registers[RegNum.R11] = tu[16]

    def send_registers_packet(self, registers):
        """Send a 'g'-style reply for the given register dict, with
        'x' filler for registers not present.
        """
        reg_fmt = "<I"
        idx = 0
        pkt = b''

        while idx < self.GDB_G_PKT_NUM_REGS:
            if idx in registers:
                bval = struct.pack(reg_fmt, registers[idx])
                pkt += binascii.hexlify(bval)
            else:
                # Register not in coredump -> unknown value
                # Send in "xxxxxxxx"
                pkt += b'x' * 8

            idx += 1

        self.put_gdb_packet(pkt)

    def handle_register_group_read_packet(self):
        """Handle 'g': use dumped registers directly, or defer to
        per-thread handling when thread info is available.
        """
        if not self.elffile.has_kernel_thread_info():
            self.send_registers_packet(self.registers)
        else:
            self.handle_thread_register_group_read_packet()

    def handle_register_single_read_packet(self, pkt):
        # Mark registers as "<unavailable>".
        # 'p' packets are usually used for registers
        # other than the general ones (e.g. eax, ebx)
        # so we can safely reply "xxxxxxxx" here.
        self.put_gdb_packet(b'x' * 8)

    def handle_register_single_write_packet(self, pkt):
        """Handle 'P': write a single register value supplied by GDB
        (format "Pnn=vvvv...", little-endian hex value).
        """
        pkt_str = pkt.decode("ascii")
        reg = int(pkt_str[1:pkt_str.index('=')], 16)
        self.registers[reg] = int.from_bytes(binascii.unhexlify(pkt[3:]), byteorder = 'little')
        self.put_gdb_packet(b'+')

    def arch_supports_thread_operations(self):
        """Cortex-M supports GDB thread operations ('T'/'H' packets)."""
        return True

    def handle_thread_register_group_read_packet(self):
        """Reconstruct and send registers for the selected thread from
        its saved exception stack frame.
        """
        # For selected_thread 0, use the register data retrieved from the dump's arch section
        if self.selected_thread == 0:
            self.send_registers_packet(self.registers)
        else:
            thread_ptr = self.thread_ptrs[self.selected_thread]

            # Get stack pointer out of thread struct
            t_stack_ptr_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_T_STACK_PTR)
            size_t_size = self.elffile.get_kernel_thread_info_size_t_size()
            stack_ptr_bytes = self.get_memory(thread_ptr + t_stack_ptr_offset, size_t_size)

            thread_registers = dict()
            if stack_ptr_bytes is not None:
                # Read registers stored at top of stack
                # (basic exception frame: R0-R3, R12, LR, PC, xPSR).
                # NOTE(review): the "<IIIIIIII" unpack assumes
                # size_t_size == 4 (32-bit target) — confirm.
                stack_ptr = int.from_bytes(stack_ptr_bytes, "little")
                barray = self.get_memory(stack_ptr, (size_t_size * 8))
                if barray is not None:
                    tu = struct.unpack("<IIIIIIII", barray)
                    thread_registers[RegNum.R0] = tu[0]
                    thread_registers[RegNum.R1] = tu[1]
                    thread_registers[RegNum.R2] = tu[2]
                    thread_registers[RegNum.R3] = tu[3]
                    thread_registers[RegNum.R12] = tu[4]
                    thread_registers[RegNum.LR] = tu[5]
                    thread_registers[RegNum.PC] = tu[6]
                    thread_registers[RegNum.XPSR] = tu[7]

                    # Set SP to point to stack just after these registers
                    thread_registers[RegNum.SP] = stack_ptr + 32

                    # Read the exc_return value from the thread's arch struct
                    t_arch_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_T_ARCH)
                    t_exc_return_offset = self.elffile.get_kernel_thread_info_offset(ThreadInfoOffset.THREAD_INFO_OFFSET_T_ARM_EXC_RETURN)

                    # Value of 0xffffffff indicates THREAD_INFO_UNIMPLEMENTED
                    if t_exc_return_offset != 0xffffffff:
                        # NOTE(review): get_memory() may return None here,
                        # which would make int.from_bytes raise — confirm the
                        # arch struct is always within a dumped region.
                        exc_return_bytes = self.get_memory(thread_ptr + t_arch_offset + t_exc_return_offset, 1)
                        exc_return = int.from_bytes(exc_return_bytes, "little")

                        # If the bit 4 is not set, the stack frame is extended for floating point data, adjust the SP accordingly
                        if (exc_return & (1 << 4)) == 0:
                            thread_registers[RegNum.SP] = thread_registers[RegNum.SP] + 72

                    # Set R7 to match the stack pointer in case the frame pointer is not omitted
                    thread_registers[RegNum.R7] = thread_registers[RegNum.SP]

            self.send_registers_packet(thread_registers)
``` | /content/code_sandbox/scripts/coredump/gdbstubs/arch/arm_cortex_m.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,564 |
```restructuredtext
:orphan:
.. _zephyr_licensing:
Licensing of Zephyr Project components
######################################
The Zephyr kernel tree imports or reuses packages, scripts and other files for
which there is no LICENSE file, or no way to put a LICENSE file in place, so we
describe their licensing in this document.
path_to_url
path_to_url
*scripts/{checkpatch.pl,checkstack.pl,spelling.txt}*
*Origin:* Linux Kernel
*scripts/{coccicheck,coccinelle/array_size.cocci,coccinelle/deref_null.cocci,coccinelle/mini_lock.cocci,coccinelle/noderef.cocci,coccinelle/returnvar.cocci,coccinelle/semicolon.cocci}*
*Origin:* Coccinelle
*subsys/testsuite/coverage/coverage.h*
*Origin:* GCC, the GNU Compiler Collection
*boards/ene/kb1200_evb/support/openocd.cfg*
``` | /content/code_sandbox/doc/LICENSING.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 266 |
```python
#!/usr/bin/env python3
#
#
import binascii
import logging
import struct
from gdbstubs.gdbstub import GdbStub
logger = logging.getLogger("gdbstub")
class RegNum():
    """GDB register numbers for i386.

    Values must match the enum i386_regnum in GDB; do not reorder.
    """
    # Matches the enum i386_regnum in GDB
    EAX = 0
    ECX = 1
    EDX = 2
    EBX = 3
    ESP = 4
    EBP = 5
    ESI = 6
    EDI = 7
    EIP = 8
    EFLAGS = 9
    CS = 10
    SS = 11
    DS = 12
    ES = 13
    FS = 14
    GS = 15
class ExceptionVectors():
    """x86 hardware exception vector numbers.

    Values must match arch/x86/include/kernel_arch_data.h.
    """
    # Matches arch/x86/include/kernel_arch_data.h
    IV_DIVIDE_ERROR = 0
    IV_DEBUG = 1
    IV_NON_MASKABLE_INTERRUPT = 2
    IV_BREAKPOINT = 3
    IV_OVERFLOW = 4
    IV_BOUND_RANGE = 5
    IV_INVALID_OPCODE = 6
    IV_DEVICE_NOT_AVAILABLE = 7
    IV_DOUBLE_FAULT = 8
    IV_COPROC_SEGMENT_OVERRUN = 9
    IV_INVALID_TSS = 10
    IV_SEGMENT_NOT_PRESENT = 11
    IV_STACK_FAULT = 12
    IV_GENERAL_PROTECTION = 13
    IV_PAGE_FAULT = 14
    IV_RESERVED = 15
    IV_X87_FPU_FP_ERROR = 16
    IV_ALIGNMENT_CHECK = 17
    IV_MACHINE_CHECK = 18
    IV_SIMD_FP = 19
    IV_VIRT_EXCEPTION = 20
    IV_SECURITY_EXCEPTION = 30
class GdbStub_x86(GdbStub):
ARCH_DATA_BLK_STRUCT = "<IIIIIIIIIIIII"
GDB_SIGNAL_DEFAULT = 7
# Mapping is from GDB's gdb/i386-stubs.c
GDB_SIGNAL_MAPPING = {
ExceptionVectors.IV_DIVIDE_ERROR: 8,
ExceptionVectors.IV_DEBUG: 5,
ExceptionVectors.IV_BREAKPOINT: 5,
ExceptionVectors.IV_OVERFLOW: 16,
ExceptionVectors.IV_BOUND_RANGE: 16,
ExceptionVectors.IV_INVALID_OPCODE: 4,
ExceptionVectors.IV_DEVICE_NOT_AVAILABLE: 8,
ExceptionVectors.IV_DOUBLE_FAULT: 7,
ExceptionVectors.IV_COPROC_SEGMENT_OVERRUN: 11,
ExceptionVectors.IV_INVALID_TSS: 11,
ExceptionVectors.IV_SEGMENT_NOT_PRESENT: 11,
ExceptionVectors.IV_STACK_FAULT: 11,
ExceptionVectors.IV_GENERAL_PROTECTION: 11,
ExceptionVectors.IV_PAGE_FAULT: 11,
ExceptionVectors.IV_X87_FPU_FP_ERROR: 7,
}
GDB_G_PKT_NUM_REGS = 16
def __init__(self, logfile, elffile):
super().__init__(logfile=logfile, elffile=elffile)
self.registers = None
self.exception_vector = None
self.exception_code = None
self.gdb_signal = self.GDB_SIGNAL_DEFAULT
self.parse_arch_data_block()
self.compute_signal()
def parse_arch_data_block(self):
arch_data_blk = self.logfile.get_arch_data()['data']
tu = struct.unpack(self.ARCH_DATA_BLK_STRUCT, arch_data_blk)
self.registers = dict()
self.exception_vector = tu[0]
self.exception_code = tu[1]
self.registers[RegNum.EAX] = tu[2]
self.registers[RegNum.ECX] = tu[3]
self.registers[RegNum.EDX] = tu[4]
self.registers[RegNum.EBX] = tu[5]
self.registers[RegNum.ESP] = tu[6]
self.registers[RegNum.EBP] = tu[7]
self.registers[RegNum.ESI] = tu[8]
self.registers[RegNum.EDI] = tu[9]
self.registers[RegNum.EIP] = tu[10]
self.registers[RegNum.EFLAGS] = tu[11]
self.registers[RegNum.CS] = tu[12]
def compute_signal(self):
sig = self.GDB_SIGNAL_DEFAULT
vector = self.exception_vector
if vector is None:
sig = self.GDB_SIGNAL_DEFAULT
# Map vector number to GDB signal number
if vector in self.GDB_SIGNAL_MAPPING:
sig = self.GDB_SIGNAL_MAPPING[vector]
self.gdb_signal = sig
def handle_register_group_read_packet(self):
reg_fmt = "<I"
idx = 0
pkt = b''
while idx < self.GDB_G_PKT_NUM_REGS:
if idx in self.registers:
bval = struct.pack(reg_fmt, self.registers[idx])
pkt += binascii.hexlify(bval)
else:
# Register not in coredump -> unknown value
# Send in "xxxxxxxx"
pkt += b'x' * 8
idx += 1
self.put_gdb_packet(pkt)
def handle_register_single_read_packet(self, pkt):
    """Reply to a GDB 'p' (single register read) packet.

    'p' packets usually target registers beyond the general set
    (e.g. eax, ebx), which this coredump does not carry, so the
    register is always reported as "<unavailable>" ("xxxxxxxx").
    """
    self.put_gdb_packet(b'xxxxxxxx')
``` | /content/code_sandbox/scripts/coredump/gdbstubs/arch/x86.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,187 |
```restructuredtext
..
Zephyr Project documentation main file
.. _zephyr-home:
Zephyr Project Documentation
############################
.. only:: release
Welcome to the Zephyr Project's documentation for version |version|.
Documentation for the latest (main) development branch of Zephyr
can be found at path_to_url
.. only:: (development or daily)
**Welcome to the Zephyr Project's documentation
for the main tree under development** (version |version|).
Use the version selection menu on the left to view
documentation for a specific version of Zephyr.
For information about the changes and additions for releases, please
consult the published :ref:`zephyr_release_notes` documentation.
The Zephyr OS is provided under the `Apache 2.0 license`_ (as found in
the LICENSE file in the project's `GitHub repo`_). The Zephyr OS also
imports or reuses packages, scripts, and other files that use other
licensing, as described in :ref:`Zephyr_Licensing`.
.. raw:: html
<ul class="grid">
<li class="grid-item">
<a href="introduction/index.html">
<img alt="" src="_static/images/kite.png"/>
<h2>Introduction</h2>
</a>
<p>Introducing the Zephyr Project: overview, architecture, features, and licensing</p>
</li>
<li class="grid-item">
<a href="develop/getting_started/index.html">
<span class="grid-icon fa fa-map-signs"></span>
<h2>Getting Started Guide</h2>
</a>
<p>Follow this guide to set up a Zephyr development environment on your
system, and then build and run a sample application.</p>
</li>
<li class="grid-item">
<a href="contribute/index.html">
<span class="grid-icon fa fa-github"></span>
<h2>Contribution Guidelines</h2>
</a>
<p>As an open-source project, we welcome and encourage the community
to submit patches directly to the project.</p>
</li>
<li class="grid-item">
<a href="samples/index.html">
<span class="grid-icon fa fa-cogs"></span>
<h2>Samples and Demos</h2>
</a>
<p>A list of samples and demos that can run on a variety of boards supported
by Zephyr</p>
</li>
<li class="grid-item">
<a href="hardware/index.html">
<span class="grid-icon fa fa-sign-in"></span>
<h2>Hardware Support</h2>
</a>
<p>Information about supported architectures, supported hardware and porting guides</p>
</li>
<li class="grid-item">
<a href="security/index.html">
<span class="grid-icon fa fa-lock"></span>
<h2>Security</h2>
</a>
<p>Requirements, processes, and developer guidelines for ensuring security is addressed within the Zephyr project.</p>
</li>
<li class="grid-item">
<a href="boards/index.html">
<span class="grid-icon fa fa-object-group"></span>
<h2>Supported Boards</h2>
</a>
<p>List of supported boards and platforms.</p>
</li>
<li class="grid-item">
<a href="services/index.html">
<span class="grid-icon fa fa-puzzle-piece"></span>
<h2>OS Services</h2>
</a>
<p>OS Services and guides how to use them with Zephyr</p>
</li>
</ul>
Sections
********
.. toctree::
:maxdepth: 1
:caption: Contents
introduction/index.rst
develop/index.rst
kernel/index.rst
services/index.rst
build/index.rst
connectivity/index.rst
hardware/index.rst
contribute/index.rst
project/index.rst
security/index.rst
safety/index.rst
samples/index.rst
boards/index.rst
releases/index.rst
Indices and Tables
******************
* :ref:`glossary`
* :ref:`genindex`
.. _Apache 2.0 license:
path_to_url
.. _GitHub repo: path_to_url
``` | /content/code_sandbox/doc/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 986 |
```restructuredtext
:orphan:
.. _glossary:
Glossary of Terms
#################
.. glossary::
:sorted:
API
(Application Program Interface) A defined set of routines and protocols for
building application software.
application
The set of user-supplied files that the Zephyr build system uses
to build an application image for a specified board configuration.
It can contain application-specific code, kernel configuration settings,
and at least one CMakeLists.txt file.
The application's kernel configuration settings direct the build system
to create a custom kernel that makes efficient use of the board's
resources.
An application can sometimes be built for more than one type of board
configuration (including boards with different CPU architectures),
if it does not require any board-specific capabilities.
application image
A binary file that is loaded and executed by the board for which
it was built.
Each application image contains both the application's code and the
Zephyr kernel code needed to support it. They are compiled as a single,
fully-linked binary.
Once an application image is loaded onto a board, the image takes control
of the system, initializes it, and runs as the system's sole application.
Both application code and kernel code execute as privileged code
within a single shared address space.
architecture
An instruction set architecture (ISA) along with a programming model.
board
A target system with a defined set of devices and capabilities,
which can load and execute an application image. It may be an actual
hardware system or a simulated system running under QEMU. A board can
contain one or more :term:`SoCs <SoC>`.
The Zephyr kernel supports a :ref:`variety of boards <boards>`.
board configuration
A set of kernel configuration options that specify how the devices
present on a board are used by the kernel.
The Zephyr build system defines one or more board configurations
for each board it supports. The kernel configuration settings that are
specified by the build system can be over-ridden by the application,
if desired.
board name
The human-readable name of a :term:`board`. Uniquely and descriptively
identifies a particular system, but does not include additional
information that may be required to actually build a Zephyr image for it.
See :ref:`board_terminology` for additional details.
board qualifiers
The set of additional tokens, separated by a forward slash (``/``), that
follow the :term:`board name` (and optionally :term:`board revision`) to
form the :term:`board target`. The currently accepted qualifiers are
:term:`SoC`, :term:`CPU cluster` and :term:`variant`.
See :ref:`board_terminology` for additional details.
board revision
An optional version string that identifies a particular revision of a
hardware system. This is useful to avoid duplication of board files
whenever small changes are introduced to a hardware system.
See :ref:`porting_board_revisions` and :ref:`application_board_version`
for more information.
board target
The full string that can be provided to any of the Zephyr build tools to
compile and link an image for a particular hardware system. This string
uniquely identifies the combination of :term:`board name`, :term:`board
revision` and :term:`board qualifiers`.
See :ref:`board_terminology` for additional details.
CPU cluster
A group of one or more :term:`CPU cores <CPU core>`, all executing the same image
within the same address space and in a symmetrical (SMP) configuration.
Only :term:`CPU cores <CPU core>` of the same :term:`architecture` can be in a single
cluster. Multiple CPU clusters (each of one or more cores) can coexist in
the same :term:`SoC`.
CPU core
A single processing unit, with its own Program Counter, executing program
instructions sequentially. CPU cores are part of a :term:`CPU cluster`,
which can contain one or more cores.
device runtime power management
Device Runtime Power Management (PM) refers to the capability of devices to
save energy independently of the system power state. Devices will keep
reference of their usage and will automatically be suspended or resumed.
This feature is enabled via the :kconfig:option:`CONFIG_PM_DEVICE_RUNTIME`
Kconfig option.
idle thread
A system thread that runs when there are no other threads ready to run.
IDT
(Interrupt Descriptor Table) a data structure used by the x86
architecture to implement an interrupt vector table. The IDT is used
to determine the correct response to interrupts and exceptions.
ISR
(Interrupt Service Routine) Also known as an interrupt handler, an ISR
is a callback function whose execution is triggered by a hardware
interrupt (or software interrupt instructions) and is used to handle
high-priority conditions that require interrupting the current code
executing on the processor.
kernel
The set of Zephyr-supplied files that implement the Zephyr kernel,
including its core services, device drivers, network stack, and so on.
power domain
A power domain is a collection of devices for which power is
applied and removed collectively in a single action. Power
domains are represented by :c:struct:`device`.
power gating
Power gating reduces power consumption by shutting off areas of an
integrated circuit that are not in use.
SoC
A `System on a chip`_, that is, an integrated circuit that contains at
least one :term:`CPU cluster` (in turn with at least one :term:`CPU core`),
as well as peripherals and memory.
SoC family
One or more :term:`SoCs <SoC>` or :term:`SoC series` that share enough
in common to consider them related and under a single family denomination.
SoC series
A number of different :term:`SoCs <SoC>` that share similar characteristics and
features, and that the vendor typically names and markets together.
subsystem
A subsystem refers to a logically distinct part of the operating system
that handles specific functionality or provides certain services.
system power state
System power states describe the power consumption of the system as a
whole. System power states are represented by :c:enum:`pm_state`.
variant
In the context of :term:`board qualifiers`, a variant designates a
particular type or configuration of a build for a combination of :term:`SoC`
and :term:`CPU cluster`. Common uses of the variant concept include
introducing both secure and non-secure builds for platforms with Trusted
Execution Environment support, or selecting the type of RAM used in a
build.
west
A multi-repo meta-tool developed for the Zephyr project. See :ref:`west`.
west installation
An obsolete term for a :term:`west workspace` used prior to west 0.7.
west manifest
A YAML file, usually named :file:`west.yml`, which describes projects, or
the Git repositories which make up a :term:`west workspace`, along with
additional metadata. See :ref:`west-basics` for general information
and :ref:`west-manifests` for details.
west manifest repository
The Git repository in a :term:`west workspace` which contains the
:term:`west manifest`. Its location is given by the :ref:`manifest.path
configuration option <west-config-index>`. See :ref:`west-basics`.
west project
Each of the entries in a :term:`west manifest`, which describe a Git
repository that will be cloned and managed by west when working with the
corresponding :term:`west manifest repository`. Note that a west project
is different from a :term:`zephyr module`, although many projects are also
modules. See :ref:`west-manifests-projects` for additional information.
west workspace
A folder on your system with a :file:`.west` subdirectory and a
:term:`west manifest repository` in it. You clone the Zephyr source code,
as well as that of its :term:`west projects <west project>` onto your
system by creating a west workspace using the ``west init`` command. See
:ref:`west-basics`.
XIP
(eXecute In Place) a method of executing programs directly from long
term storage rather than copying it into RAM, saving writable memory for
dynamic data and not the static program code.
zephyr module
A Git repository containing a :file:`zephyr/module.yml` file, used by the
Zephyr build system to integrate the source code and configuration files
of the module into a regular Zephyr build. Zephyr modules may be west
projects, but they do not have to. See :ref:`modules` for additional
details.
.. _System on a chip: path_to_url
``` | /content/code_sandbox/doc/glossary.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,982 |
```restructuredtext
:orphan:
.. _page-not-found:
Sorry, Page Not Found
#####################
.. image:: images/Zephyr-Kite-in-tree.png
:align: right
.. raw:: html
<noscript>
Sorry, the page you requested was not found on this site.
</noscript>
<script type="text/javaScript">
<!--
var strReferrer=document.referrer;
if (strReferrer.length > 0) {
document.write("<p>Sorry, the page you requested: " +
"<a href=\"" + strReferrer + "\">" +
strReferrer + "</a> was not found on this site.</p>");
} else {
document.write("<p>Sorry, the page you requested was not found on this site.</p>")
}
//-->
</script>
Please check the URL for misspellings.
It's also possible we've removed or renamed the page you're looking for.
Please try using the navigation links on the left of this page to navigate
the major sections of our site, or use the search box.
If you got this error by following a link, please let us know by creating an
issue on `GitHub`_.
.. _GitHub: path_to_url
.. raw:: html
<div style='clear:both'></div>
``` | /content/code_sandbox/doc/404.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 281 |
```python
# Zephyr documentation build configuration file.
# Reference: path_to_url
import sys
import os
from pathlib import Path
import re
import textwrap
from sphinx.cmd.build import get_parser
args = get_parser().parse_args()
ZEPHYR_BASE = Path(__file__).resolve().parents[1]
ZEPHYR_BUILD = Path(args.outputdir).resolve()
# Add the '_extensions' directory to sys.path, to enable finding Sphinx
# extensions within.
sys.path.insert(0, str(ZEPHYR_BASE / "doc" / "_extensions"))
# Add the '_scripts' directory to sys.path, to enable finding utility
# modules.
sys.path.insert(0, str(ZEPHYR_BASE / "doc" / "_scripts"))
# Add the directory which contains the runners package as well,
# for autodoc directives on runners.xyz.
sys.path.insert(0, str(ZEPHYR_BASE / "scripts" / "west_commands"))
# Add the directory which contains the pytest-twister-pytest
sys.path.insert(0, str(ZEPHYR_BASE / "scripts" / "pylib" / "pytest-twister-harness" / "src"))
import redirects
try:
import west as west_found
except ImportError:
west_found = False
# -- Project --------------------------------------------------------------
project = "Zephyr Project"
copyright = "2015-2024 Zephyr Project members and individual contributors"
author = "The Zephyr Project Contributors"
# parse version from 'VERSION' file
with open(ZEPHYR_BASE / "VERSION") as f:
    m = re.match(
        (
            # The VERSION file is expected to contain these five
            # assignment lines, in exactly this order; EXTRAVERSION
            # may be empty.
            r"^VERSION_MAJOR\s*=\s*(\d+)$\n"
            + r"^VERSION_MINOR\s*=\s*(\d+)$\n"
            + r"^PATCHLEVEL\s*=\s*(\d+)$\n"
            + r"^VERSION_TWEAK\s*=\s*\d+$\n"
            + r"^EXTRAVERSION\s*=\s*(.*)$"
        ),
        f.read(),
        re.MULTILINE,
    )

if not m:
    # Non-fatal: the docs can still build with an unknown version.
    sys.stderr.write("Warning: Could not extract kernel version\n")
    version = "Unknown"
else:
    major, minor, patch, extra = m.groups(1)
    # "major.minor.patch", with "-EXTRAVERSION" appended when present.
    version = ".".join((major, minor, patch))
    if extra:
        version += "-" + extra

release = version
# parse SDK version from 'SDK_VERSION' file
with open(ZEPHYR_BASE / "SDK_VERSION") as f:
sdk_version = f.read().strip()
# -- General configuration ------------------------------------------------
extensions = [
"sphinx_rtd_theme",
"sphinx.ext.todo",
"sphinx.ext.extlinks",
"sphinx.ext.autodoc",
"sphinx.ext.graphviz",
"sphinxcontrib.jquery",
"zephyr.application",
"zephyr.html_redirects",
"zephyr.kconfig",
"zephyr.dtcompatible-role",
"zephyr.link-roles",
"sphinx_tabs.tabs",
"sphinx_sitemap",
"zephyr.doxyrunner",
"zephyr.doxybridge",
"zephyr.gh_utils",
"zephyr.manifest_projects_table",
"notfound.extension",
"sphinx_copybutton",
"sphinx_togglebutton",
"zephyr.external_content",
"zephyr.domain",
"zephyr.api_overview",
]
# Only use image conversion when it is really needed, e.g. LaTeX build.
# Ensure "sphinxcontrib.rsvgconverter" is added before "sphinx.ext.imgconverter"
# as it's better at converting SVG with extended features (like the ones from
# draw.io) to PDF format).
if tags.has("convertimages"): # pylint: disable=undefined-variable
extensions.append("sphinxcontrib.rsvgconverter")
extensions.append("sphinx.ext.imgconverter")
templates_path = ["_templates"]
exclude_patterns = ["_build"]
if not west_found:
exclude_patterns.append("**/*west-apis*")
else:
exclude_patterns.append("**/*west-not-found*")
pygments_style = "sphinx"
highlight_language = "none"
todo_include_todos = False
nitpick_ignore = [
# ignore C standard identifiers (they are not defined in Zephyr docs)
("c:identifier", "FILE"),
("c:identifier", "int8_t"),
("c:identifier", "int16_t"),
("c:identifier", "int32_t"),
("c:identifier", "int64_t"),
("c:identifier", "intptr_t"),
("c:identifier", "off_t"),
("c:identifier", "size_t"),
("c:identifier", "ssize_t"),
("c:identifier", "time_t"),
("c:identifier", "uint8_t"),
("c:identifier", "uint16_t"),
("c:identifier", "uint32_t"),
("c:identifier", "uint64_t"),
("c:identifier", "uintptr_t"),
("c:identifier", "va_list"),
]
SDK_URL_BASE="path_to_url"
rst_epilog = f"""
.. include:: /substitutions.txt
.. |sdk-version-literal| replace:: ``{sdk_version}``
.. |sdk-version-trim| unicode:: {sdk_version}
:trim:
.. |sdk-version-ltrim| unicode:: {sdk_version}
:ltrim:
.. _Zephyr SDK bundle: path_to_url{sdk_version}
.. |sdk-url-linux| replace:: `{SDK_URL_BASE}/v{sdk_version}/zephyr-sdk-{sdk_version}_linux-x86_64.tar.xz`
.. |sdk-url-linux-sha| replace:: `{SDK_URL_BASE}/v{sdk_version}/sha256.sum`
.. |sdk-url-macos| replace:: `{SDK_URL_BASE}/v{sdk_version}/zephyr-sdk-{sdk_version}_macos-x86_64.tar.xz`
.. |sdk-url-macos-sha| replace:: `{SDK_URL_BASE}/v{sdk_version}/sha256.sum`
.. |sdk-url-windows| replace:: `{SDK_URL_BASE}/v{sdk_version}/zephyr-sdk-{sdk_version}_windows-x86_64.7z`
"""
# -- Options for HTML output ----------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"logo_only": True,
"prev_next_buttons_location": None
}
html_baseurl = "path_to_url"
html_title = "Zephyr Project Documentation"
html_logo = str(ZEPHYR_BASE / "doc" / "_static" / "images" / "logo.svg")
html_favicon = str(ZEPHYR_BASE / "doc" / "_static" / "images" / "favicon.png")
html_static_path = [str(ZEPHYR_BASE / "doc" / "_static")]
html_last_updated_fmt = "%b %d, %Y"
html_domain_indices = False
html_split_index = True
html_show_sourcelink = False
html_show_sphinx = False
html_search_scorer = str(ZEPHYR_BASE / "doc" / "_static" / "js" / "scorer.js")
html_additional_pages = {
"gsearch": "gsearch.html"
}
is_release = tags.has("release") # pylint: disable=undefined-variable
reference_prefix = ""
if tags.has("publish"): # pylint: disable=undefined-variable
reference_prefix = f"/{version}" if is_release else "/latest"
docs_title = "Docs / {}".format(version if is_release else "Latest")
html_context = {
"show_license": True,
"docs_title": docs_title,
"is_release": is_release,
"current_version": version,
"versions": (
("latest", "/"),
("3.7.0 (LTS)", "/3.7.0/"),
("3.6.0", "/3.6.0/"),
("2.7.6 (LTS)", "/2.7.6/"),
),
"display_gh_links": True,
"reference_links": {
"API": f"{reference_prefix}/doxygen/html/index.html",
"Kconfig Options": f"{reference_prefix}/kconfig.html",
"Devicetree Bindings": f"{reference_prefix}/build/dts/api/bindings.html",
"West Projects": f"{reference_prefix}/develop/manifest/index.html",
},
# Set google_searchengine_id to your Search Engine ID to replace built-in search
# engine with Google's Programmable Search Engine.
# See path_to_url for details.
"google_searchengine_id": "746031aa0d56d4912",
}
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
"papersize": "a4paper",
"maketitle": open(ZEPHYR_BASE / "doc" / "_static" / "latex" / "title.tex").read(),
"preamble": open(ZEPHYR_BASE / "doc" / "_static" / "latex" / "preamble.tex").read(),
"makeindex": r"\usepackage[columns=1]{idxlayout}\makeindex",
"fontpkg": textwrap.dedent(r"""
\usepackage{noto}
\usepackage{inconsolata-nerd-font}
\usepackage[T1]{fontenc}
"""),
"sphinxsetup": ",".join(
(
# NOTE: colors match those found in light.css stylesheet
"verbatimwithframe=false",
"VerbatimColor={HTML}{f0f2f4}",
"InnerLinkColor={HTML}{2980b9}",
"warningBgColor={HTML}{e9a499}",
"warningborder=0pt",
r"HeaderFamily=\rmfamily\bfseries",
)
),
}
latex_logo = str(ZEPHYR_BASE / "doc" / "_static" / "images" / "logo-latex.pdf")
latex_documents = [
("index-tex", "zephyr.tex", "Zephyr Project Documentation", author, "manual"),
]
latex_engine = "xelatex"
# -- Options for zephyr.doxyrunner plugin ---------------------------------
doxyrunner_doxygen = os.environ.get("DOXYGEN_EXECUTABLE", "doxygen")
doxyrunner_doxyfile = ZEPHYR_BASE / "doc" / "zephyr.doxyfile.in"
doxyrunner_outdir = ZEPHYR_BUILD / "doxygen"
doxyrunner_fmt = True
doxyrunner_fmt_vars = {"ZEPHYR_BASE": str(ZEPHYR_BASE), "ZEPHYR_VERSION": version}
doxyrunner_outdir_var = "DOXY_OUT"
# -- Options for zephyr.doxybridge plugin ---------------------------------
doxybridge_dir = doxyrunner_outdir
# -- Options for html_redirect plugin -------------------------------------
html_redirect_pages = redirects.REDIRECTS
# -- Options for zephyr.link-roles ----------------------------------------
link_roles_manifest_project = "zephyr"
link_roles_manifest_baseurl = "path_to_url"
# -- Options for notfound.extension ---------------------------------------
notfound_urls_prefix = f"/{version}/" if is_release else "/latest/"
# -- Options for zephyr.gh_utils ------------------------------------------
gh_link_version = f"v{version}" if is_release else "main"
gh_link_base_url = f"path_to_url"
gh_link_prefixes = {
"samples/.*": "",
"boards/.*": "",
"snippets/.*": "",
".*": "doc",
}
gh_link_exclude = [
"reference/kconfig.*",
"build/dts/api/bindings.*",
"build/dts/api/compatibles.*",
]
# -- Options for zephyr.kconfig -------------------------------------------
kconfig_generate_db = True
kconfig_ext_paths = [ZEPHYR_BASE]
# -- Options for zephyr.external_content ----------------------------------
external_content_contents = [
(ZEPHYR_BASE / "doc", "[!_]*"),
(ZEPHYR_BASE, "boards/**/*.rst"),
(ZEPHYR_BASE, "boards/**/doc"),
(ZEPHYR_BASE, "samples/**/*.html"),
(ZEPHYR_BASE, "samples/**/*.rst"),
(ZEPHYR_BASE, "samples/**/doc"),
(ZEPHYR_BASE, "snippets/**/*.rst"),
(ZEPHYR_BASE, "snippets/**/doc"),
(ZEPHYR_BASE, "tests/**/*.pts"),
]
external_content_keep = [
"reference/kconfig/*",
"develop/manifest/index.rst",
"build/dts/api/bindings.rst",
"build/dts/api/bindings/**/*",
"build/dts/api/compatibles/**/*",
]
# -- Options for zephyr.domain --------------------------------------------
zephyr_breathe_insert_related_samples = True
# -- Options for sphinx.ext.graphviz --------------------------------------
graphviz_dot = os.environ.get("DOT_EXECUTABLE", "dot")
graphviz_output_format = "svg"
graphviz_dot_args = [
"-Gbgcolor=transparent",
"-Nstyle=filled",
"-Nfillcolor=white",
"-Ncolor=gray60",
"-Nfontcolor=gray25",
"-Ecolor=gray60",
]
# -- Options for sphinx_copybutton ----------------------------------------
copybutton_prompt_text = r"\$ |uart:~\$ "
copybutton_prompt_is_regexp = True
# -- Options for sphinx-sitemap ----------------------------------------
sitemap_url_scheme = "{link}"
# -- Linkcheck options ----------------------------------------------------
linkcheck_ignore = [
r"path_to_url"
]
extlinks = {
"github": ("path_to_url", "GitHub #%s"),
}
linkcheck_timeout = 30
linkcheck_workers = 10
linkcheck_anchors = False
# -- Options for zephyr.api_overview --------------------------------------
api_overview_doxygen_base_url = "../../doxygen/html"
def setup(app):
# theme customizations
app.add_css_file("css/custom.css")
app.add_js_file("js/custom.js")
app.add_js_file("js/dark-mode-toggle.min.mjs", type="module")
``` | /content/code_sandbox/doc/conf.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,039 |
```restructuredtext
:orphan:
.. _kconfig-search:
Kconfig Search
==============
.. kconfig:search::
``` | /content/code_sandbox/doc/kconfig.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 23 |
```restructuredtext
:orphan:
..
Zephyr Project documentation main file
.. _zephyr-home-tex:
Zephyr Project Documentation
############################
.. toctree::
:maxdepth: 1
:caption: Contents
introduction/index.rst
develop/index.rst
kernel/index.rst
services/index.rst
build/index.rst
connectivity/index.rst
hardware/index.rst
contribute/index.rst
project/index.rst
security/index.rst
safety/index.rst
glossary.rst
``` | /content/code_sandbox/doc/index-tex.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 123 |
```unknown
# your_sha256_hash--------------
# Makefile for documentation build
BUILDDIR ?= _build
DOC_TAG ?= development
SPHINXOPTS ?= -j auto -W --keep-going -T
SPHINXOPTS_EXTRA ?=
LATEXMKOPTS ?= -halt-on-error -no-shell-escape
DT_TURBO_MODE ?= 0
# your_sha256_hash--------------
# Documentation targets
# 'linkcheck' is built through the pattern rule below, so it must be
# declared phony as well (it was previously missing from this list,
# meaning a stray file named 'linkcheck' would mask the target).
.PHONY: configure clean html html-fast latex pdf linkcheck doxygen

html-fast:
	${MAKE} html DT_TURBO_MODE=1

html latex pdf linkcheck doxygen: configure
	cmake --build ${BUILDDIR} --target $@

configure:
	cmake \
		-GNinja \
		-B${BUILDDIR} \
		-S. \
		-DDOC_TAG=${DOC_TAG} \
		-DSPHINXOPTS="${SPHINXOPTS}" \
		-DSPHINXOPTS_EXTRA="${SPHINXOPTS_EXTRA}" \
		-DLATEXMKOPTS="${LATEXMKOPTS}" \
		-DDT_TURBO_MODE=${DT_TURBO_MODE}

clean:
	cmake --build ${BUILDDIR} --target clean
``` | /content/code_sandbox/doc/Makefile | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 272 |
```restructuredtext
.. _formatted_output:
Formatted Output
################
Applications, as well as Zephyr itself, require infrastructure to format
values for user consumption. The standard C99 library ``*printf()``
functionality fulfills this need for streaming output devices or memory
buffers, but in an embedded system devices may not accept streamed data
and memory may not be available to store the formatted output.
Internal Zephyr API traditionally provided this both for
:c:func:`printk` and for Zephyr's internal minimal libc, but with
separate internal interfaces. Logging, tracing, shell, and other
applications made use of either these APIs or standard libc routines
based on build options.
The :c:func:`cbprintf` public APIs convert C99 format strings and
arguments, providing output produced one character at a time through a
callback mechanism, replacing the original internal functions and
providing support for almost all C99 format specifications. Existing
use of ``s*printf()`` C libraries in Zephyr can be converted to
:c:func:`snprintfcb()` to avoid pulling in libc implementations.
Several Kconfig options control the set of features that are enabled,
allowing some control over features and memory usage:
* :kconfig:option:`CONFIG_CBPRINTF_FULL_INTEGRAL`
or :kconfig:option:`CONFIG_CBPRINTF_REDUCED_INTEGRAL`
* :kconfig:option:`CONFIG_CBPRINTF_FP_SUPPORT`
* :kconfig:option:`CONFIG_CBPRINTF_FP_A_SUPPORT`
* :kconfig:option:`CONFIG_CBPRINTF_FP_ALWAYS_A`
* :kconfig:option:`CONFIG_CBPRINTF_N_SPECIFIER`
:kconfig:option:`CONFIG_CBPRINTF_LIBC_SUBSTS` can be used to provide functions
that behave like standard libc functions but use the selected cbprintf
formatter rather than pulling in another formatter from libc.
In addition :kconfig:option:`CONFIG_CBPRINTF_NANO` can be used to revert back to
the very space-optimized but limited formatter used for :c:func:`printk`
before this capability was added.
.. _cbprintf_packaging:
Cbprintf Packaging
******************
Typically, strings are formatted synchronously when a function from ``printf``
family is called. However, there are cases when it is beneficial that formatting
is deferred. In that case, a state (format string and arguments) must be captured.
Such state forms a self-contained package which contains format string and
arguments. Additionally, a package may contain copies of strings which are
part of a format string (format string or any ``%s`` argument). Package primary
content resembles va_list stack frame thus standard formatting functions are
used to process a package. Since package contains data which is processed as
va_list frame, strict alignment must be maintained. Due to required padding,
size of the package depends on alignment. When package is copied, it should be
copied to a memory block with the same alignment as origin.
Package can have following variants:
* **Self-contained** - non read-only strings appended to the package. String can be
formatted from such package as long as there is access to read-only string
locations. Package may contain information where read-only strings are located
within the package. That information can be used to convert the package to a fully
self-contained package.
* **Fully self-contained** - all strings are appended to the package. String can be
formatted from such package without any external data.
* **Transient** - only arguments are stored. The package contains information
where pointers to non read-only strings are located within the package. Optionally,
it may contain read-only string location information. String can be formatted
from such package as long as non read-only strings are still valid and read-only
strings are accessible. Alternatively, package can be converted to **self-contained**
package or **fully self-contained** if information about read-only string
locations is present in the package.
Package can be created using two methods:
* runtime - using :c:func:`cbprintf_package` or :c:func:`cbvprintf_package`. This
method scans format string and based on detected format specifiers builds the
package.
* static - types of arguments are detected at compile time by the preprocessor
and package is created as simple assignments to a provided memory. This method
is significantly faster than runtime (more than 15 times) but has following
limitations: requires ``_Generic`` keyword (C11 feature) to be supported by
the compiler and cannot distinguish between ``%p`` and ``%s`` if char pointer
is used. It treats all (unsigned) char pointers as ``%s`` thus it will attempt
to append string to a package. It can be handled correctly during conversion
from **transient** package to **self-contained** package using
:c:macro:`CBPRINTF_PACKAGE_CONVERT_PTR_CHECK` flag. However, it requires access
to the format string and it is not always possible thus it is recommended to
cast char pointers used for ``%p`` to ``void *``. There is a logging warning
generated by :c:func:`cbprintf_package_convert` called with
:c:macro:`CBPRINTF_PACKAGE_CONVERT_PTR_CHECK` flag when char pointer is used with
``%p``.
Several Kconfig options control behavior of the packaging:
* :kconfig:option:`CONFIG_CBPRINTF_PACKAGE_LONGDOUBLE`
* :kconfig:option:`CONFIG_CBPRINTF_STATIC_PACKAGE_CHECK_ALIGNMENT`
Cbprintf package conversion
===========================
It is possible to convert package to a variant which contains more information, e.g
**transient** package can be converted to **self-contained**. Conversion to
**fully self-contained** package is possible if :c:macro:`CBPRINTF_PACKAGE_ADD_RO_STR_POS`
flag was used when package was created.
:c:func:`cbprintf_package_copy` is used to calculate space needed for the new
package and to copy and convert a package.
Cbprintf package format
=======================
Format of the package contains paddings which are platform specific. Package consists
of header which contains size of package (excluding appended strings) and number of
appended strings. It is followed by the arguments which contains alignment paddings
and resembles *va_list* stack frame. It is followed by data associated with character
pointer arguments used by the string which are not appended to the string (but may
be appended later by :c:func:`cbprintf_package_convert`). Finally, the package optionally
contains appended strings. Each string contains 1 byte header which contains index
of the location where address argument is stored. During packaging address is set
to null and before string formatting it is updated to point to the current string
location within the package. Updating address argument must happen just before string
formatting since address changes whenever package is copied.
+------------------+-------------------------------------------------------------------------+
| Header           | 1 byte: Argument list size including header and *fmt* (in 32 bit words) |
|                  +-------------------------------------------------------------------------+
| sizeof(void \*)  | 1 byte: Number of strings appended to the package                       |
|                  +-------------------------------------------------------------------------+
|                  | 1 byte: Number of read-only string argument locations                   |
|                  +-------------------------------------------------------------------------+
|                  | 1 byte: Number of transient string argument locations                   |
|                  +-------------------------------------------------------------------------+
|                  | platform specific padding to sizeof(void \*)                            |
+------------------+-------------------------------------------------------------------------+
| Arguments        | Pointer to *fmt* (or null if *fmt* is appended to the package)          |
|                  +-------------------------------------------------------------------------+
|                  | (optional padding for platform specific alignment)                      |
|                  +-------------------------------------------------------------------------+
|                  | argument 0                                                              |
|                  +-------------------------------------------------------------------------+
|                  | (optional padding for platform specific alignment)                      |
|                  +-------------------------------------------------------------------------+
|                  | argument 1                                                              |
|                  +-------------------------------------------------------------------------+
|                  | ...                                                                     |
+------------------+-------------------------------------------------------------------------+
| String location  | Indexes of words within the package where read-only strings are located |
| information      +-------------------------------------------------------------------------+
| (optional)       | Pairs of argument index and argument location index where transient     |
|                  | strings are located                                                     |
+------------------+-------------------------------------------------------------------------+
| Appended         | 1 byte: Index within the package to the location of associated argument |
| strings          +-------------------------------------------------------------------------+
| (optional)       | Null terminated string                                                  |
|                  +-------------------------------------------------------------------------+
|                  | ...                                                                     |
+------------------+-------------------------------------------------------------------------+
.. warning::
If :kconfig:option:`CONFIG_MINIMAL_LIBC` is selected in combination with
:kconfig:option:`CONFIG_CBPRINTF_NANO` formatting with C standard library
functions like ``printf`` or ``snprintf`` is limited. Among other
things the ``%n`` specifier, most format flags, precision control, and
floating point are not supported.
.. _cbprintf_packaging_limitations:
Limitations and recommendations
===============================
* C11 ``_Generic`` support is required by the compiler to use static (fast) packaging.
* It is recommended to cast any character pointer used with ``%p`` format specifier to
other pointer type (e.g. ``void *``). If format string is not accessible then only
static packaging is possible and it will append all detected strings. Character pointer
used for ``%p`` will be considered as string pointer. Copying from unexpected location
can have serious consequences (e.g., memory fault or security violation).
API Reference
*************
.. doxygengroup:: cbprintf_apis
``` | /content/code_sandbox/doc/services/formatted_output.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,066 |
```restructuredtext
.. _poweroff:
Power off
#########
.. doxygengroup:: sys_poweroff
``` | /content/code_sandbox/doc/services/poweroff.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 22 |
```restructuredtext
.. _os_services:
OS Services
###########
.. toctree::
:maxdepth: 1
binary_descriptors/index.rst
console.rst
crypto/index
debugging/index.rst
device_mgmt/index
dsp/index.rst
file_system/index.rst
formatted_output.rst
input/index.rst
ipc/index.rst
llext/index.rst
logging/index.rst
tracing/index.rst
resource_management/index.rst
mem_mgmt/index.rst
modbus/index.rst
modem/index.rst
notify.rst
pm/index.rst
portability/index.rst
poweroff.rst
profiling/index.rst
shell/index.rst
serialization/index.rst
settings/index.rst
smf/index.rst
storage/index.rst
sensing/index.rst
task_wdt/index.rst
tfm/index
virtualization/index.rst
retention/index.rst
rtio/index.rst
zbus/index.rst
misc.rst
``` | /content/code_sandbox/doc/services/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 241 |
```restructuredtext
.. _misc_api:
Miscellaneous
#############
.. comment
not documenting
.. doxygengroup:: checksum
.. doxygengroup:: structured_data
Checksum APIs
*************
CRC
=====
.. doxygengroup:: crc
Structured Data APIs
********************
JSON
====
.. doxygengroup:: json
JWT
===
JSON Web Tokens (JWT) are an open, industry standard
`RFC 7519 <https://datatracker.ietf.org/doc/html/rfc7519>`_ method for representing
claims securely between two parties. Although JWT is fairly flexible,
this API is limited to creating the simplistic tokens needed to
authenticate with the Google Core IoT infrastructure.
.. doxygengroup:: jwt
``` | /content/code_sandbox/doc/services/misc.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 147 |
```restructuredtext
.. _async_notification:
Asynchronous Notifications
##########################
Zephyr APIs often include :ref:`api_term_async` functions where an
operation is initiated and the application needs to be informed when it
completes, and whether it succeeded. Using :c:func:`k_poll` is
often a good method, but some application architectures may be more
suited to a callback notification, and operations like enabling clocks
and power rails may need to be invoked before kernel functions are
available so a busy-wait for completion may be needed.
This API is intended to be embedded within specific subsystems such as
:ref:`resource_mgmt_onoff` and other APIs that support async
transactions. The subsystem wrappers are responsible for extracting
operation-specific data from requests that include a notification
element, and for invoking callbacks with the parameters required by the
API.
A limitation is that this API is not suitable for :ref:`syscalls`
because:
* :c:struct:`sys_notify` is not a kernel object;
* copying the notification content from userspace will break use of
:c:macro:`CONTAINER_OF` in the implementing function;
* neither the spin-wait nor callback notification methods can be
accepted from userspace callers.
Where a notification is required for an asynchronous operation invoked
from a user mode thread the subsystem or driver should provide a syscall
API that uses :c:struct:`k_poll_signal` for notification.
API Reference
*************
.. doxygengroup:: sys_notify_apis
``` | /content/code_sandbox/doc/services/notify.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 317 |
```restructuredtext
.. _console:
Console
#######
.. doxygengroup:: console_api
``` | /content/code_sandbox/doc/services/console.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 19 |
```restructuredtext
.. _resource_mgmt:
Resource Management
###################
There are various situations where it's necessary to coordinate resource
use at runtime among multiple clients. These include power rails,
clocks, other peripherals, and binary device power management. The
complexity of properly managing multiple consumers of a device in a
multithreaded system, especially when transitions may be asynchronous,
suggests that a shared implementation is desirable.
Zephyr provides managers for several coordination policies. These
managers are embedded into services that use them for specific
functions.
.. contents::
:local:
:depth: 2
.. _resource_mgmt_onoff:
On-Off Manager
**************
An on-off manager supports an arbitrary number of clients of a service
which has a binary state. Example applications are power rails, clocks,
and binary device power management.
The manager has the following properties:
* The stable states are off, on, and error. The service always begins
in the off state. The service may also be in a transition to a given
state.
* The core operations are request (add a dependency) and release (remove
a dependency). Supporting operations are reset (to clear an error
state) and cancel (to reclaim client data from an in-progress
transition). The service manages the state based on calls to
functions that initiate these operations.
* The service transitions from off to on when first client request is
received.
* The service transitions from on to off when last client release is
received.
* Each service configuration provides functions that implement the
transition from off to on, from on to off, and optionally from an
error state to off. Transitions must be invokable from both thread
and interrupt context.
* The request and reset operations are asynchronous using
:ref:`async_notification`. Both operations may be cancelled, but
cancellation has no effect on the in-progress transition.
* Requests to turn on may be queued while a transition to off is in
progress: when the service has turned off successfully it will be
immediately turned on again (where context allows) and waiting clients
notified when the start completes.
Requests are reference counted, but not tracked. That means clients are
responsible for recording whether their requests were accepted, and for
initiating a release only if they have previously successfully completed
a request. Improper use of the API can cause an active client to be
shut out, and the manager does not maintain a record of specific clients
that have been granted a request.
Failures in executing a transition are recorded and inhibit further
requests or releases until the manager is reset. Pending requests are
notified (and cancelled) when errors are discovered.
Transition operation completion notifications are provided through
:ref:`async_notification`.
Clients and other components interested in tracking all service state
changes, including when a service begins turning off or enters an error
state, can be informed of state transitions by registering a monitor
with onoff_monitor_register(). Notification of changes are provided
before issuing completion notifications associated with the new
state.
.. note::
A generic API may be implemented by multiple drivers where the common
case is asynchronous. The on-off client structure may be an
appropriate solution for the generic API. Where drivers that can
guarantee synchronous context-independent transitions a driver may
use :c:struct:`onoff_sync_service` and its supporting API rather than
:c:struct:`onoff_manager`, with only a small reduction in functionality
(primarily no support for the monitor API).
.. doxygengroup:: resource_mgmt_onoff_apis
``` | /content/code_sandbox/doc/services/resource_management/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 769 |
```restructuredtext
Building extensions
###################
The LLEXT subsystem allows for the creation of extensions that can be loaded
into a running Zephyr application. When building these extensions, it's very
often useful to have access to the headers and compiler flags used by the main
Zephyr application.
The easiest path to achieve this is to build the extension as part of the
Zephyr application, using the `native Zephyr CMake features
<llext_build_native_>`_. This will result in a single build providing both the
main Zephyr application and the extension(s), which will all automatically be
built with the same parameters.
In some cases, involving the full Zephyr build system may not be feasible or
convenient; maybe the extension is built using a different compiler suite or as
part of a different project altogether. In this case, the extension developer
needs to export the headers and compiler flags used by the main Zephyr
application. This can be done using the `LLEXT Extension Development Kit
<llext_build_edk_>`_.
.. _llext_build_native:
Using the Zephyr CMake features
*******************************
The Zephyr build system provides a set of features that can be used to build
extensions as part of the Zephyr application. This is the simplest way to build
extensions, as it requires minimal additions to an application build system.
Building the extension
----------------------
An extension can be defined in the app's ``CMakeLists.txt`` by invoking the
``add_llext_target`` function, providing the target name, the output and the
source files. Usage is similar to the standard ``add_custom_target`` CMake
function:
.. code-block:: cmake
add_llext_target(
<target_name>
OUTPUT <ext_file.llext>
SOURCES <src1> [<src2>...]
)
where:
- ``<target_name>`` is the name of the final CMake target that will result in
the LLEXT binary being created;
- ``<ext_file.llext>`` is the name of the output file that will contain the
packaged extension;
- ``<src1> [<src2>...]`` is the list of source files that will be compiled to
create the extension.
The exact steps of the extension building process depend on the currently
selected :ref:`ELF object format <llext_kconfig_type>`.
The following custom properties of ``<target_name>`` are defined and can be
retrieved using the ``get_target_property()`` CMake function:
``lib_target``
Target name for the source compilation and/or link step.
``lib_output``
The binary file resulting from compilation and/or linking steps.
``pkg_input``
The file to be used as input for the packaging step.
``pkg_output``
The final extension file name.
Tweaking the build process
--------------------------
The following CMake functions can be used to modify the build system behavior
during the extension build process to a fine degree. Each of the below
functions takes the LLEXT target name as its first argument; it is otherwise
functionally equivalent to the common Zephyr ``target_*`` version.
* ``llext_compile_definitions``
* ``llext_compile_features``
* ``llext_compile_options``
* ``llext_include_directories``
* ``llext_link_options``
Custom build steps
------------------
The ``add_llext_command`` CMake function can be used to add custom build steps
that will be executed during the extension build process. The command will be
run at the specified build step and can refer to the properties of the target
for build-specific details.
The function signature is:
.. code-block:: cmake
add_llext_command(
TARGET <target_name>
[PRE_BUILD | POST_BUILD | POST_PKG]
COMMAND <command> [args...]
)
The different build steps are:
``PRE_BUILD``
Before the extension code is linked, if the architecture uses dynamic
libraries. This step can access `lib_target` and its own properties.
``POST_BUILD``
After the extension code is built, but before packaging it in an ``.llext``
file. This step is expected to create a `pkg_input` file by reading the
contents of `lib_output`.
``POST_PKG``
After the extension output file has been created. The command can operate
on the final llext file `pkg_output`.
Anything else after ``COMMAND`` will be passed to ``add_custom_command()`` as-is
(including multiple commands and other options).
.. _llext_build_edk:
LLEXT Extension Development Kit (EDK)
*************************************
When building extensions as a standalone project, outside of the main Zephyr
build system, it's important to have access to the same set of generated
headers and compiler flags used by the main Zephyr application, since they have
a direct impact on how Zephyr headers are interpreted and the extension is
compiled in general.
This can be achieved by asking Zephyr to generate an Extension Development Kit
(EDK) from the build artifacts of the main Zephyr application, by running the
following command which uses the ``llext-edk`` target:
.. code-block:: shell
west build -t llext-edk
The generated EDK can be found in the build directory under the ``zephyr``
directory. It's a tarball that contains the headers and compile flags needed
to build extensions. The extension developer can then include the headers
and use the compile flags in their build system to build the extension.
Compile flags
-------------
The EDK includes the convenience files ``cmake.cflags`` (for CMake-based
projects) and ``Makefile.cflags`` (for Make-based ones), which define a set of
variables that contain the compile flags needed by the project. The full list
of flags needed to build an extension is provided by ``LLEXT_CFLAGS``. Also
provided is a more granular set of flags that can be used in support of
different use cases, such as when building mocks for unit tests:
``LLEXT_INCLUDE_CFLAGS``
Compiler flags to add directories containing non-autogenerated headers
to the compiler's include search paths.
``LLEXT_GENERATED_INCLUDE_CFLAGS``
Compiler flags to add directories containing autogenerated headers to
the compiler's include search paths.
``LLEXT_ALL_INCLUDE_CFLAGS``
Compiler flags to add all directories containing headers used in the
build to the compiler's include search paths. This is a combination of
``LLEXT_INCLUDE_CFLAGS`` and ``LLEXT_GENERATED_INCLUDE_CFLAGS``.
``LLEXT_GENERATED_IMACROS_CFLAGS``
Compiler flags for autogenerated headers that must be included in the
build via ``-imacros``.
``LLEXT_BASE_CFLAGS``
Other compiler flags that control code generation for the target CPU.
None of these flags are included in the above lists.
``LLEXT_CFLAGS``
All flags required to build an extension. This is a combination of
``LLEXT_ALL_INCLUDE_CFLAGS``, ``LLEXT_GENERATED_IMACROS_CFLAGS`` and
``LLEXT_BASE_CFLAGS``.
.. _llext_kconfig_edk:
LLEXT EDK Kconfig options
-------------------------
The LLEXT EDK can be configured using the following Kconfig options:
:kconfig:option:`CONFIG_LLEXT_EDK_NAME`
The name of the generated EDK tarball.
:kconfig:option:`CONFIG_LLEXT_EDK_USERSPACE_ONLY`
If set, the EDK will include headers that do not contain code to route
syscalls to the kernel. This is useful when building extensions that will
run exclusively in user mode.
EDK Sample
----------
Refer to :zephyr:code-sample:`llext-edk` for an example of how to use the
LLEXT EDK.
``` | /content/code_sandbox/doc/services/llext/build.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,697 |
```restructuredtext
.. _llext:
Linkable Loadable Extensions (LLEXT)
####################################
The LLEXT subsystem provides a toolbox for extending the functionality of an
application at runtime with linkable loadable code.
Extensions are precompiled executables in ELF format that can be verified,
loaded, and linked with the main Zephyr binary. Extensions can be manipulated
and introspected to some degree, as well as unloaded when no longer needed.
.. toctree::
:maxdepth: 1
config
build
load
api
.. note::
The LLEXT subsystem requires architecture-specific support. It is currently
available only on ARM and Xtensa cores.
``` | /content/code_sandbox/doc/services/llext/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 144 |
```restructuredtext
Configuration
#############
The following Kconfig options are available for the LLEXT subsystem:
.. _llext_kconfig_heap:
Heap size
----------
The LLEXT subsystem needs a static heap to be allocated for extension related
data. The following option controls this allocation.
:kconfig:option:`CONFIG_LLEXT_HEAP_SIZE`
Size of the LLEXT heap in kilobytes.
.. note::
When :ref:`user mode <usermode_api>` is enabled, the heap size must be
large enough to allow the extension sections to be allocated with the
alignment required by the architecture.
.. _llext_kconfig_type:
ELF object type
---------------
The LLEXT subsystem supports loading different types of extensions; the type
can be set by choosing among the following Kconfig options:
:kconfig:option:`CONFIG_LLEXT_TYPE_ELF_OBJECT`
Build and expect relocatable files as binary object type for the LLEXT
subsystem. A single compiler invocation is used to generate the object
file.
:kconfig:option:`CONFIG_LLEXT_TYPE_ELF_RELOCATABLE`
Build and expect relocatable (partially linked) files as the binary
object type for the LLEXT subsystem. These object files are generated
by the linker by combining multiple object files into a single one.
:kconfig:option:`CONFIG_LLEXT_TYPE_ELF_SHAREDLIB`
Build and expect shared libraries as binary object type for the LLEXT
subsystem. The standard linking process is used to generate the shared
library from multiple object files.
.. note::
This is not currently supported on ARM architectures.
.. _llext_kconfig_storage:
Minimize allocations
--------------------
The LLEXT subsystem loading mechanism, by default, uses a seek/read abstraction
and copies all data into allocated memory; this is done to allow the extension
to be loaded from any storage medium. Sometimes, however, data is already in a
buffer in RAM and copying it is not necessary. The following option allows the
LLEXT subsystem to optimize memory footprint in this case.
:kconfig:option:`CONFIG_LLEXT_STORAGE_WRITABLE`
Allow the extension to be loaded by directly referencing section data
into the ELF buffer. To be effective, this requires the use of an ELF
loader that supports the ``peek`` functionality, such as the
:c:struct:`llext_buf_loader`.
.. warning::
The application must ensure that the buffer used to load the
extension remains allocated until the extension is unloaded.
.. note::
This will directly modify the contents of the buffer during the link
phase. Once the extension is unloaded, the buffer must be reloaded
before it can be used again in a call to :c:func:`llext_load`.
.. note::
This is currently required by the Xtensa architecture. Further
information on this topic is available on GitHub issue `#75341
<https://github.com/zephyrproject-rtos/zephyr/issues/75341>`_.
.. _llext_kconfig_slid:
Using SLID for symbol lookups
-----------------------------
When an extension is loaded, the LLEXT subsystem must find the address of all
the symbols residing in the main application that the extension references.
To this end, the main binary contains a LLEXT-dedicated symbol table, filled
with one symbol-name-to-address mapping entry for each symbol exported by the
main application to extensions. This table can then be searched into by the
LLEXT linker at extension load time. This process is pretty slow due to the
nature of string comparisons, and the size consumed by the table can become
significant as the number of exported symbols increases.
:kconfig:option:`CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID`
Perform an extra processing step on the Zephyr binary and on all
extensions being built, converting every string in the symbol tables to
a pointer-sized hash called `Symbol Link Identifier` (SLID), which is
stored in the binary.
This speeds up the symbol lookup process by allowing usage of
integer-based comparisons rather than string-based ones. Another
benefit of SLID-based linking is that storing symbol names in the
binary is no longer necessary, which provides a significant decrease in
symbol table size.
.. note::
This option is not currently compatible with the :ref:`LLEXT EDK
<llext_build_edk>`.
.. note::
Using a different value for this option in the main binary and in
extensions is not supported. For example, if the main application
is built with ``CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID=y``, it is
forbidden to load an extension that was compiled with
``CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID=n``.
EDK configuration
-----------------
Options influencing the generation and behavior of the LLEXT EDK are described
in :ref:`llext_kconfig_edk`.
``` | /content/code_sandbox/doc/services/llext/config.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,046 |
```restructuredtext
Loading extensions
##################
Once an extension is built and the ELF file is available, it can be loaded into
the Zephyr application using the LLEXT API, which provides a way to load the
extension into memory, access its symbols and call its functions.
Loading an extension
====================
An extension may be loaded using any implementation of a :c:struct:`llext_loader`
which has a set of function pointers that provide the necessary functionality
to read the ELF data. A loader also provides some minimal context (memory)
needed by the :c:func:`llext_load` function. An implementation over a buffer
containing an ELF in addressable memory is available as
:c:struct:`llext_buf_loader`.
The extensions are loaded with a call to the :c:func:`llext_load` function,
passing in the extension name and the configured loader. Once that completes
successfully, the extension is loaded into memory and is ready to be used.
.. note::
When :ref:`User Mode <usermode_api>` is enabled, the extension will not be
included in any user memory domain. To allow access from user mode, the
:c:func:`llext_add_domain` function must be called.
Accessing code and data
=======================
To interact with the newly loaded extension, the host application must use the
:c:func:`llext_find_sym` function to get the address of the exported symbol.
The returned ``void *`` can then be cast to the appropriate type and used.
A wrapper for calling a function with no arguments is provided in
:c:func:`llext_call_fn`.
Cleaning up after use
=====================
The :c:func:`llext_unload` function must be called to free the memory used by
the extension once it is no longer required. After this call completes, all
pointers to symbols in the extension that were obtained will be invalid.
Troubleshooting
###############
This feature is being actively developed and as such it is possible that some
issues may arise. Since linking does modify the binary code, in case of errors
the results are difficult to predict. Some common issues may be:
* Results from :c:func:`llext_find_sym` point to an invalid address;
* Constants and variables defined in the extension do not have the expected
values;
* Calling a function defined in an extension results in a hard fault, or memory
in the main application is corrupted after returning from it.
If any of this happens, the following tips may help understand the issue:
* Make sure :kconfig:option:`CONFIG_LLEXT_LOG_LEVEL` is set to ``DEBUG``, then
obtain a log of the :c:func:`llext_load` invocation.
* If possible, disable memory protection (MMU/MPU) and see if this results in
different behavior.
* Try to simplify the extension to the minimum possible code that reproduces
the issue.
* Use a debugger to inspect the memory and registers to try to understand what
is happening.
.. note::
When using GDB, the ``add-symbol-file`` command may be used to load the
debugging information and symbols from the ELF file. Make sure to specify
the proper offset (usually the start of the ``.text`` section, reported
as ``region 0`` in the debug logs.)
If the issue persists, please open an issue in the GitHub repository, including
all the above information.
``` | /content/code_sandbox/doc/services/llext/load.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 729 |
```restructuredtext
API Reference
*************
.. doxygengroup:: llext_apis
.. doxygengroup:: llext_symbols
.. doxygengroup:: llext_loader_apis
``` | /content/code_sandbox/doc/services/llext/api.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 42 |
```restructuredtext
.. _seialization_reference:
Serialization
#############
Zephyr has support for several data serialization subsystems. These can be used to encode/decode
structured data with a known format on-the-wire.
.. toctree::
:maxdepth: 1
nanopb.rst
``` | /content/code_sandbox/doc/services/serialization/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 63 |
```restructuredtext
.. _nanopb_reference:
Nanopb
######
`Nanopb <https://jpa.kapsi.fi/nanopb/>`_ is a C implementation of Google's
`Protocol Buffers <https://protobuf.dev/>`_.
Requirements
************
Nanopb uses the protocol buffer compiler to generate source and header files,
make sure the ``protoc`` executable is installed and available.
.. tabs::
.. group-tab:: Ubuntu
Use ``apt`` to install dependency:
.. code-block:: shell
sudo apt install protobuf-compiler
.. group-tab:: macOS
Use ``brew`` to install dependency:
.. code-block:: shell
brew install protobuf
.. group-tab:: Windows
Use ``choco`` to install dependency:
.. code-block:: shell
choco install protoc
Additionally, Nanopb is an optional module and needs to be added explicitly to the workspace:
.. code-block:: shell
west config manifest.project-filter -- +nanopb
west update
Configuration
*************
Make sure to include ``nanopb`` within your ``CMakeLists.txt`` file as follows:
.. code-block:: cmake
list(APPEND CMAKE_MODULE_PATH ${ZEPHYR_BASE}/modules/nanopb)
include(nanopb)
Adding ``proto`` files can be done with the ``zephyr_nanopb_sources()`` CMake function which
ensures the generated header and source files are created before building the specified target.
Nanopb has `generator options <https://jpa.kapsi.fi/nanopb/docs/reference.html#generator-options>`_
that can be used to configure messages or fields. This allows to set fixed sizes or skip fields
entirely.
The internal CMake generator has an extension to configure ``*.options.in`` files automatically
with CMake variables.
See :zephyr_file:`samples/modules/nanopb/src/simple.options.in` and
:zephyr_file:`samples/modules/nanopb/CMakeLists.txt` for usage example.
``` | /content/code_sandbox/doc/services/serialization/nanopb.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 417 |
```restructuredtext
.. _binary_descriptors:
Binary Descriptors
##################
Binary Descriptors are constant data objects storing information about the binary executable.
Unlike "regular" constants, binary descriptors are linked to a known offset in the binary, making
them accessible to other programs, such as a different image running on the same device or a host tool.
A few examples of constants that would make useful binary descriptors are: kernel version, app version,
build time, compiler version, environment variables, compiling host name, etc.
Binary descriptors are created by using the ``DEFINE_BINDESC_*`` macros. For example:
.. code-block:: c
#include <zephyr/bindesc.h>
BINDESC_STR_DEFINE(my_string, 2, "Hello world!"); // Unique ID is 2
``my_string`` could then be accessed using:
.. code-block:: c
printk("my_string: %s\n", BINDESC_GET_STR(my_string));
But it could also be retrieved by ``west bindesc``:
.. code-block:: bash
$ west bindesc custom_search STR 2 build/zephyr/zephyr.bin
"Hello world!"
Internals
*********
Binary descriptors are implemented with a TLV (tag, length, value) header linked
to a known offset in the binary image. This offset may vary between architectures,
but generally the descriptors are linked as close to the beginning of the image as
possible. In architectures where the image must begin with a vector table (such as
ARM), the descriptors are linked right after the vector table. The reset vector points
to the beginning of the text section, which is after the descriptors. In architectures
where the image must begin with executable code (e.g. x86), a jump instruction is injected at
the beginning of the image, in order to skip over the binary descriptors, which are right
after the jump instruction.
Each tag is a 16 bit unsigned integer, where the most significant nibble (4 bits) is the type
(currently uint, string or bytes), and the rest is the ID. The ID is globally unique to each
descriptor. For example, the ID of the app version string is ``0x800``, and a string
is denoted by 0x1, making the app version tag ``0x1800``. The length is a 16 bit
number equal to the length of the data in bytes. The data is the actual descriptor
value. All binary descriptor numbers (magic, tags, uints) are laid out in memory
in the endianness native to the SoC. ``west bindesc`` assumes little endian by default,
so if the image belongs to a big endian SoC, the appropriate flag should be given to the
tool.
The binary descriptor header starts with the magic number ``0xb9863e5a7ea46046``. It's followed
by the TLVs, and ends with the ``DESCRIPTORS_END`` (``0xffff``) tag. The tags are
always aligned to 32 bits. If the value of the previous descriptor had a non-aligned
length, zero padding will be added to ensure that the current tag is aligned.
Putting it all together, here is what the example above would look like in memory
(of a little endian SoC):
.. code-block::
46 60 a4 7e 5a 3e 86 b9 02 10 0d 00 48 65 6c 6c 6f 20 77 6f 72 6c 64 21 00 00 00 00 ff ff
| magic | tag |length| H e l l o w o r l d ! | pad | end |
Usage
*****
Binary descriptors are always created by the ``BINDESC_*_DEFINE`` macros. As shown in
the example above, a descriptor can be generated from any string or integer, with any
ID. However, it is recommended to comply with the standard tags defined in
``include/zephyr/bindesc.h``, as that would have the following benefits:
1. The ``west bindesc`` tool would be able to recognize what the descriptor means and
print a meaningful tag
2. It would enforce consistency between various apps from various sources
3. It allows upstream-ability of descriptor generation (see Standard Descriptors)
To define a descriptor with a standard tag, just use the tags included from ``bindesc.h``:
.. code-block:: c
#include <zephyr/bindesc.h>
BINDESC_STR_DEFINE(app_version, BINDESC_ID_APP_VERSION_STRING, "1.2.3");
Standard Descriptors
====================
Some descriptors might be trivial to implement, and could therefore be implemented
in a standard way in upstream Zephyr. These could then be enabled via Kconfig, instead
of requiring every user to reimplement them. These include build times, kernel version,
and host info. For example, to add the build date and time as a string, the following
configs should be enabled:
.. code-block:: kconfig
# Enable binary descriptors
CONFIG_BINDESC=y
# Enable definition of binary descriptors
CONFIG_BINDESC_DEFINE=y
# Enable default build time binary descriptors
CONFIG_BINDESC_DEFINE_BUILD_TIME=y
CONFIG_BINDESC_BUILD_DATE_TIME_STRING=y
To avoid collisions with user defined descriptors, the standard descriptors were allotted
the range between ``0x800-0xfff``. This leaves ``0x000-0x7ff`` to users.
For more information read the ``help`` sections of these Kconfig symbols.
By convention, each Kconfig symbol corresponds to a binary descriptor whose
name is the Kconfig name (with ``CONFIG_BINDESC_`` removed) in lower case. For example,
``CONFIG_BINDESC_KERNEL_VERSION_STRING`` creates a descriptor that can be
accessed using ``BINDESC_GET_STR(kernel_version_string)``.
west bindesc tool
=================
``west`` is able to parse and display binary descriptors from a given executable image.
For more information refer to ``west bindesc --help`` or the :ref:`documentation<west-bindesc>`.
API Reference
*************
.. doxygengroup:: bindesc_define
``` | /content/code_sandbox/doc/services/binary_descriptors/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,319 |
```restructuredtext
.. _modbus:
Modbus
######
Modbus is an industrial messaging protocol. The protocol is specified
for different types of networks or buses. Zephyr OS implementation
supports communication over serial line and may be used
with different physical interfaces, like RS485 or RS232.
TCP support is not implemented directly, but there are helper functions
to realize TCP support according to the application's needs.
Modbus communication is based on client/server model.
Only one client may be present on the bus. Client can communicate with several
server devices. Server devices themselves are passive and must not send
requests or unsolicited responses.
Services requested by the client are specified by function codes (FCxx),
and can be found in the specification or documentation of the API below.
Zephyr RTOS implementation supports both client and server roles.
More information about Modbus and Modbus RTU can be found on the website
`MODBUS Protocol Specifications`_.
Samples
*******
* :zephyr:code-sample:`modbus-rtu-server` and :zephyr:code-sample:`modbus-rtu-client` samples give
the possibility to try out RTU server and RTU client implementation with an evaluation board.
* :zephyr:code-sample:`modbus-tcp-server` sample is a simple Modbus TCP server.
* :zephyr:code-sample:`modbus-gateway` sample shows how to build a TCP to serial line
gateway with Zephyr OS.
API Reference
*************
.. doxygengroup:: modbus
.. _`MODBUS Protocol Specifications`: https://modbus.org/specs.php
``` | /content/code_sandbox/doc/services/modbus/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 337 |
```restructuredtext
.. _shell_api:
Shell
######
.. contents::
:local:
:depth: 2
Overview
********
This module allows you to create and handle a shell with a user-defined command
set. You can use it in examples where more than simple button or LED user
interaction is required. This module is a Unix-like shell with these features:
* Support for multiple instances.
* Advanced cooperation with the :ref:`logging_api`.
* Support for static and dynamic commands.
* Support for dictionary commands.
* Smart command completion with the :kbd:`Tab` key.
* Built-in commands: :command:`clear`, :command:`shell`, :command:`colors`,
:command:`echo`, :command:`history` and :command:`resize`.
* Viewing recently executed commands using keys: :kbd:`↑` :kbd:`↓` or meta keys.
* Text edition using keys: :kbd:`←`, :kbd:`→`, :kbd:`Backspace`,
  :kbd:`Delete`, :kbd:`End`, :kbd:`Home`, :kbd:`Insert`.
* Support for ANSI escape codes: ``VT100`` and ``ESC[n~`` for cursor control
and color printing.
* Support for editing multiline commands.
* Built-in handler to display help for the commands.
* Support for wildcards: ``*`` and ``?``.
* Support for meta keys.
* Support for getopt and getopt_long.
* Kconfig configuration to optimize memory usage.
.. note::
Some of these features have a significant impact on RAM and flash usage,
but many can be disabled when not needed. To default to options which
favor reduced RAM and flash requirements instead of features, you should
enable :kconfig:option:`CONFIG_SHELL_MINIMAL` and selectively enable just the
features you want.
.. _backends:
Backends
********
The module can be connected to any transport for command input and output.
At this point, the following transport layers are implemented:
* MQTT
* Segger RTT
* SMP
* Telnet
* UART
* USB
* Bluetooth LE (NUS)
* RPMSG
* DUMMY - not a physical transport layer.
Telnet
======
Enabling :kconfig:option:`CONFIG_SHELL_BACKEND_TELNET` will allow users to use telnet
as a shell backend. Connecting to it can be done using PuTTY or any ``telnet`` client.
For example:
.. code-block:: none
telnet <ip address> <port>
By default the telnet client won't handle telnet commands and configuration. Although
command support can be enabled with :kconfig:option:`CONFIG_SHELL_TELNET_SUPPORT_COMMAND`.
This will give the telnet client access to a very limited set of supported commands but
still can be turned on if needed. One of the command options it supports is the ``ECHO``
option. This will allow the client to be in character mode (character at a time),
similar to a UART backend in that regard. This will make the client send a character
as soon as it is typed having the effect of increasing the network traffic
considerably. For that cost, it will enable the line editing,
`tab completion <tab-feature_>`_, and `history <history-feature_>`_
features of the shell.
USB CDC ACM
===========
To configure Shell USB CDC ACM backend, simply add the snippet ``cdc-acm-console``
to your build:
.. code-block:: console
west build -S cdc-acm-console [...]
Details on the configuration settings are captured in the following files:
- :zephyr_file:`snippets/cdc-acm-console/cdc-acm-console.conf`.
- :zephyr_file:`snippets/cdc-acm-console/cdc-acm-console.overlay`.
Bluetooth LE (NUS)
==================
To configure Bluetooth LE (NUS) backend, simply add the snippet ``nus-console``
to your build:
.. code-block:: console
west build -S nus-console [...]
Details on the configuration settings are captured in the following files:
- :zephyr_file:`snippets/nus-console/nus-console.conf`.
- :zephyr_file:`snippets/nus-console/nus-console.overlay`.
Segger RTT
==========
To configure Segger RTT backend, add the following configurations to your build:
- :kconfig:option:`CONFIG_USE_SEGGER_RTT`
- :kconfig:option:`CONFIG_SHELL_BACKEND_RTT`
- :kconfig:option:`CONFIG_SHELL_BACKEND_SERIAL`
Details on additional configuration settings are captured in:
:zephyr_file:`samples/subsys/shell/shell_module/prj_minimal_rtt.conf`.
Connecting to Segger RTT via TCP (on macOS, for example)
--------------------------------------------------------
On macOS JLinkRTTClient won't let you enter input. Instead, please use following
procedure:
* Open up a first Terminal window and enter:
.. code-block:: none
JLinkRTTLogger -Device NRF52840_XXAA -RTTChannel 1 -if SWD -Speed 4000 ~/rtt.log
(change device if required)
* Open up a second Terminal window and enter:
.. code-block:: none
nc localhost 19021
* Now you should have a network connection to RTT that will let you enter input
to the shell.
Commands
********
Shell commands are organized in a tree structure and grouped into the following
types:
* Root command (level 0): Gathered and alphabetically sorted in a dedicated
memory section.
* Static subcommand (level > 0): Number and syntax must be known during compile
time. Created in the software module.
* Dynamic subcommand (level > 0): Number and syntax does not need to be known
during compile time. Created in the software module.
Commonly-used command groups
============================
The following list is a set of useful command groups and how to enable them:
GPIO
----
- :kconfig:option:`CONFIG_GPIO`
- :kconfig:option:`CONFIG_GPIO_SHELL`
I2C
---
- :kconfig:option:`CONFIG_I2C`
- :kconfig:option:`CONFIG_I2C_SHELL`
Sensor
------
- :kconfig:option:`CONFIG_SENSOR`
- :kconfig:option:`CONFIG_SENSOR_SHELL`
Flash
-----
- :kconfig:option:`CONFIG_FLASH`
- :kconfig:option:`CONFIG_FLASH_SHELL`
File-System
-----------
- :kconfig:option:`CONFIG_FILE_SYSTEM`
- :kconfig:option:`CONFIG_FILE_SYSTEM_SHELL`
Creating commands
=================
Use the following macros for adding shell commands:
* :c:macro:`SHELL_CMD_REGISTER` - Create root command. All root commands must
have different name.
* :c:macro:`SHELL_COND_CMD_REGISTER` - Conditionally (if compile time flag is
set) create root command. All root commands must have different name.
* :c:macro:`SHELL_CMD_ARG_REGISTER` - Create root command with arguments.
All root commands must have different name.
* :c:macro:`SHELL_COND_CMD_ARG_REGISTER` - Conditionally (if compile time flag
is set) create root command with arguments. All root commands must have
different name.
* :c:macro:`SHELL_CMD` - Initialize a command.
* :c:macro:`SHELL_COND_CMD` - Initialize a command if compile time flag is set.
* :c:macro:`SHELL_EXPR_CMD` - Initialize a command if compile time expression is
non-zero.
* :c:macro:`SHELL_CMD_ARG` - Initialize a command with arguments.
* :c:macro:`SHELL_COND_CMD_ARG` - Initialize a command with arguments if compile
time flag is set.
* :c:macro:`SHELL_EXPR_CMD_ARG` - Initialize a command with arguments if compile
time expression is non-zero.
* :c:macro:`SHELL_STATIC_SUBCMD_SET_CREATE` - Create a static subcommands
array.
* :c:macro:`SHELL_SUBCMD_DICT_SET_CREATE` - Create a dictionary subcommands
array.
* :c:macro:`SHELL_DYNAMIC_CMD_CREATE` - Create a dynamic subcommands array.
Commands can be created in any file in the system that includes
:zephyr_file:`include/zephyr/shell/shell.h`. All created commands are available for all
shell instances.
Static commands
---------------
Example code demonstrating how to create a root command with static
subcommands.
.. image:: images/static_cmd.PNG
:align: center
:alt: Command tree with static commands.
.. code-block:: c
/* Creating subcommands (level 1 command) array for command "demo". */
SHELL_STATIC_SUBCMD_SET_CREATE(sub_demo,
SHELL_CMD(params, NULL, "Print params command.",
cmd_demo_params),
SHELL_CMD(ping, NULL, "Ping command.", cmd_demo_ping),
SHELL_SUBCMD_SET_END
);
/* Creating root (level 0) command "demo" */
SHELL_CMD_REGISTER(demo, &sub_demo, "Demo commands", NULL);
Example implementation can be found under following location:
:zephyr_file:`samples/subsys/shell/shell_module/src/main.c`.
Dictionary commands
===================
This is a special kind of static commands. Dictionary commands can be used
every time you want to use a pair: (string <-> corresponding data) in
a command handler. The string is usually a verbal description of a given data.
The idea is to use the string as a command syntax that can be prompted by the
shell and corresponding data can be used to process the command.
Let's use an example. Suppose you created a command to set an ADC gain.
It is a perfect place where a dictionary can be used. The dictionary would
be a set of pairs: (string: gain_value, int: value) where int value could
be used with the ADC driver API.
Abstract code for this task would look like this:
.. code-block:: c
static int gain_cmd_handler(const struct shell *sh,
size_t argc, char **argv, void *data)
{
int gain;
/* data is a value corresponding to called command syntax */
gain = (int)data;
adc_set_gain(gain);
shell_print(sh, "ADC gain set to: %s\n"
"Value send to ADC driver: %d",
argv[0],
gain);
return 0;
}
SHELL_SUBCMD_DICT_SET_CREATE(sub_gain, gain_cmd_handler,
(gain_1, 1, "gain 1"), (gain_2, 2, "gain 2"),
(gain_1_2, 3, "gain 1/2"), (gain_1_4, 4, "gain 1/4")
);
SHELL_CMD_REGISTER(gain, &sub_gain, "Set ADC gain", NULL);
This is how it would look like in the shell:
.. image:: images/dict_cmd.png
:align: center
:alt: Dictionary commands example.
Dynamic commands
----------------
Example code demonstrating how to create a root command with static and dynamic
subcommands. At the beginning dynamic command list is empty. New commands
can be added by typing:
.. code-block:: none
dynamic add <new_dynamic_command>
Newly added commands can be prompted or autocompleted with the :kbd:`Tab` key.
.. image:: images/dynamic_cmd.PNG
:align: center
:alt: Command tree with static and dynamic commands.
.. code-block:: c
/* Buffer for 10 dynamic commands */
static char dynamic_cmd_buffer[10][50];
/* commands counter */
static uint8_t dynamic_cmd_cnt;
/* Function returning command dynamically created
* in dynamic_cmd_buffer.
*/
static void dynamic_cmd_get(size_t idx,
struct shell_static_entry *entry)
{
if (idx < dynamic_cmd_cnt) {
entry->syntax = dynamic_cmd_buffer[idx];
entry->handler = NULL;
entry->subcmd = NULL;
entry->help = "Show dynamic command name.";
} else {
/* if there are no more dynamic commands available
* syntax must be set to NULL.
*/
entry->syntax = NULL;
}
}
SHELL_DYNAMIC_CMD_CREATE(m_sub_dynamic_set, dynamic_cmd_get);
SHELL_STATIC_SUBCMD_SET_CREATE(m_sub_dynamic,
SHELL_CMD(add, NULL,"Add new command to dynamic_cmd_buffer and"
" sort them alphabetically.",
cmd_dynamic_add),
SHELL_CMD(execute, &m_sub_dynamic_set,
"Execute a command.", cmd_dynamic_execute),
SHELL_CMD(remove, &m_sub_dynamic_set,
"Remove a command from dynamic_cmd_buffer.",
cmd_dynamic_remove),
SHELL_CMD(show, NULL,
"Show all commands in dynamic_cmd_buffer.",
cmd_dynamic_show),
SHELL_SUBCMD_SET_END
);
SHELL_CMD_REGISTER(dynamic, &m_sub_dynamic,
"Demonstrate dynamic command usage.", cmd_dynamic);
Example implementation can be found under following location:
:zephyr_file:`samples/subsys/shell/shell_module/src/dynamic_cmd.c`.
Commands execution
==================
Each command or subcommand may have a handler. The shell executes the handler
that is found deepest in the command tree and further subcommands (without a
handler) are passed as arguments. Characters within parentheses are treated
as one argument. If shell won't find a handler it will display an error message.
Commands can be also executed from a user application using any active backend
and a function :c:func:`shell_execute_cmd`, as shown in this example:
.. code-block:: c
int main(void)
{
/* Below code will execute "clear" command on a DUMMY backend */
shell_execute_cmd(NULL, "clear");
/* Below code will execute "shell colors off" command on
* an UART backend
*/
shell_execute_cmd(shell_backend_uart_get_ptr(),
"shell colors off");
}
Enable the DUMMY backend by setting the Kconfig
:kconfig:option:`CONFIG_SHELL_BACKEND_DUMMY` option.
Commands execution example
--------------------------
Let's assume a command structure as in the following figure, where:
* :c:macro:`root_cmd` - root command without a handler
* :c:macro:`cmd_xxx_h` - command has a handler
* :c:macro:`cmd_xxx` - command does not have a handler
.. image:: images/execution.png
:align: center
:alt: Command tree with static commands.
Example 1
^^^^^^^^^
Sequence: :c:macro:`root_cmd` :c:macro:`cmd_1_h` :c:macro:`cmd_12_h`
:c:macro:`cmd_121_h` :c:macro:`parameter` will execute command
:c:macro:`cmd_121_h` and :c:macro:`parameter` will be passed as an argument.
Example 2
^^^^^^^^^
Sequence: :c:macro:`root_cmd` :c:macro:`cmd_2` :c:macro:`cmd_22_h`
:c:macro:`parameter1` :c:macro:`parameter2` will execute command
:c:macro:`cmd_22_h` and :c:macro:`parameter1` :c:macro:`parameter2`
will be passed as an arguments.
Example 3
^^^^^^^^^
Sequence: :c:macro:`root_cmd` :c:macro:`cmd_1_h` :c:macro:`parameter1`
:c:macro:`cmd_121_h` :c:macro:`parameter2` will execute command
:c:macro:`cmd_1_h` and :c:macro:`parameter1`, :c:macro:`cmd_121_h` and
:c:macro:`parameter2` will be passed as an arguments.
Example 4
^^^^^^^^^
Sequence: :c:macro:`root_cmd` :c:macro:`parameter` :c:macro:`cmd_121_h`
:c:macro:`parameter2` will not execute any command.
Command handler
----------------
Simple command handler implementation:
.. code-block:: c
static int cmd_handler(const struct shell *sh, size_t argc,
char **argv)
{
ARG_UNUSED(argc);
ARG_UNUSED(argv);
shell_fprintf(sh, SHELL_INFO, "Print info message\n");
shell_print(sh, "Print simple text.");
shell_warn(sh, "Print warning text.");
shell_error(sh, "Print error text.");
return 0;
}
Function :c:func:`shell_fprintf` or the shell print macros:
:c:macro:`shell_print`, :c:macro:`shell_info`, :c:macro:`shell_warn` and
:c:macro:`shell_error` can be used from the command handler or from threads,
but not from an interrupt context. Instead, interrupt handlers should use
:ref:`logging_api` for printing.
Command help
------------
Every user-defined command or subcommand can have its own help description.
The help for commands and subcommands can be created with respective macros:
:c:macro:`SHELL_CMD_REGISTER`, :c:macro:`SHELL_CMD_ARG_REGISTER`,
:c:macro:`SHELL_CMD`, and :c:macro:`SHELL_CMD_ARG`.
Shell prints this help message when you call a command
or subcommand with ``-h`` or ``--help`` parameter.
Parent commands
---------------
In the subcommand handler, you can access both the parameters passed to
commands or the parent commands, depending on how you index ``argv``.
* When indexing ``argv`` with positive numbers, you can access the parameters.
* When indexing ``argv`` with negative numbers, you can access the parent
commands.
* The subcommand to which the handler belongs has the ``argv`` index of 0.
.. code-block:: c
static int cmd_handler(const struct shell *sh, size_t argc,
char **argv)
{
ARG_UNUSED(argc);
/* If it is a subcommand handler parent command syntax
* can be found using argv[-1].
*/
shell_print(sh, "This command has a parent command: %s",
argv[-1]);
/* Print this command syntax */
shell_print(sh, "This command syntax is: %s", argv[0]);
/* Print first argument */
shell_print(sh, "%s", argv[1]);
return 0;
}
Built-in commands
=================
These commands are activated by :kconfig:option:`CONFIG_SHELL_CMDS` set to ``y``.
* :command:`clear` - Clears the screen.
* :command:`history` - Shows the recently entered commands.
* :command:`resize` - Must be executed when terminal width is different than 80
characters or after each change of terminal width. It ensures proper
multiline text display and :kbd:`←`, :kbd:`→`, :kbd:`End`, :kbd:`Home` keys
handling. Currently this command works only with UART flow control switched
on. It can be also called with a subcommand:
* :command:`default` - Shell will send terminal width = 80 to the
terminal and assume successful delivery.
This command needs extra activation:
:kconfig:option:`CONFIG_SHELL_CMDS_RESIZE` set to ``y``.
* :command:`select` - It can be used to set new root command. Exit to main
command tree is with alt+r. This command needs extra activation:
:kconfig:option:`CONFIG_SHELL_CMDS_SELECT` set to ``y``.
* :command:`shell` - Root command with useful shell-related subcommands like:
* :command:`echo` - Toggles shell echo.
* :command:`colors` - Toggles colored syntax. This might be helpful in
case of Bluetooth shell to limit the amount of transferred bytes.
* :command:`stats` - Shows shell statistics.
.. _tab-feature:
Tab Feature
***********
The Tab button can be used to suggest commands or subcommands. This feature
is enabled by :kconfig:option:`CONFIG_SHELL_TAB` set to ``y``.
It can also be used for partial or complete auto-completion of commands.
This feature is activated by
:kconfig:option:`CONFIG_SHELL_TAB_AUTOCOMPLETION` set to ``y``.
When user starts writing a command and presses the :kbd:`Tab` button then
the shell will do one of 3 possible things:
* Autocomplete the command.
* Prompts available commands and if possible partly completes the command.
* Will not do anything if there are no available or matching commands.
.. image:: images/tab_prompt.png
:align: center
:alt: Tab Feature usage example
.. _history-feature:
History Feature
***************
This feature enables commands history in the shell. It is activated by:
:kconfig:option:`CONFIG_SHELL_HISTORY` set to ``y``. History can be accessed
using keys: :kbd:`↑` :kbd:`↓` or :kbd:`Ctrl+n` and :kbd:`Ctrl+p`
if meta keys are active.
Number of commands that can be stored depends on size
of :kconfig:option:`CONFIG_SHELL_HISTORY_BUFFER` parameter.
Wildcards Feature
*****************
The shell module can handle wildcards. Wildcards are interpreted correctly
when expanded command and its subcommands do not have a handler. For example,
if you want to set logging level to ``err`` for the ``app`` and ``app_test``
modules you can execute the following command:
.. code-block:: none
log enable err a*
.. image:: images/wildcard.png
:align: center
:alt: Wildcard usage example
This feature is activated by :kconfig:option:`CONFIG_SHELL_WILDCARD` set to ``y``.
Meta Keys Feature
*****************
The shell module supports the following meta keys:
.. list-table:: Implemented meta keys
:widths: 10 40
:header-rows: 1
* - Meta keys
- Action
* - :kbd:`Ctrl+a`
- Moves the cursor to the beginning of the line.
* - :kbd:`Ctrl+b`
- Moves the cursor backward one character.
* - :kbd:`Ctrl+c`
- Preserves the last command on the screen and starts a new command in
a new line.
* - :kbd:`Ctrl+d`
- Deletes the character under the cursor.
* - :kbd:`Ctrl+e`
- Moves the cursor to the end of the line.
* - :kbd:`Ctrl+f`
- Moves the cursor forward one character.
* - :kbd:`Ctrl+k`
- Deletes from the cursor to the end of the line.
* - :kbd:`Ctrl+l`
- Clears the screen and leaves the currently typed command at the top of
the screen.
* - :kbd:`Ctrl+n`
- Moves in history to next entry.
* - :kbd:`Ctrl+p`
- Moves in history to previous entry.
* - :kbd:`Ctrl+u`
- Clears the currently typed command.
* - :kbd:`Ctrl+w`
- Removes the word or part of the word to the left of the cursor. Words
separated by period instead of space are treated as one word.
* - :kbd:`Alt+b`
- Moves the cursor backward one word.
* - :kbd:`Alt+f`
- Moves the cursor forward one word.
This feature is activated by :kconfig:option:`CONFIG_SHELL_METAKEYS` set to ``y``.
Getopt Feature
*****************
Some shell users, apart from subcommands, might need to use options as well.
In that case, the shell needs to parse the arguments string, looking for
supported options. Typically, this task is accomplished by the ``getopt``
family of functions.
For this purpose shell supports the getopt and getopt_long libraries available
in the FreeBSD project. This feature is activated by:
:kconfig:option:`CONFIG_POSIX_C_LIB_EXT` set to ``y`` and :kconfig:option:`CONFIG_GETOPT_LONG`
set to ``y``.
This feature can be used in a thread-safe as well as a non-thread-safe manner.
The former is fully compatible with regular ``getopt`` usage while the latter
differs a bit.
An example non-thread safe usage:
.. code-block:: c
char *cvalue = NULL;
int c;

while ((c = getopt(argc, argv, "abhc:")) != -1) {
switch (c) {
case 'c':
cvalue = optarg;
break;
default:
break;
}
}
An example thread safe usage:
.. code-block:: c
char *cvalue = NULL;
struct getopt_state *state;
int c;

while ((c = getopt(argc, argv, "abhc:")) != -1) {
state = getopt_state_get();
switch (c) {
case 'c':
cvalue = state->optarg;
break;
default:
break;
}
}
Thread safe getopt functionality is activated by
:kconfig:option:`CONFIG_SHELL_GETOPT` set to ``y``.
Obscured Input Feature
**********************
With the obscured input feature, the shell can be used for implementing a login
prompt or other user interaction whereby the characters the user types should
not be revealed on screen, such as when entering a password.
Once the obscured input has been accepted, it is normally desired to return the
shell to normal operation. Such runtime control is possible with the
``shell_obscure_set`` function.
An example of login and logout commands using this feature is located in
:zephyr_file:`samples/subsys/shell/shell_module/src/main.c` and the config file
:zephyr_file:`samples/subsys/shell/shell_module/prj_login.conf`.
This feature is activated upon startup by :kconfig:option:`CONFIG_SHELL_START_OBSCURED`
set to ``y``. With this set either way, the option can still be controlled later
at runtime. :kconfig:option:`CONFIG_SHELL_CMDS_SELECT` is useful to prevent entry
of any other command besides a login command, by means of the
``shell_set_root_cmd`` function. Likewise, :kconfig:option:`CONFIG_SHELL_PROMPT_UART`
allows you to set the prompt upon startup, but it can be changed later with the
``shell_prompt_change`` function.
Shell Logger Backend Feature
****************************
Shell instance can act as the :ref:`logging_api` backend. Shell ensures that log
messages are correctly multiplexed with shell output. Log messages from logger
thread are enqueued and processed in the shell thread. Logger thread will block
for configurable amount of time if queue is full, blocking logger thread context
for that time. Oldest log message is removed from the queue after timeout and
new message is enqueued. Use the ``shell stats show`` command to retrieve
number of log messages dropped by the shell instance. Log queue size and timeout
are :c:macro:`SHELL_DEFINE` arguments.
This feature is activated by: :kconfig:option:`CONFIG_SHELL_LOG_BACKEND` set to ``y``.
.. warning::
Enqueuing timeout must be set carefully when multiple backends are used
in the system. The shell instance could have a slow transport or could
block, for example, by a UART with hardware flow control. If timeout is
set too high, the logger thread could be blocked and impact other logger
backends.
.. warning::
As the shell is a complex logger backend, it can not output logs if
the application crashes before the shell thread is running. In this
situation, you can enable one of the simple logging backends instead,
such as UART (:kconfig:option:`CONFIG_LOG_BACKEND_UART`) or
RTT (:kconfig:option:`CONFIG_LOG_BACKEND_RTT`), which are available earlier
during system initialization.
RTT Backend Channel Selection
*****************************
Instead of using the shell as a logger backend, RTT shell backend and RTT log
backend can also be used simultaneously, but over different channels. By
separating them, the log can be captured or monitored without shell output or
the shell may be scripted without log interference. Enabling both the Shell RTT
backend and the Log RTT backend does not work by default, because both default
to channel ``0``. There are two options:
1. The Shell buffer can use an alternate channel, for example using
:kconfig:option:`CONFIG_SHELL_BACKEND_RTT_BUFFER` set to ``1``.
This allows monitoring the log using `JLinkRTTViewer
<path_to_url#j-link-rtt-viewer>`_
while a script interfaces over channel 1.
2. The Log buffer can use an alternate channel, for example using
:kconfig:option:`CONFIG_LOG_BACKEND_RTT_BUFFER` set to ``1``.
This allows interactive use of the shell through JLinkRTTViewer, while the log
is written to file.
See `shell backends <backends_>`_ for details on how to enable RTT as a Shell backend.
Usage
*****
The following code shows a simple use case of this library:
.. code-block:: c
int main(void)
{
}
static int cmd_demo_ping(const struct shell *sh, size_t argc,
char **argv)
{
ARG_UNUSED(argc);
ARG_UNUSED(argv);
shell_print(sh, "pong");
return 0;
}
static int cmd_demo_params(const struct shell *sh, size_t argc,
char **argv)
{
int cnt;
shell_print(sh, "argc = %d", argc);
for (cnt = 0; cnt < argc; cnt++) {
shell_print(sh, " argv[%d] = %s", cnt, argv[cnt]);
}
return 0;
}
/* Creating subcommands (level 1 command) array for command "demo". */
SHELL_STATIC_SUBCMD_SET_CREATE(sub_demo,
SHELL_CMD(params, NULL, "Print params command.",
cmd_demo_params),
SHELL_CMD(ping, NULL, "Ping command.", cmd_demo_ping),
SHELL_SUBCMD_SET_END
);
/* Creating root (level 0) command "demo" without a handler */
SHELL_CMD_REGISTER(demo, &sub_demo, "Demo commands", NULL);
/* Creating root (level 0) command "version" */
SHELL_CMD_REGISTER(version, NULL, "Show kernel version", cmd_version);
Users may use the :kbd:`Tab` key to complete a command/subcommand or to see the
available subcommands for the currently entered command level.
For example, when the cursor is positioned at the beginning of the command
line and the :kbd:`Tab` key is pressed, the user will see all root (level 0)
commands:
.. code-block:: none
clear demo shell history log resize version
.. note::
To view the subcommands that are available for a specific command, you
must first type a :kbd:`space` after this command and then hit
:kbd:`Tab`.
These commands are registered by various modules, for example:
* :command:`clear`, :command:`shell`, :command:`history`, and :command:`resize`
are built-in commands which have been registered by
:zephyr_file:`subsys/shell/shell.c`
* :command:`demo` and :command:`version` have been registered in example code
above by main.c
* :command:`log` has been registered by :zephyr_file:`subsys/logging/log_cmds.c`
Then, if a user types a :command:`demo` command and presses the :kbd:`Tab` key,
the shell will only print the subcommands registered for this command:
.. code-block:: none
params ping
API Reference
*************
.. doxygengroup:: shell_api
``` | /content/code_sandbox/doc/services/shell/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,806 |
```restructuredtext
.. _cs_trace_defmt:
ARM Coresight Trace Deformatter
###############################
The formatter is a method of wrapping multiple trace streams (specified by a 7-bit ID) into a
single output stream. The formatter uses 16-byte frames which wrap up to 15 bytes of
data. It is used, for example, by the ETR (Embedded Trace Router), which is a circular RAM
buffer where data from various trace streams can be saved. Typically tracing data is
decoded offline by the host, but the deformatter can be used on-chip to decode the data during
application runtime.
Usage
#####
The deformatter is initialized with a user callback. Data is decoded using
:c:func:`cs_trace_defmt_process` in 16-byte chunks. The callback is called whenever the stream
changes or the end of a chunk is reached. The callback contains the stream ID and the data.
API documentation
*****************
.. doxygengroup:: cs_trace_defmt
``` | /content/code_sandbox/doc/services/debugging/cs_trace_defmt.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 193 |
```restructuredtext
.. _debugging:
Debugging
#########
.. toctree::
:maxdepth: 1
thread-analyzer.rst
coredump.rst
gdbstub.rst
debugmon.rst
mipi_stp_decoder.rst
symtab.rst
cs_trace_defmt.rst
``` | /content/code_sandbox/doc/services/debugging/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 73 |
```restructuredtext
.. _gdbstub:
GDB stub
########
.. contents::
:local:
:depth: 2
Overview
********
The gdbstub feature provides an implementation of the GDB Remote
Serial Protocol (RSP) that allows you to remotely debug Zephyr
using GDB.
The protocol supports different connection types: serial, UDP/IP and
TCP/IP. Zephyr currently supports only serial device communication.
The GDB program acts as a client while the Zephyr gdbstub acts as a
server. When this feature is enabled, Zephyr stops its execution after
:c:func:`gdb_init` starts gdbstub service and waits for a GDB
connection. Once a connection is established it is possible to
synchronously interact with Zephyr. Note that currently it is not
possible to asynchronously send commands to the target.
Features
********
The following features are supported:
* Add and remove breakpoints
* Continue and step the target
* Print backtrace
* Read or write general registers
* Read or write the memory
Enabling GDB Stub
*****************
GDB stub can be enabled with the :kconfig:option:`CONFIG_GDBSTUB` option.
Using Serial Backend
====================
The serial backend for GDB stub can be enabled with
the :kconfig:option:`CONFIG_GDBSTUB_SERIAL_BACKEND` option.
Since serial backend utilizes UART devices to send and receive GDB commands,
* If there are spare UART devices on the board, set ``zephyr,gdbstub-uart``
property of the chosen node to the spare UART device so that :c:func:`printk`
and log messages are not being printed to the same UART device used for GDB.
* For boards with only one UART device, :c:func:`printk` and logging
must be disabled if they are also using the same UART device for output.
GDB related messages may interleave with log messages which may have
unintended consequences. Usually this can be done by disabling
:kconfig:option:`CONFIG_PRINTK` and :kconfig:option:`CONFIG_LOG`.
Debugging
*********
Using Serial Backend
====================
#. Build with GDB stub and serial backend enabled.
#. Flash built image onto board and reset the board.
* Execution should now be paused at :c:func:`gdb_init`.
#. Execute GDB on development machine and connect to the GDB stub.
.. code-block:: bash
target remote <serial device>
For example,
.. code-block:: bash
target remote /dev/ttyUSB1
#. GDB commands can be used to start debugging.
Example
*******
There is a test application :zephyr_file:`tests/subsys/debug/gdbstub` with one of its
test cases ``debug.gdbstub.breakpoints`` demonstrating how the Zephyr GDB stub can be used.
The test also has a case to connect to the QEMU's GDB stub implementation (at a custom
port ``tcp:1235``) as a reference to validate the test script itself.
Run the test with the following command from your :envvar:`ZEPHYR_BASE` directory:
.. code-block:: console
./scripts/twister -p qemu_x86 -T tests/subsys/debug/gdbstub
The test should run successfully, and now let's do something similar step-by-step
to demonstrate how the Zephyr GDB stub works from the GDB user's perspective.
In the snippets below use and expect your appropriate directories instead of
``<SDK install directory>``, ``<build_directory>``, ``<ZEPHYR_BASE>``.
#. Open two terminal windows.
#. On the first terminal, build and run the test application:
.. zephyr-app-commands::
:zephyr-app: tests/subsys/debug/gdbstub
:host-os: unix
:board: qemu_x86
:gen-args: '-DCONFIG_QEMU_EXTRA_FLAGS="-serial tcp:localhost:5678,server"'
:goals: build run
Note how we set :kconfig:option:`CONFIG_QEMU_EXTRA_FLAGS` to direct QEMU serial
console port to the ``localhost`` TCP port ``5678`` to wait for a connection
from the GDB ``remote`` command we are going to do on the next steps.
#. On the second terminal, start GDB:
.. code-block:: bash
<SDK install directory>/x86_64-zephyr-elf/bin/x86_64-zephyr-elf-gdb
#. Tell GDB where to look for the built ELF file:
.. code-block:: text
(gdb) symbol-file <build directory>/zephyr/zephyr.elf
Response from GDB:
.. code-block:: text
Reading symbols from <build directory>/zephyr/zephyr.elf...
#. Tell GDB to connect to the Zephyr gdbstub serial backend which is exposed
earlier as a server through the TCP port ``-serial`` redirection at QEMU.
.. code-block:: text
(gdb) target remote localhost:5678
Response from GDB:
.. code-block:: text
Remote debugging using localhost:5678
arch_gdb_init () at <ZEPHYR_BASE>/arch/x86/core/ia32/gdbstub.c:252
252 }
GDB also shows where the code execution is stopped. In this case,
it is at :zephyr_file:`arch/x86/core/ia32/gdbstub.c`, line 252.
#. Use command ``bt`` or ``backtrace`` to show the backtrace of stack frames.
.. code-block:: text
(gdb) bt
#0 arch_gdb_init () at <ZEPHYR_BASE>/arch/x86/core/ia32/gdbstub.c:252
#1 0x00104140 in gdb_init () at <ZEPHYR_BASE>/zephyr/subsys/debug/gdbstub.c:852
#2 0x00109c13 in z_sys_init_run_level (level=INIT_LEVEL_PRE_KERNEL_2) at <ZEPHYR_BASE>/kernel/init.c:360
#3 0x00109e73 in z_cstart () at <ZEPHYR_BASE>/kernel/init.c:630
#4 0x00104422 in z_prep_c (arg=0x1245bc <x86_cpu_boot_arg>) at <ZEPHYR_BASE>/arch/x86/core/prep_c.c:80
#5 0x001000c9 in __csSet () at <ZEPHYR_BASE>/arch/x86/core/ia32/crt0.S:290
#6 0x001245bc in uart_dev ()
#7 0x00134988 in z_interrupt_stacks ()
#8 0x00000000 in ?? ()
#. Use command ``list`` to show the source code and surroundings where
code execution is stopped.
.. code-block:: text
(gdb) list
247 __asm__ volatile ("int3");
248
249 #ifdef CONFIG_GDBSTUB_TRACE
250 printk("gdbstub:%s GDB is connected\n", __func__);
251 #endif
252 }
253
254 /* Hook current IDT. */
255 _EXCEPTION_CONNECT_NOCODE(z_gdb_debug_isr, IV_DEBUG, 3);
256 _EXCEPTION_CONNECT_NOCODE(z_gdb_break_isr, IV_BREAKPOINT, 3);
#. Use command ``s`` or ``step`` to step through program until it reaches
a different source line. Now that it finished executing :c:func:`arch_gdb_init`
and is continuing in :c:func:`gdb_init`.
.. code-block:: text
(gdb) s
gdb_init () at <ZEPHYR_BASE>/subsys/debug/gdbstub.c:857
857 return 0;
.. code-block:: text
(gdb) list
852 arch_gdb_init();
853
854 #ifdef CONFIG_GDBSTUB_TRACE
855 printk("gdbstub:%s exit\n", __func__);
856 #endif
857 return 0;
858 }
859
860 #ifdef CONFIG_XTENSA
861 /*
#. Use command ``br`` or ``break`` to setup a breakpoint. For this example
set up a breakpoint at :c:func:`main`, and let code execution continue
without any intervention using command ``c`` (or ``continue``).
.. code-block:: text
(gdb) break main
Breakpoint 1 at 0x10064d: file <ZEPHYR_BASE>/tests/subsys/debug/gdbstub/src/main.c, line 27.
.. code-block:: text
(gdb) continue
Continuing.
Once code execution reaches :c:func:`main`, execution will be stopped
and GDB prompt returns.
.. code-block:: text
Breakpoint 1, main () at <ZEPHYR_BASE>/tests/subsys/debug/gdbstub/src/main.c:27
27 printk("%s():enter\n", __func__);
Now GDB is waiting at the beginning of :c:func:`main`:
.. code-block:: text
(gdb) list
22
23 int main(void)
24 {
25 int ret;
26
27 printk("%s():enter\n", __func__);
28 ret = test();
29 printk("ret=%d\n", ret);
30 return 0;
31 }
#. To examine the value of ``ret``, the command ``p`` or ``print``
can be used.
.. code-block:: text
(gdb) p ret
$1 = 1273788
Since ``ret`` has not been initialized, it contains some random value.
#. If step (``s`` or ``step``) is used here, it will continue execution
skipping the interior of :c:func:`test`.
To examine code execution inside :c:func:`test`,
a breakpoint can be set for :c:func:`test`, or simply using
``si`` (or ``stepi``) to execute one machine instruction, where it has
the side effect of going into the function. The GDB command ``finish``
can be used to continue execution without intervention until the function
returns.
.. code-block:: text
(gdb) finish
Run till exit from #0 test () at <ZEPHYR_BASE>/tests/subsys/debug/gdbstub/src/main.c:17
0x00100667 in main () at <ZEPHYR_BASE>/tests/subsys/debug/gdbstub/src/main.c:28
28 ret = test();
Value returned is $2 = 30
#. Examine ``ret`` again which should have the return value from
:c:func:`test`. Sometimes, the assignment is not done until another
``step`` is issued, as in this case. This is due to the assignment
code is done after returning from function. The assignment code is
generated by the toolchain as machine instructions which are not
visible when viewing the corresponding C source file.
.. code-block:: text
(gdb) p ret
$3 = 1273788
(gdb) step
29 printk("ret=%d\n", ret);
(gdb) p ret
$4 = 30
#. If ``continue`` is issued here, code execution will continue indefinitely
as there are no breakpoints to further stop execution. Breaking execution
in GDB via :kbd:`Ctrl-C` does not currently work as the Zephyr gdbstub does
not support this functionality yet. Switch to the first console with QEMU
running the Zephyr image and stop it manually with :kbd:`Ctrl+a x`.
When the same test is executed by Twister, it automatically takes care of
stopping the QEMU instance.
``` | /content/code_sandbox/doc/services/debugging/gdbstub.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,701 |
```restructuredtext
.. _mipi_stp_decoder:
MIPI STP Decoder
################
The MIPI System Trace Protocol (MIPI STP) was developed as a generic base protocol that can
be shared by multiple application-specific trace protocols. It serves as a wrapper protocol
that merges disparate streams that typically contain different trace protocols from different
trace sources. A stream consists of an opcode (the shortest is 4 bits long) followed by optional
data and an optional timestamp. There are opcodes for data (8, 16, 32, 64 bit data marked/not
marked, with or without timestamp), stream recognition (master and channel), synchronization
(ASYNC opcode) and others.
One example where the protocol is used is ARM Coresight STM (System Trace Macrocell), where data
written to Stimulus Port registers maps directly to an STP stream.
This module can be used to perform on-chip decoding of the data stream. STP v2 is used.
Usage
*****
The decoder is initialized with a callback. The callback is called on each decoded opcode.
The decoder has internal state since there are dependencies between opcodes (e.g. a timestamp can
be relative). The decoder can be synchronized to the stream or not; the initial state is
configurable.
If the decoder is not synchronized to the stream, then it decodes each nibble in search of the
ASYNC opcode. Loss of synchronization can be indicated to the decoder by calling
:c:func:`mipi_stp_decoder_sync_loss`. :c:func:`mipi_stp_decoder_decode` is used to decode the data.
Limitations
***********
There are the following limitations:
* The decoder supports only little endian architectures.
* When decoding nibbles, it is more efficient when the core supports unaligned memory access.
The implementation supports an optimized version with unaligned memory access and a generic one.
The optimized version is used for ARM Cortex-M (except for M0).
* A limited set of the most common opcodes is implemented.
API documentation
*****************
.. doxygengroup:: mipi_stp_decoder_apis
``` | /content/code_sandbox/doc/services/debugging/mipi_stp_decoder.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 412 |
```restructuredtext
.. _thread_analyzer:
Thread analyzer
###################
The thread analyzer module enables all the Zephyr options required to track
thread information, e.g. thread stack size usage and other thread
runtime statistics.
The analysis is performed on demand when the application calls
:c:func:`thread_analyzer_run` or :c:func:`thread_analyzer_print`.
For example, to build the synchronization sample with the Thread Analyzer enabled,
do the following:
.. zephyr-app-commands::
:app: samples/synchronization/
:board: qemu_x86
:goals: build
:gen-args: -DCONFIG_QEMU_ICOUNT=n -DCONFIG_THREAD_ANALYZER=y \
-DCONFIG_THREAD_ANALYZER_USE_PRINTK=y -DCONFIG_THREAD_ANALYZER_AUTO=y \
-DCONFIG_THREAD_ANALYZER_AUTO_INTERVAL=5
When you run the generated application in QEMU, you will get the additional
information from Thread Analyzer::
thread_a: Hello World from cpu 0 on qemu_x86!
Thread analyze:
thread_b : STACK: unused 740 usage 284 / 1024 (27 %); CPU: 0 %
thread_analyzer : STACK: unused 8 usage 504 / 512 (98 %); CPU: 0 %
thread_a : STACK: unused 648 usage 376 / 1024 (36 %); CPU: 98 %
idle : STACK: unused 204 usage 116 / 320 (36 %); CPU: 0 %
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
Thread analyze:
thread_b : STACK: unused 648 usage 376 / 1024 (36 %); CPU: 7 %
thread_analyzer : STACK: unused 8 usage 504 / 512 (98 %); CPU: 0 %
thread_a : STACK: unused 648 usage 376 / 1024 (36 %); CPU: 9 %
idle : STACK: unused 204 usage 116 / 320 (36 %); CPU: 82 %
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
Thread analyze:
thread_b : STACK: unused 648 usage 376 / 1024 (36 %); CPU: 7 %
thread_analyzer : STACK: unused 8 usage 504 / 512 (98 %); CPU: 0 %
thread_a : STACK: unused 648 usage 376 / 1024 (36 %); CPU: 8 %
idle : STACK: unused 204 usage 116 / 320 (36 %); CPU: 83 %
thread_b: Hello World from cpu 0 on qemu_x86!
thread_a: Hello World from cpu 0 on qemu_x86!
thread_b: Hello World from cpu 0 on qemu_x86!
Configuration
*************
Configure this module using the following options.
* ``THREAD_ANALYZER``: enable the module.
* ``THREAD_ANALYZER_USE_PRINTK``: use printk for thread statistics.
* ``THREAD_ANALYZER_USE_LOG``: use the logger for thread statistics.
* ``THREAD_ANALYZER_AUTO``: run the thread analyzer automatically.
You do not need to add any code to the application when using this option.
* ``THREAD_ANALYZER_AUTO_INTERVAL``: the time for which the module sleeps
between consecutive printing of thread analysis in automatic mode.
* ``THREAD_ANALYZER_AUTO_STACK_SIZE``: the stack for thread analyzer
automatic thread.
* ``THREAD_NAME``: enable this option in the kernel to print the name of the
thread instead of its ID.
* ``THREAD_RUNTIME_STATS``: enable this option to print thread runtime data such
as utilization (this option is automatically selected by THREAD_ANALYZER).
API documentation
*****************
.. doxygengroup:: thread_analyzer
``` | /content/code_sandbox/doc/services/debugging/thread-analyzer.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,020 |
```unknown
# Doxyfile 1.12.0
# This file describes the settings to be used by the documentation system
# Doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#
# Note:
#
# Use Doxygen to compare the used configuration file with the template
# configuration file:
# doxygen -x [configFile]
# Use Doxygen to compare the used configuration file with the template
# configuration file without replacing the environment variables or CMake type
# replacement variables:
# doxygen -x_noenv [configFile]
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the configuration
# file that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "Zephyr API Documentation"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = @ZEPHYR_VERSION@
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF = "A Scalable Open Source RTOS"
# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
PROJECT_LOGO = @ZEPHYR_BASE@/doc/_doxygen/logo.svg
# With the PROJECT_ICON tag one can specify an icon that is included in the tabs
# when the HTML document is shown. Doxygen will copy the logo to the output
# directory.
PROJECT_ICON =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where Doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY = @DOXY_OUT@
# If the CREATE_SUBDIRS tag is set to YES then Doxygen will create up to 4096
# sub-directories (in 2 levels) under the output directory of each output format
# and will distribute the generated files over these directories. Enabling this
# option can be useful when feeding Doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise causes
# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to
# control the number of sub-directories.
# The default value is: NO.
CREATE_SUBDIRS = NO
# Controls the number of sub-directories that will be created when
# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every
# level increment doubles the number of directories, resulting in 4096
# directories at level 8 which is the default and also the maximum value. The
# sub-directories are organized in 2 levels, the first level always has a fixed
# number of 16 directories.
# Minimum value: 0, maximum value: 8, default value: 8.
# This tag requires that the tag CREATE_SUBDIRS is set to YES.
CREATE_SUBDIRS_LEVEL = 8
# If the ALLOW_UNICODE_NAMES tag is set to YES, Doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
# U+3044.
# The default value is: NO.
ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by Doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian,
# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English
# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek,
# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with
# English messages), Korean, Korean-en (Korean with English messages), Latvian,
# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese,
# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish,
# Swedish, Turkish, Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
# If the BRIEF_MEMBER_DESC tag is set to YES, Doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES, Doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
# The default value is: YES.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator that is
# used to form the text in various listings. Each string in this list, if found
# as the leading text of the brief description, will be stripped from the text
# and the result, after processing the whole list, is used as the annotated
# text. Otherwise, the brief description is used as-is. If left blank, the
# following values are used ($name is automatically replaced with the name of
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF = YES
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# Doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = YES
# If the INLINE_INHERITED_MEMB tag is set to YES, Doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = YES
# If the FULL_PATH_NAMES tag is set to YES, Doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
FULL_PATH_NAMES = YES
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which Doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where Doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH = @ZEPHYR_BASE@/include
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
# header file to include in order to use a class. If left blank only the name of
# the header file containing the class definition is used. Otherwise one should
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, Doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
# support long names like on DOS, Mac, or CD-ROM.
# The default value is: NO.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen will interpret the
# first line (until the first dot) of a Javadoc-style comment as the brief
# description. If set to NO, the Javadoc-style will behave just like regular Qt-
# style comments (thus requiring an explicit @brief command for a brief
# description.)
# The default value is: NO.
JAVADOC_AUTOBRIEF = YES
# If the JAVADOC_BANNER tag is set to YES then Doxygen will interpret a line
# such as
# /***************
# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
# Javadoc-style will behave just like regular comments and it will not be
# interpreted by Doxygen.
# The default value is: NO.
JAVADOC_BANNER = NO
# If the QT_AUTOBRIEF tag is set to YES then Doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
# requiring an explicit \brief command for a brief description.)
# The default value is: NO.
QT_AUTOBRIEF = YES
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen treat a
# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
# a brief description. This used to be the default behavior. The new default is
# to treat a multi-line C++ comment block as a detailed description. Set this
# tag to YES if you prefer the old behavior instead.
#
# Note that setting this tag to YES also means that rational rose comments are
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
# By default Python docstrings are displayed as preformatted text and Doxygen's
# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
# Doxygen's special commands can be used and the contents of the docstring
# documentation blocks is shown as Doxygen documentation.
# The default value is: YES.
PYTHON_DOCSTRING = YES
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES then Doxygen will produce a new
# page for each member. If set to NO, the documentation of a member will be part
# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 8
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:^^"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". Note that you cannot put \n's in the value part of an alias
# to insert newlines (in the resulting output). You can put ^^ in the value part
# of an alias to insert a newline as if a physical newline was in the original
# file. When you need a literal { or } or , in the value part of an alias you
# have to escape them by means of a backslash (\), this can lead to conflicts
# with the commands \{ and \} for these it is advised to use the version @{ and
# @} or use a double escape (\\{ and \\})
ALIASES = "rst=\verbatim embed:rst:leading-asterisk" \
endrst=\endverbatim \
"kconfig{1}=\htmlonly <code>\1</code> \endhtmlonly \xmlonly <verbatim>embed:rst:inline :kconfig:option:`\1`</verbatim> \endxmlonly" \
"req{1}=\ref ZEPH_\1 \"ZEPH-\1\"" \
"satisfy{1}=\xrefitem satisfy \"Satisfies requirement\" \"Requirement Implementation\" \1" \
"verify{1}=\xrefitem verify \"Verifies requirement\" \"Requirement Verification\" \1" \
"funcprops=\par \"Function properties (list may not be complete)\"" \
"reschedule=\htmlonly reschedule \endhtmlonly \xmlonly <verbatim>embed:rst:inline :ref:`api_term_reschedule`</verbatim> \endxmlonly" \
"sleep=\htmlonly sleep \endhtmlonly \xmlonly <verbatim>embed:rst:inline :ref:`api_term_sleep`</verbatim> \endxmlonly" \
"no_wait=\htmlonly no-wait \endhtmlonly \xmlonly <verbatim>embed:rst:inline :ref:`api_term_no-wait`</verbatim> \endxmlonly" \
"isr_ok=\htmlonly isr-ok \endhtmlonly \xmlonly <verbatim>embed:rst:inline :ref:`api_term_isr-ok`</verbatim> \endxmlonly" \
"pre_kernel_ok=\htmlonly pre-kernel-ok \endhtmlonly \xmlonly <verbatim>embed:rst:inline :ref:`api_term_pre-kernel-ok`</verbatim> \endxmlonly" \
"async=\htmlonly async \endhtmlonly \xmlonly <verbatim>embed:rst:inline :ref:`api_term_async`</verbatim> \endxmlonly" \
"atomic_api=As for all atomic APIs, includes a full/sequentially-consistent memory barrier (where applicable)." \
"supervisor=\htmlonly supervisor \endhtmlonly \xmlonly <verbatim>embed:rst:inline :ref:`api_term_supervisor`</verbatim> \endxmlonly"
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
# members will be omitted, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = YES
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
# for that language. For instance, namespaces will be presented as packages,
# qualified scopes will look different, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources. Doxygen will then generate output that is tailored for Fortran.
# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for VHDL.
# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
# sources only. Doxygen will then generate output that is more tailored for that
# language. For instance, namespaces will be presented as modules, types will be
# separated into more groups, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_SLICE = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by Doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice,
# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files). For instance to make Doxygen treat .inc files
# as Fortran files (default is PHP), and .f files as C (default is Fortran),
# use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by Doxygen. When specifying no_extension you should add
# * to the FILE_PATTERNS.
#
# Note see also the list of default file extension mappings.
EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then Doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See https://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by Doxygen, so you can
# mix Doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibility issues.
# The default value is: YES.
MARKDOWN_SUPPORT = YES
# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
# Minimum value: 0, maximum value: 99, default value: 6.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 5
# The MARKDOWN_ID_STYLE tag can be used to specify the algorithm used to
# generate identifiers for the Markdown headings. Note: Every identifier is
# unique.
# Possible values are: DOXYGEN use a fixed 'autotoc_md' string followed by a
# sequence number starting at 0 and GITHUB use the lower case version of title
# with any whitespace replaced by '-' and punctuation characters removed.
# The default value is: DOXYGEN.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
MARKDOWN_ID_STYLE = DOXYGEN
# When enabled Doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should set this
# tag to YES in order to let Doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string);
# versus func(std::string) {}). This also makes the inheritance and
# collaboration diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
# The default value is: NO.
CPP_CLI_SUPPORT = YES
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen will parse
# them like normal C++ but will assume all classes use public instead of private
# inheritance when no explicit protection keyword is present.
# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES will make
# Doxygen to replace the get and set methods by a property in the documentation.
# This will only work if the methods are indeed getting or setting a simple
# type. If this is not the case, or you want to show the methods anyway, you
# should set this option to NO.
# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES then Doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
# If one adds a struct or class to a group and this option is enabled, then also
# any nested class or struct is added to the same group. By default this option
# is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO.
GROUP_NESTED_COMPOUNDS = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# subgrouping. Alternatively, this can be done per class using the
# \nosubgrouping command.
# The default value is: YES.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
# are shown inside the group in which they are included (e.g. using \ingroup)
# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
# and RTF).
#
# Note that this feature does not work in combination with
# SEPARATE_MEMBER_PAGES.
# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
# with only public data fields or simple typedef fields will be shown inline in
# the documentation of the scope in which they are defined (i.e. file,
# namespace, or group documentation), provided this scope is documented. If set
# to NO, structs, classes, and unions are shown on a separate page (for HTML and
# Man pages) or section (for LaTeX and RTF).
# The default value is: NO.
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically be
# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
# The default value is: NO.
TYPEDEF_HIDES_STRUCT = NO
# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
# cache is used to resolve symbols given their name and scope. Since this can be
# an expensive process and often the same symbol appears multiple times in the
# code, Doxygen keeps a cache of pre-resolved symbols. If the cache is too small
# Doxygen will become slower. If the cache is too large, memory is wasted. The
# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
# symbols. At the end of a run Doxygen will report the cache usage and suggest
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 9
# The NUM_PROC_THREADS specifies the number of threads Doxygen is allowed to use
# during processing. When set to 0 Doxygen will based this on the number of
# cores available in the system. You can set it explicitly to a value larger
# than 0 to get more control over the balance between CPU load and processing
# speed. At this moment only the input processing can be done using multiple
# threads. Since this is still an experimental feature the default is set to 1,
# which effectively disables parallel processing. Please report any issues you
# encounter. Generating dot graphs in parallel is controlled by the
# DOT_NUM_THREADS setting.
# Minimum value: 0, maximum value: 32, default value: 1.
NUM_PROC_THREADS = 1
# If the TIMESTAMP tag is set different from NO then each generated page will
# contain the date or date and time when the page was generated. Setting this to
# NO can help when comparing the output of multiple runs.
# Possible values are: YES, NO, DATETIME and DATE.
# The default value is: NO.
TIMESTAMP = YES
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES, Doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
# methods of a class will be included in the documentation.
# The default value is: NO.
EXTRACT_PRIV_VIRTUAL = NO
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = YES
# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = YES
# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = YES
# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
EXTRACT_LOCAL_METHODS = YES
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base name of
# the file that contains the anonymous namespace. By default anonymous namespace
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
# If this flag is set to YES, the name of an unnamed parameter in a declaration
# will be determined by the corresponding definition. By default unnamed
# parameters remain unnamed in the output.
# The default value is: YES.
RESOLVE_UNNAMED_PARAMS = YES
# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
# section is generated. This option has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO, these classes will be included in the various overviews. This option
# will also hide undocumented C++ concepts if enabled. This option has no effect
# if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all friend
# declarations. If set to NO, these declarations will be included in the
# documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation that is typed after a
# \internal command is included. If the tag is set to NO then the documentation
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = NO
# With the correct setting of option CASE_SENSE_NAMES Doxygen will better be
# able to match the capabilities of the underlying filesystem. In case the
# filesystem is case sensitive (i.e. it supports files in the same directory
# whose names only differ in casing), the option must be set to YES to properly
# deal with such files in case they appear in the input. For filesystems that
# are not case sensitive the option should be set to NO to properly deal with
# output files written for symbols that only differ in casing, such as for two
# classes, one named CLASS and the other named Class, and to also support
# references to files without having to specify the exact matching casing. On
# Windows (including Cygwin) and macOS, users should typically set this option
# to NO, whereas on Linux or other Unix flavors it should typically be set to
# YES.
# Possible values are: SYSTEM, NO and YES.
# The default value is: SYSTEM.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO then Doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then Doxygen will
# append additional text to a page's title, such as Class Reference. If set to
# YES the compound reference will be hidden.
# The default value is: NO.
HIDE_COMPOUND_REFERENCE= NO
# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class
# will show which file needs to be included to use the class.
# The default value is: YES.
SHOW_HEADERFILE = YES
# If the SHOW_INCLUDE_FILES tag is set to YES then Doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
SHOW_INCLUDE_FILES = YES
# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
# grouped member an include statement to the documentation, telling the reader
# which file to include in order to use the member.
# The default value is: NO.
SHOW_GROUPED_MEMB_INC = YES
# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen will list include
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
# The default value is: YES.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then Doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then Doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then Doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
# destructors are listed first. If set to NO the constructors will appear in the
# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
# member documentation.
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then Doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = YES
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the alphabetical
# list.
# The default value is: NO.
SORT_BY_SCOPE_NAME = YES
# If the STRICT_PROTO_MATCHING option is enabled and Doxygen fails to do proper
# type resolution of all parameters of a function it will reject a match between
# the prototype and the implementation of a member function even if there is
# only one candidate or it is obvious which candidate to choose by doing a
# simple string match. By disabling STRICT_PROTO_MATCHING Doxygen will still
# accept a match between prototype and implementation in such cases.
# The default value is: NO.
STRICT_PROTO_MATCHING = YES
# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = NO
# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = NO
# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = NO
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS = YES
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \showinitializer or \hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 300
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES, the
# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# Doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by Doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by Doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents Doxygen's defaults, run Doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file. See also section "Changing the
# layout of pages" for information.
#
# Note that if you run Doxygen from a directory containing a file called
# DoxygenLayout.xml, Doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
# The EXTERNAL_TOOL_PATH tag can be used to extend the search path (PATH
# environment variable) so that external tools such as latex and gs can be
# found.
# Note: Directories specified with EXTERNAL_TOOL_PATH are added in front of the
# path already specified by the PATH variable, and are added in the order
# specified.
# Note: This option is particularly useful for macOS version 14 (Sonoma) and
# higher, when running Doxygen from Doxywizard, because in this case any user-
# defined changes to the PATH are ignored. A typical example on macOS is to set
# EXTERNAL_TOOL_PATH = /Library/TeX/texbin /usr/local/bin
# together with the standard path, the full search path used by doxygen when
# launching external tools will then become
# PATH=/Library/TeX/texbin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
EXTERNAL_TOOL_PATH =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by Doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = YES
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by Doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES then Doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, Doxygen will generate warnings for
# potential errors in the documentation, such as documenting some parameters in
# a documented function twice, or documenting parameters that don't exist or
# using markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# If WARN_IF_INCOMPLETE_DOC is set to YES, Doxygen will warn about incomplete
# function parameter documentation. If set to NO, Doxygen will accept that some
# parameters have no documentation without warning.
# The default value is: YES.
WARN_IF_INCOMPLETE_DOC = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, Doxygen will only warn about wrong parameter
# documentation, but not about the absence of documentation. If EXTRACT_ALL is
# set to YES then this flag will automatically be disabled. See also
# WARN_IF_INCOMPLETE_DOC
# The default value is: NO.
WARN_NO_PARAMDOC = NO
# If WARN_IF_UNDOC_ENUM_VAL option is set to YES, Doxygen will warn about
# undocumented enumeration values. If set to NO, Doxygen will accept
# undocumented enumeration values. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: NO.
WARN_IF_UNDOC_ENUM_VAL = NO
# If the WARN_AS_ERROR tag is set to YES then Doxygen will immediately stop when
# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
# then Doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
# at the end of the Doxygen process Doxygen will return with a non-zero status.
# If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS_PRINT then Doxygen behaves
# like FAIL_ON_WARNINGS but in case no WARN_LOGFILE is defined Doxygen will not
# write the warning messages in between other messages but write them at the end
# of a run, in case a WARN_LOGFILE is defined the warning messages will be
# besides being in the defined file also be shown at the end of a run, unless
# the WARN_LOGFILE is defined as - i.e. standard output (stdout) in that case
# the behavior will remain as with the setting FAIL_ON_WARNINGS.
# Possible values are: NO, YES, FAIL_ON_WARNINGS and FAIL_ON_WARNINGS_PRINT.
# The default value is: NO.
WARN_AS_ERROR = NO
# The WARN_FORMAT tag determines the format of the warning messages that Doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# See also: WARN_LINE_FORMAT
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# In the $text part of the WARN_FORMAT command it is possible that a reference
# to a more specific place is given. To make it easier to jump to this place
# (outside of Doxygen) the user can define a custom "cut" / "paste" string.
# Example:
# WARN_LINE_FORMAT = "'vi $file +$line'"
# See also: WARN_FORMAT
# The default value is: at line $line of file $file.
WARN_LINE_FORMAT = "at line $line of file $file"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr). In case the file specified cannot be opened for writing the
# warning and error messages are written to standard error. When as file - is
# specified the warning and error messages are written to standard output
# (stdout).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = @ZEPHYR_BASE@/doc/_doxygen/mainpage.md \
@ZEPHYR_BASE@/doc/_doxygen/groups.dox \
@ZEPHYR_BASE@/kernel/include/kernel_arch_interface.h \
@ZEPHYR_BASE@/include/zephyr/arch/cache.h \
@ZEPHYR_BASE@/include/zephyr/sys/atomic.h \
@ZEPHYR_BASE@/include/ \
@ZEPHYR_BASE@/lib/libc/minimal/include/ \
@ZEPHYR_BASE@/subsys/testsuite/include/ \
@ZEPHYR_BASE@/subsys/testsuite/ztest/include/
# This tag can be used to specify the character encoding of the source files
# that Doxygen parses. Internally Doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see:
# path_to_url for the list of possible encodings.
# See also: INPUT_FILE_ENCODING
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# This tag can be used to specify the character encoding of the source files
# that Doxygen parses The INPUT_FILE_ENCODING tag can be used to specify
# character encoding on a per file pattern basis. Doxygen will compare the file
# name with each pattern and apply the encoding instead of the default
# INPUT_ENCODING) if there is a match. The character encodings are a list of the
# form: pattern=encoding (like *.php=ISO-8859-1).
# See also: INPUT_ENCODING for further information on supported encodings.
INPUT_FILE_ENCODING =
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by Doxygen.
#
# Note the list of default checked file patterns might differ from the list of
# default file extension mappings.
#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cxxm,
# *.cpp, *.cppm, *.ccm, *.c++, *.c++m, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl,
# *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, *.h++, *.ixx, *.l, *.cs, *.d,
# *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to
# be provided as Doxygen C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
# *.f18, *.f, *.for, *.vhd, *.vhdl, *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.c \
*.h \
*.S \
*.md
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which Doxygen is
# run.
EXCLUDE = @ZEPHYR_BASE@/include/zephyr/portability/cmsis_os.h \
@ZEPHYR_BASE@/include/zephyr/portability/cmsis_os2.h
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# ANamespace::AClass, ANamespace::*Test
EXCLUDE_SYMBOLS = _* \
*.__unnamed__ \
z_* \
Z_*
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = YES
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that Doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
# <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
# Note that Doxygen will use the data processed and written to standard output
# for further processing, therefore nothing else, like debug statements or used
# commands (so in case of a Windows batch file always use @echo OFF), should be
# written to standard output.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by Doxygen.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by Doxygen.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the Doxygen output.
USE_MDFILE_AS_MAINPAGE = @ZEPHYR_BASE@/doc/_doxygen/mainpage.md
# The Fortran standard specifies that for fixed formatted Fortran code all
# characters from position 72 are to be considered as comment. A common
# extension is to allow longer lines before the automatic comment starts. The
# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can
# be processed before the automatic comment starts.
# Minimum value: 7, maximum value: 10000, default value: 72.
FORTRAN_COMMENT_AFTER = 72
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# multi-line macros, enums or list initialized variables directly into the
# documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct Doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# entity all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of Doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see path_to_url). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by Doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES then Doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
# If the CLANG_ASSISTED_PARSING tag is set to YES then Doxygen will use the
# clang parser (see:
# path_to_url) for more accurate parsing at the cost of reduced
# performance. This can be particularly helpful with template rich C++ code for
# which Doxygen's built-in parser lacks the necessary type information.
# Note: The availability of this option depends on whether or not Doxygen was
# generated with the -Duse_libclang=ON option for CMake.
# The default value is: NO.
CLANG_ASSISTED_PARSING = NO
# If the CLANG_ASSISTED_PARSING tag is set to YES and the CLANG_ADD_INC_PATHS
# tag is set to YES then Doxygen will add the directory of each input to the
# include path.
# The default value is: YES.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
CLANG_ADD_INC_PATHS = YES
# If clang assisted parsing is enabled you can provide the compiler with command
# line options that you would normally use when invoking the compiler. Note that
# the include paths will already be set by Doxygen for the files and directories
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
CLANG_OPTIONS =
# If clang assisted parsing is enabled you can provide the clang parser with the
# path to the directory containing a file called compile_commands.json. This
# file is the compilation database (see:
# path_to_url) containing the
# options used when the source files were built. This is equivalent to
# specifying the -p option to a clang tool, such as clang-check. These options
# will then be passed to the parser. Any options specified with CLANG_OPTIONS
# will be added as well.
# Note: The availability of this option depends on whether or not Doxygen was
# generated with the -Duse_libclang=ON option for CMake.
CLANG_DATABASE_PATH =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# The IGNORE_PREFIX tag can be used to specify a prefix (or a list of prefixes)
# that should be ignored while generating the index headers. The IGNORE_PREFIX
# tag works for classes, function and member names. The entity will be placed in
# the alphabetical list under the first letter of the entity name that remains
# after removing the prefix.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES, Doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
# each generated HTML page. If the tag is left blank Doxygen will generate a
# standard header.
#
# To get valid HTML the header file must include any scripts and style sheets
# that Doxygen needs, which is dependent on the configuration options used (e.g.
# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
# default header using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that Doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of Doxygen. For a description
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER = @ZEPHYR_BASE@/doc/_doxygen/header.html
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank Doxygen will generate a standard
# footer. See HTML_HEADER for more information on how to generate a default
# footer and what special commands can be used inside the footer. See also
# section "Doxygen usage" for information on how to generate the default footer
# that Doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
# the HTML output. If left blank Doxygen will generate a default style sheet.
# See also section "Doxygen usage" for information on how to generate the style
# sheet that Doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by Doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list).
# Note: Since the styling of scrollbars can currently not be overruled in
# Webkit/Chromium, the styling will be left out of the default doxygen.css if
# one or more extra stylesheets have been specified. So if scrollbar
# customization is desired it has to be added explicitly. For an example see the
# documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET = @ZEPHYR_BASE@/doc/_doxygen/doxygen-awesome.css \
@ZEPHYR_BASE@/doc/_doxygen/doxygen-awesome-sidebar-only.css \
@ZEPHYR_BASE@/doc/_doxygen/doxygen-awesome-sidebar-only-darkmode-toggle.css \
@ZEPHYR_BASE@/doc/_doxygen/doxygen-awesome-zephyr.css
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES = @ZEPHYR_BASE@/doc/_doxygen/doxygen-awesome-darkmode-toggle.js \
@ZEPHYR_BASE@/doc/_doxygen/doxygen-awesome-zephyr.js
# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output
# should be rendered with a dark or light theme.
# Possible values are: LIGHT always generates light mode output, DARK always
# generates dark mode output, AUTO_LIGHT automatically sets the mode according
# to the user preference, uses light mode if no preference is set (the default),
# AUTO_DARK automatically sets the mode according to the user preference, uses
# dark mode if no preference is set and TOGGLE allows a user to switch between
# light and dark mode via a button.
# The default value is: AUTO_LIGHT.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE = LIGHT
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a color-wheel, see
# path_to_url for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use gray-scales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80 represents
# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
# change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
# consists of multiple levels of tabs that are statically embedded in every HTML
# page. Disable this option to support browsers that do not have JavaScript,
# like the Qt help browser.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_MENUS = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = YES
# If the HTML_CODE_FOLDING tag is set to YES then classes and functions can be
# dynamically folded and expanded in the generated HTML source code.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_CODE_FOLDING = YES
# If the HTML_COPY_CLIPBOARD tag is set to YES then Doxygen will show an icon in
# the top right corner of code and text fragments that allows the user to copy
# its content to the clipboard. Note this only works if supported by the browser
# and the web page is served via a secure context (see:
# path_to_url), i.e. using the https: or file:
# protocol.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COPY_CLIPBOARD = YES
# Doxygen stores a couple of settings persistently in the browser (via e.g.
# cookies). By default these settings apply to all HTML pages generated by
# Doxygen across all projects. The HTML_PROJECT_COOKIE tag can be used to store
# the settings under a project specific key, such that the user preferences will
# be stored separately.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_PROJECT_COOKIE =
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible (unless
# a fully collapsed tree already exceeds this amount). So setting the number of
# entries 1 will produce a full collapsed tree by default. 0 is a special value
# representing an infinite number of entries and will result in a full expanded
# tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see:
# path_to_url) introduced with OSX 10.5 (Leopard). To
# create a documentation set, Doxygen will generate a Makefile in the HTML
# output directory. Running make will produce the docset in that directory and
# running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See path_to_url
# genXcode/_index.html for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = YES
# This tag determines the name of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag determines the URL of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDURL =
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then Doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# on Windows. In the beginning of 2021 Microsoft took the original page, with
# a.o. the download links, offline (the HTML help workshop was already many years
# in maintenance mode). You can download the HTML help workshop from the web
# archives at Installation executable (see:
# path_to_url
# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by Doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE = NO
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# Doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated
# (YES) or that it should be included in the main .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated
# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = YES
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# The SITEMAP_URL tag is used to specify the full URL of the place where the
# generated documentation will be placed on the server by the user during the
# deployment of the documentation. The generated sitemap is called sitemap.xml
# and placed on the directory specified by HTML_OUTPUT. In case no SITEMAP_URL
# is specified no sitemap is generated. For information about the sitemap
# protocol see path_to_url
# This tag requires that the tag GENERATE_HTML is set to YES.
SITEMAP_URL =
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
# (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see:
# path_to_url#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see:
# path_to_url#virtual-folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see:
# path_to_url#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see:
# path_to_url#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# path_to_url#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location (absolute path
# including file name) of Qt's qhelpgenerator. If non-empty Doxygen will try to
# run qhelpgenerator on the generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
# install this plugin and make it available under the help contents menu in
# Eclipse, the contents of the directory containing the HTML and XML files needs
# to be copied into the plugins directory of eclipse. The name of the directory
# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
# After copying Eclipse needs to be restarted before the help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
# If you want full control over the layout of the generated HTML pages it might
# be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
# of each HTML page. A value of NO enables the index and the value YES disables
# it. Since the tabs in the index contain the same information as the navigation
# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine tune the look of the index (see "Fine-tuning the output"). As an
# example, the default style sheet generated by Doxygen has an example that
# shows how to put an image at the root of the tree instead of the PROJECT_NAME.
# Since the tree basically has the same information as the tab index, you could
# consider setting DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = YES
# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
# FULL_SIDEBAR option determines if the side bar is limited to only the treeview
# area (value NO) or if it should extend to the full height of the window (value
# YES). Setting this to YES gives a layout similar to
# path_to_url with more room for contents, but less room for the
# project logo, title, and description. If either GENERATE_TREEVIEW or
# DISABLE_INDEX is set to NO, this option has no effect.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
FULL_SIDEBAR = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# Doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from appearing
# in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
# When the SHOW_ENUM_VALUES tag is set doxygen will show the specified
# enumeration values besides the enumeration mnemonics.
# The default value is: NO.
SHOW_ENUM_VALUES = NO
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 300
# If the EXT_LINKS_IN_WINDOW option is set to YES, Doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
# If the OBFUSCATE_EMAILS tag is set to YES, Doxygen will obfuscate email
# addresses.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
OBFUSCATE_EMAILS = YES
# If the HTML_FORMULA_FORMAT option is set to svg, Doxygen will use the pdf2svg
# tool (see path_to_url or inkscape (see
# path_to_url to generate formulas as SVG images instead of PNGs for
# the HTML output. These images will generally look nicer at scaled resolutions.
# Possible values are: png (the default) and svg (looks nicer but requires the
# pdf2svg or inkscape tool).
# The default value is: png.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FORMULA_FORMAT = png
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# Doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
FORMULA_MACROFILE =
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# path_to_url which uses client side JavaScript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
# Note that the different versions of MathJax have different requirements with
# regards to the different settings, so it is possible that also other MathJax
# settings have to be changed when switching between the different MathJax
# versions.
# Possible values are: MathJax_2 and MathJax_3.
# The default value is: MathJax_2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_VERSION = MathJax_2
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. For more details about the output format see MathJax
# version 2 (see:
# path_to_url and MathJax version 3
# (see:
# path_to_url
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility. This is the name for Mathjax version 2, for MathJax version 3
# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
# is the name for Mathjax version 3, for MathJax version 2 this will be
# translated into HTML-CSS) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from path_to_url before deployment. The default value is:
# - in case of MathJax version 2: path_to_url
# - in case of MathJax version 3: path_to_url
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = path_to_url
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# for MathJax version 2 (see
# path_to_url#tex-and-latex-extensions):
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# For example for MathJax version 3 (see
# path_to_url
# MATHJAX_EXTENSIONS = ams
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with JavaScript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see:
# path_to_url for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled Doxygen will generate a search box for
# the HTML output. The underlying search engine uses JavaScript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the JavaScript based search engine can be slow, then
# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
# search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
# key> to jump into the search results window, the results can be navigated
# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
# the search. The filter options can be selected when the cursor is inside the
# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
# to select a filter and <Enter> or <escape> to activate or cancel the filter
# option.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using JavaScript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, Doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
# and searching needs to be provided by external tools. See the section
# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
# When EXTERNAL_SEARCH tag is enabled Doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
# which needs to be processed by an external indexer. Doxygen will invoke an
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see:
# path_to_url
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see:
# path_to_url See the section "External Indexing and Searching" for
# details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
# SEARCHDATA_FILE tag the name of this file can be specified.
# The default file is: searchdata.xml.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHDATA_FILE = searchdata.xml
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
# projects and redirect the results back to the right project.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH_ID =
# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through Doxygen
# projects other than the one defined by this configuration file, but that are
# all added to the same external search index. Each project needs to have a
# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
# to a relative location where the documentation can be found. The format is:
# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTRA_SEARCH_MAPPINGS =
#your_sha256_hash-----------
# Configuration options related to the LaTeX output
#your_sha256_hash-----------
# If the GENERATE_LATEX tag is set to YES, Doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when not enabling USE_PDFLATEX the default is latex when enabling
# USE_PDFLATEX the default is pdflatex and when in the later case latex is
# chosen this is overwritten by pdflatex. For specific output languages the
# default can have been set differently, this depends on the implementation of
# the output language.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# Note: This tag is used in the Makefile / make.bat.
# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
# (.tex).
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
# generate index for LaTeX. In case there is no backslash (\) as first character
# it will be automatically added in the LaTeX code.
# Note: This tag is used in the generated output file (.tex).
# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
# The default value is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_MAKEINDEX_CMD = makeindex
# If the COMPACT_LATEX tag is set to YES, Doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used by the
# printer.
# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
# 14 inches) and executive (7.25 x 10.5 inches).
# The default value is: a4.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. The package can be specified just
# by its name or with the correct syntax as to be used with the LaTeX
# \usepackage command. To get the times font for instance you can specify :
# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
# To use the option intlimits with the amsmath package you can specify:
# EXTRA_PACKAGES=[intlimits]{amsmath}
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for
# the generated LaTeX document. The header should contain everything until the
# first chapter. If it is left blank Doxygen will generate a standard header. It
# is highly recommended to start with a default header using
# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty
# and then modify the file new_header.tex. See also section "Doxygen usage" for
# information on how to generate the default header that Doxygen normally uses.
#
# Note: Only use a user-defined header if you know what you are doing!
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of Doxygen. The following
# commands have a special meaning inside the header (and footer): For a
# description of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for
# the generated LaTeX document. The footer should contain everything after the
# last chapter. If it is left blank Doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer. See also section "Doxygen
# usage" for information on how to generate the default footer that Doxygen
# normally uses. Note: Only use a user-defined footer if you know what you are
# doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# LaTeX style sheets that are included after the standard style sheets created
# by Doxygen. Using this option one can overrule certain style aspects. Doxygen
# will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list).
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_STYLESHEET =
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
# markers available.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_FILES =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
# contain links (just like the HTML output) instead of page references. This
# makes the output suitable for online browsing using a PDF viewer.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, Doxygen will use the engine as
# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
# files. Set this option to YES, to get a higher quality PDF documentation.
#
# See also section LATEX_CMD_NAME for selecting the engine.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = YES
# The LATEX_BATCHMODE tag signals the behavior of LaTeX in case of an error.
# Possible values are: NO same as ERROR_STOP, YES same as BATCH, BATCH In batch
# mode nothing is printed on the terminal, errors are scrolled as if <return> is
# hit at every error; missing files that TeX tries to input or request from
# keyboard input (\read on a not open input stream) cause the job to abort,
# NON_STOP In nonstop mode the diagnostic message will appear on the terminal,
# but there is no possibility of user interaction just like in batch mode,
# SCROLL In scroll mode, TeX will stop only for missing files to input or if
# keyboard input is necessary and ERROR_STOP In errorstop mode, TeX will stop at
# each error, asking for user intervention.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BATCHMODE = NO
# If the LATEX_HIDE_INDICES tag is set to YES then Doxygen will not include the
# index chapters (such as File Index, Compound Index, etc.) in the output.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HIDE_INDICES = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# path_to_url and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
# LATEX_OUTPUT directory will be used.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EMOJI_DIRECTORY =
#your_sha256_hash-----------
# Configuration options related to the RTF output
#your_sha256_hash-----------
# If the GENERATE_RTF tag is set to YES, Doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: rtf.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES, Doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
# contain hyperlink fields. The RTF file will contain links (just like the HTML
# output) instead of page references. This makes the output suitable for online
# browsing using Word or some other Word compatible readers that support those
# fields.
#
# Note: WordPad (write) and others do not support links.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_HYPERLINKS = YES
# Load stylesheet definitions from file. Syntax is similar to Doxygen's
# configuration file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that Doxygen normally uses.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to Doxygen's configuration file. A template extensions file can be
# generated using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
# The RTF_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the RTF_OUTPUT output directory.
# Note that the files will be copied as-is; there are no commands or markers
# available.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTRA_FILES =
#your_sha256_hash-----------
# Configuration options related to the man page output
#your_sha256_hash-----------
# If the GENERATE_MAN tag is set to YES, Doxygen will generate man pages for
# classes and files.
# The default value is: NO.
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it. A directory man3 will be created inside the directory specified by
# MAN_OUTPUT.
# The default directory is: man.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to the generated
# man pages. In case the manual section does not start with a number, the number
# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
# optional.
# The default value is: .3.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_EXTENSION = .3
# The MAN_SUBDIR tag determines the name of the directory created within
# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
# MAN_EXTENSION with the initial . removed.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_SUBDIR =
# If the MAN_LINKS tag is set to YES and Doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
# them the man command would be unable to find the correct page.
# The default value is: NO.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = NO
#your_sha256_hash-----------
# Configuration options related to the XML output
#your_sha256_hash-----------
# If the GENERATE_XML tag is set to YES, Doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
GENERATE_XML = YES
# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: xml.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_OUTPUT = xml
# If the XML_PROGRAMLISTING tag is set to YES, Doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
# The default value is: YES.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_PROGRAMLISTING = YES
# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, Doxygen will include
# namespace members in file scope as well, matching the HTML output.
# The default value is: NO.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_NS_MEMB_FILE_SCOPE = NO
#your_sha256_hash-----------
# Configuration options related to the DOCBOOK output
#your_sha256_hash-----------
# If the GENERATE_DOCBOOK tag is set to YES, Doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
GENERATE_DOCBOOK = NO
# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
# front of it.
# The default directory is: docbook.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_OUTPUT = docbook
#your_sha256_hash-----------
# Configuration options for the AutoGen Definitions output
#your_sha256_hash-----------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, Doxygen will generate an
# AutoGen Definitions (see path_to_url file that captures
# the structure of the code including all documentation. Note that this feature
# is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
#your_sha256_hash-----------
# Configuration options related to Sqlite3 output
#your_sha256_hash-----------
# If the GENERATE_SQLITE3 tag is set to YES Doxygen will generate a Sqlite3
# database with symbols found by Doxygen stored in tables.
# The default value is: NO.
GENERATE_SQLITE3 = NO
# The SQLITE3_OUTPUT tag is used to specify where the Sqlite3 database will be
# put. If a relative path is entered the value of OUTPUT_DIRECTORY will be put
# in front of it.
# The default directory is: sqlite3.
# This tag requires that the tag GENERATE_SQLITE3 is set to YES.
SQLITE3_OUTPUT = sqlite3
# If the SQLITE3_RECREATE_DB tag is set to YES, the existing doxygen_sqlite3.db
# database file will be recreated with each Doxygen run. If set to NO, Doxygen
# will warn if a database file is already found and not modify it.
# The default value is: YES.
# This tag requires that the tag GENERATE_SQLITE3 is set to YES.
SQLITE3_RECREATE_DB = YES
#your_sha256_hash-----------
# Configuration options related to the Perl module output
#your_sha256_hash-----------
# If the GENERATE_PERLMOD tag is set to YES, Doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES, Doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
# understand what is going on. On the other hand, if this tag is set to NO, the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file are
# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
# so different doxyrules.make files included by the same Makefile don't
# overwrite each other's variables.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
#your_sha256_hash-----------
# Configuration options related to the preprocessor
#your_sha256_hash-----------
# If the ENABLE_PREPROCESSING tag is set to YES, Doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES, Doxygen will expand all macro names
# in the source code. If set to NO, only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = YES
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
# the macro expansion is limited to the macros specified with the PREDEFINED and
# EXPAND_AS_DEFINED tags.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by the
# preprocessor. Note that the INCLUDE_PATH is not recursive, so the setting of
# RECURSIVE has no effect here.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will be
# used.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that are
# defined before the preprocessor is started (similar to the -D option of e.g.
# gcc). The argument of the tag is a list of macros of the form: name or
# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
# is assumed. To prevent a macro definition from being undefined via #undef or
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED = __DOXYGEN__ \
CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT \
CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN \
CONFIG_BT_CLASSIC \
CONFIG_BT_EATT \
CONFIG_BT_L2CAP_SEG_RECV \
CONFIG_BT_MESH_MODEL_EXTENSIONS \
CONFIG_BT_REMOTE_INFO \
CONFIG_BT_USER_DATA_LEN_UPDATE \
CONFIG_BT_USER_PHY_UPDATE \
CONFIG_BT_SMP \
CONFIG_BT_SMP_APP_PAIRING_ACCEPT \
CONFIG_CBPRINTF_LIBC_SUBSTS \
CONFIG_ERRNO \
CONFIG_FLASH_JESD216_API \
CONFIG_FLASH_PAGE_LAYOUT \
CONFIG_FP16 \
CONFIG_FPU \
CONFIG_FPU_SHARING \
CONFIG_GDBSTUB \
CONFIG_HEAP_MEM_POOL_SIZE \
CONFIG_MMU \
CONFIG_NET_L2_ETHERNET_MGMT \
CONFIG_NET_L2_IEEE802154_MGMT \
CONFIG_NET_L2_IEEE802154_SECURITY \
CONFIG_NET_MGMT_EVENT \
CONFIG_NET_SOCKETS_POSIX_NAMES \
CONFIG_NET_TCP \
CONFIG_NET_UDP \
CONFIG_SCHED_CPU_MASK \
CONFIG_SCHED_DEADLINE \
CONFIG_SETTINGS_RUNTIME \
CONFIG_SMP \
CONFIG_SYS_CLOCK_EXISTS \
CONFIG_THREAD_CUSTOM_DATA \
CONFIG_THREAD_MONITOR \
CONFIG_THREAD_STACK_INFO \
CONFIG_TIMING_FUNCTIONS \
CONFIG_UART_DRV_CMD \
CONFIG_UART_INTERRUPT_DRIVEN \
CONFIG_UART_ASYNC_API \
CONFIG_USERSPACE \
CONFIG_USE_SWITCH \
NET_MGMT_DEFINE_REQUEST_HANDLER(x)= \
DEVICE_DEFINE()= \
BUILD_ASSERT()= \
XEN_GUEST_HANDLE_64(x)= \
_LINKER \
__deprecated= \
__sparse_cache= \
__packed= \
__aligned(x)= \
__attribute_nonnull(...)= \
"__printf_like(x, y)=" \
__attribute__(x)= \
__syscall= \
__syscall_always_inline= \
__must_check= \
"ATOMIC_DEFINE(x, y)=atomic_t x[ATOMIC_BITMAP_SIZE(y)]" \
"ZTEST(suite, fn)=void fn(void)" \
"ZTEST_USER(suite, fn)=void fn(void)" \
"ZTEST_USER_F(suite, fn)=void fn(void)" \
"ZTEST_F(suite, fn)=void fn(void)"
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
# macro definition that is found in the sources will be used. Use the PREDEFINED
# tag if you want to use a different macro definition that overrules the
# definition found in the source code.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then Doxygen's preprocessor will
# remove all references to function-like macros that are alone on a line, have
# an all uppercase name, and do not end with a semicolon. Such function macros
# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SKIP_FUNCTION_MACROS = NO
#your_sha256_hash-----------
# Configuration options related to external references
#your_sha256_hash-----------
# The TAGFILES tag can be used to specify one or more tag files. For each tag
# file the location of the external documentation should be added. The format of
# a tag file without this location is as follows:
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which Doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, Doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE = @DOXY_OUT@/html/zephyr.tag
# If the ALLEXTERNALS tag is set to YES, all external classes and namespaces
# will be listed in the class and namespace index. If set to NO, only the
# inherited external classes will be listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
# in the topic index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = YES
#your_sha256_hash-----------
# Configuration options related to diagram generator tools
#your_sha256_hash-----------
# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then Doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
# path_to_url a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
HAVE_DOT = NO
# The DOT_NUM_THREADS specifies the number of dot invocations Doxygen is allowed
# to run in parallel. When set to 0 Doxygen will base this on the number of
# processors available in the system. You can set it explicitly to a value
# larger than 0 to get control over the balance between CPU load and processing
# speed.
# Minimum value: 0, maximum value: 32, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
# DOT_COMMON_ATTR is common attributes for nodes, edges and labels of
# subgraphs. When you want a differently looking font in the dot files that
# Doxygen generates you can specify fontname, fontcolor and fontsize attributes.
# For details please see <a href=path_to_url
# Edge and Graph Attributes specification</a> You need to make sure dot is able
# to find the font, which can be done by putting it in a standard location or by
# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
# directory containing the font. Default graphviz fontsize is 14.
# The default value is: fontname=Helvetica,fontsize=10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10"
# DOT_EDGE_ATTR is concatenated with DOT_COMMON_ATTR. For elegant style you can
# add 'arrowhead=open, arrowtail=open, arrowsize=0.5'. <a
# href=path_to_url documentation about
# arrows shapes.</a>
# The default value is: labelfontname=Helvetica,labelfontsize=10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10"
# DOT_NODE_ATTR is concatenated with DOT_COMMON_ATTR. For view without boxes
# around nodes set 'shape=plain' or 'shape=plaintext' <a
# href=path_to_url specification</a>
# The default value is: shape=box,height=0.2,width=0.4.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4"
# You can set the path where dot can find font specified with fontname in
# DOT_COMMON_ATTR and others dot attributes.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES or GRAPH or BUILTIN then Doxygen will
# generate a graph for each documented class showing the direct and indirect
# inheritance relations. In case the CLASS_GRAPH tag is set to YES or GRAPH and
# HAVE_DOT is enabled as well, then dot will be used to draw the graph. In case
# the CLASS_GRAPH tag is set to YES and HAVE_DOT is disabled or if the
# CLASS_GRAPH tag is set to BUILTIN, then the built-in generator will be used.
# If the CLASS_GRAPH tag is set to TEXT the direct and indirect inheritance
# relations will be shown as texts / links. Explicit enabling an inheritance
# graph or choosing a different representation for an inheritance graph of a
# specific class, can be accomplished by means of the command \inheritancegraph.
# Disabling an inheritance graph can be accomplished by means of the command
# \hideinheritancegraph.
# Possible values are: NO, YES, TEXT, GRAPH and BUILTIN.
# The default value is: YES.
CLASS_GRAPH = TEXT
# If the COLLABORATION_GRAPH tag is set to YES then Doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
# class with other documented classes. Explicit enabling a collaboration graph,
# when COLLABORATION_GRAPH is set to NO, can be accomplished by means of the
# command \collaborationgraph. Disabling a collaboration graph can be
# accomplished by means of the command \hidecollaborationgraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then Doxygen will generate a graph for
# groups, showing the direct groups dependencies. Explicit enabling a group
# dependency graph, when GROUP_GRAPHS is set to NO, can be accomplished by means
# of the command \groupgraph. Disabling a directory graph can be accomplished by
# means of the command \hidegroupgraph. See also the chapter Grouping in the
# manual.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES, Doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = NO
# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
# class node. If there are many fields or methods and many nodes the graph may
# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
# number of items for each type to make the size more manageable. Set this to 0
# for no limit. Note that the threshold may be exceeded by 50% before the limit
# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
# This tag requires that the tag UML_LOOK is set to YES.
UML_LIMIT_NUM_FIELDS = 10
# If the DOT_UML_DETAILS tag is set to NO, Doxygen will show attributes and
# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
# tag is set to YES, Doxygen will add type and arguments for attributes and
# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, Doxygen
# will not generate fields with class member information in the UML graphs. The
# class diagrams will look similar to the default class diagrams but using UML
# notation for the relationships.
# Possible values are: NO, YES and NONE.
# The default value is: NO.
# This tag requires that the tag UML_LOOK is set to YES.
DOT_UML_DETAILS = NO
# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
# to display on a single line. If the actual line length exceeds this threshold
# significantly it will be wrapped across multiple lines. Some heuristics are
# applied to avoid ugly line breaks.
# Minimum value: 0, maximum value: 1000, default value: 17.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_WRAP_THRESHOLD = 17
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then Doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
# files. Explicit enabling an include graph, when INCLUDE_GRAPH is set to NO,
# can be accomplished by means of the command \includegraph. Disabling an
# include graph can be accomplished by means of the command \hideincludegraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then Doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
# files. Explicit enabling an included by graph, when INCLUDED_BY_GRAPH is set
# to NO, can be accomplished by means of the command \includedbygraph. Disabling
# an included by graph can be accomplished by means of the command
# \hideincludedbygraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH tag is set to YES then Doxygen will generate a call
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command. Disabling a call graph can be
# accomplished by means of the command \hidecallgraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALL_GRAPH = NO
# If the CALLER_GRAPH tag is set to YES then Doxygen will generate a caller
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command. Disabling a caller graph can be
# accomplished by means of the command \hidecallergraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY tag is set to YES then Doxygen will graphical
# hierarchy of all classes instead of a textual one.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then Doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
# files in the directories. Explicit enabling a directory graph, when
# DIRECTORY_GRAPH is set to NO, can be accomplished by means of the command
# \directorygraph. Disabling a directory graph can be accomplished by means of
# the command \hidedirectorygraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels
# of child directories generated in directory dependency graphs by dot.
# Minimum value: 1, maximum value: 25, default value: 1.
# This tag requires that the tag DIRECTORY_GRAPH is set to YES.
DIR_GRAPH_MAX_DEPTH = 1
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
# path_to_url
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
#
# Note that this requires a modern browser other than Internet Explorer. Tested
# and working are Firefox, Chrome, Safari, and Opera.
# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
# the SVG files visible. Older versions of IE do not have SVG support.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
# command).
# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
# You can include diagrams made with dia in Doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
# If left empty dia is assumed to be found in the default search path.
DIA_PATH =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
# command).
DIAFILE_DIRS =
# When using PlantUML, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file or to the filename of jar file
# to be used. If left blank, it is assumed PlantUML is not used or called during
# a preprocessing step. Doxygen will generate a warning when it encounters a
# \startuml command in this case and will not generate output for the diagram.
PLANTUML_JAR_PATH =
# When using PlantUML, the PLANTUML_CFG_FILE tag can be used to specify a
# configuration file for PlantUML.
PLANTUML_CFG_FILE =
# When using PlantUML, the specified paths are searched for files specified by
# the !include statement in a PlantUML block.
PLANTUML_INCLUDE_PATH =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, Doxygen will truncate the graph, which is visualized
# by representing a node as a red box. Note that if the number of direct
# children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
# Minimum value: 0, maximum value: 10000, default value: 50.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
# generated by dot. A depth value of 3 means that only nodes reachable from the
# root by following a path via at most 3 edges will be shown. Nodes that lay
# further from the root node will be omitted. Note that setting this option to 1
# or 2 may greatly reduce the computation time needed for large code bases. Also
# note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
# Minimum value: 0, maximum value: 1000, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = NO
# If the GENERATE_LEGEND tag is set to YES Doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# Note: This tag requires that UML_LOOK isn't set, i.e. the Doxygen internal
# graphical representation for inheritance and collaboration diagrams is used.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES, Doxygen will remove the intermediate
# files that are used to generate the various graphs.
#
# Note: This setting is not only used for dot files but also for msc temporary
# files.
# The default value is: YES.
DOT_CLEANUP = YES
# You can define message sequence charts within Doxygen comments using the \msc
# command. If the MSCGEN_TOOL tag is left empty (the default), then Doxygen will
# use a built-in version of mscgen tool to produce the charts. Alternatively,
# the MSCGEN_TOOL tag can also specify the name an external tool. For instance,
# specifying prog as the value, Doxygen will call the tool as prog -T
# <outfile_format> -o <outputfile> <inputfile>. The external tool should support
# output file formats "png", "eps", "svg", and "ismap".
MSCGEN_TOOL =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the \mscfile
# command).
MSCFILE_DIRS =
@INCLUDE_CUSTOM_FILE@
``` | /content/code_sandbox/doc/zephyr.doxyfile.in | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 31,526 |
```restructuredtext
.. _symtab:
Symbol Table (Symtab)
#####################
The Symtab module, when enabled, will generate a full symbol table during the Zephyr linking
stage that keeps track of the names and addresses of the functions. For advanced applications
with a lot of functions, this is expected to consume a sizable amount of ROM.
Currently, this is being used to look up the function names during a stack trace in supported architectures.
Usage
*****
Applications can gain access to the symbol table data structure by including the :file:`symtab.h` header
file and calling :c:func:`symtab_get`. For now, we only provide the :c:func:`symtab_find_symbol_name`
function to look up the symbol name and offset of an address. More advanced functionality can be
achieved by directly accessing the members of the data structure.
Configuration
*************
Configure this module using the following options.
* :kconfig:option:`CONFIG_SYMTAB`: enable the generation of the symbol table.
API documentation
*****************
.. doxygengroup:: symtab_apis
``` | /content/code_sandbox/doc/services/debugging/symtab.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 225 |
```restructuredtext
.. _debugmon:
Cortex-M Debug Monitor
######################
Monitor mode debugging is a Cortex-M feature, that provides a non-halting approach to
debugging. With this it's possible to continue the execution of high-priority interrupts,
even when waiting on a breakpoint.
This strategy makes it possible to debug time-sensitive software, that would
otherwise crash when the core halts (e.g. applications that need to keep
communication links alive).
Zephyr provides support for enabling and configuring the Debug Monitor exception.
It also contains a ready implementation of the interrupt, which can be used with
SEGGER J-Link debuggers.
Configuration
*************
Configure this module using the following options.
* :kconfig:option:`CONFIG_CORTEX_M_DEBUG_MONITOR_HOOK`: enable the module. This option, by itself,
requires an implementation of debug monitor interrupt that will be executed
every time the program enters a breakpoint.
With a SEGGER debug probe, it's possible to use a ready, SEGGER-provided implementation
of the interrupt.
* :kconfig:option:`CONFIG_SEGGER_DEBUGMON`: enables SEGGER debug monitor interrupt. Can be
used with SEGGER JLinkGDBServer and a SEGGER debug probe.
Usage
*****
When monitor mode debugging is enabled, entering a breakpoint will not halt the
processor, but rather generate an interrupt with ISR implemented under
``z_arm_debug_monitor`` symbol. The :kconfig:option:`CONFIG_CORTEX_M_DEBUG_MONITOR_HOOK` option configures this interrupt
to be the lowest available priority, which will allow other interrupts to execute
while processor spins on a breakpoint.
Using SEGGER-provided ISR
=========================
The ready implementation provided with :kconfig:option:`CONFIG_SEGGER_DEBUGMON` provides functionality
required to debug in the monitor mode using regular GDB commands.
Steps to configure SEGGER debug monitor:
1. Build a sample with :kconfig:option:`CONFIG_CORTEX_M_DEBUG_MONITOR_HOOK` and :kconfig:option:`CONFIG_SEGGER_DEBUGMON`
configs enabled.
2. Attach JLink GDB server to the target.
Example linux command: ``JLinkGDBServerCLExe -device <device> -if swd``.
3. Connect to the server with your GDB installation.
Example linux command: ``arm-none-eabi-gdb --ex="file build/zephyr.elf" --ex="target remote localhost:2331"``.
4. Enable monitor mode debugging in GDB using command: ``monitor exec SetMonModeDebug=1``.
After these steps use regular gdb commands to debug your program.
Using other custom ISR
======================
In order to provide a custom debug monitor interrupt, override ``z_arm_debug_monitor``
symbol. Additionally, manual configuration of some registers is required
(see :ref:`debug monitor sample<debugmon-sample>`).
``` | /content/code_sandbox/doc/services/debugging/debugmon.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 594 |
```restructuredtext
.. _profiling:
Profiling
#########
Required Kconfig: :kconfig:option:`CONFIG_PROFILING`
.. toctree::
:maxdepth: 1
perf.rst
``` | /content/code_sandbox/doc/services/profiling/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 43 |
```restructuredtext
.. _coredump:
Core Dump
#########
The core dump module enables dumping the CPU registers and memory content
for offline debugging. This module is called when a fatal error is
encountered and prints or stores data according to which backends
are enabled.
Configuration
*************
Configure this module using the following options.
* ``DEBUG_COREDUMP``: enable the module.
Here are the options to enable output backends for core dump:
* ``DEBUG_COREDUMP_BACKEND_LOGGING``: use log module for core dump output.
* ``DEBUG_COREDUMP_BACKEND_FLASH_PARTITION``: use flash partition for core
dump output.
* ``DEBUG_COREDUMP_BACKEND_NULL``: fallback core dump backend if other
backends cannot be enabled. All output is sent to null.
Here are the choices regarding memory dump:
* ``DEBUG_COREDUMP_MEMORY_DUMP_MIN``: only dumps the stack of the exception
thread, its thread struct, and some other bare minimal data to support
walking the stack in the debugger. Use this only if absolute minimum of data
dump is desired.
* ``DEBUG_COREDUMP_MEMORY_DUMP_THREADS``: Dumps the thread struct and stack of all
threads and all data required to debug threads.
* ``DEBUG_COREDUMP_MEMORY_DUMP_LINKER_RAM``: Dumps the memory region between
_image_ram_start[] and _image_ram_end[]. This includes at least data, noinit,
and BSS sections. This is the default.
Additional memory can be included in a dump (even with the "DEBUG_COREDUMP_MEMORY_DUMP_MIN"
config selected) through one or more :ref:`coredump devices <coredump_device_api>`
Usage
*****
When the core dump module is enabled, during a fatal error, CPU registers
and memory content are printed or stored according to which backends
are enabled. This core dump data can be fed into
a remote target for GDB (and other GDB compatible debuggers). CPU registers,
memory content and stack can be examined in the debugger.
This usually involves the following steps:
1. Get the core dump log from the device depending on enabled backends.
For example, if the log module backend is used, get the log output
from the log module backend.
2. Convert the core dump log into a binary format that can be parsed by
the GDB server. For example,
:zephyr_file:`scripts/coredump/coredump_serial_log_parser.py` can be used
to convert the serial console log into a binary file.
3. Start the custom GDB server using the script
:zephyr_file:`scripts/coredump/coredump_gdbserver.py` with the core dump
binary log file, and the Zephyr ELF file as parameters. The GDB server
can also be started from within GDB, see below.
4. Start the debugger corresponding to the target architecture.
.. note::
Developers for Intel ADSP CAVS 15-25 platforms using
``ZEPHYR_TOOLCHAIN_VARIANT=zephyr`` should use the debugger in the
``xtensa-intel_apl_adsp`` toolchain of the SDK.
5. When ``DEBUG_COREDUMP_BACKEND_FLASH_PARTITION`` is enabled the core dump
data is stored in the flash partition. The flash partition must be defined
in the device tree:
.. code-block:: devicetree
&flash0 {
partitions {
coredump_partition: partition@255000 {
label = "coredump-partition";
reg = <0x255000 DT_SIZE_K(4)>;
};
};
};
Example
-------
This example uses the log module backend tied to serial console.
This was done on :ref:`qemu_x86` where a null pointer was dereferenced.
This is the core dump log from the serial console, and is stored
in :file:`coredump.log`:
::
Booting from ROM..*** Booting Zephyr OS build zephyr-v2.3.0-1840-g7bba91944a63 ***
Hello World! qemu_x86
E: Page fault at address 0x0 (error code 0x2)
E: Linear address not present in page tables
E: PDE: 0x0000000000115827 Writable, User, Execute Enabled
E: PTE: Non-present
E: EAX: 0x00000000, EBX: 0x00000000, ECX: 0x00119d74, EDX: 0x000003f8
E: ESI: 0x00000000, EDI: 0x00101aa7, EBP: 0x00119d10, ESP: 0x00119d00
E: EFLAGS: 0x00000206 CS: 0x0008 CR3: 0x00119000
E: call trace:
E: EIP: 0x00100459
E: 0x00100477 (0x0)
E: 0x00100492 (0x0)
E: 0x001004c8 (0x0)
E: 0x00105465 (0x105465)
E: 0x00101abe (0x0)
E: >>> ZEPHYR FATAL ERROR 0: CPU exception on CPU 0
E: Current thread: 0x00119080 (unknown)
E: #CD:BEGIN#
E: #CD:5a4501000100050000000000
E: #CD:4101003800
E: #CD:your_sha256_hash
E: #CD:00000000a71a100059041000060200000800000000901100
E: #CD:4d010080901100e0901100
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:4d0100b4991100b49d1100
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:your_sha256_hash
E: #CD:END#
E: Halting system
1. Run the core dump serial log converter:
.. code-block:: console
./scripts/coredump/coredump_serial_log_parser.py coredump.log coredump.bin
2. Start the custom GDB server:
.. code-block:: console
./scripts/coredump/coredump_gdbserver.py build/zephyr/zephyr.elf coredump.bin
3. Start GDB:
.. code-block:: console
<path to SDK>/x86_64-zephyr-elf/bin/x86_64-zephyr-elf-gdb build/zephyr/zephyr.elf
4. Inside GDB, connect to the GDB server via port 1234:
.. code-block:: console
(gdb) target remote localhost:1234
5. Examine the CPU registers:
.. code-block:: console
(gdb) info registers
Output from GDB:
::
eax 0x0 0
ecx 0x119d74 1154420
edx 0x3f8 1016
ebx 0x0 0
esp 0x119d00 0x119d00 <z_main_stack+844>
ebp 0x119d10 0x119d10 <z_main_stack+860>
esi 0x0 0
edi 0x101aa7 1055399
eip 0x100459 0x100459 <func_3+16>
eflags 0x206 [ PF IF ]
cs 0x8 8
ss <unavailable>
ds <unavailable>
es <unavailable>
fs <unavailable>
gs <unavailable>
6. Examine the backtrace:
.. code-block:: console
(gdb) bt
Output from GDB:
::
#0 0x00100459 in func_3 (addr=0x0) at zephyr/rtos/zephyr/samples/hello_world/src/main.c:14
#1 0x00100477 in func_2 (addr=0x0) at zephyr/rtos/zephyr/samples/hello_world/src/main.c:21
#2 0x00100492 in func_1 (addr=0x0) at zephyr/rtos/zephyr/samples/hello_world/src/main.c:28
#3 0x001004c8 in main () at zephyr/rtos/zephyr/samples/hello_world/src/main.c:42
Starting the GDB server from within GDB
---------------------------------------
You can use ``target remote |`` to start the custom GDB server from inside
GDB, instead of in a separate shell.
1. Start GDB:
.. code-block:: console
<path to SDK>/x86_64-zephyr-elf/bin/x86_64-zephyr-elf-gdb build/zephyr/zephyr.elf
2. Inside GDB, start the GDB server using the ``--pipe`` option:
.. code-block:: console
(gdb) target remote | ./scripts/coredump/coredump_gdbserver.py --pipe build/zephyr/zephyr.elf coredump.bin
File Format
***********
The core dump binary file consists of one file header, one
architecture-specific block, zero or one threads metadata block(s),
and multiple memory blocks. All numbers in
the headers below are little endian.
File Header
-----------
The file header consists of the following fields:
.. list-table:: Core dump binary file header
:widths: 2 1 7
:header-rows: 1
* - Field
- Data Type
- Description
* - ID
- ``char[2]``
- ``Z``, ``E`` as identifier of file.
* - Header version
- ``uint16_t``
- Identify the version of the header. This needs to be incremented
whenever the header struct is modified. This allows parser to
reject older header versions so it will not incorrectly parse
the header.
* - Target code
- ``uint16_t``
- Indicate which target (e.g. architecture or SoC) so the parser
can instantiate the correct register block parser.
* - Pointer size
- ``uint8_t``
- Size of ``uintptr_t`` in power of 2. (e.g. 5 for 32-bit,
6 for 64-bit). This is needed to accommodate 32-bit and 64-bit
target in parsing the memory block addresses.
* - Flags
- ``uint8_t``
-
* - Fatal error reason
- ``unsigned int``
- Reason for the fatal error, as the same in
``enum k_fatal_error_reason`` defined in
:zephyr_file:`include/zephyr/fatal.h`
Architecture-specific Block
---------------------------
The architecture-specific block contains the byte stream of data specific
to the target architecture (e.g. CPU registers)
.. list-table:: Architecture-specific Block
:widths: 2 1 7
:header-rows: 1
* - Field
- Data Type
- Description
* - ID
- ``char``
- ``A`` to indicate this is a architecture-specific block.
* - Header version
- ``uint16_t``
- Identify the version of this block. To be interpreted by the target
architecture specific block parser.
* - Number of bytes
- ``uint16_t``
- Number of bytes following the header which contains the byte stream
for target data. The format of the byte stream is specific to
the target and is only being parsed by the target parser.
* - Register byte stream
- ``uint8_t[]``
- Contains target architecture specific data.
Threads Metadata Block
---------------------------
The threads metadata block contains the byte stream of data necessary
for debugging threads.
.. list-table:: Threads Metadata Block
:widths: 2 1 7
:header-rows: 1
* - Field
- Data Type
- Description
* - ID
- ``char``
- ``T`` to indicate this is a threads metadata block.
* - Header version
- ``uint16_t``
- Identify the version of the header. This needs to be incremented
whenever the header struct is modified. This allows parser to
reject older header versions so it will not incorrectly parse
the header.
* - Number of bytes
- ``uint16_t``
- Number of bytes following the header which contains the byte stream
for target data.
* - Byte stream
- ``uint8_t[]``
- Contains data necessary for debugging threads.
Memory Block
------------
The memory block contains the start and end addresses and the data within
the memory region.
.. list-table:: Memory Block
:widths: 2 1 7
:header-rows: 1
* - Field
- Data Type
- Description
* - ID
- ``char``
- ``M`` to indicate this is a memory block.
* - Header version
- ``uint16_t``
- Identify the version of the header. This needs to be incremented
whenever the header struct is modified. This allows parser to
reject older header versions so it will not incorrectly parse
the header.
* - Start address
- ``uintptr_t``
- The start address of the memory region.
* - End address
- ``uintptr_t``
- The end address of the memory region.
* - Memory byte stream
- ``uint8_t[]``
- Contains the memory content between the start and end addresses.
Adding New Target
*****************
The architecture-specific block is target specific and requires new
dumping routine and parser for new targets. To add a new target,
the following needs to be done:
#. Add a new target code to the ``enum coredump_tgt_code`` in
:zephyr_file:`include/zephyr/debug/coredump.h`.
#. Implement :c:func:`arch_coredump_tgt_code_get` simply to return
the newly introduced target code.
#. Implement :c:func:`arch_coredump_info_dump` to construct
a target architecture block and call :c:func:`coredump_buffer_output`
to output the block to core dump backend.
#. Add a parser to the core dump GDB stub scripts under
``scripts/coredump/gdbstubs/``
#. Extends the ``gdbstubs.gdbstub.GdbStub`` class.
#. During ``__init__``, store the GDB signal corresponding to
the exception reason in ``self.gdb_signal``.
#. Parse the architecture-specific block from
``self.logfile.get_arch_data()``. This needs to match the format
as implemented in step 3 (inside :c:func:`arch_coredump_info_dump`).
#. Implement the abstract method ``handle_register_group_read_packet``
where it returns the register group as GDB expected. Refer to
GDB's code and documentation on what it is expecting for
the new target.
#. Optionally implement ``handle_register_single_read_packet``
for registers not covered in the ``g`` packet.
#. Extend ``get_gdbstub()`` in
:zephyr_file:`scripts/coredump/gdbstubs/__init__.py` to return
the newly implemented GDB stub.
API documentation
*****************
.. doxygengroup:: coredump_apis
.. doxygengroup:: arch-coredump
``` | /content/code_sandbox/doc/services/debugging/coredump.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,847 |
```restructuredtext
.. _profiling-perf:
Perf
####
Perf is a profiler tool based on stack tracing. It can be used for lightweight profiling
with minimal code overhead.
Work Principle
**************
The ``perf record`` shell command starts a timer with the perf tracer function.
Timers are driven by interrupts, so the perf tracer function is called during an interruption.
The Zephyr core saves the return address and frame pointer in the interrupt stack or ``callee_saved``
structure before calling the interrupt handler. Thus, the perf trace function makes stack traces by
using the return address and frame pointer.
The :zephyr_file:`scripts/profiling/stackcollapse.py` script can be used to convert return addresses
in the stack trace to function names using symbols from the ELF file, and to print them in the
format expected by `FlameGraph`_.
Configuration
*************
You can configure this module using the following options:
* :kconfig:option:`CONFIG_PROFILING_PERF`: Enables the module. This option adds
the ``perf`` command to the shell.
* :kconfig:option:`CONFIG_PROFILING_PERF_BUFFER_SIZE`: Sets the size of the perf buffer
where samples are saved before printing.
Usage
*****
Refer to the :zephyr:code-sample:`profiling-perf` sample for an example of how to use the perf tool.
.. _FlameGraph: path_to_url
``` | /content/code_sandbox/doc/services/profiling/perf.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 296 |
```restructuredtext
.. _virtualization_reference:
Virtualization
##############
.. toctree::
:maxdepth: 1
ivshmem.rst
``` | /content/code_sandbox/doc/services/virtualization/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 32 |
```restructuredtext
.. _ivshmem_driver:
Inter-VM Shared Memory
######################
.. contents::
:local:
:depth: 2
Overview
********
As Zephyr is enabled to run as a guest OS on Qemu and
`ACRN <path_to_url>`_
it might be necessary to make VMs aware of each other, or aware of the host.
This is made possible by exposing a shared memory among parties via a feature
called ivshmem, which stands for inter-VM Shared Memory.
Two types are supported: plain shared memory (ivshmem-plain), and shared
memory with the ability for a VM to generate an interrupt on another VM,
and thus to be interrupted itself as well (ivshmem-doorbell).
Please refer to the official `Qemu ivshmem documentation
<path_to_url>`_ for more information.
Support
*******
Zephyr supports both versions: plain and doorbell. Ivshmem driver can be built
by enabling :kconfig:option:`CONFIG_IVSHMEM`. By default, this will expose the plain
version. :kconfig:option:`CONFIG_IVSHMEM_DOORBELL` needs to be enabled to get the
doorbell version.
Because the doorbell version uses MSI-X vectors to support notification vectors,
the :kconfig:option:`CONFIG_IVSHMEM_MSI_X_VECTORS` has to be tweaked to the number of
vectors that will be needed.
Note that a tiny shell module can be exposed to test the ivshmem feature by
enabling :kconfig:option:`CONFIG_IVSHMEM_SHELL`.
ivshmem-v2
**********
Zephyr also supports ivshmem-v2:
path_to_url
This is primarily used for IPC in the Jailhouse hypervisor
(e.g. :zephyr:code-sample:`eth-ivshmem`). It is also possible to use ivshmem-v2 without
Jailhouse by building the Siemens fork of QEMU, and modifying the QEMU launch flags:
path_to_url
API Reference
*************
.. doxygengroup:: ivshmem
``` | /content/code_sandbox/doc/services/virtualization/ivshmem.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 443 |
```restructuredtext
.. _file_system_api:
File Systems
############
Zephyr RTOS Virtual Filesystem Switch (VFS) allows applications to mount multiple
file systems at different mount points (e.g., ``/fatfs`` and ``/lfs``). The
mount point data structure contains all the necessary information required
to instantiate, mount, and operate on a file system. The File system Switch
decouples the applications from directly accessing an individual file system's
specific API or internal functions by introducing file system registration
mechanisms.
In Zephyr, any file system implementation or library can be plugged into or
pulled out through a file system registration API. Each file system
implementation must have a globally unique integer identifier; use
:c:enumerator:`FS_TYPE_EXTERNAL_BASE` to avoid clashes with in-tree identifiers.
.. code-block:: c
int fs_register(int type, const struct fs_file_system_t *fs);
int fs_unregister(int type, const struct fs_file_system_t *fs);
Zephyr RTOS supports multiple instances of a file system by making use of
the mount point as the disk volume name, which is used by the file system library
while formatting or mounting a disk.
A file system is declared as:
.. code-block:: c
static struct fs_mount_t mp = {
.type = FS_FATFS,
.mnt_point = FATFS_MNTP,
.fs_data = &fat_fs,
};
where
- ``FS_FATFS`` is the file system type like FATFS or LittleFS.
- ``FATFS_MNTP`` is the mount point where the file system will be mounted.
- ``fat_fs`` is the file system data which will be used by fs_mount() API.
Samples
*******
Samples for the VFS are mainly supplied in ``samples/subsys/fs``, although various examples of the
VFS usage are provided as important functionalities in samples for different subsystems.
Here is the list of samples worth looking at:
- ``samples/subsys/fs/fat_fs`` is an example of FAT file system usage with SDHC media;
- ``samples/subsys/shell/fs`` is an example of Shell fs subsystem, using internal flash partition
formatted to LittleFS;
- ``samples/subsys/usb/mass/`` example of USB Mass Storage device that uses FAT FS driver with RAM
or SPI connected FLASH, or LittleFS in flash, depending on the sample configuration.
API Reference
*************
.. doxygengroup:: file_system_api
``` | /content/code_sandbox/doc/services/file_system/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 522 |
```restructuredtext
.. _sensing:
Sensing Subsystem
########################
.. contents::
:local:
:depth: 2
Overview
********
Sensing Subsystem is a high level sensor framework inside the OS user
space service layer. It is a framework focused on sensor fusion, client
arbitration, sampling, timing, scheduling and sensor based power management.
Key concepts in Sensing Subsystem include physical sensor and virtual sensor objects,
and a scheduling framework over sensor object relationships.
Physical sensors do not depend on any other sensor objects for input, and
will directly interact with existing zephyr sensor device drivers.
Virtual sensors rely on other sensor objects (physical or virtual) as
report inputs.
The sensing subsystem relies on Zephyr sensor device APIs (existing version or update in future)
to leverage Zephyr's large library of sensor device drivers (100+).
Use of the sensing subsystem is optional. Applications that only need to access simple sensors
devices can use the Zephyr :ref:`sensor` API directly.
Because the sensing subsystem is separated from the device driver layer and
kernel space, it can support various customizations and sensor algorithms in
user space through the virtual sensor concept. Existing sensor device drivers
can focus on low-level device work and be kept as simple as possible, providing
only hardware abstraction and operations. This is very good for system
stability.
The sensing subsystem is decoupled from any sensor expose/transfer
protocols; the target is to support various up-layer frameworks and
applications with different sensor expose/transfer protocols,
such as `CHRE <path_to_url>`_, HID sensor applications, or
MQTT sensor applications, according to different product requirements. It can
even support multiple applications with different up-layer sensor protocols
at the same time thanks to its multi-client design.
The sensing subsystem can help build a unified Zephyr sensing architecture
with support across host OSes as well as for IoT sensor solutions.
The diagram below illustrates how the Sensing Subsystem integrates with up-layer frameworks.
.. image:: images/sensing_solution.png
:align: center
:alt: Unified Zephyr sensing architecture.
Configurability
***************
* Reusable and configurable standalone subsystem.
* Based on Zephyr existing low-level Sensor API (reuse 100+ existing sensor device drivers)
* Provide Zephyr high-level Sensing Subsystem API for Applications.
* Separate option CHRE Sensor PAL Implementation module to support CHRE.
* Decoupled from any host link protocols; it is the Zephyr application's role to handle
  different protocols (MQTT, HID, or private — all configurable)
Main Features
*************
* Scope
* Focus on framework for sensor fusion, multiple clients, arbitration, data sampling, timing
management and scheduling.
* Sensor Abstraction
* ``Physical sensor``: interacts with Zephyr sensor device drivers, focus on data collecting.
* ``Virtual sensor``: relies on other sensor(s), ``physical`` or ``virtual``, focus on
data fusion.
* Data Driven Model
* ``Polling mode``: periodical sampling rate
* ``Interrupt mode``: data ready, threshold interrupt etc.
* Scheduling
* single thread main loop for all sensor objects sampling and process.
* Buffer Mode for Batching
* Configurable Via Device Tree
Below diagram shows the API position and scope:
.. image:: images/sensing_api_org.png
:align: center
:alt: Sensing subsystem API organization.
``Sensing Subsystem API`` is for Applications.
``Sensing Sensor API`` is for development ``sensors``.
Major Flows
***********
* Sensor Configuration Flow
.. image:: images/sensor_config_flow.png
:align: center
:alt: Sensor Configuration Flow (App set report interval to hinge angel sensor example).
* Sensor Data Flow
.. image:: images/sensor_data_flow.png
:align: center
:alt: Sensor Data Flow (App receive hinge angel data through data event callback example).
Sensor Types And Instance
*************************
The ``Sensing Subsystem`` supports multiple instances of the same sensor type,
there are two methods for applications to identify and open a unique sensor instance:
* Enumerate all sensor instances
:c:func:`sensing_get_sensors` returns information about all sensor instances supported
by the current board configuration in a :c:struct:`sensing_sensor_info` pointer array.
Then Applications can use :c:func:`sensing_open_sensor` to
open specific sensor instance for future accessing, configuration and receive sensor data etc.
This method is suitable for supporting some up-layer frameworks like ``CHRE``, ``HID`` which need
to dynamically enumerate the underlying platform's sensor instances.
* Open the sensor instance by devicetree node directly
Applications can use :c:func:`sensing_open_sensor_by_dt` to open a sensor instance directly with
sensor devicetree node identifier.
For example:
.. code-block:: c
sensing_open_sensor_by_dt(DEVICE_DT_GET(DT_NODELABEL(base_accel)), cb_list, handle);
sensing_open_sensor_by_dt(DEVICE_DT_GET(DT_CHOSEN(zephyr_sensing_base_accel)), cb_list, handle);
This method is useful and easy to use for simple applications that just need to access
specific sensor(s).
``Sensor type`` follows the
`HID standard sensor types definition <path_to_url>`_.
See :zephyr_file:`include/zephyr/sensing/sensing_sensor_types.h`
Sensor Instance Handler
***********************
Clients use a :c:type:`sensing_sensor_handle_t` handler to refer to an opened sensor
instance, and all subsequent operations on this sensor instance use this handler,
such as setting configurations, reading sensor sample data, etc.
For a sensor instance, could have two kinds of clients:
``Application clients`` and ``Sensor clients``.
``Application clients`` can use :c:func:`sensing_open_sensor` to open a sensor instance
and get it's handler.
For ``Sensor clients``, there is no open API for opening a reporter, because the client-report
relationship is built at the sensor's registration stage with devicetree.
The ``Sensing Subsystem`` will auto open and create ``handlers`` for client sensor
to it's reporter sensors.
``Sensor clients`` can get it's reporters' handlers via :c:func:`sensing_sensor_get_reporters`.
.. image:: images/sensor_top.png
:align: center
:alt: Sensor Reporting Topology.
.. note::
For sensors inside the Sensing Subsystem, the reporting relationships between them
are all auto-generated by the Sensing Subsystem according to devicetree definitions,
and handlers between client sensors and reporter sensors are auto-created.
Application(s) need to call :c:func:`sensing_open_sensor` to explicitly open the sensor instance.
Sensor Sample Value
*******************
* Data Structure
Each sensor sample value defines as a common ``header`` + ``readings[]`` data structure, like
:c:struct:`sensing_sensor_value_3d_q31`, :c:struct:`sensing_sensor_value_q31`, and
:c:struct:`sensing_sensor_value_uint32`.
The ``header`` definition :c:func:`sensing_sensor_value_header`.
* Time Stamp
The timestamp unit in the sensing subsystem is ``microseconds``.
The ``header`` defines a **base_timestamp**, and
each element in the **readings[]** array defines **timestamp_delta**.
The **timestamp_delta** is in relation to the previous **readings** (or the **base_timestamp**)
For example:
* timestamp of ``readings[0]`` is ``header.base_timestamp`` + ``readings[0].timestamp_delta``.
* timestamp of ``readings[1]`` is ``timestamp of readings[0]`` + ``readings[1].timestamp_delta``.
Since the timestamp unit is microseconds,
the maximum **timestamp_delta** (``uint32_t``) is ``4295`` seconds.
If a sensor has batched data where two consecutive readings differ by more than ``4295`` seconds,
the sensing subsystem runtime will split them across multiple instances of the readings structure,
and send multiple events.
This concept is referred from `CHRE Sensor API <path_to_url
chre/blob/zephyr/chre_api/include/chre_api/chre/sensor_types.h>`_.
* Data Format
``Sensing Subsystem`` uses per sensor type defined data format structure,
and support ``Q Format`` defined in :zephyr_file:`include/zephyr/dsp/types.h`
for ``zdsp`` lib support.
For example :c:struct:`sensing_sensor_value_3d_q31` can be used by 3D IMU sensors like
:c:macro:`SENSING_SENSOR_TYPE_MOTION_ACCELEROMETER_3D`,
:c:macro:`SENSING_SENSOR_TYPE_MOTION_UNCALIB_ACCELEROMETER_3D`,
and :c:macro:`SENSING_SENSOR_TYPE_MOTION_GYROMETER_3D`.
:c:struct:`sensing_sensor_value_uint32` can be used by
:c:macro:`SENSING_SENSOR_TYPE_LIGHT_AMBIENTLIGHT` sensor,
and :c:struct:`sensing_sensor_value_q31` can be used by
:c:macro:`SENSING_SENSOR_TYPE_MOTION_HINGE_ANGLE` sensor
See :zephyr_file:`include/zephyr/sensing/sensing_datatypes.h`
Device Tree Configuration
*************************
The sensing subsystem uses devicetree to configure all sensor instances, their
properties, and their reporting relationships.
See the example :zephyr_file:`samples/subsys/sensing/simple/boards/native_sim.overlay`
API Reference
*************
.. doxygengroup:: sensing_sensor_types
.. doxygengroup:: sensing_datatypes
.. doxygengroup:: sensing_api
.. doxygengroup:: sensing_sensor
``` | /content/code_sandbox/doc/services/sensing/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,111 |
```restructuredtext
.. _settings_api:
Settings
########
The settings subsystem gives modules a way to store persistent per-device
configuration and runtime state. A variety of storage implementations are
provided behind a common API using FCB, NVS, or a file system. These different
implementations give the application developer flexibility to select an
appropriate storage medium, and even change it later as needs change. This
subsystem is used by various Zephyr components and can be used simultaneously by
user applications.
Settings items are stored as key-value pair strings. By convention,
the keys can be organized by the package and subtree defining the key,
for example the key ``id/serial`` would define the ``serial`` configuration
element for the package ``id``.
Convenience routines are provided for converting a key value to
and from a string type.
For an example of the settings subsystem refer to :zephyr:code-sample:`settings` sample.
.. note::
As of Zephyr release 2.1 the recommended backend for non-filesystem
storage is :ref:`NVS <nvs_api>`.
Handlers
********
Settings handlers for subtree implement a set of handler functions.
These are registered using a call to ``settings_register()``.
**h_get**
This gets called when asking for a settings element value by its name using
``settings_runtime_get()`` from the runtime backend.
**h_set**
This gets called when the value is loaded from persisted storage with
``settings_load()``, or when using ``settings_runtime_set()`` from the
runtime backend.
**h_commit**
This gets called after the settings have been loaded in full.
Sometimes you don't want an individual setting value to take
effect right away, for example if there are multiple settings
which are interdependent.
**h_export**
This gets called to write all current settings. This happens
when ``settings_save()`` tries to save the settings or transfer to any
user-implemented back-end.
Backends
********
Backends are meant to load and save data to/from setting handlers, and
implement a set of handler functions. These are registered using a call to
``settings_src_register()`` for backends that can load data, and/or
``settings_dst_register()`` for backends that can save data. The current
implementation allows for multiple source backends but only a single destination
backend.
**csi_load**
This gets called when loading values from persistent storage using
``settings_load()``.
**csi_save**
This gets called when saving a single setting to persistent storage using
``settings_save_one()``.
**csi_save_start**
This gets called when starting a save of all current settings using
``settings_save()``.
**csi_save_end**
This gets called after having saved of all current settings using
``settings_save()``.
Zephyr Storage Backends
***********************
Zephyr has three storage backends: a Flash Circular Buffer
(:kconfig:option:`CONFIG_SETTINGS_FCB`), a file in the filesystem
(:kconfig:option:`CONFIG_SETTINGS_FILE`), or non-volatile storage
(:kconfig:option:`CONFIG_SETTINGS_NVS`).
You can declare multiple sources for settings; settings from
all of these are restored when ``settings_load()`` is called.
There can be only one target for writing settings; this is where
data is stored when you call ``settings_save()``, or ``settings_save_one()``.
FCB read target is registered using ``settings_fcb_src()``, and write target
using ``settings_fcb_dst()``. As a side-effect, ``settings_fcb_src()``
initializes the FCB area, so it must be called before calling
``settings_fcb_dst()``. File read target is registered using
``settings_file_src()``, and write target by using ``settings_file_dst()``.
Non-volatile storage read target is registered using
``settings_nvs_src()``, and write target by using
``settings_nvs_dst()``.
Storage Location
****************
The FCB and non-volatile storage (NVS) backends both look for a fixed
partition with label "storage" by default. A different partition can be
selected by setting the ``zephyr,settings-partition`` property of the
chosen node in the devicetree.
The file path used by the file backend to store settings is selected via the
option ``CONFIG_SETTINGS_FILE_PATH``.
Loading data from persisted storage
***********************************
A call to ``settings_load()`` uses an ``h_set`` implementation
to load settings data from storage to volatile memory.
After all data is loaded, the ``h_commit`` handler is issued,
signalling the application that the settings were successfully
retrieved.
Technically FCB and file backends may store some history of the entities.
This means that the newest data entity is stored after any
older existing data entities.
Starting with Zephyr 2.1, the back-end must filter out all old entities and
call the callback with only the newest entity.
Storing data to persistent storage
**********************************
A call to ``settings_save_one()`` uses a backend implementation to store
settings data to the storage medium. A call to ``settings_save()`` uses an
``h_export`` implementation to store different data in one operation using
``settings_save_one()``.
A key needs to be covered by an ``h_export`` handler only if it is supposed to be
stored by a ``settings_save()`` call.
For both the FCB and file back-ends, only storage requests whose data changes the
key's current value are stored, so the application does not need to check whether
a value has changed. This storage mechanism implies that storage can contain
multiple value assignments for a key, while only the last one is the current
value for the key.
Garbage collection
==================
When storage becomes full (FCB) or consumes too much space (file),
the backend removes non-recent key-value pairs records and unnecessary
key-delete records.
Secure domain settings
**********************
Currently, the settings subsystem does not provide a scheme for simultaneously
secure and non-secure configuration storage for the same instance.
It is recommended that secure domain uses its own settings instance and it might
provide data for non-secure domain using dedicated interface if needed
(case dependent).
Example: Device Configuration
*****************************
This is a simple example, where the settings handler only implements ``h_set``
and ``h_export``. ``h_set`` is called when the value is restored from storage
(or when set initially), and ``h_export`` is used to write the value to
storage thanks to ``storage_func()``. The user can also implement some other
export functionality, for example, writing to the shell console).
.. code-block:: c
#define DEFAULT_FOO_VAL_VALUE 1
static uint8_t foo_val = DEFAULT_FOO_VAL_VALUE;
static int foo_settings_set(const char *name, size_t len,
settings_read_cb read_cb, void *cb_arg)
{
const char *next;
int rc;
if (settings_name_steq(name, "bar", &next) && !next) {
if (len != sizeof(foo_val)) {
return -EINVAL;
}
rc = read_cb(cb_arg, &foo_val, sizeof(foo_val));
if (rc >= 0) {
/* key-value pair was properly read.
* rc contains value length.
*/
return 0;
}
/* read-out error */
return rc;
}
return -ENOENT;
}
static int foo_settings_export(int (*storage_func)(const char *name,
const void *value,
size_t val_len))
{
return storage_func("foo/bar", &foo_val, sizeof(foo_val));
}
struct settings_handler my_conf = {
.name = "foo",
.h_set = foo_settings_set,
.h_export = foo_settings_export
};
Example: Persist Runtime State
******************************
This is a simple example showing how to persist runtime state. In this example,
only ``h_set`` is defined, which is used when restoring value from
persisted storage.
In this example, the ``main`` function increments ``foo_val``, and then
persists the latest number. When the system restarts, the application calls
``settings_load()`` while initializing, and ``foo_val`` will continue counting
up from where it was before restart.
.. code-block:: c
#include <zephyr/kernel.h>
#include <zephyr/sys/reboot.h>
#include <zephyr/settings/settings.h>
#include <zephyr/sys/printk.h>
#include <inttypes.h>
#define DEFAULT_FOO_VAL_VALUE 0
static uint8_t foo_val = DEFAULT_FOO_VAL_VALUE;
static int foo_settings_set(const char *name, size_t len,
settings_read_cb read_cb, void *cb_arg)
{
const char *next;
int rc;
if (settings_name_steq(name, "bar", &next) && !next) {
if (len != sizeof(foo_val)) {
return -EINVAL;
}
rc = read_cb(cb_arg, &foo_val, sizeof(foo_val));
if (rc >= 0) {
return 0;
}
return rc;
}
return -ENOENT;
}
struct settings_handler my_conf = {
.name = "foo",
.h_set = foo_settings_set
};
int main(void)
{
settings_subsys_init();
settings_register(&my_conf);
settings_load();
foo_val++;
settings_save_one("foo/bar", &foo_val, sizeof(foo_val));
printk("foo: %d\n", foo_val);
k_msleep(1000);
sys_reboot(SYS_REBOOT_COLD);
}
Example: Custom Backend Implementation
**************************************
This is a simple example showing how to register a simple custom backend
handler (:kconfig:option:`CONFIG_SETTINGS_CUSTOM`).
.. code-block:: c
static int settings_custom_load(struct settings_store *cs,
const struct settings_load_arg *arg)
{
//...
}
static int settings_custom_save(struct settings_store *cs, const char *name,
const char *value, size_t val_len)
{
//...
}
/* custom backend interface */
static struct settings_store_itf settings_custom_itf = {
.csi_load = settings_custom_load,
.csi_save = settings_custom_save,
};
/* custom backend node */
static struct settings_store settings_custom_store = {
.cs_itf = &settings_custom_itf
};
int settings_backend_init(void)
{
/* register custom backend */
settings_dst_register(&settings_custom_store);
settings_src_register(&settings_custom_store);
return 0;
}
API Reference
*************
The Settings subsystem APIs are provided by ``settings.h``:
API for general settings usage
==============================
.. doxygengroup:: settings
API for key-name processing
===========================
.. doxygengroup:: settings_name_proc
API for runtime settings manipulation
=====================================
.. doxygengroup:: settings_rt
API of backend interface
========================
.. doxygengroup:: settings_backend
``` | /content/code_sandbox/doc/services/settings/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,393 |
```restructuredtext
.. _management_fixing_prev_mcumgr:
Fixing and backporting fixes to Zephyr v2.7 MCUmgr
##################################################
The processes described in this document apply to both the zephyr repository itself and the MCUmgr :ref:`module <modules>` defined in :zephyr_file:`west.yml`.
.. note::
Currently, the backporting process, described in this document, is required only when providing
changes to Zephyr version 2.7 LTS.
There are two different processes: one for issues that have also been fixed in the current
version of Zephyr (backports), and one for issues that are being fixed only in a previous version.
The upstream MCUmgr repository is located `on this page <path_to_url>`_.
The Zephyr fork used in version 2.7 and earlier is `located here <path_to_url>`_.
Versions of Zephyr past 2.7 use the MCUmgr library that is `part of the Zephyr code base <path_to_url>`_.
Possible origins of a code change
*********************************
In Zephyr version 2.7 and earlier, you must first apply the fix
to the upstream repository of MCUmgr and then bring it to Zephyr with snapshot updates.
As such, there are four possible ways to apply a change to the 2.7 branch:
* The fix, done directly to the Zephyr held code of the MCUmgr library, is backported to the ``v2.7-branch``.
* The fix, ported to the Zephyr held code from the upstream repository, is backported to the ``v2.7-branch``.
* The fix, done upstream and no longer relevant to the current version, is directly backported
to the ``v2.7-branch``.
* The fix, not present upstream and not relevant for the current version of Zephyr, is
directly applied to the ``v2.7-branch``.
The first three cases are cases of *backports*; the last one is a case of a *new fix* and has no
corresponding fix in the current version.
.. _management_fixing_prev_mcumgr_submit:
Applying fixes to previous versions of MCUmgr
*********************************************
This section indicates how to apply fixes to previous versions of MCUmgr.
Creating a bug report
=====================
Every proposed fix requires a bug report submitted for the specified version of Zephyr affected by the bug.
In case the reported bug in a previous version has already been fixed in the current version, the description
of the bug must be copied with the following:
* Additional references to the bug in the current version
* The PR for the current version
* The SHAs of the commits, if the PR has already been merged
You must also apply the ``backport v2.7-branch`` label to the bug report.
Creating the pull request for the fix
=====================================
You can either create a *backport pull request* or a *new-fix pull request*.
Creating backport pull requests
-------------------------------
Backporting a fix means that some or all of the fix commits, as they exist in the current version,
are ported to a previous version.
.. note::
Backporting requires the fix for the current version to be already merged.
To create a backport pull request, do the following:
1. Port the fix commits from the current version to the previous version.
Even if some of the commits require changes, keep the commit messages of all the ported commits
as close to the ones in the original commits as possible, adding the following line:
::
"Backporting commit <sha>"
``<sha>`` indicates the SHA of the commit after it has been already merged in the current version.
#. Create the pull request selecting ``v2.7-branch`` as the merge target.
#. Update ``west.yml`` within Zephyr, creating a pull-request to update the MCUmgr library referenced in
Zephyr 2.7.
Creating new-fix pull requests
------------------------------
When the fix needed does not have a corresponding fix in the current version, the bug report
must follow the ordinary process.
1. Create the pull request selecting ``v2.7-branch`` as the merge target.
#. Update ``west.yml`` within Zephyr, creating a pull-request to update the MCUmgr library referenced in
Zephyr 2.7.
Configuration management
************************
This chapter describes the maintainers' side of accepting and merging fixes and backports.
Prerequisites
=============
As a maintainer, these are the steps required before proceeding with the merge process:
1. Check if the author has followed the correct steps that are required to apply the fix, as described in
:ref:`management_fixing_prev_mcumgr_submit`.
#. Ensure that the author of the fix has also provided the ``west.yml`` update for Zephyr 2.7.
The specific merging process depends on where the fix comes from and whether it is a *backport* or a *new
fix*.
Merging a backported fix
========================
There are two possible sources of backports:
* The Zephyr code base
* A direct fix from upstream
Both cases are similar and differ only in the branch name.
To merge a backported fix after the pull request for the fix has gone through the review process,
as a maintainer, do the following:
1. Create a branch named as follow:
::
backport-<source>-<pr_num>-to-v2.7-branch
``<source>`` can be one of the following:
* ``upstream`` - if the fix has originally been merged to the upstream repository.
* ``zephyr`` - if the fix has been applied to the Zephyr internal MCUmgr library (past 2.7 versions).
``<pr_num>`` is the number of the original pull request that has already been merged.
For example, a branch named ``backport-upstream-137-to-v2.7-branch`` indicates a backport of pull
request 137, which has already been merged to the upstream repository of MCUmgr.
#. Push the reviewed pull-request branch to the newly created branch and merge the backport branch
to ``v2.7-branch``.
Merging a new fix
=================
Merging a new fix, that is not a backport of either any upstream or Zephyr fix, does not require any special
treatment. Apply the fix directly at the top of ``v2.7-branch``.
Merge west.yml
==============
As an MCUmgr maintainer, you may not be able to merge the ``west.yml`` update to introduce the fix to Zephyr.
However, you are responsible for such a merge to happen as soon as possible after the MCUmgr fixes have been
applied to the ``v2.7-branch`` of the MCUmgr.
``` | /content/code_sandbox/doc/services/device_mgmt/mcumgr_backporting.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,469 |
```restructuredtext
.. _mcu_mgr:
MCUmgr
#######
Overview
********
The management subsystem allows remote management of Zephyr-enabled devices.
The following management operations are available:
* Image management
* File System management
* OS management
* Settings (config) management
* Shell management
* Statistic management
* Zephyr management
over the following transports:
* BLE (Bluetooth Low Energy)
* Serial (UART)
* UDP over IP
The management subsystem is based on the Simple Management Protocol (SMP)
provided by `MCUmgr`_, an open source project that provides a
management subsystem that is portable across multiple real-time operating
systems.
The management subsystem is located in :zephyr_file:`subsys/mgmt/` inside of
the Zephyr tree.
Additionally, there is a :zephyr:code-sample:`sample <smp-svr>` server that provides
management functionality over BLE and serial.
.. _mcumgr_tools_libraries:
Tools/libraries
***************
There are various tools and libraries available which enable usage of MCUmgr functionality on a
device which are listed below. Note that these tools are not part of or related to the Zephyr
project.
.. only:: html
.. table:: Tools and Libraries for MCUmgr
:align: center
+your_sha256_hash----------------+-------------------------------------------+--------------------------+--------------------------------------------------+---------------+------------+---------+
| +---------+-------+-----+--------+----------+--------+-----------+-----+----+-----+------+----------+----+-------+--------+ | | |
| | Windows | Linux | mac | Mobile | Embedded | Serial | Bluetooth | UDP | OS | IMG | Stat | Settings | FS | Shell | Zephyr | | | |
+================================================================================+=========+=======+=====+========+==========+========+===========+=====+====+=====+======+==========+====+=======+========+===============+============+=========+
| `AuTerm <path_to_url`_ | | | | | | | | | | | | | | | | Application | C++ (Qt) | GPLv3 |
+your_sha256_hash----------------+---------+-------+-----+--------+----------+--------+-----------+-----+----+-----+------+----------+----+-------+--------+---------------+------------+---------+
| `mcumgr-client <path_to_url`_ | | | | | | | | | | | | | | | | Application | Rust | BSD |
+your_sha256_hash----------------+---------+-------+-----+--------+----------+--------+-----------+-----+----+-----+------+----------+----+-------+--------+---------------+------------+---------+
| `mcumgr-web <path_to_url`_ | | | | | | | | | | | | | | | | Web page | Javascript | MIT |
| | | | | | | | | | | | | | | | | (chrome only) | | |
+your_sha256_hash----------------+---------+-------+-----+--------+----------+--------+-----------+-----+----+-----+------+----------+----+-------+--------+---------------+------------+---------+
| nRF Connect Device Manager: |br| | | | | | | | | | | | | | | | | | | |
| `Android | | | | | | | | | | | | | | | | Library and | Java, | Apache |
| <path_to_url`_ | | | | | | | | | | | | | | | | application | Kotlin, | |
| and `iOS | | | | | | | | | | | | | | | | | Swift | |
| <path_to_url`_ | | | | | | | | | | | | | | | | | | |
+your_sha256_hash----------------+---------+-------+-----+--------+----------+--------+-----------+-----+----+-----+------+----------+----+-------+--------+---------------+------------+---------+
| Zephyr MCUmgr client (in-tree) | | | | | | | | | | | | | | | | Library | C | Apache |
+your_sha256_hash----------------+---------+-------+-----+--------+----------+--------+-----------+-----+----+-----+------+----------+----+-------+--------+---------------+------------+---------+
.. only:: latex
.. raw:: latex
\begin{landscape}
.. table:: Tools and Libraries for MCUmgr
:align: center
+your_sha256_hash----------------+---------------+-----------------+--------------------------------------------------+---------------+------------+
| Name | OS support | Transports | Groups | Type | Language |
| | | +----+-----+------+----------+----+-------+--------+ | |
| | | | OS | IMG | Stat | Settings | FS | Shell | Zephyr | | |
+================================================================================+===============+=================+====+=====+======+==========+====+=======+========+===============+============+
| `AuTerm <path_to_url`_ | Windows, |br| | Serial, |br| | | | | | | | | App | C++ (Qt) |
| | Linux, |br| | Bluetooth, |br| | | | | | | | | | |
| | macOS | UDP | | | | | | | | | |
+your_sha256_hash----------------+---------------+-----------------+----+-----+------+----------+----+-------+--------+---------------+------------+
| `mcumgr-client <path_to_url`_ | Windows, |br| | Serial | | | | | | | | App | Rust |
| | Linux, |br| | | | | | | | | | | |
| | macOS | | | | | | | | | | |
+your_sha256_hash----------------+---------------+-----------------+----+-----+------+----------+----+-------+--------+---------------+------------+
| `mcumgr-web <path_to_url`_ | Windows, |br| | Bluetooth | | | | | | | | Web (chrome | Javascript |
| | Linux, |br| | | | | | | | | | only) | |
| | macOS | | | | | | | | | | |
+your_sha256_hash----------------+---------------+-----------------+----+-----+------+----------+----+-------+--------+---------------+------------+
| nRF Connect Device Manager: |br| | iOS, |br| | Bluetooth | | | | | | | | Library, App | Java, |
| `Android | Android | | | | | | | | | | Kotlin, |
| <path_to_url`_ | | | | | | | | | | | Swift |
| and `iOS | | | | | | | | | | | |
| <path_to_url`_ | | | | | | | | | | | |
+your_sha256_hash----------------+---------------+-----------------+----+-----+------+----------+----+-------+--------+---------------+------------+
| Zephyr MCUmgr client (in-tree) | Linux, |br| | Serial, |br| | | | | | | | | Library | C |
| | Zephyr | Bluetooth, |br| | | | | | | | | | |
| | | UDP | | | | | | | | | |
+your_sha256_hash----------------+---------------+-----------------+----+-----+------+----------+----+-------+--------+---------------+------------+
.. raw:: latex
\end{landscape}
Note that a tick for a particular group indicates basic support for that group in the code; it is
possible that not all commands/features of a group are supported by the implementation.
.. _mcumgr_jlink_ob_virtual_msd:
J-Link Virtual MSD Interaction Note
***********************************
On boards where a J-Link OB is present which has both CDC and MSC (virtual Mass
Storage Device, also known as drag-and-drop) support, the MSD functionality can
prevent MCUmgr commands over the CDC UART port from working due to how USB
endpoints are configured in the J-Link firmware (for example on the
:ref:`Nordic nrf52840dk/nrf52840 board <nrf52840dk_nrf52840>`) because of
limiting the maximum packet size (most likely to occur when using image
management commands for updating firmware). This issue can be
resolved by disabling MSD functionality on the J-Link device, follow the
instructions on :ref:`nordic_segger_msd` to disable MSD support.
Bootloader Integration
**********************
The :ref:`dfu` subsystem integrates the management subsystem with the
bootloader, providing the ability to send and upgrade a Zephyr image to a
device.
Currently only the MCUboot bootloader is supported. See :ref:`mcuboot` for more
information.
.. _MCUmgr: path_to_url
.. _MCUboot design: path_to_url
Discord channel
***************
Developers welcome!
* Discord mcumgr channel: path_to_url
API Reference
*************
.. doxygengroup:: mcumgr_mgmt_api
``` | /content/code_sandbox/doc/services/device_mgmt/mcumgr.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,381 |
```restructuredtext
.. _dfu:
Device Firmware Upgrade
#######################
Overview
********
The Device Firmware Upgrade subsystem provides the necessary frameworks to
upgrade the image of a Zephyr-based application at run time. It currently
consists of two different modules:
* :zephyr_file:`subsys/dfu/boot/`: Interface code to bootloaders
* :zephyr_file:`subsys/dfu/img_util/`: Image management code
The DFU subsystem deals with image management, but not with the transport
or management protocols themselves required to send the image to the target
device. For information on these protocols and frameworks please refer to the
:ref:`device_mgmt` section.
.. _flash_img_api:
Flash Image
===========
The flash image API as part of the Device Firmware Upgrade (DFU) subsystem
provides an abstraction on top of Flash Stream to simplify writing firmware
image chunks to flash.
API Reference
-------------
.. doxygengroup:: flash_img_api
.. _mcuboot_api:
MCUBoot API
===========
The MCUboot API is provided to get version information and boot status of
application images. It allows to select application image and boot type
for the next boot.
API Reference
-------------
.. doxygengroup:: mcuboot_api
Bootloaders
***********
.. _mcuboot:
MCUboot
=======
Zephyr is directly compatible with the open source, cross-RTOS
`MCUboot boot loader`_. It interfaces with MCUboot and is aware of the image
format required by it, so that Device Firmware Upgrade is available when MCUboot
is the boot loader used with Zephyr. The source code itself is hosted in the
`MCUboot GitHub Project`_ page.
In order to use MCUboot with Zephyr you need to take the following into account:
1. You will need to define the flash partitions required by MCUboot; see
:ref:`flash_map_api` for details.
2. You will have to specify your flash partition as the chosen code partition
.. code-block:: devicetree
/ {
chosen {
zephyr,code-partition = &slot0_partition;
};
};
3. Your application's :file:`.conf` file needs to enable the
:kconfig:option:`CONFIG_BOOTLOADER_MCUBOOT` Kconfig option in order for Zephyr to
be built in an MCUboot-compatible manner
4. You need to build and flash MCUboot itself on your device
5. You might need to take precautions to avoid mass erasing the flash and also
to flash the Zephyr application image at the correct offset (right after the
bootloader)
More detailed information regarding the use of MCUboot with Zephyr can be found
in the `MCUboot with Zephyr`_ documentation page on the MCUboot website.
.. _MCUboot boot loader: path_to_url
.. _MCUboot with Zephyr: path_to_url
.. _MCUboot GitHub Project: path_to_url
``` | /content/code_sandbox/doc/services/device_mgmt/dfu.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 648 |
```restructuredtext
.. _device_mgmt:
Device Management
#################
.. toctree::
:maxdepth: 1
mcumgr.rst
mcumgr_handlers.rst
mcumgr_callbacks.rst
mcumgr_backporting.rst
smp_protocol.rst
smp_transport.rst
dfu.rst
ota.rst
ec_host_cmd.rst
SMP Groups
==========
.. toctree::
:maxdepth: 1
smp_groups/smp_group_0.rst
smp_groups/smp_group_1.rst
smp_groups/smp_group_2.rst
smp_groups/smp_group_3.rst
smp_groups/smp_group_8.rst
smp_groups/smp_group_9.rst
smp_groups/smp_group_63.rst
``` | /content/code_sandbox/doc/services/device_mgmt/index.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 193 |
```restructuredtext
.. _ec_host_cmd_backend_api:
EC Host Command
###############
Overview
********
The host command protocol defines the interface for a host, or application processor, to
communicate with a target embedded controller (EC). The EC Host command subsystem implements the
target side of the protocol, generating responses to commands sent by the host. The host command
protocol interface supports multiple versions, but this subsystem implementation only supports
protocol version 3.
Architecture
************
The Host Command subsystem contains a few components:
* Backend
* General handler
* Command handler
The backend is a layer between a peripheral driver and the general handler. It is responsible for
sending and receiving commands via chosen peripheral.
The general handler validates data from the backend e.g. check sizes, checksum, etc. If the command
is valid and the user has provided a handler for a received command id, the command handler is
called.
.. image:: ec_host_cmd.png
:align: center
SHI (Serial Host Interface) is different from the other peripherals because it is used only for communication with a
host. SHI does not have API itself, thus the backend and peripheral driver layers are combined into
one backend layer.
.. image:: ec_host_cmd_shi.png
:align: center
Another case is SPI. Unfortunately, the current SPI API can't be used to handle the host commands
communication. The main issues are the unknown size of a command sent by the host (an SPI transaction
sends/receives a specific number of bytes) and the need to constantly send a status byte (the SPI
module is enabled and disabled per transaction). This forces implementing the SPI driver within a backend,
as it is done for SHI. That means a SPI backend has to be implemented per chip family. However, it
can be changed in the future once the SPI API is extended to host command needs. Please check `the
discussion <path_to_url>`_.
That approach requires configuring the SPI dts node in a special way. The main compatible string of
a SPI node has changed to use the Host Command version of a SPI driver. The rest of the properties
should be configured as usual. Example of the SPI node for STM32:
.. code-block:: devicetree
&spi1 {
/* Change the compatible string to use the Host Command version of the
* STM32 SPI driver
*/
compatible = "st,stm32-spi-host-cmd";
status = "okay";
dmas = <&dma2 3 3 0x38440 0x03>,
<&dma2 0 3 0x38480 0x03>;
dma-names = "tx", "rx";
/* This field is used to point at our CS pin */
cs-gpios = <&gpioa 4 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
};
The STM32 SPI host command backend driver supports the :dtcompatible:`st,stm32h7-spi` and
:dtcompatible:`st,stm32-spi-fifo` variant implementations. To enable these variants, append the
corresponding compatible string. For example, to enable FIFO support and support for the STM32H7
SoCs, modify the compatible string as shown.
.. code-block:: devicetree
&spi1 {
compatible = "st,stm32h7-spi", "st,stm32-spi-fifo", "st,stm32-spi-host-cmd";
...
};
The chip that runs Zephyr is a SPI slave, and the ``cs-gpios`` property is used to point at our CS pin.
For the SPI, it is required to set the backend chosen node ``zephyr,host-cmd-spi-backend``.
The supported backend and peripheral drivers:
* Simulator
* SHI - ITE and NPCX
* eSPI - any eSPI slave driver that support :kconfig:option:`CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD` and
:kconfig:option:`CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE`
* UART - any UART driver that supports the asynchronous API
* SPI - STM32
Initialization
**************
If the application configures one of the following backend chosen nodes and
:kconfig:option:`CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT` is set, then the corresponding backend
initializes the host command subsystem by calling :c:func:`ec_host_cmd_init`:
* ``zephyr,host-cmd-espi-backend``
* ``zephyr,host-cmd-shi-backend``
* ``zephyr,host-cmd-uart-backend``
* ``zephyr,host-cmd-spi-backend``
If no backend chosen node is configured, the application must call the :c:func:`ec_host_cmd_init`
function directly. This way of initialization is useful if a backend is chosen at runtime
based on e.g. GPIO state.
Buffers
*******
The host command communication requires buffers for rx and tx. The buffers are provided by the
general handler if :kconfig:option:`CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE` > 0 for rx buffer and
:kconfig:option:`CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE` > 0 for the tx buffer. The shared
buffers are useful for applications that use multiple backends. Defining separate buffers by every
backend would increase the memory usage. However, some buffers can be defined by a peripheral driver
e.g. eSPI. These ones should be reused as much as possible.
Logging
*******
The host command subsystem has an embedded logging system for the ongoing communication. There are a few logging
levels:
* `LOG_INF` is used to log a command id of a new command and not success responses. Repeats of the
same command are not logged
* `LOG_DBG` logs every command, even repeats
* `LOG_DBG` + :kconfig:option:`CONFIG_EC_HOST_CMD_LOG_DBG_BUFFERS` logs every command and responses
with the data buffers
API Reference
*************
.. doxygengroup:: ec_host_cmd_interface
``` | /content/code_sandbox/doc/services/device_mgmt/ec_host_cmd.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,272 |
```restructuredtext
.. _mcumgr_smp_transport_specification:
SMP Transport Specification
###########################
This document specifies the information needed for implementing server and client
side SMP transports.
.. _mcumgr_smp_transport_ble:
BLE (Bluetooth Low Energy)
**************************
MCUmgr clients need to use the following BLE characteristics when implementing
SMP client:
- **Service UUID**: `8D53DC1D-1DB7-4CD3-868B-8A527460AA84`
- **Characteristic UUID**: `DA2E7828-FBCE-4E01-AE9E-261174997C48`
All SMP communication utilizes a single GATT characteristic. An SMP request is
sent via a GATT Write Without Response command. An SMP response is sent in the form
of a GATT Notification.
If an SMP request or response is too large to fit in a single GATT command, the
sender fragments it across several packets. No additional framing is
introduced when a request or response is fragmented; the payload is simply
split among several packets. Since GATT guarantees ordered delivery of
packets, the SMP header in the first fragment contains sufficient information
for reassembly.
.. _mcumgr_smp_transport_uart:
UART/serial and console
***********************
SMP protocol specification by MCUmgr subsystem of Zephyr uses basic framing
of data to allow multiplexing of UART channel. Multiplexing requires
prefixing each frame with two byte marker and terminating it with newline.
Currently MCUmgr imposes a 127 byte limit on frame size, although there
are no real protocol constraints that require that limit.
The limit includes the prefix and the newline character, so the allowed payload
size is actually 124 bytes.
Although no such transport exists in Zephyr, it is possible to implement
MCUmgr client/server over UART transport that does not have framing at all,
or uses hardware serial port control, or other means of framing.
Frame fragmenting
=================
SMP protocol over serial is fragmented into MTU size frames; each
frame consists of two byte start marker, body and terminating newline
character.
There are four types of frames: initial, partial, partial-final
and initial-final; each frame type differs by start marker and/or body
contents.
Frame formats
-------------
An initial frame must be followed by an optional sequence of partial
frames and finally by a partial-final frame.
Body is always Base64 encoded, so the body size, here described as
MTU - 3, is able to actually carry N = (MTU - 3) / 4 * 3 bytes
of raw data.
Body of initial frame is preceded by two byte total packet length,
encoded in Big Endian, and equals size of a raw body plus two bytes,
size of CRC16; this means that actual body size allowed into an
initial frame is N - 2.
If the body size is smaller than N - 4, then it is possible to carry
the entire body, with the preceding length and the following CRC, in a
single frame, here called initial-final; for the description of the
initial-final frame see below.
Initial frame format:
.. table::
:align: center
+---------------+---------------+---------------------------+
| Content | Size | Description |
+===============+===============+===========================+
| 0x06 0x09 | 2 bytes | Frame start marker |
+---------------+---------------+---------------------------+
| <base64-i> | no more than | Base64 encoded body |
| | MTU - 3 bytes | |
+---------------+---------------+---------------------------+
| 0x0a | 1 byte | Frame termination |
+---------------+---------------+---------------------------+
``<base64-i>`` is Base64 encoded body of format:
.. table::
:align: center
+---------------+---------------+---------------------------+
| Content | Size | Description |
+===============+===============+===========================+
| total length | 2 bytes | Big endian 16-bit value |
| | | representing total length |
| | | of body + 2 bytes for |
| | | CRC16; note that size of |
| | | total length field is not |
| | | added to total length |
| | | value. |
+---------------+---------------+---------------------------+
| body | no more than | Raw body data fragment |
| | MTU - 5 | |
+---------------+---------------+---------------------------+
Initial-final frame format is similar to initial frame format,
but differs by ``<base64-i>`` definition.
``<base64-i>`` of initial-final frame, is Base64 encoded data taking
form:
.. table::
:align: center
+---------------+---------------+---------------------------+
| Content | Size | Description |
+===============+===============+===========================+
| total length | 2 bytes | Big endian 16-bit value |
| | | representing total length |
| | | of body + 2 bytes for |
| | | CRC16; note that size of |
| | | total length field is not |
| | | added to total length |
| | | value. |
+---------------+---------------+---------------------------+
| body | no more than | Raw body data fragment |
| | MTU - 7 | |
+---------------+---------------+---------------------------+
| crc16 | 2 bytes | CRC16 of entire packet |
| | | body, preceding length |
| | | not included. |
+---------------+---------------+---------------------------+
A partial frame is a continuation of a previous initial or other partial
frame. A partial frame takes the form:
.. table::
:align: center
+---------------+---------------+---------------------------+
| Content | Size | Description |
+===============+===============+===========================+
| 0x04 0x14 | 2 bytes | Frame start marker |
+---------------+---------------+---------------------------+
| <base64-i> | no more than | Base64 encoded body |
| | MTU - 3 bytes | |
+---------------+---------------+---------------------------+
| 0x0a | 1 byte | Frame termination |
+---------------+---------------+---------------------------+
The ``<base64-i>`` of partial frame is Base64 encoding of data,
taking form:
.. table::
:align: center
+---------------+---------------+---------------------------+
| Content | Size | Description |
+===============+===============+===========================+
| body | no more than | Raw body data fragment |
| | MTU - 3 | |
+---------------+---------------+---------------------------+
The ``<base64-i>`` of partial-final frame is Base64 encoding of data,
taking form:
.. table::
:align: center
+---------------+---------------+---------------------------+
| Content | Size | Description |
+===============+===============+===========================+
| body | no more than | Raw body data fragment |
| | MTU - 3 | |
+---------------+---------------+---------------------------+
| crc16 | 2 bytes | CRC16 of entire packet |
| | | body, preceding length |
| | | not included. |
+---------------+---------------+---------------------------+
CRC Details
-----------
The CRC16 included in final type frames is calculated over only
raw data and does not include packet length.
CRC16 polynomial is 0x1021 and initial value is 0.
API Reference
*************
.. doxygengroup:: mcumgr_transport_smp
``` | /content/code_sandbox/doc/services/device_mgmt/smp_transport.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,724 |
```restructuredtext
.. _ota:
Over-the-Air Update
###################
Overview
********
Over-the-Air (OTA) Update is a method for delivering firmware updates to remote
devices using a network connection. Although the name implies a wireless
connection, updates received over a wired connection (such as Ethernet)
are still commonly referred to as OTA updates. This approach requires server
infrastructure to host the firmware binary and implement a method of signaling
when an update is available. Security is a concern with OTA updates; firmware
binaries should be cryptographically signed and verified before upgrading.
The :ref:`dfu` section discusses upgrading Zephyr firmware using MCUboot. The
same method can be used as part of OTA. The binary is first downloaded
into an unoccupied code partition, usually named ``slot1_partition``, then
upgraded using the :ref:`mcuboot` process.
Examples of OTA
***************
Golioth
=======
`Golioth`_ is an IoT management platform that includes OTA updates. Devices are
configured to observe your available firmware revisions on the Golioth Cloud.
When a new version is available, the device downloads and flashes the binary. In
this implementation, the connection between cloud and device is secured using
TLS/DTLS, and the signed firmware binary is confirmed by MCUboot before the
upgrade occurs.
1. A working sample can be found on the `Golioth Firmware SDK repository`_
2. The `Golioth OTA documentation`_ includes complete information about the
versioning process
Eclipse hawkBit |trade|
=======================
`Eclipse hawkBit`_ |trade| is an update server framework that uses polling on a
REST api to detect firmware updates. When a new update is detected, the binary
is downloaded and installed. MCUboot can be used to verify the signature before
upgrading the firmware.
There is a :zephyr:code-sample:`hawkbit-api` sample included in the
Zephyr :ref:`mgmt-samples` section.
UpdateHub
=========
`UpdateHub`_ is a platform for remotely updating embedded devices. Updates can
be manually triggered or monitored via polling. When a new update is detected,
the binary is downloaded and installed. MCUboot can be used to verify the
signature before upgrading the firmware.
There is an :zephyr:code-sample:`updatehub-fota` sample included in the Zephyr
:ref:`mgmt-samples` section.
SMP Server
==========
A Simple Management Protocol (SMP) server can be used to update firmware via
Bluetooth Low Energy (BLE) or UDP. :ref:`mcu_mgr` is used to send a signed
firmware binary to the remote device where it is verified by MCUboot before the
upgrade occurs.
There is an :zephyr:code-sample:`smp-svr` sample included in the Zephyr :ref:`mgmt-samples`
section.
Lightweight M2M (LWM2M)
=======================
The :ref:`lwm2m_interface` protocol includes support for firmware update via
:kconfig:option:`CONFIG_LWM2M_FIRMWARE_UPDATE_OBJ_SUPPORT`. Devices securely
connect to an LwM2M server using DTLS. A :zephyr:code-sample:`lwm2m-client` sample is
available but it does not demonstrate the firmware update feature.
.. _MCUboot bootloader: https://mcuboot.com/
.. _Golioth: https://golioth.io/
.. _Golioth Firmware SDK repository: https://github.com/golioth/golioth-firmware-sdk
.. _Golioth OTA documentation: https://docs.golioth.io/firmware/golioth-firmware-sdk/firmware-upgrade/firmware-upgrade
.. _Eclipse hawkBit: https://www.eclipse.org/hawkbit/
.. _UpdateHub: https://updatehub.io/
``` | /content/code_sandbox/doc/services/device_mgmt/ota.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 772 |
```restructuredtext
.. _mcumgr_handlers:
MCUmgr handlers
###############
Overview
********
MCUmgr functions by having group handlers which identify a group of functions relating to a
specific management area, which is addressed with a 16-bit identification value,
:c:enum:`mcumgr_group_t` contains the management groups available in Zephyr with their
corresponding group ID values. The group ID is included in SMP headers to identify which
group a command belongs to, there is also an 8-bit command ID which identifies the function of
that group to execute - see :ref:`mcumgr_smp_protocol_specification` for details on the SMP
protocol and header. There can only be one registered group per unique ID.
Implementation
**************
MCUmgr handlers can be added externally by application code or by module code, they do not have
to reside in the upstream Zephyr tree to be usable. The first step to creating a handler is to
create the folder structure for it, the typical Zephyr MCUmgr group layout is as follows:
.. code-block:: none
<dir>/grp/<grp_name>_mgmt/
CMakeLists.txt
Kconfig
include
<grp_name>_mgmt.h
<grp_name>_mgmt_callbacks.h
src
<grp_name>_mgmt.c
Note that the header files in upstream Zephyr MCUmgr handlers reside in the
``zephyr/include/zephyr/mgmt/mcumgr/grp/<grp_name>_mgmt`` directory to allow the files to be
globally included by applications.
Initial header <grp_name>_mgmt.h
================================
The purpose of the header file is to provide defines which can be used by the MCUmgr handler
itself and application code, e.g. to reference the command IDs for executing functions. An example
file would look similar to:
.. literalinclude:: ../../../tests/subsys/mgmt/mcumgr/handler_demo/example_as_module/include/example_mgmt.h
:language: c
:linenos:
This provides the defines for 2 commands, ``test`` and ``other``, and sets up the SMP version 2
error responses (which have unique error codes per group, as opposed to the legacy SMP version 1
error responses that return a :c:enum:`mcumgr_err_t`). There should always be an OK error code
with the value 0 and an unknown error code with the value 1. The above example then adds an error
code of ``not wanted`` with value 2. In addition, the group ID is set to
:c:enumerator:`MGMT_GROUP_ID_PERUSER`, which is the start group ID for user-defined groups. Note
that group IDs need to be unique, so other custom groups should use different values; a central
index header file (as upstream Zephyr has) can be used to distribute group IDs more easily.
Initial header <grp_name>_mgmt_callbacks.h
==========================================
The purpose of the header file is to provide defines which can be used by the MCUmgr handler
itself and application code, e.g. to reference the command IDs for executing functions. An example
file would look similar to:
.. literalinclude:: ../../../tests/subsys/mgmt/mcumgr/handler_demo/example_as_module/include/example_mgmt_callbacks.h
:language: c
:linenos:
This sets up a single event which application (or module) code can register for to receive a
callback when the function handler is executed, which allows the flow of the handler to be
changed (i.e. to return an error instead of continuing). The event group ID is set to
:c:enumerator:`MGMT_EVT_GRP_USER_CUSTOM_START`, which is the start event ID for user-defined groups,
note that event IDs need to be unique so other custom groups should use different values, a
central index header file (as upstream Zephyr has) can be used to distribute event IDs more
easily.
Initial source <grp_name>_mgmt.c
================================
The purpose of this source file is to handle the incoming MCUmgr commands, provide responses, and
register the transport with MCUmgr so that commands will be sent to it. An example file would
look similar to:
.. literalinclude:: ../../../tests/subsys/mgmt/mcumgr/handler_demo/example_as_module/src/example_mgmt.c
:language: c
:linenos:
The above code creates 2 function handlers, ``test`` which supports read requests and takes 2
required parameters, and ``other`` which supports write requests and takes 1 optional parameter,
this function handler has an optional notification callback feature that allows other parts of
the code to listen for the event and take any required actions that are necessary or prevent
further execution of the function by returning an error, further details on MCUmgr callback
functionality can be found on :ref:`mcumgr_callbacks`.
Note that other code referencing callbacks for custom MCUmgr handlers needs to include both the
base Zephyr callback include file and the custom handler callback file, only in-tree Zephyr
handler headers are included when including the upstream Zephyr callback header file.
Initial Kconfig
===============
The purpose of the Kconfig file is to provide options which users can enable or change relating
to the functionality of the handler being implemented. An example file would look similar to:
.. literalinclude:: ../../../tests/subsys/mgmt/mcumgr/handler_demo/Kconfig
:language: kconfig
Initial CMakeLists.txt
======================
The CMakeLists.txt file is used by the build system to setup files to compile, include
directories to add and specify options that can be changed. A basic file only need to include the
source files if the Kconfig options are enabled. An example file would look similar to:
.. tabs::
.. group-tab:: Zephyr module
.. literalinclude:: ../../../tests/subsys/mgmt/mcumgr/handler_demo/example_as_module/CMakeLists.txt
:language: cmake
.. group-tab:: Application
.. literalinclude:: ../../../tests/subsys/mgmt/mcumgr/handler_demo/CMakeLists.txt
:language: cmake
:start-after: Include handler files
Including from application
**************************
Application-specific MCUmgr handlers can be added by creating/editing application build files.
Example modifications are shown below.
Example CMakeLists.txt
======================
The application ``CMakeLists.txt`` file can load the CMake file for the example MCUmgr handler by
adding the following:
.. code-block:: cmake
add_subdirectory(mcumgr/grp/<grp_name>)
Example Kconfig
===============
The application Kconfig file can include the Kconfig file for the example MCUmgr handler by adding
the following to the ``Kconfig`` file in the application directory (or creating it if it does not
exist):
.. code-block:: kconfig
rsource "mcumgr/grp/<grp_name>/Kconfig"
# Include Zephyr's Kconfig
source "Kconfig.zephyr"
Including from Zephyr Module
****************************
Zephyr :ref:`modules` can be used to add custom MCUmgr handlers to multiple different applications
without needing to duplicate the code in each application's source tree, see :ref:`module-yml` for
details on how to set up the module files. Example files are shown below.
Example zephyr/module.yml
=========================
This is an example file which can be used to load the Kconfig and CMake files from the root of the
module directory, and would be placed at ``zephyr/module.yml``:
.. code-block:: yaml
build:
kconfig: Kconfig
cmake: .
Example CMakeLists.txt
======================
This is an example CMakeLists.txt file which loads the CMake file for the example MCUmgr handler,
and would be placed at ``CMakeLists.txt``:
.. code-block:: cmake
add_subdirectory(mcumgr/grp/<grp_name>)
Example Kconfig
===============
This is an example Kconfig file which loads the Kconfig file for the example MCUmgr handler, and
would be placed at ``Kconfig``:
.. code-block:: kconfig
rsource "mcumgr/grp/<grp_name>/Kconfig"
Demonstration handler
*********************
There is a demonstration project which includes configuration for both application and zephyr
module-MCUmgr handlers which can be used as a basis for created your own in
:zephyr_file:`tests/subsys/mgmt/mcumgr/handler_demo/`.
``` | /content/code_sandbox/doc/services/device_mgmt/mcumgr_handlers.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,801 |
```restructuredtext
.. _mcumgr_smp_protocol_specification:
SMP Protocol Specification
##########################
This is description of Simple Management Protocol, SMP, that is used by
MCUmgr to pass requests to devices and receive responses from them.
SMP is an application layer protocol. The underlying transport layer is not
in scope of this documentation.
.. note::
SMP in this context refers to SMP for MCUmgr (Simple Management Protocol),
it is unrelated to SMP in Bluetooth (Security Manager Protocol), but there
is an MCUmgr SMP transport for Bluetooth.
Frame: The envelope
*******************
Each frame consists of a header and data. The ``Data Length`` field in the
header may be used for reassembly purposes if underlying transport layer supports
fragmentation.
Frames are encoded in "Big Endian" (Network endianness) when fields are more than
one byte long, and takes the following form:
.. _mcumgr_smp_protocol_frame:
.. table::
:align: center
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|3 |2 |1 |0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|7|6|5|4|3|2|1|0|7|6|5|4|3|2|1|0|7|6|5|4|3|2|1|0|7|6|5|4|3|2|1|0|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Res |Ver| OP | Flags | Data Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group ID | Sequence Num | Command ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Data |
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
.. note::
The original specification states that SMP should support receiving
both the "Little-endian" and "Big-endian" frames but in reality the
MCUmgr library is hardcoded to always treat "Network" side as
"Big-endian".
Data is optional and is not present when ``Data Length`` is zero.
The encoding of data depends on the target of group/ID.
A description of the various fields and their meaning:
.. table::
:align: center
+-------------------+---------------------------------------------------+
| Field | Description |
+===================+===================================================+
| ``Res`` | This is reserved, not-used field and must be |
| | always set to 0. |
+-------------------+---------------------------------------------------+
| ``Ver`` (Version) | This indicates the version of the protocol being |
| | used, this should be set to 0b01 to use the newer |
| | SMP transport where error codes are more detailed |
| | and returned in the map, otherwise left as 0b00 |
| | to use the legacy SMP protocol. Versions 0b10 and |
| | 0b11 are reserved for future use and should not |
| | be used. |
+-------------------+---------------------------------------------------+
| ``OP`` | :c:enum:`mcumgr_op_t`, determines whether |
| | information is written to a device or requested |
| | from it and whether a packet contains request to |
| | an SMP server or response from it. |
+-------------------+---------------------------------------------------+
| ``Flags`` | Reserved for flags; there are no flags defined |
| | yet, the field should be set to 0 |
+-------------------+---------------------------------------------------+
| ``Data Length`` | Length of the ``Data`` field |
+-------------------+---------------------------------------------------+
| ``Group ID`` | :c:enum:`mcumgr_group_t`, see |
| | :ref:`mcumgr_smp_protocol_group_ids` for further |
| | details. |
+-------------------+---------------------------------------------------+
| ``Sequence Num`` | This is a frame sequence number. |
| | The number is increased by one with each request |
| | frame. |
| | The Sequence Num of a response should match |
| | the one in the request. |
+-------------------+---------------------------------------------------+
| ``Command ID`` | This is a command, within ``Group``. |
+-------------------+---------------------------------------------------+
| ``Data`` | This is data payload of the ``Data Length`` |
| | size. It is optional as ``Data Length`` may be |
| | set to zero, which means that no data follows |
| | the header. |
+-------------------+---------------------------------------------------+
.. note::
Contents of ``Data`` depends on a value of an ``OP``, a ``Group ID``,
and a ``Command ID``.
.. _mcumgr_smp_protocol_group_ids:
Management ``Group ID``'s
=========================
The SMP protocol supports predefined common groups and allows user defined
groups. The following table presents a list of common groups:
.. table::
:align: center
+---------------+-----------------------------------------------+
| Decimal ID | Group description |
+===============+===============================================+
| ``0`` | :ref:`mcumgr_smp_group_0` |
+---------------+-----------------------------------------------+
| ``1`` | :ref:`mcumgr_smp_group_1` |
+---------------+-----------------------------------------------+
| ``2`` | :ref:`mcumgr_smp_group_2` |
+---------------+-----------------------------------------------+
| ``3`` | :ref:`mcumgr_smp_group_3` |
+---------------+-----------------------------------------------+
| ``4`` | Application/system log management |
| | (currently not used by Zephyr) |
+---------------+-----------------------------------------------+
| ``5`` | Run-time tests |
| | (unused by Zephyr) |
+---------------+-----------------------------------------------+
| ``6`` | Split image management |
| | (unused by Zephyr) |
+---------------+-----------------------------------------------+
| ``7`` | Test crashing application |
| | (unused by Zephyr) |
+---------------+-----------------------------------------------+
| ``8`` | :ref:`mcumgr_smp_group_8` |
+---------------+-----------------------------------------------+
| ``9`` | :ref:`mcumgr_smp_group_9` |
+---------------+-----------------------------------------------+
| ``63`` | :ref:`mcumgr_smp_group_63` |
+---------------+-----------------------------------------------+
| ``64`` | This is the base group for defining |
| | an application specific management groups. |
+---------------+-----------------------------------------------+
The payload for the above groups, except for user groups (``64`` and above),
is always CBOR encoded. Groups ``64`` and above may define their own scheme
for data communication.
Minimal response
****************
Regardless of a command issued, as long as there is SMP client on the
other side of a request, a response should be issued containing the header
followed by CBOR map container.
A lack of response is only allowed when there is no SMP service or the device
is non-responsive.
Minimal response SMP data
=========================
Minimal response is:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
    +------------------+------------------------------------------------------------------------+
    | "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only     |
    |                  | appears if an error is returned when using SMP version 2.              |
    +------------------+------------------------------------------------------------------------+
    | "err" -> "rc"    | contains the index of the group-based error code. Only appears if      |
    |                  | non-zero (error condition) when using SMP version 2.                   |
    +------------------+------------------------------------------------------------------------+
    | "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when |
    |                  | using SMP version 1 or for SMP errors when using SMP version 2.        |
    +------------------+------------------------------------------------------------------------+
Note that in the case of a successful command, an empty map will be returned
(``rc``/``err`` is only returned if there is an error condition); therefore, if
only an empty map is returned, or a response lacks these fields, the request can
be considered successful. For SMP version 2, errors relating to SMP itself that
are not group specific will still be returned as ``rc`` errors; SMP version 2
clients must therefore be able to handle both types of errors.
Specifications of management groups supported by Zephyr
*******************************************************
.. toctree::
:maxdepth: 1
smp_groups/smp_group_0.rst
smp_groups/smp_group_1.rst
smp_groups/smp_group_2.rst
smp_groups/smp_group_3.rst
smp_groups/smp_group_8.rst
smp_groups/smp_group_9.rst
``` | /content/code_sandbox/doc/services/device_mgmt/smp_protocol.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,130 |
```restructuredtext
.. _mcumgr_callbacks:
MCUmgr Callbacks
################
Overview
********
MCUmgr has a customisable callback/notification system that allows application
(and module) code to receive callbacks for MCUmgr events that they are
interested in and react to them or return a status code to the calling function
that provides control over if the action should be allowed or not. An example
of this is with the fs_mgmt group, whereby file access can be gated, the
callback allows the application to inspect the request path and allow or deny
access to said file, or it can rewrite the provided path to a different path
for transparent file redirection support.
Implementation
**************
Enabling
========
The base callback/notification system can be enabled using
:kconfig:option:`CONFIG_MCUMGR_MGMT_NOTIFICATION_HOOKS` which will compile the
registration and notification system into the code. This will not provide any
callbacks by default as the callbacks that are supported by a build must also
be selected by enabling the Kconfig's for the required callbacks (see
:ref:`mcumgr_cb_events` for further details). A callback function with the
:c:type:`mgmt_cb` type definition can then be declared and registered by
calling :c:func:`mgmt_callback_register` for the desired event inside of a
:c:struct:`mgmt_callback` structure. Handlers are called in the order that they
were registered.
With the system enabled, a basic handler can be set up and defined in
application code as per:
.. code-block:: c
#include <zephyr/kernel.h>
#include <zephyr/mgmt/mcumgr/mgmt/mgmt.h>
#include <zephyr/mgmt/mcumgr/mgmt/callbacks.h>
struct mgmt_callback my_callback;
enum mgmt_cb_return my_function(uint32_t event, enum mgmt_cb_return prev_status,
int32_t *rc, uint16_t *group, bool *abort_more,
void *data, size_t data_size)
{
if (event == MGMT_EVT_OP_CMD_DONE) {
/* This is the event we registered for */
}
/* Return OK status code to continue with acceptance to underlying handler */
return MGMT_CB_OK;
}
int main()
{
my_callback.callback = my_function;
my_callback.event_id = MGMT_EVT_OP_CMD_DONE;
mgmt_callback_register(&my_callback);
}
This code registers a handler for the :c:enumerator:`MGMT_EVT_OP_CMD_DONE`
event, which will be called after a MCUmgr command has been processed and
output generated, note that this requires that
:kconfig:option:`CONFIG_MCUMGR_SMP_COMMAND_STATUS_HOOKS` be enabled to receive
this callback.
Multiple callbacks can be set up to use a single function as a common callback,
and many different functions can be used for each event by registering each
group once. All notifications for a whole group can be enabled by using one of
the ``MGMT_EVT_OP_*_ALL`` events; alternatively, a handler can be set up for
every notification by using :c:enumerator:`MGMT_EVT_OP_ALL`. When setting up
handlers, only events in the same group may be combined: for example, 5
img_mgmt callbacks can be set up with a single registration call, but to also
set up a callback for an os_mgmt event, a separate registration is required.
Group IDs are numerical increments, whereas event IDs are bitmask values,
hence the restriction.
As an example, the following registration is allowed, which will register for 3
SMP events with a single callback function in a single registration:
.. code-block:: c
my_callback.callback = my_function;
my_callback.event_id = (MGMT_EVT_OP_CMD_RECV |
MGMT_EVT_OP_CMD_STATUS |
MGMT_EVT_OP_CMD_DONE);
mgmt_callback_register(&my_callback);
The following code is not allowed, and will cause undefined operation, because
it mixes the IMG management group with the OS management group whereby the
group is **not** a bitmask value, only the event is:
.. code-block:: c
my_callback.callback = my_function;
my_callback.event_id = (MGMT_EVT_OP_IMG_MGMT_DFU_STARTED |
MGMT_EVT_OP_OS_MGMT_RESET);
mgmt_callback_register(&my_callback);
.. _mcumgr_cb_events:
Events
======
Events can be selected by enabling their corresponding Kconfig option:
- :kconfig:option:`CONFIG_MCUMGR_SMP_COMMAND_STATUS_HOOKS`
MCUmgr command status (:c:enumerator:`MGMT_EVT_OP_CMD_RECV`,
:c:enumerator:`MGMT_EVT_OP_CMD_STATUS`,
:c:enumerator:`MGMT_EVT_OP_CMD_DONE`)
- :kconfig:option:`CONFIG_MCUMGR_GRP_FS_FILE_ACCESS_HOOK`
fs_mgmt file access (:c:enumerator:`MGMT_EVT_OP_FS_MGMT_FILE_ACCESS`)
- :kconfig:option:`CONFIG_MCUMGR_GRP_IMG_UPLOAD_CHECK_HOOK`
img_mgmt upload check (:c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_CHUNK`)
- :kconfig:option:`CONFIG_MCUMGR_GRP_IMG_STATUS_HOOKS`
img_mgmt upload status (:c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_STOPPED`,
:c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_STARTED`,
:c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_PENDING`,
:c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_CONFIRMED`)
- :kconfig:option:`CONFIG_MCUMGR_GRP_OS_RESET_HOOK`
os_mgmt reset check (:c:enumerator:`MGMT_EVT_OP_OS_MGMT_RESET`)
- :kconfig:option:`CONFIG_MCUMGR_GRP_SETTINGS_ACCESS_HOOK`
settings_mgmt access (:c:enumerator:`MGMT_EVT_OP_SETTINGS_MGMT_ACCESS`)
Actions
=======
Some callbacks expect a return status to either allow or disallow an operation,
an example is the fs_mgmt access hook which allows for access to files to be
allowed or denied. With these handlers, the first non-OK error code returned
by a handler will be returned to the MCUmgr client.
An example of selectively denying file access:
.. code-block:: c
#include <zephyr/kernel.h>
#include <zephyr/mgmt/mcumgr/mgmt/mgmt.h>
#include <zephyr/mgmt/mcumgr/mgmt/callbacks.h>
#include <string.h>
struct mgmt_callback my_callback;
enum mgmt_cb_return my_function(uint32_t event, enum mgmt_cb_return prev_status,
int32_t *rc, uint16_t *group, bool *abort_more,
void *data, size_t data_size)
{
/* Only run this handler if a previous handler has not failed */
if (event == MGMT_EVT_OP_FS_MGMT_FILE_ACCESS && prev_status == MGMT_CB_OK) {
struct fs_mgmt_file_access *fs_data = (struct fs_mgmt_file_access *)data;
/* Check if this is an upload and deny access if it is, otherwise check
* the path and deny if is matches a name
*/
if (fs_data->access == FS_MGMT_FILE_ACCESS_WRITE) {
/* Return an access denied error code to the client and abort calling
* further handlers
*/
*abort_more = true;
*rc = MGMT_ERR_EACCESSDENIED;
return MGMT_CB_ERROR_RC;
} else if (strcmp(fs_data->filename, "/lfs1/false_deny.txt") == 0) {
/* Return a no entry error code to the client, call additional handlers
* (which will have failed set to true)
*/
*rc = MGMT_ERR_ENOENT;
return MGMT_CB_ERROR_RC;
}
}
/* Return OK status code to continue with acceptance to underlying handler */
return MGMT_CB_OK;
}
int main()
{
my_callback.callback = my_function;
my_callback.event_id = MGMT_EVT_OP_FS_MGMT_FILE_ACCESS;
mgmt_callback_register(&my_callback);
}
This code registers a handler for the
:c:enumerator:`MGMT_EVT_OP_FS_MGMT_FILE_ACCESS` event, which will be called
after a fs_mgmt file read/write command has been received to check if access to
the file should be allowed or not, note that this requires that
:kconfig:option:`CONFIG_MCUMGR_GRP_FS_FILE_ACCESS_HOOK` be enabled to receive
this callback.
Two types of errors can be returned, the ``rc`` parameter can be set to an
:c:enum:`mcumgr_err_t` error code and :c:enumerator:`MGMT_CB_ERROR_RC`
can be returned, or a group error code (introduced with version 2 of the MCUmgr
protocol) can be set by setting the ``group`` value to the group and ``rc``
value to the group error code and returning :c:enumerator:`MGMT_CB_ERROR_ERR`.
MCUmgr Command Callback Usage/Adding New Event Types
====================================================
To add a callback to a MCUmgr command, :c:func:`mgmt_callback_notify` can be
called with the event ID and, optionally, a data struct to pass to the callback
(which can be modified by handlers). If no data needs to be passed back,
``NULL`` can be used instead, and size of the data set to 0.
An example MCUmgr command handler:
.. code-block:: c
#include <zephyr/kernel.h>
#include <zcbor_common.h>
#include <zcbor_encode.h>
#include <zephyr/mgmt/mcumgr/smp/smp.h>
#include <zephyr/mgmt/mcumgr/mgmt/mgmt.h>
#include <zephyr/mgmt/mcumgr/mgmt/callbacks.h>
#define MGMT_EVT_GRP_USER_ONE MGMT_EVT_GRP_USER_CUSTOM_START
enum user_one_group_events {
/** Callback on first post, data is test_struct. */
MGMT_EVT_OP_USER_ONE_FIRST = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_USER_ONE, 0),
/** Callback on second post, data is test_struct. */
MGMT_EVT_OP_USER_ONE_SECOND = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_USER_ONE, 1),
/** Used to enable all user_one events. */
MGMT_EVT_OP_USER_ONE_ALL = MGMT_DEF_EVT_OP_ALL(MGMT_EVT_GRP_USER_ONE),
};
struct test_struct {
uint8_t some_value;
};
static int test_command(struct mgmt_ctxt *ctxt)
{
int rc;
int err_rc;
uint16_t err_group;
zcbor_state_t *zse = ctxt->cnbe->zs;
bool ok;
struct test_struct test_data = {
.some_value = 8,
};
rc = mgmt_callback_notify(MGMT_EVT_OP_USER_ONE_FIRST, &test_data,
sizeof(test_data), &err_rc, &err_group);
if (rc != MGMT_CB_OK) {
/* A handler returned a failure code */
if (rc == MGMT_CB_ERROR_RC) {
/* The failure code is the RC value */
return err_rc;
}
/* The failure is a group and ID error value */
ok = smp_add_cmd_err(zse, err_group, (uint16_t)err_rc);
goto end;
}
/* All handlers returned success codes */
ok = zcbor_tstr_put_lit(zse, "output_value") &&
zcbor_int32_put(zse, 1234);
end:
rc = (ok ? MGMT_ERR_EOK : MGMT_ERR_EMSGSIZE);
return rc;
}
If no response is required for the callback, the function can be called
and cast to void.
.. _mcumgr_cb_migration:
Migration
*********
If there is existing code using the previous callback system(s) in Zephyr 3.2
or earlier, then it will need to be migrated to the new system. To migrate
code, the following callback registration functions will need to be migrated
to register for callbacks using :c:func:`mgmt_callback_register` (note that
:kconfig:option:`CONFIG_MCUMGR_MGMT_NOTIFICATION_HOOKS` will need to be set to
enable the new notification system in addition to any migrations):
* mgmt_evt
Using :c:enumerator:`MGMT_EVT_OP_CMD_RECV`,
:c:enumerator:`MGMT_EVT_OP_CMD_STATUS`, or
:c:enumerator:`MGMT_EVT_OP_CMD_DONE` as drop-in replacements for events of
the same name, where the provided data is :c:struct:`mgmt_evt_op_cmd_arg`.
:kconfig:option:`CONFIG_MCUMGR_SMP_COMMAND_STATUS_HOOKS` needs to be set.
* fs_mgmt_register_evt_cb
Using :c:enumerator:`MGMT_EVT_OP_FS_MGMT_FILE_ACCESS` where the provided
data is :c:struct:`fs_mgmt_file_access`. Instead of returning true to allow
the action or false to deny, a MCUmgr result code needs to be returned,
:c:enumerator:`MGMT_ERR_EOK` will allow the action, any other return code
will disallow it and return that code to the client
(:c:enumerator:`MGMT_ERR_EACCESSDENIED` can be used for an access denied
error). :kconfig:option:`CONFIG_MCUMGR_GRP_FS_FILE_ACCESS_HOOK` needs to be
set.
* img_mgmt_register_callbacks
Using :c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_STARTED` if
``dfu_started_cb`` was used,
:c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_STOPPED` if ``dfu_stopped_cb`` was
used, :c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_PENDING` if
``dfu_pending_cb`` was used or
:c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_CONFIRMED` if ``dfu_confirmed_cb``
was used. These callbacks do not have any return status.
:kconfig:option:`CONFIG_MCUMGR_GRP_IMG_STATUS_HOOKS` needs to be set.
* img_mgmt_set_upload_cb
Using :c:enumerator:`MGMT_EVT_OP_IMG_MGMT_DFU_CHUNK` where the provided
data is :c:struct:`img_mgmt_upload_check`. Instead of returning true to
allow the action or false to deny, a MCUmgr result code needs to be
returned, :c:enumerator:`MGMT_ERR_EOK` will allow the action, any other
return code will disallow it and return that code to the client
(:c:enumerator:`MGMT_ERR_EACCESSDENIED` can be used for an access denied
error). :kconfig:option:`CONFIG_MCUMGR_GRP_IMG_UPLOAD_CHECK_HOOK` needs to
be set.
* os_mgmt_register_reset_evt_cb
Using :c:enumerator:`MGMT_EVT_OP_OS_MGMT_RESET`. Instead of returning
true to allow the action or false to deny, a MCUmgr result code needs to be
returned, :c:enumerator:`MGMT_ERR_EOK` will allow the action, any other
return code will disallow it and return that code to the client
(:c:enumerator:`MGMT_ERR_EACCESSDENIED` can be used for an access denied
error). :kconfig:option:`CONFIG_MCUMGR_GRP_OS_RESET_HOOK` needs to
be set.
API Reference
*************
.. doxygengroup:: mcumgr_callback_api
``` | /content/code_sandbox/doc/services/device_mgmt/mcumgr_callbacks.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,350 |
```restructuredtext
.. _mcumgr_smp_group_2:
Statistics management
#####################
Statistics management allows to obtain data gathered by Statistics subsystem
of Zephyr, enabled with :kconfig:option:`CONFIG_STATS`.
Statistics management group defines commands:
.. table::
:align: center
+-------------------+-----------------------------------------------+
| ``Command ID`` | Command description |
+===================+===============================================+
| ``0`` | Group data |
+-------------------+-----------------------------------------------+
| ``1`` | List groups |
+-------------------+-----------------------------------------------+
Statistics: group data
**********************
The command is used to obtain data for group specified by a name.
The name is one of group names as registered, with
:c:macro:`STATS_INIT_AND_REG` macro or :c:func:`stats_init_and_reg` function
call, within module that gathers the statistics.
Statistics: group data request
==============================
Statistics group data request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``2`` | ``0`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"name" : (str)
}
where:
.. table::
:align: center
+-----------------------+---------------------------------------------------+
| "name" | group name. |
+-----------------------+---------------------------------------------------+
Statistics: group data response
===============================
Statistics group data response header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``2`` | ``0`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"name" : (str)
(str)"fields" : {
(str)<entry_name> : (uint)
...
}
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "name"           | this is name of group the response contains data for.                   |
+------------------+-------------------------------------------------------------------------+
| "fields"         | this is map of entries within groups that consists of pairs where the   |
|                  | entry name is mapped to value it represents in statistics.              |
+------------------+-------------------------------------------------------------------------+
| <entry_name>     | single entry to value mapping; value is hardcoded to unsigned integer   |
|                  | type, in a CBOR meaning.                                                |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Statistics: list of groups
**************************
The command is used to obtain list of groups of statistics that are gathered
on a device. This is a list of names as given to groups with
:c:macro:`STATS_INIT_AND_REG` macro or :c:func:`stats_init_and_reg` function
calls, within module that gathers the statistics; this means that this command
may be considered optional as it is known during compilation what groups will
be included into build and listing them is not needed prior to issuing a query.
Statistics: list of groups request
==================================
Statistics group list request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``2`` | ``1`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
Statistics: list of groups response
===================================
Statistics group list request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``2`` | ``1`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"stat_list" : [
(str)<stat_group_name>, ...
]
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "stat_list"      | array of strings representing group names; this array may be empty if   |
|                  | there are no groups.                                                    |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
``` | /content/code_sandbox/doc/services/device_mgmt/smp_groups/smp_group_2.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,580 |
```restructuredtext
.. _mcumgr_smp_group_8:
File management
###############
The file management group provides commands that allow to upload and download files
to/from a device.
File management group defines following commands:
.. table::
:align: center
+-------------------+-----------------------------------------------+
| ``Command ID`` | Command description |
+===================+===============================================+
| ``0`` | File download/upload |
+-------------------+-----------------------------------------------+
| ``1`` | File status |
+-------------------+-----------------------------------------------+
| ``2`` | File hash/checksum |
+-------------------+-----------------------------------------------+
| ``3`` | Supported file hash/checksum types |
+-------------------+-----------------------------------------------+
| ``4`` | File close |
+-------------------+-----------------------------------------------+
File download
*************
Command allows to download contents of an existing file from specified path
of a target device. Client applications must keep track of data they have
already downloaded and where their position in the file is (MCUmgr will cache
these also), and issue subsequent requests, with modified offset, to gather
the entire file.
Request does not carry size of requested chunk, the size is specified
by application itself.
Note that file handles will remain open for consecutive requests (as long as
an idle timeout has not been reached and another transport does not make use
of uploading/downloading files using fs_mgmt), but files are not exclusively
owned by MCUmgr, for the time of download session, and may change between
requests or even be removed.
.. note::
By default, all file upload/download requests are unconditionally allowed.
However, if the Kconfig option
:kconfig:option:`CONFIG_MCUMGR_GRP_FS_FILE_ACCESS_HOOK` is enabled, then an
application can register a callback handler for
:c:enumerator:`MGMT_EVT_OP_FS_MGMT_FILE_ACCESS` (see
:ref:`MCUmgr callbacks <mcumgr_callbacks>`), which allows for allowing or
declining access to reading/writing a particular file, or for rewriting the
path supplied by the client.
File download request
=====================
File download request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``8`` | ``0`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"off" : (uint)
(str)"name" : (str)
}
where:
.. table::
:align: center
+-----------------------+---------------------------------------------------+
| "off" | offset to start download at |
+-----------------------+---------------------------------------------------+
| "name" | absolute path to a file |
+-----------------------+---------------------------------------------------+
File download response
======================
File download response header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``8`` | ``0`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"off" : (uint)
(str)"data" : (byte str)
(str,opt)"len" : (uint)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "off"            | offset the response is for.                                             |
+------------------+-------------------------------------------------------------------------+
| "data"           | chunk of data read from file; it is CBOR encoded stream of bytes with   |
|                  | embedded size; "data" appears only in responses where "rc" is 0.        |
+------------------+-------------------------------------------------------------------------+
| "len"            | length of file, this field is only mandatory when "off" is 0.           |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
File upload
***********
Allows to upload a file to a specified location. Command will automatically overwrite
existing file or create a new one if it does not exist at specified path.
The protocol supports stateless upload where each requests carries different chunk
of a file and it is client side responsibility to track progress of upload.
Note that file handles will remain open for consecutive requests (as long as
an idle timeout has not been reached and another transport does not make use
of uploading/downloading files using fs_mgmt), but files are not exclusively
owned by MCUmgr, for the time of upload session, and may change between
requests or even be removed.
.. note::
The current Zephyr implementation is only partially stateless, as it is
able to hold a single upload context, holding information on the ongoing
upload, that consists of a bool flag indicating an in-progress upload,
the last successfully uploaded offset and the total length only.
.. note::
By default, all file upload/download requests are unconditionally allowed.
However, if the Kconfig option
:kconfig:option:`CONFIG_MCUMGR_GRP_FS_FILE_ACCESS_HOOK` is enabled, then an
application can register a callback handler for
:c:enumerator:`MGMT_EVT_OP_FS_MGMT_FILE_ACCESS` (see
:ref:`MCUmgr callbacks <mcumgr_callbacks>`), which allows for allowing or
declining access to reading/writing a particular file, or for rewriting the
path supplied by the client.
File upload request
===================
File upload request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``8`` | ``0`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"off" : (uint)
(str)"data" : (str)
(str)"name" : (str)
(str,opt)"len" : (uint)
}
where:
.. table::
:align: center
+-----------------------+---------------------------------------------------+
| "off" | offset to start/continue upload at. |
+-----------------------+---------------------------------------------------+
| "data" | chunk of data to write to the file; |
| | it is CBOR encoded with length embedded. |
+-----------------------+---------------------------------------------------+
| "name" | absolute path to a file. |
+-----------------------+---------------------------------------------------+
| "len" | length of file, this field is only mandatory |
| | when "off" is 0. |
+-----------------------+---------------------------------------------------+
File upload response
====================
File upload response header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``8`` | ``0`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"off" : (uint)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "off"            | offset of last successfully written data.                               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
File status
***********
Command allows to retrieve status of an existing file from specified path
of a target device.
File status request
===================
File status request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``8`` | ``1`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"name" : (str)
}
where:
.. table::
:align: center
+-----------------------+---------------------------------------------------+
| "name" | absolute path to a file. |
+-----------------------+---------------------------------------------------+
File status response
====================
File status response header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``8`` | ``1`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"len" : (uint)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "len"            | length of file (in bytes).                                              |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
File hash/checksum
******************
Command allows to generate a hash/checksum of an existing file at a specified
path on a target device. Note that kernel heap memory is required for buffers to
be allocated for this to function, and large stack memory buffers are required
for generation of the output hash/checksum.
Requires :kconfig:option:`CONFIG_MCUMGR_GRP_FS_CHECKSUM_HASH` to be enabled for
the base functionality, supported hash/checksum are opt-in with
:kconfig:option:`CONFIG_MCUMGR_GRP_FS_CHECKSUM_IEEE_CRC32` or
:kconfig:option:`CONFIG_MCUMGR_GRP_FS_HASH_SHA256`.
File hash/checksum request
==========================
File hash/checksum request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``8`` | ``2`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"name" : (str)
(str,opt)"type" : (str)
(str,opt)"off" : (uint)
(str,opt)"len" : (uint)
}
where:
.. table::
:align: center
+-----------------------+---------------------------------------------------+
| "name" | absolute path to a file. |
+-----------------------+---------------------------------------------------+
| "type" | type of hash/checksum to perform |
| | :ref:`mcumgr_group_8_hash_checksum_types` or omit |
| | to use default. |
+-----------------------+---------------------------------------------------+
| "off" | offset to start hash/checksum calculation at |
| | (optional, 0 if not provided). |
+-----------------------+---------------------------------------------------+
| "len" | maximum length of data to read from file to |
| | generate hash/checksum with (optional, full file |
| | size if not provided). |
+-----------------------+---------------------------------------------------+
.. _mcumgr_group_8_hash_checksum_types:
Hash/checksum types
===================
.. table::
:align: center
+-------------+--------------------------------------+-------------+--------------+
| String name | Hash/checksum | Byte string | Size (bytes) |
+=============+======================================+=============+==============+
| ``crc32`` | IEEE CRC32 checksum | No | 4 |
+-------------+--------------------------------------+-------------+--------------+
| ``sha256`` | SHA256 (Secure Hash Algorithm) | Yes | 32 |
+-------------+--------------------------------------+-------------+--------------+
Note that the default type will be crc32 if it is enabled, or sha256 if crc32 is
not enabled.
File hash/checksum response
===========================
File hash/checksum response header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``8`` | ``2`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"type" : (str)
(str,opt)"off" : (uint)
(str)"len" : (uint)
(str)"output" : (uint or bstr)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "type"           | type of hash/checksum that was performed                                |
|                  | :ref:`mcumgr_group_8_hash_checksum_types`.                              |
+------------------+-------------------------------------------------------------------------+
| "off"            | offset that hash/checksum calculation started at (only present if not   |
|                  | 0).                                                                     |
+------------------+-------------------------------------------------------------------------+
| "len"            | length of input data used for hash/checksum generation (in bytes).      |
+------------------+-------------------------------------------------------------------------+
| "output"         | output hash/checksum.                                                   |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Supported file hash/checksum types
**********************************
Command allows listing which hash and checksum types are available on a device.
Requires Kconfig :kconfig:option:`CONFIG_MCUMGR_GRP_FS_CHECKSUM_HASH_SUPPORTED_CMD`
to be enabled.
Supported file hash/checksum types request
==========================================
Supported file hash/checksum types request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``8`` | ``3`` |
+--------+--------------+----------------+
The command sends empty CBOR map as data.
Supported file hash/checksum types response
===========================================
Supported file hash/checksum types response header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``8`` | ``3`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"types" : {
(str)<hash_checksum_name> : {
(str)"format" : (uint)
(str)"size" : (uint)
}
...
}
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+----------------------+-------------------------------------------------------------------------+
| <hash_checksum_name> | name of the hash/checksum type                                          |
|                      | :ref:`mcumgr_group_8_hash_checksum_types`.                              |
+----------------------+-------------------------------------------------------------------------+
| "format"             | format that the hash/checksum returns where 0 is for numerical and 1 is |
|                      | for byte array.                                                         |
+----------------------+-------------------------------------------------------------------------+
| "size"               | size (in bytes) of output hash/checksum response.                       |
+----------------------+-------------------------------------------------------------------------+
| "err" -> "group"     | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                      | appears if an error is returned when using SMP version 2.               |
+----------------------+-------------------------------------------------------------------------+
| "err" -> "rc"        | contains the index of the group-based error code. Only appears if       |
|                      | non-zero (error condition) when using SMP version 2.                    |
+----------------------+-------------------------------------------------------------------------+
| "rc"                 | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                      | using SMP version 1 or for SMP errors when using SMP version 2.         |
+----------------------+-------------------------------------------------------------------------+
File close
**********
Command allows closing any open file handles held by fs_mgmt upload/download
requests that might have stalled or be incomplete.
File close request
==================
File close request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``8`` | ``4`` |
+--------+--------------+----------------+
The command sends empty CBOR map as data.
File close response
===================
File close response header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``8`` | ``4`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful.
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
``` | /content/code_sandbox/doc/services/device_mgmt/smp_groups/smp_group_8.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,361 |
```restructuredtext
.. _mcumgr_smp_group_3:
Settings (Config) Management Group
##################################
Settings management group (known as Configuration Manager in the original MCUmgr repository)
defines the following commands:
.. table::
:align: center
+----------------+------------------------------+
| ``Command ID`` | Command description |
+================+==============================+
| ``0`` | Read/write setting |
+----------------+------------------------------+
| ``1`` | Delete setting |
+----------------+------------------------------+
| ``2`` | Commit settings |
+----------------+------------------------------+
| ``3`` | Load/Save settings |
+----------------+------------------------------+
Note that the Zephyr version adds additional commands and features which are not supported by
the original upstream version, however, the original client functionality should work for
read/write functionality.
Read/write setting command
**************************
Read/write setting command allows updating a setting entry on a device or
getting the current value of a setting from a device.
Read setting request
====================
Read setting request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``3`` | ``0`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"name" : (str)
(str,opt)"max_size" : (uint)
}
where:
.. table::
:align: center
+------------+-----------------------------------------+
| "name" | string of the setting to retrieve |
+------------+-----------------------------------------+
| "max_size" | optional maximum size of data to return |
+------------+-----------------------------------------+
Read setting response
=====================
Read setting response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``3`` | ``0`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"val" : (bstr)
(str,opt)"max_size" : (uint)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "val"            | binary string of the returned data, note that the underlying data type  |
|                  | cannot be specified through this and must be known by the client.       |
+------------------+-------------------------------------------------------------------------+
| "max_size"       | will be set if the maximum supported data size is smaller than the      |
|                  | maximum requested data size, and contains the maximum data size which   |
|                  | the device supports, equivalent to                                      |
|                  | :kconfig:option:`CONFIG_MCUMGR_GRP_SETTINGS_NAME_LEN`.                  |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Write setting request
=====================
Write setting request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``3`` | ``0`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"name" : (str)
(str)"val" : (bstr)
}
where:
.. table::
:align: center
+--------+-------------------------------------+
| "name" | string of the setting to update/set |
+--------+-------------------------------------+
| "val" | value to set the setting to |
+--------+-------------------------------------+
Write setting response
======================
Write setting response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``3`` | ``0`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful. In case of error the CBOR data takes
the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Delete setting command
**********************
Delete setting command allows deleting a setting on a device.
Delete setting request
======================
Delete setting request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``3`` | ``1`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"name" : (str)
}
where:
.. table::
:align: center
+--------+---------------------------------+
| "name" | string of the setting to delete |
+--------+---------------------------------+
Delete setting response
=======================
Delete setting response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``3`` | ``1`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful. In case of error the CBOR data takes
the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Commit settings command
***********************
Commit settings command allows committing all settings that have been set but not yet applied on a
device.
Commit settings request
=======================
Commit settings request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``3`` | ``2`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
Commit settings response
========================
Commit settings response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``3`` | ``2`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful. In case of error the CBOR data takes
the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Load/Save settings command
**************************
Load/Save settings command allows loading/saving all serialized items from/to persistent storage
on a device.
Load settings request
=====================
Load settings request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``3`` | ``3`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
Load settings response
======================
Load settings response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``3`` | ``3`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful. In case of error the CBOR data takes
the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Save settings request
=====================
Save settings request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``3`` | ``3`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
Save settings response
======================
Save settings response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``3`` | ``3`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful. In case of error the CBOR data takes
the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Settings access callback
************************
There is a settings access MCUmgr callback available (see :ref:`mcumgr_callbacks` for details on
callbacks) which allows for applications/modules to know when settings management commands are
used and, optionally, block access (for example through the use of a security mechanism). This
callback can be enabled with :kconfig:option:`CONFIG_MCUMGR_GRP_SETTINGS_ACCESS_HOOK`, registered
with the event :c:enumerator:`MGMT_EVT_OP_SETTINGS_MGMT_ACCESS`, whereby the supplied callback data
is :c:struct:`settings_mgmt_access`.
``` | /content/code_sandbox/doc/services/device_mgmt/smp_groups/smp_group_3.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,691 |
```restructuredtext
.. _mcumgr_smp_group_63:
Zephyr Management Group
#######################
Zephyr management group defines the following commands:
.. table::
:align: center
+----------------+------------------------------+
| ``Command ID`` | Command description |
+================+==============================+
| ``0`` | Erase storage |
+----------------+------------------------------+
Erase storage command
*********************
Erase storage command allows clearing the ``storage_partition`` flash partition on a device,
generally this is used when switching to a new application build if the application uses storage
that should be cleared (application dependent).
Erase storage request
=====================
Erase storage request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``63`` | ``0`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
Erase storage response
======================
Erase storage response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``63`` | ``0`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful. In case of error the CBOR data takes
the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
``` | /content/code_sandbox/doc/services/device_mgmt/smp_groups/smp_group_63.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 617 |
```restructuredtext
.. _mcumgr_smp_group_9:
Shell management
################
Shell management allows passing commands to the shell subsystem over the SMP
protocol.
Shell management group defines the following commands:
.. table::
:align: center
+-------------------+-----------------------------------------------+
| ``Command ID`` | Command description |
+===================+===============================================+
| ``0`` | Shell command line execute |
+-------------------+-----------------------------------------------+
Shell command line execute
**************************
The command allows executing a command line in a similar way to typing it into
a shell, but both the request and the response are transported over SMP.
Shell command line execute request
==================================
Execute command request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``9`` | ``0`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"argv" : [
(str)<cmd>
(str,opt)<arg>
...
]
}
where:
.. table::
:align: center
+-----------------------+---------------------------------------------------+
| "argv" | array consisting of strings representing command |
| | and its arguments. |
+-----------------------+---------------------------------------------------+
| <cmd> | command to be executed. |
+-----------------------+---------------------------------------------------+
| <arg> | optional arguments to command. |
+-----------------------+---------------------------------------------------+
Shell command line execute response
===================================
Command line execute response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``9`` | ``0`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"o" : (str)
(str)"ret" : (int)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "o"              | command output.                                                         |
+------------------+-------------------------------------------------------------------------+
| "ret"            | return code from shell command execution.                               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
.. note::
In older versions of Zephyr, "rc" was used for both the mcumgr status code
and shell command execution return code, this legacy behaviour can be
restored by enabling :kconfig:option:`CONFIG_MCUMGR_GRP_SHELL_LEGACY_RC_RETURN_CODE`
``` | /content/code_sandbox/doc/services/device_mgmt/smp_groups/smp_group_9.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 894 |
```restructuredtext
.. _mcumgr_smp_group_0:
Default/OS Management Group
###########################
OS management group defines the following commands:
.. table::
:align: center
+-------------------+-----------------------------------------------+
| ``Command ID`` | Command description |
+===================+===============================================+
| ``0`` | Echo |
+-------------------+-----------------------------------------------+
| ``1`` | Console/Terminal echo control; |
| | unimplemented by Zephyr |
+-------------------+-----------------------------------------------+
| ``2`` | Task Statistics |
+-------------------+-----------------------------------------------+
| ``3`` | Memory pool statistics |
+-------------------+-----------------------------------------------+
| ``4`` | Date-time string |
+-------------------+-----------------------------------------------+
| ``5`` | System reset |
+-------------------+-----------------------------------------------+
| ``6`` | MCUMGR parameters |
+-------------------+-----------------------------------------------+
| ``7`` | OS/Application info |
+-------------------+-----------------------------------------------+
| ``8`` | Bootloader information |
+-------------------+-----------------------------------------------+
Echo command
************
Echo command responds by sending back the string that it has received.
Echo request
============
Echo request header fields:
.. table::
:align: center
+--------------------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+====================+==============+================+
| ``0`` or ``2`` | ``0`` | ``0`` |
+--------------------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str)"d" : (str)
}
where:
.. table::
:align: center
+-----------------------+---------------------------------------------------+
| "d" | string to be replied by echo service. |
+-----------------------+---------------------------------------------------+
Echo response
=============
Echo response header fields:
.. table::
:align: center
+--------+--------------+----------------+----------------------------------+
| ``OP`` | ``Group ID`` | ``Command ID`` | Note |
+========+==============+================+==================================+
| ``1`` | ``0`` | ``0`` | When request ``OP`` was ``0`` |
+--------+--------------+----------------+----------------------------------+
| ``3`` | ``0`` | ``0`` | When request ``OP`` was ``2`` |
+--------+--------------+----------------+----------------------------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"r" : (str)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "r"              | replying echo string.                                                   |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Task statistics command
***********************
The command responds with some system statistics.
Task statistics request
=======================
Task statistics request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``0`` | ``2`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
Task statistics response
========================
Task statistics response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``0`` | ``2`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"tasks" : {
(str)<task_name> : {
(str)"prio" : (uint)
(str)"tid" : (uint)
(str)"state" : (uint)
(str)"stkuse" : (uint)
(str)"stksiz" : (uint)
(str)"cswcnt" : (uint)
(str)"runtime" : (uint)
(str)"last_checkin" : (uint)
(str)"next_checkin" : (uint)
}
...
}
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| <task_name>      | string identifying task.                                                |
+------------------+-------------------------------------------------------------------------+
| "prio"           | task priority.                                                          |
+------------------+-------------------------------------------------------------------------+
| "tid"            | numeric task ID.                                                        |
+------------------+-------------------------------------------------------------------------+
| "state"          | numeric task state.                                                     |
+------------------+-------------------------------------------------------------------------+
| "stkuse"         | task's/thread's stack usage.                                            |
+------------------+-------------------------------------------------------------------------+
| "stksiz"         | task's/thread's stack size.                                             |
+------------------+-------------------------------------------------------------------------+
| "cswcnt"         | task's/thread's context switches.                                       |
+------------------+-------------------------------------------------------------------------+
| "runtime"        | task's/thread's runtime in "ticks".                                     |
+------------------+-------------------------------------------------------------------------+
| "last_checkin"   | set to 0 by Zephyr.                                                     |
+------------------+-------------------------------------------------------------------------+
| "next_checkin"   | set to 0 by Zephyr.                                                     |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
.. note::
The unit for "stkuse" and "stksiz" is system dependent and in case of Zephyr
this is number of 4 byte words.
Memory pool statistics
**********************
The command is used to obtain information on memory pools active in running
system.
Memory pool statistic request
=============================
Memory pool statistics request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``0`` | ``3`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
Memory pool statistics response
===============================
Memory pool statistics response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``0`` | ``3`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)<pool_name> {
(str)"blksiz" : (int)
(str)"nblks" : (int)
(str)"nfree" : (int)
(str)"min" : (int)
}
...
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| <pool_name>      | string representing the pool name, used as a key for dictionary with    |
|                  | pool statistics data.                                                   |
+------------------+-------------------------------------------------------------------------+
| "blksiz"         | size of the memory block in the pool.                                   |
+------------------+-------------------------------------------------------------------------+
| "nblks"          | number of blocks in the pool.                                           |
+------------------+-------------------------------------------------------------------------+
| "nfree"          | number of free blocks.                                                  |
+------------------+-------------------------------------------------------------------------+
| "min"            | lowest number of free blocks the pool reached during run-time.          |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Date-time command
*****************
The command allows to obtain string representing current time-date on a device
or set a new time to a device.
The time format used, by both set and get operations, is:
"yyyy-MM-dd'T'HH:mm:ss.SSSSSSZZZZZ"
Date-time get
=============
The command allows to obtain date-time from a device.
Date-time get request
---------------------
Date-time request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``0`` | ``4`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
Date-time get response
----------------------
Date-time get response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``0`` | ``4`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"datetime" : (str)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "datetime"       | String in format: ``yyyy-MM-dd'T'HH:mm:ss.SSSSSSZZZZZ``.                |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Date-time set
=============
The command allows to set date-time to a device.
Date-time set request
---------------------
Date-time set request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``0`` | ``4`` |
+--------+--------------+----------------+
CBOR data of response:
.. code-block:: none
{
(str)"datetime" : (str)
}
where:
.. table::
:align: center
+---------------+----------------------------------------------------------+
| "datetime" | String in format: ``yyyy-MM-dd'T'HH:mm:ss.SSSSSSZZZZZ``. |
+---------------+----------------------------------------------------------+
Date-time set response
----------------------
Date-time set response header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``0`` | ``4`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful. In case of error the
CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
System reset
************
Performs a reset of the system. The device should issue a response before
resetting so that the SMP client receives confirmation that the command has
been accepted. By default, this command is accepted in all conditions, however if
the :kconfig:option:`CONFIG_MCUMGR_GRP_OS_RESET_HOOK` is enabled and an
application registers a callback, the callback will be called when this command
is issued and can be used to perform any necessary tidy operations prior to the
module rebooting, or to reject the reset request outright altogether with an
error response. For details on this functionality, see :ref:`mcumgr_callbacks`.
System reset request
====================
System reset request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``2`` | ``0`` | ``5`` |
+--------+--------------+----------------+
Normally the command sends an empty CBOR map as data, but if a previous reset
attempt has responded with "rc" equal to :c:enumerator:`MGMT_ERR_EBUSY` then the
following map may be sent to force a reset:
.. code-block:: none
{
(opt)"force" : (int)
}
where:
.. table::
:align: center
+-----------------------+---------------------------------------------------+
| "force" | Force reset if value > 0, optional if 0. |
+-----------------------+---------------------------------------------------+
System reset response
=====================
System reset response header fields
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``3`` | ``0`` | ``5`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data if successful. In case of error the
CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
MCUmgr Parameters
*****************
Used to obtain parameters of mcumgr library.
MCUmgr Parameters Request
=========================
MCUmgr parameters request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``0`` | ``6`` |
+--------+--------------+----------------+
The command sends an empty CBOR map as data.
MCUmgr Parameters Response
==========================
MCUmgr parameters response header fields
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``0`` | ``6`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"buf_size" : (uint)
(str)"buf_count" : (uint)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "buf_size"       | Single SMP buffer size, this includes SMP header and CBOR payload.      |
+------------------+-------------------------------------------------------------------------+
| "buf_count"      | Number of SMP buffers supported.                                        |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
.. _mcumgr_os_application_info:
OS/Application Info
*******************
Used to obtain information on running image, similar functionality to the linux
uname command, allowing details such as kernel name, kernel version, build
date/time, processor type and application-defined details to be returned. This
functionality can be enabled with :kconfig:option:`CONFIG_MCUMGR_GRP_OS_INFO`.
OS/Application Info Request
===========================
OS/Application info request header fields:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``0`` | ``7`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str,opt)"format" : (str)
}
where:
.. table::
:align: center
+----------+-------------------------------------------------------------------+
| "format" | Format specifier of returned response, fields are appended in     |
|          | their natural ascending index order, not the order of             |
|          | characters that are received by the command. Format               |
|          | specifiers: |br|                                                  |
|          | * ``s`` Kernel name |br|                                          |
|          | * ``n`` Node name |br|                                            |
|          | * ``r`` Kernel release |br|                                       |
|          | * ``v`` Kernel version |br|                                       |
|          | * ``b`` Build date and time (requires                             |
|          | :kconfig:option:`CONFIG_MCUMGR_GRP_OS_INFO_BUILD_DATE_TIME`) |br| |
|          | * ``m`` Machine |br|                                              |
|          | * ``p`` Processor |br|                                            |
|          | * ``i`` Hardware platform |br|                                    |
|          | * ``o`` Operating system |br|                                     |
|          | * ``a`` All fields (shorthand for all above options) |br|         |
|          | If this option is not provided, the ``s`` Kernel name option      |
|          | will be used.                                                     |
+----------+-------------------------------------------------------------------+
OS/Application Info Response
============================
OS/Application info response header fields
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``0`` | ``7`` |
+--------+--------------+----------------+
CBOR data of successful response:
.. code-block:: none
{
(str)"output" : (str)
}
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "output"         | Text response including requested parameters.                           |
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Bootloader Information
**********************
Allows retrieving information about the on-board bootloader and its parameters.
Bootloader Information Request
==============================
Bootloader information request header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``0`` | ``0`` | ``8`` |
+--------+--------------+----------------+
CBOR data of request:
.. code-block:: none
{
(str,opt)"query" : (str)
}
where:
.. table::
:align: center
+--------------+-----------------------------------------------+
| "query" | Is string representing query for parameters, |
| | with no restrictions how the query looks like |
| | as processing of query is left for bootloader |
| | backend. |
| | If there is no query, then response will |
| | return string identifying the bootloader. |
+--------------+-----------------------------------------------+
Bootloader Information Response
===============================
Bootloader information response header:
.. table::
:align: center
+--------+--------------+----------------+
| ``OP`` | ``Group ID`` | ``Command ID`` |
+========+==============+================+
| ``1`` | ``0`` | ``8`` |
+--------+--------------+----------------+
In case when no "query" has been provided in request,
CBOR data of response:
.. code-block:: none
{
(str)"bootloader" : (str)
}
where:
.. table::
:align: center
+--------------+-----------------------------------------------+
| "bootloader" | String representing bootloader name |
+--------------+-----------------------------------------------+
In case when "query" is provided:
.. code-block:: none
{
(str,opt)<response> : ()
...
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| <response>       | Response to "query". This is optional and may be left out in case when  |
|                  | query yields no response, SMP version 2 error code of                   |
|                  | `OS_MGMT_ERR_QUERY_YIELDS_NO_ANSWER` is expected.                       |
|                  | Response may have more than one parameter reported back or it may be    |
|                  | a map, that is dependent on bootloader backend and query.               |
+------------------+-------------------------------------------------------------------------+
| ...              | Parameter characteristic information.                                   |
+------------------+-------------------------------------------------------------------------+
Parameter may be accompanied by additional, parameter specific, information keywords with
assigned values.
In case of error the CBOR data takes the form:
.. tabs::
.. group-tab:: SMP version 2
.. code-block:: none
{
(str)"err" : {
(str)"group" : (uint)
(str)"rc" : (uint)
}
}
.. group-tab:: SMP version 1 (and non-group SMP version 2)
.. code-block:: none
{
(str)"rc" : (int)
}
where:
.. table::
:align: center
+------------------+-------------------------------------------------------------------------+
| "err" -> "group" | :c:enum:`mcumgr_group_t` group of the group-based error code. Only      |
|                  | appears if an error is returned when using SMP version 2.               |
+------------------+-------------------------------------------------------------------------+
| "err" -> "rc"    | contains the index of the group-based error code. Only appears if       |
|                  | non-zero (error condition) when using SMP version 2.                    |
+------------------+-------------------------------------------------------------------------+
| "rc"             | :c:enum:`mcumgr_err_t` only appears if non-zero (error condition) when  |
|                  | using SMP version 1 or for SMP errors when using SMP version 2.         |
+------------------+-------------------------------------------------------------------------+
Bootloader Information: MCUboot
===============================
In case when MCUboot is application bootloader, empty request will
be responded with:
.. code-block:: none
{
(str)"bootloader" : (str)"MCUboot"
}
Currently "MCUboot" supports querying for mode of operation:
.. code-block:: none
{
(str)"query" : (str)"mode"
}
Response to "mode" is:
.. code-block:: none
{
(str)"mode" : (int)
(str,opt)"no-downgrade" : (bool)
}
where "mode" is one of:
.. table::
:align: center
+-----+-----------------------------------------------------+
| -1 | Unknown mode of MCUboot. |
+-----+-----------------------------------------------------+
| 0 | MCUboot is in single application mode. |
+-----+-----------------------------------------------------+
| 1 | MCUboot is in swap using scratch partition mode. |
+-----+-----------------------------------------------------+
| 2 | MCUboot is in overwrite (upgrade-only) mode. |
+-----+-----------------------------------------------------+
| 3 | MCUboot is in swap without scratch mode. |
+-----+-----------------------------------------------------+
| 4 | MCUboot is in DirectXIP without revert mode. |
+-----+-----------------------------------------------------+
| 5 | MCUboot is in DirectXIP with revert mode. |
+-----+-----------------------------------------------------+
| 6 | MCUboot is in RAM loader mode. |
+-----+-----------------------------------------------------+
The ``no-downgrade`` field is a flag, which is always sent when true, indicating that MCUboot has
downgrade prevention enabled; downgrade prevention means that if the uploaded image has a lower
version than the currently running application, it will not be used for an update by MCUboot.
MCUmgr may reject images with a lower version in this configuration.