text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```c
/*
*
*/
#ifndef H_SETTINGS_MGMT_
#define H_SETTINGS_MGMT_
#ifdef __cplusplus
extern "C" {
#endif
/**
* Command IDs for settings management group.
*/
#define SETTINGS_MGMT_ID_READ_WRITE 0
#define SETTINGS_MGMT_ID_DELETE 1
#define SETTINGS_MGMT_ID_COMMIT 2
#define SETTINGS_MGMT_ID_LOAD_SAVE 3
/**
* Command result codes for settings management group.
*/
enum settings_mgmt_ret_code_t {
/** No error, this is implied if there is no ret value in the response. */
SETTINGS_MGMT_ERR_OK = 0,
/** Unknown error occurred. */
SETTINGS_MGMT_ERR_UNKNOWN,
/** The provided key name is too long to be used. */
SETTINGS_MGMT_ERR_KEY_TOO_LONG,
/** The provided key name does not exist. */
SETTINGS_MGMT_ERR_KEY_NOT_FOUND,
/** The provided key name does not support being read. */
SETTINGS_MGMT_ERR_READ_NOT_SUPPORTED,
/** The provided root key name does not exist. */
SETTINGS_MGMT_ERR_ROOT_KEY_NOT_FOUND,
/** The provided key name does not support being written. */
SETTINGS_MGMT_ERR_WRITE_NOT_SUPPORTED,
/** The provided key name does not support being deleted. */
SETTINGS_MGMT_ERR_DELETE_NOT_SUPPORTED,
};
#ifdef __cplusplus
}
#endif
#endif /* H_SETTINGS_MGMT_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/settings_mgmt/settings_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 281 |
```objective-c
/*
*
*/
#ifndef H_SHELL_MGMT_
#define H_SHELL_MGMT_
#ifdef __cplusplus
extern "C" {
#endif
/**
* Command IDs for shell management group.
*/
#define SHELL_MGMT_ID_EXEC 0
/**
* Command result codes for shell management group.
*/
enum shell_mgmt_err_code_t {
/** No error, this is implied if there is no ret value in the response */
SHELL_MGMT_ERR_OK = 0,
/** Unknown error occurred. */
SHELL_MGMT_ERR_UNKNOWN,
/** The provided command to execute is too long. */
SHELL_MGMT_ERR_COMMAND_TOO_LONG,
/** No command to execute was provided. */
SHELL_MGMT_ERR_EMPTY_COMMAND,
};
#ifdef __cplusplus
}
#endif
#endif /* H_SHELL_MGMT_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/shell_mgmt/shell_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 166 |
```objective-c
/*
*
*/
#ifndef H_IMG_MGMT_CLIENT_
#define H_IMG_MGMT_CLIENT_
#include <inttypes.h>
#include <zephyr/mgmt/mcumgr/grp/img_mgmt/img_mgmt.h>
#include <zephyr/mgmt/mcumgr/smp/smp_client.h>
/**
* @brief MCUmgr Image management client API
* @defgroup mcumgr_img_mgmt_client MCUmgr img_mgmt_client API
* @ingroup mcumgr
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Image list flags.
*/
struct mcumgr_image_list_flags {
/** Bootable image */
bool bootable: 1;
/** Pending update state */
bool pending: 1;
/** Confirmed image */
bool confirmed: 1;
/** Active image */
bool active: 1;
/** Permanent image state */
bool permanent: 1;
};
/**
* @brief Image list data.
*/
struct mcumgr_image_data {
/** Image slot num */
uint32_t slot_num;
/** Image number */
uint32_t img_num;
/** Image SHA256 checksum */
char hash[IMG_MGMT_DATA_SHA_LEN];
/** Image Version */
char version[IMG_MGMT_VER_MAX_STR_LEN + 1];
/** Image Flags */
struct mcumgr_image_list_flags flags;
};
/**
* @brief MCUmgr Image list response.
*/
struct mcumgr_image_state {
/** Status */
enum mcumgr_err_t status;
/** Length of image_list */
int image_list_length;
/** Image list pointer */
struct mcumgr_image_data *image_list;
};
/**
* @brief MCUmgr Image upload response.
*/
struct mcumgr_image_upload {
/** Status */
enum mcumgr_err_t status;
/** Reported image offset */
size_t image_upload_offset;
};
/**
* @brief IMG mgmt client upload structure
*
* Structure is used internally by the client
*/
struct img_gr_upload {
/** Image 256-bit hash */
char sha256[IMG_MGMT_DATA_SHA_LEN];
/** True when Hash is configured, false when not */
bool hash_initialized;
/** Image size */
size_t image_size;
/** Image upload offset state */
size_t offset;
/** Worst case init upload message size */
size_t upload_header_size;
/** Image slot num */
uint32_t image_num;
};
/**
* @brief IMG mgmt client object.
*/
struct img_mgmt_client {
/** SMP client object */
struct smp_client_object *smp_client;
/** Image Upload state data for client internal use */
struct img_gr_upload upload;
/** Client image list buffer size */
int image_list_length;
/** Image list buffer */
struct mcumgr_image_data *image_list;
/** Command status */
int status;
};
/**
 * @brief Initialize image group client.
 *
 * Function initializes image group client for given SMP client using supplied image data.
 *
 * @param client IMG mgmt client object
 * @param smp_client SMP client object
 * @param image_list_size Length of image_list buffer.
 * @param image_list Image list buffer pointer.
 *
 */
void img_mgmt_client_init(struct img_mgmt_client *client, struct smp_client_object *smp_client,
int image_list_size, struct mcumgr_image_data *image_list);
/**
* @brief Initialize image upload.
*
* @param client IMG mgmt client object
* @param image_size Size of image in bytes.
* @param image_num Image slot Num.
* @param image_hash Pointer to HASH for image must be SHA256 hash of entire upload
* if present (32 bytes). Use NULL when HASH from image is not available.
*
* @return 0 on success.
* @return @ref mcumgr_err_t code on failure.
*/
int img_mgmt_client_upload_init(struct img_mgmt_client *client, size_t image_size,
uint32_t image_num, const char *image_hash);
/**
* @brief Upload part of image.
*
* @param client IMG mgmt client object
* @param data Pointer to data.
* @param length Length of data
* @param res_buf Pointer for command response structure.
*
* @return 0 on success.
* @return @ref mcumgr_err_t code on failure.
*/
int img_mgmt_client_upload(struct img_mgmt_client *client, const uint8_t *data, size_t length,
struct mcumgr_image_upload *res_buf);
/**
* @brief Write image state.
*
* @param client IMG mgmt client object
* @param hash Pointer to Hash (Needed for test).
* @param confirm Set false for test and true for confirmation.
* @param res_buf Pointer for command response structure.
*
* @return 0 on success.
* @return @ref mcumgr_err_t code on failure.
*/
int img_mgmt_client_state_write(struct img_mgmt_client *client, char *hash, bool confirm,
struct mcumgr_image_state *res_buf);
/**
* @brief Read image state.
*
* @param client IMG mgmt client object
* @param res_buf Pointer for command response structure.
*
* @return 0 on success.
* @return @ref mcumgr_err_t code on failure.
*/
int img_mgmt_client_state_read(struct img_mgmt_client *client, struct mcumgr_image_state *res_buf);
/**
* @brief Erase selected Image Slot
*
* @param client IMG mgmt client object
* @param slot Slot number
*
* @return 0 on success.
* @return @ref mcumgr_err_t code on failure.
*/
int img_mgmt_client_erase(struct img_mgmt_client *client, uint32_t slot);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* H_IMG_MGMT_CLIENT_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/img_mgmt/img_mgmt_client.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,221 |
```objective-c
/*
*
*/
#ifndef H_MCUMGR_IMG_MGMT_CALLBACKS_
#define H_MCUMGR_IMG_MGMT_CALLBACKS_
#ifdef __cplusplus
extern "C" {
#endif
/* Dummy definitions, include zephyr/mgmt/mcumgr/grp/img_mgmt/img_mgmt.h for actual definitions */
struct img_mgmt_upload_action;
struct img_mgmt_upload_req;
/**
* @brief MCUmgr img_mgmt callback API
* @defgroup mcumgr_callback_api_img_mgmt MCUmgr img_mgmt callback API
* @ingroup mcumgr_callback_api
* @{
*/
/**
* Structure provided in the #MGMT_EVT_OP_IMG_MGMT_DFU_CHUNK notification callback: This callback
* function is used to notify the application about a pending firmware upload packet from a client
* and authorise or deny it. Upload will be allowed so long as all notification handlers return
* #MGMT_ERR_EOK, if one returns an error then the upload will be denied.
*/
struct img_mgmt_upload_check {
/** Action to take */
struct img_mgmt_upload_action *action;
/** Upload request information */
struct img_mgmt_upload_req *req;
};
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* H_MCUMGR_IMG_MGMT_CALLBACKS_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/img_mgmt/img_mgmt_callbacks.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 244 |
```objective-c
/*
*
*/
#ifndef H_STAT_MGMT_
#define H_STAT_MGMT_
#ifdef __cplusplus
extern "C" {
#endif
/**
* Command IDs for statistics management group.
*/
#define STAT_MGMT_ID_SHOW 0
#define STAT_MGMT_ID_LIST 1
/**
* Command result codes for statistics management group.
*/
enum stat_mgmt_err_code_t {
/** No error, this is implied if there is no ret value in the response */
STAT_MGMT_ERR_OK = 0,
/** Unknown error occurred. */
STAT_MGMT_ERR_UNKNOWN,
/** The provided statistic group name was not found. */
STAT_MGMT_ERR_INVALID_GROUP,
/** The provided statistic name was not found. */
STAT_MGMT_ERR_INVALID_STAT_NAME,
/** The size of the statistic cannot be handled. */
STAT_MGMT_ERR_INVALID_STAT_SIZE,
/** Walk through of statistics was aborted. */
STAT_MGMT_ERR_WALK_ABORTED,
};
/**
 * @brief Represents a single value in a statistics group.
 */
struct stat_mgmt_entry {
/** Statistic name. */
const char *name;
/** Statistic value.
 * NOTE(review): uint64_t requires <stdint.h>/<inttypes.h>, which this header
 * does not include directly — confirm an earlier include always provides it.
 */
uint64_t value;
};
#ifdef __cplusplus
}
#endif
#endif /* H_STAT_MGMT_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/stat_mgmt/stat_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 244 |
```objective-c
/*
*
*/
#ifndef H_MCUMGR_OS_MGMT_CALLBACKS_
#define H_MCUMGR_OS_MGMT_CALLBACKS_
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief MCUmgr os_mgmt callback API
* @defgroup mcumgr_callback_api_os_mgmt MCUmgr os_mgmt callback API
* @ingroup mcumgr_callback_api
* @{
*/
/**
* Structure provided in the #MGMT_EVT_OP_OS_MGMT_RESET notification callback: This callback
* function is used to notify the application about a pending device reboot request and to
* authorise or deny it.
*/
struct os_mgmt_reset_data {
/** Contains the value of the force parameter. */
bool force;
};
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* H_MCUMGR_OS_MGMT_CALLBACKS_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/os_mgmt/os_mgmt_callbacks.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 158 |
```objective-c
/*
*
*/
#ifndef H_IMG_MGMT_
#define H_IMG_MGMT_
#include <inttypes.h>
#include <zephyr/mgmt/mcumgr/mgmt/mgmt.h>
#include <zephyr/mgmt/mcumgr/smp/smp.h>
#include <bootutil/image.h>
#include <zcbor_common.h>
/**
* @brief MCUmgr img_mgmt API
* @defgroup mcumgr_img_mgmt MCUmgr img_mgmt API
* @ingroup mcumgr
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
#define IMG_MGMT_DATA_SHA_LEN 32 /* SHA256 */
/**
* Image state flags
*/
#define IMG_MGMT_STATE_F_PENDING 0x01
#define IMG_MGMT_STATE_F_CONFIRMED 0x02
#define IMG_MGMT_STATE_F_ACTIVE 0x04
#define IMG_MGMT_STATE_F_PERMANENT 0x08
/* 255.255.65535.4294967295\0 */
#define IMG_MGMT_VER_MAX_STR_LEN (sizeof("255.255.65535.4294967295"))
/**
* Swap Types for image management state machine
*/
#define IMG_MGMT_SWAP_TYPE_NONE 0
#define IMG_MGMT_SWAP_TYPE_TEST 1
#define IMG_MGMT_SWAP_TYPE_PERM 2
#define IMG_MGMT_SWAP_TYPE_REVERT 3
#define IMG_MGMT_SWAP_TYPE_UNKNOWN 255
/**
* Command IDs for image management group.
*/
#define IMG_MGMT_ID_STATE 0
#define IMG_MGMT_ID_UPLOAD 1
#define IMG_MGMT_ID_FILE 2
#define IMG_MGMT_ID_CORELIST 3
#define IMG_MGMT_ID_CORELOAD 4
#define IMG_MGMT_ID_ERASE 5
/**
 * Command result codes for image management group.
 */
enum img_mgmt_err_code_t {
/** No error, this is implied if there is no ret value in the response */
IMG_MGMT_ERR_OK = 0,
/** Unknown error occurred. */
IMG_MGMT_ERR_UNKNOWN,
/** Failed to query flash area configuration. */
IMG_MGMT_ERR_FLASH_CONFIG_QUERY_FAIL,
/** There is no image in the slot. */
IMG_MGMT_ERR_NO_IMAGE,
/** The image in the slot has no TLVs (tag, length, value). */
IMG_MGMT_ERR_NO_TLVS,
/** The image in the slot has an invalid TLV type and/or length. */
IMG_MGMT_ERR_INVALID_TLV,
/** The image in the slot has multiple hash TLVs, which is invalid. */
IMG_MGMT_ERR_TLV_MULTIPLE_HASHES_FOUND,
/** The image in the slot has an invalid TLV size. */
IMG_MGMT_ERR_TLV_INVALID_SIZE,
/** The image in the slot does not have a hash TLV, which is required. */
IMG_MGMT_ERR_HASH_NOT_FOUND,
/** There is no free slot to place the image. */
IMG_MGMT_ERR_NO_FREE_SLOT,
/** Flash area opening failed. */
IMG_MGMT_ERR_FLASH_OPEN_FAILED,
/** Flash area reading failed. */
IMG_MGMT_ERR_FLASH_READ_FAILED,
/** Flash area writing failed. */
IMG_MGMT_ERR_FLASH_WRITE_FAILED,
/** Flash area erase failed. */
IMG_MGMT_ERR_FLASH_ERASE_FAILED,
/** The provided slot is not valid. */
IMG_MGMT_ERR_INVALID_SLOT,
/** Insufficient heap memory (malloc failed). */
IMG_MGMT_ERR_NO_FREE_MEMORY,
/** The flash context is already set. */
IMG_MGMT_ERR_FLASH_CONTEXT_ALREADY_SET,
/** The flash context is not set. */
IMG_MGMT_ERR_FLASH_CONTEXT_NOT_SET,
/** The device for the flash area is NULL. */
IMG_MGMT_ERR_FLASH_AREA_DEVICE_NULL,
/** The offset for a page number is invalid. */
IMG_MGMT_ERR_INVALID_PAGE_OFFSET,
/** The offset parameter was not provided and is required. */
IMG_MGMT_ERR_INVALID_OFFSET,
/** The length parameter was not provided and is required. */
IMG_MGMT_ERR_INVALID_LENGTH,
/** The image length is smaller than the size of an image header. */
IMG_MGMT_ERR_INVALID_IMAGE_HEADER,
/** The image header magic value does not match the expected value. */
IMG_MGMT_ERR_INVALID_IMAGE_HEADER_MAGIC,
/** The hash parameter provided is not valid. */
IMG_MGMT_ERR_INVALID_HASH,
/** The image load address does not match the address of the flash area. */
IMG_MGMT_ERR_INVALID_FLASH_ADDRESS,
/** Failed to get version of currently running application. */
IMG_MGMT_ERR_VERSION_GET_FAILED,
/** The currently running application is newer than the version being uploaded. */
IMG_MGMT_ERR_CURRENT_VERSION_IS_NEWER,
/** There is already an image operation pending. */
IMG_MGMT_ERR_IMAGE_ALREADY_PENDING,
/** The image vector table is invalid. */
IMG_MGMT_ERR_INVALID_IMAGE_VECTOR_TABLE,
/** The image is too large to fit. */
IMG_MGMT_ERR_INVALID_IMAGE_TOO_LARGE,
/** The amount of data sent is larger than the provided image size. */
IMG_MGMT_ERR_INVALID_IMAGE_DATA_OVERRUN,
/** Confirmation of image has been denied */
IMG_MGMT_ERR_IMAGE_CONFIRMATION_DENIED,
/** Setting test to active slot is not allowed */
IMG_MGMT_ERR_IMAGE_SETTING_TEST_TO_ACTIVE_DENIED,
/** Current active slot for image cannot be determined */
IMG_MGMT_ERR_ACTIVE_SLOT_NOT_KNOWN,
};
/**
* IMG_MGMT_ID_UPLOAD statuses.
*/
enum img_mgmt_id_upload_t {
IMG_MGMT_ID_UPLOAD_STATUS_START = 0,
IMG_MGMT_ID_UPLOAD_STATUS_ONGOING,
IMG_MGMT_ID_UPLOAD_STATUS_COMPLETE,
};
extern int boot_current_slot;
extern struct img_mgmt_state g_img_mgmt_state;
/** Represents an individual upload request. */
struct img_mgmt_upload_req {
uint32_t image; /* 0 by default */
size_t off; /* SIZE_MAX if unspecified */
size_t size; /* SIZE_MAX if unspecified */
struct zcbor_string img_data;
struct zcbor_string data_sha;
bool upgrade; /* Only allow greater version numbers. */
};
/** Global state for upload in progress. */
struct img_mgmt_state {
/** Flash area being written; -1 if no upload in progress. */
int area_id;
/** Flash offset of next chunk. */
size_t off;
/** Total size of image data. */
size_t size;
/** Hash of image data; used for resumption of a partial upload. */
uint8_t data_sha_len;
uint8_t data_sha[IMG_MGMT_DATA_SHA_LEN];
};
/** Describes what to do during processing of an upload request. */
struct img_mgmt_upload_action {
/** The total size of the image. */
unsigned long long size;
/** The number of image bytes to write to flash. */
int write_bytes;
/** The flash area to write to. */
int area_id;
/** Whether to process the request; false if offset is wrong. */
bool proceed;
/** Whether to erase the destination flash area. */
bool erase;
#ifdef CONFIG_MCUMGR_GRP_IMG_VERBOSE_ERR
/** "rsn" string to be sent as explanation for "rc" code */
const char *rc_rsn;
#endif
};
/*
* @brief Read info of an image at the specified slot number
*
* @param image_slot image slot number
* @param ver output buffer for image version
* @param hash output buffer for image hash
* @param flags output buffer for image flags
*
* @return 0 on success, non-zero on failure.
*/
int img_mgmt_read_info(int image_slot, struct image_version *ver, uint8_t *hash, uint32_t *flags);
/**
* @brief Get the image version of the currently running application.
*
* @param ver output buffer for an image version information object.
*
* @return 0 on success, non-zero on failure.
*/
int img_mgmt_my_version(struct image_version *ver);
/**
* @brief Format version string from struct image_version
*
* @param ver pointer to image_version object
* @param dst output buffer for image version string
*
* @return Non-negative on success, negative value on error.
*/
int img_mgmt_ver_str(const struct image_version *ver, char *dst);
/**
* @brief Get active, running application slot number for an image
*
* @param image image number to get active slot for.
*
* @return Non-negative slot number
*/
int img_mgmt_active_slot(int image);
/**
* @brief Get active image number
*
* Gets 0 based number for running application.
*
* @return Non-negative image number.
*/
int img_mgmt_active_image(void);
/**
* @brief Check if the image slot is in use.
*
* The check is based on MCUboot flags, not image contents. This means that
* slot with image in it, but no bootable flags set, is considered empty.
* Active slot is always in use.
*
* @param slot slot number
*
* @return 0 if slot is not used, non-0 otherwise.
*/
int img_mgmt_slot_in_use(int slot);
/**
* @brief Check if any slot is in MCUboot pending state.
*
* Function returns 1 if slot 0 or slot 1 is in MCUboot pending state,
* which means that it has been either marked for test or confirmed.
*
* @return 1 if there's pending DFU otherwise 0.
*/
int img_mgmt_state_any_pending(void);
/**
* @brief Returns state flags set to slot.
*
* Flags are translated from MCUboot image state flags.
* Returned value is zero if no flags are set or a combination of:
* IMG_MGMT_STATE_F_PENDING
* IMG_MGMT_STATE_F_CONFIRMED
* IMG_MGMT_STATE_F_ACTIVE
* IMG_MGMT_STATE_F_PERMANENT
*
* @param query_slot slot number
*
* @return return the state flags.
*
*/
uint8_t img_mgmt_state_flags(int query_slot);
/**
* @brief Sets the pending flag for the specified image slot.
*
* Sets specified image slot to be used as active slot during next boot,
* either for test or permanently. Non-permanent image will be reverted
* unless image confirms itself during next boot.
*
* @param slot slot number
* @param permanent permanent or test only
*
* @return 0 on success, non-zero on failure
*/
int img_mgmt_state_set_pending(int slot, int permanent);
/**
* @brief Confirms the current image state.
*
* Prevents a fallback from occurring on the next reboot if the active image
* is currently being tested.
*
* @return 0 on success, non-zero on failure
*/
int img_mgmt_state_confirm(void);
/**
* Compares two image version numbers in a semver-compatible way.
*
* @param a The first version to compare
* @param b The second version to compare
*
* @return -1 if a < b
* @return 0 if a = b
* @return 1 if a > b
*/
int img_mgmt_vercmp(const struct image_version *a, const struct image_version *b);
#if defined(CONFIG_MCUMGR_GRP_IMG_MUTEX)
/*
* @brief Will reset the image management state back to default (no ongoing upload),
* requires that CONFIG_MCUMGR_GRP_IMG_MUTEX be enabled to allow for mutex
* locking of the image management state object.
*/
void img_mgmt_reset_upload(void);
#endif
#ifdef CONFIG_MCUMGR_GRP_IMG_VERBOSE_ERR
#define IMG_MGMT_UPLOAD_ACTION_SET_RC_RSN(action, rsn) ((action)->rc_rsn = (rsn))
#define IMG_MGMT_UPLOAD_ACTION_RC_RSN(action) ((action)->rc_rsn)
int img_mgmt_error_rsp(struct smp_streamer *ctxt, int rc, const char *rsn);
extern const char *img_mgmt_err_str_app_reject;
extern const char *img_mgmt_err_str_hdr_malformed;
extern const char *img_mgmt_err_str_magic_mismatch;
extern const char *img_mgmt_err_str_no_slot;
extern const char *img_mgmt_err_str_flash_open_failed;
extern const char *img_mgmt_err_str_flash_erase_failed;
extern const char *img_mgmt_err_str_flash_write_failed;
extern const char *img_mgmt_err_str_downgrade;
extern const char *img_mgmt_err_str_image_bad_flash_addr;
extern const char *img_mgmt_err_str_image_too_large;
extern const char *img_mgmt_err_str_data_overrun;
#else
#define IMG_MGMT_UPLOAD_ACTION_SET_RC_RSN(action, rsn)
#define IMG_MGMT_UPLOAD_ACTION_RC_RSN(action) NULL
#endif
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* H_IMG_MGMT_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/img_mgmt/img_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,688 |
```objective-c
/*
*
*/
#ifndef H_OS_MGMT_CLIENT_
#define H_OS_MGMT_CLIENT_
#include <inttypes.h>
#include <zephyr/mgmt/mcumgr/smp/smp_client.h>
/**
* @brief MCUmgr OS management client API
* @defgroup mcumgr_os_mgmt_client MCUmgr os_mgmt_client API
* @ingroup mcumgr
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief OS mgmt client object
*/
struct os_mgmt_client {
/** SMP client object */
struct smp_client_object *smp_client;
/** Command status */
int status;
};
/**
* @brief Initialize OS management client.
*
* @param client OS mgmt client object
* @param smp_client SMP client object
*
*/
void os_mgmt_client_init(struct os_mgmt_client *client, struct smp_client_object *smp_client);
/**
* @brief Send SMP message for Echo command.
*
* @param client OS mgmt client object
* @param echo_string Echo string
* @param max_len Max length of @p echo_string
*
* @return 0 on success.
* @return @ref mcumgr_err_t code on failure.
*/
int os_mgmt_client_echo(struct os_mgmt_client *client, const char *echo_string, size_t max_len);
/**
* @brief Send SMP Reset command.
*
* @param client OS mgmt client object
*
* @return 0 on success.
* @return @ref mcumgr_err_t code on failure.
*/
int os_mgmt_client_reset(struct os_mgmt_client *client);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* H_OS_MGMT_CLIENT_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/os_mgmt/os_mgmt_client.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 352 |
```objective-c
/*
*
*/
#ifndef H_OS_MGMT_
#define H_OS_MGMT_
#ifdef __cplusplus
extern "C" {
#endif
/**
* Command IDs for OS management group.
*/
#define OS_MGMT_ID_ECHO 0
#define OS_MGMT_ID_CONS_ECHO_CTRL 1
#define OS_MGMT_ID_TASKSTAT 2
#define OS_MGMT_ID_MPSTAT 3
#define OS_MGMT_ID_DATETIME_STR 4
#define OS_MGMT_ID_RESET 5
#define OS_MGMT_ID_MCUMGR_PARAMS 6
#define OS_MGMT_ID_INFO 7
#define OS_MGMT_ID_BOOTLOADER_INFO 8
/**
* Command result codes for OS management group.
*/
enum os_mgmt_err_code_t {
/** No error, this is implied if there is no ret value in the response */
OS_MGMT_ERR_OK = 0,
/** Unknown error occurred. */
OS_MGMT_ERR_UNKNOWN,
/** The provided format value is not valid. */
OS_MGMT_ERR_INVALID_FORMAT,
/** Query was not recognized. */
OS_MGMT_ERR_QUERY_YIELDS_NO_ANSWER,
/** RTC is not set */
OS_MGMT_ERR_RTC_NOT_SET,
/** RTC command failed */
OS_MGMT_ERR_RTC_COMMAND_FAILED,
};
/* Bitmask values used by the os info command handler. Note that the width of this variable is
* 32-bits, allowing 32 flags, custom user-level implementations should start at
* OS_MGMT_INFO_FORMAT_USER_CUSTOM_START and reference that directly as additional format
* specifiers might be added to this list in the future.
*/
enum os_mgmt_info_formats {
OS_MGMT_INFO_FORMAT_KERNEL_NAME = BIT(0),
OS_MGMT_INFO_FORMAT_NODE_NAME = BIT(1),
OS_MGMT_INFO_FORMAT_KERNEL_RELEASE = BIT(2),
OS_MGMT_INFO_FORMAT_KERNEL_VERSION = BIT(3),
OS_MGMT_INFO_FORMAT_BUILD_DATE_TIME = BIT(4),
OS_MGMT_INFO_FORMAT_MACHINE = BIT(5),
OS_MGMT_INFO_FORMAT_PROCESSOR = BIT(6),
OS_MGMT_INFO_FORMAT_HARDWARE_PLATFORM = BIT(7),
OS_MGMT_INFO_FORMAT_OPERATING_SYSTEM = BIT(8),
OS_MGMT_INFO_FORMAT_USER_CUSTOM_START = BIT(9),
};
/* Structure provided in the MGMT_EVT_OP_OS_MGMT_INFO_CHECK notification callback */
struct os_mgmt_info_check {
/* Input format string from the mcumgr client */
struct zcbor_string *format;
/* Bitmask of values specifying which outputs should be present */
uint32_t *format_bitmask;
/* Number of valid format characters parsed, must be incremented by 1 for each valid
* character
*/
uint16_t *valid_formats;
/* Needs to be set to true if the OS name is being provided by external code */
bool *custom_os_name;
};
/* Structure provided in the MGMT_EVT_OP_OS_MGMT_INFO_APPEND notification callback */
struct os_mgmt_info_append {
/* The format bitmask from the processed commands, the bits should be cleared once
* processed, note that if all_format_specified is specified, the corresponding bits here
* will not be set
*/
uint32_t *format_bitmask;
/* Will be true if the all 'a' specifier was provided */
bool all_format_specified;
/* The output buffer which the responses should be appended to. If prior_output is true, a
* space must be added prior to the output response
*/
uint8_t *output;
/* The current size of the output response in the output buffer, must be updated to be the
* size of the output response after appending data
*/
uint16_t *output_length;
/* The size of the output buffer, including null terminator character, if the output
* response would exceed this size, the function must abort and return false to return a
* memory error to the client
*/
uint16_t buffer_size;
/* If there has been prior output, must be set to true if a response has been output */
bool *prior_output;
};
#ifdef __cplusplus
}
#endif
#endif /* H_OS_MGMT_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/grp/os_mgmt/os_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 877 |
```objective-c
/*
*
*/
#ifndef H_MGMT_MGMT_DEFINES_
#define H_MGMT_MGMT_DEFINES_
#include <inttypes.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief MCUmgr mgmt API
* @defgroup mcumgr_mgmt_api MCUmgr mgmt API
* @ingroup mcumgr
* @{
*/
/**
 * Used at end of MCUmgr handlers to return an error if the message size limit was reached,
 * or OK if it was not.
 *
 * The argument and the full expansion are parenthesized (CERT PRE01-C/PRE02-C)
 * so the macro composes safely inside larger expressions — the unparenthesized
 * form could bind incorrectly next to other ternary or comma operators.
 */
#define MGMT_RETURN_CHECK(ok) ((ok) ? MGMT_ERR_EOK : MGMT_ERR_EMSGSIZE)
/** Opcodes; encoded in first byte of header. */
enum mcumgr_op_t {
/** Read op-code */
MGMT_OP_READ = 0,
/** Read response op-code */
MGMT_OP_READ_RSP,
/** Write op-code */
MGMT_OP_WRITE,
/** Write response op-code */
MGMT_OP_WRITE_RSP,
};
/**
* MCUmgr groups. The first 64 groups are reserved for system level mcumgr
* commands. Per-user commands are then defined after group 64.
*/
enum mcumgr_group_t {
/** OS (operating system) group */
MGMT_GROUP_ID_OS = 0,
/** Image management group, used for uploading firmware images */
MGMT_GROUP_ID_IMAGE,
/** Statistic management group, used for retrieving statistics */
MGMT_GROUP_ID_STAT,
/** Settings management (config) group, used for reading/writing settings */
MGMT_GROUP_ID_SETTINGS,
/** Log management group (unused) */
MGMT_GROUP_ID_LOG,
/** Crash group (unused) */
MGMT_GROUP_ID_CRASH,
/** Split image management group (unused) */
MGMT_GROUP_ID_SPLIT,
/** Run group (unused) */
MGMT_GROUP_ID_RUN,
/** FS (file system) group, used for performing file IO operations */
MGMT_GROUP_ID_FS,
/** Shell management group, used for executing shell commands */
MGMT_GROUP_ID_SHELL,
/** User groups defined from 64 onwards */
MGMT_GROUP_ID_PERUSER = 64,
/** Zephyr-specific groups decrease from PERUSER to avoid collision with upstream and
* user-defined groups.
* Zephyr-specific: Basic group
*/
ZEPHYR_MGMT_GRP_BASIC = (MGMT_GROUP_ID_PERUSER - 1),
};
/**
* MCUmgr error codes.
*/
enum mcumgr_err_t {
/** No error (success). */
MGMT_ERR_EOK = 0,
/** Unknown error. */
MGMT_ERR_EUNKNOWN,
/** Insufficient memory (likely not enough space for CBOR object). */
MGMT_ERR_ENOMEM,
/** Error in input value. */
MGMT_ERR_EINVAL,
/** Operation timed out. */
MGMT_ERR_ETIMEOUT,
/** No such file/entry. */
MGMT_ERR_ENOENT,
/** Current state disallows command. */
MGMT_ERR_EBADSTATE,
/** Response too large. */
MGMT_ERR_EMSGSIZE,
/** Command not supported. */
MGMT_ERR_ENOTSUP,
/** Corrupt */
MGMT_ERR_ECORRUPT,
/** Command blocked by processing of other command */
MGMT_ERR_EBUSY,
/** Access to specific function, command or resource denied */
MGMT_ERR_EACCESSDENIED,
/** Requested SMP MCUmgr protocol version is not supported (too old) */
MGMT_ERR_UNSUPPORTED_TOO_OLD,
/** Requested SMP MCUmgr protocol version is not supported (too new) */
MGMT_ERR_UNSUPPORTED_TOO_NEW,
/** User errors defined from 256 onwards */
MGMT_ERR_EPERUSER = 256
};
#define MGMT_HDR_SIZE 8
/**
 * @}
 */
#ifdef __cplusplus
}
#endif
#endif /* H_MGMT_MGMT_DEFINES_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/mgmt/mgmt_defines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 810 |
```objective-c
/*
*
*/
#ifndef H_MCUMGR_MGMT_HANDLERS_
#define H_MCUMGR_MGMT_HANDLERS_
#include <zephyr/kernel.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
* @brief MCUmgr handler registration API
* @defgroup mcumgr_handler_api MCUmgr handler API
* @ingroup mcumgr
* @{
*/
/** Type definition for a MCUmgr handler initialisation function */
typedef void (*mcumgr_handler_init_t)(void);
/** @cond INTERNAL_HIDDEN */
struct mcumgr_handler {
/** Initialisation function to be called */
const mcumgr_handler_init_t init;
};
/** @endcond */
/**
 * @brief Define a MCUmgr handler to register.
 *
 * This adds a new entry to the iterable section linker list of MCUmgr handlers.
 *
 * @param name Name of the MCUmgr handler to register.
 * @param _init Init function to be called (mcumgr_handler_init_t).
 */
#define MCUMGR_HANDLER_DEFINE(name, _init) \
STRUCT_SECTION_ITERABLE(mcumgr_handler, name) = { \
.init = _init, \
}
#ifdef __cplusplus
}
#endif /* __cplusplus */
/**
* @}
*/
#endif /* H_MCUMGR_MGMT_HANDLERS_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/mgmt/handlers.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 299 |
```objective-c
/*
*
*/
#ifndef H_MGMT_MGMT_
#define H_MGMT_MGMT_
#include <inttypes.h>
#include <zephyr/sys/slist.h>
#include <zephyr/mgmt/mcumgr/smp/smp.h>
#include <zephyr/mgmt/mcumgr/mgmt/mgmt_defines.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief MCUmgr mgmt API
* @defgroup mcumgr_mgmt_api MCUmgr mgmt API
* @since 1.11
* @version 1.0.0
* @ingroup mcumgr
* @{
*/
/** @typedef mgmt_alloc_rsp_fn
* @brief Allocates a buffer suitable for holding a response.
*
* If a source buf is provided, its user data is copied into the new buffer.
*
* @param src_buf An optional source buffer to copy user data from.
* @param arg Optional streamer argument.
*
* @return Newly-allocated buffer on success NULL on failure.
*/
typedef void *(*mgmt_alloc_rsp_fn)(const void *src_buf, void *arg);
/** @typedef mgmt_reset_buf_fn
* @brief Resets a buffer to a length of 0.
*
* The buffer's user data remains, but its payload is cleared.
*
* @param buf The buffer to reset.
* @param arg Optional streamer argument.
*/
typedef void (*mgmt_reset_buf_fn)(void *buf, void *arg);
/*
 * Accessors for the verbose error-response reason string ("rc_rsn") of an
 * mcumgr context. When CONFIG_MCUMGR_SMP_VERBOSE_ERR_RESPONSE is disabled,
 * the setter compiles to nothing and the getter always yields NULL.
 *
 * The "mc" parameter is parenthesized so that expressions such as
 * MGMT_CTXT_SET_RC_RSN(p + 1, r) expand correctly (CERT PRE01-C).
 */
#ifdef CONFIG_MCUMGR_SMP_VERBOSE_ERR_RESPONSE
#define MGMT_CTXT_SET_RC_RSN(mc, rsn) (((mc)->rc_rsn) = (rsn))
#define MGMT_CTXT_RC_RSN(mc) ((mc)->rc_rsn)
#else
#define MGMT_CTXT_SET_RC_RSN(mc, rsn)
#define MGMT_CTXT_RC_RSN(mc) NULL
#endif
/** @typedef mgmt_handler_fn
* @brief Processes a request and writes the corresponding response.
*
* A separate handler is required for each supported op-ID pair.
*
* @param ctxt The mcumgr context to use.
*
* @return 0 if a response was successfully encoded, #mcumgr_err_t code on failure.
*/
typedef int (*mgmt_handler_fn)(struct smp_streamer *ctxt);
/**
 * @brief Read handler and write handler for a single command ID.
 * Set use_custom_payload to true when using a user defined payload type
 */
struct mgmt_handler {
	/** Handler invoked for read requests of this command ID. */
	mgmt_handler_fn mh_read;
	/** Handler invoked for write requests of this command ID. */
	mgmt_handler_fn mh_write;
#if defined(CONFIG_MCUMGR_MGMT_HANDLER_USER_DATA)
	/** Optional user-supplied data attached to this handler. */
	void *user_data;
#endif
};
/**
 * @brief A collection of handlers for an entire command group.
 */
struct mgmt_group {
	/** Entry list node. */
	sys_snode_t node;
	/** Array of handlers; one entry per command ID. */
	const struct mgmt_handler *mg_handlers;
	/** Number of entries in the mg_handlers array. */
	uint16_t mg_handlers_count;
	/** The numeric ID of this group. */
	uint16_t mg_group_id;
#if defined(CONFIG_MCUMGR_SMP_SUPPORT_ORIGINAL_PROTOCOL)
	/** A function handler for translating version 2 SMP error codes to version 1 SMP error
	 * codes (optional)
	 */
	smp_translate_error_fn mg_translate_error;
#endif
#if defined(CONFIG_MCUMGR_MGMT_CUSTOM_PAYLOAD)
	/** Should be true when using user defined payload */
	bool custom_payload;
#endif
};
/**
* @brief Registers a full command group.
*
* @param group The group to register.
*/
void mgmt_register_group(struct mgmt_group *group);
/**
 * @brief Unregisters a full command group.
 *
 * @param group The group to unregister.
 */
void mgmt_unregister_group(struct mgmt_group *group);
/**
* @brief Finds a registered command handler.
*
* @param group_id The group of the command to find.
* @param command_id The ID of the command to find.
*
* @return The requested command handler on success;
* NULL on failure.
*/
const struct mgmt_handler *mgmt_find_handler(uint16_t group_id, uint16_t command_id);
/**
* @brief Finds a registered command group.
*
* @param group_id The group id of the command group to find.
*
* @return The requested group on success;
* NULL on failure.
*/
const struct mgmt_group *mgmt_find_group(uint16_t group_id);
/**
* @brief Finds a registered command handler.
*
* @param group The group of the command to find.
* @param command_id The ID of the command to find.
*
* @return The requested command handler on success;
* NULL on failure.
*/
const struct mgmt_handler *mgmt_get_handler(const struct mgmt_group *group, uint16_t command_id);
#if defined(CONFIG_MCUMGR_SMP_SUPPORT_ORIGINAL_PROTOCOL)
/**
* @brief Finds a registered error translation function for converting from SMP
* version 2 error codes to legacy SMP version 1 error codes.
*
* @param group_id The group of the translation function to find.
*
* @return Requested lookup function on success.
* @return NULL on failure.
*/
smp_translate_error_fn mgmt_find_error_translation_function(uint16_t group_id);
#endif
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* H_MGMT_MGMT_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/mgmt/mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,112 |
```objective-c
/*
*
*/
/** @file
* @brief Shell transport for the mcumgr SMP protocol.
*/
#ifndef ZEPHYR_INCLUDE_MGMT_SMP_SHELL_H_
#define ZEPHYR_INCLUDE_MGMT_SMP_SHELL_H_
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define SMP_SHELL_RX_BUF_SIZE 127
/** @brief Data used by SMP shell */
struct smp_shell_data {
struct net_buf_pool *buf_pool;
struct k_fifo buf_ready;
struct net_buf *buf;
atomic_t esc_state;
};
/**
* @brief Attempt to process received bytes as part of an SMP frame.
*
* Called to scan buffer from the beginning and consume all bytes that are
* part of SMP frame until frame or buffer ends.
*
* @param data SMP shell transfer data.
* @param bytes Buffer with bytes to process
* @param size Number of bytes to process
*
* @return number of bytes consumed by the SMP
*/
size_t smp_shell_rx_bytes(struct smp_shell_data *data, const uint8_t *bytes,
size_t size);
/**
* @brief Processes SMP data and executes command if full frame was received.
*
* This function should be called from thread context.
*
* @param data SMP shell transfer data.
*/
void smp_shell_process(struct smp_shell_data *data);
/**
* @brief Initializes SMP transport over shell.
*
* This function should be called before feeding SMP transport with received
* data.
*
* @return 0 on success
*/
int smp_shell_init(void);
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/transport/smp_shell.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 337 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_MGMT_SERIAL_H_
#define ZEPHYR_INCLUDE_MGMT_SERIAL_H_
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Two-byte marker introducing the first frame of a packet. */
#define MCUMGR_SERIAL_HDR_PKT 0x0609
/* Two-byte marker introducing a continuation (fragment) frame. */
#define MCUMGR_SERIAL_HDR_FRAG 0x0414
/* Maximum size of a single frame, in bytes. */
#define MCUMGR_SERIAL_MAX_FRAME 127
/* High and low bytes of the markers above, for byte-wise emission/matching. */
#define MCUMGR_SERIAL_HDR_PKT_1 (MCUMGR_SERIAL_HDR_PKT >> 8)
#define MCUMGR_SERIAL_HDR_PKT_2 (MCUMGR_SERIAL_HDR_PKT & 0xff)
#define MCUMGR_SERIAL_HDR_FRAG_1 (MCUMGR_SERIAL_HDR_FRAG >> 8)
#define MCUMGR_SERIAL_HDR_FRAG_2 (MCUMGR_SERIAL_HDR_FRAG & 0xff)
/**
* @brief Maintains state for an incoming mcumgr request packet.
*/
struct mcumgr_serial_rx_ctxt {
/* Contains the partially- or fully-received mcumgr request. Data
* stored in this buffer has already been base64-decoded.
*/
struct net_buf *nb;
/* Length of full packet, as read from header. */
uint16_t pkt_len;
};
/** @typedef mcumgr_serial_tx_cb
* @brief Transmits a chunk of raw response data.
*
* @param data The data to transmit.
* @param len The number of bytes to transmit.
*
* @return 0 on success; negative error code on failure.
*/
typedef int (*mcumgr_serial_tx_cb)(const void *data, int len);
/**
* @brief Processes an mcumgr request fragment received over a serial
* transport.
*
* Processes an mcumgr request fragment received over a serial transport. If
* the fragment is the end of a valid mcumgr request, this function returns a
* net_buf containing the decoded request. It is the caller's responsibility
* to free the net_buf after it has been processed.
*
* @param rx_ctxt The receive context associated with the serial
* transport being used.
* @param frag The incoming fragment to process.
* @param frag_len The length of the fragment, in bytes.
*
* @return A net_buf containing the decoded request if a
* complete and valid request has been
* received.
* NULL if the packet is incomplete or invalid.
*/
struct net_buf *mcumgr_serial_process_frag(
struct mcumgr_serial_rx_ctxt *rx_ctxt,
const uint8_t *frag, int frag_len);
/**
* @brief Encodes and transmits an mcumgr packet over serial.
*
* @param data The mcumgr packet data to send.
* @param len The length of the unencoded mcumgr packet.
* @param cb A callback used to transmit raw bytes.
*
* @return 0 on success; negative error code on failure.
*/
int mcumgr_serial_tx_pkt(const uint8_t *data, int len, mcumgr_serial_tx_cb cb);
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/transport/serial.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 657 |
```objective-c
/*
*
*/
/**
* @file
* @brief UDP transport for the MCUmgr SMP protocol.
*/
#ifndef ZEPHYR_INCLUDE_MGMT_SMP_UDP_H_
#define ZEPHYR_INCLUDE_MGMT_SMP_UDP_H_
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Enables the UDP SMP MCUmgr transport thread(s) which will open a socket and
* listen to requests.
*
* @note API is not thread safe.
*
* @return 0 on success
* @return -errno code on failure.
*/
int smp_udp_open(void);
/**
* @brief Disables the UDP SMP MCUmgr transport thread(s) which will close open sockets.
*
* @note API is not thread safe.
*
* @return 0 on success
* @return -errno code on failure.
*/
int smp_udp_close(void);
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/transport/smp_udp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 196 |
```objective-c
/*
*
*/
#ifndef H_MCUMGR_CALLBACKS_
#define H_MCUMGR_CALLBACKS_
#include <inttypes.h>
#include <zephyr/sys/slist.h>
#include <zephyr/mgmt/mcumgr/mgmt/mgmt.h>
#ifdef CONFIG_MCUMGR_GRP_FS
#include <zephyr/mgmt/mcumgr/grp/fs_mgmt/fs_mgmt_callbacks.h>
#endif
#ifdef CONFIG_MCUMGR_GRP_IMG
#include <zephyr/mgmt/mcumgr/grp/img_mgmt/img_mgmt_callbacks.h>
#endif
#ifdef CONFIG_MCUMGR_GRP_OS
#include <zephyr/mgmt/mcumgr/grp/os_mgmt/os_mgmt_callbacks.h>
#endif
#ifdef CONFIG_MCUMGR_GRP_SETTINGS
#include <zephyr/mgmt/mcumgr/grp/settings_mgmt/settings_mgmt_callbacks.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief MCUmgr callback API
* @defgroup mcumgr_callback_api MCUmgr callback API
* @ingroup mcumgr
* @{
*/
/** @cond INTERNAL_HIDDEN */
/** Event which signifies that all event IDs for a particular group should be enabled. */
#define MGMT_EVT_OP_ID_ALL 0xffff
/* Macro parameters below are parenthesized so that compound arguments
 * (e.g. a conditional expression) expand correctly (CERT PRE01-C).
 */
/** Get event for a particular group and event ID. */
#define MGMT_DEF_EVT_OP_ID(group, event_id) (((group) << 16) | BIT(event_id))
/** Get event used for enabling all event IDs of a particular group. */
#define MGMT_DEF_EVT_OP_ALL(group) (((group) << 16) | MGMT_EVT_OP_ID_ALL)
/** @endcond */
/** Get group from event. */
#define MGMT_EVT_GET_GROUP(event) (((event) >> 16) & MGMT_EVT_OP_ID_ALL)
/** Get event ID from event. */
#define MGMT_EVT_GET_ID(event) ((event) & MGMT_EVT_OP_ID_ALL)
/**
* MGMT event callback return value.
*/
enum mgmt_cb_return {
/** No error. */
MGMT_CB_OK,
/** SMP protocol error and ``err_rc`` contains the #mcumgr_err_t error code. */
MGMT_CB_ERROR_RC,
/**
* Group (application-level) error and ``err_group`` contains the group ID that caused
* the error and ``err_rc`` contains the error code of that group to return.
*/
MGMT_CB_ERROR_ERR,
};
/* Deprecated after Zephyr 3.4, use MGMT_CB_ERROR_ERR instead */
#define MGMT_CB_ERROR_RET __DEPRECATED_MACRO MGMT_CB_ERROR_ERR
/**
* @typedef mgmt_cb
* @brief Function to be called on MGMT notification/event.
*
* This callback function is used to notify an application or system about a MCUmgr mgmt event.
*
* @param event #mcumgr_op_t.
* @param prev_status #mgmt_cb_return of the previous handler calls, if it is an error then it
* will be the first error that was returned by a handler (i.e. this handler
* is being called for a notification only, the return code will be ignored).
* @param rc If ``prev_status`` is #MGMT_CB_ERROR_RC then this is the SMP error that
* was returned by the first handler that failed. If ``prev_status`` is
* #MGMT_CB_ERROR_ERR then this will be the group error rc code returned by
* the first handler that failed. If the handler wishes to raise an SMP
* error, this must be set to the #mcumgr_err_t status and #MGMT_CB_ERROR_RC
* must be returned by the function, if the handler wishes to raise a ret
* error, this must be set to the group ret status and #MGMT_CB_ERROR_ERR
* must be returned by the function.
* @param group If ``prev_status`` is #MGMT_CB_ERROR_ERR then this is the group of the
* ret error that was returned by the first handler that failed. If the
* handler wishes to raise a ret error, this must be set to the group ret
* status and #MGMT_CB_ERROR_ERR must be returned by the function.
* @param abort_more Set to true to abort further processing by additional handlers.
* @param data Optional event argument.
* @param data_size Size of optional event argument (0 if no data is provided).
*
* @return #mgmt_cb_return indicating the status to return to the calling code (only
* checked when this is the first failure reported by a handler).
*/
typedef enum mgmt_cb_return (*mgmt_cb)(uint32_t event, enum mgmt_cb_return prev_status,
int32_t *rc, uint16_t *group, bool *abort_more, void *data,
size_t data_size);
/**
* MGMT event callback group IDs. Note that this is not a 1:1 mapping with #mcumgr_group_t values.
*/
enum mgmt_cb_groups {
MGMT_EVT_GRP_ALL = 0,
MGMT_EVT_GRP_SMP,
MGMT_EVT_GRP_OS,
MGMT_EVT_GRP_IMG,
MGMT_EVT_GRP_FS,
MGMT_EVT_GRP_SETTINGS,
MGMT_EVT_GRP_USER_CUSTOM_START = MGMT_GROUP_ID_PERUSER,
};
/**
* MGMT event opcodes for all command processing.
*/
enum smp_all_events {
/** Used to enable all events. */
MGMT_EVT_OP_ALL = MGMT_DEF_EVT_OP_ALL(MGMT_EVT_GRP_ALL),
};
/**
* MGMT event opcodes for base SMP command processing.
*/
enum smp_group_events {
/** Callback when a command is received, data is mgmt_evt_op_cmd_arg(). */
MGMT_EVT_OP_CMD_RECV = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_SMP, 0),
/** Callback when a status is updated, data is mgmt_evt_op_cmd_arg(). */
MGMT_EVT_OP_CMD_STATUS = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_SMP, 1),
/** Callback when a command has been processed, data is mgmt_evt_op_cmd_arg(). */
MGMT_EVT_OP_CMD_DONE = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_SMP, 2),
/** Used to enable all smp_group events. */
MGMT_EVT_OP_CMD_ALL = MGMT_DEF_EVT_OP_ALL(MGMT_EVT_GRP_SMP),
};
/**
* MGMT event opcodes for filesystem management group.
*/
enum fs_mgmt_group_events {
/** Callback when a file has been accessed, data is fs_mgmt_file_access(). */
MGMT_EVT_OP_FS_MGMT_FILE_ACCESS = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_FS, 0),
/** Used to enable all fs_mgmt_group events. */
MGMT_EVT_OP_FS_MGMT_ALL = MGMT_DEF_EVT_OP_ALL(MGMT_EVT_GRP_FS),
};
/**
* MGMT event opcodes for image management group.
*/
enum img_mgmt_group_events {
/** Callback when a client sends a file upload chunk, data is img_mgmt_upload_check(). */
MGMT_EVT_OP_IMG_MGMT_DFU_CHUNK = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_IMG, 0),
/** Callback when a DFU operation is stopped. */
MGMT_EVT_OP_IMG_MGMT_DFU_STOPPED = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_IMG, 1),
/** Callback when a DFU operation is started. */
MGMT_EVT_OP_IMG_MGMT_DFU_STARTED = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_IMG, 2),
/** Callback when a DFU operation has finished being transferred. */
MGMT_EVT_OP_IMG_MGMT_DFU_PENDING = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_IMG, 3),
/** Callback when an image has been confirmed. */
MGMT_EVT_OP_IMG_MGMT_DFU_CONFIRMED = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_IMG, 4),
/** Callback when an image write command has finished writing to flash. */
MGMT_EVT_OP_IMG_MGMT_DFU_CHUNK_WRITE_COMPLETE = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_IMG, 5),
/** Used to enable all img_mgmt_group events. */
MGMT_EVT_OP_IMG_MGMT_ALL = MGMT_DEF_EVT_OP_ALL(MGMT_EVT_GRP_IMG),
};
/**
* MGMT event opcodes for operating system management group.
*/
enum os_mgmt_group_events {
/** Callback when a reset command has been received, data is os_mgmt_reset_data. */
MGMT_EVT_OP_OS_MGMT_RESET = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_OS, 0),
/** Callback when an info command is processed, data is os_mgmt_info_check. */
MGMT_EVT_OP_OS_MGMT_INFO_CHECK = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_OS, 1),
/** Callback when an info command needs to output data, data is os_mgmt_info_append. */
MGMT_EVT_OP_OS_MGMT_INFO_APPEND = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_OS, 2),
/** Callback when a datetime get command has been received. */
MGMT_EVT_OP_OS_MGMT_DATETIME_GET = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_OS, 3),
/** Callback when a datetime set command has been received, data is struct rtc_time(). */
MGMT_EVT_OP_OS_MGMT_DATETIME_SET = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_OS, 4),
/** Used to enable all os_mgmt_group events. */
MGMT_EVT_OP_OS_MGMT_ALL = MGMT_DEF_EVT_OP_ALL(MGMT_EVT_GRP_OS),
};
/**
* MGMT event opcodes for settings management group.
*/
enum settings_mgmt_group_events {
/** Callback when a setting is read/written/deleted. */
MGMT_EVT_OP_SETTINGS_MGMT_ACCESS = MGMT_DEF_EVT_OP_ID(MGMT_EVT_GRP_SETTINGS, 0),
/** Used to enable all settings_mgmt_group events. */
MGMT_EVT_OP_SETTINGS_MGMT_ALL = MGMT_DEF_EVT_OP_ALL(MGMT_EVT_GRP_SETTINGS),
};
/**
* MGMT callback struct
*/
struct mgmt_callback {
/** Entry list node. */
sys_snode_t node;
/** Callback that will be called. */
mgmt_cb callback;
/**
* MGMT_EVT_[...] Event ID for handler to be called on. This has special meaning if
* #MGMT_EVT_OP_ALL is used (which will cover all events for all groups), or
* MGMT_EVT_OP_*_MGMT_ALL (which will cover all events for a single group). For events
* that are part of a single group, they can be or'd together for this to have one
* registration trigger on multiple events, please note that this will only work for
* a single group, to register for events in different groups, they must be registered
* separately.
*/
uint32_t event_id;
};
/**
* Arguments for #MGMT_EVT_OP_CMD_RECV, #MGMT_EVT_OP_CMD_STATUS and #MGMT_EVT_OP_CMD_DONE
*/
struct mgmt_evt_op_cmd_arg {
/** #mcumgr_group_t */
uint16_t group;
/** Message ID within group */
uint8_t id;
union {
/** #mcumgr_op_t used in #MGMT_EVT_OP_CMD_RECV */
uint8_t op;
/** #mcumgr_err_t, used in #MGMT_EVT_OP_CMD_DONE */
int err;
/** #img_mgmt_id_upload_t, used in #MGMT_EVT_OP_CMD_STATUS */
int status;
};
};
/**
* @brief Get event ID index from event.
*
* @param event Event to get ID index from.
*
* @return Event index.
*/
uint8_t mgmt_evt_get_index(uint32_t event);
/**
* @brief This function is called to notify registered callbacks about mcumgr notifications/events.
*
* @param event #mcumgr_op_t.
* @param data Optional event argument.
* @param data_size Size of optional event argument (0 if none).
* @param err_rc Pointer to rc value.
* @param err_group Pointer to group value.
*
* @return #mgmt_cb_return either #MGMT_CB_OK if all handlers returned it, or
* #MGMT_CB_ERROR_RC if the first failed handler returned an SMP error (in
* which case ``err_rc`` will be updated with the SMP error) or
* #MGMT_CB_ERROR_ERR if the first failed handler returned a ret group and
* error (in which case ``err_group`` will be updated with the failed group
* ID and ``err_rc`` will be updated with the group-specific error code).
*/
enum mgmt_cb_return mgmt_callback_notify(uint32_t event, void *data, size_t data_size,
int32_t *err_rc, uint16_t *err_group);
/**
* @brief Register event callback function.
*
* @param callback Callback struct.
*/
void mgmt_callback_register(struct mgmt_callback *callback);
/**
* @brief Unregister event callback function.
*
* @param callback Callback struct.
*/
void mgmt_callback_unregister(struct mgmt_callback *callback);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* H_MCUMGR_CALLBACKS_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/mgmt/callbacks.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,815 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_MGMT_SMP_H_
#define ZEPHYR_INCLUDE_MGMT_SMP_H_
#include <zephyr/kernel.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief MCUmgr transport SMP API
* @defgroup mcumgr_transport_smp MCUmgr transport SMP API
* @ingroup mcumgr
* @{
*/
struct smp_transport;
struct zephyr_smp_transport;
struct net_buf;
/** @typedef smp_transport_out_fn
* @brief SMP transmit callback for transport
*
* The supplied net_buf is always consumed, regardless of return code.
*
* @param nb The net_buf to transmit.
*
* @return 0 on success, #mcumgr_err_t code on failure.
*/
typedef int (*smp_transport_out_fn)(struct net_buf *nb);
/** @typedef smp_transport_get_mtu_fn
* @brief SMP MTU query callback for transport
*
* The supplied net_buf should contain a request received from the peer whose
* MTU is being queried. This function takes a net_buf parameter because some
* transports store connection-specific information in the net_buf user header
* (e.g., the BLE transport stores the peer address).
*
* @param nb Contains a request from the relevant peer.
*
* @return The transport's MTU;
* 0 if transmission is currently not possible.
*/
typedef uint16_t (*smp_transport_get_mtu_fn)(const struct net_buf *nb);
/** @typedef smp_transport_ud_copy_fn
 * @brief SMP copy user_data callback
 *
 * The supplied src net_buf should contain a user_data that cannot be copied
 * using regular memcpy function (e.g., the BLE transport net_buf user_data
 * stores the connection reference that has to be incremented when it is going
 * to be used by another buffer).
 *
 * @param dst Destination buffer user_data pointer.
 * @param src Source buffer user_data pointer.
 *
 * @return 0 on success, #mcumgr_err_t code on failure.
 */
typedef int (*smp_transport_ud_copy_fn)(struct net_buf *dst,
					const struct net_buf *src);
/** @typedef smp_transport_ud_free_fn
* @brief SMP free user_data callback
*
* This function frees net_buf user data, because some transports store
* connection-specific information in the net_buf user data (e.g., the BLE
* transport stores the connection reference that has to be decreased).
*
* @param ud Contains a user_data pointer to be freed.
*/
typedef void (*smp_transport_ud_free_fn)(void *ud);
/** @typedef smp_transport_query_valid_check_fn
* @brief Function for checking if queued data is still valid.
*
* This function is used to check if queued SMP data is still valid e.g. on a remote device
* disconnecting, this is triggered when smp_rx_remove_invalid() is called.
*
* @param nb net buf containing queued request.
* @param arg Argument provided when calling smp_rx_remove_invalid() function.
*
* @return false if data is no longer valid/should be freed, true otherwise.
*/
typedef bool (*smp_transport_query_valid_check_fn)(struct net_buf *nb, void *arg);
/**
* @brief Function pointers of SMP transport functions, if a handler is NULL then it is not
* supported/implemented.
*/
struct smp_transport_api_t {
/** Transport's send function. */
smp_transport_out_fn output;
/** Transport's get-MTU function. */
smp_transport_get_mtu_fn get_mtu;
/** Transport buffer user_data copy function. */
smp_transport_ud_copy_fn ud_copy;
/** Transport buffer user_data free function. */
smp_transport_ud_free_fn ud_free;
/** Transport's check function for if a query is valid. */
smp_transport_query_valid_check_fn query_valid_check;
};
/**
* @brief SMP transport object for sending SMP responses.
*/
struct smp_transport {
/* Must be the first member. */
struct k_work work;
/* FIFO containing incoming requests to be processed. */
struct k_fifo fifo;
/* Function pointers */
struct smp_transport_api_t functions;
#ifdef CONFIG_MCUMGR_TRANSPORT_REASSEMBLY
/* Packet reassembly internal data, API access only */
struct {
struct net_buf *current; /* net_buf used for reassembly */
uint16_t expected; /* expected bytes to come */
} __reassembly;
#endif
};
/**
 * @brief SMP transport type for client registration
 */
enum smp_transport_type {
	/** SMP serial */
	SMP_SERIAL_TRANSPORT = 0,
	/** SMP bluetooth */
	SMP_BLUETOOTH_TRANSPORT,
	/** SMP shell */
	SMP_SHELL_TRANSPORT,
	/** SMP UDP IPv4 */
	SMP_UDP_IPV4_TRANSPORT,
	/** SMP UDP IPv6 */
	SMP_UDP_IPV6_TRANSPORT,
	/** SMP user defined type */
	SMP_USER_DEFINED_TRANSPORT
};
/**
* @brief SMP Client transport structure
*/
struct smp_client_transport_entry {
sys_snode_t node;
/** Transport structure pointer */
struct smp_transport *smpt;
/** Transport type */
int smpt_type;
};
/**
* @brief Initializes a Zephyr SMP transport object.
*
* @param smpt The transport to construct.
*
* @return 0 If successful
* @return Negative errno code if failure.
*/
int smp_transport_init(struct smp_transport *smpt);
/**
* @brief Used to remove queued requests for an SMP transport that are no longer valid. A
* smp_transport_query_valid_check_fn() function must be registered for this to
* function. If the smp_transport_query_valid_check_fn() function returns false
* during a callback, the queried command will classed as invalid and dropped.
*
* @param zst The transport to use.
* @param arg Argument provided to callback smp_transport_query_valid_check_fn() function.
*/
void smp_rx_remove_invalid(struct smp_transport *zst, void *arg);
/**
* @brief Used to clear pending queued requests for an SMP transport.
*
* @param zst The transport to use.
*/
void smp_rx_clear(struct smp_transport *zst);
/**
* @brief Register a Zephyr SMP transport object for client.
*
* @param entry The transport to construct.
*/
void smp_client_transport_register(struct smp_client_transport_entry *entry);
/**
* @brief Discover a registered SMP transport client object.
*
* @param smpt_type Type of transport
*
* @return Pointer to registered object. Unknown type return NULL.
*/
struct smp_transport *smp_client_transport_get(int smpt_type);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/transport/smp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,435 |
```objective-c
/*
*
*/
/** @file
* @brief Bluetooth transport for the mcumgr SMP protocol.
*/
#ifndef ZEPHYR_INCLUDE_MGMT_SMP_BT_H_
#define ZEPHYR_INCLUDE_MGMT_SMP_BT_H_
#include <zephyr/types.h>
struct bt_conn;
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Registers the SMP Bluetooth service. Should only be called if the Bluetooth
* transport has been unregistered by calling smp_bt_unregister().
*
* @return 0 on success; negative error code on failure.
*/
int smp_bt_register(void);
/**
* @brief Unregisters the SMP Bluetooth service.
*
* @return 0 on success; negative error code on failure.
*/
int smp_bt_unregister(void);
/**
* @brief Transmits an SMP command/response over the specified Bluetooth connection as a
* notification.
*
* @param conn Connection object.
* @param data Pointer to SMP message.
* @param len data length.
*
* @return 0 in case of success or negative value in case of error.
*/
int smp_bt_notify(struct bt_conn *conn, const void *data, uint16_t len);
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/transport/smp_bt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 261 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_SIMULATOR_H_
#define ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_SIMULATOR_H_
/**
* @file
* @brief Header for commands to interact with the simulator outside of normal
* device interface.
*/
/* For ec_host_cmd_backend_api_send function pointer type */
#include <zephyr/mgmt/ec_host_cmd/backend.h>
/**
* @brief Install callback for when this device would sends data to host
*
* When this host command simulator device should send data to the host, it
* will call the callback parameter provided by this function. Note that
* only one callback may be installed at a time. Calling this a second time
* will override the first callback installation.
*
* @param cb Callback that is called when device would send data to host.
* @param tx_buf Pointer of a pointer to the tx buf structure where data will
* be sent.
*/
void ec_host_cmd_backend_sim_install_send_cb(ec_host_cmd_backend_api_send cb,
struct ec_host_cmd_tx_buf **tx_buf);
/**
* @brief Simulate receiving data from host as passed in to this function
*
* Calling this function simulates that data was sent from the host to the DUT.
*
* @param buffer The buffer that contains the data to receive.
* @param len The number of bytes that are received from the above buffer.
*
* @retval 0 if successful
* @retval -ENOMEM if len is greater than the RX buffer size.
* @retval -EBUSY if the host command framework is busy with another request.
*/
int ec_host_cmd_backend_sim_data_received(const uint8_t *buffer, size_t len);
#endif /* ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_SIMULATOR_H_ */
``` | /content/code_sandbox/include/zephyr/mgmt/ec_host_cmd/simulator.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 371 |
```objective-c
/*
*
*/
/** @file
* @brief Dummy transport for the mcumgr SMP protocol for unit testing.
*/
#ifndef ZEPHYR_INCLUDE_MGMT_MCUMGR_TRANSPORT_DUMMY_H_
#define ZEPHYR_INCLUDE_MGMT_MCUMGR_TRANSPORT_DUMMY_H_
#include <zephyr/kernel.h>
#include <zephyr/net/buf.h>
#include <zephyr/mgmt/mcumgr/mgmt/mgmt.h>
#include <zephyr/mgmt/mcumgr/transport/serial.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Clears internal dummy SMP state and resets semaphore
*/
void smp_dummy_clear_state(void);
/**
* @brief Adds SMPC data to the internal buffer to be processed
*
* @param data Input data buffer
* @param data_size Size of data (in bytes)
*/
void dummy_mcumgr_add_data(uint8_t *data, uint16_t data_size);
/**
* @brief Processes a single line (fragment) coming from the mcumgr response to
* be used in tests
*
* @retval net buffer of processed data
*/
struct net_buf *smp_dummy_get_outgoing(void);
/**
* @brief Waits for a period of time for outgoing SMPC data to be ready and
* returns either when a full message is ready or when the timeout has
* elapsed.
*
* @param wait_time_s Time to wait for data (in seconds)
*
* @retval true on message received successfully, false on timeout
*/
bool smp_dummy_wait_for_data(uint32_t wait_time_s);
/**
* @brief Calls dummy_mcumgr_add_data with the internal SMPC receive buffer.
*/
void smp_dummy_add_data(void);
/**
* @brief Gets current send buffer position
*
* @retval Current send buffer position (in bytes)
*/
uint16_t smp_dummy_get_send_pos(void);
/**
* @brief Gets current receive buffer position
*
* @retval Current receive buffer position (in bytes)
*/
uint16_t smp_dummy_get_receive_pos(void);
/**
* @brief Converts input data to go out through the internal SMPC buffer.
*
* @param data Input data buffer
* @param len Size of data (in bytes)
*
* @retval 0 on success, negative on error.
*/
int smp_dummy_tx_pkt(const uint8_t *data, int len);
/**
* @brief Enabled the dummy SMP module (will process sent/received data)
*/
void smp_dummy_enable(void);
/**
* @brief Disables the dummy SMP module (will not process sent/received data)
*/
void smp_dummy_disable(void);
/**
* @brief Returns status on if the dummy SMP system is active
*
* @retval true if dummy SMP is enabled, false otherwise
*/
bool smp_dummy_get_status(void);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_MGMT_MCUMGR_TRANSPORT_DUMMY_H_ */
``` | /content/code_sandbox/include/zephyr/mgmt/mcumgr/transport/smp_dummy.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 614 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public APIs for Host Command backends that respond to host commands
*/
#ifndef ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_BACKEND_H_
#define ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_BACKEND_H_
#include <zephyr/sys/__assert.h>
#include <zephyr/device.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct ec_host_cmd_backend {
	/** API provided by the backend. */
	const struct ec_host_cmd_backend_api *api;
	/** Context for the backend. */
	void *ctx;
};
/**
* @brief EC Host Command Interface
* @defgroup ec_host_cmd_interface EC Host Command Interface
* @ingroup io_interfaces
* @{
*/
/**
* @brief Context for host command backend and handler to pass rx data.
*/
struct ec_host_cmd_rx_ctx {
/**
* Buffer to hold received data. The buffer is provided by the handler if
* CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE > 0. Otherwise, the backend should provide
* the buffer on its own and overwrites @a buf pointer and @a len_max
* in the init function.
*/
uint8_t *buf;
/** Number of bytes written to @a buf by backend. */
size_t len;
/** Maximum number of bytes to receive with one request packet. */
size_t len_max;
};
/**
* @brief Context for host command backend and handler to pass tx data
*/
struct ec_host_cmd_tx_buf {
/**
* Data to write to the host The buffer is provided by the handler if
* CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE > 0. Otherwise, the backend should provide
* the buffer on its own and overwrites @a buf pointer and @a len_max
* in the init function.
*/
void *buf;
/** Number of bytes to write from @a buf. */
size_t len;
/** Maximum number of bytes to send with one response packet. */
size_t len_max;
};
/**
 * @brief Initialize a host command backend
 *
 * This routine initializes a host command backend. It includes initialization
 * of a device used for communication and setting up buffers.
 * This function is called by the ec_host_cmd_init function.
 *
 * @param[in] backend Pointer to the backend structure for the driver instance.
 * @param[in,out] rx_ctx Pointer to the receive context object. These objects are used to receive
 *                       data from the driver when the host sends data. The buf member can be
 *                       assigned by the backend.
 * @param[in,out] tx Pointer to the transmit buffer object. The buf and len_max members can be
 *                   assigned by the backend. These objects are used to send data by the
 *                   backend with the ec_host_cmd_backend_api_send function.
 *
 * @retval 0 if successful
 */
typedef int (*ec_host_cmd_backend_api_init)(const struct ec_host_cmd_backend *backend,
					    struct ec_host_cmd_rx_ctx *rx_ctx,
					    struct ec_host_cmd_tx_buf *tx);
/**
 * @brief Sends data to the host
 *
 * Sends data from the tx buf that was passed via the ec_host_cmd_backend_api_init
 * function.
 *
 * @param backend Pointer to the backend to send data.
 *
 * @retval 0 if successful.
 */
typedef int (*ec_host_cmd_backend_api_send)(const struct ec_host_cmd_backend *backend);
/** @brief API vector a host command backend must implement. */
struct ec_host_cmd_backend_api {
	/** Initialize the backend; see @ref ec_host_cmd_backend_api_init. */
	ec_host_cmd_backend_api_init init;
	/** Send the prepared tx buffer; see @ref ec_host_cmd_backend_api_send. */
	ec_host_cmd_backend_api_send send;
};
/**
 * @brief Get the eSPI Host Command backend pointer
 *
 * Get the eSPI backend pointer, passing the eSPI device instance that will be used for
 * the Host Command communication.
 *
 * @param dev Pointer to eSPI device instance.
 *
 * @retval The eSPI backend pointer.
 */
struct ec_host_cmd_backend *ec_host_cmd_backend_get_espi(const struct device *dev);

/**
 * @brief Get the SHI NPCX Host Command backend pointer
 *
 * @retval the SHI NPCX backend pointer
 */
struct ec_host_cmd_backend *ec_host_cmd_backend_get_shi_npcx(void);

/**
 * @brief Get the SHI ITE Host Command backend pointer
 *
 * @retval the SHI ITE backend pointer
 */
struct ec_host_cmd_backend *ec_host_cmd_backend_get_shi_ite(void);

/**
 * @brief Get the UART Host Command backend pointer
 *
 * Get the UART backend pointer, passing the UART device instance that will be used for
 * the Host Command communication.
 *
 * @param dev Pointer to UART device instance.
 *
 * @retval The UART backend pointer.
 */
struct ec_host_cmd_backend *ec_host_cmd_backend_get_uart(const struct device *dev);

/**
 * @brief Get the SPI Host Command backend pointer
 *
 * Get the SPI backend pointer, passing the chip select pin that will be used for the Host
 * Command communication.
 *
 * @param cs Chip select pin.
 *
 * @retval The SPI backend pointer.
 */
struct ec_host_cmd_backend *ec_host_cmd_backend_get_spi(struct gpio_dt_spec *cs);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_EC_HOST_CMD_BACKEND_H_ */
``` | /content/code_sandbox/include/zephyr/mgmt/ec_host_cmd/backend.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,120 |
```objective-c
/* exception.h - automatically selects the correct exception.h file to include */
/*
 */
#ifndef ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_
/*
 * Pick the architecture-specific exception header. More specific options are
 * tested first (e.g. CONFIG_X86_64 before CONFIG_X86). There is no #else
 * fallback: architectures not listed here get no exception header.
 */
#if defined(CONFIG_X86_64)
#include <zephyr/arch/x86/intel64/exception.h>
#elif defined(CONFIG_X86)
#include <zephyr/arch/x86/ia32/exception.h>
#elif defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/exception.h>
#elif defined(CONFIG_ARM)
#include <zephyr/arch/arm/exception.h>
#elif defined(CONFIG_ARC)
#include <zephyr/arch/arc/v2/exception.h>
#elif defined(CONFIG_NIOS2)
#include <zephyr/arch/nios2/exception.h>
#elif defined(CONFIG_RISCV)
#include <zephyr/arch/riscv/exception.h>
#elif defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/exception.h>
#elif defined(CONFIG_MIPS)
#include <zephyr/arch/mips/exception.h>
#elif defined(CONFIG_ARCH_POSIX)
#include <zephyr/arch/posix/exception.h>
#elif defined(CONFIG_SPARC)
#include <zephyr/arch/sparc/exception.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 267 |
```objective-c
/* cpu.h - automatically selects the correct arch.h file to include */
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_ARCH_CPU_H_
#define ZEPHYR_INCLUDE_ARCH_CPU_H_
/* Generic arch interface, included for every architecture. */
#include <zephyr/arch/arch_interface.h>
/* Architecture-specific arch.h selected by Kconfig; no fallback branch. */
#if defined(CONFIG_X86)
#include <zephyr/arch/x86/arch.h>
#elif defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/arch.h>
#elif defined(CONFIG_ARM)
#include <zephyr/arch/arm/arch.h>
#elif defined(CONFIG_ARC)
#include <zephyr/arch/arc/arch.h>
#elif defined(CONFIG_NIOS2)
#include <zephyr/arch/nios2/arch.h>
#elif defined(CONFIG_RISCV)
#include <zephyr/arch/riscv/arch.h>
#elif defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/arch.h>
#elif defined(CONFIG_MIPS)
#include <zephyr/arch/mips/arch.h>
#elif defined(CONFIG_ARCH_POSIX)
#include <zephyr/arch/posix/arch.h>
#elif defined(CONFIG_SPARC)
#include <zephyr/arch/sparc/arch.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_CPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/cpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 240 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_EC_HOST_CMD_H_
#define ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_EC_HOST_CMD_H_
/**
* @brief EC Host Command Interface
* @defgroup ec_host_cmd_interface EC Host Command Interface
* @since 2.4
* @version 0.1.0
* @ingroup io_interfaces
* @{
*/
#include <stdint.h>
#include <zephyr/kernel.h>
#include <zephyr/mgmt/ec_host_cmd/backend.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/iterable_sections.h>
/**
 * @brief Host command response codes (16-bit).
 */
enum ec_host_cmd_status {
	/** Host command was successful. */
	EC_HOST_CMD_SUCCESS = 0,
	/** The specified command id is not recognized or supported. */
	EC_HOST_CMD_INVALID_COMMAND = 1,
	/** Generic Error. */
	EC_HOST_CMD_ERROR = 2,
	/** One or more of the input request parameters is invalid. */
	EC_HOST_CMD_INVALID_PARAM = 3,
	/** Host command is not permitted. */
	EC_HOST_CMD_ACCESS_DENIED = 4,
	/** Response was invalid (e.g. not version 3 of header). */
	EC_HOST_CMD_INVALID_RESPONSE = 5,
	/** Host command id version unsupported. */
	EC_HOST_CMD_INVALID_VERSION = 6,
	/** Checksum did not match. */
	EC_HOST_CMD_INVALID_CHECKSUM = 7,
	/** A host command is currently being processed. */
	EC_HOST_CMD_IN_PROGRESS = 8,
	/** Requested information is currently unavailable. */
	EC_HOST_CMD_UNAVAILABLE = 9,
	/** Timeout during processing. */
	EC_HOST_CMD_TIMEOUT = 10,
	/** Data or table overflow. */
	EC_HOST_CMD_OVERFLOW = 11,
	/** Header is invalid or unsupported (e.g. not version 3 of header). */
	EC_HOST_CMD_INVALID_HEADER = 12,
	/** Did not receive all expected request data. */
	EC_HOST_CMD_REQUEST_TRUNCATED = 13,
	/** Response was too big to send within one response packet. */
	EC_HOST_CMD_RESPONSE_TOO_BIG = 14,
	/** Error on underlying communication bus. */
	EC_HOST_CMD_BUS_ERROR = 15,
	/** System busy. Should retry later. */
	EC_HOST_CMD_BUSY = 16,
	/** Header version invalid. */
	EC_HOST_CMD_INVALID_HEADER_VERSION = 17,
	/** Header CRC invalid. */
	EC_HOST_CMD_INVALID_HEADER_CRC = 18,
	/** Data CRC invalid. */
	EC_HOST_CMD_INVALID_DATA_CRC = 19,
	/** Can't resend response. */
	EC_HOST_CMD_DUP_UNAVAILABLE = 20,

	EC_HOST_CMD_MAX = UINT16_MAX /* Force enum to be 16 bits. */
} __packed;
/** @brief Verbosity levels for host command debug logging. */
enum ec_host_cmd_log_level {
	EC_HOST_CMD_DEBUG_OFF,    /* No Host Command debug output */
	EC_HOST_CMD_DEBUG_NORMAL, /* Normal output mode; skips repeated commands */
	EC_HOST_CMD_DEBUG_EVERY,  /* Print every command */
	EC_HOST_CMD_DEBUG_PARAMS, /* ... and print params for request/response */
	EC_HOST_CMD_DEBUG_MODES   /* Number of host command debug modes */
};

/** @brief Processing state of the host command subsystem. */
enum ec_host_cmd_state {
	EC_HOST_CMD_STATE_DISABLED = 0,
	EC_HOST_CMD_STATE_RECEIVING,
	EC_HOST_CMD_STATE_PROCESSING,
	EC_HOST_CMD_STATE_SENDING,
};
/** Callback invoked by the handler after a host command has been received. */
typedef void (*ec_host_cmd_user_cb_t)(const struct ec_host_cmd_rx_ctx *rx_ctx, void *user_data);
/** Callback used to continue work after a handler returns EC_HOST_CMD_IN_PROGRESS. */
typedef enum ec_host_cmd_status (*ec_host_cmd_in_progress_cb_t)(void *user_data);

/** @brief Main host command subsystem context. */
struct ec_host_cmd {
	/** Receive context shared with the backend. */
	struct ec_host_cmd_rx_ctx rx_ctx;
	/** Transmit buffer shared with the backend. */
	struct ec_host_cmd_tx_buf tx;
	/** Backend used for the communication with the host. */
	struct ec_host_cmd_backend *backend;
	/**
	 * The backend gives rx_ready (by calling the ec_host_cmd_send_receive function),
	 * when data in rx_ctx are ready. The handler takes rx_ready to read data in rx_ctx.
	 *
	 * NOTE(review): ec_host_cmd_send_receive(d) is not declared in this header;
	 * this likely refers to ec_host_cmd_rx_notify — confirm and update.
	 */
	struct k_sem rx_ready;
	/** Status of the rx data checked in the ec_host_cmd_send_received function. */
	enum ec_host_cmd_status rx_status;
	/**
	 * User callback after receiving a command. It is called by the ec_host_cmd_send_received
	 * function.
	 */
	ec_host_cmd_user_cb_t user_cb;
	/** User data passed to @a user_cb. */
	void *user_data;
	/** Current processing state. */
	enum ec_host_cmd_state state;
#ifdef CONFIG_EC_HOST_CMD_DEDICATED_THREAD
	/** Thread object for the dedicated host command thread. */
	struct k_thread thread;
#endif /* CONFIG_EC_HOST_CMD_DEDICATED_THREAD */
};
/**
 * @brief Arguments passed into every installed host command handler
 */
struct ec_host_cmd_handler_args {
	/** Reserved for compatibility. */
	void *reserved;
	/** Command identifier. */
	uint16_t command;
	/**
	 * The version of the host command that is being requested. This will
	 * be a value that has been static registered as valid for the handler.
	 */
	uint8_t version;
	/** The incoming data that can be cast to the handlers request type. */
	const void *input_buf;
	/** The number of valid bytes that can be read from @a input_buf. */
	uint16_t input_buf_size;
	/** The data written to this buffer will be sent to the host. */
	void *output_buf;
	/** Maximum number of bytes that can be written to the @a output_buf. */
	uint16_t output_buf_max;
	/** Number of bytes of @a output_buf to send to the host. */
	uint16_t output_buf_size;
};

/** Handler callback; processes the request described by @p args and returns a status code. */
typedef enum ec_host_cmd_status (*ec_host_cmd_handler_cb)(struct ec_host_cmd_handler_args *args);
/**
 * @brief Structure used for statically registering host command handlers
 */
struct ec_host_cmd_handler {
	/** Callback routine to process commands that match @a id. */
	ec_host_cmd_handler_cb handler;
	/** The numerical command id used as the lookup for commands. */
	uint16_t id;
	/**
	 * The bitfield of all versions that the @a handler supports, where
	 * each bit value represents that the @a handler supports that version.
	 * E.g. BIT(0) corresponds to version 0.
	 */
	uint16_t version_mask;
	/**
	 * The minimum @a input_buf_size enforced by the framework before
	 * passing to the handler.
	 */
	uint16_t min_rqt_size;
	/**
	 * The minimum @a output_buf_size enforced by the framework before
	 * passing to the handler.
	 */
	uint16_t min_rsp_size;
};
/**
 * @brief Statically define and register a host command handler.
 *
 * Helper macro to statically define and register a host command handler that
 * has compile-time-fixed sizes for both its request and response structures.
 *
 * @note @p _id is token-pasted into the name of the generated iterable-section
 *       entry (__cmd##_id), so it must be a single preprocessor token.
 *
 * @param _id Id of host command to handle request for.
 * @param _function Name of handler function.
 * @param _version_mask The bitfield of all versions that the @a _function
 *                      supports. E.g. BIT(0) corresponds to version 0.
 * @param _request_type The datatype of the request parameters for @a _function.
 * @param _response_type The datatype of the response parameters for
 *                       @a _function.
 */
#define EC_HOST_CMD_HANDLER(_id, _function, _version_mask, _request_type, _response_type)          \
	const STRUCT_SECTION_ITERABLE(ec_host_cmd_handler, __cmd##_id) = {                         \
		.handler = _function,                                                              \
		.id = _id,                                                                         \
		.version_mask = _version_mask,                                                     \
		.min_rqt_size = sizeof(_request_type),                                             \
		.min_rsp_size = sizeof(_response_type),                                            \
	}
/**
 * @brief Statically define and register a host command handler without sizes.
 *
 * Helper macro to statically define and register a host command handler whose
 * request or response structure size is not known at compile time. The
 * framework performs no minimum-size enforcement (both minimums are 0).
 *
 * @param _id Id of host command to handle request for.
 * @param _function Name of handler function.
 * @param _version_mask The bitfield of all versions that the @a _function
 *                      supports. E.g. BIT(0) corresponds to version 0.
 */
#define EC_HOST_CMD_HANDLER_UNBOUND(_id, _function, _version_mask)                                 \
	const STRUCT_SECTION_ITERABLE(ec_host_cmd_handler, __cmd##_id) = {                         \
		.handler = _function,                                                              \
		.id = _id,                                                                         \
		.version_mask = _version_mask,                                                     \
		.min_rqt_size = 0,                                                                 \
		.min_rsp_size = 0,                                                                 \
	}
/**
 * @brief Header for requests from host to embedded controller
 *
 * Represent the over-the-wire header in LE format for host command requests.
 * This represent version 3 of the host command header. The requests are always
 * sent from host to embedded controller.
 */
struct ec_host_cmd_request_header {
	/**
	 * Should be 3. The EC will return EC_HOST_CMD_INVALID_HEADER if it
	 * receives a header with a version it doesn't know how to parse.
	 */
	uint8_t prtcl_ver;
	/**
	 * Checksum of request and data; sum of all bytes including checksum.
	 * Should total to 0.
	 */
	uint8_t checksum;
	/** Id of command that is being sent. */
	uint16_t cmd_id;
	/**
	 * Version of the specific @a cmd_id being requested. Valid
	 * versions start at 0.
	 */
	uint8_t cmd_ver;
	/** Unused byte in current protocol version; set to 0. */
	uint8_t reserved;
	/** Length of data which follows this header. */
	uint16_t data_len;
} __packed;
/**
 * @brief Header for responses from embedded controller to host
 *
 * Represent the over-the-wire header in LE format for host command responses.
 * This represent version 3 of the host command header. Responses are always
 * sent from embedded controller to host.
 */
struct ec_host_cmd_response_header {
	/** Should be 3. */
	uint8_t prtcl_ver;
	/**
	 * Checksum of response and data; sum of all bytes including checksum.
	 * Should total to 0.
	 */
	uint8_t checksum;
	/** An @a ec_host_cmd_status response code for specific command. */
	uint16_t result;
	/** Length of data which follows this header. */
	uint16_t data_len;
	/** Unused bytes in current protocol version; set to 0. */
	uint16_t reserved;
} __packed;
/**
 * @brief Initialize the host command subsystem
 *
 * This routine initializes the host command subsystem. It includes initialization
 * of a backend and the handler.
 * When the application configures the zephyr,host-cmd-espi-backend/zephyr,host-cmd-shi-backend/
 * zephyr,host-cmd-uart-backend chosen node and @kconfig{CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT} is
 * set, the chosen backend automatically calls this routine at
 * @kconfig{CONFIG_EC_HOST_CMD_INIT_PRIORITY}. Applications that require a run-time selection of the
 * backend must set @kconfig{CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT} to n and must explicitly call
 * this routine.
 *
 * @param[in] backend Pointer to the backend structure to initialize.
 *
 * @retval 0 if successful
 */
int ec_host_cmd_init(struct ec_host_cmd_backend *backend);

/**
 * @brief Send the host command response
 *
 * This routine sends the host command response. It should be used to send the IN_PROGRESS
 * status, or when the host command handler doesn't return (e.g. a reboot command).
 *
 * @param[in] status Host command status to be sent.
 * @param[in] args Pointer of a structure passed to the handler.
 *
 * @retval 0 if successful.
 */
int ec_host_cmd_send_response(enum ec_host_cmd_status status,
			      const struct ec_host_cmd_handler_args *args);

/**
 * @brief Signal a new host command
 *
 * Signal that a new host command has been received. The function should be called by a backend
 * after copying data to the rx buffer and setting the length.
 */
void ec_host_cmd_rx_notify(void);

/**
 * @brief Install a user callback for receiving a host command
 *
 * It allows installing a custom procedure needed by a user after receiving a command.
 *
 * @param[in] cb A callback to be installed.
 * @param[in] user_data User data to be passed to the callback.
 */
void ec_host_cmd_set_user_cb(ec_host_cmd_user_cb_t cb, void *user_data);

/**
 * @brief Get the main ec host command structure
 *
 * This routine returns a pointer to the main host command structure.
 * It allows the application code to get inside information for any reason e.g.
 * the host command thread id.
 *
 * @retval A pointer to the main host command structure
 */
const struct ec_host_cmd *ec_host_cmd_get_hc(void);
#ifndef CONFIG_EC_HOST_CMD_DEDICATED_THREAD
/**
 * @brief The thread function for the Host Command subsystem
 *
 * This routine calls the Host Command thread entry function. If
 * @kconfig{CONFIG_EC_HOST_CMD_DEDICATED_THREAD} is not defined, a new thread is not created,
 * and this function has to be called by application code. It doesn't return.
 */
FUNC_NORETURN void ec_host_cmd_task(void);
#endif
#ifdef CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS
/**
 * @brief Check if a Host Command that sent EC_HOST_CMD_IN_PROGRESS status has ended.
 *
 * A Host Command that sends EC_HOST_CMD_IN_PROGRESS status doesn't send a final result.
 * The final result can be retrieved with the ec_host_cmd_send_in_progress_status function.
 *
 * @retval true if the Host Command ended
 */
bool ec_host_cmd_send_in_progress_ended(void);

/**
 * @brief Get final result of a last Host Command that has sent EC_HOST_CMD_IN_PROGRESS status.
 *
 * A Host Command that sends EC_HOST_CMD_IN_PROGRESS status doesn't send a final result.
 * Get the saved status with this function. The status can be retrieved only once. Further
 * calls return EC_HOST_CMD_UNAVAILABLE.
 *
 * Saving status of Host Commands that send response data is not supported.
 *
 * @retval The final status or EC_HOST_CMD_UNAVAILABLE if not available.
 */
enum ec_host_cmd_status ec_host_cmd_send_in_progress_status(void);

/**
 * @brief Continue processing a handler in callback after returning EC_HOST_CMD_IN_PROGRESS.
 *
 * A Host Command handler may return the EC_HOST_CMD_IN_PROGRESS, but needs to continue work.
 * This function should be called before returning EC_HOST_CMD_IN_PROGRESS with a callback that
 * will be executed. The return status of the callback will be stored and can be retrieved with
 * the ec_host_cmd_send_in_progress_status function. The ec_host_cmd_send_in_progress_ended
 * function can be used to check if the callback has ended.
 *
 * @param[in] cb A callback to be called after returning from a command handler.
 * @param[in] user_data User data to be passed to the callback.
 *
 * @retval EC_HOST_CMD_BUSY if any command is already in progress, EC_HOST_CMD_SUCCESS otherwise
 */
enum ec_host_cmd_status ec_host_cmd_send_in_progress_continue(ec_host_cmd_in_progress_cb_t cb,
							      void *user_data);
#endif /* CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS */

/**
 * @brief Add a suppressed command.
 *
 * Suppressed commands are not logged. Add a command to be suppressed.
 *
 * @param[in] cmd_id A command id to be suppressed.
 *
 * @retval 0 if successful, -EIO if exceeded max number of suppressed commands.
 */
int ec_host_cmd_add_suppressed(uint16_t cmd_id);
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_MGMT_EC_HOST_CMD_EC_HOST_CMD_H_ */
``` | /content/code_sandbox/include/zephyr/mgmt/ec_host_cmd/ec_host_cmd.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,333 |
```objective-c
/*
*
*/
/**
* @file
* Public APIs for architectural cache controller drivers
*/
#ifndef ZEPHYR_INCLUDE_ARCH_CACHE_H_
#define ZEPHYR_INCLUDE_ARCH_CACHE_H_
/**
* @brief Cache Controller Interface
* @defgroup cache_arch_interface Cache Controller Interface
* @ingroup io_interfaces
* @{
*/
#if defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/cache.h>
#elif defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/cache.h>
#endif
#if defined(CONFIG_DCACHE) || defined(__DOXYGEN__)

/**
 * @brief Enable the d-cache
 *
 * Enable the data cache.
 */
void arch_dcache_enable(void);

/* The cache_* defines map the generic cache API names onto the arch implementation. */
#define cache_data_enable arch_dcache_enable

/**
 * @brief Disable the d-cache
 *
 * Disable the data cache.
 */
void arch_dcache_disable(void);

#define cache_data_disable arch_dcache_disable

/**
 * @brief Flush the d-cache
 *
 * Flush the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_dcache_flush_all(void);

#define cache_data_flush_all arch_dcache_flush_all

/**
 * @brief Invalidate the d-cache
 *
 * Invalidate the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_dcache_invd_all(void);

#define cache_data_invd_all arch_dcache_invd_all

/**
 * @brief Flush and Invalidate the d-cache
 *
 * Flush and Invalidate the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_dcache_flush_and_invd_all(void);

#define cache_data_flush_and_invd_all arch_dcache_flush_and_invd_all
/**
 * @brief Flush an address range in the d-cache
 *
 * Flush the specified address range of the data cache.
 *
 * @note the cache operations act on cache line. When multiple data structures
 *       share the same cache line being flushed, all the portions of the
 *       data structures sharing the same line will be flushed. This is usually
 *       not a problem because writing back is a non-destructive process that
 *       could be triggered by hardware at any time, so having an aligned
 *       @p addr or a padded @p size is not strictly necessary.
 *
 * @param addr Starting address to flush.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_dcache_flush_range(void *addr, size_t size);

#define cache_data_flush_range(addr, size) arch_dcache_flush_range(addr, size)

/**
 * @brief Invalidate an address range in the d-cache
 *
 * Invalidate the specified address range of the data cache.
 *
 * @note the cache operations act on cache line. When multiple data structures
 *       share the same cache line being invalidated, all the portions of the
 *       non-read-only data structures sharing the same line will be
 *       invalidated as well. This is a destructive process that could lead to
 *       data loss and/or corruption. When @p addr is not aligned to the cache
 *       line and/or @p size is not a multiple of the cache line size the
 *       behaviour is undefined.
 *
 * @param addr Starting address to invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_dcache_invd_range(void *addr, size_t size);

#define cache_data_invd_range(addr, size) arch_dcache_invd_range(addr, size)

/**
 * @brief Flush and Invalidate an address range in the d-cache
 *
 * Flush and Invalidate the specified address range of the data cache.
 *
 * @note the cache operations act on cache line. When multiple data structures
 *       share the same cache line being flushed, all the portions of the
 *       data structures sharing the same line will be flushed before being
 *       invalidated. This is usually not a problem because writing back is a
 *       non-destructive process that could be triggered by hardware at any
 *       time, so having an aligned @p addr or a padded @p size is not strictly
 *       necessary.
 *
 * @param addr Starting address to flush and invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_dcache_flush_and_invd_range(void *addr, size_t size);

#define cache_data_flush_and_invd_range(addr, size) \
	arch_dcache_flush_and_invd_range(addr, size)
#if defined(CONFIG_DCACHE_LINE_SIZE_DETECT) || defined(__DOXYGEN__)

/**
 *
 * @brief Get the d-cache line size.
 *
 * The API is provided to dynamically detect the data cache line size at run
 * time.
 *
 * The function must be implemented only when CONFIG_DCACHE_LINE_SIZE_DETECT is
 * defined.
 *
 * @retval size Size of the d-cache line.
 * @retval 0 If the d-cache is not enabled.
 */
size_t arch_dcache_line_size_get(void);

#define cache_data_line_size_get arch_dcache_line_size_get

#endif /* CONFIG_DCACHE_LINE_SIZE_DETECT || __DOXYGEN__ */

#endif /* CONFIG_DCACHE || __DOXYGEN__ */
#if defined(CONFIG_ICACHE) || defined(__DOXYGEN__)

/**
 * @brief Enable the i-cache
 *
 * Enable the instruction cache.
 */
void arch_icache_enable(void);

/* The cache_* defines map the generic cache API names onto the arch implementation. */
#define cache_instr_enable arch_icache_enable

/**
 * @brief Disable the i-cache
 *
 * Disable the instruction cache.
 */
void arch_icache_disable(void);

#define cache_instr_disable arch_icache_disable

/**
 * @brief Flush the i-cache
 *
 * Flush the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_icache_flush_all(void);

#define cache_instr_flush_all arch_icache_flush_all

/**
 * @brief Invalidate the i-cache
 *
 * Invalidate the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_icache_invd_all(void);

#define cache_instr_invd_all arch_icache_invd_all

/**
 * @brief Flush and Invalidate the i-cache
 *
 * Flush and Invalidate the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_icache_flush_and_invd_all(void);

#define cache_instr_flush_and_invd_all arch_icache_flush_and_invd_all
/**
 * @brief Flush an address range in the i-cache
 *
 * Flush the specified address range of the instruction cache.
 *
 * @note the cache operations act on cache line. When multiple data structures
 *       share the same cache line being flushed, all the portions of the
 *       data structures sharing the same line will be flushed. This is usually
 *       not a problem because writing back is a non-destructive process that
 *       could be triggered by hardware at any time, so having an aligned
 *       @p addr or a padded @p size is not strictly necessary.
 *
 * @param addr Starting address to flush.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_icache_flush_range(void *addr, size_t size);

#define cache_instr_flush_range(addr, size) arch_icache_flush_range(addr, size)

/**
 * @brief Invalidate an address range in the i-cache
 *
 * Invalidate the specified address range of the instruction cache.
 *
 * @note the cache operations act on cache line. When multiple data structures
 *       share the same cache line being invalidated, all the portions of the
 *       non-read-only data structures sharing the same line will be
 *       invalidated as well. This is a destructive process that could lead to
 *       data loss and/or corruption. When @p addr is not aligned to the cache
 *       line and/or @p size is not a multiple of the cache line size the
 *       behaviour is undefined.
 *
 * @param addr Starting address to invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_icache_invd_range(void *addr, size_t size);

#define cache_instr_invd_range(addr, size) arch_icache_invd_range(addr, size)

/**
 * @brief Flush and Invalidate an address range in the i-cache
 *
 * Flush and Invalidate the specified address range of the instruction cache.
 *
 * @note the cache operations act on cache line. When multiple data structures
 *       share the same cache line being flushed, all the portions of the
 *       data structures sharing the same line will be flushed before being
 *       invalidated. This is usually not a problem because writing back is a
 *       non-destructive process that could be triggered by hardware at any
 *       time, so having an aligned @p addr or a padded @p size is not strictly
 *       necessary.
 *
 * @param addr Starting address to flush and invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
int arch_icache_flush_and_invd_range(void *addr, size_t size);

#define cache_instr_flush_and_invd_range(addr, size) \
	arch_icache_flush_and_invd_range(addr, size)
#if defined(CONFIG_ICACHE_LINE_SIZE_DETECT) || defined(__DOXYGEN__)

/**
 *
 * @brief Get the i-cache line size.
 *
 * The API is provided to dynamically detect the instruction cache line size at
 * run time.
 *
 * The function must be implemented only when CONFIG_ICACHE_LINE_SIZE_DETECT is
 * defined.
 *
 * @retval size Size of the i-cache line.
 * @retval 0 If the i-cache is not enabled.
 */
size_t arch_icache_line_size_get(void);

#define cache_instr_line_size_get arch_icache_line_size_get

#endif /* CONFIG_ICACHE_LINE_SIZE_DETECT || __DOXYGEN__ */

#endif /* CONFIG_ICACHE || __DOXYGEN__ */
#if CONFIG_CACHE_DOUBLEMAP || __DOXYGEN__
/*
 * Double-mapped cache API.
 *
 * NOTE(review): exact semantics of "cached"/"uncached" pointer views depend on
 * the architecture's double-mapping scheme — confirm against the arch docs.
 */

/** Test whether @p ptr refers to the cached address view. */
bool arch_cache_is_ptr_cached(void *ptr);
#define cache_is_ptr_cached(ptr) arch_cache_is_ptr_cached(ptr)

/** Test whether @p ptr refers to the uncached address view. */
bool arch_cache_is_ptr_uncached(void *ptr);
#define cache_is_ptr_uncached(ptr) arch_cache_is_ptr_uncached(ptr)

/** Return the cached counterpart of @p ptr. */
void __sparse_cache *arch_cache_cached_ptr_get(void *ptr);
#define cache_cached_ptr(ptr) arch_cache_cached_ptr_get(ptr)

/** Return the uncached counterpart of @p ptr. */
void *arch_cache_uncached_ptr_get(void __sparse_cache *ptr);
#define cache_uncached_ptr(ptr) arch_cache_uncached_ptr_get(ptr)
#endif /* CONFIG_CACHE_DOUBLEMAP */
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_ARCH_CACHE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/cache.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,425 |
```objective-c
/*
*
*/
/*
* The purpose of this file is to provide essential/minimal architecture-
* specific structure definitions to be included in generic kernel
* structures.
*
* The following rules must be observed:
* 1. arch/structs.h shall not depend on kernel.h both directly and
* indirectly (i.e. it shall not include any header files that include
* kernel.h in their dependency chain).
* 2. kernel.h shall imply arch/structs.h via kernel_structs.h , such that
* it shall not be necessary to include arch/structs.h explicitly when
* kernel.h is included.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_STRUCTS_H_
#define ZEPHYR_INCLUDE_ARCH_STRUCTS_H_
#if !defined(_ASMLANGUAGE)

#if defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/structs.h>
#elif defined(CONFIG_RISCV)
#include <zephyr/arch/riscv/structs.h>
#elif defined(CONFIG_ARM)
#include <zephyr/arch/arm/structs.h>
#else

/* Default definitions when no architecture specific definitions exist. */

/* Per CPU architecture specifics (empty) */
struct _cpu_arch {
#ifdef __cplusplus
	/* This struct will have a size 0 in C which is not allowed in C++ (it'll have a size 1). To
	 * prevent this, we add a 1 byte dummy variable.
	 */
	uint8_t dummy;
#endif
};

#endif

/* typedefs to be used with GEN_OFFSET_SYM(), etc. */
typedef struct _cpu_arch _cpu_arch_t;

#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_STRUCTS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/structs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 354 |
```objective-c
/*
 * arch_inlines.h - automatically selects the correct arch_inlines.h file to
 * include based on the selected architecture.
 */
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_ARCH_INLINES_H_
#define ZEPHYR_INCLUDE_ARCH_INLINES_H_
/* Architecture selection by Kconfig; no fallback branch for unlisted arches. */
#if defined(CONFIG_X86)
#include <zephyr/arch/x86/arch_inlines.h>
#elif defined(CONFIG_ARM)
#include <zephyr/arch/arm/arch_inlines.h>
#elif defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/arch_inlines.h>
#elif defined(CONFIG_ARC)
#include <zephyr/arch/arc/arch_inlines.h>
#elif defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/arch_inlines.h>
#elif defined(CONFIG_RISCV)
#include <zephyr/arch/riscv/arch_inlines.h>
#elif defined(CONFIG_NIOS2)
#include <zephyr/arch/nios2/arch_inlines.h>
#elif defined(CONFIG_MIPS)
#include <zephyr/arch/mips/arch_inlines.h>
#elif defined(CONFIG_ARCH_POSIX)
#include <zephyr/arch/posix/arch_inlines.h>
#elif defined(CONFIG_SPARC)
#include <zephyr/arch/sparc/arch_inlines.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 270 |
```objective-c
/* syscall.h - automatically selects the correct syscall.h file to include */
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_ARCH_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_SYSCALL_H_
/*
 * CONFIG_X86_64 is tested before CONFIG_X86 so 64-bit x86 selects the intel64
 * header. Architectures without an entry here (e.g. MIPS, SPARC, POSIX) get
 * no syscall header from this file.
 */
#if defined(CONFIG_X86_64)
#include <zephyr/arch/x86/intel64/syscall.h>
#elif defined(CONFIG_X86)
#include <zephyr/arch/x86/ia32/syscall.h>
#elif defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/syscall.h>
#elif defined(CONFIG_ARM)
#include <zephyr/arch/arm/syscall.h>
#elif defined(CONFIG_ARC)
#include <zephyr/arch/arc/syscall.h>
#elif defined(CONFIG_RISCV)
#include <zephyr/arch/riscv/syscall.h>
#elif defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/syscall.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 192 |
```objective-c
/*
 *
 * based on include/arch/riscv/exception.h
 *
 */

/* Fix: include guard previously misspelled "EXPCEPTION"; renamed consistently
 * in all three occurrences. The guard macro is local to this header, so the
 * rename is safe for all users of the file.
 */
#ifndef ZEPHYR_INCLUDE_ARCH_MIPS_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_MIPS_EXCEPTION_H_

#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/toolchain.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief MIPS exception stack frame.
 *
 * Holds the caller-saved general purpose registers plus the CP0 state
 * (epc/badvaddr/hi/lo/status/cause) saved on exception entry and restored
 * on exit.
 */
struct arch_esf {
	unsigned long ra; /* return address */
	unsigned long gp; /* global pointer */
	unsigned long t0; /* Caller-saved temporary register */
	unsigned long t1; /* Caller-saved temporary register */
	unsigned long t2; /* Caller-saved temporary register */
	unsigned long t3; /* Caller-saved temporary register */
	unsigned long t4; /* Caller-saved temporary register */
	unsigned long t5; /* Caller-saved temporary register */
	unsigned long t6; /* Caller-saved temporary register */
	unsigned long t7; /* Caller-saved temporary register */
	unsigned long t8; /* Caller-saved temporary register */
	unsigned long t9; /* Caller-saved temporary register */
	unsigned long a0; /* function argument */
	unsigned long a1; /* function argument */
	unsigned long a2; /* function argument */
	unsigned long a3; /* function argument */
	unsigned long v0; /* return value */
	unsigned long v1; /* return value */
	unsigned long at; /* assembly temporary */
	unsigned long epc;      /* exception program counter */
	unsigned long badvaddr; /* faulting virtual address */
	unsigned long hi;       /* multiply/divide result (high) */
	unsigned long lo;       /* multiply/divide result (low) */
	unsigned long status;   /* CP0 Status */
	unsigned long cause;    /* CP0 Cause */
};

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_ARCH_MIPS_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/mips/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 375 |
```objective-c
/*
*
* based on include/arch/sparc/arch.h
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_H_
#include <zephyr/arch/mips/thread.h>
#include <zephyr/arch/mips/exception.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/sys_io.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/devicetree.h>
#include <mips/mipsregs.h>
#define ARCH_STACK_PTR_ALIGN 16
#define OP_LOADREG lw
#define OP_STOREREG sw
#define CP0_STATUS_DEF_RESTORE (ST0_EXL | ST0_IE)
#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>
#ifdef __cplusplus
extern "C" {
#endif
#define STACK_ROUND_UP(x) ROUND_UP(x, ARCH_STACK_PTR_ALIGN)
void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);
int arch_irq_is_enabled(unsigned int irq);
void z_irq_spurious(const void *unused);
/**
* Configure a static interrupt.
*
* All arguments must be computable by the compiler at build time.
*
* @param irq_p IRQ line number
* @param priority_p Interrupt priority
* @param isr_p Interrupt service routine
* @param isr_param_p ISR parameter
* @param flags_p IRQ options
*
* @return The vector assigned to this interrupt
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
}
/*
 * Mask interrupts by clearing the CP0 Status.IE bit.
 *
 * The returned key records whether interrupts were enabled on entry, so a
 * matching arch_irq_unlock() can restore the previous state exactly.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	const uint32_t c0_status = read_c0_status();
	const unsigned int was_enabled = ((c0_status & ST0_IE) != 0) ? 1U : 0U;

	if (was_enabled != 0U) {
		/* Interrupts were on: turn them off and report that fact. */
		write_c0_status(c0_status & ~ST0_IE);
	}

	return was_enabled;
}
/*
 * Restore the interrupt-enable state captured by arch_irq_lock().
 *
 * A non-zero key re-enables interrupts (sets Status.IE); a zero key keeps
 * them masked, supporting nested lock/unlock pairs.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	/* Start from Status with IE cleared, then set it only if the key
	 * says interrupts were previously enabled.
	 */
	uint32_t c0_status = read_c0_status() & ~ST0_IE;

	if (key != 0U) {
		c0_status |= ST0_IE;
	}

	write_c0_status(c0_status);
}
/*
 * Report whether the given lock key corresponds to an "interrupts enabled"
 * state: arch_irq_lock() returns a non-zero key only when it actually
 * masked previously-enabled interrupts.
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return (key != 0U) ? true : false;
}
/* Emit a single MIPS `nop` instruction; `volatile` prevents the compiler
 * from optimizing it away.
 */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile ("nop");
}
extern uint32_t sys_clock_cycle_get_32(void);

/* Return the current 32-bit hardware cycle count by forwarding to the
 * timer driver's sys_clock_cycle_get_32().
 */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}
extern uint64_t sys_clock_cycle_get_64(void);

/* Return the current 64-bit hardware cycle count by forwarding to the
 * timer driver's sys_clock_cycle_get_64().
 */
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/mips/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 624 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_INLINES_H
#define ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_INLINES_H
#include <zephyr/kernel_structs.h>
/* Number of CPUs is fixed at build time on MIPS: simply return the
 * Kconfig-configured maximum (no runtime CPU discovery).
 */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
	return CONFIG_MP_MAX_NUM_CPUS;
}
#endif /* ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_INLINES_H */
``` | /content/code_sandbox/include/zephyr/arch/mips/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 80 |
```objective-c
/*
*
* based on include/arch/riscv/thread.h
*
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_MIPS_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_MIPS_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
/*
* The following structure defines the list of registers that need to be
* saved/restored when a cooperative context switch occurs.
*/
/*
 * Callee-saved MIPS registers (sp plus s0-s8) preserved across a
 * cooperative context switch.
 *
 * NOTE(review): the context-switch assembly presumably loads/stores these
 * at fixed offsets — keep the field order in sync with the .S code.
 */
struct _callee_saved {
	unsigned long sp;	/* Stack pointer */
	unsigned long s0;	/* saved register */
	unsigned long s1;	/* saved register */
	unsigned long s2;	/* saved register */
	unsigned long s3;	/* saved register */
	unsigned long s4;	/* saved register */
	unsigned long s5;	/* saved register */
	unsigned long s6;	/* saved register */
	unsigned long s7;	/* saved register */
	unsigned long s8;	/* saved register AKA fp */
};
typedef struct _callee_saved _callee_saved_t;

/* Per-thread architecture-specific state embedded in struct k_thread. */
struct _thread_arch {
	uint32_t swap_return_value; /* Return value of z_swap() */
};
typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_MIPS_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/mips/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 300 |
```linker script
/*
 *
 * based on include/arch/sparc/linker.ld
 *
 */
/**
 * @file
 * @brief Linker command/script file for the MIPS platform
 */
#include <zephyr/linker/sections.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>

/* Single-region layout: "ROM" and "RAM" both alias SRAM, so the image is
 * linked to load and execute from the same memory.
 */
#define ROMABLE_REGION RAM
#define RAMABLE_REGION RAM

#define _VECTOR_SECTION_NAME vector
#define _EXCEPTION_SECTION_NAME exceptions
#define _RESET_SECTION_NAME reset

MEMORY
{
	RAM (rwx) : ORIGIN = CONFIG_SRAM_BASE_ADDRESS, LENGTH = KB(CONFIG_SRAM_SIZE)

	/* Used by and documented in include/linker/intlist.ld */
	IDT_LIST (wx) : ORIGIN = 0xFFFFF7FF, LENGTH = 2K
}

/* Every standard output region resolves to the single SRAM region. */
REGION_ALIAS("REGION_TEXT", RAM);
REGION_ALIAS("REGION_RODATA", RAM);
REGION_ALIAS("REGION_DATA_VMA", RAM);
REGION_ALIAS("REGION_DATA_LMA", RAM);
REGION_ALIAS("REGION_BSS", RAM);

ENTRY(CONFIG_KERNEL_ENTRY)

PROVIDE (__memory_base = CONFIG_SRAM_BASE_ADDRESS);
PROVIDE (__memory_size = CONFIG_SRAM_SIZE * 1024);
/* Initial stack pointer: placed 1 KiB below the end of SRAM. */
PROVIDE (__stack = CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE - 1) * 1024);

SECTIONS
{
#include <zephyr/linker/rel-sections.ld>

#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif

	/* Exception/interrupt vectors, aligned to a 4 KiB boundary. */
	SECTION_PROLOGUE(_VECTOR_SECTION_NAME,,)
	{
		. = ALIGN(0x1000);
		KEEP(*(.vectors.*))
	} GROUP_LINK_IN(ROMABLE_REGION)

	SECTION_PROLOGUE(_RESET_SECTION_NAME,,)
	{
		. = ALIGN(0x10);
		KEEP(*(.reset.*))
	} GROUP_LINK_IN(ROMABLE_REGION)

	SECTION_PROLOGUE(_EXCEPTION_SECTION_NAME,,)
	{
		. = ALIGN(0x10);
		KEEP(*(".exception.entry.*"))
		*(".exception.other.*")
	} GROUP_LINK_IN(ROMABLE_REGION)

	SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
	{
		. = ALIGN(4);
		*(.text)
		*(".text.*")
	} GROUP_LINK_IN(REGION_TEXT)

	__rodata_region_start = .;

#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
 * zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
 */
#include <snippets-rom-sections.ld>
#include <zephyr/linker/thread-local-storage.ld>

	SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
	{
		. = ALIGN(8);
		*(.rodata)
		*(.rodata.*)
		*(.gnu.linkonce.r.*)
		*(.rodata1)

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-rodata.ld>
	} GROUP_LINK_IN(REGION_RODATA)

#include <zephyr/linker/cplusplus-rom.ld>

	__rodata_region_end = .;

	SECTION_PROLOGUE(.plt,,)
	{
		*(.plt)
	}

	SECTION_PROLOGUE(.iplt,,)
	{
		*(.iplt)
	}

	SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
	{
		. = ALIGN(8);
		_image_ram_start = .;
		__data_ram_start = .;
		*(.data)
		*(.data.*)
		*(.gnu.linkonce.d.*)
		*(.sdata)
		*(.sdata.*)
		. = ALIGN(8);
		SORT(CONSTRUCTORS)
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-rwdata.ld>
	} GROUP_DATA_LINK_IN(REGION_DATA_VMA, REGION_DATA_LMA)

#include <zephyr/linker/common-ram.ld>
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-ram-sections.ld>
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-data-sections.ld>

	__data_ram_end = .;

	SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
	{
		/*
		 * For performance, BSS section is assumed to be 4 byte aligned and
		 * a multiple of 4 bytes
		 */
		. = ALIGN(4);
		__bss_start = .;
		*(.dynbss)
		*(.sbss)
		*(.sbss.*)
		*(.bss)
		*(.bss.*)
		*(.gnu.linkonce.b.*)
		*(.scommon)
		COMMON_SYMBOLS
		/*
		 * As memory is cleared in words only, it is simpler to ensure the BSS
		 * section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
		 */
		__bss_end = ALIGN(4);
	} GROUP_LINK_IN(REGION_BSS)

	SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),)
	{
		/*
		 * This section is used for non-initialized objects that
		 * will not be cleared during the boot process.
		 */
		*(.noinit)
		*(.noinit.*)
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-noinit.ld>
	} GROUP_LINK_IN(REGION_BSS)

#include <zephyr/linker/cplusplus-ram.ld>

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-sections.ld>

#include <zephyr/linker/ram-end.ld>

	GROUP_END(RAMABLE_REGION)

#include <zephyr/linker/debug-sections.ld>

	.mdebug.abi32 : {
		KEEP(*(.mdebug.abi32))
	}

	SECTION_PROLOGUE(.gnu.attributes, 0,)
	{
		KEEP(*(.gnu.attributes))
	}

	/* MIPS ABI/relocation metadata sections not needed in the final image. */
	/DISCARD/ : {
		*(.MIPS.abiflags)
		*(.pdr)
		*(.reginfo)
	}
}
``` | /content/code_sandbox/include/zephyr/arch/mips/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,405 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARM AArch32 public exception handling
*
* ARM AArch32-specific kernel exception handling interface. Included by
* arm/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_EXCEPTION_H_
#if defined(CONFIG_CPU_CORTEX_M)
#include <zephyr/arch/arm/cortex_m/exception.h>
#elif defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R)
#include <zephyr/arch/arm/cortex_a_r/exception.h>
#else
#error Unknown ARM architecture
#endif /* CONFIG_CPU_CORTEX_M */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 150 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARM AArch32 public interrupt handling
*
* ARM AArch32-specific kernel interrupt handling interface. Included by
* arm/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_IRQ_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_IRQ_H_
#include <zephyr/sw_isr_table.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _ASMLANGUAGE
GTEXT(z_arm_int_exit);
GTEXT(arch_irq_enable)
GTEXT(arch_irq_disable)
GTEXT(arch_irq_is_enabled)
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
GTEXT(z_soc_irq_get_active)
GTEXT(z_soc_irq_eoi)
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
#else
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
extern void arch_irq_enable(unsigned int irq);
extern void arch_irq_disable(unsigned int irq);
extern int arch_irq_is_enabled(unsigned int irq);
/* internal routine documented in C file, needed by IRQ_CONNECT() macro */
extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
uint32_t flags);
#else
/*
* When a custom interrupt controller is specified, map the architecture
* interrupt control functions to the SoC layer interrupt control functions.
*/
void z_soc_irq_init(void);
void z_soc_irq_enable(unsigned int irq);
void z_soc_irq_disable(unsigned int irq);
int z_soc_irq_is_enabled(unsigned int irq);
void z_soc_irq_priority_set(
unsigned int irq, unsigned int prio, unsigned int flags);
unsigned int z_soc_irq_get_active(void);
void z_soc_irq_eoi(unsigned int irq);
#define arch_irq_enable(irq) z_soc_irq_enable(irq)
#define arch_irq_disable(irq) z_soc_irq_disable(irq)
#define arch_irq_is_enabled(irq) z_soc_irq_is_enabled(irq)
#define z_arm_irq_priority_set(irq, prio, flags) \
z_soc_irq_priority_set(irq, prio, flags)
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
extern void z_arm_int_exit(void);
extern void z_arm_interrupt_init(void);
/* Flags for use with IRQ_CONNECT() */
/**
* Set this interrupt up as a zero-latency IRQ. If CONFIG_ZERO_LATENCY_LEVELS
* is 1 it has a fixed hardware priority level (discarding what was supplied
* in the interrupt's priority argument). If CONFIG_ZERO_LATENCY_LEVELS is
 * greater than 1 it has the priority level assigned by the argument.
* The interrupt will run even if irq_lock() is active. Be careful!
*/
#define IRQ_ZERO_LATENCY BIT(0)
#ifdef CONFIG_CPU_CORTEX_M
#if defined(CONFIG_ZERO_LATENCY_LEVELS)
#define ZERO_LATENCY_LEVELS CONFIG_ZERO_LATENCY_LEVELS
#else
#define ZERO_LATENCY_LEVELS 1
#endif
#define _CHECK_PRIO(priority_p, flags_p) \
BUILD_ASSERT(((flags_p & IRQ_ZERO_LATENCY) && \
((ZERO_LATENCY_LEVELS == 1) || \
(priority_p < ZERO_LATENCY_LEVELS))) || \
(priority_p <= IRQ_PRIO_LOWEST), \
"Invalid interrupt priority. Values must not exceed IRQ_PRIO_LOWEST");
#else
#define _CHECK_PRIO(priority_p, flags_p)
#endif
/* All arguments must be computable by the compiler at build time.
*
* Z_ISR_DECLARE will populate the .intList section with the interrupt's
* parameters, which will then be used by gen_irq_tables.py to create
* the vector table and the software ISR table. This is all done at
* build-time.
*
* We additionally set the priority in the interrupt controller at
* runtime.
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
BUILD_ASSERT(IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS) || !(flags_p & IRQ_ZERO_LATENCY), \
"ZLI interrupt registered but feature is disabled"); \
_CHECK_PRIO(priority_p, flags_p) \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
z_arm_irq_priority_set(irq_p, priority_p, flags_p); \
}
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
{ \
BUILD_ASSERT(IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS) || !(flags_p & IRQ_ZERO_LATENCY), \
"ZLI interrupt registered but feature is disabled"); \
_CHECK_PRIO(priority_p, flags_p) \
Z_ISR_DECLARE_DIRECT(irq_p, ISR_FLAG_DIRECT, isr_p); \
z_arm_irq_priority_set(irq_p, priority_p, flags_p); \
}
#ifdef CONFIG_PM
extern void _arch_isr_direct_pm(void);
#define ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm()
#else
#define ARCH_ISR_DIRECT_PM() do { } while (false)
#endif
#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header()
#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap)
/* arch/arm/core/exc_exit.S */
extern void z_arm_int_exit(void);
#ifdef CONFIG_TRACING_ISR
extern void sys_trace_isr_enter(void);
extern void sys_trace_isr_exit(void);
#endif
/* Prologue for ISR_DIRECT_HEADER(): only emits the ISR-enter trace hook
 * (when CONFIG_TRACING_ISR is enabled); direct ISRs deliberately skip the
 * rest of the common interrupt-entry path.
 */
static inline void arch_isr_direct_header(void)
{
#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_enter();
#endif
}
/* Epilogue for ISR_DIRECT_FOOTER(): emits the ISR-exit trace hook, then,
 * when the ISR body requested it (maybe_swap != 0), calls z_arm_int_exit()
 * (arch/arm/core/exc_exit.S) so a context switch can occur on return.
 */
static inline void arch_isr_direct_footer(int maybe_swap)
{
#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_exit();
#endif
	if (maybe_swap != 0) {
		z_arm_int_exit();
	}
}
#if defined(__clang__)
#define ARCH_ISR_DIAG_OFF \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wextra\"")
#define ARCH_ISR_DIAG_ON _Pragma("clang diagnostic pop")
#elif defined(__GNUC__)
#define ARCH_ISR_DIAG_OFF \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wattributes\"")
#define ARCH_ISR_DIAG_ON _Pragma("GCC diagnostic pop")
#else
#define ARCH_ISR_DIAG_OFF
#define ARCH_ISR_DIAG_ON
#endif
#define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \
ARCH_ISR_DIAG_OFF \
__attribute__ ((interrupt ("IRQ"))) void name(void) \
{ \
int check_reschedule; \
ISR_DIRECT_HEADER(); \
check_reschedule = name##_body(); \
ISR_DIRECT_FOOTER(check_reschedule); \
} \
ARCH_ISR_DIAG_ON \
static inline int name##_body(void)
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
extern void z_arm_irq_direct_dynamic_dispatch_reschedule(void);
extern void z_arm_irq_direct_dynamic_dispatch_no_reschedule(void);
/**
* @brief Macro to register an ISR Dispatcher (with or without re-scheduling
* request) for dynamic direct interrupts.
*
* This macro registers the ISR dispatcher function for dynamic direct
* interrupts for a particular IRQ line, allowing the use of dynamic
* direct ISRs in the kernel for that interrupt source.
* The dispatcher function is invoked when the hardware
* interrupt occurs and then triggers the (software) Interrupt Service Routine
* (ISR) that is registered dynamically (i.e. at run-time) into the software
* ISR table stored in SRAM. The ISR must be connected with
* irq_connect_dynamic() and enabled via irq_enable() before the dynamic direct
* interrupt can be serviced. This ISR dispatcher must be configured by the
 * user to trigger thread re-scheduling upon return, using the @param resch
* parameter.
*
* These ISRs are designed for performance-critical interrupt handling and do
* not go through all of the common interrupt handling code.
*
* With respect to their declaration, dynamic 'direct' interrupts are regular
* Zephyr interrupts; their signature must match void isr(void* parameter), as,
* unlike regular direct interrupts, they are not placed directly into the
* ROM hardware vector table but instead they are installed in the software
* ISR table.
*
* The major differences with regular Zephyr interrupts are the following:
* - Similar to direct interrupts, the call into the OS to exit power
* management idle state is optional. Normal interrupts always do this
* before the ISR is run, but with dynamic direct ones when and if it runs
* is controlled by the placement of
* a ISR_DIRECT_PM() macro, or omitted entirely.
* - Similar to direct interrupts, scheduling decisions are optional. Unlike
* direct interrupts, the decisions must be made at build time.
* They are controlled by @param resch to this macro.
*
* @param irq_p IRQ line number.
* @param priority_p Interrupt priority.
* @param flags_p Architecture-specific IRQ configuration flags.
* @param resch Set flag to 'reschedule' to request thread
* re-scheduling upon ISR function. Set flag
* 'no_reschedule' to skip thread re-scheduling
*
* Note: the function is an ARM Cortex-M only API.
*
* @return Interrupt vector assigned to this interrupt.
*/
#define ARM_IRQ_DIRECT_DYNAMIC_CONNECT(irq_p, priority_p, flags_p, resch) \
IRQ_DIRECT_CONNECT(irq_p, priority_p, \
_CONCAT(z_arm_irq_direct_dynamic_dispatch_, resch), flags_p)
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
/* Architecture-specific definition for the target security
* state of an NVIC IRQ line.
*/
typedef enum {
	IRQ_TARGET_STATE_SECURE = 0,	/* IRQ targets the Secure state */
	IRQ_TARGET_STATE_NON_SECURE	/* IRQ targets the Non-Secure state */
} irq_target_state_t;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_IRQ_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/irq.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,017 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_GDBSTUB_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_GDBSTUB_H_
#include <zephyr/arch/arm/exception.h>
#ifndef _ASMLANGUAGE
#define DBGDSCR_MONITOR_MODE_EN 0x8000
#define SPSR_ISETSTATE_ARM 0x0
#define SPSR_ISETSTATE_JAZELLE 0x2
#define SPSR_J 24
#define SPSR_T 5
/* Debug Breakpoint Control Register constants */
#define DBGDBCR_MEANING_MASK 0x7
#define DBGDBCR_MEANING_SHIFT 20
#define DBGDBCR_MEANING_ADDR_MISMATCH 0x4
#define DBGDBCR_BYTE_ADDR_MASK 0xF
#define DBGDBCR_BYTE_ADDR_SHIFT 5
#define DBGDBCR_BRK_EN_MASK 0x1
/* Regno of the SPSR */
#define SPSR_REG_IDX 25
/* Minimal size of the packet - SPSR is the last, 42-nd byte, see packet_pos array */
#define GDB_READALL_PACKET_SIZE (42 * 8)
#define IFSR_DEBUG_EVENT 0x2
/*
 * Indices into gdb_ctx.registers[].
 *
 * NOTE(review): the ordinals presumably follow GDB's remote-protocol
 * register numbering for 32-bit ARM — confirm against GDB's ARM target
 * description before reordering.
 */
enum AARCH32_GDB_REG {
	R0 = 0,
	R1,
	R2,
	R3,
	/* READONLY registers (R4 - R13) except R12 */
	R4,
	R5,
	R6,
	R7,
	R8,
	R9,
	R10,
	R11,
	R12,
	/* Stack pointer - READONLY */
	R13,
	LR,
	PC,
	/* Saved program status register */
	SPSR,
	GDB_NUM_REGS
};
/* required structure */
struct gdb_ctx {
	/* cause of the exception */
	unsigned int exception;
	/* CPU register snapshot, indexed by enum AARCH32_GDB_REG */
	unsigned int registers[GDB_NUM_REGS];
};
void z_gdb_entry(struct arch_esf *esf, unsigned int exc_cause);
#endif
#endif
``` | /content/code_sandbox/include/zephyr/arch/arm/gdbstub.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 420 |
```objective-c
/*
*/
#ifndef ZEPHYR_INCLUDE_ARM_STRUCTS_H_
#define ZEPHYR_INCLUDE_ARM_STRUCTS_H_
#include <zephyr/types.h>
#if defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* Per CPU architecture specifics */
struct _cpu_arch {
	/* presumably the current exception nesting depth on this CPU —
	 * TODO(review): confirm against arch/arm/core usage
	 */
	int8_t exc_depth;
};
#else
/* Default definitions when no architecture specific definitions exist. */
/* Per CPU architecture specifics (empty) */
struct _cpu_arch {
#ifdef __cplusplus
/* This struct will have a size 0 in C which is not allowed in C++ (it'll have a size 1). To
* prevent this, we add a 1 byte dummy variable.
*/
uint8_t dummy;
#endif
};
#endif
#endif /* ZEPHYR_INCLUDE_ARM_STRUCTS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/structs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 173 |
```objective-c
/*
*
*/
/**
* @defgroup arch-interface Architecture Interface
* @ingroup internal_api
* @brief Internal kernel APIs with public scope
*
* Any public kernel APIs that are implemented as inline functions and need to
* call architecture-specific API so will have the prototypes for the
* architecture-specific APIs here. Architecture APIs that aren't used in this
* way go in kernel/include/kernel_arch_interface.h.
*
* The set of architecture-specific APIs used internally by public macros and
* inline functions in public headers are also specified and documented.
*
* For all macros and inline function prototypes described herein, <arch/cpu.h>
* must eventually pull in full definitions for all of them (the actual macro
* defines and inline function bodies)
*
* include/kernel.h and other public headers depend on definitions in this
* header.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_
#define ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/irq_offload.h>
#ifdef __cplusplus
extern "C" {
#endif
/* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct arch_esf;
struct k_thread;
struct k_mem_domain;
typedef struct z_thread_stack_element k_thread_stack_t;
typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);
__deprecated typedef struct arch_esf z_arch_esf_t;
/**
* @defgroup arch-timing Architecture timing APIs
* @ingroup arch-interface
* @{
*/
/**
* Obtain the current cycle count, in units specified by
* CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC. While this is historically
* specified as part of the architecture API, in practice virtually
* all platforms forward it to the sys_clock_cycle_get_32() API
* provided by the timer driver.
*
* @see k_cycle_get_32()
*
* @return The current cycle time. This should count up monotonically
* through the full 32 bit space, wrapping at 0xffffffff. Hardware
* with fewer bits of precision in the timer is expected to synthesize
* a 32 bit count.
*/
static inline uint32_t arch_k_cycle_get_32(void);
/**
* As for arch_k_cycle_get_32(), but with a 64 bit return value. Not
* all timer hardware has a 64 bit timer, this needs to be implemented
* only if CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER is set.
*
* @see arch_k_cycle_get_32()
*
* @return The current cycle time. This should count up monotonically
* through the full 64 bit space, wrapping at 2^64-1. Hardware with
* fewer bits of precision in the timer is generally not expected to
* implement this API.
*/
static inline uint64_t arch_k_cycle_get_64(void);
/** @} */
/**
* @addtogroup arch-threads
* @{
*/
/**
* @def ARCH_THREAD_STACK_RESERVED
*
* @see K_THREAD_STACK_RESERVED
*/
/**
* @def ARCH_STACK_PTR_ALIGN
*
* Required alignment of the CPU's stack pointer register value, dictated by
* hardware constraints and the ABI calling convention.
*
* @see Z_STACK_PTR_ALIGN
*/
/**
* @def ARCH_THREAD_STACK_OBJ_ALIGN(size)
*
* Required alignment of the lowest address of a stack object.
*
* Optional definition.
*
* @see Z_THREAD_STACK_OBJ_ALIGN
*/
/**
* @def ARCH_THREAD_STACK_SIZE_ADJUST(size)
* @brief Round up a stack buffer size to alignment constraints
*
* Adjust a requested stack buffer size to the true size of its underlying
* buffer, defined as the area usable for thread stack context and thread-
* local storage.
*
* The size value passed here does not include storage reserved for platform
* data.
*
* The returned value is either the same size provided (if already properly
* aligned), or rounded up to satisfy alignment constraints. Calculations
* performed here *must* be idempotent.
*
* Optional definition. If undefined, stack buffer sizes are either:
* - Rounded up to the next power of two if user mode is enabled on an arch
* with an MPU that requires such alignment
* - Rounded up to ARCH_STACK_PTR_ALIGN
*
* @see Z_THREAD_STACK_SIZE_ADJUST
*/
/**
* @def ARCH_KERNEL_STACK_RESERVED
* @brief MPU guard size for kernel-only stacks
*
* If MPU stack guards are used to catch stack overflows, specify the
* amount of space reserved in kernel stack objects. If guard sizes are
* context dependent, this should be in the minimum guard size, with
* remaining space carved out if needed.
*
* Optional definition, defaults to 0.
*
* @see K_KERNEL_STACK_RESERVED
*/
/**
* @def ARCH_KERNEL_STACK_OBJ_ALIGN
* @brief Required alignment of the lowest address of a kernel-only stack.
*/
/** @} */
/**
* @addtogroup arch-pm
* @{
*/
/**
* @brief Power save idle routine
*
* This function will be called by the kernel idle loop or possibly within
* an implementation of z_pm_save_idle in the kernel when the
* '_pm_save_flag' variable is non-zero.
*
* Architectures that do not implement power management instructions may
* immediately return, otherwise a power-saving instruction should be
* issued to wait for an interrupt.
*
* @note The function is expected to return after the interrupt that has
* caused the CPU to exit power-saving mode has been serviced, although
* this is not a firm requirement.
*
* @see k_cpu_idle()
*/
void arch_cpu_idle(void);
/**
* @brief Atomically re-enable interrupts and enter low power mode
*
* The requirements for arch_cpu_atomic_idle() are as follows:
*
* -# Enabling interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in k_lifo_get(), for example, of the race condition that
* occurs if this requirement is not met.
*
* -# After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'key' input parameter.
*
* @see k_cpu_atomic_idle()
*
* @param key Lockout key returned by previous invocation of arch_irq_lock()
*/
void arch_cpu_atomic_idle(unsigned int key);
/** @} */
/**
* @addtogroup arch-smp
* @{
*/
/**
* Per-cpu entry function
*
* @param data context parameter, implementation specific
*/
typedef void (*arch_cpustart_t)(void *data);
/**
* @brief Start a numbered CPU on a MP-capable system
*
* This starts and initializes a specific CPU. The main thread on startup is
* running on CPU zero, other processors are numbered sequentially. On return
* from this function, the CPU is known to have begun operating and will enter
* the provided function. Its interrupts will be initialized but disabled such
* that irq_unlock() with the provided key will work to enable them.
*
* Normally, in SMP mode this function will be called by the kernel
* initialization and should not be used as a user API. But it is defined here
* for special-purpose apps which want Zephyr running on one core and to use
* others for design-specific processing.
*
* @param cpu_num Integer number of the CPU
* @param stack Stack memory for the CPU
* @param sz Stack buffer size, in bytes
* @param fn Function to begin running on the CPU.
* @param arg Untyped argument to be passed to "fn"
*/
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
arch_cpustart_t fn, void *arg);
/**
* @brief Return CPU power status
*
* @param cpu_num Integer number of the CPU
*/
bool arch_cpu_active(int cpu_num);
/** @} */
/**
* @addtogroup arch-irq
* @{
*/
/**
* Lock interrupts on the current CPU
*
* @see irq_lock()
*/
static inline unsigned int arch_irq_lock(void);
/**
* Unlock interrupts on the current CPU
*
* @see irq_unlock()
*/
static inline void arch_irq_unlock(unsigned int key);
/**
* Test if calling arch_irq_unlock() with this key would unlock irqs
*
* @param key value returned by arch_irq_lock()
* @return true if interrupts were unlocked prior to the arch_irq_lock()
* call that produced the key argument.
*/
static inline bool arch_irq_unlocked(unsigned int key);
/**
* Disable the specified interrupt line
*
* @note: The behavior of interrupts that arrive after this call
* returns and before the corresponding call to arch_irq_enable() is
* undefined. The hardware is not required to latch and deliver such
* an interrupt, though on some architectures that may work. Other
* architectures will simply lose such an interrupt and never deliver
* it. Many drivers and subsystems are not tolerant of such dropped
* interrupts and it is the job of the application layer to ensure
* that behavior remains correct.
*
* @see irq_disable()
*/
void arch_irq_disable(unsigned int irq);
/**
* Enable the specified interrupt line
*
* @see irq_enable()
*/
void arch_irq_enable(unsigned int irq);
/**
* Test if an interrupt line is enabled
*
* @see irq_is_enabled()
*/
int arch_irq_is_enabled(unsigned int irq);
/**
* Arch-specific hook to install a dynamic interrupt.
*
* @param irq IRQ line number
* @param priority Interrupt priority
* @param routine Interrupt service routine
* @param parameter ISR parameter
* @param flags Arch-specific IRQ configuration flag
*
* @return The vector assigned to this interrupt
*/
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(const void *parameter),
const void *parameter, uint32_t flags);
/**
* Arch-specific hook to dynamically uninstall a shared interrupt.
* If the interrupt is not being shared, then the associated
* _sw_isr_table entry will be replaced by (NULL, z_irq_spurious)
* (default entry).
*
* @param irq IRQ line number
* @param priority Interrupt priority
* @param routine Interrupt service routine
* @param parameter ISR parameter
* @param flags Arch-specific IRQ configuration flag
*
* @return 0 in case of success, negative value otherwise
*/
int arch_irq_disconnect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(const void *parameter),
const void *parameter, uint32_t flags);
/**
* @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
*
* @see IRQ_CONNECT()
*/
#ifdef CONFIG_PCIE
/**
* @def ARCH_PCIE_IRQ_CONNECT(bdf, irq, pri, isr, arg, flags)
*
* @see PCIE_IRQ_CONNECT()
*/
#endif /* CONFIG_PCIE */
/**
* @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
*
* @see IRQ_DIRECT_CONNECT()
*/
/**
* @def ARCH_ISR_DIRECT_PM()
*
* @see ISR_DIRECT_PM()
*/
/**
* @def ARCH_ISR_DIRECT_HEADER()
*
* @see ISR_DIRECT_HEADER()
*/
/**
* @def ARCH_ISR_DIRECT_FOOTER(swap)
*
* @see ISR_DIRECT_FOOTER()
*/
/**
* @def ARCH_ISR_DIRECT_DECLARE(name)
*
* @see ISR_DIRECT_DECLARE()
*/
#ifndef CONFIG_PCIE_CONTROLLER
/**
* @brief Arch-specific hook for allocating IRQs
*
* Note: disable/enable IRQ relevantly inside the implementation of such
* function to avoid concurrency issues. Also, an allocated IRQ is assumed
* to be used thus a following @see arch_irq_is_used() should return true.
*
* @return The newly allocated IRQ or UINT_MAX on error.
*/
unsigned int arch_irq_allocate(void);
/**
* @brief Arch-specific hook for declaring an IRQ being used
*
* Note: disable/enable IRQ relevantly inside the implementation of such
* function to avoid concurrency issues.
*
* @param irq the IRQ to declare being used
*/
void arch_irq_set_used(unsigned int irq);
/**
* @brief Arch-specific hook for checking if an IRQ is being used already
*
* @param irq the IRQ to check
*
 * @return true if the IRQ is being used, false otherwise
*/
bool arch_irq_is_used(unsigned int irq);
#endif /* CONFIG_PCIE_CONTROLLER */
/**
* @def ARCH_EXCEPT(reason_p)
*
* Generate a software induced fatal error.
*
* If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
* K_ERR_STACK_CHK_FAIL may be induced.
*
* This should ideally generate a software trap, with exception context
* indicating state when this was invoked. General purpose register state at
* the time of trap should not be disturbed from the calling context.
*
* @param reason_p K_ERR_ scoped reason code for the fatal error.
*/
#ifdef CONFIG_IRQ_OFFLOAD
/**
* Run a function in interrupt context.
*
* Implementations should invoke an exception such that the kernel goes through
* its interrupt handling dispatch path, to include switching to the interrupt
* stack, and runs the provided routine and parameter.
*
* The only intended use-case for this function is for test code to simulate
* the correctness of kernel APIs in interrupt handling context. This API
* is not intended for real applications.
*
* @see irq_offload()
*
* @param routine Function to run in interrupt context
* @param parameter Value to pass to the function when invoked
*/
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
#endif /* CONFIG_IRQ_OFFLOAD */
/** @} */
/**
* @defgroup arch-smp Architecture-specific SMP APIs
* @ingroup arch-interface
* @{
*/
#ifdef CONFIG_SMP
/** Return the CPU struct for the currently executing CPU */
static inline struct _cpu *arch_curr_cpu(void);
/**
* @brief Processor hardware ID
*
* Most multiprocessor architectures have a low-level unique ID value
* associated with the current CPU that can be retrieved rapidly and
* efficiently in kernel context. Note that while the numbering of
* the CPUs is guaranteed to be unique, the values are
* platform-defined. In particular, they are not guaranteed to match
* Zephyr's own sequential CPU IDs (even though on some platforms they
* do).
*
* @note There is an inherent race with this API: the system may
* preempt the current thread and migrate it to another CPU before the
* value is used. Safe usage requires knowing the migration is
* impossible (e.g. because the code is in interrupt context, holds a
* spinlock, or cannot migrate due to k_cpu_mask state).
*
* @return Unique ID for currently-executing CPU
*/
static inline uint32_t arch_proc_id(void);
/**
* Broadcast an interrupt to all CPUs
*
* This will invoke z_sched_ipi() on all other CPUs in the system.
*/
void arch_sched_broadcast_ipi(void);
/**
* Direct IPIs to the specified CPUs
*
* This will invoke z_sched_ipi() on the CPUs identified by @a cpu_bitmap.
*
* @param cpu_bitmap A bitmap indicating which CPUs need the IPI
*/
void arch_sched_directed_ipi(uint32_t cpu_bitmap);
int arch_smp_init(void);
#endif /* CONFIG_SMP */
/**
* @brief Returns the number of CPUs
*
* For most systems this will be the same as CONFIG_MP_MAX_NUM_CPUS,
* however some systems may determine this at runtime instead.
*
* @return the number of CPUs
*/
static inline unsigned int arch_num_cpus(void);
/** @} */
/**
* @defgroup arch-userspace Architecture-specific userspace APIs
* @ingroup arch-interface
* @{
*/
#ifdef CONFIG_USERSPACE
#include <zephyr/arch/syscall.h>
/**
* Invoke a system call with 0 arguments.
*
* No general-purpose register state other than return value may be preserved
* when transitioning from supervisor mode back down to user mode for
* security reasons.
*
* It is required that all arguments be stored in registers when elevating
* privileges from user to supervisor mode.
*
* Processing of the syscall takes place on a separate kernel stack. Interrupts
* should be enabled when invoking the system call marshallers from the
* dispatch table. Thread preemption may occur when handling system calls.
*
* Call IDs are untrusted and must be bounds-checked, as the value is used to
* index the system call dispatch table, containing function pointers to the
* specific system call code.
*
* @param call_id System call ID
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);
/**
* Invoke a system call with 1 argument.
*
* @see arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
uintptr_t call_id);
/**
* Invoke a system call with 2 arguments.
*
* @see arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
uintptr_t call_id);
/**
* Invoke a system call with 3 arguments.
*
* @see arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param arg3 Third argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3,
uintptr_t call_id);
/**
* Invoke a system call with 4 arguments.
*
* @see arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param arg3 Third argument to the system call.
* @param arg4 Fourth argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t call_id);
/**
* Invoke a system call with 5 arguments.
*
* @see arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param arg3 Third argument to the system call.
* @param arg4 Fourth argument to the system call.
* @param arg5 Fifth argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5,
uintptr_t call_id);
/**
* Invoke a system call with 6 arguments.
*
* @see arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param arg3 Third argument to the system call.
* @param arg4 Fourth argument to the system call.
* @param arg5 Fifth argument to the system call.
* @param arg6 Sixth argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6,
uintptr_t call_id);
/**
* Indicate whether we are currently running in user mode
*
* @return True if the CPU is currently running with user permissions
*/
static inline bool arch_is_user_context(void);
/**
* @brief Get the maximum number of partitions for a memory domain
*
* @return Max number of partitions, or -1 if there is no limit
*/
int arch_mem_domain_max_partitions_get(void);
#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
/**
*
* @brief Architecture-specific hook for memory domain initialization
*
* Perform any tasks needed to initialize architecture-specific data within
* the memory domain, such as reserving memory for page tables. All members
* of the provided memory domain aside from `arch` will be initialized when
 * this is called, but no threads will be assigned yet.
*
* This function may fail if initializing the memory domain requires allocation,
* such as for page tables.
*
* The associated function k_mem_domain_init() documents that making
* multiple init calls to the same memory domain is undefined behavior,
* but has no assertions in place to check this. If this matters, it may be
* desirable to add checks for this in the implementation of this function.
*
* @param domain The memory domain to initialize
* @retval 0 Success
* @retval -ENOMEM Insufficient memory
*/
int arch_mem_domain_init(struct k_mem_domain *domain);
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
/**
* @brief Add a thread to a memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when the provided thread has been added to a memory domain.
*
* The thread->mem_domain_info.mem_domain pointer will be set to the domain to
* be added to before this is called. Implementations may assume that the
* thread is not already a member of this domain.
*
* @param thread Thread which needs to be configured.
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
* @retval -ENOSPC if running out of space in internal structures
* (e.g. translation tables)
*/
int arch_mem_domain_thread_add(struct k_thread *thread);
/**
* @brief Remove a thread from a memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when the provided thread has been removed from a memory domain.
*
* The thread's memory domain pointer will be the domain that the thread
* is being removed from.
*
* @param thread Thread being removed from its memory domain
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
*/
int arch_mem_domain_thread_remove(struct k_thread *thread);
/**
* @brief Remove a partition from the memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when a memory domain has had a partition removed.
*
* The partition index data, and the number of partitions configured, are not
* respectively cleared and decremented in the domain until after this function
* runs.
*
* @param domain The memory domain structure
* @param partition_id The partition index that needs to be deleted
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
* @retval -ENOENT if no matching partition found
*/
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id);
/**
* @brief Add a partition to the memory domain
*
* Architecture-specific hook to manage internal data structures or hardware
* state when a memory domain has a partition added.
*
* @param domain The memory domain structure
* @param partition_id The partition that needs to be added
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
*/
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
/**
* @brief Check memory region permissions
*
* Given a memory region, return whether the current memory management hardware
* configuration would allow a user thread to read/write that region. Used by
* system calls to validate buffers coming in from userspace.
*
* Notes:
* The function is guaranteed to never return validation success, if the entire
* buffer area is not user accessible.
*
* The function is guaranteed to correctly validate the permissions of the
* supplied buffer, if the user access permissions of the entire buffer are
* enforced by a single, enabled memory management region.
*
* In some architectures the validation will always return failure
* if the supplied memory buffer spans multiple enabled memory management
* regions (even if all such regions permit user access).
*
* @warning Buffer of size zero (0) has undefined behavior.
*
* @param addr start address of the buffer
* @param size the size of the buffer
* @param write If non-zero, additionally check if the area is writable.
* Otherwise, just check if the memory can be read.
*
* @return nonzero if the permissions don't match.
*/
int arch_buffer_validate(const void *addr, size_t size, int write);
/**
* Get the optimal virtual region alignment to optimize the MMU table layout
*
* Some MMU HW requires some region to be aligned to some of the intermediate
* block alignment in order to reduce table usage.
* This call returns the optimal virtual address alignment in order to permit
* such optimization in the following MMU mapping call.
*
* @param[in] phys Physical address of region to be mapped,
* aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
* @param[in] size Size of region to be mapped,
* aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
*
* @return Alignment to apply on the virtual address of this region
*/
size_t arch_virt_region_align(uintptr_t phys, size_t size);
/**
* Perform a one-way transition from supervisor to user mode.
*
* Implementations of this function must do the following:
*
* - Reset the thread's stack pointer to a suitable initial value. We do not
* need any prior context since this is a one-way operation.
* - Set up any kernel stack region for the CPU to use during privilege
* elevation
* - Put the CPU in whatever its equivalent of user mode is
* - Transfer execution to arch_new_thread() passing along all the supplied
* arguments, in user mode.
*
* @param user_entry Entry point to start executing as a user thread
* @param p1 1st parameter to user thread
* @param p2 2nd parameter to user thread
* @param p3 3rd parameter to user thread
*/
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3);
/**
* @brief Induce a kernel oops that appears to come from a specific location
*
* Normally, k_oops() generates an exception that appears to come from the
* call site of the k_oops() itself.
*
* However, when validating arguments to a system call, if there are problems
* we want the oops to appear to come from where the system call was invoked
* and not inside the validation function.
*
* @param ssf System call stack frame pointer. This gets passed as an argument
* to _k_syscall_handler_t functions and its contents are completely
* architecture specific.
*/
FUNC_NORETURN void arch_syscall_oops(void *ssf);
/**
* @brief Safely take the length of a potentially bad string
*
* This must not fault, instead the @p err parameter must have -1 written to it.
* This function otherwise should work exactly like libc strnlen(). On success
* @p err should be set to 0.
*
* @param s String to measure
* @param maxsize Max length of the string
* @param err Error value to write
* @return Length of the string, not counting NULL byte, up to maxsize
*/
size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
#endif /* CONFIG_USERSPACE */
/**
* @brief Detect memory coherence type
*
* Required when ARCH_HAS_COHERENCE is true. This function returns
* true if the byte pointed to lies within an architecture-defined
* "coherence region" (typically implemented with uncached memory) and
* can safely be used in multiprocessor code without explicit flush or
* invalidate operations.
*
* @note The result is for only the single byte at the specified
* address, this API is not required to check region boundaries or to
* expect aligned pointers. The expectation is that the code above
* will have queried the appropriate address(es).
*/
#ifndef CONFIG_ARCH_HAS_COHERENCE
/**
 * @brief Default coherence check when no coherence regions are defined.
 *
 * Without CONFIG_ARCH_HAS_COHERENCE every address is treated as
 * coherent, so this stub unconditionally reports success.
 */
static inline bool arch_mem_coherent(void *ptr)
{
	(void)ptr; /* parameter kept only for API compatibility */
	return true;
}
#endif
/**
* @brief Ensure cache coherence prior to context switch
*
* Required when ARCH_HAS_COHERENCE is true. On cache-incoherent
* multiprocessor architectures, thread stacks are cached by default
* for performance reasons. They must therefore be flushed
* appropriately on context switch. The rules are:
*
* 1. The region containing live data in the old stack (generally the
* bytes between the current stack pointer and the top of the stack
* memory) must be flushed to underlying storage so a new CPU that
* runs the same thread sees the correct data. This must happen
* before the assignment of the switch_handle field in the thread
* struct which signals the completion of context switch.
*
* 2. Any data areas to be read from the new stack (generally the same
* as the live region when it was saved) should be invalidated (and
* NOT flushed!) in the data cache. This is because another CPU
* may have run or re-initialized the thread since this CPU
* suspended it, and any data present in cache will be stale.
*
* @note The kernel will call this function during interrupt exit when
* a new thread has been chosen to run, and also immediately before
* entering arch_switch() to effect a code-driven context switch. In
* the latter case, it is very likely that more data will be written
* to the old_thread stack region after this function returns but
* before the completion of the switch. Simply flushing naively here
* is not sufficient on many architectures and coordination with the
* arch_switch() implementation is likely required.
*
* @param old_thread The old thread to be flushed before being allowed
* to run on other CPUs.
* @param old_switch_handle The switch handle to be stored into
* old_thread (it will not be valid until the
* cache is flushed so is not present yet).
* This will be NULL if inside z_swap()
* (because the arch_switch() has not saved it
* yet).
* @param new_thread The new thread to be invalidated before it runs locally.
*/
#ifndef CONFIG_KERNEL_COHERENCE
/**
 * @brief No-op stack coherence hook for cache-coherent builds.
 *
 * When CONFIG_KERNEL_COHERENCE is disabled there is nothing to flush
 * or invalidate on context switch, so all arguments are ignored.
 */
static inline void arch_cohere_stacks(struct k_thread *old_thread,
				      void *old_switch_handle,
				      struct k_thread *new_thread)
{
	(void)old_thread;
	(void)old_switch_handle;
	(void)new_thread;
}
#endif
/** @} */
/**
* @defgroup arch-gdbstub Architecture-specific gdbstub APIs
* @ingroup arch-interface
* @{
*/
#ifdef CONFIG_GDBSTUB
struct gdb_ctx;
/**
* @brief Architecture layer debug start
*
* This function is called by @c gdb_init()
*/
void arch_gdb_init(void);
/**
* @brief Continue running program
*
* Continue software execution.
*/
void arch_gdb_continue(void);
/**
* @brief Continue with one step
*
* Continue software execution until reaches the next statement.
*/
void arch_gdb_step(void);
/**
* @brief Read all registers, and outputs as hexadecimal string.
*
* This reads all CPU registers and outputs as hexadecimal string.
* The output string must be parsable by GDB.
*
* @param ctx GDB context
* @param buf Buffer to output hexadecimal string.
* @param buflen Length of buffer.
*
* @return Length of hexadecimal string written.
* Return 0 if error or not supported.
*/
size_t arch_gdb_reg_readall(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen);
/**
* @brief Take a hexadecimal string and update all registers.
*
* This takes in a hexadecimal string as presented from GDB,
* and updates all CPU registers with new values.
*
* @param ctx GDB context
* @param hex Input hexadecimal string.
* @param hexlen Length of hexadecimal string.
*
* @return Length of hexadecimal string parsed.
* Return 0 if error or not supported.
*/
size_t arch_gdb_reg_writeall(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen);
/**
* @brief Read one register, and outputs as hexadecimal string.
*
* This reads one CPU register and outputs as hexadecimal string.
* The output string must be parsable by GDB.
*
* @param ctx GDB context
* @param buf Buffer to output hexadecimal string.
* @param buflen Length of buffer.
* @param regno Register number
*
* @return Length of hexadecimal string written.
* Return 0 if error or not supported.
*/
size_t arch_gdb_reg_readone(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen,
uint32_t regno);
/**
* @brief Take a hexadecimal string and update one register.
*
* This takes in a hexadecimal string as presented from GDB,
 * and updates one CPU register with a new value.
*
* @param ctx GDB context
* @param hex Input hexadecimal string.
* @param hexlen Length of hexadecimal string.
* @param regno Register number
*
* @return Length of hexadecimal string parsed.
* Return 0 if error or not supported.
*/
size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen,
uint32_t regno);
/**
* @brief Add breakpoint or watchpoint.
*
* @param ctx GDB context
* @param type Breakpoint or watchpoint type
* @param addr Address of breakpoint or watchpoint
* @param kind Size of breakpoint/watchpoint in bytes
*
* @retval 0 Operation successful
* @retval -1 Error encountered
* @retval -2 Not supported
*/
int arch_gdb_add_breakpoint(struct gdb_ctx *ctx, uint8_t type,
uintptr_t addr, uint32_t kind);
/**
* @brief Remove breakpoint or watchpoint.
*
* @param ctx GDB context
* @param type Breakpoint or watchpoint type
* @param addr Address of breakpoint or watchpoint
* @param kind Size of breakpoint/watchpoint in bytes
*
* @retval 0 Operation successful
* @retval -1 Error encountered
* @retval -2 Not supported
*/
int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type,
uintptr_t addr, uint32_t kind);
#endif
/** @} */
#ifdef CONFIG_TIMING_FUNCTIONS
#include <zephyr/timing/types.h>
/**
* @brief Arch specific Timing Measurement APIs
* @defgroup timing_api_arch Arch specific Timing Measurement APIs
* @ingroup timing_api
*
* Implements the necessary bits to support timing measurement
* using architecture specific timing measurement mechanism.
*
* @{
*/
/**
* @brief Initialize the timing subsystem.
*
* Perform the necessary steps to initialize the timing subsystem.
*
* @see timing_init()
*/
void arch_timing_init(void);
/**
* @brief Signal the start of the timing information gathering.
*
* Signal to the timing subsystem that timing information
* will be gathered from this point forward.
*
* @note Any call to arch_timing_counter_get() must be done between
* calls to arch_timing_start() and arch_timing_stop(), and on the
* same CPU core.
*
* @see timing_start()
*/
void arch_timing_start(void);
/**
* @brief Signal the end of the timing information gathering.
*
* Signal to the timing subsystem that timing information
* is no longer being gathered from this point forward.
*
* @note Any call to arch_timing_counter_get() must be done between
* calls to arch_timing_start() and arch_timing_stop(), and on the
* same CPU core.
*
* @see timing_stop()
*/
void arch_timing_stop(void);
/**
* @brief Return timing counter.
*
* @parblock
*
* @note Any call to arch_timing_counter_get() must be done between
* calls to arch_timing_start() and arch_timing_stop(), and on the
* same CPU core.
*
* @endparblock
*
* @parblock
*
* @note Not all architectures have a timing counter with 64 bit precision.
* It is possible to see this value "go backwards" due to internal
* rollover. Timing code must be prepared to address the rollover
* (with platform-dependent code, e.g. by casting to a uint32_t before
* subtraction) or by using arch_timing_cycles_get() which is required
* to understand the distinction.
*
* @endparblock
*
* @return Timing counter.
*
* @see timing_counter_get()
*/
timing_t arch_timing_counter_get(void);
/**
* @brief Get number of cycles between @p start and @p end.
*
* @note For some architectures, the raw numbers from counter need
* to be scaled to obtain actual number of cycles, or may roll over
* internally. This function computes a positive-definite interval
* between two returned cycle values.
*
* @param start Pointer to counter at start of a measured execution.
* @param end Pointer to counter at stop of a measured execution.
* @return Number of cycles between start and end.
*
* @see timing_cycles_get()
*/
uint64_t arch_timing_cycles_get(volatile timing_t *const start,
volatile timing_t *const end);
/**
* @brief Get frequency of counter used (in Hz).
*
* @return Frequency of counter used for timing in Hz.
*
* @see timing_freq_get()
*/
uint64_t arch_timing_freq_get(void);
/**
* @brief Convert number of @p cycles into nanoseconds.
*
* @param cycles Number of cycles
* @return Converted time value
*
* @see timing_cycles_to_ns()
*/
uint64_t arch_timing_cycles_to_ns(uint64_t cycles);
/**
* @brief Convert number of @p cycles into nanoseconds with averaging.
*
* @param cycles Number of cycles
* @param count Times of accumulated cycles to average over
* @return Converted time value
*
* @see timing_cycles_to_ns_avg()
*/
uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);
/**
* @brief Get frequency of counter used (in MHz).
*
* @return Frequency of counter used for timing in MHz.
*
* @see timing_freq_get_mhz()
*/
uint32_t arch_timing_freq_get_mhz(void);
/** @} */
#endif /* CONFIG_TIMING_FUNCTIONS */
#ifdef CONFIG_PCIE_MSI_MULTI_VECTOR
struct msi_vector;
typedef struct msi_vector msi_vector_t;
/**
* @brief Allocate vector(s) for the endpoint MSI message(s).
*
* @param priority the MSI vectors base interrupt priority
* @param vectors an array to fill with allocated MSI vectors
* @param n_vector the size of MSI vectors array
*
* @return The number of allocated MSI vectors
*/
uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
msi_vector_t *vectors,
uint8_t n_vector);
/**
* @brief Connect an MSI vector to the given routine
*
* @param vector The MSI vector to connect to
* @param routine Interrupt service routine
* @param parameter ISR parameter
* @param flags Arch-specific IRQ configuration flag
*
* @return True on success, false otherwise
*/
bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
void (*routine)(const void *parameter),
const void *parameter,
uint32_t flags);
#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */
/**
* @brief Perform architecture specific processing within spin loops
*
* This is invoked from busy loops with IRQs disabled such as the contended
* spinlock loop. The default implementation is a weak function that calls
* arch_nop(). Architectures may implement this function to perform extra
* checks or power management tricks if needed.
*/
void arch_spin_relax(void);
/**
* stack_trace_callback_fn - Callback for @ref arch_stack_walk
* @param cookie Caller supplied pointer handed back by @ref arch_stack_walk
* @param addr The stack entry address to consume
*
 * @return True, if the entry was consumed or skipped. False, if there is no space left to store the entry.
*/
typedef bool (*stack_trace_callback_fn)(void *cookie, unsigned long addr);
/**
* @brief Architecture-specific function to walk the stack
*
* @param callback_fn Callback which is invoked by the architecture code for each entry.
* @param cookie Caller supplied pointer which is handed back to @a callback_fn
* @param thread Pointer to a k_thread struct, can be NULL
* @param esf Pointer to an arch_esf struct, can be NULL
*
* ============ ======= ============================================
* thread esf
* ============ ======= ============================================
* thread NULL Stack trace from thread (can be _current)
* thread esf Stack trace starting on esf
* ============ ======= ============================================
*/
void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
const struct k_thread *thread, const struct arch_esf *esf);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#include <zephyr/arch/arch_inlines.h>
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arch_interface.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 9,048 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_ARCH_INLINES_H
#define ZEPHYR_INCLUDE_ARCH_ARM_ARCH_INLINES_H
#include <zephyr/kernel_structs.h>
#if defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#include <zephyr/arch/arm/cortex_a_r/lib_helpers.h>
#include <zephyr/arch/arm/cortex_a_r/tpidruro.h>
/*
 * Return the struct for the CPU executing this code.
 *
 * The per-CPU pointer is read from the TPIDRURO register and masked
 * with TPIDRURO_CURR_CPU — presumably the register's remaining bits
 * carry other data (see tpidruro.h; confirm against the writer of
 * this register).
 */
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
	return (_cpu_t *)(read_tpidruro() & TPIDRURO_CURR_CPU);
}
#else
#ifndef CONFIG_SMP
/*
 * Uniprocessor build: the first (and only) CPU struct is always the
 * one executing, so no per-CPU lookup is needed.
 */
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
	_cpu_t *cpu = &_kernel.cpus[0];

	return cpu;
}
#endif
#endif
/*
 * Placeholder: returns the kernel's logical CPU id until an
 * architecture-specific hardware processor-ID query replaces it.
 */
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{
	const _cpu_t *cpu = arch_curr_cpu();

	return cpu->id;
}
/* Number of CPUs is fixed at build time for this architecture. */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
	unsigned int n_cpus = CONFIG_MP_MAX_NUM_CPUS;

	return n_cpus;
}
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_ARCH_INLINES_H */
``` | /content/code_sandbox/include/zephyr/arch/arm/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 253 |
```objective-c
/*
*
*/
/**
* @file
* @brief ARM AArch32 specific kernel interface header
*
* This header contains the ARM AArch32 specific kernel interface. It is
* included by the kernel interface architecture-abstraction header
* (include/arm/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_ARCH_H_
/* Add include for DTS generated information */
#include <zephyr/devicetree.h>
#include <zephyr/arch/arm/thread.h>
#include <zephyr/arch/arm/exception.h>
#include <zephyr/arch/arm/irq.h>
#include <zephyr/arch/arm/error.h>
#include <zephyr/arch/arm/misc.h>
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/arch/arm/nmi.h>
#include <zephyr/arch/arm/asm_inline.h>
#include <zephyr/arch/common/sys_bitops.h>
#if defined(CONFIG_GDBSTUB)
#include <zephyr/arch/arm/gdbstub.h>
#endif
#ifdef CONFIG_CPU_CORTEX_M
#include <zephyr/arch/arm/cortex_m/cpu.h>
#include <zephyr/arch/arm/cortex_m/memory_map.h>
#include <zephyr/arch/common/sys_io.h>
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#include <zephyr/arch/arm/cortex_a_r/cpu.h>
#include <zephyr/arch/arm/cortex_a_r/sys_io.h>
#if defined(CONFIG_AARCH32_ARMV8_R)
#include <zephyr/arch/arm/cortex_a_r/lib_helpers.h>
#include <zephyr/arch/arm/cortex_a_r/armv8_timer.h>
#else
#include <zephyr/arch/arm/cortex_a_r/timer.h>
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
#include <zephyr/fatal_types.h>
/**
 * Architecture-specific fatal error reasons for AArch32, continuing
 * the numbering from K_ERR_ARCH_START (zephyr/fatal_types.h).
 */
enum k_fatal_error_reason_arch {
	/* Cortex-M MEMFAULT exceptions */
	K_ERR_ARM_MEM_GENERIC = K_ERR_ARCH_START,
	K_ERR_ARM_MEM_STACKING,
	K_ERR_ARM_MEM_UNSTACKING,
	K_ERR_ARM_MEM_DATA_ACCESS,
	K_ERR_ARM_MEM_INSTRUCTION_ACCESS,
	K_ERR_ARM_MEM_FP_LAZY_STATE_PRESERVATION,
	/* Cortex-M BUSFAULT exceptions */
	K_ERR_ARM_BUS_GENERIC,
	K_ERR_ARM_BUS_STACKING,
	K_ERR_ARM_BUS_UNSTACKING,
	K_ERR_ARM_BUS_PRECISE_DATA_BUS,
	K_ERR_ARM_BUS_IMPRECISE_DATA_BUS,
	K_ERR_ARM_BUS_INSTRUCTION_BUS,
	K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION,
	/* Cortex-M USAGEFAULT exceptions */
	K_ERR_ARM_USAGE_GENERIC,
	K_ERR_ARM_USAGE_DIV_0,
	K_ERR_ARM_USAGE_UNALIGNED_ACCESS,
	K_ERR_ARM_USAGE_STACK_OVERFLOW,
	K_ERR_ARM_USAGE_NO_COPROCESSOR,
	K_ERR_ARM_USAGE_ILLEGAL_EXC_RETURN,
	K_ERR_ARM_USAGE_ILLEGAL_EPSR,
	K_ERR_ARM_USAGE_UNDEFINED_INSTRUCTION,
	/* Cortex-M SECURE exceptions */
	K_ERR_ARM_SECURE_GENERIC,
	K_ERR_ARM_SECURE_ENTRY_POINT,
	K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE,
	K_ERR_ARM_SECURE_EXCEPTION_RETURN,
	K_ERR_ARM_SECURE_ATTRIBUTION_UNIT,
	K_ERR_ARM_SECURE_TRANSITION,
	K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION,
	K_ERR_ARM_SECURE_LAZY_STATE_ERROR,
	/* Cortex-A/R exceptions */
	K_ERR_ARM_UNDEFINED_INSTRUCTION,
	K_ERR_ARM_ALIGNMENT_FAULT,
	K_ERR_ARM_BACKGROUND_FAULT,
	K_ERR_ARM_PERMISSION_FAULT,
	K_ERR_ARM_SYNC_EXTERNAL_ABORT,
	K_ERR_ARM_ASYNC_EXTERNAL_ABORT,
	K_ERR_ARM_SYNC_PARITY_ERROR,
	K_ERR_ARM_ASYNC_PARITY_ERROR,
	K_ERR_ARM_DEBUG_EVENT,
	K_ERR_ARM_TRANSLATION_FAULT,
	K_ERR_ARM_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT
};
#endif /* _ASMLANGUAGE */
/**
* @brief Declare the ARCH_STACK_PTR_ALIGN
*
* Denotes the required alignment of the stack pointer on public API
* boundaries
*
*/
#ifdef CONFIG_STACK_ALIGN_DOUBLE_WORD
#define ARCH_STACK_PTR_ALIGN 8
#else
#define ARCH_STACK_PTR_ALIGN 4
#endif
/**
* @brief Declare the minimum alignment for a thread stack
*
* Denotes the minimum required alignment of a thread stack.
*
* Note:
* User thread stacks must respect the minimum MPU region
* alignment requirement.
*/
#if defined(CONFIG_USERSPACE)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
#elif defined(CONFIG_ARM_AARCH32_MMU)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MMU_REGION_MIN_ALIGN_AND_SIZE
#else
#define Z_THREAD_MIN_STACK_ALIGN ARCH_STACK_PTR_ALIGN
#endif
/**
* @brief Declare a minimum MPU guard alignment and size
*
* This specifies the minimum MPU guard alignment/size for the MPU. This
* will be used to denote the guard section of the stack, if it exists.
*
* One key note is that this guard results in extra bytes being added to
* the stack. APIs which give the stack ptr and stack size will take this
* guard size into account.
*
* Stack is allocated, but initial stack pointer is at the end
* (highest address). Stack grows down to the actual allocation
* address (lowest address). Stack guard, if present, will comprise
* the lowest MPU_GUARD_ALIGN_AND_SIZE bytes of the stack.
*
* The guard region must include enough space for an exception frame
* below the trapping region as a stack fault will end up storing
* the exception data (0x20 bytes) onto the stack below wherever
* the stack pointer refers, even if that is within the guard region,
* so we make sure the region is strictly larger than this size by
* setting it to 0x40 (to respect any power-of-two requirements).
*
* As the stack grows down, it will reach the end of the stack when it
* encounters either the stack guard region, or the stack allocation
* address.
*
* ----------------------- <---- Stack allocation address + stack size +
* | | MPU_GUARD_ALIGN_AND_SIZE
* | Some thread data | <---- Defined when thread is created
* | ... |
* |---------------------| <---- Actual initial stack ptr
* | Initial Stack Ptr | aligned to ARCH_STACK_PTR_ALIGN
* | ... |
* | ... |
* | ... |
* | ... |
* | ... |
* | ... |
* | ... |
* | ... |
* | Stack Ends |
* |---------------------- <---- Stack Buffer Ptr from API
* | MPU Guard, |
* | if present |
* ----------------------- <---- Stack Allocation address
*
*/
#if defined(CONFIG_MPU_STACK_GUARD)
/* make sure there's more than enough space for an exception frame */
#if CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE <= 0x20
#define MPU_GUARD_ALIGN_AND_SIZE 0x40
#else
#define MPU_GUARD_ALIGN_AND_SIZE CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
#endif
#else
#define MPU_GUARD_ALIGN_AND_SIZE 0
#endif
/**
* @brief Declare the MPU guard alignment and size for a thread stack
* that is using the Floating Point services.
*
* For threads that are using the Floating Point services under Shared
* Registers (CONFIG_FPU_SHARING=y) mode, the exception stack frame may
* contain both the basic stack frame and the FP caller-saved context,
* upon exception entry. Therefore, a wide guard region is required to
* guarantee that stack-overflow detection will always be successful.
*/
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) \
&& defined(CONFIG_MPU_STACK_GUARD)
#if CONFIG_MPU_STACK_GUARD_MIN_SIZE_FLOAT <= 0x20
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT 0x40
#else
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT CONFIG_MPU_STACK_GUARD_MIN_SIZE_FLOAT
#endif
#else
#define MPU_GUARD_ALIGN_AND_SIZE_FLOAT 0
#endif
/**
* @brief Define alignment of an MPU guard
*
* Minimum alignment of the start address of an MPU guard, depending on
* whether the MPU architecture enforces a size (and power-of-two) alignment
* requirement.
*/
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define Z_MPU_GUARD_ALIGN (MAX(MPU_GUARD_ALIGN_AND_SIZE, \
MPU_GUARD_ALIGN_AND_SIZE_FLOAT))
#else
#define Z_MPU_GUARD_ALIGN MPU_GUARD_ALIGN_AND_SIZE
#endif
#if defined(CONFIG_USERSPACE) && \
defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
/* This MPU requires regions to be sized to a power of two, and aligned to
* their own size. Since an MPU region must be able to cover the entire
* user-accessible stack buffer, we size/align to match. The privilege
* mode stack is generated elsewhere in memory.
*/
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_POW2_CEIL(size)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) Z_POW2_CEIL(size)
#else
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) MAX(Z_THREAD_MIN_STACK_ALIGN, \
Z_MPU_GUARD_ALIGN)
#ifdef CONFIG_USERSPACE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
ROUND_UP(size, CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
#endif
#endif
#ifdef CONFIG_MPU_STACK_GUARD
/* Kernel-only stacks need an MPU guard region programmed at the beginning of
* the stack object, so align the object appropriately.
*/
#define ARCH_KERNEL_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_MPU_GUARD_ALIGN
#endif
/* On arm, all MPU guards are carve-outs. */
#define ARCH_THREAD_STACK_RESERVED 0
/* Legacy case: retain containing extern "C" with C++ */
#ifdef CONFIG_ARM_MPU
#ifdef CONFIG_CPU_HAS_ARM_MPU
#include <zephyr/arch/arm/mpu/arm_mpu.h>
#endif /* CONFIG_CPU_HAS_ARM_MPU */
#ifdef CONFIG_CPU_HAS_NXP_MPU
#include <zephyr/arch/arm/mpu/nxp_mpu.h>
#endif /* CONFIG_CPU_HAS_NXP_MPU */
#endif /* CONFIG_ARM_MPU */
#ifdef CONFIG_ARM_AARCH32_MMU
#include <zephyr/arch/arm/mmu/arm_mmu.h>
#endif /* CONFIG_ARM_AARCH32_MMU */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,197 |
```c
/*
*
*/
/**
* @file
* @brief ARM AArch32 specific syscall header
*
* This header contains the ARM AArch32 specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_
#define _SVC_CALL_CONTEXT_SWITCH 0
#define _SVC_CALL_IRQ_OFFLOAD 1
#define _SVC_CALL_RUNTIME_EXCEPT 2
#define _SVC_CALL_SYSTEM_CALL 3
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/arch/arm/misc.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Syscall invocation macros. arm-specific machine constraints used to ensure
* args land in the proper registers.
*/
/* Invoke a system call with six arguments.
 *
 * The arguments are pinned into r0-r5 and the call ID into r6 before the
 * SVC (immediate _SVC_CALL_SYSTEM_CALL) traps into the kernel's SVC
 * handler; the return value comes back in r0.  r8 and ip are declared
 * clobbered by the call.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r4 __asm__("r4") = arg5;
	register uint32_t r5 __asm__("r5") = arg6;
	register uint32_t r6 __asm__("r6") = call_id;
	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4), "r" (r5), "r" (r6)
			 : "r8", "memory", "ip");
	return ret;
}
/* Invoke a system call with five arguments (r0-r4 carry the arguments,
 * r6 the call ID); same SVC convention as arch_syscall_invoke6().
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r4 __asm__("r4") = arg5;
	register uint32_t r6 __asm__("r6") = call_id;
	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4), "r" (r6)
			 : "r8", "memory", "ip");
	return ret;
}
/* Invoke a system call with four arguments (r0-r3 carry the arguments,
 * r6 the call ID); same SVC convention as arch_syscall_invoke6().
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r3 __asm__("r3") = arg4;
	register uint32_t r6 __asm__("r6") = call_id;
	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r6)
			 : "r8", "memory", "ip");
	return ret;
}
/* Invoke a system call with three arguments; unused argument registers
 * (here r3) become clobbers instead of inputs.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r2 __asm__("r2") = arg3;
	register uint32_t r6 __asm__("r6") = call_id;
	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1), "=r"(r2)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r2), "r" (r6)
			 : "r8", "memory", "r3", "ip");
	return ret;
}
/* Invoke a system call with two arguments; r2/r3 are clobbered rather
 * than passed.
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r1 __asm__("r1") = arg2;
	register uint32_t r6 __asm__("r6") = call_id;
	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret), "=r"(r1)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r1), "r" (r6)
			 : "r8", "memory", "r2", "r3", "ip");
	return ret;
}
/* Invoke a system call with one argument; r1-r3 are clobbered rather
 * than passed.
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
	register uint32_t ret __asm__("r0") = arg1;
	register uint32_t r6 __asm__("r6") = call_id;
	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r6)
			 : "r8", "memory", "r1", "r2", "r3", "ip");
	return ret;
}
/* Invoke a system call with no arguments; only the call ID is passed in
 * r6, and the result is returned in r0.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register uint32_t ret __asm__("r0");
	register uint32_t r6 __asm__("r6") = call_id;
	__asm__ volatile("svc %[svid]\n"
			 : "=r"(ret)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
			   "r" (ret), "r" (r6)
			 : "r8", "memory", "r1", "r2", "r3", "ip");
	return ret;
}
/* Return true when the CPU is executing in user (unprivileged thread)
 * context.
 *
 * On Cortex-M, a non-zero IPSR means the CPU is in handler mode, which
 * can never be user context, so bail out early; otherwise the decision
 * is delegated to z_arm_thread_is_in_user_mode().
 */
static inline bool arch_is_user_context(void)
{
#if defined(CONFIG_CPU_CORTEX_M)
	uint32_t value;
	/* check for handler mode */
	__asm__ volatile("mrs %0, IPSR\n\t" : "=r"(value));
	if (value) {
		return false;
	}
#endif
	return z_arm_thread_is_in_user_mode();
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,663 |
```c
/* ARM AArch32 GCC specific public inline assembler functions and macros */
/*
*
*/
/* Either public functions or macros or invoked by public functions */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_ASM_INLINE_GCC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_ASM_INLINE_GCC_H_
/*
* The file must not be included directly
* Include arch/cpu.h instead
*/
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <zephyr/types.h>
#include <zephyr/arch/arm/exception.h>
#include <cmsis_core.h>
#if defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#include <zephyr/arch/arm/cortex_a_r/cpu.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* On ARMv7-M and ARMv8-M Mainline CPUs, this function prevents regular
* exceptions (i.e. with interrupt priority lower than or equal to
* _EXC_IRQ_DEFAULT_PRIO) from interrupting the CPU. NMI, Faults, SVC,
* and Zero Latency IRQs (if supported) may still interrupt the CPU.
*
* On ARMv6-M and ARMv8-M Baseline CPUs, this function reads the value of
* PRIMASK which shows if interrupts are enabled, then disables all interrupts
* except NMI.
*/
/* Lock interrupts and return an opaque key encoding the previous lock
 * state, to be passed back to arch_irq_unlock().  See the block comment
 * above for the per-architecture semantics (PRIMASK on Baseline,
 * BASEPRI on Mainline, CPSR.I on Cortex-A/R).
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
#if CONFIG_MP_MAX_NUM_CPUS == 1 || defined(CONFIG_ARMV8_M_BASELINE)
	key = __get_PRIMASK();
	__disable_irq();
#else
#error "Cortex-M0 and Cortex-M0+ require SoC specific support for cross core synchronisation."
#endif
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	key = __get_BASEPRI();
	__set_BASEPRI_MAX(_EXC_IRQ_DEFAULT_PRIO);
	__ISB();
#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_AARCH32_ARMV8_R) \
	|| defined(CONFIG_ARMV7_A)
	/* Key is the CPSR I bit alone: non-zero means IRQs were already
	 * masked before we disable them with "cpsid i".
	 */
	__asm__ volatile(
		"mrs %0, cpsr;"
		"and %0, #" STRINGIFY(I_BIT) ";"
		"cpsid i;"
		: "=r" (key)
		:
		: "memory", "cc");
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	return key;
}
/* On Cortex-M0/M0+, this enables all interrupts if they were not
* previously disabled.
*/
/* Restore the interrupt lock state captured by arch_irq_lock().
 *
 * On PRIMASK/CPSR-based variants a non-zero key means interrupts were
 * already locked before the matching arch_irq_lock(), so they are left
 * locked; on BASEPRI-based variants the saved priority mask is simply
 * written back.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	if (key != 0U) {
		return;
	}
	__enable_irq();
	__ISB();
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__set_BASEPRI(key);
	__ISB();
#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_AARCH32_ARMV8_R) \
	|| defined(CONFIG_ARMV7_A)
	if (key != 0U) {
		return;
	}
	__enable_irq();
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}
/* Test whether a key returned by arch_irq_lock() represents the
 * "interrupts enabled" state.  A zero key means unlocked under both
 * the PRIMASK and BASEPRI conventions.
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	if (key != 0U) {
		return false;
	}

	return true;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_ASM_INLINE_GCC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/asm_inline_gcc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 761 |
```c
/*
*
*/
/**
* @file
* @brief ARM AArch32 public kernel miscellaneous
*
* ARM AArch32-specific kernel miscellaneous interface. Included by arm/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_MISC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_MISC_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern uint32_t sys_clock_cycle_get_32(void);
/* Architecture hook returning the 32-bit hardware cycle counter;
 * forwards to the system clock driver.
 */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}
extern uint64_t sys_clock_cycle_get_64(void);
/* Architecture hook returning the 64-bit hardware cycle counter;
 * forwards to the system clock driver.
 */
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
/* Execute a single NOP instruction (compiler may not optimize it away
 * thanks to the volatile qualifier).
 */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}
#if defined(CONFIG_USERSPACE)
extern bool z_arm_thread_is_in_user_mode(void);
#endif
#if defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK)
/* Prototype of a hook that can be enabled to be called every time the CPU is
* made idle (the calls will be done from k_cpu_idle() and k_cpu_atomic_idle()).
* If this hook returns false, the CPU is prevented from entering the actual
* sleep (the WFE/WFI instruction is skipped).
*/
bool z_arm_on_enter_cpu_idle(void);
#endif
#if defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK)
/* Prototype of a hook that can be enabled to be called every time the CPU is
* made idle (the calls will be done from k_cpu_idle() and k_cpu_atomic_idle()).
* The function is called before interrupts are disabled and can prepare to
* upcoming call to z_arm_on_enter_cpu_idle.
*/
void z_arm_on_enter_cpu_idle_prepare(void);
#endif
#endif
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_MISC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/misc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 386 |
```c
/*
*
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
/* Callee-saved register set preserved across a context switch: r4-r11
 * plus the process stack pointer (and lr when CONFIG_USE_SWITCH).
 */
struct _callee_saved {
	uint32_t v1;  /* r4 */
	uint32_t v2;  /* r5 */
	uint32_t v3;  /* r6 */
	uint32_t v4;  /* r7 */
	uint32_t v5;  /* r8 */
	uint32_t v6;  /* r9 */
	uint32_t v7;  /* r10 */
	uint32_t v8;  /* r11 */
	uint32_t psp; /* r13 */
#ifdef CONFIG_USE_SWITCH
	uint32_t lr;  /* lr */
#endif
};

typedef struct _callee_saved _callee_saved_t;
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* FP registers s16-s31, saved/restored on preemption for threads using
 * the shared FP services (guarded by CONFIG_FPU && CONFIG_FPU_SHARING).
 */
struct _preempt_float {
	float  s16;
	float  s17;
	float  s18;
	float  s19;
	float  s20;
	float  s21;
	float  s22;
	float  s23;
	float  s24;
	float  s25;
	float  s26;
	float  s27;
	float  s28;
	float  s29;
	float  s30;
	float  s31;
};
#endif
/* Per-thread, ARM AArch32-specific kernel state embedded in
 * struct k_thread.
 */
struct _thread_arch {
	/* interrupt locking key */
	uint32_t basepri;
	/* r0 in stack frame cannot be written to reliably */
	uint32_t swap_return_value;
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/*
	 * No cooperative floating point register set structure exists for
	 * the Cortex-M as it automatically saves the necessary registers
	 * in its exception stack frame.
	 */
	struct _preempt_float preempt_float;
#endif
#if defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R)
	int8_t exception_depth;
#endif
#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
	/*
	 * Status variable holding several thread status flags
	 * as follows:
	 *
	 * byte 0
	 * +-bits 4-7-----bit-3----------bit-2--------bit-1---+----bit-0------+
	 * :          |             |            |            |               |
	 * : reserved |<Guard FLOAT>|  reserved  |  reserved  |  <priv mode>  |
	 * :   bits   |             |            |            | CONTROL.nPRIV |
	 * +------------------------------------------------------------------+
	 *
	 * byte 1
	 * +----------------------------bits 8-15-----------------------------+
	 * : Least significant byte of EXC_RETURN                             |
	 * : bit 15| bit 14| bit 13 | bit 12| bit 11 | bit 10 | bit 9 | bit 8 |
	 * :  Res  |   S   |  DCRS  | FType |  Mode  | SPSel  |  Res  |  ES   |
	 * +------------------------------------------------------------------+
	 *
	 * Bit 0: thread's current privileged mode (Supervisor or User mode)
	 *        Mirrors CONTROL.nPRIV flag.
	 * Bit 2: Deprecated in favor of FType. Note: FType = !CONTROL.FPCA.
	 *        indicating whether the thread has an active FP context.
	 *        Mirrors CONTROL.FPCA flag.
	 * Bit 3: indicating whether the thread is applying the long (FLOAT)
	 *        or the default MPU stack guard size.
	 *
	 * Bits 8-15: Least significant octet of the EXC_RETURN value when a
	 *            thread is switched-out. The value is copied from LR when
	 *            entering the PendSV handler. When the thread is
	 *            switched in again, the value is restored to LR before
	 *            exiting the PendSV handler.
	 */
	union {
		uint32_t mode;
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
		struct {
			uint8_t mode_bits;
			uint8_t mode_exc_return;
			uint16_t mode_reserved2;
		};
#endif
	};
#if defined(CONFIG_USERSPACE)
	/* base address of the thread's privileged-mode stack */
	uint32_t priv_stack_start;
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	uint32_t priv_stack_end;
	/* saved user-mode stack pointer -- see context switch code */
	uint32_t sp_usr;
#endif
#endif
#endif
};
#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_MPU_STACK_GUARD)
#define Z_ARM_MODE_MPU_GUARD_FLOAT_Msk (1 << 3)
#endif
typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,039 |
```c
/* ARM AArch32 inline assembler functions and macros for public functions */
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_ASM_INLINE_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_ASM_INLINE_H_
/*
* The file must not be included directly
* Include kernel.h instead
*/
#if defined(__GNUC__)
#include <zephyr/arch/arm/asm_inline_gcc.h>
#else
#include <arch/arm/asm_inline_other.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_ASM_INLINE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/asm_inline.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 106 |
```c
/**
*
*/
#ifndef ZEPHYR_INCLUDE_BARRIER_ARM_H_
#define ZEPHYR_INCLUDE_BARRIER_ARM_H_
#ifndef ZEPHYR_INCLUDE_SYS_BARRIER_H_
#error Please include <zephyr/sys/barrier.h>
#endif
#include <cmsis_core.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Full data memory barrier (CMSIS __DMB). */
static ALWAYS_INLINE void z_barrier_dmem_fence_full(void)
{
	__DMB();
}
/* Full data synchronization barrier (CMSIS __DSB). */
static ALWAYS_INLINE void z_barrier_dsync_fence_full(void)
{
	__DSB();
}
/* Instruction synchronization barrier (CMSIS __ISB). */
static ALWAYS_INLINE void z_barrier_isync_fence_full(void)
{
	__ISB();
}
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_BARRIER_ARM_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/barrier.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 146 |
```c
/**
* @file
*
* @brief ARM AArch32 NMI routines
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_NMI_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_NMI_H_
#if !defined(_ASMLANGUAGE) && defined(CONFIG_RUNTIME_NMI)
extern void z_arm_nmi_set_handler(void (*pHandler)(void));
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_NMI_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/nmi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 92 |
```c
/*
*
*/
/**
* @file
* @brief ARM AArch32 Cortex-M public exception handling
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_EXCEPTION_H_
#include <zephyr/devicetree.h>
#include <zephyr/arch/arm/cortex_m/nvic.h>
/* for assembler, only works with constants */
#define Z_EXC_PRIO(pri) (((pri) << (8 - NUM_IRQ_PRIO_BITS)) & 0xff)
/*
* In architecture variants with non-programmable fault exceptions
* (e.g. Cortex-M Baseline variants), hardware ensures processor faults
* are given the highest interrupt priority level. SVCalls are assigned
* the highest configurable priority level (level 0); note, however, that
* this interrupt level may be shared with HW interrupts.
*
* In Cortex variants with programmable fault exception priorities we
* assign the highest interrupt priority level (level 0) to processor faults
* with configurable priority.
* The highest priority level may be shared with either Zero-Latency IRQs (if
* support for the feature is enabled) or with SVCall priority level.
* Regular HW IRQs are always assigned priority levels lower than the priority
* levels for SVCalls, Zero-Latency IRQs and processor faults.
*
* PendSV IRQ (which is used in Cortex-M variants to implement thread
* context-switching) is assigned the lowest IRQ priority level.
*/
#if defined(CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS)
#define _EXCEPTION_RESERVED_PRIO 1
#else
#define _EXCEPTION_RESERVED_PRIO 0
#endif
#define _EXC_FAULT_PRIO 0
#define _EXC_ZERO_LATENCY_IRQS_PRIO 0
#define _EXC_SVC_PRIO COND_CODE_1(CONFIG_ZERO_LATENCY_IRQS, \
(CONFIG_ZERO_LATENCY_LEVELS), (0))
#define _IRQ_PRIO_OFFSET (_EXCEPTION_RESERVED_PRIO + _EXC_SVC_PRIO)
#define IRQ_PRIO_LOWEST (BIT(NUM_IRQ_PRIO_BITS) - (_IRQ_PRIO_OFFSET) - 1)
#define _EXC_IRQ_DEFAULT_PRIO Z_EXC_PRIO(_IRQ_PRIO_OFFSET)
/* Use lowest possible priority level for PendSV */
#define _EXC_PENDSV_PRIO 0xff
#define _EXC_PENDSV_PRIO_MASK Z_EXC_PRIO(_EXC_PENDSV_PRIO)
#ifdef _ASMLANGUAGE
GTEXT(z_arm_exc_exit);
#else
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Registers s16-s31 (d8-d15, q4-q7) must be preserved across subroutine calls.
*
* Registers s0-s15 (d0-d7, q0-q3) do not have to be preserved (and can be used
* for passing arguments or returning results in standard procedure-call variants).
*
* Registers d16-d31 (q8-q15), do not have to be preserved.
*/
/* FP register state held in the exception stack frame (see the register
 * preservation notes above).
 */
struct __fpu_sf {
	uint32_t s[16]; /* s0~s15 (d0-d7) */
#ifdef CONFIG_VFP_FEATURE_REGS_S64_D32
	uint64_t d[16]; /* d16~d31 */
#endif
	uint32_t fpscr;     /* FP status and control register */
	uint32_t undefined; /* NOTE(review): padding/reserved word -- confirm exact role */
};
#endif
/* Additional register state that is not stacked by hardware on exception
* entry.
*
* These fields are ONLY valid in the ESF copy passed into z_arm_fatal_error().
* When information for a member is unavailable, the field is set to zero.
*/
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
struct __extra_esf_info {
	_callee_saved_t *callee; /* callee-saved register set (r4-r11, psp) */
	uint32_t msp;            /* main stack pointer value */
	uint32_t exc_return;     /* EXC_RETURN value from exception entry */
};
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
/* ARM GPRs are often designated by two different names */
#define sys_define_gpr_with_alias(name1, name2) union { uint32_t name1, name2; }
/* Exception stack frame: the basic hardware-stacked registers (r0-r3,
 * r12, lr, pc, xPSR), plus optional FP state and extra (software-
 * collected) info depending on the configuration.
 */
struct arch_esf {
	struct __basic_sf {
		sys_define_gpr_with_alias(a1, r0);
		sys_define_gpr_with_alias(a2, r1);
		sys_define_gpr_with_alias(a3, r2);
		sys_define_gpr_with_alias(a4, r3);
		sys_define_gpr_with_alias(ip, r12);
		sys_define_gpr_with_alias(lr, r14);
		sys_define_gpr_with_alias(pc, r15);
		uint32_t xpsr;
	} basic;
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	struct __fpu_sf fpu;
#endif
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	struct __extra_esf_info extra_info;
#endif
};
extern uint32_t z_arm_coredump_fault_sp;
extern void z_arm_exc_exit(void);
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_m/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,061 |
```c
/*
*
*/
/**
* @file
* @brief ARM AArch32 public error handling
*
* ARM AArch32-specific kernel error handling interface. Included by
* arm/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_ERROR_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_ERROR_H_
#include <zephyr/arch/arm/syscall.h>
#include <zephyr/arch/arm/exception.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(CONFIG_CPU_CORTEX_M)
/* ARMv6 will hard-fault if SVC is called with interrupts locked. Just
* force them unlocked, the thread is in an undefined state anyway
*
* On ARMv7m we won't get a HardFault, but if interrupts were locked the
* thread will continue executing after the exception and forbid PendSV to
* schedule a new thread until they are unlocked which is not what we want.
* Force them unlocked as well.
*/
#define ARCH_EXCEPT(reason_p) \
do {\
arch_irq_unlock(0); \
__asm__ volatile( \
"mov r0, %[_reason]\n" \
"svc %[id]\n" \
:: [_reason] "r" (reason_p), [id] "i" (_SVC_CALL_RUNTIME_EXCEPT) \
: "r0", "memory"); \
} while (false)
#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_AARCH32_ARMV8_R) \
|| defined(CONFIG_ARMV7_A)
/*
* In order to support using svc for an exception while running in an
* isr, stack $lr_svc before calling svc. While exiting the isr,
* z_check_stack_sentinel is called. $lr_svc contains the return address.
* If the sentinel is wrong, it calls svc to cause an oops. This svc
* call will overwrite $lr_svc, losing the return address from the
* z_check_stack_sentinel call if it is not stacked before the svc.
*/
#define ARCH_EXCEPT(reason_p) \
register uint32_t r0 __asm__("r0") = reason_p; \
do { \
__asm__ volatile ( \
"push {lr}\n\t" \
"cpsie i\n\t" \
"svc %[id]\n\t" \
"pop {lr}\n\t" \
: \
: "r" (r0), [id] "i" (_SVC_CALL_RUNTIME_EXCEPT) \
: "memory"); \
} while (false)
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_ERROR_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/error.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 586 |
```c
/*
*
*/
#ifndef _CORTEX_M_CPU_H
#define _CORTEX_M_CPU_H
#ifdef _ASMLANGUAGE
#define _SCS_BASE_ADDR _PPB_INT_SCS
/* ICSR defines */
#define _SCS_ICSR (_SCS_BASE_ADDR + 0xd04)
#define _SCS_ICSR_PENDSV (1 << 28)
#define _SCS_ICSR_UNPENDSV (1 << 27)
#define _SCS_ICSR_RETTOBASE (1 << 11)
#define _SCS_MPU_CTRL (_SCS_BASE_ADDR + 0xd94)
/* CONTROL defines */
#define _CONTROL_FPCA_Msk (1 << 2)
/* EXC_RETURN defines */
#define _EXC_RETURN_SPSEL_Msk (1 << 2)
#define _EXC_RETURN_FTYPE_Msk (1 << 4)
#else
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* CP10 Access Bits */
#define CPACR_CP10_Pos 20U
#define CPACR_CP10_Msk (3UL << CPACR_CP10_Pos)
#define CPACR_CP10_NO_ACCESS (0UL << CPACR_CP10_Pos)
#define CPACR_CP10_PRIV_ACCESS (1UL << CPACR_CP10_Pos)
#define CPACR_CP10_RESERVED (2UL << CPACR_CP10_Pos)
#define CPACR_CP10_FULL_ACCESS (3UL << CPACR_CP10_Pos)
/* CP11 Access Bits */
#define CPACR_CP11_Pos 22U
#define CPACR_CP11_Msk (3UL << CPACR_CP11_Pos)
#define CPACR_CP11_NO_ACCESS (0UL << CPACR_CP11_Pos)
#define CPACR_CP11_PRIV_ACCESS (1UL << CPACR_CP11_Pos)
#define CPACR_CP11_RESERVED (2UL << CPACR_CP11_Pos)
#define CPACR_CP11_FULL_ACCESS (3UL << CPACR_CP11_Pos)
#ifdef CONFIG_PM_S2RAM
/* CPU register context preserved across suspend-to-RAM
 * (CONFIG_PM_S2RAM): stack pointers and limits plus the status and
 * mask registers.
 */
struct __cpu_context {
	/* GPRs are saved onto the stack */
	uint32_t msp;       /* Main Stack Pointer */
	uint32_t msplim;    /* MSP limit register */
	uint32_t psp;       /* Process Stack Pointer */
	uint32_t psplim;    /* PSP limit register */
	uint32_t apsr;      /* Application Program Status Register */
	uint32_t ipsr;      /* Interrupt Program Status Register */
	uint32_t epsr;      /* Execution Program Status Register */
	uint32_t primask;
	uint32_t faultmask;
	uint32_t basepri;
	uint32_t control;
};

typedef struct __cpu_context _cpu_context_t;
#endif /* CONFIG_PM_S2RAM */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* _CORTEX_M_CPU_H */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_m/cpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 556 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_NVIC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_NVIC_H_
#include <zephyr/devicetree.h>
#if defined(CONFIG_ARMV8_1_M_MAINLINE)
/* The order here is on purpose since ARMv8.1-M SoCs may define
* CONFIG_ARMV6_M_ARMV8_M_BASELINE, CONFIG_ARMV7_M_ARMV8_M_MAINLINE or
* CONFIG_ARMV8_M_MAINLINE so we want to check for ARMv8.1-M first.
*/
#define NVIC_NODEID DT_INST(0, arm_v8_1m_nvic)
#elif defined(CONFIG_ARMV8_M_BASELINE) || defined(CONFIG_ARMV8_M_MAINLINE)
#define NVIC_NODEID DT_INST(0, arm_v8m_nvic)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
#define NVIC_NODEID DT_INST(0, arm_v7m_nvic)
#elif defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
#define NVIC_NODEID DT_INST(0, arm_v6m_nvic)
#endif
#define NUM_IRQ_PRIO_BITS DT_PROP(NVIC_NODEID, arm_num_irq_priority_bits)
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_NVIC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_m/nvic.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 277 |
```c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_FPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_FPU_H_
/* The types below need <stdint.h>/<stdbool.h>; include them here so the
 * header is self-contained for any includer.
 */
#include <stdint.h>
#include <stdbool.h>

/*
 * Buffer for a full FP (VFP) register context.
 *
 * caller_saved: 16 words -- presumably s0-s15; confirm against the
 *               save/restore implementation.
 * callee_saved: 16 words -- presumably s16-s31.
 * fpscr:        FP status and control register.
 * ctx_saved:    true when the buffer holds a valid saved context.
 */
struct fpu_ctx_full {
	uint32_t caller_saved[16];
	uint32_t callee_saved[16];
	uint32_t fpscr;
	bool ctx_saved;
};

/* Save the current FP register context into @p buffer. */
void z_arm_save_fp_context(struct fpu_ctx_full *buffer);

/* Restore an FP register context previously saved in @p buffer. */
void z_arm_restore_fp_context(const struct fpu_ctx_full *buffer);
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_FPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_m/fpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 113 |
```c
/*
*
*/
#ifndef _ARM_CORTEX_M_MPU_MEM_CFG_H_
#define _ARM_CORTEX_M_MPU_MEM_CFG_H_
#include <zephyr/arch/arm/mpu/arm_mpu.h>
/*
 * Map CONFIG_FLASH_SIZE / CONFIG_SRAM_SIZE (both in KiB) to the REGION_*
 * size constants consumed by the ARMv7-M MPU, rounding the configured size
 * up to the next supported power-of-two region. ARMv8-M MPUs use base/limit
 * addressing instead of power-of-two regions, so no mapping is needed there.
 */
#if !defined(CONFIG_ARMV8_M_BASELINE) && !defined(CONFIG_ARMV8_M_MAINLINE)
/* Flash Region Definitions */
#if CONFIG_FLASH_SIZE <= 64
#define REGION_FLASH_SIZE REGION_64K
#elif CONFIG_FLASH_SIZE <= 128
#define REGION_FLASH_SIZE REGION_128K
#elif CONFIG_FLASH_SIZE <= 256
#define REGION_FLASH_SIZE REGION_256K
#elif CONFIG_FLASH_SIZE <= 512
#define REGION_FLASH_SIZE REGION_512K
#elif CONFIG_FLASH_SIZE <= 1024
#define REGION_FLASH_SIZE REGION_1M
#elif CONFIG_FLASH_SIZE <= 2048
#define REGION_FLASH_SIZE REGION_2M
#elif CONFIG_FLASH_SIZE <= 4096
#define REGION_FLASH_SIZE REGION_4M
#elif CONFIG_FLASH_SIZE <= 8192
#define REGION_FLASH_SIZE REGION_8M
#elif CONFIG_FLASH_SIZE <= 16384
#define REGION_FLASH_SIZE REGION_16M
/* NOTE(review): flash sizes in (16M, 64M] are all rounded up to a 64M
 * region; there is deliberately no 32M step here — confirm before adding
 * one, as the MPU region then maps more than the physical flash. */
#elif CONFIG_FLASH_SIZE <= 65536
#define REGION_FLASH_SIZE REGION_64M
#elif CONFIG_FLASH_SIZE <= 131072
#define REGION_FLASH_SIZE REGION_128M
#elif CONFIG_FLASH_SIZE <= 262144
#define REGION_FLASH_SIZE REGION_256M
#elif CONFIG_FLASH_SIZE <= 524288
#define REGION_FLASH_SIZE REGION_512M
#else
#error "Unsupported flash size configuration"
#endif
/* SRAM Region Definitions */
#if CONFIG_SRAM_SIZE <= 16
#define REGION_SRAM_SIZE REGION_16K
#elif CONFIG_SRAM_SIZE <= 32
#define REGION_SRAM_SIZE REGION_32K
#elif CONFIG_SRAM_SIZE <= 64
#define REGION_SRAM_SIZE REGION_64K
#elif CONFIG_SRAM_SIZE <= 128
#define REGION_SRAM_SIZE REGION_128K
#elif CONFIG_SRAM_SIZE <= 256
#define REGION_SRAM_SIZE REGION_256K
#elif CONFIG_SRAM_SIZE <= 512
#define REGION_SRAM_SIZE REGION_512K
#elif CONFIG_SRAM_SIZE <= 1024
#define REGION_SRAM_SIZE REGION_1M
#elif CONFIG_SRAM_SIZE <= 2048
#define REGION_SRAM_SIZE REGION_2M
#elif CONFIG_SRAM_SIZE <= 4096
#define REGION_SRAM_SIZE REGION_4M
#elif CONFIG_SRAM_SIZE <= 8192
#define REGION_SRAM_SIZE REGION_8M
#elif CONFIG_SRAM_SIZE <= 16384
#define REGION_SRAM_SIZE REGION_16M
/* Fix: use <= (round up) like every other branch. The previous '=='
 * tests made SRAM sizes strictly between 16M and 32M, or between 32M
 * and 64M, fail with #error even though a covering region exists.
 * Exact 32768/65536 configurations still map to the same regions. */
#elif CONFIG_SRAM_SIZE <= 32768
#define REGION_SRAM_SIZE REGION_32M
#elif CONFIG_SRAM_SIZE <= 65536
#define REGION_SRAM_SIZE REGION_64M
#else
#error "Unsupported sram size configuration"
#endif
#endif /* !ARMV8_M_BASELINE && !ARMV8_M_MAINLINE */
#endif /* _ARM_CORTEX_M_MPU_MEM_CFG_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_m/arm_mpu_mem_cfg.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 614 |
```objective-c
/*
*
*/
/**
 * @file
 * @brief ARM CORTEX-M memory map
 *
 * This module contains definitions for the memory map of the CORTEX-M series of
 * processors.
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_MEMORY_MAP_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_MEMORY_MAP_H_
#include <zephyr/sys/util.h>
/* 0x00000000 -> 0x1fffffff: Code in ROM [0.5 GB] */
#define _CODE_BASE_ADDR 0x00000000
#define _CODE_END_ADDR 0x1FFFFFFF
/* 0x20000000 -> 0x3fffffff: SRAM [0.5GB]
 *
 * The first 1MB of SRAM is the bit-band region; on parts implementing
 * bit-banding, each 32-bit word of the alias area maps one bit of that
 * region.
 */
#define _SRAM_BASE_ADDR 0x20000000
#define _SRAM_BIT_BAND_REGION 0x20000000
#define _SRAM_BIT_BAND_REGION_END 0x200FFFFF
#define _SRAM_BIT_BAND_ALIAS 0x22000000
#define _SRAM_BIT_BAND_ALIAS_END 0x23FFFFFF
#define _SRAM_END_ADDR 0x3FFFFFFF
/* 0x40000000 -> 0x5fffffff: Peripherals [0.5GB]
 * (same bit-band region/alias layout as SRAM)
 */
#define _PERI_BASE_ADDR 0x40000000
#define _PERI_BIT_BAND_REGION 0x40000000
#define _PERI_BIT_BAND_REGION_END 0x400FFFFF
#define _PERI_BIT_BAND_ALIAS 0x42000000
#define _PERI_BIT_BAND_ALIAS_END 0x43FFFFFF
#define _PERI_END_ADDR 0x5FFFFFFF
/* 0x60000000 -> 0x9fffffff: external RAM [1GB] */
#define _ERAM_BASE_ADDR 0x60000000
#define _ERAM_END_ADDR 0x9FFFFFFF
/* 0xa0000000 -> 0xdfffffff: external devices [1GB] */
#define _EDEV_BASE_ADDR 0xA0000000
#define _EDEV_END_ADDR 0xDFFFFFFF
/* 0xe0000000 -> 0xffffffff: varies by processor (see below) */
/* 0xe0000000 -> 0xe00fffff: private peripheral bus */
/* 0xe0000000 -> 0xe003ffff: internal [256KB]
 *
 * Internal PPB layout is selected per core family below (ITM/DWT/BPU/FPB
 * blocks differ between ARMv6-M, ARMv7-M and ARMv8-M implementations).
 */
#define _PPB_INT_BASE_ADDR 0xE0000000
#if defined(CONFIG_CPU_CORTEX_M0) || defined(CONFIG_CPU_CORTEX_M0PLUS) || \
	defined(CONFIG_CPU_CORTEX_M1)
#define _PPB_INT_RSVD_0 0xE0000000
#define _PPB_INT_DWT 0xE0001000
#define _PPB_INT_BPU 0xE0002000
#define _PPB_INT_RSVD_1 0xE0003000
#define _PPB_INT_SCS 0xE000E000
#define _PPB_INT_RSVD_2 0xE000F000
#elif defined(CONFIG_CPU_CORTEX_M3) || defined(CONFIG_CPU_CORTEX_M4) || defined(CONFIG_CPU_CORTEX_M7)
#define _PPB_INT_ITM 0xE0000000
#define _PPB_INT_DWT 0xE0001000
#define _PPB_INT_FPB 0xE0002000
#define _PPB_INT_RSVD_1 0xE0003000
#define _PPB_INT_SCS 0xE000E000
#define _PPB_INT_RSVD_2 0xE000F000
#elif defined(CONFIG_CPU_CORTEX_M23) || \
	defined(CONFIG_CPU_CORTEX_M33) || \
	defined(CONFIG_CPU_CORTEX_M55) || \
	defined(CONFIG_CPU_CORTEX_M85)
#define _PPB_INT_RSVD_0 0xE0000000
#define _PPB_INT_SCS 0xE000E000
#define _PPB_INT_SCB 0xE000ED00
#define _PPB_INT_RSVD_1 0xE002E000
#else
#error Unknown CPU
#endif
#define _PPB_INT_END_ADDR 0xE003FFFF
/* 0xe0000000 -> 0xe00fffff: private peripheral bus */
/* 0xe0040000 -> 0xe00fffff: external [768K] */
#define _PPB_EXT_BASE_ADDR 0xE0040000
#if defined(CONFIG_CPU_CORTEX_M0) || defined(CONFIG_CPU_CORTEX_M0PLUS) \
	|| defined(CONFIG_CPU_CORTEX_M1) || defined(CONFIG_CPU_CORTEX_M23)
#elif defined(CONFIG_CPU_CORTEX_M3) || defined(CONFIG_CPU_CORTEX_M4)
#define _PPB_EXT_TPIU 0xE0040000
#define _PPB_EXT_ETM 0xE0041000
#define _PPB_EXT_PPB 0xE0042000
#define _PPB_EXT_ROM_TABLE 0xE00FF000
/* NOTE(review): _PPB_EXT_END_ADDR is also defined unconditionally below
 * with an identical value; identical redefinition is well-defined C. */
#define _PPB_EXT_END_ADDR 0xE00FFFFF
#elif defined(CONFIG_CPU_CORTEX_M33) || defined(CONFIG_CPU_CORTEX_M55) \
	|| defined(CONFIG_CPU_CORTEX_M85)
#undef _PPB_EXT_BASE_ADDR
#define _PPB_EXT_BASE_ADDR 0xE0044000
#define _PPB_EXT_ROM_TABLE 0xE00FF000
#define _PPB_EXT_END_ADDR 0xE00FFFFF
#elif defined(CONFIG_CPU_CORTEX_M7)
/* NOTE(review): redefinition without #undef, but the value matches the
 * default above, so this is a benign identical redefinition. */
#define _PPB_EXT_BASE_ADDR 0xE0040000
#define _PPB_EXT_RSVD_TPIU 0xE0040000
#define _PPB_EXT_ETM 0xE0041000
#define _PPB_EXT_CTI 0xE0042000
#define _PPB_EXT_PPB 0xE0043000
#define _PPB_EXT_PROC_ROM_TABLE 0xE00FE000
#define _PPB_EXT_PPB_ROM_TABLE 0xE00FF000
#else
#error Unknown CPU
#endif
#define _PPB_EXT_END_ADDR 0xE00FFFFF
/* 0xe0100000 -> 0xffffffff: vendor-specific [0.5GB-1MB or 511MB] */
#define _VENDOR_BASE_ADDR 0xE0100000
#define _VENDOR_END_ADDR 0xFFFFFFFF
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_M_MEMORY_MAP_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_m/memory_map.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,382 |
```objective-c
/*
* ARMv7 MMU support
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_AARCH32_ARM_MMU_H_
#define ZEPHYR_INCLUDE_ARCH_AARCH32_ARM_MMU_H_
#ifndef _ASMLANGUAGE
/*
 * Comp.:
 * ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition,
 * ARM document ID DDI0406C Rev. d, March 2018
 * Memory type definitions:
 * Table B3-10, chap. B3.8.2, p. B3-1363f.
 * Outer / inner cache attributes for cacheable memory:
 * Table B3-11, chap. B3.8.2, p. B3-1364
 */
/*
 * The following definitions are used when specifying a memory
 * range to be mapped at boot time using the MMU_REGION_ENTRY
 * macro.
 *
 * MT_* select the memory type (mutually exclusive, see MT_MASK),
 * MPERM_* the read/write/execute/unprivileged permissions, and
 * MATTR_* the remaining attributes (security, shareability, and the
 * outer/inner cache policies from the tables referenced above).
 */
#define MT_STRONGLY_ORDERED BIT(0)
#define MT_DEVICE BIT(1)
#define MT_NORMAL BIT(2)
#define MT_MASK 0x7
#define MPERM_R BIT(3)
#define MPERM_W BIT(4)
#define MPERM_X BIT(5)
#define MPERM_UNPRIVILEGED BIT(6)
#define MATTR_NON_SECURE BIT(7)
#define MATTR_NON_GLOBAL BIT(8)
#define MATTR_SHARED BIT(9)
#define MATTR_CACHE_OUTER_WB_WA BIT(10)
#define MATTR_CACHE_OUTER_WT_nWA BIT(11)
#define MATTR_CACHE_OUTER_WB_nWA BIT(12)
#define MATTR_CACHE_INNER_WB_WA BIT(13)
#define MATTR_CACHE_INNER_WT_nWA BIT(14)
#define MATTR_CACHE_INNER_WB_nWA BIT(15)
#define MATTR_MAY_MAP_L1_SECTION BIT(16)
/*
 * The following macros are used for adding constant entries
 * mmu_regions array of the mmu_config struct. Use MMU_REGION_ENTRY
 * for the specification of mappings whose PA and VA differ,
 * the use of MMU_REGION_FLAT_ENTRY always results in an identity
 * mapping, which are used for the mappings of the Zephyr image's
 * code and data.
 *
 * Arguments: _name (display string), _base_pa / _base_va (physical /
 * virtual base addresses), _size (bytes), _attrs (OR of the MT_*,
 * MPERM_* and MATTR_* flags above).
 */
#define MMU_REGION_ENTRY(_name, _base_pa, _base_va, _size, _attrs) \
	{\
		.name = _name, \
		.base_pa = _base_pa, \
		.base_va = _base_va, \
		.size = _size, \
		.attrs = _attrs, \
	}
#define MMU_REGION_FLAT_ENTRY(name, adr, sz, attrs) \
	MMU_REGION_ENTRY(name, adr, adr, sz, attrs)
/**
 * @brief Auto generate mmu region entry for node_id
 *
 * Example usage:
 *
 * @code{.c}
 *      DT_FOREACH_STATUS_OKAY_VARGS(nxp_imx_gpio,
 *				  MMU_REGION_DT_FLAT_ENTRY,
 *				  (MT_DEVICE_nGnRnE | MT_P_RW_U_NA | MT_NS))
 * @endcode
 *
 * @note Since devicetree_generated.h does not include
 *	 node_id##_P_reg_FOREACH_PROP_ELEM* definitions,
 *	 we can't automate dts node with multiple reg
 *	 entries.
 */
#define MMU_REGION_DT_FLAT_ENTRY(node_id, attrs) \
	MMU_REGION_FLAT_ENTRY(DT_NODE_FULL_NAME(node_id), \
			      DT_REG_ADDR(node_id), \
			      DT_REG_SIZE(node_id), \
			      attrs),
/**
 * @brief Auto generate mmu region entry for status = "okay"
 *        nodes compatible to a driver
 *
 * Example usage:
 *
 * @code{.c}
 *      MMU_REGION_DT_COMPAT_FOREACH_FLAT_ENTRY(nxp_imx_gpio,
 *				  (MT_DEVICE_nGnRnE | MT_P_RW_U_NA | MT_NS))
 * @endcode
 *
 * @note This is a wrapper of @ref MMU_REGION_DT_FLAT_ENTRY
 */
#define MMU_REGION_DT_COMPAT_FOREACH_FLAT_ENTRY(compat, attr) \
	DT_FOREACH_STATUS_OKAY_VARGS(compat, \
			MMU_REGION_DT_FLAT_ENTRY, attr)
/* Region definition data structure */
struct arm_mmu_region {
	/* Region Base Physical Address */
	uintptr_t base_pa;
	/* Region Base Virtual Address */
	uintptr_t base_va;
	/* Region size */
	size_t size;
	/* Region Name */
	const char *name;
	/* Region Attributes (OR of MT_*, MPERM_* and MATTR_* flags) */
	uint32_t attrs;
};
/* MMU configuration data structure */
struct arm_mmu_config {
	/* Number of regions */
	uint32_t num_regions;
	/* Regions */
	const struct arm_mmu_region *mmu_regions;
};
/*
 * Reference to the MMU configuration.
 *
 * This struct is defined and populated for each SoC (in the SoC definition),
 * and holds the build-time configuration information for the fixed MMU
 * regions enabled during kernel initialization.
 */
extern const struct arm_mmu_config mmu_config;
/* One-time boot-time MMU initialization; returns 0 on success. */
int z_arm_mmu_init(void);
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_AARCH32_ARM_MMU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/mmu/arm_mmu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,098 |
```objective-c
/*
*
*/
/**
 * @file
 * @brief ARM AArch32 Cortex-A and Cortex-R public exception handling
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_EXCEPTION_H_
#ifdef _ASMLANGUAGE
GTEXT(z_arm_exc_exit);
#else
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Registers s16-s31 (d8-d15, q4-q7) must be preserved across subroutine calls.
 *
 * Registers s0-s15 (d0-d7, q0-q3) do not have to be preserved (and can be used
 * for passing arguments or returning results in standard procedure-call variants).
 *
 * Registers d16-d31 (q8-q15), do not have to be preserved.
 */
/* Caller-saved FPU state captured on exception entry. */
struct __fpu_sf {
	uint32_t s[16]; /* s0~s15 (d0-d7) */
#ifdef CONFIG_VFP_FEATURE_REGS_S64_D32
	uint64_t d[16]; /* d16~d31 */
#endif
	uint32_t fpscr; /* floating-point status and control register */
	/* padding/slot keeping the frame size consistent with the
	 * assembly save/restore code — do not remove */
	uint32_t undefined;
};
#endif
/* Additional register state that is not stacked by hardware on exception
 * entry.
 *
 * These fields are ONLY valid in the ESF copy passed into z_arm_fatal_error().
 * When information for a member is unavailable, the field is set to zero.
 */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
struct __extra_esf_info {
	_callee_saved_t *callee; /* callee-saved registers at fault time */
	uint32_t msp;            /* main stack pointer */
	uint32_t exc_return;     /* EXC_RETURN value of the exception */
};
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
/* ARM GPRs are often designated by two different names */
#define sys_define_gpr_with_alias(name1, name2) union { uint32_t name1, name2; }
/* Exception stack frame: layout must match the assembly exception
 * entry/exit code, so member order is significant. */
struct arch_esf {
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	struct __extra_esf_info extra_info;
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	struct __fpu_sf fpu;
#endif
	/* Basic frame: caller-saved GPRs plus pc/xpsr. Each register is
	 * accessible by both its APCS alias (a1..a4, ip, lr, pc) and its
	 * numeric name (r0..r3, r12, r14, r15). */
	struct __basic_sf {
		sys_define_gpr_with_alias(a1, r0);
		sys_define_gpr_with_alias(a2, r1);
		sys_define_gpr_with_alias(a3, r2);
		sys_define_gpr_with_alias(a4, r3);
		sys_define_gpr_with_alias(ip, r12);
		sys_define_gpr_with_alias(lr, r14);
		sys_define_gpr_with_alias(pc, r15);
		uint32_t xpsr;
	} basic;
};
/* Stack pointer captured for core dumps at fault time. */
extern uint32_t z_arm_coredump_fault_sp;
/* Exception epilogue; @a fatal indicates an unrecoverable fault. */
extern void z_arm_exc_exit(bool fatal);
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 596 |
```linker script
/*
*
*/
/**
 * @file
 * @brief Linker command/script file
 *
 * Linker script for the Cortex-M platforms.
 */
#include <zephyr/linker/sections.h>
#include <zephyr/devicetree.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
/* physical address of RAM */
#ifdef CONFIG_XIP
#define ROMABLE_REGION FLASH
#else
#define ROMABLE_REGION RAM
#endif
#define RAMABLE_REGION RAM
/* Region of the irq vectors and boot-vector SP/PC */
#if defined(CONFIG_ROMSTART_RELOCATION_ROM)
#define ROMSTART_ADDR CONFIG_ROMSTART_REGION_ADDRESS
#define ROMSTART_SIZE (CONFIG_ROMSTART_REGION_SIZE * 1K)
#else
/* Without relocation the ROM-start content lives at the beginning of the
 * regular ROMable region. */
#define ROMSTART_REGION ROMABLE_REGION
#endif
/* ROM base/size: for RAM-only targets (no XIP, no flash) the "ROM" is
 * simply carved from RAM; otherwise it starts at the configured flash
 * base plus load offset. */
#if !defined(CONFIG_XIP) && (CONFIG_FLASH_SIZE == 0)
#define ROM_ADDR RAM_ADDR
#else
#define ROM_ADDR (CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET)
#endif
#if defined(CONFIG_ROM_END_OFFSET)
#define ROM_END_OFFSET CONFIG_ROM_END_OFFSET
#else
#define ROM_END_OFFSET 0
#endif
#if CONFIG_FLASH_LOAD_SIZE > 0
#define ROM_SIZE (CONFIG_FLASH_LOAD_SIZE - ROM_END_OFFSET)
#else
#define ROM_SIZE (CONFIG_FLASH_SIZE * 1024 - CONFIG_FLASH_LOAD_OFFSET - ROM_END_OFFSET)
#endif
/* RAM base/size: a bootloader build occupies only the top
 * CONFIG_BOOTLOADER_SRAM_SIZE KiB of SRAM; a non-XIP application leaves
 * that same amount free for a resident bootloader. */
#if defined(CONFIG_XIP)
#if defined(CONFIG_IS_BOOTLOADER)
#define RAM_SIZE (CONFIG_BOOTLOADER_SRAM_SIZE * 1K)
#define RAM_ADDR (CONFIG_SRAM_BASE_ADDRESS + \
	(CONFIG_SRAM_SIZE * 1K - RAM_SIZE))
#else
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#endif
#else
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K - CONFIG_BOOTLOADER_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#endif
#if defined(CONFIG_CUSTOM_SECTION_ALIGN)
_region_min_align = CONFIG_CUSTOM_SECTION_MIN_ALIGN_SIZE;
#else
/* Set alignment to CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
 * to make linker section alignment comply with MPU granularity.
 */
#if defined(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
_region_min_align = CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE;
#else
/* If building without MPU support, use default 4-byte alignment. */
_region_min_align = 4;
#endif
#endif
/* ARMv7-M MPUs require power-of-two region sizes and base alignment;
 * MPU_ALIGN rounds the location counter accordingly. */
#if !defined(CONFIG_CUSTOM_SECTION_ALIGN) && defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define MPU_ALIGN(region_size) \
	. = ALIGN(_region_min_align); \
	. = ALIGN( 1 << LOG2CEIL(region_size))
#else
#define MPU_ALIGN(region_size) \
	. = ALIGN(_region_min_align)
#endif
#include <zephyr/linker/linker-devnull.h>
MEMORY
{
#if defined(CONFIG_ROMSTART_RELOCATION_ROM)
	ROMSTART_REGION (rx) : ORIGIN = ROMSTART_ADDR, LENGTH = ROMSTART_SIZE
#endif
	FLASH (rx) : ORIGIN = ROM_ADDR, LENGTH = ROM_SIZE
	RAM (wx) : ORIGIN = RAM_ADDR, LENGTH = RAM_SIZE
#if defined(CONFIG_LINKER_DEVNULL_MEMORY)
	DEVNULL_ROM (rx) : ORIGIN = DEVNULL_ADDR, LENGTH = DEVNULL_SIZE
#endif
	LINKER_DT_REGIONS()
	/* Used by and documented in include/linker/intlist.ld */
	IDT_LIST (wx) : ORIGIN = 0xFFFF7FFF, LENGTH = 32K
}
ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS
{
#include <zephyr/linker/rel-sections.ld>
#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
	/*
	 * .plt and .iplt are here according to 'arm-zephyr-elf-ld --verbose',
	 * before text section.
	 */
	/DISCARD/ :
	{
		*(.plt)
	}
	/DISCARD/ :
	{
		*(.iplt)
	}
	GROUP_START(ROMABLE_REGION)
	__rom_region_start = ROM_ADDR;
	SECTION_PROLOGUE(rom_start,,)
	{
		/* Located in generated directory. This file is populated by calling
		 * zephyr_linker_sources(ROM_START ...). This typically contains the vector
		 * table and debug information.
		 */
#include <snippets-rom-start.ld>
	} GROUP_LINK_IN(ROMSTART_REGION)
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_relocate.ld>
#endif /* CONFIG_CODE_DATA_RELOCATION */
	SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
	{
		__text_region_start = .;
#include <zephyr/linker/kobject-text.ld>
		*(.text)
		*(".text.*")
		*(".TEXT.*")
		*(.gnu.linkonce.t.*)
		/*
		 * These are here according to 'arm-zephyr-elf-ld --verbose',
		 * after .gnu.linkonce.t.*
		 */
		*(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
		. = ALIGN(4);
	} GROUP_LINK_IN(ROMABLE_REGION)
	__text_region_end = .;
#if defined (CONFIG_CPP)
	SECTION_PROLOGUE(.ARM.extab,,)
	{
		/*
		 * .ARM.extab section containing exception unwinding information.
		 */
		*(.ARM.extab* .gnu.linkonce.armextab.*)
	} GROUP_LINK_IN(ROMABLE_REGION)
#endif
	SECTION_PROLOGUE(.ARM.exidx,,)
	{
		/*
		 * This section, related to stack and exception unwinding, is placed
		 * explicitly to prevent it from being shared between multiple regions.
		 * It must be defined for gcc to support 64-bit math and avoid
		 * section overlap.
		 */
		__exidx_start = .;
#if defined (__GCC_LINKER_CMD__) || defined (__LLD_LINKER_CMD__)
		*(.ARM.exidx* gnu.linkonce.armexidx.*)
#endif
		__exidx_end = .;
	} GROUP_LINK_IN(ROMABLE_REGION)
	__rodata_region_start = .;
#include <zephyr/linker/common-rom.ld>
	/* Located in generated directory. This file is populated by calling
	 * zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
	 */
#include <snippets-rom-sections.ld>
#include <zephyr/linker/thread-local-storage.ld>
	SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
	{
		*(.rodata)
		*(".rodata.*")
		*(.gnu.linkonce.r.*)
		/* Located in generated directory. This file is populated by the
		 * zephyr_linker_sources() Cmake function.
		 */
#include <snippets-rodata.ld>
#include <zephyr/linker/kobject-rom.ld>
		/*
		 * For XIP images, in order to avoid the situation when __data_rom_start
		 * is 32-bit aligned, but the actual data is placed right after rodata
		 * section, which may not end exactly at 32-bit border, pad rodata
		 * section, so __data_rom_start points at data and it is 32-bit aligned.
		 *
		 * On non-XIP images this may enlarge image size up to 3 bytes. This
		 * generally is not an issue, since modern ROM and FLASH memory is
		 * usually 4k aligned.
		 */
		. = ALIGN(4);
	} GROUP_LINK_IN(ROMABLE_REGION)
#include <zephyr/linker/cplusplus-rom.ld>
#if defined(CONFIG_BUILD_ALIGN_LMA)
	/*
	 * Include a padding section here to make sure that the LMA address
	 * of the sections in the RAMABLE_REGION are aligned with those
	 * section's VMA alignment requirements.
	 */
	SECTION_PROLOGUE(padding_section,,)
	{
		__rodata_region_end = .;
		MPU_ALIGN(__rodata_region_end - ADDR(rom_start));
	} GROUP_LINK_IN(ROMABLE_REGION)
#else
	__rodata_region_end = .;
	MPU_ALIGN(__rodata_region_end - ADDR(rom_start));
#endif
	__rom_region_end = __rom_region_start + . - ADDR(rom_start);
	GROUP_END(ROMABLE_REGION)
	/*
	 * These are here according to 'arm-zephyr-elf-ld --verbose',
	 * before data section.
	 */
	/DISCARD/ : {
		*(.got.plt)
		*(.igot.plt)
		*(.got)
		*(.igot)
	}
	GROUP_START(RAMABLE_REGION)
	. = RAM_ADDR;
	/* Align the start of image RAM with the
	 * minimum granularity required by MPU.
	 */
	. = ALIGN(_region_min_align);
	_image_ram_start = .;
	/* Located in generated directory. This file is populated by the
	 * zephyr_linker_sources() Cmake function.
	 */
#include <snippets-ram-sections.ld>
	/* With userspace enabled, app shared memory and BSS are placed first
	 * so they can be covered by MPU partitions. */
#if defined(CONFIG_USERSPACE)
#define APP_SHARED_ALIGN . = ALIGN(_region_min_align);
#define SMEM_PARTITION_ALIGN MPU_ALIGN
#include <app_smem.ld>
	_app_smem_size = _app_smem_end - _app_smem_start;
	_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
	SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
	{
		/*
		 * For performance, BSS section is assumed to be 4 byte aligned and
		 * a multiple of 4 bytes
		 */
		. = ALIGN(4);
		__bss_start = .;
		__kernel_ram_start = .;
		*(.bss)
		*(".bss.*")
		*(COMMON)
		*(".kernel_bss.*")
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_bss_relocate.ld>
#endif
		/*
		 * As memory is cleared in words only, it is simpler to ensure the BSS
		 * section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
		 */
		__bss_end = ALIGN(4);
	} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
#include <zephyr/linker/common-noinit.ld>
#endif /* CONFIG_USERSPACE */
	GROUP_START(DATA_REGION)
	SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
	{
		__data_region_start = .;
		__data_start = .;
		*(.data)
		*(".data.*")
		*(".kernel.*")
		/* Located in generated directory. This file is populated by the
		 * zephyr_linker_sources() Cmake function.
		 */
#include <snippets-rwdata.ld>
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_data_relocate.ld>
#endif
		__data_end = .;
	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
	__data_size = __data_end - __data_start;
	__data_load_start = LOADADDR(_DATA_SECTION_NAME);
	__data_region_load_start = LOADADDR(_DATA_SECTION_NAME);
#include <zephyr/linker/common-ram.ld>
#include <zephyr/linker/kobject-data.ld>
#include <zephyr/linker/cplusplus-ram.ld>
	/* Located in generated directory. This file is populated by the
	 * zephyr_linker_sources() Cmake function.
	 */
#include <snippets-data-sections.ld>
	__data_region_end = .;
	/* Without userspace the BSS/noinit sections follow the data section
	 * instead of preceding it. */
#ifndef CONFIG_USERSPACE
	SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
	{
		/*
		 * For performance, BSS section is assumed to be 4 byte aligned and
		 * a multiple of 4 bytes
		 */
		. = ALIGN(4);
		__bss_start = .;
		__kernel_ram_start = .;
		*(.bss)
		*(".bss.*")
		*(COMMON)
		*(".kernel_bss.*")
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_bss_relocate.ld>
#endif
		/*
		 * As memory is cleared in words only, it is simpler to ensure the BSS
		 * section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
		 */
		__bss_end = ALIGN(4);
	} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
	SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),)
	{
		/*
		 * This section is used for non-initialized objects that
		 * will not be cleared during the boot process.
		 */
		*(.noinit)
		*(".noinit.*")
		*(".kernel_noinit.*")
		/* Located in generated directory. This file is populated by the
		 * zephyr_linker_sources() Cmake function.
		 */
#include <snippets-noinit.ld>
	} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
#endif /* CONFIG_USERSPACE */
	/* Define linker symbols */
	__kernel_ram_end = RAM_ADDR + RAM_SIZE;
	__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
	/* Optional tightly-coupled memories, present only when chosen in the
	 * devicetree; ITCM/DTCM data is loaded from ROM (AT> ROMABLE_REGION). */
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_itcm), okay)
	GROUP_START(ITCM)
	SECTION_PROLOGUE(_ITCM_SECTION_NAME,,SUBALIGN(4))
	{
		__itcm_start = .;
		*(.itcm)
		*(".itcm.*")
		/* Located in generated directory. This file is populated by the
		 * zephyr_linker_sources() Cmake function. */
#include <snippets-itcm-section.ld>
		__itcm_end = .;
	} GROUP_LINK_IN(ITCM AT> ROMABLE_REGION)
	__itcm_size = __itcm_end - __itcm_start;
	__itcm_load_start = LOADADDR(_ITCM_SECTION_NAME);
	GROUP_END(ITCM)
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
	GROUP_START(DTCM)
	SECTION_PROLOGUE(_DTCM_BSS_SECTION_NAME, (NOLOAD),SUBALIGN(4))
	{
		__dtcm_start = .;
		__dtcm_bss_start = .;
		*(.dtcm_bss)
		*(".dtcm_bss.*")
		__dtcm_bss_end = .;
	} GROUP_LINK_IN(DTCM)
	SECTION_PROLOGUE(_DTCM_NOINIT_SECTION_NAME, (NOLOAD),SUBALIGN(4))
	{
		__dtcm_noinit_start = .;
		*(.dtcm_noinit)
		*(".dtcm_noinit.*")
		__dtcm_noinit_end = .;
	} GROUP_LINK_IN(DTCM)
	SECTION_PROLOGUE(_DTCM_DATA_SECTION_NAME,,SUBALIGN(4))
	{
		__dtcm_data_start = .;
		*(.dtcm_data)
		*(".dtcm_data.*")
		/* Located in generated directory. This file is populated by the
		 * zephyr_linker_sources() Cmake function. */
#include <snippets-dtcm-section.ld>
		__dtcm_data_end = .;
	} GROUP_LINK_IN(DTCM AT> ROMABLE_REGION)
	__dtcm_end = .;
	__dtcm_data_load_start = LOADADDR(_DTCM_DATA_SECTION_NAME);
	GROUP_END(DTCM)
#endif
	/* Located in generated directory. This file is populated by the
	 * zephyr_linker_sources() Cmake function.
	 */
#include <snippets-sections.ld>
#include <zephyr/linker/ram-end.ld>
	GROUP_END(RAMABLE_REGION)
#include <zephyr/linker/debug-sections.ld>
	/DISCARD/ : { *(.note.GNU-stack) }
	SECTION_PROLOGUE(.ARM.attributes, 0,)
	{
		KEEP(*(.ARM.attributes))
		KEEP(*(.gnu.attributes))
	}
	/* Output section descriptions are needed for these sections to suppress
	 * warnings when "--orphan-handling=warn" is set for lld.
	 */
#if defined(CONFIG_LLVM_USE_LLD)
	SECTION_PROLOGUE(.symtab, 0,) { *(.symtab) }
	SECTION_PROLOGUE(.strtab, 0,) { *(.strtab) }
	SECTION_PROLOGUE(.shstrtab, 0,) { *(.shstrtab) }
#endif
	/* Sections generated from 'zephyr,memory-region' nodes */
	LINKER_DT_SECTIONS()
	/* Must be last in romable region */
	SECTION_PROLOGUE(.last_section,,)
	{
#ifdef CONFIG_LINKER_LAST_SECTION_ID
		/* Fill last section with a word to ensure location counter and actual rom
		 * region data usage match. */
		LONG(CONFIG_LINKER_LAST_SECTION_ID_PATTERN)
#endif
	} GROUP_LINK_IN(ROMABLE_REGION)
	/* To provide the image size as a const expression,
	 * calculate this value here. */
	_flash_used = LOADADDR(.last_section) + SIZEOF(.last_section) - __rom_region_start;
}
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_m/scripts/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,634 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_CPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_CPU_H_
#if defined(CONFIG_ARM_MPU)
#include <zephyr/arch/arm/cortex_a_r/mpu.h>
#endif
/*
 * SCTLR register bit assignments
 */
#define SCTLR_MPU_ENABLE (1 << 0)
/* CPSR M[4:0] processor mode encodings */
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_ABT 0x17
#define MODE_HYP 0x1a
#define MODE_UND 0x1b
#define MODE_SYS 0x1f
#define MODE_MASK 0x1f
/* CPSR control/state bits */
#define E_BIT (1 << 9)
#define A_BIT (1 << 8)
#define I_BIT (1 << 7)
#define F_BIT (1 << 6)
#define T_BIT (1 << 5)
#define HIVECS (1 << 13)
/*
 * CPACR access rights for coprocessors 10/11 (FPU/SIMD):
 * CPACR_NA = no access, CPACR_FA = full access.
 *
 * Fix: the shift argument is now fully parenthesized; the previous
 * (r << 20) form mis-expanded compound arguments such as (a | b)
 * because '<<' binds tighter than '|'.
 */
#define CPACR_NA (0U)
#define CPACR_FA (3U)
#define CPACR_CP10(r) ((r) << 20)
#define CPACR_CP11(r) ((r) << 22)
#define FPEXC_EN (1 << 30)
/* Data Fault Status Register fields */
#define DFSR_DOMAIN_SHIFT (4)
#define DFSR_DOMAIN_MASK (0xf)
#define DFSR_FAULT_4_MASK (1 << 10)
#define DFSR_WRITE_MASK (1 << 11)
#define DFSR_AXI_SLAVE_MASK (1 << 12)
/* Armv8-R AArch32 architecture profile */
#define VBAR_MASK (0xFFFFFFE0U)
#define SCTLR_M_BIT BIT(0)
#define SCTLR_A_BIT BIT(1)
#define SCTLR_C_BIT BIT(2)
#define SCTLR_I_BIT BIT(12)
/* Hyp System Control Register */
#define HSCTLR_RES1 (BIT(29) | BIT(28) | BIT(23) | \
		     BIT(22) | BIT(18) | BIT(16) | \
		     BIT(11) | BIT(4)  | BIT(3))
/* Hyp Auxiliary Control Register */
#define HACTLR_CPUACTLR BIT(0)
#define HACTLR_CDBGDCI BIT(1)
#define HACTLR_FLASHIFREGIONR BIT(7)
#define HACTLR_PERIPHPREGIONR BIT(8)
#define HACTLR_QOSR_BIT BIT(9)
#define HACTLR_BUSTIMEOUTR_BIT BIT(10)
#define HACTLR_INTMONR_BIT BIT(12)
#define HACTLR_ERR_BIT BIT(13)
/* Boot-time HACTLR value: grant EL1 access to the listed IMP DEF registers */
#define HACTLR_INIT (HACTLR_ERR_BIT | HACTLR_INTMONR_BIT | \
		     HACTLR_BUSTIMEOUTR_BIT | HACTLR_QOSR_BIT | \
		     HACTLR_PERIPHPREGIONR | HACTLR_FLASHIFREGIONR | \
		     HACTLR_CDBGDCI | HACTLR_CPUACTLR)
/* ARMv8 Timer */
#define CNTV_CTL_ENABLE_BIT BIT(0)
#define CNTV_CTL_IMASK_BIT BIT(1)
/* Interrupt Controller System Register Enable Register */
#define ICC_SRE_ELx_SRE_BIT BIT(0)
#define ICC_SRE_ELx_DFB_BIT BIT(1)
#define ICC_SRE_ELx_DIB_BIT BIT(2)
#define ICC_SRE_EL3_EN_BIT BIT(3)
/* MPIDR: extract the 8-bit affinity field at the given level (0..2) */
#define MPIDR_AFFLVL_MASK (0xff)
#define MPIDR_AFF0_SHIFT (0)
#define MPIDR_AFF1_SHIFT (8)
#define MPIDR_AFF2_SHIFT (16)
#define MPIDR_AFFLVL(mpidr, aff_level) \
	(((mpidr) >> MPIDR_AFF##aff_level##_SHIFT) & MPIDR_AFFLVL_MASK)
#define GET_MPIDR() read_sysreg(mpidr)
#define MPIDR_TO_CORE(mpidr) MPIDR_AFFLVL(mpidr, 0)
/* ICC SGI macros: build a 64-bit value for the ICC_SGI*R registers from
 * the affinity fields, interrupt ID, routing mode and target list. */
#define SGIR_TGT_MASK (0xffff)
#define SGIR_AFF1_SHIFT (16)
#define SGIR_AFF2_SHIFT (32)
#define SGIR_AFF3_SHIFT (48)
#define SGIR_AFF_MASK (0xff)
#define SGIR_INTID_SHIFT (24)
#define SGIR_INTID_MASK (0xf)
#define SGIR_IRM_SHIFT (40)
#define SGIR_IRM_MASK (0x1)
#define SGIR_IRM_TO_AFF (0)
#define GICV3_SGIR_VALUE(_aff3, _aff2, _aff1, _intid, _irm, _tgt) \
	((((uint64_t) (_aff3) & SGIR_AFF_MASK) << SGIR_AFF3_SHIFT) | \
	 (((uint64_t) (_irm) & SGIR_IRM_MASK) << SGIR_IRM_SHIFT) | \
	 (((uint64_t) (_aff2) & SGIR_AFF_MASK) << SGIR_AFF2_SHIFT) | \
	 (((_intid) & SGIR_INTID_MASK) << SGIR_INTID_SHIFT) | \
	 (((_aff1) & SGIR_AFF_MASK) << SGIR_AFF1_SHIFT) | \
	 ((_tgt) & SGIR_TGT_MASK))
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_CPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/cpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,202 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_ARMV8_TIMER_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_ARMV8_TIMER_H_
#ifndef _ASMLANGUAGE
#include <zephyr/drivers/timer/arm_arch_timer.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* This port drives the system tick from the ARMv8 virtual timer. */
#define ARM_ARCH_TIMER_IRQ ARM_TIMER_VIRTUAL_IRQ
#define ARM_ARCH_TIMER_PRIO ARM_TIMER_VIRTUAL_PRIO
#define ARM_ARCH_TIMER_FLAGS ARM_TIMER_VIRTUAL_FLAGS
/** No hardware set-up required for the virtual timer. */
static ALWAYS_INLINE void arm_arch_timer_init(void)
{
}
/** Program the virtual timer compare value register with @a val. */
static ALWAYS_INLINE void arm_arch_timer_set_compare(uint64_t val)
{
	write_cntv_cval(val);
}
/** Start (non-zero @a enable) or stop the virtual timer. */
static ALWAYS_INLINE void arm_arch_timer_enable(unsigned char enable)
{
	uint64_t ctl = read_cntv_ctl();

	ctl = enable ? (ctl | CNTV_CTL_ENABLE_BIT)
		     : (ctl & ~CNTV_CTL_ENABLE_BIT);
	write_cntv_ctl(ctl);
}
/** Mask (@a mask true) or unmask the virtual timer interrupt. */
static ALWAYS_INLINE void arm_arch_timer_set_irq_mask(bool mask)
{
	uint64_t ctl = read_cntv_ctl();

	ctl = mask ? (ctl | CNTV_CTL_IMASK_BIT)
		   : (ctl & ~CNTV_CTL_IMASK_BIT);
	write_cntv_ctl(ctl);
}
/** @return the current 64-bit virtual counter value. */
static ALWAYS_INLINE uint64_t arm_arch_timer_count(void)
{
	return read_cntvct();
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_ARMV8_TIMER_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/armv8_timer.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 351 |
```objective-c
/*
*
*/
/* "Arch" bit manipulation functions in non-arch-specific C code (uses some
 * gcc builtins)
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_SYS_IO_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_SYS_IO_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/barrier.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Memory mapped registers I/O functions.
 *
 * Ordering discipline: reads issue the load first, then a full data
 * memory barrier; writes issue the barrier first, then the store. This
 * keeps device accesses ordered with respect to surrounding memory
 * operations.
 */
/** Read one byte from the memory-mapped register at @a addr. */
static ALWAYS_INLINE uint8_t sys_read8(mem_addr_t addr)
{
	uint8_t val;
	__asm__ volatile("ldrb %0, [%1]" : "=r" (val) : "r" (addr));
	barrier_dmem_fence_full();
	return val;
}
/** Write one byte @a data to the memory-mapped register at @a addr. */
static ALWAYS_INLINE void sys_write8(uint8_t data, mem_addr_t addr)
{
	barrier_dmem_fence_full();
	__asm__ volatile("strb %0, [%1]" : : "r" (data), "r" (addr));
}
/** Read a 16-bit halfword from the memory-mapped register at @a addr. */
static ALWAYS_INLINE uint16_t sys_read16(mem_addr_t addr)
{
	uint16_t val;
	__asm__ volatile("ldrh %0, [%1]" : "=r" (val) : "r" (addr));
	barrier_dmem_fence_full();
	return val;
}
/** Write a 16-bit halfword @a data to the memory-mapped register at @a addr. */
static ALWAYS_INLINE void sys_write16(uint16_t data, mem_addr_t addr)
{
	barrier_dmem_fence_full();
	__asm__ volatile("strh %0, [%1]" : : "r" (data), "r" (addr));
}
/** Read a 32-bit word from the memory-mapped register at @a addr. */
static ALWAYS_INLINE uint32_t sys_read32(mem_addr_t addr)
{
	uint32_t val;
	__asm__ volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
	barrier_dmem_fence_full();
	return val;
}
/** Write a 32-bit word @a data to the memory-mapped register at @a addr. */
static ALWAYS_INLINE void sys_write32(uint32_t data, mem_addr_t addr)
{
	barrier_dmem_fence_full();
	__asm__ volatile("str %0, [%1]" : : "r" (data), "r" (addr));
}
/* Read a 64-bit value via a single ldrd (register pair) access.
 * NOTE(review): ldrd presumably requires suitably aligned @a addr —
 * confirm against the target core's alignment rules before relying on
 * unaligned use. There is no sys_write64 counterpart in this header. */
static ALWAYS_INLINE uint64_t sys_read64(mem_addr_t addr)
{
	uint64_t val;
	__asm__ volatile("ldrd %Q0, %R0, [%1]" : "=r" (val) : "r" (addr));
	barrier_dmem_fence_full();
	return val;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_SYS_IO_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/sys_io.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 546 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TIMER_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TIMER_H_
#ifdef CONFIG_ARM_ARCH_TIMER
#ifndef _ASMLANGUAGE
#include <zephyr/drivers/timer/arm_arch_timer.h>
#include <zephyr/sys/device_mmio.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ARM_ARCH_TIMER_BASE DT_REG_ADDR_BY_IDX(ARM_TIMER_NODE, 0)
#define ARM_ARCH_TIMER_IRQ ARM_TIMER_VIRTUAL_IRQ
#define ARM_ARCH_TIMER_PRIO ARM_TIMER_VIRTUAL_PRIO
#define ARM_ARCH_TIMER_FLAGS ARM_TIMER_VIRTUAL_FLAGS
#define TIMER_CNT_LOWER 0x00
#define TIMER_CNT_UPPER 0x04
#define TIMER_CTRL 0x08
#define TIMER_ISR 0x0c
#define TIMER_CMP_LOWER 0x10
#define TIMER_CMP_UPPER 0x14
#define TIMER_IRQ_ENABLE BIT(2)
#define TIMER_COMP_ENABLE BIT(1)
#define TIMER_ENABLE BIT(0)
#define TIMER_ISR_EVENT_FLAG BIT(0)
DEVICE_MMIO_TOPLEVEL_STATIC(timer_regs, ARM_TIMER_NODE);
#define TIMER_REG_GET(offs) (DEVICE_MMIO_TOPLEVEL_GET(timer_regs) + offs)
static ALWAYS_INLINE void arm_arch_timer_init(void)
{
DEVICE_MMIO_TOPLEVEL_MAP(timer_regs, K_MEM_CACHE_NONE);
}
static ALWAYS_INLINE void arm_arch_timer_set_compare(uint64_t val)
{
uint32_t lower = (uint32_t)val;
uint32_t upper = (uint32_t)(val >> 32);
uint32_t ctrl;
/* Disable IRQ and comparator */
ctrl = sys_read32(TIMER_REG_GET(TIMER_CTRL));
ctrl &= ~(TIMER_COMP_ENABLE | TIMER_IRQ_ENABLE);
sys_write32(ctrl, TIMER_REG_GET(TIMER_CTRL));
sys_write32(lower, TIMER_REG_GET(TIMER_CMP_LOWER));
sys_write32(upper, TIMER_REG_GET(TIMER_CMP_UPPER));
/* enable comparator back, let set_irq_mask enabling the IRQ again */
ctrl |= TIMER_COMP_ENABLE;
sys_write32(ctrl, TIMER_REG_GET(TIMER_CTRL));
}
#if defined(CONFIG_ARM_ARCH_TIMER_ERRATUM_740657)
/*
* R/W access to the event flag register is required for the timer errata
* 740657 workaround -> comp. ISR implementation in arm_arch_timer.c.
* This functionality is not present in the aarch64 implementation of the
* ARM global timer access functions.
*
* comp. ARM Cortex-A9 processors Software Developers Errata Notice,
* ARM document ID032315.
*/
static ALWAYS_INLINE uint8_t arm_arch_timer_get_int_status(void)
{
return (uint8_t)(sys_read32(TIMER_REG_GET(TIMER_ISR)) & TIMER_ISR_EVENT_FLAG);
}
static ALWAYS_INLINE void arm_arch_timer_clear_int_status(void)
{
sys_write32(TIMER_ISR_EVENT_FLAG, TIMER_REG_GET(TIMER_ISR));
}
#endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */
static ALWAYS_INLINE void arm_arch_timer_enable(bool enable)
{
uint32_t ctrl;
ctrl = sys_read32(TIMER_REG_GET(TIMER_CTRL));
if (enable) {
ctrl |= TIMER_ENABLE;
} else {
ctrl &= ~TIMER_ENABLE;
}
sys_write32(ctrl, TIMER_REG_GET(TIMER_CTRL));
}
/*
 * Mask (disable) or unmask (enable) the timer interrupt.
 *
 * On unmask, the event flag is cleared (write of 1 to the ISR register)
 * before the IRQ enable bit is written back to the control register, so
 * a stale comparator event does not raise an interrupt immediately.
 */
static ALWAYS_INLINE void arm_arch_timer_set_irq_mask(bool mask)
{
	uint32_t ctrl;

	ctrl = sys_read32(TIMER_REG_GET(TIMER_CTRL));
	if (mask) {
		ctrl &= ~TIMER_IRQ_ENABLE;
	} else {
		ctrl |= TIMER_IRQ_ENABLE;
		/* Clear any stale event before re-enabling the IRQ. */
		sys_write32(1, TIMER_REG_GET(TIMER_ISR));
	}
	sys_write32(ctrl, TIMER_REG_GET(TIMER_CTRL));
}
/*
 * Read the 64-bit global timer counter.
 *
 * The two 32-bit halves cannot be read atomically, so the upper word is
 * re-read until it is stable around the lower-word read; this handles
 * the lower word rolling over mid-sequence.
 */
static ALWAYS_INLINE uint64_t arm_arch_timer_count(void)
{
	uint32_t lower;
	uint32_t upper, upper_saved;

	/* To get the value from the Global Timer Counter register proceed
	 * as follows:
	 * 1. Read the upper 32-bit timer counter register.
	 * 2. Read the lower 32-bit timer counter register.
	 * 3. Read the upper 32-bit timer counter register again. If the value
	 *    is different to the 32-bit upper value read previously,
	 *    go back to step 2.
	 *    Otherwise the 64-bit timer counter value is correct.
	 */
	upper = sys_read32(TIMER_REG_GET(TIMER_CNT_UPPER));
	do {
		upper_saved = upper;
		lower = sys_read32(TIMER_REG_GET(TIMER_CNT_LOWER));
		upper = sys_read32(TIMER_REG_GET(TIMER_CNT_UPPER));
	} while (upper != upper_saved);

	return ((uint64_t)upper) << 32 | lower;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_ARM_ARCH_TIMER */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TIMER_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/timer.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,027 |
```objective-c
/*
*/
/**
* @file
* @brief tpidruro bits allocation
*
* Among other things, the tpidruro holds the address for the current
* CPU's struct _cpu instance. But such a pointer is at least 4-bytes
* aligned. That leaves two free bits for other purposes.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TPIDRURO_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TPIDRURO_H_
/* Mask extracting the current CPU (struct _cpu) pointer from TPIDRURO;
 * the low two bits are left free for other flags, since the pointer is
 * at least 4-byte aligned (see file comment above).
 */
#define TPIDRURO_CURR_CPU 0xFFFFFFFCUL
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TPIDRURO_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/tpidruro.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 140 |
```objective-c
/*
*
*
* Armv8-R AArch32 architecture helpers.
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_LIB_HELPERS_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_LIB_HELPERS_H_
#ifndef _ASMLANGUAGE
#include <stdint.h>
/* Read a 32-bit CP15 system register identified by its MRC
 * (op1, CRn, CRm, op2) encoding.
 */
#define read_sysreg32(op1, CRn, CRm, op2) \
({ \
	uint32_t val; \
	__asm__ volatile ("mrc p15, " #op1 ", %0, c" #CRn ", c" \
			  #CRm ", " #op2 : "=r" (val) :: "memory"); \
	val; \
})

/* Write a 32-bit CP15 system register via its MCR encoding. */
#define write_sysreg32(val, op1, CRn, CRm, op2) \
({ \
	__asm__ volatile ("mcr p15, " #op1 ", %0, c" #CRn ", c" \
			  #CRm ", " #op2 :: "r" (val) : "memory"); \
})

/* Read a 64-bit CP15 system register via its MRRC (op1, CRm) encoding;
 * %Q0/%R0 select the low/high 32-bit halves of the 64-bit operand.
 */
#define read_sysreg64(op1, CRm) \
({ \
	uint64_t val; \
	__asm__ volatile ("mrrc p15, " #op1 ", %Q0, %R0, c" \
			  #CRm : "=r" (val) :: "memory"); \
	val; \
})

/* Write a 64-bit CP15 system register via its MCRR encoding. */
#define write_sysreg64(val, op1, CRm) \
({ \
	__asm__ volatile ("mcrr p15, " #op1 ", %Q0, %R0, c" \
			  #CRm :: "r" (val) : "memory"); \
})

/* Define read_<reg>() / write_<reg>() accessors for a 32-bit CP15
 * register.
 */
#define MAKE_REG_HELPER(reg, op1, CRn, CRm, op2) \
	static ALWAYS_INLINE uint32_t read_##reg(void) \
	{ \
		return read_sysreg32(op1, CRn, CRm, op2); \
	} \
	static ALWAYS_INLINE void write_##reg(uint32_t val) \
	{ \
		write_sysreg32(val, op1, CRn, CRm, op2); \
	}

/* Define read_<reg>() / write_<reg>() accessors for a 64-bit CP15
 * register.
 */
#define MAKE_REG64_HELPER(reg, op1, CRm) \
	static ALWAYS_INLINE uint64_t read_##reg(void) \
	{ \
		return read_sysreg64(op1, CRm); \
	} \
	static ALWAYS_INLINE void write_##reg(uint64_t val) \
	{ \
		write_sysreg64(val, op1, CRm); \
	}
/* Per-register accessors: each line generates read_<name>()/write_<name>()
 * for the CP15 register with the given encoding.
 */
MAKE_REG_HELPER(mpuir, 0, 0, 0, 4);
MAKE_REG_HELPER(mpidr, 0, 0, 0, 5);
MAKE_REG_HELPER(sctlr, 0, 1, 0, 0);
MAKE_REG_HELPER(prselr, 0, 6, 2, 1);
MAKE_REG_HELPER(prbar, 0, 6, 3, 0);
MAKE_REG_HELPER(prlar, 0, 6, 3, 1);
MAKE_REG_HELPER(mair0, 0, 10, 2, 0);
MAKE_REG_HELPER(vbar, 0, 12, 0, 0);
MAKE_REG_HELPER(cntv_ctl, 0, 14, 3, 1);
MAKE_REG_HELPER(ctr, 0, 0, 0, 1);
MAKE_REG_HELPER(tpidruro, 0, 13, 0, 3);
MAKE_REG64_HELPER(ICC_SGI1R, 0, 12);
MAKE_REG64_HELPER(cntvct, 1, 14);
MAKE_REG64_HELPER(cntv_cval, 3, 14);
/*
 * GIC v3 compatibility macros:
 * ARMv8 AArch32 profile has no mention of
 * ELx in the register names.
 * We define them anyway to reuse the GICv3 driver
 * made for AArch64.
 */
/* ICC_PMR */
MAKE_REG_HELPER(ICC_PMR_EL1, 0, 4, 6, 0);
/* ICC_IAR1 */
MAKE_REG_HELPER(ICC_IAR1_EL1, 0, 12, 12, 0);
/* ICC_EOIR1 */
MAKE_REG_HELPER(ICC_EOIR1_EL1, 0, 12, 12, 1);
/* ICC_SRE */
MAKE_REG_HELPER(ICC_SRE_EL1, 0, 12, 12, 5);
/* ICC_IGRPEN1 */
MAKE_REG_HELPER(ICC_IGRPEN1_EL1, 0, 12, 12, 7);
/* Generic accessors, dispatching to the per-register helpers above. */
#define write_sysreg(val, reg) write_##reg(val)
#define read_sysreg(reg) read_##reg()
/* ARM SEV (send event) / WFE (wait for event) instruction wrappers. */
#define sev() __asm__ volatile("sev" : : : "memory")
#define wfe() __asm__ volatile("wfe" : : : "memory")
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_LIB_HELPERS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/lib_helpers.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,136 |
```linker script
/*
*
*/
/* Set initial alignment to the 32 byte minimum for all MPUs: both the
 * exported _app_data_align symbol and the location counter are aligned
 * to that minimum.
 */
_app_data_align = 32;
. = ALIGN(32);
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/scripts/app_data_alignment.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 32 |
```objective-c
*
*/
#ifndef ARCH_ARM_CORTEX_R_MPU_H
#define ARCH_ARM_CORTEX_R_MPU_H 1

/* MPU Region Base Address Register: address field mask (regions are
 * 32-byte aligned, so the low 5 bits are not part of the address).
 */
#define MPU_RBAR_ADDR_Msk (~0x1f)
/* MPU Region Size and Enable Register (RASR) bit fields. */
#define MPU_RASR_ENABLE_Msk (1)
#define MPU_RASR_SIZE_Pos 1U
#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos)
/* DREGION field of the MPU Type register (number of supported regions). */
#define MPU_TYPE_DREGION_Pos 8U
#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos)
/* eXecute Never bit. */
#define MPU_RASR_XN_Pos 12
#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos)
/* Access Permission (AP) field. */
#define MPU_RASR_AP_Pos 8
#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos)
/* Memory attribute fields: Type Extension (TEX), Shareable, Cacheable,
 * Bufferable.
 */
#define MPU_RASR_TEX_Pos 3
#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos)
#define MPU_RASR_S_Pos 2
#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos)
#define MPU_RASR_C_Pos 1
#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos)
#define MPU_RASR_B_Pos 0
#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos)

/* RASR SIZE field encodings: region size in bytes = 2^(value + 1),
 * as the table below shows (0x07 -> 256B, 0x1f -> 4GB).
 */
#if defined(CONFIG_CPU_CORTEX_R4) || defined(CONFIG_CPU_CORTEX_R5)
#define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U)
#define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U)
#define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U)
#endif
#define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U)
#define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U)
#define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U)
#define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0aU)
#define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0bU)
#define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0cU)
#define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0dU)
#define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0eU)
#define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0fU)
#define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U)
#define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U)
#define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U)
#define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U)
#define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U)
#define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U)
#define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U)
#define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U)
#define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U)
#define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U)
#define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1aU)
#define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1bU)
#define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1cU)
#define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1dU)
#define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1eU)
#define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1fU)
#endif
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/mpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 892 |
```linker script
/*
*
*/
/**
* @file
* @brief Linker command/script file
*
* Linker script for the Cortex-A and Cortex-R platforms.
*/
#include <zephyr/linker/sections.h>
#include <zephyr/devicetree.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
/* physical address of RAM */
#ifdef CONFIG_XIP
#define ROMABLE_REGION FLASH
#else
#define ROMABLE_REGION RAM
#endif
#define RAMABLE_REGION RAM

/* Load (ROM) region base: flash when XIP or flash exists, else RAM. */
#if !defined(CONFIG_XIP) && (CONFIG_FLASH_SIZE == 0)
#define ROM_ADDR RAM_ADDR
#else
#define ROM_ADDR (CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET)
#endif

/* Space optionally reserved at the end of the ROM region. */
#if defined(CONFIG_ROM_END_OFFSET)
#define ROM_END_OFFSET CONFIG_ROM_END_OFFSET
#else
#define ROM_END_OFFSET 0
#endif

#if CONFIG_FLASH_LOAD_SIZE > 0
#define ROM_SIZE (CONFIG_FLASH_LOAD_SIZE - ROM_END_OFFSET)
#else
#define ROM_SIZE (CONFIG_FLASH_SIZE*1K - CONFIG_FLASH_LOAD_OFFSET - ROM_END_OFFSET)
#endif

#if defined(CONFIG_XIP)
#if defined(CONFIG_IS_BOOTLOADER)
/* Bootloader images run from a carve-out at the top of SRAM. */
#define RAM_SIZE (CONFIG_BOOTLOADER_SRAM_SIZE * 1K)
#define RAM_ADDR (CONFIG_SRAM_BASE_ADDRESS + \
		(CONFIG_SRAM_SIZE * 1K - RAM_SIZE))
#else
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#endif
#else
/* Non-XIP: the bootloader's SRAM share is subtracted from the image RAM. */
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K - CONFIG_BOOTLOADER_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#endif

/* Set alignment to CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
 * to make linker section alignment comply with MPU granularity.
 */
#if defined(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
_region_min_align = CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE;
#elif defined(CONFIG_ARM_AARCH32_MMU)
_region_min_align = CONFIG_MMU_PAGE_SIZE;
#else
/* If building without MPU/MMU support, use default 4-byte alignment. */
_region_min_align = 4;
#endif

/* MPUs that require power-of-two region sizes also need sections aligned
 * to the rounded-up region size, not just the minimum granularity.
 */
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define MPU_ALIGN(region_size) \
	. = ALIGN(_region_min_align); \
	. = ALIGN(1 << LOG2CEIL(region_size))
#else
#define MPU_ALIGN(region_size) \
	. = ALIGN(_region_min_align)
#endif

#define BSS_ALIGN ALIGN(_region_min_align)
#define MMU_ALIGN . = ALIGN(_region_min_align)
/* Memory regions: FLASH and RAM as computed above, plus any regions
 * declared via devicetree.
 */
MEMORY
{
	FLASH (rx) : ORIGIN = ROM_ADDR, LENGTH = ROM_SIZE
	RAM (wx) : ORIGIN = RAM_ADDR, LENGTH = RAM_SIZE
	LINKER_DT_REGIONS()
	/* Used by and documented in include/linker/intlist.ld */
	IDT_LIST (wx) : ORIGIN = 0xFFFF8000, LENGTH = 32K
}

ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS
{
#include <zephyr/linker/rel-sections.ld>

#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif

/*
 * .plt and .iplt are here according to 'arm-zephyr-elf-ld --verbose',
 * before text section.
 */
SECTION_PROLOGUE(.plt,,)
{
	*(.plt)
}

SECTION_PROLOGUE(.iplt,,)
{
	*(.iplt)
}

/* --- Code and read-only data, placed in the load (ROM) region --- */
GROUP_START(ROMABLE_REGION)

#if defined(CONFIG_XIP)
__rom_region_start = ROM_ADDR;
#else
__rom_region_start = RAM_ADDR;
#endif

SECTION_PROLOGUE(rom_start,,)
{
	/* Located in generated directory. This file is populated by calling
	 * zephyr_linker_sources(ROM_START ...). This typically contains the vector
	 * table and debug information.
	 */
#include <snippets-rom-start.ld>
} GROUP_LINK_IN(ROMABLE_REGION)

#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_relocate.ld>
#endif /* CONFIG_CODE_DATA_RELOCATION */

SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
	. = ALIGN(_region_min_align);
	__text_region_start = .;
#ifndef CONFIG_XIP
	z_mapped_start = .;
#endif
#include <zephyr/linker/kobject-text.ld>
	*(.text)
	*(".text.*")
	*(.gnu.linkonce.t.*)
	/*
	 * These are here according to 'arm-zephyr-elf-ld --verbose',
	 * after .gnu.linkonce.t.*
	 */
	*(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
	__text_region_end = .;
	. = ALIGN(_region_min_align);
} GROUP_LINK_IN(ROMABLE_REGION)

#if defined (CONFIG_CPP)
SECTION_PROLOGUE(.ARM.extab,,)
{
	/*
	 * .ARM.extab section containing exception unwinding information.
	 */
	*(.ARM.extab* .gnu.linkonce.armextab.*)
} GROUP_LINK_IN(ROMABLE_REGION)
#endif

SECTION_PROLOGUE(.ARM.exidx,,)
{
	/*
	 * This section, related to stack and exception unwinding, is placed
	 * explicitly to prevent it from being shared between multiple regions.
	 * It must be defined for gcc to support 64-bit math and avoid
	 * section overlap.
	 */
	__exidx_start = .;
#if defined (__GCC_LINKER_CMD__)
	*(.ARM.exidx* gnu.linkonce.armexidx.*)
#endif
	__exidx_end = .;
} GROUP_LINK_IN(ROMABLE_REGION)

SECTION_PROLOGUE(rodata_start,,)
{
	. = ALIGN(_region_min_align);
	__rodata_region_start = .;
} GROUP_LINK_IN(ROMABLE_REGION)

#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
 * zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
 */
#include <snippets-rom-sections.ld>
#include <zephyr/linker/thread-local-storage.ld>
#include <zephyr/linker/cplusplus-rom.ld>

SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
	*(.rodata)
	*(".rodata.*")
	*(.gnu.linkonce.r.*)

	/* Located in generated directory. This file is populated by the
	 * zephyr_linker_sources() Cmake function.
	 */
#include <snippets-rodata.ld>
#include <zephyr/linker/kobject-rom.ld>

	/*
	 * For XIP images, in order to avoid the situation when __data_rom_start
	 * is 32-bit aligned, but the actual data is placed right after rodata
	 * section, which may not end exactly at 32-bit border, pad rodata
	 * section, so __data_rom_start points at data and it is 32-bit aligned.
	 *
	 * On non-XIP images this may enlarge image size up to 3 bytes. This
	 * generally is not an issue, since modern ROM and FLASH memory is
	 * usually 4k aligned.
	 */
	. = ALIGN(4);

	/*
	 * RODATA must be the last section so that the size of the entire read
	 * only area will be filled to a power of 2.
	 */
	MPU_ALIGN(ABSOLUTE(.) - __rom_region_start);
} GROUP_LINK_IN(ROMABLE_REGION)

__rodata_region_end = .;
__rom_region_end = .;
MPU_ALIGN(__rodata_region_end - __rom_region_start);
_image_rom_end_order = (LOG2CEIL(__rom_region_end) - 1) << 1;
GROUP_END(ROMABLE_REGION)

/*
 * These are here according to 'arm-zephyr-elf-ld --verbose',
 * before data section.
 */
SECTION_PROLOGUE(.got,,)
{
	*(.got.plt)
	*(.igot.plt)
	*(.got)
	*(.igot)
}

/* --- Read-write data and BSS, placed in RAM --- */
GROUP_START(RAMABLE_REGION)

. = RAM_ADDR;
/* Align the start of image RAM with the
 * minimum granularity required by MPU.
 */
. = ALIGN(_region_min_align);
_image_ram_start = .;
#ifdef CONFIG_XIP
z_mapped_start = .;
#endif

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-ram-sections.ld>

#if defined(CONFIG_USERSPACE)
#define APP_SHARED_ALIGN . = ALIGN(_region_min_align);
#define SMEM_PARTITION_ALIGN MPU_ALIGN

#include <app_smem.ld>

_app_smem_size = _app_smem_end - _app_smem_start;
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#endif /* CONFIG_USERSPACE */

SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD), BSS_ALIGN)
{
	/*
	 * For performance, BSS section is assumed to be 4 byte aligned and
	 * a multiple of 4 bytes
	 */
	. = ALIGN(4);
	__bss_start = .;
	__kernel_ram_start = .;
	*(.bss)
	*(".bss.*")
	*(COMMON)
	*(".kernel_bss.*")
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_bss_relocate.ld>
#endif
	/*
	 * As memory is cleared in words only, it is simpler to ensure the BSS
	 * section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
	 */
	__bss_end = ALIGN(4);
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)

#include <zephyr/linker/common-noinit.ld>

SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
	__data_region_start = .;
	__data_start = .;
	*(.data)
	*(".data.*")
	*(".kernel.*")

	/* Located in generated directory. This file is populated by the
	 * zephyr_linker_sources() Cmake function.
	 */
#include <snippets-rwdata.ld>

#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_data_relocate.ld>
#endif
	__data_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_size = __data_end - __data_start;
__data_load_start = LOADADDR(_DATA_SECTION_NAME);
__data_region_load_start = LOADADDR(_DATA_SECTION_NAME);

#include <zephyr/linker/common-ram.ld>
#include <zephyr/linker/kobject-data.ld>
#include <zephyr/linker/cplusplus-ram.ld>

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-data-sections.ld>

__data_region_end = .;

/* Define linker symbols */
__kernel_ram_end = RAM_ADDR + RAM_SIZE;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;

/* On-chip memory (OCM) sections, only when a zephyr,ocm node is chosen. */
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ocm), okay)
GROUP_START(OCM)

SECTION_PROLOGUE(_OCM_BSS_SECTION_NAME, (NOLOAD),SUBALIGN(4))
{
	__ocm_start = .;
	__ocm_bss_start = .;
	*(.ocm_bss)
	*(".ocm_bss.*")
	__ocm_bss_end = .;
} GROUP_LINK_IN(LINKER_DT_NODE_REGION_NAME(DT_CHOSEN(zephyr_ocm)))

SECTION_PROLOGUE(_OCM_DATA_SECTION_NAME,,SUBALIGN(4))
{
	__ocm_data_start = .;
	*(.ocm_data)
	*(".ocm_data.*")
	__ocm_data_end = .;
} GROUP_LINK_IN(LINKER_DT_NODE_REGION_NAME(DT_CHOSEN(zephyr_ocm)))

__ocm_end = .;
__ocm_size = __ocm_end - __ocm_start;
GROUP_END(OCM)
#endif

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-sections.ld>

#define LAST_RAM_ALIGN . = ALIGN(_region_min_align);
#include <zephyr/linker/ram-end.ld>

GROUP_END(RAMABLE_REGION)

#include <zephyr/linker/debug-sections.ld>

SECTION_PROLOGUE(.ARM.attributes, 0,)
{
	KEEP(*(.ARM.attributes))
	KEEP(*(.gnu.attributes))
}

/* Sections generated from 'zephyr,memory-region' nodes */
LINKER_DT_SECTIONS()

/* Must be last in romable region */
SECTION_PROLOGUE(.last_section,,)
{
#ifdef CONFIG_LINKER_LAST_SECTION_ID
	/* Fill last section with a word to ensure location counter and actual rom
	 * region data usage match. */
	LONG(CONFIG_LINKER_LAST_SECTION_ID_PATTERN)
#endif
} GROUP_LINK_IN(ROMABLE_REGION)

/* To provide the image size as a const expression,
 * calculate this value here. */
_flash_used = LOADADDR(.last_section) + SIZEOF(.last_section) - __rom_region_start;
}
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_a_r/scripts/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,926 |
```objective-c
/*
*
*/
#ifndef _ASMLANGUAGE

#include <cmsis_core.h>

/* Convenience macros to represent the ARMv7-M-specific
 * configuration for memory access permission and
 * cache-ability attribution.
 */

/* Access Permission (AP) field encodings for the MPU RASR register,
 * named P_<privileged access>_U_<unprivileged access>:
 * NA = no access, RO = read-only, RW = read-write.
 */

/* Privileged No Access, Unprivileged No Access */
#define NO_ACCESS 0x0
#define NO_ACCESS_Msk ((NO_ACCESS << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Privileged No Access, Unprivileged No Access */
#define P_NA_U_NA 0x0
#define P_NA_U_NA_Msk ((P_NA_U_NA << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Privileged Read Write, Unprivileged No Access */
#define P_RW_U_NA 0x1
#define P_RW_U_NA_Msk ((P_RW_U_NA << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Privileged Read Write, Unprivileged Read Only */
#define P_RW_U_RO 0x2
#define P_RW_U_RO_Msk ((P_RW_U_RO << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Privileged Read Write, Unprivileged Read Write */
#define P_RW_U_RW 0x3U
#define P_RW_U_RW_Msk ((P_RW_U_RW << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Privileged Read Write, Unprivileged Read Write (alias of P_RW_U_RW) */
#define FULL_ACCESS 0x3
#define FULL_ACCESS_Msk ((FULL_ACCESS << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Privileged Read Only, Unprivileged No Access */
#define P_RO_U_NA 0x5
#define P_RO_U_NA_Msk ((P_RO_U_NA << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Privileged Read Only, Unprivileged Read Only */
#define P_RO_U_RO 0x6
#define P_RO_U_RO_Msk ((P_RO_U_RO << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Privileged Read Only, Unprivileged Read Only (alias of P_RO_U_RO) */
#define RO 0x7
#define RO_Msk ((RO << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)

/* Attribute flag for not-allowing execution (eXecute Never) */
#define NOT_EXEC MPU_RASR_XN_Msk

/* The following definitions are for internal use in arm_mpu.h.
 * They combine the RASR TEX/S/C/B bits into named memory types.
 */
#define STRONGLY_ORDERED_SHAREABLE MPU_RASR_S_Msk
#define DEVICE_SHAREABLE (MPU_RASR_B_Msk | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_WRITE_THROUGH_SHAREABLE \
	(MPU_RASR_C_Msk | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE MPU_RASR_C_Msk
#define NORMAL_OUTER_INNER_WRITE_BACK_SHAREABLE \
	(MPU_RASR_C_Msk | MPU_RASR_B_Msk | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_WRITE_BACK_NON_SHAREABLE \
	(MPU_RASR_C_Msk | MPU_RASR_B_Msk)
#define NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE \
	((1 << MPU_RASR_TEX_Pos) | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_NON_CACHEABLE_NON_SHAREABLE \
	(1 << MPU_RASR_TEX_Pos)
#define NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_SHAREABLE \
	((1 << MPU_RASR_TEX_Pos) |\
	 MPU_RASR_C_Msk | MPU_RASR_B_Msk | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE \
	((1 << MPU_RASR_TEX_Pos) | MPU_RASR_C_Msk | MPU_RASR_B_Msk)
#define DEVICE_NON_SHAREABLE (2 << MPU_RASR_TEX_Pos)

/* Bit-masks to disable sub-regions.
 * MPU_RASR_SRD_Pos/Msk come from the CMSIS header included above.
 */
#define SUB_REGION_0_DISABLED ((0x01 << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk)
#define SUB_REGION_1_DISABLED ((0x02 << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk)
#define SUB_REGION_2_DISABLED ((0x04 << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk)
#define SUB_REGION_3_DISABLED ((0x08 << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk)
#define SUB_REGION_4_DISABLED ((0x10 << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk)
#define SUB_REGION_5_DISABLED ((0x20 << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk)
#define SUB_REGION_6_DISABLED ((0x40 << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk)
#define SUB_REGION_7_DISABLED ((0x80 << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk)

/* Encode a named region size (e.g. 32B, 1KB) into the RASR SIZE field. */
#define REGION_SIZE(size) ((ARM_MPU_REGION_SIZE_ ## size \
	<< MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk)
/* Shorthands for the RASR-encoded region sizes defined above. */
#define REGION_32B REGION_SIZE(32B)
#define REGION_64B REGION_SIZE(64B)
#define REGION_128B REGION_SIZE(128B)
#define REGION_256B REGION_SIZE(256B)
#define REGION_512B REGION_SIZE(512B)
#define REGION_1K REGION_SIZE(1KB)
#define REGION_2K REGION_SIZE(2KB)
#define REGION_4K REGION_SIZE(4KB)
#define REGION_8K REGION_SIZE(8KB)
#define REGION_16K REGION_SIZE(16KB)
#define REGION_32K REGION_SIZE(32KB)
#define REGION_64K REGION_SIZE(64KB)
#define REGION_128K REGION_SIZE(128KB)
#define REGION_256K REGION_SIZE(256KB)
#define REGION_512K REGION_SIZE(512KB)
#define REGION_1M REGION_SIZE(1MB)
#define REGION_2M REGION_SIZE(2MB)
#define REGION_4M REGION_SIZE(4MB)
#define REGION_8M REGION_SIZE(8MB)
#define REGION_16M REGION_SIZE(16MB)
#define REGION_32M REGION_SIZE(32MB)
#define REGION_64M REGION_SIZE(64MB)
#define REGION_128M REGION_SIZE(128MB)
#define REGION_256M REGION_SIZE(256MB)
#define REGION_512M REGION_SIZE(512MB)
#define REGION_1G REGION_SIZE(1GB)
#define REGION_2G REGION_SIZE(2GB)
#define REGION_4G REGION_SIZE(4GB)

/* Build a struct arm_mpu_region initializer; p_attr is one of the
 * REGION_*_ATTR function-like macros below, applied to the encoded size.
 */
#define ARM_MPU_REGION_INIT(p_name, p_base, p_size, p_attr) \
	{ .name = p_name, \
	  .base = p_base, \
	  .attr = p_attr(size_to_mpu_rasr_size(p_size)), \
	}
/* Some helper defines for common regions */

/* On Cortex-M, we can only set the XN bit when CONFIG_XIP=y. When
 * CONFIG_XIP=n, the entire image will be linked to SRAM, so we need to keep
 * the SRAM region XN bit clear or the application code will not be executable.
 */
#define REGION_RAM_ATTR(size) \
{ \
	(NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE | \
	 IF_ENABLED(CONFIG_XIP, (MPU_RASR_XN_Msk |)) size | P_RW_U_NA_Msk) \
}

#define REGION_RAM_NOCACHE_ATTR(size) \
{ \
	(NORMAL_OUTER_INNER_NON_CACHEABLE_NON_SHAREABLE | \
	 MPU_RASR_XN_Msk | size | P_RW_U_NA_Msk) \
}

/* Flash region: privileged read-write when flash writes are allowed,
 * read-only otherwise.
 */
#if defined(CONFIG_MPU_ALLOW_FLASH_WRITE)
#define REGION_FLASH_ATTR(size) \
{ \
	(NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE | size | \
	 P_RW_U_RO_Msk) \
}
#else
#define REGION_FLASH_ATTR(size) \
{ \
	(NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE | size | RO_Msk) \
}
#endif

#define REGION_PPB_ATTR(size) { (STRONGLY_ORDERED_SHAREABLE | size | \
	P_RW_U_NA_Msk) }
#define REGION_IO_ATTR(size) { (DEVICE_NON_SHAREABLE | size | P_RW_U_NA_Msk) }
#define REGION_EXTMEM_ATTR(size) { (STRONGLY_ORDERED_SHAREABLE | size | \
	NO_ACCESS_Msk) }

struct arm_mpu_region_attr {
	/* Attributes belonging to RASR (including the encoded region size) */
	uint32_t rasr;
};

typedef struct arm_mpu_region_attr arm_mpu_region_attr_t;

/* Typedef for the k_mem_partition attribute */
typedef struct {
	/* Raw RASR attribute bits (AP, XN, TEX/S/C/B), without size/enable. */
	uint32_t rasr_attr;
} k_mem_partition_attr_t;
/* Raw attribute combinations (AP field plus XN bit); building blocks for
 * the public K_MEM_PARTITION_* macros below.
 */
/* Read-Write access permission attributes */
#define _K_MEM_PARTITION_P_NA_U_NA (NO_ACCESS_Msk | NOT_EXEC)
#define _K_MEM_PARTITION_P_RW_U_RW (P_RW_U_RW_Msk | NOT_EXEC)
#define _K_MEM_PARTITION_P_RW_U_RO (P_RW_U_RO_Msk | NOT_EXEC)
#define _K_MEM_PARTITION_P_RW_U_NA (P_RW_U_NA_Msk | NOT_EXEC)
#define _K_MEM_PARTITION_P_RO_U_RO (P_RO_U_RO_Msk | NOT_EXEC)
#define _K_MEM_PARTITION_P_RO_U_NA (P_RO_U_NA_Msk | NOT_EXEC)

/* Execution-allowed attributes */
#define _K_MEM_PARTITION_P_RWX_U_RWX (P_RW_U_RW_Msk)
#define _K_MEM_PARTITION_P_RWX_U_RX (P_RW_U_RO_Msk)
#define _K_MEM_PARTITION_P_RX_U_RX (P_RO_U_RO_Msk)

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of k_mem_partition_attr_t is an
 * "1-1" mapping of the ARMv7-M MPU RASR attribute register
 * fields (excluding the <size> and <enable> bit-fields).
 */

/* Read-Write access permission attributes (default cache-ability) */
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_NA_U_NA | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_RW_U_RW | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_RW_U_RO | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_RW_U_NA | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_RO_U_RO | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_RO_U_NA | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})

/* Execution-allowed attributes (default-cacheability) */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_RWX_U_RWX | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})
#define K_MEM_PARTITION_P_RWX_U_RX ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_RWX_U_RX | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
	{ _K_MEM_PARTITION_P_RX_U_RX | \
	  NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE})

/*
 * @brief Evaluate Write-ability
 *
 * Evaluate whether the access permissions include write-ability.
 * The AP field is masked out first, so any combination of memory-type
 * and XN bits in the attribute does not affect the result.
 *
 * @param attr The k_mem_partition_attr_t object holding the
 *             MPU attributes to be checked against write-ability.
 */
#define K_MEM_PARTITION_IS_WRITABLE(attr) \
({ \
	int __is_writable__; \
	switch (attr.rasr_attr & MPU_RASR_AP_Msk) { \
	case P_RW_U_RW_Msk: \
	case P_RW_U_RO_Msk: \
	case P_RW_U_NA_Msk: \
		__is_writable__ = 1; \
		break; \
	default: \
		__is_writable__ = 0; \
	} \
	__is_writable__; \
})

/*
 * @brief Evaluate Execution allowance
 *
 * Evaluate whether the access permissions include execution.
 *
 * @param attr The k_mem_partition_attr_t object holding the
 *             MPU attributes to be checked against execution
 *             allowance.
 */
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) \
	(!((attr.rasr_attr) & (NOT_EXEC)))
/* Attributes for no-cache enabling (share-ability is selected by default) */
#define K_MEM_PARTITION_P_NA_U_NA_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_NA_U_NA \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})
#define K_MEM_PARTITION_P_RW_U_RW_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_RW_U_RW \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})
#define K_MEM_PARTITION_P_RW_U_RO_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_RW_U_RO \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})
#define K_MEM_PARTITION_P_RW_U_NA_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_RW_U_NA \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})
#define K_MEM_PARTITION_P_RO_U_RO_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_RO_U_RO \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})
#define K_MEM_PARTITION_P_RO_U_NA_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_RO_U_NA \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})
#define K_MEM_PARTITION_P_RWX_U_RWX_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_RWX_U_RWX \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})
#define K_MEM_PARTITION_P_RWX_U_RX_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_RWX_U_RX \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})
#define K_MEM_PARTITION_P_RX_U_RX_NOCACHE ((k_mem_partition_attr_t) \
	{(_K_MEM_PARTITION_P_RX_U_RX \
	| NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE)})

#endif /* _ASMLANGUAGE */

/* Compile-time check that a memory partition's size is a power of two
 * not smaller than the minimum MPU region size, and that its start
 * address is aligned to that size.
 */
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
	BUILD_ASSERT(!(((size) & ((size) - 1))) && \
		(size) >= CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE && \
		!((uint32_t)(start) & ((size) - 1)), \
		"the size of the partition must be power of 2" \
		" and greater than or equal to the minimum MPU region size." \
		"start address of the partition must align with size.")
``` | /content/code_sandbox/include/zephyr/arch/arm/mpu/arm_mpu_v7m.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,403 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_MPU_NXP_MPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_MPU_NXP_MPU_H_

#ifndef _ASMLANGUAGE

/* Number of programmable region descriptors in the NXP SysMPU. */
#define NXP_MPU_REGION_NUMBER 12

/* Bus Master User Mode Access */
#define UM_READ 4
#define UM_WRITE 2
#define UM_EXEC 1

/* Per-bus-master bit offsets of the user-mode permission field. */
#define BM0_UM_SHIFT 0
#define BM1_UM_SHIFT 6
#define BM2_UM_SHIFT 12
#define BM3_UM_SHIFT 18

/* Bus Master Supervisor Mode Access */
#define SM_RWX_ALLOW 0
#define SM_RX_ALLOW 1
#define SM_RW_ALLOW 2
#define SM_SAME_AS_UM 3

/* Per-bus-master bit offsets of the supervisor-mode permission field. */
#define BM0_SM_SHIFT 3
#define BM1_SM_SHIFT 9
#define BM2_SM_SHIFT 15
#define BM3_SM_SHIFT 21

/* Bus master 4 has plain write-enable / read-enable bits. */
#define BM4_WE_SHIFT 24
#define BM4_RE_SHIFT 25

#if CONFIG_USB_KINETIS || CONFIG_UDC_KINETIS
/* Grant bus master 4 (USB, see comment further below) read/write access. */
#define BM4_PERMISSIONS ((1 << BM4_RE_SHIFT) | (1 << BM4_WE_SHIFT))
#else
#define BM4_PERMISSIONS 0
#endif

/* Read Attribute: user-mode read for bus masters 0-3. */
#define MPU_REGION_READ ((UM_READ << BM0_UM_SHIFT) | \
			 (UM_READ << BM1_UM_SHIFT) | \
			 (UM_READ << BM2_UM_SHIFT) | \
			 (UM_READ << BM3_UM_SHIFT))

/* Write Attribute: user-mode write for bus masters 0-3. */
#define MPU_REGION_WRITE ((UM_WRITE << BM0_UM_SHIFT) | \
			  (UM_WRITE << BM1_UM_SHIFT) | \
			  (UM_WRITE << BM2_UM_SHIFT) | \
			  (UM_WRITE << BM3_UM_SHIFT))

/* Execute Attribute: user-mode execute for bus masters 0-3. */
#define MPU_REGION_EXEC ((UM_EXEC << BM0_UM_SHIFT) | \
			 (UM_EXEC << BM1_UM_SHIFT) | \
			 (UM_EXEC << BM2_UM_SHIFT) | \
			 (UM_EXEC << BM3_UM_SHIFT))

/* Super User Attributes: supervisor-mode policy for bus masters 0-3. */
#define MPU_REGION_SU ((SM_SAME_AS_UM << BM0_SM_SHIFT) | \
		       (SM_SAME_AS_UM << BM1_SM_SHIFT) | \
		       (SM_SAME_AS_UM << BM2_SM_SHIFT) | \
		       (SM_SAME_AS_UM << BM3_SM_SHIFT))

#define MPU_REGION_SU_RX ((SM_RX_ALLOW << BM0_SM_SHIFT) | \
			  (SM_RX_ALLOW << BM1_SM_SHIFT) | \
			  (SM_RX_ALLOW << BM2_SM_SHIFT) | \
			  (SM_RX_ALLOW << BM3_SM_SHIFT))

#define MPU_REGION_SU_RW ((SM_RW_ALLOW << BM0_SM_SHIFT) | \
			  (SM_RW_ALLOW << BM1_SM_SHIFT) | \
			  (SM_RW_ALLOW << BM2_SM_SHIFT) | \
			  (SM_RW_ALLOW << BM3_SM_SHIFT))

#define MPU_REGION_SU_RWX ((SM_RWX_ALLOW << BM0_SM_SHIFT) | \
			   (SM_RWX_ALLOW << BM1_SM_SHIFT) | \
			   (SM_RWX_ALLOW << BM2_SM_SHIFT) | \
			   (SM_RWX_ALLOW << BM3_SM_SHIFT))

/* The ENDADDR field has the last 5 bit reserved and set to 1 */
#define ENDADDR_ROUND(x) (x - 0x1F)

#define REGION_USER_MODE_ATTR {(MPU_REGION_READ | \
				MPU_REGION_WRITE | \
				MPU_REGION_SU)}
/* Some helper defines for common regions */
#if defined(CONFIG_MPU_ALLOW_FLASH_WRITE)
#define REGION_RAM_ATTR {((MPU_REGION_SU_RWX) | \
			  ((UM_READ | UM_WRITE | UM_EXEC) << BM3_UM_SHIFT) | \
			  (BM4_PERMISSIONS))}

#define REGION_FLASH_ATTR {(MPU_REGION_SU_RWX)}

#else
#define REGION_RAM_ATTR {((MPU_REGION_SU_RW) | \
			  ((UM_READ | UM_WRITE) << BM3_UM_SHIFT) | \
			  (BM4_PERMISSIONS))}

#define REGION_FLASH_ATTR {(MPU_REGION_READ | \
			    MPU_REGION_EXEC | \
			    MPU_REGION_SU)}
#endif

#define REGION_IO_ATTR {(MPU_REGION_READ | \
			 MPU_REGION_WRITE | \
			 MPU_REGION_EXEC | \
			 MPU_REGION_SU)}

#define REGION_RO_ATTR {(MPU_REGION_READ | MPU_REGION_SU)}

#define REGION_USER_RO_ATTR {(MPU_REGION_READ | \
			      MPU_REGION_SU)}

/* ENET (Master 3) and USB (Master 4) devices will not be able
   to access RAM when the region is dynamically disabled in NXP MPU.
   DEBUGGER (Master 1) can't be disabled in Region 0. */
#define REGION_DEBUGGER_AND_DEVICE_ATTR {((MPU_REGION_SU) | \
					  ((UM_READ | UM_WRITE) << BM3_UM_SHIFT) | \
					  (BM4_PERMISSIONS))}

#define REGION_DEBUG_ATTR {MPU_REGION_SU}

#define REGION_BACKGROUND_ATTR {MPU_REGION_SU_RW}

struct nxp_mpu_region_attr {
	/* NXP MPU region access permission attributes */
	uint32_t attr;
};

typedef struct nxp_mpu_region_attr nxp_mpu_region_attr_t;

/* Typedef for the k_mem_partition attribute*/
typedef struct {
	/* Raw SysMPU word-2 access-control bits for the partition. */
	uint32_t ap_attr;
} k_mem_partition_attr_t;
/* Kernel macros for memory attribution
* (access permissions and cache-ability).
*
* The macros are to be stored in k_mem_partition_attr_t
* objects.
*/
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
{(MPU_REGION_SU)})
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
{(MPU_REGION_READ | MPU_REGION_WRITE | MPU_REGION_SU)})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
{(MPU_REGION_READ | MPU_REGION_SU_RW)})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
{(MPU_REGION_SU_RW)})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
{(MPU_REGION_READ | MPU_REGION_SU)})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
{(MPU_REGION_SU_RX)})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
{(MPU_REGION_READ | MPU_REGION_WRITE | \
MPU_REGION_EXEC | MPU_REGION_SU)})
#define K_MEM_PARTITION_P_RWX_U_RX ((k_mem_partition_attr_t) \
{(MPU_REGION_READ | MPU_REGION_EXEC | MPU_REGION_SU_RWX)})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
{(MPU_REGION_READ | MPU_REGION_EXEC | MPU_REGION_SU)})
/*
 * @brief Evaluate Write-ability
 *
 * Evaluate whether the access permissions include write-ability.
 *
 * @param attr The k_mem_partition_attr_t object holding the
 *        MPU attributes to be checked against write-ability.
 *
 * NOTE(review): the switch compares ap_attr for exact equality with
 * MPU_REGION_WRITE or MPU_REGION_SU_RW; a composite attribute value
 * (e.g. MPU_REGION_READ | MPU_REGION_WRITE | MPU_REGION_SU) matches
 * neither case and evaluates as non-writable — confirm callers only
 * pass the exact values tested here.
 */
#define K_MEM_PARTITION_IS_WRITABLE(attr) \
	({ \
		int __is_writable__; \
		switch (attr.ap_attr) { \
		case MPU_REGION_WRITE: \
		case MPU_REGION_SU_RW: \
			__is_writable__ = 1; \
			break; \
		default: \
			__is_writable__ = 0; \
		} \
		__is_writable__; \
	})
/*
 * @brief Evaluate Execution allowance
 *
 * Evaluate whether the access permissions include execution.
 *
 * @param attr The k_mem_partition_attr_t object holding the
 *        MPU attributes to be checked against execution
 *        allowance.
 *
 * NOTE(review): as with K_MEM_PARTITION_IS_WRITABLE, the switch tests
 * ap_attr for exact equality with MPU_REGION_SU_RX or MPU_REGION_EXEC;
 * composite attribute values match neither case and evaluate as
 * non-executable — confirm this is the intent for all callers.
 */
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) \
	({ \
		int __is_executable__; \
		switch (attr.ap_attr) { \
		case MPU_REGION_SU_RX: \
		case MPU_REGION_EXEC: \
			__is_executable__ = 1; \
			break; \
		default: \
			__is_executable__ = 0; \
		} \
		__is_executable__; \
	})
/* Region definition data structure */
struct nxp_mpu_region {
/* Region Base Address */
uint32_t base;
/* Region End Address */
uint32_t end;
/* Region Name */
const char *name;
/* Region Attributes */
nxp_mpu_region_attr_t attr;
};
#define MPU_REGION_ENTRY(_name, _base, _end, _attr) \
{\
.name = _name, \
.base = _base, \
.end = _end, \
.attr = _attr, \
}
/* MPU configuration data structure */
struct nxp_mpu_config {
/* Number of regions */
uint32_t num_regions;
/* Regions */
const struct nxp_mpu_region *mpu_regions;
/* SRAM Region */
uint32_t sram_region;
};
/* Reference to the MPU configuration.
*
* This struct is defined and populated for each SoC (in the SoC definition),
* and holds the build-time configuration information for the fixed MPU
* regions enabled during kernel initialization. Dynamic MPU regions (e.g.
* for Thread Stack, Stack Guards, etc.) are programmed during runtime, thus,
* not kept here.
*/
extern const struct nxp_mpu_config mpu_config;
#endif /* _ASMLANGUAGE */
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
BUILD_ASSERT((size) % \
CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE == 0 && \
(size) >= CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE && \
(uint32_t)(start) % CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE == 0, \
"the size of the partition must align with minimum MPU \
region size" \
" and greater than or equal to minimum MPU region size." \
"start address of the partition must align with minimum MPU \
region size.")
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_MPU_NXP_MPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/mpu/nxp_mpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,122 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_MPU_ARM_MPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_MPU_ARM_MPU_H_
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
defined(CONFIG_CPU_CORTEX_M3) || \
defined(CONFIG_CPU_CORTEX_M4) || \
defined(CONFIG_CPU_CORTEX_M7) || \
defined(CONFIG_ARMV7_R)
#include <zephyr/arch/arm/mpu/arm_mpu_v7m.h>
#elif defined(CONFIG_CPU_CORTEX_M23) || \
defined(CONFIG_CPU_CORTEX_M33) || \
defined(CONFIG_CPU_CORTEX_M55) || \
defined(CONFIG_CPU_CORTEX_M85) || \
defined(CONFIG_AARCH32_ARMV8_R)
#include <zephyr/arch/arm/mpu/arm_mpu_v8.h>
#else
#error "Unsupported ARM CPU"
#endif
#ifndef _ASMLANGUAGE
/* Region definition data structure */
struct arm_mpu_region {
/* Region Base Address */
uint32_t base;
/* Region Name */
const char *name;
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* Region Size */
uint32_t size;
#endif
/* Region Attributes */
arm_mpu_region_attr_t attr;
};
/* MPU configuration data structure */
struct arm_mpu_config {
/* Number of regions */
uint32_t num_regions;
/* Regions */
const struct arm_mpu_region *mpu_regions;
};
#if defined(CONFIG_ARMV7_R)
#define MPU_REGION_ENTRY(_name, _base, _size, _attr) \
{\
.name = _name, \
.base = _base, \
.size = _size, \
.attr = _attr, \
}
#else
#define MPU_REGION_ENTRY(_name, _base, _attr) \
{\
.name = _name, \
.base = _base, \
.attr = _attr, \
}
#endif
/* Reference to the MPU configuration.
*
* This struct is defined and populated for each SoC (in the SoC definition),
* and holds the build-time configuration information for the fixed MPU
* regions enabled during kernel initialization. Dynamic MPU regions (e.g.
* for Thread Stack, Stack Guards, etc.) are programmed during runtime, thus,
* not kept here.
*/
extern const struct arm_mpu_config mpu_config;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_MPU_ARM_MPU_H_ */
``` | /content/code_sandbox/include/zephyr/arch/arm/mpu/arm_mpu.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 522 |
```linker script
/*
*
*/
/* Set initial alignment to the 32 byte minimum for all MPUs */
_app_data_align = 32;
. = ALIGN(32);
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_r/scripts/app_data_alignment.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 32 |
```linker script
/*
*
*/
#include <zephyr/arch/arm/cortex_a_r/scripts/linker.ld>
``` | /content/code_sandbox/include/zephyr/arch/arm/cortex_r/scripts/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 20 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_NIOS2_EXPCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_NIOS2_EXPCEPTION_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct arch_esf {
uint32_t ra; /* return address r31 */
uint32_t r1; /* at */
uint32_t r2; /* return value */
uint32_t r3; /* return value */
uint32_t r4; /* register args */
uint32_t r5; /* register args */
uint32_t r6; /* register args */
uint32_t r7; /* register args */
uint32_t r8; /* Caller-saved general purpose */
uint32_t r9; /* Caller-saved general purpose */
uint32_t r10; /* Caller-saved general purpose */
uint32_t r11; /* Caller-saved general purpose */
uint32_t r12; /* Caller-saved general purpose */
uint32_t r13; /* Caller-saved general purpose */
uint32_t r14; /* Caller-saved general purpose */
uint32_t r15; /* Caller-saved general purpose */
uint32_t estatus;
uint32_t instr; /* Instruction being executed when exc occurred */
};
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_NIOS2_EXPCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/nios2/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 296 |
```objective-c
/*
*
*/
#ifndef _ASMLANGUAGE
/* Convenience macros to represent the ARMv8-M-specific
* configuration for memory access permission and
* cache-ability attribution.
*/
#if defined(CONFIG_AARCH32_ARMV8_R)
#define MPU_IR_REGION_Msk (0xFFU)
#define MPU_IR_REGION_Pos 8U
/* MPU RBAR Register attribute msk Definitions */
#define MPU_RBAR_BASE_Pos 6U
#define MPU_RBAR_BASE_Msk (0x3FFFFFFFFFFFFFFUL << MPU_RBAR_BASE_Pos)
#define MPU_RBAR_SH_Pos 3U
#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos)
#define MPU_RBAR_AP_Pos 1U
#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos)
/* RBAR XN */
#define MPU_RBAR_XN_Pos 0U
#define MPU_RBAR_XN_Msk (0x1UL << MPU_RBAR_XN_Pos)
/* MPU PLBAR Register Definitions */
#define MPU_RLAR_LIMIT_Pos 6U
#define MPU_RLAR_LIMIT_Msk (0x3FFFFFFFFFFFFFFUL << MPU_RLAR_LIMIT_Pos)
#define MPU_RLAR_AttrIndx_Pos 1U
#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos)
#define MPU_RLAR_EN_Msk (0x1UL)
#else
#include <cmsis_core.h>
#endif
/* Privileged No Access, Unprivileged No Access */
/*#define NO_ACCESS 0x0 */
/*#define NO_ACCESS_Msk ((NO_ACCESS << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) */
/* Privileged No Access, Unprivileged No Access */
/*#define P_NA_U_NA 0x0 */
/*#define P_NA_U_NA_Msk ((P_NA_U_NA << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) */
/* Privileged Read Write, Unprivileged No Access */
#define P_RW_U_NA 0x0
#define P_RW_U_NA_Msk ((P_RW_U_NA << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Privileged Read Write, Unprivileged Read Only */
/*#define P_RW_U_RO 0x2 */
/*#define P_RW_U_RO_Msk ((P_RW_U_RO << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)*/
/* Privileged Read Write, Unprivileged Read Write */
#define P_RW_U_RW 0x1
#define P_RW_U_RW_Msk ((P_RW_U_RW << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Privileged Read Write, Unprivileged Read Write */
#define FULL_ACCESS 0x1
#define FULL_ACCESS_Msk ((FULL_ACCESS << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Privileged Read Only, Unprivileged No Access */
#define P_RO_U_NA 0x2
#define P_RO_U_NA_Msk ((P_RO_U_NA << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Privileged Read Only, Unprivileged Read Only */
#define P_RO_U_RO 0x3
#define P_RO_U_RO_Msk ((P_RO_U_RO << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Privileged Read Only, Unprivileged Read Only */
#define RO 0x3
#define RO_Msk ((RO << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk)
/* Attribute flag for not-allowing execution (eXecute Never) */
#define NOT_EXEC MPU_RBAR_XN_Msk
/* Attribute flags for share-ability */
#define NON_SHAREABLE 0x0
#define NON_SHAREABLE_Msk \
((NON_SHAREABLE << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk)
#define OUTER_SHAREABLE 0x2
#define OUTER_SHAREABLE_Msk \
((OUTER_SHAREABLE << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk)
#define INNER_SHAREABLE 0x3
#define INNER_SHAREABLE_Msk \
((INNER_SHAREABLE << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk)
/* Helper define to calculate the region limit address.
 *
 * Computes the RLAR LIMIT field value for a region starting at @p base
 * with a length of @p size bytes: the address of the region's last
 * byte, masked to the RLAR LIMIT field.
 *
 * Arguments are parenthesized so expression arguments expand with the
 * intended precedence (CERT PRE01-C); e.g. passing `a | b` as @p base
 * previously bound `b & MPU_RBAR_BASE_Msk` before the `|`.
 */
#define REGION_LIMIT_ADDR(base, size) \
	((((base) & MPU_RBAR_BASE_Msk) + (size) - 1) & MPU_RLAR_LIMIT_Msk)
/* Attribute flags for cache-ability */
#if defined(CONFIG_AARCH32_ARMV8_R)
/* Memory Attributes for Device Memory
* 1.Gathering (G/nG)
* Determines whether multiple accesses can be merged into a single
* bus transaction.
* nG: Number/size of accesses on the bus = number/size of accesses
* in code.
*
* 2.Reordering (R/nR)
* Determines whether accesses to the same device can be reordered.
* nR: Accesses to the same IMPLEMENTATION DEFINED block size will
* appear on the bus in program order.
*
* 3 Early Write Acknowledgment (E/nE)
* Indicates to the memory system whether a buffer can send
* acknowledgements.
* nE: The response should come from the end slave, not buffering in
* the interconnect.
*/
#define DEVICE_nGnRnE 0x0U
#define DEVICE_nGnRE 0x4U
#define DEVICE_nGRE 0x8U
#define DEVICE_GRE 0xCU
#endif
/* Read/Write Allocation Configurations for Cacheable Memory */
#define R_NON_W_NON 0x0 /* Do not allocate Read/Write */
#define R_NON_W_ALLOC 0x1 /* Do not allocate Read, Allocate Write */
#define R_ALLOC_W_NON 0x2 /* Allocate Read, Do not allocate Write */
#define R_ALLOC_W_ALLOC 0x3 /* Allocate Read/Write */
/* Memory Attributes for Normal Memory */
#define NORMAL_O_WT_NT 0x80 /* Normal, Outer Write-through non-transient */
#define NORMAL_O_WB_NT 0xC0 /* Normal, Outer Write-back non-transient */
#define NORMAL_O_NON_C 0x40 /* Normal, Outer Non-Cacheable */
#define NORMAL_I_WT_NT 0x08 /* Normal, Inner Write-through non-transient */
#define NORMAL_I_WB_NT 0x0C /* Normal, Inner Write-back non-transient */
#define NORMAL_I_NON_C 0x04 /* Normal, Inner Non-Cacheable */
#define NORMAL_OUTER_INNER_WRITE_THROUGH_READ_ALLOCATE_NON_TRANS \
((NORMAL_O_WT_NT | (R_ALLOC_W_NON << 4)) \
| \
(NORMAL_I_WT_NT | R_ALLOC_W_NON)) \
#define NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_TRANS \
((NORMAL_O_WB_NT | (R_ALLOC_W_ALLOC << 4)) \
| \
(NORMAL_I_WB_NT | R_ALLOC_W_ALLOC))
#define NORMAL_OUTER_INNER_NON_CACHEABLE \
((NORMAL_O_NON_C | (R_NON_W_NON << 4)) \
| \
(NORMAL_I_NON_C | R_NON_W_NON))
/* Common cache-ability configuration for Flash, SRAM regions */
#define MPU_CACHE_ATTRIBUTES_FLASH \
NORMAL_OUTER_INNER_WRITE_THROUGH_READ_ALLOCATE_NON_TRANS
#define MPU_CACHE_ATTRIBUTES_SRAM \
NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_TRANS
#define MPU_CACHE_ATTRIBUTES_SRAM_NOCACHE \
NORMAL_OUTER_INNER_NON_CACHEABLE
/* Global MAIR configurations */
#define MPU_MAIR_ATTR_FLASH MPU_CACHE_ATTRIBUTES_FLASH
#define MPU_MAIR_INDEX_FLASH 0
#define MPU_MAIR_ATTR_SRAM MPU_CACHE_ATTRIBUTES_SRAM
#define MPU_MAIR_INDEX_SRAM 1
#define MPU_MAIR_ATTR_SRAM_NOCACHE MPU_CACHE_ATTRIBUTES_SRAM_NOCACHE
#define MPU_MAIR_INDEX_SRAM_NOCACHE 2
#if defined(CONFIG_AARCH32_ARMV8_R)
#define MPU_MAIR_ATTR_DEVICE DEVICE_nGnRnE
#define MPU_MAIR_INDEX_DEVICE 3
/* Flash region(s): Attribute-0
* SRAM region(s): Attribute-1
* SRAM no cache-able regions(s): Attribute-2
* DEVICE no cache-able regions(s): Attribute-3
*/
#define MPU_MAIR_ATTRS \
((MPU_MAIR_ATTR_FLASH << (MPU_MAIR_INDEX_FLASH * 8)) | \
(MPU_MAIR_ATTR_SRAM << (MPU_MAIR_INDEX_SRAM * 8)) | \
(MPU_MAIR_ATTR_SRAM_NOCACHE << (MPU_MAIR_INDEX_SRAM_NOCACHE * 8)) | \
(MPU_MAIR_ATTR_DEVICE << (MPU_MAIR_INDEX_DEVICE * 8)))
#else
/* Flash region(s): Attribute-0
* SRAM region(s): Attribute-1
* SRAM no cache-able regions(s): Attribute-2
*/
#define MPU_MAIR_ATTRS \
(((MPU_MAIR_ATTR_FLASH << MPU_MAIR0_Attr0_Pos) & MPU_MAIR0_Attr0_Msk) | \
((MPU_MAIR_ATTR_SRAM << MPU_MAIR0_Attr1_Pos) & MPU_MAIR0_Attr1_Msk) | \
((MPU_MAIR_ATTR_SRAM_NOCACHE << MPU_MAIR0_Attr2_Pos) & \
MPU_MAIR0_Attr2_Msk))
#endif
/* Some helper defines for common regions.
*
* Note that the ARMv8-M/R MPU architecture requires that the
* enabled MPU regions are non-overlapping. Therefore, it is
* recommended to use these helper defines only for configuring
* fixed MPU regions at build-time (i.e. regions that are not
* expected to be re-programmed or re-adjusted at run-time so
* that they do not overlap with other MPU regions).
*/
#if defined(CONFIG_AARCH32_ARMV8_R)
#define ARM_MPU_REGION_INIT(p_name, p_base, p_size, p_attr) \
{ .name = p_name, \
.base = p_base, \
.attr = p_attr(p_base + p_size), \
}
#define REGION_RAM_ATTR(limit) \
{ \
.rbar = NOT_EXEC | \
P_RW_U_NA_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM, \
.r_limit = limit - 1, /* Region Limit */ \
}
#define REGION_RAM_TEXT_ATTR(limit) \
{ \
.rbar = P_RO_U_RO_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM, \
.r_limit = limit - 1, /* Region Limit */ \
}
#define REGION_RAM_RO_ATTR(limit) \
{ \
.rbar = NOT_EXEC | \
P_RO_U_RO_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM, \
.r_limit = limit - 1, /* Region Limit */ \
}
#define REGION_RAM_NOCACHE_ATTR(limit) \
{ \
.rbar = NOT_EXEC | \
P_RW_U_NA_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM_NOCACHE, \
.r_limit = limit - 1, /* Region Limit */ \
}
#if defined(CONFIG_MPU_ALLOW_FLASH_WRITE)
/* Note that the access permissions allow for un-privileged writes, contrary
* to ARMv7-M where un-privileged code has Read-Only permissions.
*/
#define REGION_FLASH_ATTR(limit) \
{ \
.rbar = P_RW_U_RW_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_FLASH, \
.r_limit = limit - 1, /* Region Limit */ \
}
#else /* CONFIG_MPU_ALLOW_FLASH_WRITE */
#define REGION_FLASH_ATTR(limit) \
{ \
.rbar = RO_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_FLASH, \
.r_limit = limit - 1, /* Region Limit */ \
}
#endif /* CONFIG_MPU_ALLOW_FLASH_WRITE */
#define REGION_DEVICE_ATTR(limit) \
{ \
/* AP, XN, SH */ \
.rbar = NOT_EXEC | P_RW_U_NA_Msk | NON_SHAREABLE_Msk, \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_DEVICE, \
/* Region Limit */ \
.r_limit = limit - 1, \
}
#else
#define ARM_MPU_REGION_INIT(p_name, p_base, p_size, p_attr) \
{ .name = p_name, \
.base = p_base, \
.attr = p_attr(p_base, p_size), \
}
/* On Cortex-M, we can only set the XN bit when CONFIG_XIP=y. When
* CONFIG_XIP=n, the entire image will be linked to SRAM, so we need to keep
* the SRAM region XN bit clear or the application code will not be executable.
*/
#define REGION_RAM_ATTR(base, size) \
{\
.rbar = IF_ENABLED(CONFIG_XIP, (NOT_EXEC |)) \
P_RW_U_NA_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM, \
.r_limit = REGION_LIMIT_ADDR(base, size), /* Region Limit */ \
}
#define REGION_RAM_NOCACHE_ATTR(base, size) \
{\
.rbar = NOT_EXEC | \
P_RW_U_NA_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_SRAM_NOCACHE, \
.r_limit = REGION_LIMIT_ADDR(base, size), /* Region Limit */ \
}
#if defined(CONFIG_MPU_ALLOW_FLASH_WRITE)
/* Note that the access permissions allow for un-privileged writes, contrary
* to ARMv7-M where un-privileged code has Read-Only permissions.
*/
#define REGION_FLASH_ATTR(base, size) \
{\
.rbar = P_RW_U_RW_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_FLASH, \
.r_limit = REGION_LIMIT_ADDR(base, size), /* Region Limit */ \
}
#else /* CONFIG_MPU_ALLOW_FLASH_WRITE */
#define REGION_FLASH_ATTR(base, size) \
{\
.rbar = RO_Msk | NON_SHAREABLE_Msk, /* AP, XN, SH */ \
/* Cache-ability */ \
.mair_idx = MPU_MAIR_INDEX_FLASH, \
.r_limit = REGION_LIMIT_ADDR(base, size), /* Region Limit */ \
}
#endif /* CONFIG_MPU_ALLOW_FLASH_WRITE */
#endif
/* Per-region attribute record for the ARMv8-M/R MPU. */
struct arm_mpu_region_attr {
	/* Attributes belonging to RBAR: the XN (bit 0), AP (bits 1-2)
	 * and SH (bits 3-4) fields — hence a 5-bit field.
	 */
	uint8_t rbar: 5;
	/* MAIR index for attribute indirection (3-bit AttrIndx in RLAR) */
	uint8_t mair_idx: 3;
	/* Region Limit Address value to be written to the RLAR register. */
	uint32_t r_limit;
};
typedef struct arm_mpu_region_attr arm_mpu_region_attr_t;
/* Typedef for the k_mem_partition attribute */
typedef struct {
uint16_t rbar;
uint16_t mair_idx;
} k_mem_partition_attr_t;
/* Kernel macros for memory attribution
* (access permissions and cache-ability).
*
* The macros are to be stored in k_mem_partition_attr_t
* objects. The format of a k_mem_partition_attr_t object
* is as follows: field <rbar> contains a direct mapping
* of the <XN> and <AP> bit-fields of the RBAR register;
* field <mair_idx> contains a direct mapping of AttrIdx
* bit-field, stored in RLAR register.
*/
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
{(P_RW_U_RW_Msk | NOT_EXEC), MPU_MAIR_INDEX_SRAM})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
{(P_RW_U_NA_Msk | NOT_EXEC), MPU_MAIR_INDEX_SRAM})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
{(P_RO_U_RO_Msk | NOT_EXEC), MPU_MAIR_INDEX_SRAM})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
{(P_RO_U_NA_Msk | NOT_EXEC), MPU_MAIR_INDEX_SRAM})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
{(P_RW_U_RW_Msk), MPU_MAIR_INDEX_SRAM})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
{(P_RO_U_RO_Msk), MPU_MAIR_INDEX_SRAM})
/*
* @brief Evaluate Write-ability
*
* Evaluate whether the access permissions include write-ability.
*
* @param attr The k_mem_partition_attr_t object holding the
* MPU attributes to be checked against write-ability.
*/
#define K_MEM_PARTITION_IS_WRITABLE(attr) \
({ \
int __is_writable__; \
switch (attr.rbar & MPU_RBAR_AP_Msk) { \
case P_RW_U_RW_Msk: \
case P_RW_U_NA_Msk: \
__is_writable__ = 1; \
break; \
default: \
__is_writable__ = 0; \
} \
__is_writable__; \
})
/*
* @brief Evaluate Execution allowance
*
* Evaluate whether the access permissions include execution.
*
* @param attr The k_mem_partition_attr_t object holding the
* MPU attributes to be checked against execution
* allowance.
*/
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) \
(!((attr.rbar) & (NOT_EXEC)))
/* Attributes for no-cache enabling (share-ability is selected by default) */
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW_NOCACHE ((k_mem_partition_attr_t) \
{(P_RW_U_RW_Msk | NOT_EXEC | OUTER_SHAREABLE_Msk), \
MPU_MAIR_INDEX_SRAM_NOCACHE})
#define K_MEM_PARTITION_P_RW_U_NA_NOCACHE ((k_mem_partition_attr_t) \
{(P_RW_U_NA_Msk | NOT_EXEC | OUTER_SHAREABLE_Msk), \
MPU_MAIR_INDEX_SRAM_NOCACHE})
#define K_MEM_PARTITION_P_RO_U_RO_NOCACHE ((k_mem_partition_attr_t) \
{(P_RO_U_RO_Msk | NOT_EXEC | OUTER_SHAREABLE_Msk), \
MPU_MAIR_INDEX_SRAM_NOCACHE})
#define K_MEM_PARTITION_P_RO_U_NA_NOCACHE ((k_mem_partition_attr_t) \
{(P_RO_U_NA_Msk | NOT_EXEC | OUTER_SHAREABLE_Msk), \
MPU_MAIR_INDEX_SRAM_NOCACHE})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX_NOCACHE ((k_mem_partition_attr_t) \
{(P_RW_U_RW_Msk | OUTER_SHAREABLE_Msk), MPU_MAIR_INDEX_SRAM_NOCACHE})
#define K_MEM_PARTITION_P_RX_U_RX_NOCACHE ((k_mem_partition_attr_t) \
{(P_RO_U_RO_Msk | OUTER_SHAREABLE_Msk), MPU_MAIR_INDEX_SRAM_NOCACHE})
#endif /* _ASMLANGUAGE */
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
BUILD_ASSERT((size > 0) && ((uint32_t)start % \
CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE == 0U) && \
((size) % CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE == 0), \
" the start and size of the partition must align " \
"with the minimum MPU region size.")
``` | /content/code_sandbox/include/zephyr/arch/arm/mpu/arm_mpu_v8.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,441 |
```objective-c
/*
*
*/
/**
* @file
* @brief Nios II specific kernel interface header
* This header contains the Nios II specific kernel interface. It is
* included by the generic kernel interface header (include/arch/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_NIOS2_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_NIOS2_ARCH_H_
#include <system.h>
#include <zephyr/arch/nios2/thread.h>
#include <zephyr/arch/nios2/exception.h>
#include <zephyr/arch/nios2/asm_inline.h>
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/devicetree.h>
#include <zephyr/arch/nios2/nios2.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/arch/common/ffs.h>
#define ARCH_STACK_PTR_ALIGN 4
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#ifdef __cplusplus
extern "C" {
#endif
/* There is no notion of priority with the Nios II internal interrupt
* controller and no flags are currently supported.
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
}
/**
 * @brief Disable interrupts and return the previous status register.
 *
 * Reads the Nios II status control register, clears bit 0 (PIE, the
 * processor interrupt-enable bit, via AND with -2 == ~1) and writes it
 * back.  The pre-modification status value is returned as the lock key
 * for a later arch_irq_unlock().
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key, tmp;
	__asm__ volatile (
	    "rdctl %[key], status\n\t"
	    "movi %[tmp], -2\n\t"
	    "and %[tmp], %[key], %[tmp]\n\t"
	    "wrctl status, %[tmp]\n\t"
	    : [key] "=r" (key), [tmp] "=r" (tmp)
	    : : "memory");
	return key;
}
/**
 * @brief Restore the interrupt state saved by arch_irq_lock().
 *
 * @param key status register value previously returned by
 *            arch_irq_lock()
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	/* If the CPU is built without certain features, then
	 * the only writable bit in the status register is PIE
	 * in which case we can just write the value stored in key,
	 * all the other writable bits will be the same.
	 *
	 * If not, other stuff could have changed and we need to
	 * specifically flip just that bit.
	 */

#if (ALT_CPU_NUM_OF_SHADOW_REG_SETS > 0) || \
		(defined ALT_CPU_EIC_PRESENT) || \
		(defined ALT_CPU_MMU_PRESENT) || \
		(defined ALT_CPU_MPU_PRESENT)
	/* Other status bits may have changed since lock: only re-set PIE,
	 * and only if it was set in the saved key; otherwise skip the
	 * write entirely (branch to label 1).
	 */
	__asm__ volatile (
	    "andi %[key], %[key], 1\n\t"
	    "beq %[key], zero, 1f\n\t"
	    "rdctl %[key], status\n\t"
	    "ori %[key], %[key], 1\n\t"
	    "wrctl status, %[key]\n\t"
	    "1:\n\t"
	    : [key] "+r" (key)
	    : : "memory");
#else
	/* PIE is the only writable status bit: restore the saved word. */
	__asm__ volatile (
	    "wrctl status, %[key]"
	    : : [key] "r" (key)
	    : "memory");
#endif
}
/**
 * @brief Test whether an irq-lock key had interrupts enabled.
 *
 * @param key status register value previously returned by
 *            arch_irq_lock()
 * @return true if bit 0 (PIE) was set in @p key
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	/* PIE lives in bit 0 of the saved status word. */
	return (key & 1U) != 0U;
}
void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);
FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason,
const struct arch_esf *esf);
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
const struct arch_esf *esf);
enum nios2_exception_cause {
NIOS2_EXCEPTION_UNKNOWN = -1,
NIOS2_EXCEPTION_RESET = 0,
NIOS2_EXCEPTION_CPU_ONLY_RESET_REQUEST = 1,
NIOS2_EXCEPTION_INTERRUPT = 2,
NIOS2_EXCEPTION_TRAP_INST = 3,
NIOS2_EXCEPTION_UNIMPLEMENTED_INST = 4,
NIOS2_EXCEPTION_ILLEGAL_INST = 5,
NIOS2_EXCEPTION_MISALIGNED_DATA_ADDR = 6,
NIOS2_EXCEPTION_MISALIGNED_TARGET_PC = 7,
NIOS2_EXCEPTION_DIVISION_ERROR = 8,
NIOS2_EXCEPTION_SUPERVISOR_ONLY_INST_ADDR = 9,
NIOS2_EXCEPTION_SUPERVISOR_ONLY_INST = 10,
NIOS2_EXCEPTION_SUPERVISOR_ONLY_DATA_ADDR = 11,
NIOS2_EXCEPTION_TLB_MISS = 12,
NIOS2_EXCEPTION_TLB_EXECUTE_PERM_VIOLATION = 13,
NIOS2_EXCEPTION_TLB_READ_PERM_VIOLATION = 14,
NIOS2_EXCEPTION_TLB_WRITE_PERM_VIOLATION = 15,
NIOS2_EXCEPTION_MPU_INST_REGION_VIOLATION = 16,
NIOS2_EXCEPTION_MPU_DATA_REGION_VIOLATION = 17,
NIOS2_EXCEPTION_ECC_TLB_ERR = 18,
NIOS2_EXCEPTION_ECC_FETCH_ERR = 19,
NIOS2_EXCEPTION_ECC_REGISTER_FILE_ERR = 20,
NIOS2_EXCEPTION_ECC_DATA_ERR = 21,
NIOS2_EXCEPTION_ECC_DATA_CACHE_WRITEBACK_ERR = 22
};
/* Bitfield indicating which exception cause codes report a valid
* badaddr register. NIOS2_EXCEPTION_TLB_MISS and NIOS2_EXCEPTION_ECC_TLB_ERR
* are deliberately not included here, you need to check if TLBMISC.D=1
*/
#define NIOS2_BADADDR_CAUSE_MASK \
(BIT(NIOS2_EXCEPTION_SUPERVISOR_ONLY_DATA_ADDR) | \
BIT(NIOS2_EXCEPTION_MISALIGNED_DATA_ADDR) | \
BIT(NIOS2_EXCEPTION_MISALIGNED_TARGET_PC) | \
BIT(NIOS2_EXCEPTION_TLB_READ_PERM_VIOLATION) | \
BIT(NIOS2_EXCEPTION_TLB_WRITE_PERM_VIOLATION) | \
BIT(NIOS2_EXCEPTION_MPU_DATA_REGION_VIOLATION) | \
BIT(NIOS2_EXCEPTION_ECC_DATA_ERR))
/* 32-bit hardware cycle counter, provided by the system clock driver. */
extern uint32_t sys_clock_cycle_get_32(void);

/* Arch hook: return the current 32-bit cycle count (thin wrapper over
 * the system clock driver).
 */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

/* 64-bit hardware cycle counter, provided by the system clock driver. */
extern uint64_t sys_clock_cycle_get_64(void);

/* Arch hook: return the current 64-bit cycle count. */
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
/* Emit a single architectural no-op instruction. */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_NIOS2_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/nios2/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,436 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_NIOS2_ARCH_INLINES_H
#define ZEPHYR_INCLUDE_ARCH_NIOS2_ARCH_INLINES_H
#include <zephyr/kernel_structs.h>
/* Number of CPUs is fixed at build time on this arch; report the
 * configured maximum.
 */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
	return CONFIG_MP_MAX_NUM_CPUS;
}
#endif /* ZEPHYR_INCLUDE_ARCH_NIOS2_ARCH_INLINES_H */
``` | /content/code_sandbox/include/zephyr/arch/nios2/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 83 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_NIOS2_ASM_INLINE_GCC_H_
#define ZEPHYR_INCLUDE_ARCH_NIOS2_ASM_INLINE_GCC_H_
/*
* The file must not be included directly
* Include arch/cpu.h instead
*/
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/toolchain.h>
/* Using the *io variants of these instructions to prevent issues on
* devices that have an instruction/data cache
*/
/* 32-bit MMIO write via stwio (store that bypasses the data cache). */
static ALWAYS_INLINE void sys_write32(uint32_t data, mm_reg_t addr)
{
	__builtin_stwio((void *)addr, data);
}
/* 32-bit MMIO read via ldwio (load that bypasses the data cache). */
static ALWAYS_INLINE uint32_t sys_read32(mm_reg_t addr)
{
	return __builtin_ldwio((void *)addr);
}
/* 8-bit MMIO write.
 * NOTE(review): delegates to sys_write32, i.e. a full 32-bit stwio
 * store of the zero-extended value — confirm callers do not require a
 * byte-granular bus access here.
 */
static ALWAYS_INLINE void sys_write8(uint8_t data, mm_reg_t addr)
{
	sys_write32(data, addr);
}
/* 8-bit MMIO read via ldbuio (uncached, zero-extending byte load). */
static ALWAYS_INLINE uint8_t sys_read8(mm_reg_t addr)
{
	return __builtin_ldbuio((void *)addr);
}
/* 16-bit MMIO write.
 * NOTE(review): delegates to a full 32-bit stwio store — see sys_write8.
 */
static ALWAYS_INLINE void sys_write16(uint16_t data, mm_reg_t addr)
{
	sys_write32(data, addr);
}
/* 16-bit MMIO read via ldhuio (uncached, zero-extending halfword load). */
static ALWAYS_INLINE uint16_t sys_read16(mm_reg_t addr)
{
	return __builtin_ldhuio((void *)addr);
}
#endif /* _ASMLANGUAGE */
#endif /* _ASM_INLINE_GCC_PUBLIC_GCC_H */
``` | /content/code_sandbox/include/zephyr/arch/nios2/asm_inline_gcc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 295 |
```objective-c
#ifndef ZEPHYR_INCLUDE_ARCH_NIOS2_NIOS2_H_
#define ZEPHYR_INCLUDE_ARCH_NIOS2_NIOS2_H_
/******************************************************************************
* *
* *
* All rights reserved. *
* *
* Permission is hereby granted, free of charge, to any person obtaining a *
* copy of this software and associated documentation files (the "Software"), *
* to deal in the Software without restriction, including without limitation *
* the rights to use, copy, modify, merge, publish, distribute, sublicense, *
* and/or sell copies of the Software, and to permit persons to whom the *
* Software is furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in *
* all copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *
* DEALINGS IN THE SOFTWARE. *
* *
* This agreement shall be governed in all respects by the laws of the State *
* of California and by the laws of the United States of America. *
* *
******************************************************************************/
/*
* This header provides processor specific macros for accessing the Nios2
* control registers.
*/
#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */
/*
* Number of available IRQs in internal interrupt controller.
*/
#define NIOS2_NIRQ 32
/* Size in bits of registers */
#define SYSTEM_BUS_WIDTH 32
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/sys_io.h>
/*
* Functions for accessing select Nios II general-purpose registers.
*/
/* ET (Exception Temporary) register */
/* Read the ET (exception temporary) general-purpose register. */
static inline uint32_t _nios2_read_et(void)
{
	uint32_t et;

	__asm__("mov %0, et" : "=r" (et));
	return et;
}
/* Write the ET (exception temporary) general-purpose register. */
static inline void _nios2_write_et(uint32_t et)
{
	__asm__ volatile("mov et, %z0" : : "rM" (et));
}
/* Read the current stack pointer (sp) register. */
static inline uint32_t _nios2_read_sp(void)
{
	uint32_t sp;

	__asm__("mov %0, sp" : "=r" (sp));
	return sp;
}
/*
* Functions for useful processor instructions.
*/
/* Trigger a breakpoint (debugger trap) via the break instruction. */
static inline void z_nios2_break(void)
{
	__asm__ volatile("break");
}
/* Issue `break 3`.
 * NOTE(review): the immediate 3 is presumably interpreted by the debug
 * agent as a stack-overflow report — confirm against the debugger
 * configuration.
 */
static inline void _nios2_report_stack_overflow(void)
{
	__asm__ volatile("break 3");
}
/*
 * Low-level cache management functions
 */
/* Flush the data-cache line associated with address @p addr (flushda). */
static inline void _nios2_dcache_addr_flush(void *addr)
{
	__asm__ volatile ("flushda (%0)" :: "r" (addr));
}
/* Flush a data-cache line selected by @p offset (flushd). */
static inline void z_nios2_dcache_flush(uint32_t offset)
{
	__asm__ volatile ("flushd (%0)" :: "r" (offset));
}
/* Invalidate an instruction-cache line selected by @p offset (flushi). */
static inline void z_nios2_icache_flush(uint32_t offset)
{
	__asm__ volatile ("flushi %0" :: "r" (offset));
}
/* Flush the processor pipeline (flushp). */
static inline void z_nios2_pipeline_flush(void)
{
	__asm__ volatile ("flushp");
}
/*
* Functions for reading/writing control registers
*/
/* Control register indices, for use as the 'reg' argument to
 * z_nios2_creg_read() / z_nios2_creg_write() below.
 */
enum nios2_creg {
	NIOS2_CR_STATUS = 0,	/* Processor status */
	NIOS2_CR_ESTATUS = 1,	/* Status saved on exception entry */
	NIOS2_CR_BSTATUS = 2,	/* Status saved on break */
	NIOS2_CR_IENABLE = 3,	/* Internal interrupt enable bits */
	NIOS2_CR_IPENDING = 4,	/* Internal interrupt pending bits */
	NIOS2_CR_CPUID = 5,	/* CPU identifier */
	/* 6 is reserved */
	NIOS2_CR_EXCEPTION = 7,	/* Exception cause (see bit masks below) */
	NIOS2_CR_PTEADDR = 8,	/* Page Table Entry Address */
	NIOS2_CR_TLBACC = 9,	/* TLB Access */
	NIOS2_CR_TLBMISC = 10,	/* TLB Miscellaneous */
	NIOS2_CR_ECCINJ = 11,	/* ECC Inject */
	NIOS2_CR_BADADDR = 12,	/* Faulting address -- TODO confirm semantics */
	NIOS2_CR_CONFIG = 13,	/* Processor configuration */
	NIOS2_CR_MPUBASE = 14,	/* MPU Base Address */
	NIOS2_CR_MPUACC = 15	/* MPU Access */
};
/* XXX I would prefer to define these as static inline functions for
* type checking purposes. However if -O0 is used (i.e. CONFIG_DEBUG is on)
* we get errors "Control register number must be in range 0-31 for
* __builtin_rdctl" with the following code:
*
* static inline uint32_t z_nios2_creg_read(enum nios2_creg reg)
* {
* return __builtin_rdctl(reg);
* }
*
* This compiles just fine with -Os.
*/
/* Read/write a control register by index (see enum nios2_creg). Implemented
 * as macros rather than inline functions; see the explanation above.
 */
#define z_nios2_creg_read(reg) __builtin_rdctl(reg)
#define z_nios2_creg_write(reg, val) __builtin_wrctl(reg, val)

/* Compute the address of memory-mapped register 'regnum' of a peripheral
 * based at 'base', with registers spaced SYSTEM_BUS_WIDTH bits apart.
 */
#define z_nios2_get_register_address(base, regnum) \
	((void *)(((uint8_t *)base) + ((regnum) * (SYSTEM_BUS_WIDTH / 8))))
static inline void _nios2_reg_write(void *base, int regnum, uint32_t data)
{
sys_write32(data,
(mm_reg_t)z_nios2_get_register_address(base, regnum));
}
static inline uint32_t _nios2_reg_read(void *base, int regnum)
{
return sys_read32((mm_reg_t)z_nios2_get_register_address(base, regnum));
}
#endif /* _ASMLANGUAGE */
/*
* Nios II control registers that are always present
*/
/* Map the C macro names to the lower-case control register names, for use
 * in assembly sources.
 */
#define NIOS2_STATUS status
#define NIOS2_ESTATUS estatus
#define NIOS2_BSTATUS bstatus
#define NIOS2_IENABLE ienable
#define NIOS2_IPENDING ipending
#define NIOS2_CPUID cpuid
/*
* Bit masks & offsets for Nios II control registers.
* The presence and size of a field is sometimes dependent on the Nios II
* configuration. Bit masks for every possible field and the maximum size of
* that field are defined.
*
* All bit-masks are expressed relative to the position
* of the data with a register. To read data that is LSB-
* aligned, the register read data should be masked, then
* right-shifted by the designated "OFST" macro value. The
* opposite should be used for register writes when starting
* with LSB-aligned data.
*/
/* STATUS, ESTATUS, BSTATUS, and SSTATUS registers */
#define NIOS2_STATUS_PIE_MSK (0x00000001)
#define NIOS2_STATUS_PIE_OFST (0)
#define NIOS2_STATUS_U_MSK (0x00000002)
#define NIOS2_STATUS_U_OFST (1)
#define NIOS2_STATUS_EH_MSK (0x00000004)
#define NIOS2_STATUS_EH_OFST (2)
#define NIOS2_STATUS_IH_MSK (0x00000008)
#define NIOS2_STATUS_IH_OFST (3)
#define NIOS2_STATUS_IL_MSK (0x000003f0)
#define NIOS2_STATUS_IL_OFST (4)
#define NIOS2_STATUS_CRS_MSK (0x0000fc00)
#define NIOS2_STATUS_CRS_OFST (10)
#define NIOS2_STATUS_PRS_MSK (0x003f0000)
#define NIOS2_STATUS_PRS_OFST (16)
#define NIOS2_STATUS_NMI_MSK (0x00400000)
#define NIOS2_STATUS_NMI_OFST (22)
#define NIOS2_STATUS_RSIE_MSK (0x00800000)
#define NIOS2_STATUS_RSIE_OFST (23)
#define NIOS2_STATUS_SRS_MSK (0x80000000)
#define NIOS2_STATUS_SRS_OFST (31)
/* EXCEPTION register */
#define NIOS2_EXCEPTION_REG_CAUSE_MASK (0x0000007c)
#define NIOS2_EXCEPTION_REG_CAUSE_OFST (2)
#define NIOS2_EXCEPTION_REG_ECCFTL_MASK (0x80000000)
#define NIOS2_EXCEPTION_REG_ECCFTL_OFST (31)
/* PTEADDR (Page Table Entry Address) register */
#define NIOS2_PTEADDR_REG_VPN_OFST 2
#define NIOS2_PTEADDR_REG_VPN_MASK 0x3ffffc
#define NIOS2_PTEADDR_REG_PTBASE_OFST 22
#define NIOS2_PTEADDR_REG_PTBASE_MASK 0xffc00000
/* TLBACC (TLB Access) register */
#define NIOS2_TLBACC_REG_PFN_OFST 0
#define NIOS2_TLBACC_REG_PFN_MASK 0xfffff
#define NIOS2_TLBACC_REG_G_OFST 20
#define NIOS2_TLBACC_REG_G_MASK 0x100000
#define NIOS2_TLBACC_REG_X_OFST 21
#define NIOS2_TLBACC_REG_X_MASK 0x200000
#define NIOS2_TLBACC_REG_W_OFST 22
#define NIOS2_TLBACC_REG_W_MASK 0x400000
#define NIOS2_TLBACC_REG_R_OFST 23
#define NIOS2_TLBACC_REG_R_MASK 0x800000
#define NIOS2_TLBACC_REG_C_OFST 24
#define NIOS2_TLBACC_REG_C_MASK 0x1000000
#define NIOS2_TLBACC_REG_IG_OFST 25
#define NIOS2_TLBACC_REG_IG_MASK 0xfe000000
/* TLBMISC (TLB Miscellaneous) register */
#define NIOS2_TLBMISC_REG_D_OFST 0
#define NIOS2_TLBMISC_REG_D_MASK 0x1
#define NIOS2_TLBMISC_REG_PERM_OFST 1
#define NIOS2_TLBMISC_REG_PERM_MASK 0x2
#define NIOS2_TLBMISC_REG_BAD_OFST 2
#define NIOS2_TLBMISC_REG_BAD_MASK 0x4
#define NIOS2_TLBMISC_REG_DBL_OFST 3
#define NIOS2_TLBMISC_REG_DBL_MASK 0x8
#define NIOS2_TLBMISC_REG_PID_OFST 4
#define NIOS2_TLBMISC_REG_PID_MASK 0x3fff0
#define NIOS2_TLBMISC_REG_WE_OFST 18
#define NIOS2_TLBMISC_REG_WE_MASK 0x40000
#define NIOS2_TLBMISC_REG_RD_OFST 19
#define NIOS2_TLBMISC_REG_RD_MASK 0x80000
#define NIOS2_TLBMISC_REG_WAY_OFST 20
#define NIOS2_TLBMISC_REG_WAY_MASK 0xf00000
#define NIOS2_TLBMISC_REG_EE_OFST 24
#define NIOS2_TLBMISC_REG_EE_MASK 0x1000000
/* ECCINJ (ECC Inject) register */
#define NIOS2_ECCINJ_REG_RF_OFST 0
#define NIOS2_ECCINJ_REG_RF_MASK 0x3
#define NIOS2_ECCINJ_REG_ICTAG_OFST 2
#define NIOS2_ECCINJ_REG_ICTAG_MASK 0xc
#define NIOS2_ECCINJ_REG_ICDAT_OFST 4
#define NIOS2_ECCINJ_REG_ICDAT_MASK 0x30
#define NIOS2_ECCINJ_REG_DCTAG_OFST 6
#define NIOS2_ECCINJ_REG_DCTAG_MASK 0xc0
#define NIOS2_ECCINJ_REG_DCDAT_OFST 8
#define NIOS2_ECCINJ_REG_DCDAT_MASK 0x300
#define NIOS2_ECCINJ_REG_TLB_OFST 10
#define NIOS2_ECCINJ_REG_TLB_MASK 0xc00
#define NIOS2_ECCINJ_REG_DTCM0_OFST 12
#define NIOS2_ECCINJ_REG_DTCM0_MASK 0x3000
#define NIOS2_ECCINJ_REG_DTCM1_OFST 14
#define NIOS2_ECCINJ_REG_DTCM1_MASK 0xc000
#define NIOS2_ECCINJ_REG_DTCM2_OFST 16
#define NIOS2_ECCINJ_REG_DTCM2_MASK 0x30000
#define NIOS2_ECCINJ_REG_DTCM3_OFST 18
#define NIOS2_ECCINJ_REG_DTCM3_MASK 0xc0000
/* CONFIG register */
#define NIOS2_CONFIG_REG_PE_MASK (0x00000001)
#define NIOS2_CONFIG_REG_PE_OFST (0)
#define NIOS2_CONFIG_REG_ANI_MASK (0x00000002)
#define NIOS2_CONFIG_REG_ANI_OFST (1)
#define NIOS2_CONFIG_REG_ECCEN_MASK (0x00000004)
#define NIOS2_CONFIG_REG_ECCEN_OFST (2)
#define NIOS2_CONFIG_REG_ECCEXC_MASK (0x00000008)
#define NIOS2_CONFIG_REG_ECCEXC_OFST (3)
/* MPUBASE (MPU Base Address) Register */
#define NIOS2_MPUBASE_D_MASK (0x00000001)
#define NIOS2_MPUBASE_D_OFST (0)
#define NIOS2_MPUBASE_INDEX_MASK (0x0000003e)
#define NIOS2_MPUBASE_INDEX_OFST (1)
#define NIOS2_MPUBASE_BASE_ADDR_MASK (0xffffffc0)
#define NIOS2_MPUBASE_BASE_ADDR_OFST (6)
/* MPUACC (MPU Access) Register */
#define NIOS2_MPUACC_LIMIT_MASK (0xffffffc0)
#define NIOS2_MPUACC_LIMIT_OFST (6)
#define NIOS2_MPUACC_MASK_MASK (0xffffffc0)
#define NIOS2_MPUACC_MASK_OFST (6)
#define NIOS2_MPUACC_C_MASK (0x00000020)
#define NIOS2_MPUACC_C_OFST (5)
#define NIOS2_MPUACC_PERM_MASK (0x0000001c)
#define NIOS2_MPUACC_PERM_OFST (2)
#define NIOS2_MPUACC_RD_MASK (0x00000002)
#define NIOS2_MPUACC_RD_OFST (1)
#define NIOS2_MPUACC_WR_MASK (0x00000001)
#define NIOS2_MPUACC_WR_OFST (0)
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* ZEPHYR_INCLUDE_ARCH_NIOS2_NIOS2_H_ */
``` | /content/code_sandbox/include/zephyr/arch/nios2/nios2.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,226 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_NIOS2_ASM_INLINE_H_
#define ZEPHYR_INCLUDE_ARCH_NIOS2_ASM_INLINE_H_

/*
 * The file must not be included directly
 * Include kernel.h instead
 */

/* Select the toolchain-specific implementation of the inline helpers */
#if defined(__GNUC__)
#include <zephyr/arch/nios2/asm_inline_gcc.h>
#else
#include <arch/nios2/asm_inline_other.h>
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_NIOS2_ASM_INLINE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/nios2/asm_inline.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 102 |
```objective-c
/*
*
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_NIOS2_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_NIOS2_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
/* Register context preserved for a thread across a context switch:
 * the callee-saved general purpose registers plus swap bookkeeping.
 */
struct _callee_saved {
	/* General purpose callee-saved registers */
	uint32_t r16;
	uint32_t r17;
	uint32_t r18;
	uint32_t r19;
	uint32_t r20;
	uint32_t r21;
	uint32_t r22;
	uint32_t r23;

	/* Normally used for the frame pointer but also a general purpose
	 * register if frame pointers omitted
	 */
	uint32_t r28;

	/* Return address */
	uint32_t ra;

	/* Stack pointer */
	uint32_t sp;

	/* IRQ status before irq_lock() and call to z_swap() */
	uint32_t key;

	/* Return value of z_swap() */
	uint32_t retval;
};

typedef struct _callee_saved _callee_saved_t;
/* Architecture-specific per-thread data; the Nios II port currently
 * requires none.
 */
struct _thread_arch {
	/* nothing for now */
};

typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_NIOS2_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/nios2/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 294 |
```linker script
/*
*
*/
/* Set initial alignment to the 32 byte minimum for all MPUs */
_app_data_align = 32;
. = ALIGN(32);
``` | /content/code_sandbox/include/zephyr/arch/common/app_data_alignment.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 32 |
```objective-c
/*
*
*/
/* Memory bits manipulation functions in non-arch-specific C code */
#ifndef ZEPHYR_INCLUDE_ARCH_COMMON_SYS_BITOPS_H_
#define ZEPHYR_INCLUDE_ARCH_COMMON_SYS_BITOPS_H_
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <zephyr/types.h>
#include <zephyr/sys/sys_io.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Set a bit in a 32-bit word in memory.
 *
 * Non-atomic read-modify-write of the word at @p addr.
 *
 * @param addr address of the 32-bit word to modify
 * @param bit  bit position to set (0..31)
 */
static ALWAYS_INLINE void sys_set_bit(mem_addr_t addr, unsigned int bit)
{
	uint32_t temp = *(volatile uint32_t *)addr;

	/* Shift an unsigned constant: (1 << 31) on a signed int is
	 * undefined behavior in C.
	 */
	*(volatile uint32_t *)addr = temp | (1U << bit);
}
/**
 * @brief Clear a bit in a 32-bit word in memory.
 *
 * Non-atomic read-modify-write of the word at @p addr.
 *
 * @param addr address of the 32-bit word to modify
 * @param bit  bit position to clear (0..31)
 */
static ALWAYS_INLINE void sys_clear_bit(mem_addr_t addr, unsigned int bit)
{
	uint32_t temp = *(volatile uint32_t *)addr;

	/* Shift an unsigned constant: (1 << 31) on a signed int is
	 * undefined behavior in C.
	 */
	*(volatile uint32_t *)addr = temp & ~(1U << bit);
}
/**
 * @brief Test a bit in a 32-bit word in memory.
 *
 * @param addr address of the 32-bit word to examine
 * @param bit  bit position to test (0..31)
 *
 * @return non-zero if the bit is set, 0 otherwise
 */
static ALWAYS_INLINE int sys_test_bit(mem_addr_t addr, unsigned int bit)
{
	uint32_t temp = *(volatile uint32_t *)addr;

	/* Shift an unsigned constant: (1 << 31) on a signed int is
	 * undefined behavior in C.
	 */
	return temp & (1U << bit);
}
/**
 * @brief Set every bit of @p mask in the 32-bit word at @p addr.
 *
 * Non-atomic read-modify-write.
 *
 * @param addr address of the 32-bit word to modify
 * @param mask bits to set
 */
static ALWAYS_INLINE void sys_set_bits(mem_addr_t addr, unsigned int mask)
{
	volatile uint32_t *reg = (volatile uint32_t *)addr;

	*reg = *reg | mask;
}
/**
 * @brief Clear every bit of @p mask in the 32-bit word at @p addr.
 *
 * Non-atomic read-modify-write.
 *
 * @param addr address of the 32-bit word to modify
 * @param mask bits to clear
 */
static ALWAYS_INLINE void sys_clear_bits(mem_addr_t addr, unsigned int mask)
{
	volatile uint32_t *reg = (volatile uint32_t *)addr;

	*reg = *reg & ~mask;
}
/**
 * @brief Set bit number @p bit in a bit field starting at @p addr.
 *
 * @param addr base address of the bit field
 * @param bit  bit number within the field (may exceed 31)
 */
static ALWAYS_INLINE
void sys_bitfield_set_bit(mem_addr_t addr, unsigned int bit)
{
	/* Work in whole 32-bit words to avoid alignment issues */
	mem_addr_t word_addr = addr + ((bit / 32) * sizeof(uint32_t));

	sys_set_bit(word_addr, bit % 32);
}
/**
 * @brief Clear bit number @p bit in a bit field starting at @p addr.
 *
 * @param addr base address of the bit field
 * @param bit  bit number within the field (may exceed 31)
 */
static ALWAYS_INLINE
void sys_bitfield_clear_bit(mem_addr_t addr, unsigned int bit)
{
	/* Work in whole 32-bit words to avoid alignment issues */
	mem_addr_t word_addr = addr + ((bit / 32) * sizeof(uint32_t));

	sys_clear_bit(word_addr, bit % 32);
}
/**
 * @brief Test bit number @p bit in a bit field starting at @p addr.
 *
 * @param addr base address of the bit field
 * @param bit  bit number within the field (may exceed 31)
 *
 * @return non-zero if the bit is set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_bitfield_test_bit(mem_addr_t addr, unsigned int bit)
{
	/* Work in whole 32-bit words to avoid alignment issues */
	mem_addr_t word_addr = addr + ((bit / 32) * sizeof(uint32_t));

	return sys_test_bit(word_addr, bit % 32);
}
/**
 * @brief Test a bit, then set it (non-atomically).
 *
 * @param addr address of the 32-bit word to modify
 * @param bit  bit position (0..31)
 *
 * @return non-zero if the bit was previously set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_test_and_set_bit(mem_addr_t addr, unsigned int bit)
{
	int old = sys_test_bit(addr, bit);

	sys_set_bit(addr, bit);

	return old;
}
/**
 * @brief Test a bit, then clear it (non-atomically).
 *
 * @param addr address of the 32-bit word to modify
 * @param bit  bit position (0..31)
 *
 * @return non-zero if the bit was previously set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_test_and_clear_bit(mem_addr_t addr, unsigned int bit)
{
	int old = sys_test_bit(addr, bit);

	sys_clear_bit(addr, bit);

	return old;
}
/**
 * @brief Test a bit in a bit field, then set it (non-atomically).
 *
 * @param addr base address of the bit field
 * @param bit  bit number within the field (may exceed 31)
 *
 * @return non-zero if the bit was previously set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_bitfield_test_and_set_bit(mem_addr_t addr, unsigned int bit)
{
	int old = sys_bitfield_test_bit(addr, bit);

	sys_bitfield_set_bit(addr, bit);

	return old;
}
/**
 * @brief Test a bit in a bit field, then clear it (non-atomically).
 *
 * @param addr base address of the bit field
 * @param bit  bit number within the field (may exceed 31)
 *
 * @return non-zero if the bit was previously set, 0 otherwise
 */
static ALWAYS_INLINE
int sys_bitfield_test_and_clear_bit(mem_addr_t addr, unsigned int bit)
{
	int old = sys_bitfield_test_bit(addr, bit);

	sys_bitfield_clear_bit(addr, bit);

	return old;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_COMMON_SYS_BITOPS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/common/sys_bitops.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 677 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_COMMON_FFS_H_
#define ZEPHYR_INCLUDE_ARCH_COMMON_FFS_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
*
* @brief find most significant bit set in a 32-bit word
*
* This routine finds the first bit set starting from the most significant bit
* in the argument passed in and returns the index of that bit. Bits are
* numbered starting at 1 from the least significant bit. A return value of
* zero indicates that the value passed is zero.
*
* @return most significant bit set, 0 if @a op is 0
*/
static ALWAYS_INLINE unsigned int find_msb_set(uint32_t op)
{
	/* __builtin_clz() is undefined for an argument of 0, so handle
	 * that case explicitly.
	 */
	return (op == 0) ? 0 : (unsigned int)(32 - __builtin_clz(op));
}
/**
*
* @brief find least significant bit set in a 32-bit word
*
* This routine finds the first bit set starting from the least significant bit
* in the argument passed in and returns the index of that bit. Bits are
* numbered starting at 1 from the least significant bit. A return value of
* zero indicates that the value passed is zero.
*
* @return least significant bit set, 0 if @a op is 0
*/
static ALWAYS_INLINE unsigned int find_lsb_set(uint32_t op)
{
#ifdef CONFIG_TOOLCHAIN_HAS_BUILTIN_FFS
	return __builtin_ffs(op);
#else
	/*
	 * Toolchain does not have __builtin_ffs().
	 * Need to do this manually.
	 */
	int bit;

	if (op == 0) {
		return 0;
	}

	for (bit = 0; bit < 32; bit++) {
		/* Shift an unsigned constant: (1 << 31) on a signed int
		 * is undefined behavior in C.
		 */
		if ((op & (1U << bit)) != 0) {
			return (bit + 1);
		}
	}

	/*
	 * This should never happen but we need to keep
	 * compiler happy.
	 */
	return 0;
#endif /* CONFIG_TOOLCHAIN_HAS_BUILTIN_FFS */
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_COMMON_FFS_H_ */
``` | /content/code_sandbox/include/zephyr/arch/common/ffs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 476 |
```linker script
/*
*
*/
/**
* @file
* @brief Linker command/script file
*
* Linker script for the Nios II platform
*/
#include <zephyr/linker/sections.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
/* These sections are specific to this CPU */
#define _EXCEPTION_SECTION_NAME exceptions
#define _RESET_SECTION_NAME reset
/* This linker script requires the following macros to be defined in the
* SOC-specific linker script. All of these values can be found defined
* in system.h for CPU configurations that can generate a HAL.
*
* _RESET_VECTOR CPU entry point at boot
* _EXC_VECTOR General exception vector
* _ROM_ADDR Beginning of flash memory
* _ROM_SIZE Size in bytes of flash memory
* _RAM_ADDR Beginning of RAM
* _RAM_SIZE Size of RAM in bytes
*
* For now we support two scenarios:
*
* 1. Non-XIP systems where the reset vector is at the beginning of RAM
* with the exception vector 0x20 bytes after it.
* 2. XIP systems where the reset vector is at the beginning of ROM and
* the exception vector is in RAM
*/
#if defined(CONFIG_ROM_END_OFFSET)
#define ROM_END_OFFSET CONFIG_ROM_END_OFFSET
#else
#define ROM_END_OFFSET 0
#endif
#ifdef CONFIG_XIP
#define ROMABLE_REGION FLASH
#else
#define ROMABLE_REGION RAM
#endif
#define RAMABLE_REGION RAM
#ifdef CONFIG_XIP
ASSERT(_RESET_VECTOR == _ROM_ADDR, "Reset vector not at beginning of ROM!")
MEMORY
{
RESET (rx) : ORIGIN = _RESET_VECTOR, LENGTH = 0x20
FLASH (rx) : ORIGIN = _RESET_VECTOR + 0x20 , LENGTH = (_ROM_SIZE - 0x20 - ROM_END_OFFSET)
RAM (wx) : ORIGIN = _EXC_VECTOR, LENGTH = _RAM_SIZE - (_EXC_VECTOR - _RAM_ADDR)
/* Used by and documented in include/linker/intlist.ld */
IDT_LIST (wx) : ORIGIN = 0xFFFFF7FF, LENGTH = 2K
}
#else
MEMORY
{
RESET (wx) : ORIGIN = _RESET_VECTOR, LENGTH = 0x20
RAM (wx) : ORIGIN = _EXC_VECTOR, LENGTH = _RAM_SIZE - (_EXC_VECTOR - _RAM_ADDR)
/* Used by and documented in include/linker/intlist.ld */
IDT_LIST (wx) : ORIGIN = 0xFFFFF7FF, LENGTH = 2K
}
#endif
ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS
{
#include <zephyr/linker/rel-sections.ld>
#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
/*
* .plt and .iplt are here according to
* 'nios2-zephyr-elf-ld --verbose', before text section.
*/
SECTION_PROLOGUE(.plt,,)
{
*(.plt)
}
SECTION_PROLOGUE(.iplt,,)
{
*(.iplt)
}
GROUP_START(ROMABLE_REGION)
__rom_region_start = _ROM_ADDR;
SECTION_PROLOGUE(_RESET_SECTION_NAME,,)
{
KEEP(*(.reset.*))
} GROUP_LINK_IN(RESET)
#ifndef CONFIG_XIP
SECTION_PROLOGUE(_EXCEPTION_SECTION_NAME,,)
{
KEEP(*(".exception.entry.*"))
*(".exception.other.*")
} GROUP_LINK_IN(ROMABLE_REGION)
#endif
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
/* XXX: if ALT_CPU_RESET_ADDR is higher than _ROM_ADDR, the flash between
 * the two addresses is padded out and wasted -- worth confirming. */
. = ALT_CPU_RESET_ADDR;
__text_region_start = .;
*(.text)
*(".text.*")
*(.gnu.linkonce.t.*)
} GROUP_LINK_IN(ROMABLE_REGION)
__text_region_end = .;
#if defined(CONFIG_GP_ALL_DATA)
_gp = ABSOLUTE(. + 0x8000);
PROVIDE(gp = _gp);
#endif
__rodata_region_start = .;
#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
*/
#include <snippets-rom-sections.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
. = ALIGN(4);
*(.rodata)
*(".rodata.*")
*(.gnu.linkonce.r.*)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rodata.ld>
. = ALIGN(4);
} GROUP_LINK_IN(ROMABLE_REGION)
#include <zephyr/linker/cplusplus-rom.ld>
__rodata_region_end = .;
__rodata_region_size = __rodata_region_end - __rodata_region_start;
__rom_region_end = .;
__data_region_load_start = ALIGN(4); /* XIP imaged DATA ROM start addr */
GROUP_END(ROMABLE_REGION)
GROUP_START(RAMABLE_REGION)
#ifdef CONFIG_XIP
/* Altera strongly recommends keeping exception entry code in RAM
* even on XIP systems
*
* This is code not data, but we need this copied just like XIP data
*/
SECTION_DATA_PROLOGUE(_EXCEPTION_SECTION_NAME,,)
{
_image_ram_start = .;
__data_region_start = .;
KEEP(*(".exception.entry.*"))
*(".exception.other.*")
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif
#ifndef CONFIG_XIP
_image_ram_start = .;
#endif
#include <zephyr/linker/common-ram.ld>
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
__data_start = .;
*(.data)
*(".data.*")
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rwdata.ld>
/* the Nios2 architecture only has 16-bit signed immediate offsets in
* the instructions, so accessing a general address requires typically
* three instructions - basically, two for the two halves of the 32-bit
* address, and one to merge them - but if we can put the most commonly
* accessed globals in a special 64K span of memory addressed by the GP
* register, then we can access those values in a single instruction,
* saving both codespace and runtime.
*
* Since these immediate offsets are signed, place gp 0x8000 past the
* beginning of .sdata so that we can use both positive and negative
* offsets.
*/
#if defined(CONFIG_GP_LOCAL) || defined(CONFIG_GP_GLOBAL)
_gp = ABSOLUTE(. + 0x8000);
PROVIDE(gp = _gp);
#endif
*(.sdata .sdata.* .gnu.linkonce.s.*)
*(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
__data_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_size = __data_end - __data_start;
__data_load_start = LOADADDR(_DATA_SECTION_NAME);
#include <zephyr/linker/cplusplus-ram.ld>
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-data-sections.ld>
__data_region_end = .;
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
{
/*
* For performance, BSS section is assumed to be 4 byte aligned and
* a multiple of 4 bytes
*/
. = ALIGN(4);
__bss_start = .;
*(.sbss)
*(".sbss.*")
*(.bss)
*(".bss.*")
COMMON_SYMBOLS
/*
* As memory is cleared in words only, it is simpler to ensure the BSS
* section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
*/
__bss_end = ALIGN(4);
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
#include <zephyr/linker/common-noinit.ld>
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-sections.ld>
#include <zephyr/linker/ram-end.ld>
GROUP_END(RAMABLE_REGION)
#include <zephyr/linker/debug-sections.ld>
}
``` | /content/code_sandbox/include/zephyr/arch/nios2/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,972 |
```objective-c
/*
*/
/**
* @file
*
* @brief public S2RAM APIs.
* @defgroup pm_s2ram S2RAM APIs
* @ingroup subsys_pm
* @{
*/
#ifndef ZEPHYR_INCLUDE_ARCH_COMMON_PM_S2RAM_H_
#define ZEPHYR_INCLUDE_ARCH_COMMON_PM_S2RAM_H_
#ifdef _ASMLANGUAGE
GTEXT(arch_pm_s2ram_suspend);
#else
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief System off function
*
* This function is passed as argument and called by @ref arch_pm_s2ram_suspend
* to power the system off after the CPU context has been saved.
*
 * This function never returns if the system is successfully powered off. If
 * the power-off operation cannot be performed, an appropriate error value is
 * returned, and the implementation must restore the system to a fully
 * operational state before returning.
*
* @retval none The system is powered off.
* @retval -EBUSY The system is busy and cannot be powered off at this time.
* @retval -errno Other error codes.
*/
typedef int (*pm_s2ram_system_off_fn_t)(void);
/**
* @brief Save CPU context on suspend
*
* This function is used on suspend-to-RAM (S2RAM) to save the CPU context in
* (retained) RAM before powering the system off using the provided function.
* This function is usually called from the PM subsystem / hooks.
*
* The CPU context is usually the minimum set of CPU registers which content
* must be restored on resume to let the platform resume its execution from the
* point it left at the time of suspension.
*
* @param system_off Function to power off the system.
*
* @retval 0 The CPU context was successfully saved and restored.
* @retval -EBUSY The system is busy and cannot be suspended at this time.
* @retval -errno Negative errno code in case of failure.
*/
int arch_pm_s2ram_suspend(pm_s2ram_system_off_fn_t system_off);
/**
* @brief Mark that core is entering suspend-to-RAM state.
*
* Function is called when system state is stored to RAM, just before going to system
* off.
*
* Default implementation is setting a magic word in RAM. CONFIG_PM_S2RAM_CUSTOM_MARKING
* allows custom implementation.
*/
void pm_s2ram_mark_set(void);
/**
* @brief Check suspend-to-RAM marking and clear its state.
*
* Function is used to determine if resuming after suspend-to-RAM shall be performed
* or standard boot code shall be executed.
*
* Default implementation is checking a magic word in RAM. CONFIG_PM_S2RAM_CUSTOM_MARKING
* allows custom implementation.
*
* @retval true if marking is found which indicates resuming after suspend-to-RAM.
* @retval false if marking is not found which indicates standard boot.
*/
bool pm_s2ram_mark_check_and_clear(void);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_COMMON_PM_S2RAM_H_ */
``` | /content/code_sandbox/include/zephyr/arch/common/pm_s2ram.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 649 |
```objective-c
/* x86 address types (virtual, physical, etc) definitions */
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_ADDR_TYPES_H_
#define ZEPHYR_INCLUDE_ARCH_X86_ADDR_TYPES_H_
#ifndef _ASMLANGUAGE
typedef uintptr_t paddr_t;	/* physical address */
typedef void *vaddr_t;		/* virtual address */
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_X86_ADDR_TYPES_H_ */
``` | /content/code_sandbox/include/zephyr/arch/common/addr_types.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 81 |
```objective-c
/*
* Organisation (CSIRO) ABN 41 687 119 230.
*
*
* Based on the ARM semihosting API from:
* path_to_url
*
* RISC-V semihosting also follows these conventions:
* path_to_url
*/
/**
* @file
*
* @brief public Semihosting APIs based on ARM definitions.
* @defgroup semihost Semihosting APIs
* @ingroup os_services
* @{
*/
#ifndef ZEPHYR_INCLUDE_ARCH_COMMON_SEMIHOST_H_
#define ZEPHYR_INCLUDE_ARCH_COMMON_SEMIHOST_H_
/** @brief Semihosting instructions */
enum semihost_instr {
/*
* File I/O operations
*/
/** Open a file or stream on the host system. */
SEMIHOST_OPEN = 0x01,
/** Check whether a file is associated with a stream/terminal */
SEMIHOST_ISTTY = 0x09,
/** Write to a file or stream. */
SEMIHOST_WRITE = 0x05,
/** Read from a file at the current cursor position. */
SEMIHOST_READ = 0x06,
/** Closes a file on the host which has been opened by SEMIHOST_OPEN. */
SEMIHOST_CLOSE = 0x02,
/** Get the length of a file. */
SEMIHOST_FLEN = 0x0C,
/** Set the file cursor to a given position in a file. */
SEMIHOST_SEEK = 0x0A,
/** Get a temporary absolute file path to create a temporary file. */
SEMIHOST_TMPNAM = 0x0D,
/** Remove a file on the host system. Possibly insecure! */
SEMIHOST_REMOVE = 0x0E,
/** Rename a file on the host system. Possibly insecure! */
SEMIHOST_RENAME = 0x0F,
/*
* Terminal I/O operations
*/
/** Write one character to the debug terminal. */
SEMIHOST_WRITEC = 0x03,
/** Write a NULL terminated string to the debug terminal. */
SEMIHOST_WRITE0 = 0x04,
/** Read one character from the debug terminal. */
SEMIHOST_READC = 0x07,
/*
* Time operations
*/
SEMIHOST_CLOCK = 0x10,
SEMIHOST_ELAPSED = 0x30,
SEMIHOST_TICKFREQ = 0x31,
SEMIHOST_TIME = 0x11,
/*
* System/Misc. operations
*/
/** Retrieve the errno variable from semihosting operations. */
SEMIHOST_ERRNO = 0x13,
/** Get commandline parameters for the application to run with */
SEMIHOST_GET_CMDLINE = 0x15,
SEMIHOST_HEAPINFO = 0x16,
SEMIHOST_ISERROR = 0x08,
SEMIHOST_SYSTEM = 0x12
};
/**
* @brief Modes to open a file with
*
* Behaviour corresponds to equivalent fopen strings.
* i.e. SEMIHOST_OPEN_RB_PLUS == "rb+"
*/
enum semihost_open_mode {
SEMIHOST_OPEN_R = 0,
SEMIHOST_OPEN_RB = 1,
SEMIHOST_OPEN_R_PLUS = 2,
SEMIHOST_OPEN_RB_PLUS = 3,
SEMIHOST_OPEN_W = 4,
SEMIHOST_OPEN_WB = 5,
SEMIHOST_OPEN_W_PLUS = 6,
SEMIHOST_OPEN_WB_PLUS = 7,
SEMIHOST_OPEN_A = 8,
SEMIHOST_OPEN_AB = 9,
SEMIHOST_OPEN_A_PLUS = 10,
SEMIHOST_OPEN_AB_PLUS = 11,
};
/**
* @brief Manually execute a semihosting instruction
*
* @param instr instruction code to run
* @param args instruction specific arguments
*
* @return integer return code of instruction
*/
long semihost_exec(enum semihost_instr instr, void *args);
/**
* @brief Read a byte from the console
*
* @return char byte read from the console.
*/
char semihost_poll_in(void);
/**
* @brief Write a byte to the console
*
* @param c byte to write to console
*/
void semihost_poll_out(char c);
/**
* @brief Open a file on the host system
*
* @param path file path to open. Can be absolute or relative to current
* directory of the running process.
* @param mode value from @ref semihost_open_mode.
*
* @retval handle positive handle on success.
* @retval -1 on failure.
*/
long semihost_open(const char *path, long mode);
/**
* @brief Close a file
*
* @param fd handle returned by @ref semihost_open.
*
* @retval 0 on success.
* @retval -1 on failure.
*/
long semihost_close(long fd);
/**
* @brief Query the size of a file
*
* @param fd handle returned by @ref semihost_open.
*
* @retval positive file size on success.
* @retval -1 on failure.
*/
long semihost_flen(long fd);
/**
* @brief Seeks to an absolute position in a file.
*
* @param fd handle returned by @ref semihost_open.
* @param offset offset from the start of the file in bytes.
*
* @retval 0 on success.
* @retval -errno negative error code on failure.
*/
long semihost_seek(long fd, long offset);
/**
* @brief Read the contents of a file into a buffer.
*
* @param fd handle returned by @ref semihost_open.
* @param buf buffer to read data into.
* @param len number of bytes to read.
*
* @retval read number of bytes read on success.
* @retval -errno negative error code on failure.
*/
long semihost_read(long fd, void *buf, long len);
/**
* @brief Write the contents of a buffer into a file.
*
* @param fd handle returned by @ref semihost_open.
* @param buf buffer to write data from.
* @param len number of bytes to write.
*
* @retval 0 on success.
* @retval -errno negative error code on failure.
*/
long semihost_write(long fd, const void *buf, long len);
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_ARCH_COMMON_SEMIHOST_H_ */
``` | /content/code_sandbox/include/zephyr/arch/common/semihost.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,405 |
```objective-c
/*
*
*/
/* Memory mapped registers I/O functions in non-arch-specific C code */
#ifndef ZEPHYR_INCLUDE_ARCH_COMMON_SYS_IO_H_
#define ZEPHYR_INCLUDE_ARCH_COMMON_SYS_IO_H_
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <zephyr/types.h>
#include <zephyr/sys/sys_io.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Read an 8-bit value from the given memory-mapped address */
static ALWAYS_INLINE uint8_t sys_read8(mem_addr_t addr)
{
	volatile uint8_t *reg = (volatile uint8_t *)addr;

	return *reg;
}

/* Write an 8-bit value to the given memory-mapped address */
static ALWAYS_INLINE void sys_write8(uint8_t data, mem_addr_t addr)
{
	volatile uint8_t *reg = (volatile uint8_t *)addr;

	*reg = data;
}
/* Read a 16-bit value from the given memory-mapped address */
static ALWAYS_INLINE uint16_t sys_read16(mem_addr_t addr)
{
	volatile uint16_t *reg = (volatile uint16_t *)addr;

	return *reg;
}

/* Write a 16-bit value to the given memory-mapped address */
static ALWAYS_INLINE void sys_write16(uint16_t data, mem_addr_t addr)
{
	volatile uint16_t *reg = (volatile uint16_t *)addr;

	*reg = data;
}
/* Read a 32-bit value from the given memory-mapped address */
static ALWAYS_INLINE uint32_t sys_read32(mem_addr_t addr)
{
	volatile uint32_t *reg = (volatile uint32_t *)addr;

	return *reg;
}

/* Write a 32-bit value to the given memory-mapped address */
static ALWAYS_INLINE void sys_write32(uint32_t data, mem_addr_t addr)
{
	volatile uint32_t *reg = (volatile uint32_t *)addr;

	*reg = data;
}
/* Read a 64-bit value from the given memory-mapped address */
static ALWAYS_INLINE uint64_t sys_read64(mem_addr_t addr)
{
	volatile uint64_t *reg = (volatile uint64_t *)addr;

	return *reg;
}

/* Write a 64-bit value to the given memory-mapped address */
static ALWAYS_INLINE void sys_write64(uint64_t data, mem_addr_t addr)
{
	volatile uint64_t *reg = (volatile uint64_t *)addr;

	*reg = data;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_COMMON_SYS_IO_H_ */
``` | /content/code_sandbox/include/zephyr/arch/common/sys_io.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 335 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_EXPCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_POSIX_EXPCEPTION_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Exception stack frame for the POSIX architecture; currently holds only a
 * placeholder member.
 */
struct arch_esf {
	uint32_t dummy; /* maybe we will want to add something someday */
};
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_POSIX_EXPCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/posix/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 105 |
```objective-c
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_EXC_HANDLE_H_
#define ZEPHYR_INCLUDE_EXC_HANDLE_H_
/*
 * This is used by some architectures to define code ranges which may
 * perform operations that could generate a CPU exception that should not
 * be fatal. Instead, the exception should return but set the program
 * counter to a 'fixup' memory address which will gracefully error out.
 *
 * For example, in the case where user mode passes in a C string via
 * system call, the length of that string needs to be measured. A specially
 * written assembly language version of strlen (arch_user_string_len)
 * defines start and end symbols where the memory in the string is examined;
 * if this generates a fault, jumping to the fixup symbol within the same
 * function will return an error result to the caller.
 *
 * To ensure precise control of the state of registers and the stack pointer,
 * these functions need to be written in assembly.
 *
 * The arch-specific fault handling code will define an array of these
 * z_exc_handle structures and return from the exception with the PC updated
 * to the fixup address if a match is found.
 */
/* One recoverable code range and its fixup landing address. */
struct z_exc_handle {
void *start;  /* first address of the guarded code range */
void *end;    /* end address of the guarded code range */
void *fixup;  /* address the PC is redirected to on a fault inside [start, end] */
};
/* Brace initializer for a struct z_exc_handle built from the three
 * <name>_fault_start / <name>_fault_end / <name>_fixup symbols.
 */
#define Z_EXC_HANDLE(name) \
{ name ## _fault_start, name ## _fault_end, name ## _fixup }
/* Declares the start/end/fixup symbols (implemented in assembly) for <name>. */
#define Z_EXC_DECLARE(name) \
void name ## _fault_start(void); \
void name ## _fault_end(void); \
void name ## _fixup(void)
#endif /* ZEPHYR_INCLUDE_EXC_HANDLE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/common/exc_handle.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 341 |
```objective-c
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_POSIX_TRACE_H_
#define ZEPHYR_INCLUDE_ARCH_POSIX_POSIX_TRACE_H_
#include <stdarg.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Trace/print hooks used by the POSIX arch. Implementations are provided
 * elsewhere (presumably by the board/runner — TODO confirm).
 * First the va_list variants, then the printf-style variadic wrappers.
 */
void posix_vprint_error_and_exit(const char *format, va_list vargs);
void posix_vprint_warning(const char *format, va_list vargs);
void posix_vprint_trace(const char *format, va_list vargs);
void posix_print_error_and_exit(const char *format, ...);
void posix_print_warning(const char *format, ...);
void posix_print_trace(const char *format, ...);
/*
 * Return 1 if traces to <output> will go to a tty.
 * When printing to a terminal we may use ASCII escapes for color or other
 * niceties.
 * But when redirecting to files, or piping to other commands, those should be
 * disabled by default.
 *
 * Where the <output> should be set to 0 to query about posix_print_trace output
 * (typically STDOUT)
 * and 1 to query about the warning and error output (posix_print_error/warning)
 * outputs (typically STDERR)
 */
int posix_trace_over_tty(int output);
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/arch/posix/posix_trace.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 260 |
```linker script
/*
 *
 */
/*
 * Expands to the input-section list for one native task level.
 * The three KEEP/SORT patterns cover 1-, 2- and 3-digit priority suffixes
 * so multi-digit priorities are placed after single-digit ones.
 *
 * NOTE(review): the trailing '\' on the last KEEP line continues the macro
 * onto the SECTION_PROLOGUE line below — it looks like a separating blank
 * line was lost; verify against the upstream file.
 */
#define NATIVE_INIT_LEVEL(level) \
__native_##level##_tasks_start = .; \
KEEP(*(SORT(.native_##level[0-9]_task))); \
KEEP(*(SORT(.native_##level[1-9][0-9]_task))); \
KEEP(*(SORT(.native_##level[1-9][0-9][0-9]_task))); \
SECTION_PROLOGUE (native_pre_tasks,,)
{
__native_tasks_start = .;
NATIVE_INIT_LEVEL(PRE_BOOT_1)
NATIVE_INIT_LEVEL(PRE_BOOT_2)
NATIVE_INIT_LEVEL(PRE_BOOT_3)
NATIVE_INIT_LEVEL(FIRST_SLEEP)
NATIVE_INIT_LEVEL(ON_EXIT)
__native_tasks_end = .;
}
``` | /content/code_sandbox/include/zephyr/arch/posix/native_tasks.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 175 |
```objective-c
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_ARCH_INLINES_H
#define ZEPHYR_INCLUDE_ARCH_POSIX_ARCH_INLINES_H
#include <zephyr/kernel_structs.h>
/* The CPU count is a build-time constant on the POSIX arch. */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
return CONFIG_MP_MAX_NUM_CPUS;
}
#endif /* ZEPHYR_INCLUDE_ARCH_POSIX_ARCH_INLINES_H */
``` | /content/code_sandbox/include/zephyr/arch/posix/arch_inlines.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 80 |
```objective-c
/*
 *
 */
/**
 * @file
 * @brief POSIX arch specific kernel interface header
 * This header contains the POSIX arch specific kernel interface.
 * It is included by the generic kernel interface header (include/arch/cpu.h)
 *
 */
#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_POSIX_ARCH_H_
/* Add include for DTS generated information */
#include <zephyr/devicetree.h>
#include <zephyr/toolchain.h>
#include <zephyr/irq.h>
#include <zephyr/arch/posix/exception.h>
#include <zephyr/arch/posix/asm_inline.h>
#include <zephyr/arch/posix/thread.h>
#include <board_irq.h> /* Each board must define this */
#include <zephyr/sw_isr_table.h>
#include <zephyr/arch/posix/posix_soc_if.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Stack pointer alignment: native pointer size of the host build */
#ifdef CONFIG_64BIT
#define ARCH_STACK_PTR_ALIGN 8
#else
#define ARCH_STACK_PTR_ALIGN 4
#endif
extern uint32_t sys_clock_cycle_get_32(void);
/** @return current system clock cycle counter (32 bit) */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}
extern uint64_t sys_clock_cycle_get_64(void);
/** @return current system clock cycle counter (64 bit) */
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
/** Architecture no-op */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}
/**
 * @return true if @p key indicates interrupts were unlocked when it was taken
 *
 * Fix: compare against 0 rather than the bool literal `false` — @p key is an
 * unsigned integer, not a bool (behavior is identical; idiom corrected).
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return key == 0;
}
/** Lock interrupts, returning a key for arch_irq_unlock() */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	return posix_irq_lock();
}
/** Restore the interrupt state captured by arch_irq_lock() */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	posix_irq_unlock(key);
}
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_POSIX_ARCH_H_ */
``` | /content/code_sandbox/include/zephyr/arch/posix/arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 385 |
```linker script
/*
 *
 */
/* Gather all .native_sim_if* input sections; KEEP prevents the linker from
 * garbage-collecting them even if nothing references them directly.
 */
SECTION_PROLOGUE (.native_sim_if,,)
{
KEEP(*(.native_sim_if));
KEEP(*(.native_sim_if.*));
}
``` | /content/code_sandbox/include/zephyr/arch/posix/native_sim_interface.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 33 |
```objective-c
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_POSIX_SOC_IF_H_
#define ZEPHYR_INCLUDE_ARCH_POSIX_POSIX_SOC_IF_H_
/*
 * This file lists the functions the POSIX architecture core expects the
 * SOC or board will provide
 *
 * All functions listed here must be provided by the implementation of the SOC
 * or all its boards
 */
#include <zephyr/arch/posix/posix_trace.h>
#include "soc_irq.h" /* Must exist and define _ARCH_IRQ/ISR_* macros */
#ifdef __cplusplus
extern "C" {
#endif
/* CPU halt hooks */
void posix_halt_cpu(void);
void posix_atomic_halt_cpu(unsigned int imask);
/* Per-IRQ enable/disable/query */
void posix_irq_enable(unsigned int irq);
void posix_irq_disable(unsigned int irq);
int posix_irq_is_enabled(unsigned int irq);
/* Global interrupt locking; posix_irq_lock() returns a key for unlock */
unsigned int posix_irq_lock(void);
void posix_irq_unlock(unsigned int key);
void posix_irq_full_unlock(void);
int posix_get_current_irq(void);
/* Software-pended IRQ control */
void posix_sw_set_pending_IRQ(unsigned int IRQn);
void posix_sw_clear_pending_IRQ(unsigned int IRQn);
#ifdef CONFIG_IRQ_OFFLOAD
/* IRQ offload hook (only when CONFIG_IRQ_OFFLOAD is enabled) */
void posix_irq_offload(void (*routine)(const void *), const void *parameter);
#endif
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_POSIX_POSIX_SOC_IF_H_ */
``` | /content/code_sandbox/include/zephyr/arch/posix/posix_soc_if.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 267 |
```objective-c
/*
 *
 */
/*
 * POSIX ARCH specific public inline "assembler" functions and macros
 */
/* Either public functions or macros or invoked by public functions */
#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_ASM_INLINE_GCC_H_
#define ZEPHYR_INCLUDE_ARCH_POSIX_ASM_INLINE_GCC_H_
/*
 * The file must not be included directly
 * Include kernel.h instead
 */
#ifndef _ASMLANGUAGE
/* No POSIX-specific implementations: everything comes from the generic
 * arch/common helpers plus the SOC interface declarations.
 */
#include <zephyr/toolchain/common.h>
#include <zephyr/types.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/sys_io.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/arch/posix/posix_soc_if.h>
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_POSIX_ASM_INLINE_GCC_H_ */
``` | /content/code_sandbox/include/zephyr/arch/posix/asm_inline_gcc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 176 |
```objective-c
/*
 *
 */
/**
 * @file
 * @brief Per-arch thread definition
 *
 * This file contains definitions for
 *
 *  struct _thread_arch
 *  struct _callee_saved
 *
 * necessary to instantiate instances of struct k_thread.
 */
#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_POSIX_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Callee-saved context for a POSIX-arch thread across a context switch. */
struct _callee_saved {
/* IRQ status before irq_lock() and call to z_swap() */
uint32_t key;
/* Return value of z_swap() */
uint32_t retval;
/* Thread status pointer */
void *thread_status;
};
/* Per-thread arch-specific data; the POSIX arch needs none. */
struct _thread_arch {
/* nothing for now */
int dummy;
};
typedef struct _thread_arch _thread_arch_t;
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_POSIX_THREAD_H_ */
``` | /content/code_sandbox/include/zephyr/arch/posix/thread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 206 |
```objective-c
/* POSIX inline "assembler" functions and macros for public functions */
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_ASM_INLINE_H_
#define ZEPHYR_INCLUDE_ARCH_POSIX_ASM_INLINE_H_
/*
 * The file must not be included directly
 * Include kernel.h instead
 */
/* Dispatch to the toolchain-specific implementation; only compilers with
 * GNU C extensions are supported for the POSIX arch.
 */
#if defined(__GNUC__)
#include <zephyr/arch/posix/asm_inline_gcc.h>
#else
#error "Only a compiler with GNU C extensions is supported for the POSIX arch"
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_POSIX_ASM_INLINE_H_ */
``` | /content/code_sandbox/include/zephyr/arch/posix/asm_inline.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 116 |
```objective-c
/*
 *
 */
/**
 * @file
 * @brief SPARC arch exception stack frame definition.
 */
/* Fix: guard spelling corrected from "EXPCEPTION" to "EXCEPTION"; the guard
 * macro is only used for include protection, so the rename is safe.
 */
#ifndef ZEPHYR_INCLUDE_ARCH_SPARC_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_SPARC_EXCEPTION_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* CPU state captured on an exception. Field names follow the SPARC V8
 * register set (psr/wim/tbr/y per the architecture manual — confirm against
 * the arch fault-handling code for the exact save order).
 */
struct arch_esf {
	uint32_t out[8];    /* %o0-%o7 output registers */
	uint32_t global[8]; /* %g0-%g7 global registers */
	uint32_t psr;       /* processor state register */
	uint32_t pc;        /* program counter */
	uint32_t npc;       /* next program counter */
	uint32_t wim;       /* window invalid mask */
	uint32_t tbr;       /* trap base register */
	uint32_t y;         /* Y register */
};
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_SPARC_EXCEPTION_H_ */
``` | /content/code_sandbox/include/zephyr/arch/sparc/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 140 |
```linker script
/*
 *
 */
/**
 * @file
 * @brief Linker command/script file
 *
 * Linker script for the POSIX (native) platform
 */
#include <zephyr/linker/sections.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
/* First SECTIONS group: Zephyr ROM/RAM sections, appended after the host
 * toolchain's default .data (see INSERT AFTER below).
 */
SECTIONS
{
#ifdef CONFIG_LLEXT
#include <zephyr/linker/llext-sections.ld>
#endif
SECTION_PROLOGUE(rom_start,,)
{
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-rom-start.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
#include <zephyr/linker/common-rom.ld>
/* Located in generated directory. This file is populated by calling
 * zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs.
 */
#include <snippets-rom-sections.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-rodata.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-rwdata.ld>
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#include <zephyr/linker/common-ram.ld>
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-ram-sections.ld>
#include <zephyr/arch/posix/native_tasks.ld>
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-data-sections.ld>
__data_region_end = .;
SECTION_DATA_PROLOGUE(_NOINIT_SECTION_NAME,,)
{
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-noinit.ld>
} GROUP_LINK_IN(RAMABLE_REGION)
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-sections.ld>
} INSERT AFTER .data;
/* Second SECTIONS group: native_sim interface section, appended after .text */
SECTIONS
{
#include <zephyr/arch/posix/native_sim_interface.ld>
} INSERT AFTER .text;
/*
 * Note that the INSERT command actually changes the meaning of the -T command
 * line switch: The script will now augment the default SECTIONS instead of
 * replacing it.
 */
``` | /content/code_sandbox/include/zephyr/arch/posix/linker.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 596 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.