text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```objective-c
/*
*
*/
/**
* @file
* @brief Ethernet Management interface public header
*/
#ifndef ZEPHYR_INCLUDE_NET_ETHERNET_MGMT_H_
#define ZEPHYR_INCLUDE_NET_ETHERNET_MGMT_H_
#include <zephyr/net/ethernet.h>
#include <zephyr/net/net_mgmt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Ethernet library
* @defgroup ethernet_mgmt Ethernet Library
* @since 1.12
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
#define _NET_ETHERNET_LAYER NET_MGMT_LAYER_L2
#define _NET_ETHERNET_CODE 0x208
#define _NET_ETHERNET_BASE (NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_ETHERNET_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_ETHERNET_CODE))
#define _NET_ETHERNET_EVENT (_NET_ETHERNET_BASE | NET_MGMT_EVENT_BIT)
/** Ethernet management request command identifiers.
 *
 * Each value is OR'ed into _NET_ETHERNET_BASE to form the full
 * NET_REQUEST_ETHERNET_* request code defined below.
 */
enum net_request_ethernet_cmd {
NET_REQUEST_ETHERNET_CMD_SET_AUTO_NEGOTIATION = 1, /**< Set auto-negotiation */
NET_REQUEST_ETHERNET_CMD_SET_LINK, /**< Set link speed */
NET_REQUEST_ETHERNET_CMD_SET_DUPLEX, /**< Set duplex mode */
NET_REQUEST_ETHERNET_CMD_SET_MAC_ADDRESS, /**< Set MAC address */
NET_REQUEST_ETHERNET_CMD_SET_QAV_PARAM, /**< Set Qav parameter */
NET_REQUEST_ETHERNET_CMD_SET_QBV_PARAM, /**< Set Qbv parameter */
NET_REQUEST_ETHERNET_CMD_SET_QBU_PARAM, /**< Set Qbu parameter */
NET_REQUEST_ETHERNET_CMD_SET_TXTIME_PARAM, /**< Set TXTIME parameter */
NET_REQUEST_ETHERNET_CMD_SET_PROMISC_MODE, /**< Set promiscuous mode */
NET_REQUEST_ETHERNET_CMD_GET_PRIORITY_QUEUES_NUM, /**< Get number of priority queues */
NET_REQUEST_ETHERNET_CMD_GET_QAV_PARAM, /**< Get Qav parameter */
NET_REQUEST_ETHERNET_CMD_GET_PORTS_NUM, /**< Get number of ports */
NET_REQUEST_ETHERNET_CMD_GET_QBV_PARAM, /**< Get Qbv parameter */
NET_REQUEST_ETHERNET_CMD_GET_QBU_PARAM, /**< Get Qbu parameter */
NET_REQUEST_ETHERNET_CMD_GET_TXTIME_PARAM, /**< Get TXTIME parameter */
NET_REQUEST_ETHERNET_CMD_SET_T1S_PARAM, /**< Set T1S parameter */
NET_REQUEST_ETHERNET_CMD_SET_TXINJECTION_MODE, /**< Set TX-injection mode */
NET_REQUEST_ETHERNET_CMD_GET_TXINJECTION_MODE, /**< Get TX-injection mode */
NET_REQUEST_ETHERNET_CMD_SET_MAC_FILTER, /**< Set MAC address filter */
};
/* For each request below, the #define composes the full management request
 * code (_NET_ETHERNET_BASE | command) and NET_MGMT_DEFINE_REQUEST_HANDLER
 * declares the corresponding net_mgmt request handler hook.
 */

/** Request to set auto-negotiation */
#define NET_REQUEST_ETHERNET_SET_AUTO_NEGOTIATION \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_AUTO_NEGOTIATION)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_AUTO_NEGOTIATION);
/** Request to set link parameters */
#define NET_REQUEST_ETHERNET_SET_LINK \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_LINK)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_LINK);
/** Request to set duplex mode */
#define NET_REQUEST_ETHERNET_SET_DUPLEX \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_DUPLEX)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_DUPLEX);
/** Request to set MAC address */
#define NET_REQUEST_ETHERNET_SET_MAC_ADDRESS \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_MAC_ADDRESS)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_MAC_ADDRESS);
/** Request to set Qav parameter */
#define NET_REQUEST_ETHERNET_SET_QAV_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_QAV_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_QAV_PARAM);
/** Request to get number of ports */
#define NET_REQUEST_ETHERNET_GET_PORTS_NUM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_GET_PORTS_NUM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_PORTS_NUM);
/** Request to set Qbv parameter */
#define NET_REQUEST_ETHERNET_SET_QBV_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_QBV_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_QBV_PARAM);
/** Request to set Qbu parameter */
#define NET_REQUEST_ETHERNET_SET_QBU_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_QBU_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_QBU_PARAM);
/** Request to set TXTIME parameter */
#define NET_REQUEST_ETHERNET_SET_TXTIME_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_TXTIME_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_TXTIME_PARAM);
/** Request to set promiscuous mode */
#define NET_REQUEST_ETHERNET_SET_PROMISC_MODE \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_PROMISC_MODE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_PROMISC_MODE);
/** Request to get number of priority queues */
#define NET_REQUEST_ETHERNET_GET_PRIORITY_QUEUES_NUM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_GET_PRIORITY_QUEUES_NUM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_PRIORITY_QUEUES_NUM);
/** Request to get Qav parameter */
#define NET_REQUEST_ETHERNET_GET_QAV_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_GET_QAV_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_QAV_PARAM);
/** Request to get Qbv parameter */
#define NET_REQUEST_ETHERNET_GET_QBV_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_GET_QBV_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_QBV_PARAM);
/** Request to get Qbu parameter */
#define NET_REQUEST_ETHERNET_GET_QBU_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_GET_QBU_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_QBU_PARAM);
/** Request to get TXTIME parameter */
#define NET_REQUEST_ETHERNET_GET_TXTIME_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_GET_TXTIME_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_TXTIME_PARAM);
/** Request to set T1S parameter */
#define NET_REQUEST_ETHERNET_SET_T1S_PARAM \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_T1S_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_T1S_PARAM);
/** Request to set TX-injection mode */
#define NET_REQUEST_ETHERNET_SET_TXINJECTION_MODE \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_TXINJECTION_MODE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_TXINJECTION_MODE);
/** Request to get TX-injection mode */
#define NET_REQUEST_ETHERNET_GET_TXINJECTION_MODE \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_GET_TXINJECTION_MODE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_TXINJECTION_MODE);
/** Request to set MAC address filter */
#define NET_REQUEST_ETHERNET_SET_MAC_FILTER \
(_NET_ETHERNET_BASE | NET_REQUEST_ETHERNET_CMD_SET_MAC_FILTER)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_MAC_FILTER);
/* Forward declarations; full definitions are provided by
 * <zephyr/net/ethernet.h>.
 */
struct net_eth_addr;
struct ethernet_qav_param;
struct ethernet_qbv_param;
struct ethernet_qbu_param;
struct ethernet_txtime_param;
/** Parameter payload carried by NET_REQUEST_ETHERNET_* management requests.
 *
 * Only the union member matching the request command is meaningful.
 */
struct ethernet_req_params {
union {
bool auto_negotiation; /**< Used by SET_AUTO_NEGOTIATION */
bool full_duplex; /**< Used by SET_DUPLEX */
bool promisc_mode; /**< Used by SET_PROMISC_MODE */
bool txinjection_mode; /**< Used by SET/GET_TXINJECTION_MODE */
/** Link speed selection, used by SET_LINK */
struct {
bool link_10bt; /**< 10BASE-T */
bool link_100bt; /**< 100BASE-T */
bool link_1000bt; /**< 1000BASE-T */
} l;
struct net_eth_addr mac_address; /**< Used by SET_MAC_ADDRESS */
struct ethernet_qav_param qav_param; /**< Used by SET/GET_QAV_PARAM */
struct ethernet_qbv_param qbv_param; /**< Used by SET/GET_QBV_PARAM */
struct ethernet_qbu_param qbu_param; /**< Used by SET/GET_QBU_PARAM */
struct ethernet_txtime_param txtime_param; /**< Used by SET/GET_TXTIME_PARAM */
struct ethernet_t1s_param t1s_param; /**< Used by SET_T1S_PARAM */
struct ethernet_filter filter; /**< Used by SET_MAC_FILTER */
int priority_queues_num; /**< Used by GET_PRIORITY_QUEUES_NUM */
int ports_num; /**< Used by GET_PORTS_NUM */
};
};
/** Ethernet management event command identifiers.
 *
 * Each value is OR'ed into _NET_ETHERNET_EVENT to form the full
 * NET_EVENT_ETHERNET_* event code defined below.
 */
enum net_event_ethernet_cmd {
NET_EVENT_ETHERNET_CMD_CARRIER_ON = 1, /**< Carrier detected */
NET_EVENT_ETHERNET_CMD_CARRIER_OFF, /**< Carrier lost */
NET_EVENT_ETHERNET_CMD_VLAN_TAG_ENABLED, /**< VLAN tag enabled */
NET_EVENT_ETHERNET_CMD_VLAN_TAG_DISABLED, /**< VLAN tag disabled */
};
/** Event raised when carrier is detected */
#define NET_EVENT_ETHERNET_CARRIER_ON \
(_NET_ETHERNET_EVENT | NET_EVENT_ETHERNET_CMD_CARRIER_ON)
/** Event raised when carrier is lost */
#define NET_EVENT_ETHERNET_CARRIER_OFF \
(_NET_ETHERNET_EVENT | NET_EVENT_ETHERNET_CMD_CARRIER_OFF)
/** Event raised when a VLAN tag is enabled */
#define NET_EVENT_ETHERNET_VLAN_TAG_ENABLED \
(_NET_ETHERNET_EVENT | NET_EVENT_ETHERNET_CMD_VLAN_TAG_ENABLED)
/** Event raised when a VLAN tag is disabled */
#define NET_EVENT_ETHERNET_VLAN_TAG_DISABLED \
(_NET_ETHERNET_EVENT | NET_EVENT_ETHERNET_CMD_VLAN_TAG_DISABLED)
struct net_if;
/** @endcond */
/**
* @brief Raise CARRIER_ON event when Ethernet is connected.
*
* @param iface Ethernet network interface.
*/
#if defined(CONFIG_NET_L2_ETHERNET_MGMT)
void ethernet_mgmt_raise_carrier_on_event(struct net_if *iface);
#else
/* No-op stub used when Ethernet management support is not compiled in. */
static inline void ethernet_mgmt_raise_carrier_on_event(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
/**
* @brief Raise CARRIER_OFF event when Ethernet is disconnected.
*
* @param iface Ethernet network interface.
*/
#if defined(CONFIG_NET_L2_ETHERNET_MGMT)
void ethernet_mgmt_raise_carrier_off_event(struct net_if *iface);
#else
/* No-op stub used when Ethernet management support is not compiled in. */
static inline void ethernet_mgmt_raise_carrier_off_event(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
/**
* @brief Raise VLAN_ENABLED event when VLAN is enabled.
*
* @param iface Ethernet network interface.
* @param tag VLAN tag which is enabled.
*/
#if defined(CONFIG_NET_L2_ETHERNET_MGMT)
void ethernet_mgmt_raise_vlan_enabled_event(struct net_if *iface, uint16_t tag);
#else
/* No-op stub used when Ethernet management support is not compiled in. */
static inline void ethernet_mgmt_raise_vlan_enabled_event(struct net_if *iface,
uint16_t tag)
{
ARG_UNUSED(iface);
ARG_UNUSED(tag);
}
#endif
/**
* @brief Raise VLAN_DISABLED event when VLAN is disabled.
*
* @param iface Ethernet network interface.
* @param tag VLAN tag which is disabled.
*/
#if defined(CONFIG_NET_L2_ETHERNET_MGMT)
void ethernet_mgmt_raise_vlan_disabled_event(struct net_if *iface,
uint16_t tag);
#else
/* No-op stub used when Ethernet management support is not compiled in. */
static inline void ethernet_mgmt_raise_vlan_disabled_event(struct net_if *iface,
uint16_t tag)
{
ARG_UNUSED(iface);
ARG_UNUSED(tag);
}
#endif
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_ETHERNET_MGMT_H_ */
``` | /content/code_sandbox/include/zephyr/net/ethernet_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,142 |
```objective-c
/** @file
* @brief Network timer with wrap around
*
 * Timers that run longer than about 49 days need to account for wrap-around.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_TIMEOUT_H_
#define ZEPHYR_INCLUDE_NET_NET_TIMEOUT_H_
/**
* @brief Network long timeout primitives and helpers
* @defgroup net_timeout Network long timeout primitives and helpers
* @since 1.14
* @version 0.8.0
* @ingroup networking
* @{
*/
#include <string.h>
#include <stdbool.h>
#include <limits.h>
#include <zephyr/types.h>
#include <zephyr/sys/slist.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @brief Divisor used to support ms resolution timeouts.
*
* Because delays are processed in work queues which are not invoked
* synchronously with clock changes we need to be able to detect timeouts
* after they occur, which requires comparing "deadline" to "now" with enough
* "slop" to handle any observable latency due to "now" advancing past
* "deadline".
*
* The simplest solution is to use the native conversion of the well-defined
* 32-bit unsigned difference to a 32-bit signed difference, which caps the
* maximum delay at INT32_MAX. This is compatible with the standard mechanism
* for detecting completion of deadlines that do not overflow their
* representation.
*/
#define NET_TIMEOUT_MAX_VALUE ((uint32_t)INT32_MAX)
/** Generic struct for handling network timeouts.
*
* Except for the linking node, all access to state from these objects must go
* through the defined API.
*/
struct net_timeout {
/** Used to link multiple timeouts that share a common timer infrastructure.
*
* For examples a set of related timers may use a single delayed work
* structure, which is always scheduled at the shortest time to a
* timeout event.
*/
sys_snode_t node;
/** Time at which the timer was last set.
*
* This usually corresponds to the low 32 bits of k_uptime_get().
*/
uint32_t timer_start;
/** Portion of remaining timeout that does not exceed
* NET_TIMEOUT_MAX_VALUE.
*
* This value is updated in parallel with timer_start and wrap_counter
* by net_timeout_evaluate().
*/
uint32_t timer_timeout;
/** Timer wrap count.
*
* This tracks multiples of NET_TIMEOUT_MAX_VALUE milliseconds that
* have yet to pass. It is also updated along with timer_start and
* timer_timeout by net_timeout_evaluate().
*/
uint32_t wrap_counter;
};
/** @brief Configure a network timeout structure.
*
* @param timeout a pointer to the timeout state.
*
* @param lifetime the duration of the timeout in seconds.
*
* @param now the time at which the timeout started counting down, in
* milliseconds. This is generally a captured value of k_uptime_get_32().
*/
void net_timeout_set(struct net_timeout *timeout,
uint32_t lifetime,
uint32_t now);
/** @brief Return the 64-bit system time at which the timeout will complete.
*
* @note Correct behavior requires invocation of net_timeout_evaluate() at its
* specified intervals.
*
* @param timeout a pointer to the timeout state, initialized by
* net_timeout_set() and maintained by net_timeout_evaluate().
*
* @param now the full-precision value of k_uptime_get() relative to which the
* deadline will be calculated.
*
* @return the value of k_uptime_get() at which the timeout will expire.
*/
int64_t net_timeout_deadline(const struct net_timeout *timeout,
int64_t now);
/** @brief Calculate the remaining time to the timeout in whole seconds.
*
* @note This function rounds the remaining time down, i.e. if the timeout
* will occur in 3500 milliseconds the value 3 will be returned.
*
* @note Correct behavior requires invocation of net_timeout_evaluate() at its
* specified intervals.
*
* @param timeout a pointer to the timeout state
*
* @param now the time relative to which the estimate of remaining time should
* be calculated. This should be recently captured value from
* k_uptime_get_32().
*
* @retval 0 if the timeout has completed.
* @retval positive the remaining duration of the timeout, in seconds.
*/
uint32_t net_timeout_remaining(const struct net_timeout *timeout,
uint32_t now);
/** @brief Update state to reflect elapsed time and get new delay.
*
* This function must be invoked periodically to (1) apply the effect of
* elapsed time on what remains of a total delay that exceeded the maximum
* representable delay, and (2) determine that either the timeout has
* completed or that the infrastructure must wait a certain period before
* checking again for completion.
*
* @param timeout a pointer to the timeout state
*
* @param now the time relative to which the estimate of remaining time should
* be calculated. This should be recently captured value from
* k_uptime_get_32().
*
* @retval 0 if the timeout has completed
* @retval positive the maximum delay until the state of this timeout should
* be re-evaluated, in milliseconds.
*/
uint32_t net_timeout_evaluate(struct net_timeout *timeout,
uint32_t now);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_NET_TIMEOUT_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_timeout.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,161 |
```objective-c
/*
*
*/
/** @file lwm2m.h
*
* @brief LwM2M high-level API
*
* @details
* LwM2M high-level interface is defined in this header.
*
* @note The implementation assumes UDP module is enabled.
*
* @note For more information refer to Technical Specification
* OMA-TS-LightweightM2M_Core-V1_1_1-20190617-A
*
* @defgroup lwm2m_api LwM2M high-level API
* @since 1.9
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifndef ZEPHYR_INCLUDE_NET_LWM2M_H_
#define ZEPHYR_INCLUDE_NET_LWM2M_H_
#include <time.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/mutex.h>
#include <zephyr/net/coap.h>
#include <zephyr/net/lwm2m_path.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @name LwM2M Objects managed by OMA for LwM2M tech specification.
* Objects in this range have IDs from 0 to 1023.
* @{
*/
/* clang-format off */
#define LWM2M_OBJECT_SECURITY_ID 0 /**< Security object */
#define LWM2M_OBJECT_SERVER_ID 1 /**< Server object */
#define LWM2M_OBJECT_ACCESS_CONTROL_ID 2 /**< Access Control object */
#define LWM2M_OBJECT_DEVICE_ID 3 /**< Device object */
#define LWM2M_OBJECT_CONNECTIVITY_MONITORING_ID 4 /**< Connectivity Monitoring object */
#define LWM2M_OBJECT_FIRMWARE_ID 5 /**< Firmware object */
#define LWM2M_OBJECT_LOCATION_ID 6 /**< Location object */
#define LWM2M_OBJECT_CONNECTIVITY_STATISTICS_ID 7 /**< Connectivity Statistics object */
#define LWM2M_OBJECT_SOFTWARE_MANAGEMENT_ID 9 /**< Software Management object */
#define LWM2M_OBJECT_PORTFOLIO_ID 16 /**< Portfolio object */
#define LWM2M_OBJECT_BINARYAPPDATACONTAINER_ID 19 /**< Binary App Data Container object */
#define LWM2M_OBJECT_EVENT_LOG_ID 20 /**< Event Log object */
#define LWM2M_OBJECT_OSCORE_ID 21 /**< OSCORE object */
#define LWM2M_OBJECT_GATEWAY_ID 25 /**< Gateway object */
/* clang-format on */
/** @} */
/**
* @name LwM2M Objects produced by 3rd party Standards Development
* Organizations.
* Refer to the OMA LightweightM2M (LwM2M) Object and Resource Registry:
 * https://technical.openmobilealliance.org/OMNA/LwM2M/LwM2MRegistry.html
* @{
*/
/* clang-format off */
#define IPSO_OBJECT_GENERIC_SENSOR_ID 3300 /**< IPSO Generic Sensor object */
#define IPSO_OBJECT_TEMP_SENSOR_ID 3303 /**< IPSO Temperature Sensor object */
#define IPSO_OBJECT_HUMIDITY_SENSOR_ID 3304 /**< IPSO Humidity Sensor object */
#define IPSO_OBJECT_LIGHT_CONTROL_ID 3311 /**< IPSO Light Control object */
#define IPSO_OBJECT_ACCELEROMETER_ID 3313 /**< IPSO Accelerometer object */
#define IPSO_OBJECT_VOLTAGE_SENSOR_ID 3316 /**< IPSO Voltage Sensor object */
#define IPSO_OBJECT_CURRENT_SENSOR_ID 3317 /**< IPSO Current Sensor object */
#define IPSO_OBJECT_PRESSURE_ID 3323 /**< IPSO Pressure Sensor object */
#define IPSO_OBJECT_BUZZER_ID 3338 /**< IPSO Buzzer object */
#define IPSO_OBJECT_TIMER_ID 3340 /**< IPSO Timer object */
#define IPSO_OBJECT_ONOFF_SWITCH_ID 3342 /**< IPSO On/Off Switch object */
#define IPSO_OBJECT_PUSH_BUTTON_ID 3347 /**< IPSO Push Button object */
#define UCIFI_OBJECT_BATTERY_ID 3411 /**< uCIFI Battery object */
#define IPSO_OBJECT_FILLING_LEVEL_SENSOR_ID 3435 /**< IPSO Filling Level Sensor object */
/* clang-format on */
/** @} */
/**
* @brief Callback function called when a socket error is encountered
*
* @param error Error code
*/
typedef void (*lwm2m_socket_fault_cb_t)(int error);
/** @brief LwM2M object path structure */
struct lwm2m_obj_path {
uint16_t obj_id; /**< Object ID */
uint16_t obj_inst_id; /**< Object instance ID */
uint16_t res_id; /**< Resource ID */
uint16_t res_inst_id; /**< Resource instance ID */
uint8_t level; /**< Path level (0-4). Ex. 4 = resource instance. */
};
/**
* @brief Observe callback events
*/
enum lwm2m_observe_event {
LWM2M_OBSERVE_EVENT_OBSERVER_ADDED, /**< Observer added */
LWM2M_OBSERVE_EVENT_OBSERVER_REMOVED, /**< Observer removed */
LWM2M_OBSERVE_EVENT_NOTIFY_ACK, /**< Notification ACKed */
LWM2M_OBSERVE_EVENT_NOTIFY_TIMEOUT, /**< Notification timed out */
};
/**
* @brief Observe callback indicating observer adds and deletes, and
* notification ACKs and timeouts
*
* @param[in] event Observer add/delete or notification ack/timeout
* @param[in] path LwM2M path
* @param[in] user_data Pointer to user_data buffer, as provided in
* send_traceable_notification(). Used to determine for which
* data the ACKed/timed out notification was.
*/
typedef void (*lwm2m_observe_cb_t)(enum lwm2m_observe_event event, struct lwm2m_obj_path *path,
void *user_data);
struct lwm2m_ctx;
/**
* @brief LwM2M RD client events
*
* LwM2M client events are passed back to the event_cb function in
* lwm2m_rd_client_start()
*/
enum lwm2m_rd_client_event {
/** Invalid event */
LWM2M_RD_CLIENT_EVENT_NONE,
/** Bootstrap registration failure */
LWM2M_RD_CLIENT_EVENT_BOOTSTRAP_REG_FAILURE,
/** Bootstrap registration complete */
LWM2M_RD_CLIENT_EVENT_BOOTSTRAP_REG_COMPLETE,
/** Bootstrap transfer complete */
LWM2M_RD_CLIENT_EVENT_BOOTSTRAP_TRANSFER_COMPLETE,
/** Registration failure */
LWM2M_RD_CLIENT_EVENT_REGISTRATION_FAILURE,
/** Registration complete */
LWM2M_RD_CLIENT_EVENT_REGISTRATION_COMPLETE,
/** Registration timeout */
LWM2M_RD_CLIENT_EVENT_REG_TIMEOUT,
/** Registration update complete */
LWM2M_RD_CLIENT_EVENT_REG_UPDATE_COMPLETE,
/** De-registration failure */
LWM2M_RD_CLIENT_EVENT_DEREGISTER_FAILURE,
/** Disconnected */
LWM2M_RD_CLIENT_EVENT_DISCONNECT,
/** Queue mode RX off */
LWM2M_RD_CLIENT_EVENT_QUEUE_MODE_RX_OFF,
/** Engine suspended */
LWM2M_RD_CLIENT_EVENT_ENGINE_SUSPENDED,
/** Network error */
LWM2M_RD_CLIENT_EVENT_NETWORK_ERROR,
/** Registration update */
LWM2M_RD_CLIENT_EVENT_REG_UPDATE,
/** De-register */
LWM2M_RD_CLIENT_EVENT_DEREGISTER,
/** Server disabled */
LWM2M_RD_CLIENT_EVENT_SERVER_DISABLED,
};
/**
* @brief Asynchronous RD client event callback
*
* @param[in] ctx LwM2M context generating the event
* @param[in] event LwM2M RD client event code
*/
typedef void (*lwm2m_ctx_event_cb_t)(struct lwm2m_ctx *ctx,
enum lwm2m_rd_client_event event);
/**
* @brief Different traffic states of the LwM2M socket.
*
* This information can be used to give hints for the network interface
* that can decide what kind of power management should be used.
*
* These hints are given from CoAP layer messages, so usage of DTLS might affect the
* actual number of expected datagrams.
*/
enum lwm2m_socket_states {
LWM2M_SOCKET_STATE_ONGOING, /**< Ongoing traffic is expected. */
LWM2M_SOCKET_STATE_ONE_RESPONSE, /**< One response is expected for the next message. */
LWM2M_SOCKET_STATE_LAST, /**< Next message is the last one. */
LWM2M_SOCKET_STATE_NO_DATA, /**< No more data is expected. */
};
/**
* @brief LwM2M context structure to maintain information for a single
* LwM2M connection.
*/
struct lwm2m_ctx {
/** Destination address storage */
struct sockaddr remote_addr;
/** @cond INTERNAL_HIDDEN
* Private CoAP and networking structures + 1 is for RD Client own message
*/
struct coap_pending pendings[CONFIG_LWM2M_ENGINE_MAX_PENDING + 1];
struct coap_reply replies[CONFIG_LWM2M_ENGINE_MAX_REPLIES + 1];
sys_slist_t pending_sends;
#if defined(CONFIG_LWM2M_QUEUE_MODE_ENABLED)
sys_slist_t queued_messages;
#endif
sys_slist_t observer;
/** @endcond */
/** A pointer to currently processed request, for internal LwM2M engine
* use. The underlying type is ``struct lwm2m_message``, but since it's
* declared in a private header and not exposed to the application,
* it's stored as a void pointer.
*/
void *processed_req;
#if defined(CONFIG_LWM2M_DTLS_SUPPORT) || defined(__DOXYGEN__)
/**
* @name DTLS related information
* Available only when @kconfig{CONFIG_LWM2M_DTLS_SUPPORT} is enabled and
* @ref lwm2m_ctx.use_dtls is set to true.
* @{
*/
/** TLS tag is set by client as a reference used when the
* LwM2M engine calls tls_credential_(add|delete)
*/
int tls_tag;
/** Destination hostname.
* When MBEDTLS SNI is enabled socket must be set with destination
* server hostname.
*/
char *desthostname;
/** Destination hostname length */
uint16_t desthostnamelen;
/** Flag to indicate if hostname verification is enabled */
bool hostname_verify;
/** Custom load_credentials function.
* Client can set load_credentials function as a way of overriding
* the default behavior of load_tls_credential() in lwm2m_engine.c
*/
int (*load_credentials)(struct lwm2m_ctx *client_ctx);
/** @} */
#endif
/** Custom socket options.
* Client can override default socket options by providing
* a callback that is called after a socket is created and before
* connect.
*/
int (*set_socketoptions)(struct lwm2m_ctx *client_ctx);
/** Flag to indicate if context should use DTLS.
* Enabled via the use of coaps:// protocol prefix in connection
* information.
* NOTE: requires @kconfig{CONFIG_LWM2M_DTLS_SUPPORT}
*/
bool use_dtls;
/**
* Flag to indicate that the socket connection is suspended.
* With queue mode, this will tell if there is a need to reconnect.
*/
bool connection_suspended;
#if defined(CONFIG_LWM2M_QUEUE_MODE_ENABLED) || defined(__DOXYGEN__)
/**
* Flag to indicate that the client is buffering Notifications and Send messages.
* True value buffer Notifications and Send messages.
*/
bool buffer_client_messages;
#endif
/** Current index of Security Object used for server credentials */
int sec_obj_inst;
/** Current index of Server Object used in this context. */
int srv_obj_inst;
/** Flag to enable BOOTSTRAP interface. See Section "Bootstrap Interface"
* of LwM2M Technical Specification for more information.
*/
bool bootstrap_mode;
/** Socket File Descriptor */
int sock_fd;
/** Socket fault callback. LwM2M processing thread will call this
* callback in case of socket errors on receive.
*/
lwm2m_socket_fault_cb_t fault_cb;
/** Callback for new or cancelled observations, and acknowledged or timed
* out notifications.
*/
lwm2m_observe_cb_t observe_cb;
/** Callback for client events */
lwm2m_ctx_event_cb_t event_cb;
/** Validation buffer. Used as a temporary buffer to decode the resource
* value before validation. On successful validation, its content is
* copied into the actual resource buffer.
*/
uint8_t validate_buf[CONFIG_LWM2M_ENGINE_VALIDATION_BUFFER_SIZE];
/**
* Callback to indicate transmission states.
* Client application may request LwM2M engine to indicate hints about
* transmission states and use that information to control various power
* saving modes.
*/
void (*set_socket_state)(int fd, enum lwm2m_socket_states state);
};
/**
* LwM2M Time series data structure
*/
struct lwm2m_time_series_elem {
/** Cached data Unix timestamp */
time_t t;
/** Element value; only the member matching the resource's data type is used */
union {
/** @cond INTERNAL_HIDDEN */
/* Unsigned integer representations */
uint8_t u8;
uint16_t u16;
uint32_t u32;
uint64_t u64;
/* Signed integer representations */
int8_t i8;
int16_t i16;
int32_t i32;
int64_t i64;
/* Time, floating point and boolean representations */
time_t time;
double f;
bool b;
/** @endcond */
};
};
/**
* @brief Asynchronous callback to get a resource buffer and length.
*
* Prior to accessing the data buffer of a resource, the engine can
* use this callback to get the buffer pointer and length instead
* of using the resource's data buffer.
*
* The client or LwM2M objects can register a function of this type via:
* lwm2m_register_read_callback()
* lwm2m_register_pre_write_callback()
*
* @param[in] obj_inst_id Object instance ID generating the callback.
* @param[in] res_id Resource ID generating the callback.
* @param[in] res_inst_id Resource instance ID generating the callback
* (typically 0 for non-multi instance resources).
* @param[out] data_len Length of the data buffer.
*
* @return Callback returns a pointer to the data buffer or NULL for failure.
*/
typedef void *(*lwm2m_engine_get_data_cb_t)(uint16_t obj_inst_id,
uint16_t res_id,
uint16_t res_inst_id,
size_t *data_len);
/**
* @brief Asynchronous callback when data has been set to a resource buffer.
*
* After changing the data of a resource buffer, the LwM2M engine can
* make use of this callback to pass the data back to the client or LwM2M
* objects.
*
* On a block-wise transfers the handler is called multiple times with the data blocks
* and increasing offset. The last block has the last_block flag set to true.
* Beginning of the block transfer has the offset set to 0.
*
* A function of this type can be registered via:
* lwm2m_register_validate_callback()
* lwm2m_register_post_write_callback()
*
* @param[in] obj_inst_id Object instance ID generating the callback.
* @param[in] res_id Resource ID generating the callback.
* @param[in] res_inst_id Resource instance ID generating the callback
* (typically 0 for non-multi instance resources).
* @param[in] data Pointer to data.
* @param[in] data_len Length of the data.
* @param[in] last_block Flag used during block transfer to indicate the last
* block of data. For non-block transfers this is always
* false.
* @param[in] total_size Expected total size of data for a block transfer.
* For non-block transfers this is 0.
* @param[in] offset Offset of the data block. For non-block transfers this is always 0.
*
* @return Callback returns a negative error code (errno.h) indicating
* reason of failure or 0 for success.
*/
typedef int (*lwm2m_engine_set_data_cb_t)(uint16_t obj_inst_id,
uint16_t res_id, uint16_t res_inst_id,
uint8_t *data, uint16_t data_len,
bool last_block, size_t total_size, size_t offset);
/**
* @brief Asynchronous event notification callback.
*
* Various object instance and resource-based events in the LwM2M engine
* can trigger a callback of this function type: object instance create,
* and object instance delete.
*
* Register a function of this type via:
* lwm2m_register_create_callback()
* lwm2m_register_delete_callback()
*
* @param[in] obj_inst_id Object instance ID generating the callback.
*
* @return Callback returns a negative error code (errno.h) indicating
* reason of failure or 0 for success.
*/
typedef int (*lwm2m_engine_user_cb_t)(uint16_t obj_inst_id);
/**
* @brief Asynchronous execute notification callback.
*
* Resource executes trigger a callback of this type.
*
* Register a function of this type via:
* lwm2m_register_exec_callback()
*
* @param[in] obj_inst_id Object instance ID generating the callback.
* @param[in] args Pointer to execute arguments payload. (This can be
* NULL if no arguments are provided)
* @param[in] args_len Length of argument payload in bytes.
*
* @return Callback returns a negative error code (errno.h) indicating
* reason of failure or 0 for success.
*/
typedef int (*lwm2m_engine_execute_cb_t)(uint16_t obj_inst_id,
uint8_t *args, uint16_t args_len);
/**
* @name Power source types used for the "Available Power Sources" resource of
* the LwM2M Device object (3/0/6).
* @{
*/
#define LWM2M_DEVICE_PWR_SRC_TYPE_DC_POWER 0 /**< DC power */
#define LWM2M_DEVICE_PWR_SRC_TYPE_BAT_INT 1 /**< Internal battery */
#define LWM2M_DEVICE_PWR_SRC_TYPE_BAT_EXT 2 /**< External battery */
#define LWM2M_DEVICE_PWR_SRC_TYPE_FUEL_CELL 3 /**< Fuel cell */
#define LWM2M_DEVICE_PWR_SRC_TYPE_PWR_OVER_ETH 4 /**< Power over Ethernet */
#define LWM2M_DEVICE_PWR_SRC_TYPE_USB 5 /**< USB */
#define LWM2M_DEVICE_PWR_SRC_TYPE_AC_POWER 6 /**< AC (mains) power */
#define LWM2M_DEVICE_PWR_SRC_TYPE_SOLAR 7 /**< Solar */
#define LWM2M_DEVICE_PWR_SRC_TYPE_MAX 8 /**< Max value for Available Power Source type */
/** @} */
/**
* @name Error codes used for the "Error Code" resource of the LwM2M Device
* object.
* An LwM2M client can register one of the following error codes via
* the lwm2m_device_add_err() function.
* @{
*/
#define LWM2M_DEVICE_ERROR_NONE 0 /**< No error */
#define LWM2M_DEVICE_ERROR_LOW_POWER 1 /**< Low battery power */
#define LWM2M_DEVICE_ERROR_EXT_POWER_SUPPLY_OFF 2 /**< External power supply off */
#define LWM2M_DEVICE_ERROR_GPS_FAILURE 3 /**< GPS module failure */
#define LWM2M_DEVICE_ERROR_LOW_SIGNAL_STRENGTH 4 /**< Low received signal strength */
#define LWM2M_DEVICE_ERROR_OUT_OF_MEMORY 5 /**< Out of memory */
#define LWM2M_DEVICE_ERROR_SMS_FAILURE 6 /**< SMS failure */
#define LWM2M_DEVICE_ERROR_NETWORK_FAILURE 7 /**< IP Connectivity failure */
#define LWM2M_DEVICE_ERROR_PERIPHERAL_FAILURE 8 /**< Peripheral malfunction */
/** @} */
/**
* @name Battery status codes used for the "Battery Status" resource (3/0/20)
* of the LwM2M Device object. As the battery status changes, an LwM2M
* client can set one of the following codes via:
* lwm2m_set_u8("3/0/20", [battery status])
* @{
*/
#define LWM2M_DEVICE_BATTERY_STATUS_NORMAL 0 /**< The battery is operating normally and not on
* power
*/
#define LWM2M_DEVICE_BATTERY_STATUS_CHARGING 1 /**< The battery is currently charging */
#define LWM2M_DEVICE_BATTERY_STATUS_CHARGE_COMP 2 /**< The battery is fully charged and the charger
* is still connected
*/
#define LWM2M_DEVICE_BATTERY_STATUS_DAMAGED 3 /**< The battery has some problem */
#define LWM2M_DEVICE_BATTERY_STATUS_LOW 4 /**< The battery is low on charge */
#define LWM2M_DEVICE_BATTERY_STATUS_NOT_INST 5 /**< The battery is not installed */
#define LWM2M_DEVICE_BATTERY_STATUS_UNKNOWN 6 /**< The battery information is not available */
/** @} */
/**
* @brief Register a new error code with LwM2M Device object.
*
* @param[in] error_code New error code.
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_device_add_err(uint8_t error_code);
/**
* @name LWM2M Firmware Update object states
*
* An LwM2M client or the LwM2M Firmware Update object use the following codes
* to represent the LwM2M Firmware Update state (5/0/3).
* @{
*/
/**
* Idle. Before downloading or after successful updating.
*/
#define STATE_IDLE 0
/**
* Downloading. The data sequence is being downloaded.
*/
#define STATE_DOWNLOADING 1
/**
* Downloaded. The whole data sequence has been downloaded.
*/
#define STATE_DOWNLOADED 2
/**
* Updating. The device is being updated.
*/
#define STATE_UPDATING 3
/** @} */
/**
* @name LWM2M Firmware Update object result codes
*
* After processing a firmware update, the client sets the result via one of
* the following codes via lwm2m_set_u8("5/0/5", [result code])
* @{
*/
#define RESULT_DEFAULT 0 /**< Initial value */
#define RESULT_SUCCESS 1 /**< Firmware updated successfully */
#define RESULT_NO_STORAGE 2 /**< Not enough flash memory for the new firmware package */
#define RESULT_OUT_OF_MEM 3 /**< Out of RAM during downloading process */
#define RESULT_CONNECTION_LOST 4 /**< Connection lost during downloading process */
#define RESULT_INTEGRITY_FAILED 5 /**< Integrity check failure for new downloaded package */
#define RESULT_UNSUP_FW 6 /**< Unsupported package type */
#define RESULT_INVALID_URI 7 /**< Invalid URI */
#define RESULT_UPDATE_FAILED 8 /**< Firmware update failed */
#define RESULT_UNSUP_PROTO 9 /**< Unsupported protocol */
/** @} */
#if defined(CONFIG_LWM2M_FIRMWARE_UPDATE_OBJ_SUPPORT) || defined(__DOXYGEN__)
/**
* @brief Set data callback for firmware block transfer.
*
* LwM2M clients use this function to register a callback for receiving the
* block transfer data when performing a firmware update.
*
* @param[in] cb A callback function to receive the block transfer data
*/
void lwm2m_firmware_set_write_cb(lwm2m_engine_set_data_cb_t cb);
/**
* @brief Get the data callback for firmware block transfer writes.
*
* @return A registered callback function to receive the block transfer data
*/
lwm2m_engine_set_data_cb_t lwm2m_firmware_get_write_cb(void);
/**
* @brief Set data callback for firmware block transfer.
*
* LwM2M clients use this function to register a callback for receiving the
* block transfer data when performing a firmware update.
*
* @param[in] obj_inst_id Object instance ID
* @param[in] cb A callback function to receive the block transfer data
*/
void lwm2m_firmware_set_write_cb_inst(uint16_t obj_inst_id, lwm2m_engine_set_data_cb_t cb);
/**
* @brief Get the data callback for firmware block transfer writes.
*
* @param[in] obj_inst_id Object instance ID
* @return A registered callback function to receive the block transfer data
*/
lwm2m_engine_set_data_cb_t lwm2m_firmware_get_write_cb_inst(uint16_t obj_inst_id);
/**
* @brief Set callback for firmware update cancel.
*
* LwM2M clients use this function to register a callback to perform actions
* on firmware update cancel.
*
 * @param[in] cb A callback function to perform actions on firmware update cancel.
*/
void lwm2m_firmware_set_cancel_cb(lwm2m_engine_user_cb_t cb);
/**
* @brief Get a callback for firmware update cancel.
*
 * @return A registered callback function to perform actions on firmware update cancel.
*/
lwm2m_engine_user_cb_t lwm2m_firmware_get_cancel_cb(void);
/**
* @brief Set data callback for firmware update cancel.
*
* LwM2M clients use this function to register a callback to perform actions
* on firmware update cancel.
*
* @param[in] obj_inst_id Object instance ID
 * @param[in] cb A callback function to perform actions on firmware update cancel.
*/
void lwm2m_firmware_set_cancel_cb_inst(uint16_t obj_inst_id, lwm2m_engine_user_cb_t cb);
/**
* @brief Get the callback for firmware update cancel.
*
* @param[in] obj_inst_id Object instance ID
 * @return A registered callback function to perform actions on firmware update cancel.
*/
lwm2m_engine_user_cb_t lwm2m_firmware_get_cancel_cb_inst(uint16_t obj_inst_id);
/**
* @brief Set data callback to handle firmware update execute events.
*
* LwM2M clients use this function to register a callback for receiving the
* update resource "execute" operation on the LwM2M Firmware Update object.
*
* @param[in] cb A callback function to receive the execute event.
*/
void lwm2m_firmware_set_update_cb(lwm2m_engine_execute_cb_t cb);
/**
* @brief Get the event callback for firmware update execute events.
*
* @return A registered callback function to receive the execute event.
*/
lwm2m_engine_execute_cb_t lwm2m_firmware_get_update_cb(void);
/**
* @brief Set data callback to handle firmware update execute events.
*
* LwM2M clients use this function to register a callback for receiving the
* update resource "execute" operation on the LwM2M Firmware Update object.
*
* @param[in] obj_inst_id Object instance ID
* @param[in] cb A callback function to receive the execute event.
*/
void lwm2m_firmware_set_update_cb_inst(uint16_t obj_inst_id, lwm2m_engine_execute_cb_t cb);
/**
* @brief Get the event callback for firmware update execute events.
*
* @param[in] obj_inst_id Object instance ID
* @return A registered callback function to receive the execute event.
*/
lwm2m_engine_execute_cb_t lwm2m_firmware_get_update_cb_inst(uint16_t obj_inst_id);
#endif
#if defined(CONFIG_LWM2M_SWMGMT_OBJ_SUPPORT) || defined(__DOXYGEN__)
/**
* @brief Set callback to handle software activation requests
*
* The callback will be executed when the LWM2M execute operation gets called
* on the corresponding object's Activate resource instance.
*
* @param[in] obj_inst_id The instance number to set the callback for.
* @param[in] cb A callback function to receive the execute event.
*
* @return 0 on success, otherwise a negative integer.
*/
int lwm2m_swmgmt_set_activate_cb(uint16_t obj_inst_id, lwm2m_engine_execute_cb_t cb);
/**
* @brief Set callback to handle software deactivation requests
*
* The callback will be executed when the LWM2M execute operation gets called
* on the corresponding object's Deactivate resource instance.
*
* @param[in] obj_inst_id The instance number to set the callback for.
* @param[in] cb A callback function to receive the execute event.
*
* @return 0 on success, otherwise a negative integer.
*/
int lwm2m_swmgmt_set_deactivate_cb(uint16_t obj_inst_id, lwm2m_engine_execute_cb_t cb);
/**
* @brief Set callback to handle software install requests
*
* The callback will be executed when the LWM2M execute operation gets called
* on the corresponding object's Install resource instance.
*
* @param[in] obj_inst_id The instance number to set the callback for.
* @param[in] cb A callback function to receive the execute event.
*
* @return 0 on success, otherwise a negative integer.
*/
int lwm2m_swmgmt_set_install_package_cb(uint16_t obj_inst_id, lwm2m_engine_execute_cb_t cb);
/**
* @brief Set callback to handle software uninstall requests
*
* The callback will be executed when the LWM2M execute operation gets called
* on the corresponding object's Uninstall resource instance.
*
* @param[in] obj_inst_id The instance number to set the callback for.
* @param[in] cb A callback function for handling the execute event.
*
* @return 0 on success, otherwise a negative integer.
*/
int lwm2m_swmgmt_set_delete_package_cb(uint16_t obj_inst_id, lwm2m_engine_execute_cb_t cb);
/**
* @brief Set callback to read software package
*
* The callback will be executed when the LWM2M read operation gets called
* on the corresponding object.
*
* @param[in] obj_inst_id The instance number to set the callback for.
* @param[in] cb A callback function for handling the read event.
*
* @return 0 on success, otherwise a negative integer.
*/
int lwm2m_swmgmt_set_read_package_version_cb(uint16_t obj_inst_id, lwm2m_engine_get_data_cb_t cb);
/**
* @brief Set data callback for software management block transfer.
*
* The callback will be executed when the LWM2M block write operation gets called
* on the corresponding object's resource instance.
*
* @param[in] obj_inst_id The instance number to set the callback for.
* @param[in] cb A callback function for handling the block write event.
*
* @return 0 on success, otherwise a negative integer.
*/
int lwm2m_swmgmt_set_write_package_cb(uint16_t obj_inst_id, lwm2m_engine_set_data_cb_t cb);
/**
* Function to be called when a Software Management object instance
* completed the Install operation.
*
* @param[in] obj_inst_id The Software Management object instance
* @param[in] error_code The result code of the operation. Zero on success
* otherwise it should be a negative integer.
*
 * @return 0 on success, otherwise a negative integer.
*/
int lwm2m_swmgmt_install_completed(uint16_t obj_inst_id, int error_code);
#endif
#if defined(CONFIG_LWM2M_EVENT_LOG_OBJ_SUPPORT) || defined(__DOXYGEN__)
/**
* @brief Set callback to read log data
*
* The callback will be executed when the LWM2M read operation gets called
* on the corresponding object.
*
* @param[in] cb A callback function for handling the read event.
*/
void lwm2m_event_log_set_read_log_data_cb(lwm2m_engine_get_data_cb_t cb);
#endif
/**
* @brief Maximum value for Objlnk resource fields
*/
#define LWM2M_OBJLNK_MAX_ID USHRT_MAX
/**
 * @brief LWM2M Objlnk resource type structure
 *
 * Holds the two identifiers that make up an LwM2M Object Link (Objlnk)
 * resource value: the target Object ID and Object Instance ID.
 * Used with lwm2m_set_objlnk() and lwm2m_get_objlnk().
 */
struct lwm2m_objlnk {
uint16_t obj_id; /**< Object ID */
uint16_t obj_inst; /**< Object instance ID */
};
/**
* @brief Change an observer's pmin value.
*
* LwM2M clients use this function to modify the pmin attribute
* for an observation being made.
* Example to update the pmin of a temperature sensor value being observed:
* lwm2m_update_observer_min_period(client_ctx, &LWM2M_OBJ(3303, 0, 5700), 5);
*
* @param[in] client_ctx LwM2M context
* @param[in] path LwM2M path as a struct
* @param[in] period_s Value of pmin to be given (in seconds).
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_update_observer_min_period(struct lwm2m_ctx *client_ctx,
const struct lwm2m_obj_path *path, uint32_t period_s);
/**
* @brief Change an observer's pmax value.
*
* LwM2M clients use this function to modify the pmax attribute
* for an observation being made.
* Example to update the pmax of a temperature sensor value being observed:
 * lwm2m_update_observer_max_period(client_ctx, &LWM2M_OBJ(3303, 0, 5700), 5);
*
* @param[in] client_ctx LwM2M context
* @param[in] path LwM2M path as a struct
* @param[in] period_s Value of pmax to be given (in seconds).
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_update_observer_max_period(struct lwm2m_ctx *client_ctx,
const struct lwm2m_obj_path *path, uint32_t period_s);
/**
* @brief Create an LwM2M object instance.
*
* LwM2M clients use this function to create non-default LwM2M objects:
* Example to create first temperature sensor object:
 * lwm2m_create_object_inst(&LWM2M_OBJ(3303, 0));
*
* @param[in] path LwM2M path as a struct
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_create_object_inst(const struct lwm2m_obj_path *path);
/**
* @brief Delete an LwM2M object instance.
*
* LwM2M clients use this function to delete LwM2M objects.
*
* @param[in] path LwM2M path as a struct
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_delete_object_inst(const struct lwm2m_obj_path *path);
/**
* @brief Locks the registry for this thread.
*
* Use this function before writing to multiple resources. This halts the
* lwm2m main thread until all the write-operations are finished.
*
*/
void lwm2m_registry_lock(void);
/**
* @brief Unlocks the registry previously locked by lwm2m_registry_lock().
*
*/
void lwm2m_registry_unlock(void);
/**
* @brief Set resource (instance) value (opaque buffer)
*
* @param[in] path LwM2M path as a struct
* @param[in] data_ptr Data buffer
* @param[in] data_len Length of buffer
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_opaque(const struct lwm2m_obj_path *path, const char *data_ptr, uint16_t data_len);
/**
* @brief Set resource (instance) value (string)
*
* @param[in] path LwM2M path as a struct
* @param[in] data_ptr NULL terminated char buffer
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_string(const struct lwm2m_obj_path *path, const char *data_ptr);
/**
* @brief Set resource (instance) value (u8)
*
* @param[in] path LwM2M path as a struct
* @param[in] value u8 value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_u8(const struct lwm2m_obj_path *path, uint8_t value);
/**
* @brief Set resource (instance) value (u16)
*
* @param[in] path LwM2M path as a struct
* @param[in] value u16 value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_u16(const struct lwm2m_obj_path *path, uint16_t value);
/**
* @brief Set resource (instance) value (u32)
*
* @param[in] path LwM2M path as a struct
* @param[in] value u32 value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_u32(const struct lwm2m_obj_path *path, uint32_t value);
/**
* @brief Set resource (instance) value (s8)
*
* @param[in] path LwM2M path as a struct
* @param[in] value s8 value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_s8(const struct lwm2m_obj_path *path, int8_t value);
/**
* @brief Set resource (instance) value (s16)
*
* @param[in] path LwM2M path as a struct
* @param[in] value s16 value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_s16(const struct lwm2m_obj_path *path, int16_t value);
/**
* @brief Set resource (instance) value (s32)
*
* @param[in] path LwM2M path as a struct
* @param[in] value s32 value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_s32(const struct lwm2m_obj_path *path, int32_t value);
/**
* @brief Set resource (instance) value (s64)
*
* @param[in] path LwM2M path as a struct
* @param[in] value s64 value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_s64(const struct lwm2m_obj_path *path, int64_t value);
/**
* @brief Set resource (instance) value (bool)
*
* @param[in] path LwM2M path as a struct
* @param[in] value bool value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_bool(const struct lwm2m_obj_path *path, bool value);
/**
* @brief Set resource (instance) value (double)
*
* @param[in] path LwM2M path as a struct
* @param[in] value double value
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_f64(const struct lwm2m_obj_path *path, const double value);
/**
* @brief Set resource (instance) value (Objlnk)
*
* @param[in] path LwM2M path as a struct
* @param[in] value pointer to the lwm2m_objlnk structure
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_objlnk(const struct lwm2m_obj_path *path, const struct lwm2m_objlnk *value);
/**
* @brief Set resource (instance) value (Time)
*
* @param[in] path LwM2M path as a struct
* @param[in] value Epoch timestamp
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_time(const struct lwm2m_obj_path *path, time_t value);
/**
* @brief LwM2M resource item structure
*
* Value type must match the target resource as no type conversion are
* done and the value is just memcopied.
*
* Following C types are used for resource types:
* * BOOL is uint8_t
* * U8 is uint8_t
* * S8 is int8_t
* * U16 is uint16_t
* * S16 is int16_t
* * U32 is uint32_t
* * S32 is int32_t
* * S64 is int64_t
* * TIME is time_t
* * FLOAT is double
* * OBJLNK is struct lwm2m_objlnk
* * STRING is char * and the null-terminator should be included in the size.
* * OPAQUE is any binary data. When null-terminated string is written in OPAQUE
* resource, the terminator should not be included in size.
*
*/
struct lwm2m_res_item {
/** Pointer to LwM2M path as a struct */
struct lwm2m_obj_path *path;
/** Pointer to the resource value; its C type must match the target resource type (see above) */
void *value;
/** Size of the value. For string resources, it should contain the null-terminator. */
uint16_t size;
};
/**
* @brief Set multiple resource (instance) values
*
* NOTE: Value type must match the target resource as this function
* does not do any type conversion.
* See struct @ref lwm2m_res_item for list of resource types.
*
* @param[in] res_list LwM2M resource item list
* @param[in] res_list_size Length of resource list
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_bulk(const struct lwm2m_res_item res_list[], size_t res_list_size);
/**
* @brief Get resource (instance) value (opaque buffer)
*
* @param[in] path LwM2M path as a struct
* @param[out] buf Data buffer to copy data into
* @param[in] buflen Length of buffer
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_opaque(const struct lwm2m_obj_path *path, void *buf, uint16_t buflen);
/**
* @brief Get resource (instance) value (string)
*
* @param[in] path LwM2M path as a struct
* @param[out] str String buffer to copy data into
* @param[in] buflen Length of buffer
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_string(const struct lwm2m_obj_path *path, void *str, uint16_t buflen);
/**
* @brief Get resource (instance) value (u8)
*
* @param[in] path LwM2M path as a struct
* @param[out] value u8 buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_u8(const struct lwm2m_obj_path *path, uint8_t *value);
/**
* @brief Get resource (instance) value (u16)
*
* @param[in] path LwM2M path as a struct
* @param[out] value u16 buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_u16(const struct lwm2m_obj_path *path, uint16_t *value);
/**
* @brief Get resource (instance) value (u32)
*
* @param[in] path LwM2M path as a struct
* @param[out] value u32 buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_u32(const struct lwm2m_obj_path *path, uint32_t *value);
/**
* @brief Get resource (instance) value (s8)
*
* @param[in] path LwM2M path as a struct
* @param[out] value s8 buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_s8(const struct lwm2m_obj_path *path, int8_t *value);
/**
* @brief Get resource (instance) value (s16)
*
* @param[in] path LwM2M path as a struct
* @param[out] value s16 buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_s16(const struct lwm2m_obj_path *path, int16_t *value);
/**
* @brief Get resource (instance) value (s32)
*
* @param[in] path LwM2M path as a struct
* @param[out] value s32 buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_s32(const struct lwm2m_obj_path *path, int32_t *value);
/**
* @brief Get resource (instance) value (s64)
*
* @param[in] path LwM2M path as a struct
* @param[out] value s64 buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_s64(const struct lwm2m_obj_path *path, int64_t *value);
/**
* @brief Get resource (instance) value (bool)
*
* @param[in] path LwM2M path as a struct
* @param[out] value bool buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_bool(const struct lwm2m_obj_path *path, bool *value);
/**
* @brief Get resource (instance) value (double)
*
* @param[in] path LwM2M path as a struct
* @param[out] value double buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_f64(const struct lwm2m_obj_path *path, double *value);
/**
* @brief Get resource (instance) value (Objlnk)
*
* @param[in] path LwM2M path as a struct
* @param[out] buf lwm2m_objlnk buffer to copy data into
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_objlnk(const struct lwm2m_obj_path *path, struct lwm2m_objlnk *buf);
/**
* @brief Get resource (instance) value (Time)
*
* @param[in] path LwM2M path as a struct
* @param[out] buf time_t pointer to copy data
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_time(const struct lwm2m_obj_path *path, time_t *buf);
/**
* @brief Set resource (instance) read callback
*
* LwM2M clients can use this to set the callback function for resource reads when data
* handling in the LwM2M engine needs to be bypassed.
* For example reading back opaque binary data from external storage.
*
* This callback should not generally be used for any data that might be observed as
* engine does not have any knowledge of data changes.
*
* When separate buffer for data should be used, use lwm2m_set_res_buf() instead
* to set the storage.
*
* @param[in] path LwM2M path as a struct
* @param[in] cb Read resource callback
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_register_read_callback(const struct lwm2m_obj_path *path, lwm2m_engine_get_data_cb_t cb);
/**
* @brief Set resource (instance) pre-write callback
*
* This callback is triggered before setting the value of a resource. It
* can pass a special data buffer to the engine so that the actual resource
* value can be calculated later, etc.
*
* @param[in] path LwM2M path as a struct
* @param[in] cb Pre-write resource callback
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_register_pre_write_callback(const struct lwm2m_obj_path *path,
lwm2m_engine_get_data_cb_t cb);
/**
* @brief Set resource (instance) validation callback
*
* This callback is triggered before setting the value of a resource to the
* resource data buffer.
*
* The callback allows an LwM2M client or object to validate the data before
* writing and notify an error if the data should be discarded for any reason
* (by returning a negative error code).
*
* @note All resources that have a validation callback registered are initially
* decoded into a temporary validation buffer. Make sure that
* ``CONFIG_LWM2M_ENGINE_VALIDATION_BUFFER_SIZE`` is large enough to
* store each of the validated resources (individually).
*
* @param[in] path LwM2M path as a struct
* @param[in] cb Validate resource data callback
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_register_validate_callback(const struct lwm2m_obj_path *path,
lwm2m_engine_set_data_cb_t cb);
/**
* @brief Set resource (instance) post-write callback
*
* This callback is triggered after setting the value of a resource to the
* resource data buffer.
*
* It allows an LwM2M client or object to post-process the value of a resource
* or trigger other related resource calculations.
*
* @param[in] path LwM2M path as a struct
* @param[in] cb Post-write resource callback
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_register_post_write_callback(const struct lwm2m_obj_path *path,
lwm2m_engine_set_data_cb_t cb);
/**
* @brief Set resource execute event callback
*
* This event is triggered when the execute method of a resource is enabled.
*
* @param[in] path LwM2M path as a struct
* @param[in] cb Execute resource callback
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_register_exec_callback(const struct lwm2m_obj_path *path, lwm2m_engine_execute_cb_t cb);
/**
* @brief Set object instance create event callback
*
* This event is triggered when an object instance is created.
*
* @param[in] obj_id LwM2M object id
* @param[in] cb Create object instance callback
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_register_create_callback(uint16_t obj_id,
lwm2m_engine_user_cb_t cb);
/**
* @brief Set object instance delete event callback
*
* This event is triggered when an object instance is deleted.
*
* @param[in] obj_id LwM2M object id
* @param[in] cb Delete object instance callback
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_register_delete_callback(uint16_t obj_id,
lwm2m_engine_user_cb_t cb);
/**
* @brief Resource read-only value bit
*/
#define LWM2M_RES_DATA_READ_ONLY 0
/**
* @brief Resource read-only flag
*/
#define LWM2M_RES_DATA_FLAG_RO BIT(LWM2M_RES_DATA_READ_ONLY)
/**
 * @brief Read resource flags helper macro
 *
 * Evaluates to true when all flag bits of @p f are set in the resource's
 * data_flags field.
 *
 * @param res Pointer to a resource (any type with a data_flags member)
 * @param f Flag bit mask to test (e.g. LWM2M_RES_DATA_FLAG_RO)
 */
#define LWM2M_HAS_RES_FLAG(res, f) (((res)->data_flags & (f)) == (f))
/**
* @brief Set data buffer for a resource
*
* Use this function to set the data buffer and flags for the specified LwM2M
* resource.
*
* @param[in] path LwM2M path as a struct
* @param[in] buffer_ptr Data buffer pointer
* @param[in] buffer_len Length of buffer
* @param[in] data_len Length of existing data in the buffer
* @param[in] data_flags Data buffer flags (such as read-only, etc)
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_res_buf(const struct lwm2m_obj_path *path, void *buffer_ptr, uint16_t buffer_len,
uint16_t data_len, uint8_t data_flags);
/**
* @brief Update data size for a resource
*
* Use this function to set the new size of data in the buffer if you write
* to a buffer received by lwm2m_get_res_buf().
*
* @param[in] path LwM2M path as a struct
* @param[in] data_len Length of data
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_res_data_len(const struct lwm2m_obj_path *path, uint16_t data_len);
/**
* @brief Get data buffer for a resource
*
* Use this function to get the data buffer information for the specified LwM2M
* resource.
*
* If you directly write into the buffer, you must use lwm2m_set_res_data_len()
* function to update the new size of the written data.
*
* All parameters, except for the pathstr, can be NULL if you don't want to read those values.
*
* @param[in] path LwM2M path as a struct
* @param[out] buffer_ptr Data buffer pointer
* @param[out] buffer_len Length of buffer
* @param[out] data_len Length of existing data in the buffer
* @param[out] data_flags Data buffer flags (such as read-only, etc)
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_get_res_buf(const struct lwm2m_obj_path *path, void **buffer_ptr, uint16_t *buffer_len,
uint16_t *data_len, uint8_t *data_flags);
/**
* @brief Create a resource instance
*
* LwM2M clients use this function to create multi-resource instances:
* Example to create 0 instance of device available power sources:
* lwm2m_create_res_inst(&LWM2M_OBJ(3, 0, 6, 0));
*
* @param[in] path LwM2M path as a struct
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_create_res_inst(const struct lwm2m_obj_path *path);
/**
* @brief Delete a resource instance
*
* Use this function to remove an existing resource instance
*
* @param[in] path LwM2M path as a struct
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_delete_res_inst(const struct lwm2m_obj_path *path);
/**
* @brief Update the period of the device service.
*
* Change the duration of the periodic device service that notifies the
* current time.
*
* @param[in] period_ms New period for the device service (in milliseconds)
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_update_device_service_period(uint32_t period_ms);
/**
* @brief Check whether a path is observed
*
* @param[in] path LwM2M path as a struct to check
*
* @return true when there exists an observation of the same level
* or lower as the given path, false if it doesn't or path is not a
* valid LwM2M-path.
* E.g. true if path refers to a resource and the parent object has an
* observation, false for the inverse.
*/
bool lwm2m_path_is_observed(const struct lwm2m_obj_path *path);
/**
* @brief Stop the LwM2M engine
*
* LwM2M clients normally do not need to call this function as it is called
* within lwm2m_rd_client. However, if the client does not use the RD
* client implementation, it will need to be called manually.
*
* @param[in] client_ctx LwM2M context
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_engine_stop(struct lwm2m_ctx *client_ctx);
/**
* @brief Start the LwM2M engine
*
* LwM2M clients normally do not need to call this function as it is called
* by lwm2m_rd_client_start(). However, if the client does not use the RD
* client implementation, it will need to be called manually.
*
* @param[in] client_ctx LwM2M context
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_engine_start(struct lwm2m_ctx *client_ctx);
/**
* @brief Acknowledge the currently processed request with an empty ACK.
*
* LwM2M engine by default sends piggybacked responses for requests.
* This function allows to send an empty ACK for a request earlier (from the
* application callback). The LwM2M engine will then send the actual response
* as a separate CON message after all callbacks are executed.
*
* @param[in] client_ctx LwM2M context
*
*/
void lwm2m_acknowledge(struct lwm2m_ctx *client_ctx);
/*
* LwM2M RD client flags, used to configure LwM2M session.
*/
/**
* @brief Run bootstrap procedure in current session.
*/
#define LWM2M_RD_CLIENT_FLAG_BOOTSTRAP BIT(0)
/**
* @brief Start the LwM2M RD (Registration / Discovery) Client
*
* The RD client sits just above the LwM2M engine and performs the necessary
* actions to implement the "Registration interface".
* For more information see Section "Client Registration Interface" of
* LwM2M Technical Specification.
*
* NOTE: lwm2m_engine_start() is called automatically by this function.
*
* @param[in] client_ctx LwM2M context
* @param[in] ep_name Registered endpoint name
* @param[in] flags Flags used to configure current LwM2M session.
* @param[in] event_cb Client event callback function
* @param[in] observe_cb Observe callback function called when an observer was
* added or deleted, and when a notification was acked or
* has timed out
*
* @return 0 for success, -EINPROGRESS when client is already running
* or negative error codes in case of failure.
*/
int lwm2m_rd_client_start(struct lwm2m_ctx *client_ctx, const char *ep_name,
uint32_t flags, lwm2m_ctx_event_cb_t event_cb,
lwm2m_observe_cb_t observe_cb);
/**
* @brief Stop the LwM2M RD (De-register) Client
*
* The RD client sits just above the LwM2M engine and performs the necessary
* actions to implement the "Registration interface".
* For more information see Section "Client Registration Interface" of the
* LwM2M Technical Specification.
*
* @param[in] client_ctx LwM2M context
* @param[in] event_cb Client event callback function
* @param[in] deregister True to deregister the client if registered.
* False to force close the connection.
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_rd_client_stop(struct lwm2m_ctx *client_ctx,
lwm2m_ctx_event_cb_t event_cb, bool deregister);
/**
* @brief Suspend the LwM2M engine Thread
*
* Suspend LwM2M engine. Use case could be when network connection is down.
* LwM2M Engine indicate before it suspend by
* LWM2M_RD_CLIENT_EVENT_ENGINE_SUSPENDED event.
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_engine_pause(void);
/**
* @brief Resume the LwM2M engine thread
*
* Resume suspended LwM2M engine. After successful resume call engine will do
* full registration or registration update based on suspended time.
* Event's LWM2M_RD_CLIENT_EVENT_REGISTRATION_COMPLETE or LWM2M_RD_CLIENT_EVENT_REG_UPDATE_COMPLETE
* indicate that client is connected to server.
*
* @return 0 for success or negative in case of error.
*/
int lwm2m_engine_resume(void);
/**
* @brief Trigger a Registration Update of the LwM2M RD Client
*/
void lwm2m_rd_client_update(void);
/**
* @brief LwM2M path maximum length
*/
#define LWM2M_MAX_PATH_STR_SIZE sizeof("/65535/65535/65535/65535")
/**
* @brief Helper function to print path objects' contents to log
*
* @param[in] buf The buffer to use for formatting the string
* @param[in] path The path to stringify
*
* @return Resulting formatted path string
*/
char *lwm2m_path_log_buf(char *buf, struct lwm2m_obj_path *path);
/**
 * @brief LwM2M send status
 *
 * LwM2M send status values are reported back to the lwm2m_send_cb_t
 * callback registered with lwm2m_send_cb().
 */
enum lwm2m_send_status {
LWM2M_SEND_STATUS_SUCCESS, /**< Send operation succeeded */
LWM2M_SEND_STATUS_FAILURE, /**< Send operation failed */
LWM2M_SEND_STATUS_TIMEOUT, /**< Send operation timed out */
};
/**
 * @typedef lwm2m_send_cb_t
 * @brief Callback returning send status
 *
 * Invoked with the confirmation state of an lwm2m_send_cb() operation;
 * see enum lwm2m_send_status for the possible values.
 */
typedef void (*lwm2m_send_cb_t)(enum lwm2m_send_status status);
/**
 * @brief LwM2M SEND operation to given path list asynchronously with confirmation callback
*
* @param ctx LwM2M context
* @param path_list LwM2M path struct list
* @param path_list_size Length of path list. Max size is CONFIG_LWM2M_COMPOSITE_PATH_LIST_SIZE
* @param reply_cb Callback triggered with confirmation state or NULL if not used
*
 * @return 0 for success or negative in case of error.
*
*/
int lwm2m_send_cb(struct lwm2m_ctx *ctx, const struct lwm2m_obj_path path_list[],
uint8_t path_list_size, lwm2m_send_cb_t reply_cb);
/**
 * @brief Returns the LwM2M client context
*
* @return ctx LwM2M context
*
*/
struct lwm2m_ctx *lwm2m_rd_client_ctx(void);
/**
 * @brief Enable data cache for a resource.
*
* Application may enable caching of resource data by allocating buffer for LwM2M engine to use.
* Buffer must be size of struct @ref lwm2m_time_series_elem times cache_len
*
* @param path LwM2M path to resource as a struct
* @param data_cache Pointer to Data cache array
* @param cache_len number of cached entries
*
 * @return 0 for success or negative in case of error.
*
*/
int lwm2m_enable_cache(const struct lwm2m_obj_path *path, struct lwm2m_time_series_elem *data_cache,
size_t cache_len);
/**
 * @brief Security modes as defined in LwM2M Security object.
 *
 * Positive return values of lwm2m_security_mode() are drawn from
 * this enumeration.
 */
enum lwm2m_security_mode_e {
LWM2M_SECURITY_PSK = 0, /**< Pre-Shared Key mode */
LWM2M_SECURITY_RAW_PK = 1, /**< Raw Public Key mode */
LWM2M_SECURITY_CERT = 2, /**< Certificate mode */
LWM2M_SECURITY_NOSEC = 3, /**< NoSec mode */
LWM2M_SECURITY_CERT_EST = 4, /**< Certificate mode with EST */
};
/**
* @brief Read security mode from selected security object instance.
*
* This data is valid only if RD client is running.
*
* @param ctx Pointer to client context.
* @return int Positive values are @ref lwm2m_security_mode_e, negative error codes otherwise.
*/
int lwm2m_security_mode(struct lwm2m_ctx *ctx);
/**
* @brief Set default socket options for DTLS connections.
*
* The engine calls this when @ref lwm2m_ctx::set_socketoptions is not overwritten.
* You can call this from the overwritten callback to set extra options after or
* before defaults.
*
* @param ctx Client context
* @return 0 for success or negative in case of error.
*/
int lwm2m_set_default_sockopt(struct lwm2m_ctx *ctx);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_LWM2M_H_ */
/**@} */
``` | /content/code_sandbox/include/zephyr/net/lwm2m.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 13,912 |
```objective-c
/*
*
*/
/**
* @file
* @brief Socket utility functions.
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKETUTILS_H_
#define ZEPHYR_INCLUDE_NET_SOCKETUTILS_H_
#include <zephyr/net/socket.h>
/**
* @brief Find port in addr:port string.
*
* @param addr_str String of addr[:port] format
*
 * @return Pointer to "port" part, or NULL if none.
*/
const char *net_addr_str_find_port(const char *addr_str);
/**
* @brief Call getaddrinfo() on addr:port string
*
* Convenience function to split addr[:port] string into address vs port
* components (or use default port number), and call getaddrinfo() on the
* result.
*
* @param addr_str String of addr[:port] format
* @param def_port Default port number to use if addr_str doesn't contain it
* @param hints getaddrinfo() hints
 * @param res Result of getaddrinfo() (freeaddrinfo() should be called on it
 * as usual).
*
* @return Result of getaddrinfo() call.
*/
int net_getaddrinfo_addr_str(const char *addr_str, const char *def_port,
const struct zsock_addrinfo *hints,
struct zsock_addrinfo **res);
#endif /* ZEPHYR_INCLUDE_NET_SOCKETUTILS_H_ */
``` | /content/code_sandbox/include/zephyr/net/socketutils.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 292 |
```objective-c
/*
*
*/
/** @file
* @brief IPv4 Autoconfiguration
*/
#ifndef ZEPHYR_INCLUDE_NET_IPV4_AUTOCONF_H_
#define ZEPHYR_INCLUDE_NET_IPV4_AUTOCONF_H_
/** Current state of IPv4 Autoconfiguration */
enum net_ipv4_autoconf_state {
NET_IPV4_AUTOCONF_INIT, /**< Initialization state */
NET_IPV4_AUTOCONF_ASSIGNED, /**< Assigned state */
NET_IPV4_AUTOCONF_RENEW, /**< Renew state */
};
struct net_if;
/**
* @brief Start IPv4 autoconfiguration RFC 3927: IPv4 Link Local
*
* @details Start IPv4 IP autoconfiguration
*
* @param iface A valid pointer on an interface
*/
#if defined(CONFIG_NET_IPV4_AUTO)
void net_ipv4_autoconf_start(struct net_if *iface);
#else
/* CONFIG_NET_IPV4_AUTO is disabled: no-op stub so callers can invoke the
 * API unconditionally without #ifdef guards.
 */
static inline void net_ipv4_autoconf_start(struct net_if *iface)
{
	ARG_UNUSED(iface);
}
#endif
/**
* @brief Reset autoconf process
*
* @details Reset IPv4 IP autoconfiguration
*
* @param iface A valid pointer on an interface
*/
#if defined(CONFIG_NET_IPV4_AUTO)
void net_ipv4_autoconf_reset(struct net_if *iface);
#else
/* CONFIG_NET_IPV4_AUTO is disabled: no-op stub so callers can invoke the
 * API unconditionally without #ifdef guards.
 */
static inline void net_ipv4_autoconf_reset(struct net_if *iface)
{
	ARG_UNUSED(iface);
}
#endif
/** @cond INTERNAL_HIDDEN */
/**
* @brief Initialize IPv4 auto configuration engine.
*/
#if defined(CONFIG_NET_IPV4_AUTO)
void net_ipv4_autoconf_init(void);
#else
static inline void net_ipv4_autoconf_init(void) { }
#endif
/** @endcond */
#endif /* ZEPHYR_INCLUDE_NET_IPV4_AUTOCONF_H_ */
``` | /content/code_sandbox/include/zephyr/net/ipv4_autoconf.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 349 |
```objective-c
/*
*
*/
/**
* @file
*
* @brief CoAP implementation for Zephyr.
*/
#ifndef ZEPHYR_INCLUDE_NET_COAP_LINK_FORMAT_H_
#define ZEPHYR_INCLUDE_NET_COAP_LINK_FORMAT_H_
/**
* @addtogroup coap COAP Library
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* This resource should be added before all other resources that should be
* included in the responses of the .well-known/core resource if is to be used with
* coap_well_known_core_get.
*/
#define COAP_WELL_KNOWN_CORE_PATH \
((const char * const[]) { ".well-known", "core", NULL })
/**
* @brief Build a CoAP response for a .well-known/core CoAP request.
*
* @param resource Array of known resources, terminated with an empty resource
* @param request A pointer to the .well-known/core CoAP request
* @param response A pointer to a CoAP response, will be initialized
* @param data A data pointer to be used to build the CoAP response
* @param data_len The maximum length of the data buffer
*
* @return 0 in case of success or negative in case of error.
*/
int coap_well_known_core_get(struct coap_resource *resource,
const struct coap_packet *request,
struct coap_packet *response,
uint8_t *data, uint16_t data_len);
/**
* @brief Build a CoAP response for a .well-known/core CoAP request.
*
* @param resources Array of known resources
* @param resources_len Number of resources in the array
* @param request A pointer to the .well-known/core CoAP request
* @param response A pointer to a CoAP response, will be initialized
* @param data A data pointer to be used to build the CoAP response
* @param data_len The maximum length of the data buffer
*
* @return 0 in case of success or negative in case of error.
*/
int coap_well_known_core_get_len(struct coap_resource *resources,
size_t resources_len,
const struct coap_packet *request,
struct coap_packet *response,
uint8_t *data, uint16_t data_len);
/**
* In case you want to add attributes to the resources included in the
* 'well-known/core' "virtual" resource, the 'user_data' field should point
* to a valid coap_core_metadata structure.
*/
struct coap_core_metadata {
/** List of attributes to add */
const char * const *attributes;
/** User specific data */
void *user_data;
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_COAP_LINK_FORMAT_H_ */
``` | /content/code_sandbox/include/zephyr/net/coap_link_format.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 584 |
```objective-c
/** @file
* @brief Ethernet Bridge public header file
*
* Ethernet Bridges connect two or more Ethernet networks together and
* transparently forward packets from one network to the others as if
* they were part of the same network.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_ETHERNET_BRIDGE_H_
#define ZEPHYR_INCLUDE_NET_ETHERNET_BRIDGE_H_
#include <zephyr/sys/slist.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Ethernet Bridging API
* @defgroup eth_bridge Ethernet Bridging API
* @since 2.7
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
struct eth_bridge {
struct k_mutex lock;
sys_slist_t interfaces;
sys_slist_t listeners;
bool initialized;
};
#define ETH_BRIDGE_INITIALIZER(obj) \
{ \
.lock = { }, \
.interfaces = SYS_SLIST_STATIC_INIT(&obj.interfaces), \
.listeners = SYS_SLIST_STATIC_INIT(&obj.listeners), \
}
/** @endcond */
/**
* @brief Statically define and initialize a bridge instance.
*
* @param name Name of the bridge object
*/
#define ETH_BRIDGE_INIT(name) \
STRUCT_SECTION_ITERABLE(eth_bridge, name) = \
ETH_BRIDGE_INITIALIZER(name)
/** @cond INTERNAL_HIDDEN */
struct eth_bridge_iface_context {
sys_snode_t node;
struct eth_bridge *instance;
bool allow_tx;
};
struct eth_bridge_listener {
sys_snode_t node;
struct k_fifo pkt_queue;
};
/** @endcond */
/**
* @brief Add an Ethernet network interface to a bridge
*
* This adds a network interface to a bridge. The interface is then put
* into promiscuous mode, all packets received by this interface are sent
* to the bridge, and any other packets sent to the bridge (with some
* exceptions) are transmitted via this interface.
*
* For transmission from the bridge to occur via this interface, it is
* necessary to enable TX mode with eth_bridge_iface_tx(). TX mode is
* initially disabled.
*
* Once an interface is added to a bridge, all its incoming traffic is
* diverted to the bridge. However, packets sent out with net_if_queue_tx()
* via this interface are not subjected to the bridge.
*
* @param br A pointer to an initialized bridge object
* @param iface Interface to add
*
* @return 0 if OK, negative error code otherwise.
*/
int eth_bridge_iface_add(struct eth_bridge *br, struct net_if *iface);
/**
* @brief Remove an Ethernet network interface from a bridge
*
* @param br A pointer to an initialized bridge object
* @param iface Interface to remove
*
* @return 0 if OK, negative error code otherwise.
*/
int eth_bridge_iface_remove(struct eth_bridge *br, struct net_if *iface);
/**
* @brief Enable/disable transmission mode for a bridged interface
*
* When TX mode is off, the interface may receive packets and send them to
* the bridge but no packets coming from the bridge will be sent through this
* interface. When TX mode is on, both incoming and outgoing packets are
* allowed.
*
* @param iface Interface to configure
* @param allow true to activate TX mode, false otherwise
*
* @return 0 if OK, negative error code otherwise.
*/
int eth_bridge_iface_allow_tx(struct net_if *iface, bool allow);
/**
* @brief Add (register) a listener to the bridge
*
* This lets a software listener register a pointer to a provided FIFO for
* receiving packets sent to the bridge. The listener is responsible for
* emptying the FIFO with k_fifo_get() which will return a struct net_pkt
* pointer, and releasing the packet with net_pkt_unref() when done with it.
*
* The listener wishing not to receive any more packets should simply
* unregister itself with eth_bridge_listener_remove().
*
* @param br A pointer to an initialized bridge object
* @param l A pointer to an initialized listener instance.
*
* @return 0 if OK, negative error code otherwise.
*/
int eth_bridge_listener_add(struct eth_bridge *br, struct eth_bridge_listener *l);
/**
* @brief Remove (unregister) a listener from the bridge
*
* @param br A pointer to an initialized bridge object
* @param l A pointer to the listener instance to be removed.
*
* @return 0 if OK, negative error code otherwise.
*/
int eth_bridge_listener_remove(struct eth_bridge *br, struct eth_bridge_listener *l);
/**
* @brief Get bridge index according to pointer
*
* @param br Pointer to bridge instance
*
* @return Bridge index
*/
int eth_bridge_get_index(struct eth_bridge *br);
/**
* @brief Get bridge instance according to index
*
* @param index Bridge instance index
*
* @return Pointer to bridge instance or NULL if not found.
*/
struct eth_bridge *eth_bridge_get_by_index(int index);
/**
* @typedef eth_bridge_cb_t
* @brief Callback used while iterating over bridge instances
*
* @param br Pointer to bridge instance
* @param user_data User supplied data
*/
typedef void (*eth_bridge_cb_t)(struct eth_bridge *br, void *user_data);
/**
* @brief Go through all the bridge instances in order to get
* information about them. This is mainly useful in
* net-shell to print data about currently active bridges.
*
* @param cb Callback to call for each bridge instance
* @param user_data User supplied data
*/
void net_eth_bridge_foreach(eth_bridge_cb_t cb, void *user_data);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_ETHERNET_BRIDGE_H_ */
``` | /content/code_sandbox/include/zephyr/net/ethernet_bridge.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,234 |
```objective-c
/*
*
*/
/**
* @file
* @brief IEEE 802.15.4 MAC information element (IE) related types and helpers
*
* This is not to be included by the application. This file contains only those
* parts of the types required for IE support that need to be visible to IEEE
* 802.15.4 drivers and L2 at the same time, i.e. everything related to header
* IE representation, parsing and generation.
*
* All specification references in this file refer to IEEE 802.15.4-2020.
*
* @note All structs and attributes in this file that directly represent parts
* of IEEE 802.15.4 frames are in LITTLE ENDIAN, see section 4, especially
* section 4.3.
*/
#ifndef ZEPHYR_INCLUDE_NET_IEEE802154_IE_H_
#define ZEPHYR_INCLUDE_NET_IEEE802154_IE_H_
#include <zephyr/net/buf.h>
#include <zephyr/sys/byteorder.h>
/**
* @addtogroup ieee802154_driver
* @{
*
* @name IEEE 802.15.4, section 7.4.2: MAC header information elements
* @{
*/
/**
* @brief Information Element Types.
*
* @details See sections 7.4.2.1 and 7.4.3.1.
*/
enum ieee802154_ie_type {
IEEE802154_IE_TYPE_HEADER = 0x0, /**< Header type */
IEEE802154_IE_TYPE_PAYLOAD, /**< Payload type */
};
/**
* @brief Header Information Element IDs.
*
* @details See section 7.4.2.1, table 7-7, partial list, only IEs actually used
* are implemented.
*/
enum ieee802154_header_ie_element_id {
IEEE802154_HEADER_IE_ELEMENT_ID_VENDOR_SPECIFIC_IE = 0x00, /**< Vendor specific IE */
IEEE802154_HEADER_IE_ELEMENT_ID_CSL_IE = 0x1a, /**< CSL IE */
IEEE802154_HEADER_IE_ELEMENT_ID_RIT_IE = 0x1b, /**< RIT IE */
IEEE802154_HEADER_IE_ELEMENT_ID_RENDEZVOUS_TIME_IE = 0x1d, /**< Rendezvous time IE */
IEEE802154_HEADER_IE_ELEMENT_ID_TIME_CORRECTION_IE = 0x1e, /**< Time correction IE */
IEEE802154_HEADER_IE_ELEMENT_ID_HEADER_TERMINATION_1 = 0x7e, /**< Header termination 1 */
IEEE802154_HEADER_IE_ELEMENT_ID_HEADER_TERMINATION_2 = 0x7f, /**< Header termination 2 */
/* partial list, add additional ids as needed */
};
/** @cond INTERNAL_HIDDEN */
#define IEEE802154_VENDOR_SPECIFIC_IE_OUI_LEN 3
/** INTERNAL_HIDDEN @endcond */
/** @brief Vendor Specific Header IE, see section 7.4.2.3. */
struct ieee802154_header_ie_vendor_specific {
/** Vendor OUI */
uint8_t vendor_oui[IEEE802154_VENDOR_SPECIFIC_IE_OUI_LEN];
/** Vendor specific information */
uint8_t *vendor_specific_info;
} __packed;
/** @brief Full CSL IE, see section 7.4.2.3. */
struct ieee802154_header_ie_csl_full {
uint16_t csl_phase; /**< CSL phase */
uint16_t csl_period; /**< CSL period */
uint16_t csl_rendezvous_time; /**< Rendezvous time */
} __packed;
/** @brief Reduced CSL IE, see section 7.4.2.3. */
struct ieee802154_header_ie_csl_reduced {
uint16_t csl_phase; /**< CSL phase */
uint16_t csl_period; /**< CSL period */
} __packed;
/** @brief Generic CSL IE, see section 7.4.2.3. */
struct ieee802154_header_ie_csl {
union {
/** CSL full information */
struct ieee802154_header_ie_csl_full full;
/** CSL reduced information */
struct ieee802154_header_ie_csl_reduced reduced;
};
} __packed;
/** @brief RIT IE, see section 7.4.2.4. */
struct ieee802154_header_ie_rit {
uint8_t time_to_first_listen; /**< Time to First Listen */
uint8_t number_of_repeat_listen; /**< Number of Repeat Listen */
uint16_t repeat_listen_interval; /**< Repeat listen interval */
} __packed;
/**
* @brief Full Rendezvous Time IE, see section 7.4.2.6
* (macCslInterval is nonzero).
*/
struct ieee802154_header_ie_rendezvous_time_full {
uint16_t rendezvous_time; /**< Rendezvous time */
uint16_t wakeup_interval; /**< Wakeup interval */
} __packed;
/**
* @brief Reduced Rendezvous Time IE, see section 7.4.2.6
* (macCslInterval is zero).
*/
struct ieee802154_header_ie_rendezvous_time_reduced {
uint16_t rendezvous_time; /**< Rendezvous time */
} __packed;
/** @brief Rendezvous Time IE, see section 7.4.2.6. */
struct ieee802154_header_ie_rendezvous_time {
union {
/** Rendezvous time full information */
struct ieee802154_header_ie_rendezvous_time_full full;
/** Rendezvous time reduced information */
struct ieee802154_header_ie_rendezvous_time_reduced reduced;
};
} __packed;
/** @brief Time Correction IE, see section 7.4.2.7. */
struct ieee802154_header_ie_time_correction {
uint16_t time_sync_info; /**< Time synchronization information */
} __packed;
/** @cond INTERNAL_HIDDEN */
/* @brief Generic Header IE, see section 7.4.2.1. */
struct ieee802154_header_ie {
#if CONFIG_LITTLE_ENDIAN
uint16_t length : 7;
uint16_t element_id_low : 1; /* see enum ieee802154_header_ie_element_id */
uint16_t element_id_high : 7;
uint16_t type : 1; /* always 0 */
#else
uint16_t element_id_low : 1; /* see enum ieee802154_header_ie_element_id */
uint16_t length : 7;
uint16_t type : 1; /* always 0 */
uint16_t element_id_high : 7;
#endif
union {
struct ieee802154_header_ie_vendor_specific vendor_specific;
struct ieee802154_header_ie_csl csl;
struct ieee802154_header_ie_rit rit;
struct ieee802154_header_ie_rendezvous_time rendezvous_time;
struct ieee802154_header_ie_time_correction time_correction;
/* add additional supported header IEs here */
} content;
} __packed;
/** INTERNAL_HIDDEN @endcond */
/** @brief The header IE's header length (2 bytes). */
#define IEEE802154_HEADER_IE_HEADER_LENGTH sizeof(uint16_t)
/** @cond INTERNAL_HIDDEN */
#define IEEE802154_DEFINE_HEADER_IE(_element_id, _length, _content, _content_type) \
(struct ieee802154_header_ie) { \
.length = (_length), \
.element_id_high = (_element_id) >> 1U, .element_id_low = (_element_id) & 0x01, \
.type = IEEE802154_IE_TYPE_HEADER, \
.content._content_type = _content, \
}
#define IEEE802154_DEFINE_HEADER_IE_VENDOR_SPECIFIC_CONTENT_LEN(_vendor_specific_info_len) \
(IEEE802154_VENDOR_SPECIFIC_IE_OUI_LEN + (_vendor_specific_info_len))
#define IEEE802154_DEFINE_HEADER_IE_VENDOR_SPECIFIC_CONTENT(_vendor_oui, _vendor_specific_info) \
(struct ieee802154_header_ie_vendor_specific) { \
.vendor_oui = _vendor_oui, .vendor_specific_info = (_vendor_specific_info), \
}
#define IEEE802154_DEFINE_HEADER_IE_CSL_REDUCED_CONTENT(_csl_phase, _csl_period) \
(struct ieee802154_header_ie_csl_reduced) { \
.csl_phase = sys_cpu_to_le16(_csl_phase), \
.csl_period = sys_cpu_to_le16(_csl_period), \
}
#define IEEE802154_DEFINE_HEADER_IE_CSL_FULL_CONTENT(_csl_phase, _csl_period, \
_csl_rendezvous_time) \
(struct ieee802154_header_ie_csl_full) { \
.csl_phase = sys_cpu_to_le16(_csl_phase), \
.csl_period = sys_cpu_to_le16(_csl_period), \
.csl_rendezvous_time = sys_cpu_to_le16(_csl_rendezvous_time), \
}
#define IEEE802154_HEADER_IE_TIME_CORRECTION_NACK 0x8000
#define IEEE802154_HEADER_IE_TIME_CORRECTION_MASK 0x0fff
#define IEEE802154_HEADER_IE_TIME_CORRECTION_SIGN_BIT_MASK 0x0800
#define IEEE802154_DEFINE_HEADER_IE_TIME_CORRECTION_CONTENT(_ack, _time_correction_us) \
(struct ieee802154_header_ie_time_correction) { \
.time_sync_info = sys_cpu_to_le16( \
(!(_ack) * IEEE802154_HEADER_IE_TIME_CORRECTION_NACK) | \
((_time_correction_us) & IEEE802154_HEADER_IE_TIME_CORRECTION_MASK)), \
}
/** INTERNAL_HIDDEN @endcond */
/**
* @brief Define a vendor specific header IE, see section 7.4.2.3.
*
* @details Example usage (all parameters in little endian):
*
* @code{.c}
* uint8_t vendor_specific_info[] = {...some vendor specific IE content...};
* struct ieee802154_header_ie header_ie = IEEE802154_DEFINE_HEADER_IE_VENDOR_SPECIFIC(
* {0x9b, 0xb8, 0xea}, vendor_specific_info, sizeof(vendor_specific_info));
* @endcode
*
* @param _vendor_oui an initializer for a 3 byte vendor oui array in little
* endian
* @param _vendor_specific_info pointer to a variable length uint8_t array with
* the vendor specific IE content
* @param _vendor_specific_info_len the length of the vendor specific IE content
* (in bytes)
*/
#define IEEE802154_DEFINE_HEADER_IE_VENDOR_SPECIFIC(_vendor_oui, _vendor_specific_info, \
_vendor_specific_info_len) \
IEEE802154_DEFINE_HEADER_IE(IEEE802154_HEADER_IE_ELEMENT_ID_VENDOR_SPECIFIC_IE, \
IEEE802154_DEFINE_HEADER_IE_VENDOR_SPECIFIC_CONTENT_LEN( \
_vendor_specific_info_len), \
IEEE802154_DEFINE_HEADER_IE_VENDOR_SPECIFIC_CONTENT( \
_vendor_oui, _vendor_specific_info), \
vendor_specific)
/**
* @brief Define a reduced CSL IE, see section 7.4.2.3.
*
* @details Example usage (all parameters in CPU byte order):
*
* @code{.c}
* uint16_t csl_phase = ...;
* uint16_t csl_period = ...;
* struct ieee802154_header_ie header_ie =
* IEEE802154_DEFINE_HEADER_IE_CSL_REDUCED(csl_phase, csl_period);
* @endcode
*
* @param _csl_phase CSL phase in CPU byte order
* @param _csl_period CSL period in CPU byte order
*/
#define IEEE802154_DEFINE_HEADER_IE_CSL_REDUCED(_csl_phase, _csl_period) \
IEEE802154_DEFINE_HEADER_IE( \
IEEE802154_HEADER_IE_ELEMENT_ID_CSL_IE, \
sizeof(struct ieee802154_header_ie_csl_reduced), \
IEEE802154_DEFINE_HEADER_IE_CSL_REDUCED_CONTENT(_csl_phase, _csl_period), \
csl.reduced)
/**
* @brief Define a full CSL IE, see section 7.4.2.3.
*
* @details Example usage (all parameters in CPU byte order):
*
* @code{.c}
* uint16_t csl_phase = ...;
* uint16_t csl_period = ...;
* uint16_t csl_rendezvous_time = ...;
* struct ieee802154_header_ie header_ie =
 *	IEEE802154_DEFINE_HEADER_IE_CSL_FULL(csl_phase, csl_period, csl_rendezvous_time);
* @endcode
*
* @param _csl_phase CSL phase in CPU byte order
* @param _csl_period CSL period in CPU byte order
* @param _csl_rendezvous_time CSL rendezvous time in CPU byte order
*/
#define IEEE802154_DEFINE_HEADER_IE_CSL_FULL(_csl_phase, _csl_period, _csl_rendezvous_time) \
IEEE802154_DEFINE_HEADER_IE(IEEE802154_HEADER_IE_ELEMENT_ID_CSL_IE, \
sizeof(struct ieee802154_header_ie_csl_full), \
IEEE802154_DEFINE_HEADER_IE_CSL_FULL_CONTENT( \
_csl_phase, _csl_period, _csl_rendezvous_time), \
csl.full)
/**
* @brief Define a Time Correction IE, see section 7.4.2.7.
*
* @details Example usage (parameter in CPU byte order):
*
* @code{.c}
* uint16_t time_sync_info = ...;
* struct ieee802154_header_ie header_ie =
* IEEE802154_DEFINE_HEADER_IE_TIME_CORRECTION(true, time_sync_info);
* @endcode
*
* @param _ack whether or not the enhanced ACK frame that receives this IE is an
* ACK (true) or NACK (false)
* @param _time_correction_us the positive or negative deviation from expected
* RX time in microseconds
*/
#define IEEE802154_DEFINE_HEADER_IE_TIME_CORRECTION(_ack, _time_correction_us) \
IEEE802154_DEFINE_HEADER_IE( \
IEEE802154_HEADER_IE_ELEMENT_ID_TIME_CORRECTION_IE, \
sizeof(struct ieee802154_header_ie_time_correction), \
IEEE802154_DEFINE_HEADER_IE_TIME_CORRECTION_CONTENT(_ack, _time_correction_us), \
time_correction)
/**
 * @brief Retrieve the time correction value in microseconds from a Time Correction IE,
 * see section 7.4.2.7.
 *
 * @param[in] ie pointer to the Time Correction IE structure
 *
 * @return The time correction value in microseconds.
 */
static inline int16_t
ieee802154_header_ie_get_time_correction_us(struct ieee802154_header_ie_time_correction *ie)
{
	uint16_t time_sync_info = ie->time_sync_info;

	if (!(time_sync_info & IEEE802154_HEADER_IE_TIME_CORRECTION_SIGN_BIT_MASK)) {
		/* Positive value: mask out the NACK/status bits. */
		return (int16_t)(time_sync_info & IEEE802154_HEADER_IE_TIME_CORRECTION_MASK);
	}

	/* Negative value: sign-extend the 12 bit two's complement field. */
	return (int16_t)(time_sync_info | ~IEEE802154_HEADER_IE_TIME_CORRECTION_MASK);
}
/**
 * @brief Set the element ID of a header IE.
 *
 * @param[in] ie pointer to a header IE
 * @param[in] element_id IE element id in CPU byte order
 */
static inline void ieee802154_header_ie_set_element_id(struct ieee802154_header_ie *ie,
						       uint8_t element_id)
{
	/* The 8 bit element id spans two bit fields of the IE header,
	 * see section 7.4.2.1.
	 */
	ie->element_id_low = element_id & 0x01U;
	ie->element_id_high = element_id >> 1U;
}
/**
 * @brief Get the element ID of a header IE.
 *
 * @param[in] ie pointer to a header IE
 *
 * @return header IE element id in CPU byte order
 */
static inline uint8_t ieee802154_header_ie_get_element_id(struct ieee802154_header_ie *ie)
{
	uint8_t low_bit = ie->element_id_low;

	/* Recombine the two bit fields of the IE header, see section 7.4.2.1. */
	return (uint8_t)((ie->element_id_high << 1U) | low_bit);
}
/** @brief The length in bytes of a "Time Correction" header IE. */
#define IEEE802154_TIME_CORRECTION_HEADER_IE_LEN \
(IEEE802154_HEADER_IE_HEADER_LENGTH + sizeof(struct ieee802154_header_ie_time_correction))
/** @brief The length in bytes of a "Header Termination 1" header IE. */
#define IEEE802154_HEADER_TERMINATION_1_HEADER_IE_LEN IEEE802154_HEADER_IE_HEADER_LENGTH
/**
* @}
*
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_IEEE802154_IE_H_ */
``` | /content/code_sandbox/include/zephyr/net/ieee802154_ie.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,534 |
```objective-c
/*
*
*/
/**
* @file
* @brief Packet data common to all IEEE 802.15.4 L2 layers
*
* All references to the spec refer to IEEE 802.15.4-2020.
*/
#ifndef ZEPHYR_INCLUDE_NET_IEEE802154_PKT_H_
#define ZEPHYR_INCLUDE_NET_IEEE802154_PKT_H_
#include <string.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @cond ignore */
#ifndef NET_PKT_HAS_CONTROL_BLOCK
#define NET_PKT_HAS_CONTROL_BLOCK
#endif
/* See section 6.16.2.8 - Received Signal Strength Indicator (RSSI) */
#define IEEE802154_MAC_RSSI_MIN 0U /* corresponds to -174 dBm */
#define IEEE802154_MAC_RSSI_MAX 254U /* corresponds to 80 dBm */
#define IEEE802154_MAC_RSSI_UNDEFINED 255U /* used by us to indicate an undefined RSSI value */
#define IEEE802154_MAC_RSSI_DBM_MIN -174 /* in dBm */
#define IEEE802154_MAC_RSSI_DBM_MAX 80 /* in dBm */
#define IEEE802154_MAC_RSSI_DBM_UNDEFINED INT16_MIN /* represents an undefined RSSI value */
struct net_pkt_cb_ieee802154 {
#if defined(CONFIG_NET_L2_OPENTHREAD)
uint32_t ack_fc; /* Frame counter set in the ACK */
uint8_t ack_keyid; /* Key index set in the ACK */
#endif
union {
/* RX packets */
struct {
uint8_t lqi; /* Link Quality Indicator */
/* See section 6.16.2.8 - Received Signal Strength Indicator (RSSI)
* "RSSI is represented as one octet of integer [...]; therefore,
			 * the minimum and maximum values are 0 (-174 dBm) and 254 (80 dBm),
* respectively. 255 is reserved." (MAC PIB attribute macRssi, see
* section 8.4.3.10, table 8-108)
*
* TX packets will show zero for this value. Drivers may set the
* field to the reserved value 255 (0xff) to indicate that an RSSI
* value is not available for this packet.
*/
uint8_t rssi;
};
};
/* Flags */
uint8_t ack_fpb : 1; /* Frame Pending Bit was set in the ACK */
uint8_t frame_secured : 1; /* Frame is authenticated and
* encrypted according to its
* Auxiliary Security Header
*/
uint8_t mac_hdr_rdy : 1; /* Indicates if frame's MAC header
* is ready to be transmitted or if
* it requires further modifications,
* e.g. Frame Counter injection.
*/
#if defined(CONFIG_NET_L2_OPENTHREAD)
uint8_t ack_seb : 1; /* Security Enabled Bit was set in the ACK */
#endif
};
struct net_pkt;
static inline void *net_pkt_cb(struct net_pkt *pkt);
/* Access the IEEE 802.15.4 L2 control block stored in a network packet.
 * Fix: dropped the stray semicolon after the function body -- an extra
 * file-scope ';' is an ISO C constraint violation (warns with -pedantic).
 */
static inline struct net_pkt_cb_ieee802154 *net_pkt_cb_ieee802154(struct net_pkt *pkt)
{
	return (struct net_pkt_cb_ieee802154 *)net_pkt_cb(pkt);
}
static inline uint8_t net_pkt_ieee802154_lqi(struct net_pkt *pkt)
{
return net_pkt_cb_ieee802154(pkt)->lqi;
}
static inline void net_pkt_set_ieee802154_lqi(struct net_pkt *pkt, uint8_t lqi)
{
net_pkt_cb_ieee802154(pkt)->lqi = lqi;
}
/**
* @brief Get the unsigned RSSI value as defined in section 6.16.2.8,
* Received Signal Strength Indicator (RSSI)
*
* @param pkt Pointer to the packet.
*
* @returns RSSI represented as unsigned byte value, ranging from
 * 0 (-174 dBm) to 254 (80 dBm).
* The special value 255 (IEEE802154_MAC_RSSI_UNDEFINED)
* indicates that an RSSI value is not available for this
* packet. Will return zero for packets on the TX path.
*/
static inline uint8_t net_pkt_ieee802154_rssi(struct net_pkt *pkt)
{
return net_pkt_cb_ieee802154(pkt)->rssi;
}
/**
* @brief Set the unsigned RSSI value as defined in section 6.16.2.8,
* Received Signal Strength Indicator (RSSI).
*
* @param pkt Pointer to the packet that was received with the given
* RSSI.
* @param rssi RSSI represented as unsigned byte value, ranging from
 * 0 (-174 dBm) to 254 (80 dBm).
* The special value 255 (IEEE802154_MAC_RSSI_UNDEFINED)
* indicates that an RSSI value is not available for this
* packet.
*/
static inline void net_pkt_set_ieee802154_rssi(struct net_pkt *pkt, uint8_t rssi)
{
net_pkt_cb_ieee802154(pkt)->rssi = rssi;
}
/**
 * @brief Get a signed RSSI value measured in dBm.
 *
 * @param pkt Pointer to the packet.
 *
 * @returns RSSI in dBm, or the special value
 *          IEEE802154_MAC_RSSI_DBM_UNDEFINED when no RSSI is available
 *          for this packet. Packets on the TX path always yield
 *          -174 dBm (their internal unsigned value is zero).
 */
static inline int16_t net_pkt_ieee802154_rssi_dbm(struct net_pkt *pkt)
{
	int16_t raw_rssi = net_pkt_cb_ieee802154(pkt)->rssi;

	if (raw_rssi == IEEE802154_MAC_RSSI_UNDEFINED) {
		return IEEE802154_MAC_RSSI_DBM_UNDEFINED;
	}

	/* Shift the unsigned MAC representation into the dBm range. */
	return raw_rssi + IEEE802154_MAC_RSSI_DBM_MIN;
}
/**
 * @brief Set the RSSI value as a signed integer measured in dBm.
 *
 * @param pkt Pointer to the packet that was received with the given
 * RSSI.
 * @param rssi RSSI in dBm. Pass IEEE802154_MAC_RSSI_DBM_UNDEFINED when
 * no RSSI value is available for this packet. Values above
 * 80 dBm are clamped to 80 dBm, values below -174 dBm are
 * clamped to -174 dBm.
 */
static inline void net_pkt_set_ieee802154_rssi_dbm(struct net_pkt *pkt, int16_t rssi)
{
	uint8_t raw_rssi;

	if (rssi == IEEE802154_MAC_RSSI_DBM_UNDEFINED) {
		raw_rssi = IEEE802154_MAC_RSSI_UNDEFINED;
	} else if (rssi < IEEE802154_MAC_RSSI_DBM_MIN) {
		raw_rssi = IEEE802154_MAC_RSSI_MIN;
	} else if (rssi > IEEE802154_MAC_RSSI_DBM_MAX) {
		raw_rssi = IEEE802154_MAC_RSSI_MAX;
	} else {
		/* In-range: shift from dBm into the unsigned MAC representation. */
		raw_rssi = (uint8_t)(rssi - IEEE802154_MAC_RSSI_DBM_MIN);
	}

	net_pkt_cb_ieee802154(pkt)->rssi = raw_rssi;
}
static inline bool net_pkt_ieee802154_ack_fpb(struct net_pkt *pkt)
{
return net_pkt_cb_ieee802154(pkt)->ack_fpb;
}
static inline void net_pkt_set_ieee802154_ack_fpb(struct net_pkt *pkt, bool fpb)
{
net_pkt_cb_ieee802154(pkt)->ack_fpb = fpb;
}
static inline bool net_pkt_ieee802154_frame_secured(struct net_pkt *pkt)
{
return net_pkt_cb_ieee802154(pkt)->frame_secured;
}
static inline void net_pkt_set_ieee802154_frame_secured(struct net_pkt *pkt, bool secured)
{
net_pkt_cb_ieee802154(pkt)->frame_secured = secured;
}
static inline bool net_pkt_ieee802154_mac_hdr_rdy(struct net_pkt *pkt)
{
return net_pkt_cb_ieee802154(pkt)->mac_hdr_rdy;
}
static inline void net_pkt_set_ieee802154_mac_hdr_rdy(struct net_pkt *pkt, bool rdy)
{
net_pkt_cb_ieee802154(pkt)->mac_hdr_rdy = rdy;
}
#if defined(CONFIG_NET_L2_OPENTHREAD)
static inline uint32_t net_pkt_ieee802154_ack_fc(struct net_pkt *pkt)
{
return net_pkt_cb_ieee802154(pkt)->ack_fc;
}
static inline void net_pkt_set_ieee802154_ack_fc(struct net_pkt *pkt, uint32_t fc)
{
net_pkt_cb_ieee802154(pkt)->ack_fc = fc;
}
static inline uint8_t net_pkt_ieee802154_ack_keyid(struct net_pkt *pkt)
{
return net_pkt_cb_ieee802154(pkt)->ack_keyid;
}
static inline void net_pkt_set_ieee802154_ack_keyid(struct net_pkt *pkt, uint8_t keyid)
{
net_pkt_cb_ieee802154(pkt)->ack_keyid = keyid;
}
static inline bool net_pkt_ieee802154_ack_seb(struct net_pkt *pkt)
{
return net_pkt_cb_ieee802154(pkt)->ack_seb;
}
static inline void net_pkt_set_ieee802154_ack_seb(struct net_pkt *pkt, bool seb)
{
net_pkt_cb_ieee802154(pkt)->ack_seb = seb;
}
#endif /* CONFIG_NET_L2_OPENTHREAD */
/** @endcond */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_IEEE802154_PKT_H_ */
``` | /content/code_sandbox/include/zephyr/net/ieee802154_pkt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,126 |
```objective-c
/** @file
* @brief SocketCAN definitions.
*
* Definitions for SocketCAN support.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKETCAN_H_
#define ZEPHYR_INCLUDE_NET_SOCKETCAN_H_
#include <zephyr/types.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_if.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief SocketCAN library
* @defgroup socket_can SocketCAN library
* @since 1.14
* @version 0.8.0
* @ingroup networking
* @{
*/
/** Protocols of the protocol family PF_CAN */
#define CAN_RAW 1
/** @cond INTERNAL_HIDDEN */
/* SocketCAN options */
#define SOL_CAN_BASE 100
#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW)
enum {
CAN_RAW_FILTER = 1,
};
/** @endcond */
/* SocketCAN MTU size compatible with Linux */
#ifdef CONFIG_CAN_FD_MODE
/** SocketCAN max data length */
#define SOCKETCAN_MAX_DLEN 64U
/** CAN FD frame MTU */
#define CANFD_MTU (sizeof(struct socketcan_frame))
/** CAN frame MTU */
#define CAN_MTU (CANFD_MTU - 56U)
#else /* CONFIG_CAN_FD_MODE */
/** SocketCAN max data length */
#define SOCKETCAN_MAX_DLEN 8U
/** CAN frame MTU */
#define CAN_MTU (sizeof(struct socketcan_frame))
#endif /* !CONFIG_CAN_FD_MODE */
/* CAN FD specific flags from Linux Kernel (include/uapi/linux/can.h) */
#define CANFD_BRS 0x01 /**< Bit rate switch (second bitrate for payload data) */
#define CANFD_ESI 0x02 /**< Error state indicator of the transmitting node */
#define CANFD_FDF 0x04 /**< Mark CAN FD for dual use of struct canfd_frame */
/**
 * struct sockaddr_can - The sockaddr structure for CAN sockets
 *
 * Identifies the CAN network interface a socket is bound or connected to,
 * by interface index.
 */
struct sockaddr_can {
	sa_family_t can_family; /**< Address family (AF_CAN for CAN sockets) */
	int can_ifindex;        /**< SocketCAN network interface index */
};
/**
* @name Linux SocketCAN compatibility
*
* The following structures and functions provide compatibility with the CAN
* frame and CAN filter formats used by Linux SocketCAN.
*
* @{
*/
/**
* CAN Identifier structure for Linux SocketCAN compatibility.
*
* The fields in this type are:
*
* @code{.text}
*
* +------+--------------------------------------------------------------+
* | Bits | Description |
* +======+==============================================================+
* | 0-28 | CAN identifier (11/29 bit) |
* +------+--------------------------------------------------------------+
* | 29 | Error message frame flag (0 = data frame, 1 = error message) |
* +------+--------------------------------------------------------------+
* | 30 | Remote transmission request flag (1 = RTR frame) |
* +------+--------------------------------------------------------------+
* | 31 | Frame format flag (0 = standard 11 bit, 1 = extended 29 bit) |
* +------+--------------------------------------------------------------+
*
* @endcode
*/
typedef uint32_t socketcan_id_t;
/**
 * @brief CAN frame for Linux SocketCAN compatibility.
 */
struct socketcan_frame {
	/** 32-bit CAN ID + EFF/RTR/ERR flags (see @ref socketcan_id_t). */
	socketcan_id_t can_id;

	/** Frame payload length in bytes (at most @ref SOCKETCAN_MAX_DLEN). */
	uint8_t len;

	/** Additional flags for CAN FD (CANFD_BRS / CANFD_ESI / CANFD_FDF). */
	uint8_t flags;

	/** @cond INTERNAL_HIDDEN */
	uint8_t res0; /* reserved/padding. */
	uint8_t res1; /* reserved/padding. */
	/** @endcond */

	/** The payload data. */
	uint8_t data[SOCKETCAN_MAX_DLEN];
};
/**
 * @brief CAN filter for Linux SocketCAN compatibility.
 *
 * A filter is considered a match when
 * `received_can_id & can_mask == can_id & can_mask`.
 */
struct socketcan_filter {
	/** The CAN identifier to match. */
	socketcan_id_t can_id;
	/** The mask applied to @a can_id for matching. */
	socketcan_id_t can_mask;
	/** Additional flags for FD frame filter (see the CANFD_* flags). */
	uint8_t flags;
};
/** @} */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_SOCKETCAN_H_ */
``` | /content/code_sandbox/include/zephyr/net/socketcan.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 900 |
```objective-c
/*
*
*/
/**
* @file
* @brief Socket Offload Redirect API
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKET_OFFLOAD_H_
#define ZEPHYR_INCLUDE_NET_SOCKET_OFFLOAD_H_
#include <zephyr/net/net_ip.h>
#include <zephyr/net/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief An offloaded Socket DNS API interface
 *
 * It is assumed that these offload functions follow the
 * POSIX socket API standard for arguments, return values and setting of errno.
 */
struct socket_dns_offload {
	/** DNS getaddrinfo offloaded implementation API.
	 * Per POSIX semantics, the result list in @p res is allocated by the
	 * implementation and released with the freeaddrinfo hook below.
	 */
	int (*getaddrinfo)(const char *node, const char *service,
			   const struct zsock_addrinfo *hints,
			   struct zsock_addrinfo **res);
	/** DNS freeaddrinfo offloaded implementation API.
	 * Frees a result list previously returned by the getaddrinfo hook.
	 */
	void (*freeaddrinfo)(struct zsock_addrinfo *res);
};
/**
* @brief Register an offloaded socket DNS API interface.
*
* @param ops A pointer to the offloaded socket DNS API interface.
*/
void socket_offload_dns_register(const struct socket_dns_offload *ops);
/** @cond INTERNAL_HIDDEN */
int socket_offload_getaddrinfo(const char *node, const char *service,
const struct zsock_addrinfo *hints,
struct zsock_addrinfo **res);
void socket_offload_freeaddrinfo(struct zsock_addrinfo *res);
/** @endcond */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_SOCKET_OFFLOAD_H_ */
``` | /content/code_sandbox/include/zephyr/net/socket_offload.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 320 |
```objective-c
/**
* @file
* @brief BSD Socket service API
*
* API can be used to install a k_work that is called
* if there is data received to a socket.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKET_SERVICE_H_
#define ZEPHYR_INCLUDE_NET_SOCKET_SERVICE_H_
/**
* @brief BSD socket service API
* @defgroup bsd_socket_service BSD socket service API
* @since 3.6
* @version 0.1.0
* @ingroup networking
* @{
*/
#include <sys/types.h>
#include <zephyr/types.h>
#include <zephyr/net/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * This struct contains information which socket triggered
 * calls to the callback function.
 */
struct net_socket_service_event {
	/** k_work that is done when there is desired activity in file descriptor. */
	struct k_work work;

	/** Callback to be called for desired socket activity */
	k_work_handler_t callback;

	/** Socket information that triggered this event.
	 * An fd of -1 marks the slot as unused (the initial state set up by
	 * __z_net_socket_service_define()).
	 */
	struct zsock_pollfd event;

	/** User data */
	void *user_data;

	/** Service back pointer */
	struct net_socket_service_desc *svc;
};
/**
 * Main structure holding socket service configuration information.
 * The k_work item is created so that when there is data coming
 * to those fds, the k_work callback is then called.
 * The workqueue can be set NULL in which case system workqueue is used.
 * The service descriptor should be created at build time, and then used
 * as a parameter to register the sockets to be monitored.
 * User should create needed sockets and then setup the poll struct and
 * then register the sockets to be monitored at runtime.
 */
struct net_socket_service_desc {
#if CONFIG_NET_SOCKETS_LOG_LEVEL >= LOG_LEVEL_DBG
	/**
	 * Owner name. This can be used in debugging to see who has
	 * registered this service. Only present with socket debug logging
	 * enabled; filled in with "file:line" of the defining macro.
	 */
	const char *owner;
#endif
	/** Workqueue where the work is submitted. */
	struct k_work_q *work_q;

	/** Pointer to the list of services that we are listening */
	struct net_socket_service_event *pev;

	/** Length of the pollable socket array for this service. */
	int pev_len;

	/** Where are my pollfd entries in the global list */
	int *idx;
};
/** @cond INTERNAL_HIDDEN */
#define __z_net_socket_svc_get_name(_svc_id) __z_net_socket_service_##_svc_id
#define __z_net_socket_svc_get_idx(_svc_id) __z_net_socket_service_idx_##_svc_id
#define __z_net_socket_svc_get_owner __FILE__ ":" STRINGIFY(__LINE__)
extern void net_socket_service_callback(struct k_work *work);
#if CONFIG_NET_SOCKETS_LOG_LEVEL >= LOG_LEVEL_DBG
#define NET_SOCKET_SERVICE_OWNER .owner = __z_net_socket_svc_get_owner,
#else
#define NET_SOCKET_SERVICE_OWNER
#endif
/* Instantiate one socket service:
 *  - a per-service index variable (__z_net_socket_svc_get_idx(_name))
 *    recording where this service's pollfd entries live in the global list,
 *  - an array of _count net_socket_service_event slots, each initialized
 *    with an invalid fd (-1) and the user callback _cb,
 *  - an iterable-section net_socket_service_desc named _name tying the
 *    above together.
 * Any extra __VA_ARGS__ qualifier is emitted in front of the descriptor
 * definition; NET_SOCKET_SERVICE_SYNC_DEFINE_STATIC passes "static" here
 * to give the descriptor internal linkage.
 */
#define __z_net_socket_service_define(_name, _work_q, _cb, _count, ...) \
	static int __z_net_socket_svc_get_idx(_name); \
	static struct net_socket_service_event \
	__z_net_socket_svc_get_name(_name)[_count] = { \
		[0 ... ((_count) - 1)] = { \
			.event.fd = -1, /* Invalid socket */ \
			.callback = _cb, \
		} \
	}; \
	COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__), (), __VA_ARGS__) \
	const STRUCT_SECTION_ITERABLE(net_socket_service_desc, _name) = { \
		NET_SOCKET_SERVICE_OWNER \
		.work_q = (_work_q), \
		.pev = __z_net_socket_svc_get_name(_name), \
		.pev_len = (_count), \
		.idx = &__z_net_socket_svc_get_idx(_name), \
	}
/** @endcond */
/**
* @brief Statically define a network socket service.
* The user callback is called synchronously for this service meaning that
* the service API will wait until the user callback returns before continuing
* with next socket service.
*
* The socket service can be accessed outside the module where it is defined using:
*
* @code extern struct net_socket_service_desc <name>; @endcode
*
* @note This macro cannot be used together with a static keyword.
* If such a use-case is desired, use NET_SOCKET_SERVICE_SYNC_DEFINE_STATIC
* instead.
*
* @param name Name of the service.
* @param work_q Pointer to workqueue where the work is done. Can be null in which case
* system workqueue is used.
* @param cb Callback function that is called for socket activity.
* @param count How many pollable sockets is needed for this service.
*/
#define NET_SOCKET_SERVICE_SYNC_DEFINE(name, work_q, cb, count) \
__z_net_socket_service_define(name, work_q, cb, count)
/**
* @brief Statically define a network socket service in a private (static) scope.
* The user callback is called synchronously for this service meaning that
* the service API will wait until the user callback returns before continuing
* with next socket service.
*
* @param name Name of the service.
* @param work_q Pointer to workqueue where the work is done. Can be null in which case
* system workqueue is used.
* @param cb Callback function that is called for socket activity.
* @param count How many pollable sockets is needed for this service.
*/
#define NET_SOCKET_SERVICE_SYNC_DEFINE_STATIC(name, work_q, cb, count) \
__z_net_socket_service_define(name, work_q, cb, count, static)
/**
* @brief Register pollable sockets.
*
* @param service Pointer to a service description.
* @param fds Socket array to poll.
* @param len Length of the socket array.
* @param user_data User specific data.
*
* @retval 0 No error
* @retval -ENOENT Service is not found.
 * @retval -EINVAL Invalid parameter.
*/
__syscall int net_socket_service_register(const struct net_socket_service_desc *service,
struct zsock_pollfd *fds, int len, void *user_data);
/**
 * @brief Unregister pollable sockets.
 *
 * @param service Pointer to a service description.
 *
 * @retval 0 No error
 * @retval -ENOENT Service is not found.
 * @retval -EINVAL Invalid parameter.
 */
static inline int net_socket_service_unregister(const struct net_socket_service_desc *service)
{
	/* Unregistering is implemented as registering an empty (NULL, 0)
	 * socket set for the service.
	 */
	return net_socket_service_register(service, NULL, 0, NULL);
}
/**
* @typedef net_socket_service_cb_t
* @brief Callback used while iterating over socket services.
*
* @param svc Pointer to current socket service.
* @param user_data A valid pointer to user data or NULL
*/
typedef void (*net_socket_service_cb_t)(const struct net_socket_service_desc *svc,
void *user_data);
/**
* @brief Go through all the socket services and call callback for each service.
*
* @param cb User-supplied callback function to call
* @param user_data User specified data
*/
void net_socket_service_foreach(net_socket_service_cb_t cb, void *user_data);
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/socket_service.h>
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_SOCKET_SERVICE_H_ */
``` | /content/code_sandbox/include/zephyr/net/socket_service.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,578 |
```objective-c
/*
*
*/
/** @file
* @brief IGMP API
*/
#ifndef ZEPHYR_INCLUDE_NET_IGMP_H_
#define ZEPHYR_INCLUDE_NET_IGMP_H_
/**
* @brief IGMP (Internet Group Management Protocol)
* @defgroup igmp IGMP API
* @since 2.6
* @version 0.8.0
* @ingroup networking
* @{
*/
#include <zephyr/types.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_ip.h>
#ifdef __cplusplus
extern "C" {
#endif
/** IGMP parameters */
struct igmp_param {
	struct in_addr *source_list; /**< List of sources to include or exclude */
	size_t sources_len;          /**< Length of source list
				      * (NOTE(review): presumably the number of
				      * entries, not bytes -- confirm)
				      */
	bool include;                /**< Source list filter type
				      * (NOTE(review): presumably true selects
				      * INCLUDE mode, false EXCLUDE, as in
				      * IGMPv3 source filtering -- confirm)
				      */
};
/**
* @brief Join a given multicast group.
*
* @param iface Network interface where join message is sent
* @param addr Multicast group to join
* @param param Optional parameters
*
* @return Return 0 if joining was done, <0 otherwise.
*/
#if defined(CONFIG_NET_IPV4_IGMP)
int net_ipv4_igmp_join(struct net_if *iface, const struct in_addr *addr,
const struct igmp_param *param);
#else
/* Stub used when CONFIG_NET_IPV4_IGMP is disabled: reports the feature as
 * not implemented.
 */
static inline int net_ipv4_igmp_join(struct net_if *iface, const struct in_addr *addr,
				     const struct igmp_param *param)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(addr);
	ARG_UNUSED(param);
	return -ENOSYS;
}
#endif
/**
* @brief Leave a given multicast group.
*
* @param iface Network interface where leave message is sent
* @param addr Multicast group to leave
*
* @return Return 0 if leaving is done, <0 otherwise.
*/
#if defined(CONFIG_NET_IPV4_IGMP)
int net_ipv4_igmp_leave(struct net_if *iface, const struct in_addr *addr);
#else
/* Stub used when CONFIG_NET_IPV4_IGMP is disabled: reports the feature as
 * not implemented.
 */
static inline int net_ipv4_igmp_leave(struct net_if *iface,
				      const struct in_addr *addr)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(addr);
	return -ENOSYS;
}
#endif
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_IGMP_H_ */
``` | /content/code_sandbox/include/zephyr/net/igmp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 486 |
```objective-c
/*
*
*/
/** @file
* @brief TLS credentials management
*
* An API for applications to configure TLS credentials.
*/
#ifndef ZEPHYR_INCLUDE_NET_TLS_CREDENTIALS_H_
#define ZEPHYR_INCLUDE_NET_TLS_CREDENTIALS_H_
/**
* @brief TLS credentials management
* @defgroup tls_credentials TLS credentials management
* @since 1.13
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/** TLS credential types.
 * Used as the credential type argument of tls_credential_add(),
 * tls_credential_get() and tls_credential_delete().
 */
enum tls_credential_type {
	/** Unspecified credential. */
	TLS_CREDENTIAL_NONE,

	/** A trusted CA certificate. Use this to authenticate remote servers.
	 * Used with certificate-based ciphersuites.
	 */
	TLS_CREDENTIAL_CA_CERTIFICATE,

	/** A public server certificate. Use this to register your own server
	 * certificate. Should be registered together with a corresponding
	 * private key. Used with certificate-based ciphersuites.
	 */
	TLS_CREDENTIAL_SERVER_CERTIFICATE,

	/** Private key. Should be registered together with a corresponding
	 * public certificate. Used with certificate-based ciphersuites.
	 */
	TLS_CREDENTIAL_PRIVATE_KEY,

	/** Pre-shared key. Should be registered together with a corresponding
	 * PSK identity. Used with PSK-based ciphersuites.
	 */
	TLS_CREDENTIAL_PSK,

	/** Pre-shared key identity. Should be registered together with a
	 * corresponding PSK. Used with PSK-based ciphersuites.
	 */
	TLS_CREDENTIAL_PSK_ID
};
/** Secure tag, a reference to TLS credential
*
* Secure tag can be used to reference credential after it was registered
* in the system.
*
* @note Some TLS credentials come in pairs:
* - TLS_CREDENTIAL_SERVER_CERTIFICATE with TLS_CREDENTIAL_PRIVATE_KEY,
* - TLS_CREDENTIAL_PSK with TLS_CREDENTIAL_PSK_ID.
* Such pairs of credentials must be assigned the same secure tag to be
* correctly handled in the system.
*
* @note Negative values are reserved for internal use.
*/
typedef int sec_tag_t;
/**
* @brief Add a TLS credential.
*
* @details This function adds a TLS credential, that can be used
* by TLS/DTLS for authentication.
*
* @param tag A security tag that credential will be referenced with.
* @param type A TLS/DTLS credential type.
* @param cred A TLS/DTLS credential.
* @param credlen A TLS/DTLS credential length.
*
* @retval 0 TLS credential successfully added.
* @retval -EACCES Access to the TLS credential subsystem was denied.
* @retval -ENOMEM Not enough memory to add new TLS credential.
* @retval -EEXIST TLS credential of specific tag and type already exists.
*/
int tls_credential_add(sec_tag_t tag, enum tls_credential_type type,
const void *cred, size_t credlen);
/**
* @brief Get a TLS credential.
*
* @details This function gets an already registered TLS credential,
* referenced by @p tag secure tag of @p type.
*
* @param tag A security tag of requested credential.
* @param type A TLS/DTLS credential type of requested credential.
* @param cred A buffer for TLS/DTLS credential.
* @param credlen A buffer size on input. TLS/DTLS credential length on output.
*
* @retval 0 TLS credential successfully obtained.
* @retval -EACCES Access to the TLS credential subsystem was denied.
* @retval -ENOENT Requested TLS credential was not found.
* @retval -EFBIG Requested TLS credential does not fit in the buffer provided.
*/
int tls_credential_get(sec_tag_t tag, enum tls_credential_type type,
void *cred, size_t *credlen);
/**
* @brief Delete a TLS credential.
*
* @details This function removes a TLS credential, referenced by @p tag
* secure tag of @p type.
*
* @param tag A security tag corresponding to removed credential.
* @param type A TLS/DTLS credential type of removed credential.
*
* @retval 0 TLS credential successfully deleted.
* @retval -EACCES Access to the TLS credential subsystem was denied.
* @retval -ENOENT Requested TLS credential was not found.
*/
int tls_credential_delete(sec_tag_t tag, enum tls_credential_type type);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_TLS_CREDENTIALS_H_ */
``` | /content/code_sandbox/include/zephyr/net/tls_credentials.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 967 |
```objective-c
/** @file
@brief UDP utility functions
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_UDP_H_
#define ZEPHYR_INCLUDE_NET_UDP_H_
#include <zephyr/types.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_pkt.h>
#ifdef __cplusplus
extern "C" {
#endif
/* These APIs are mostly meant for Zephyr internal use so do not generate
* documentation for them.
*/
/** @cond INTERNAL_HIDDEN */
/**
* @brief UDP library
* @defgroup udp UDP Library
* @ingroup networking
* @{
*/
/**
* @brief Get UDP packet header data from net_pkt.
*
* @details The values in the returned header are in network byte order.
* Note that you must access the UDP header values by the returned pointer,
* the hdr parameter is just a placeholder for the header data and it might
* not contain anything if the header fits properly in the first fragment of
* the network packet.
*
* @param pkt Network packet
* @param hdr Where to place the header if it does not fit in first fragment
* of the network packet. This might not be populated if UDP header fits in
* net_buf fragment.
*
* @return Return pointer to header or NULL if something went wrong.
* Always use the returned pointer to access the UDP header.
*/
#if defined(CONFIG_NET_UDP)
struct net_udp_hdr *net_udp_get_hdr(struct net_pkt *pkt,
struct net_udp_hdr *hdr);
#else
/* Stub used when CONFIG_NET_UDP is disabled: no UDP header is available.
 * NOTE(review): unlike other disabled-feature stubs in this tree (e.g. the
 * IGMP ones) the parameters are not marked ARG_UNUSED(), which may trigger
 * unused-parameter warnings -- confirm and align.
 */
static inline struct net_udp_hdr *net_udp_get_hdr(struct net_pkt *pkt,
						  struct net_udp_hdr *hdr)
{
	return NULL;
}
#endif /* CONFIG_NET_UDP */
/**
* @brief Set UDP packet header data in net_pkt.
*
* @details The values in the header must be in network byte order.
* This function is normally called after a call to net_udp_get_hdr().
* The hdr parameter value should be the same that is returned by function
* net_udp_get_hdr() call. Note that if the UDP header fits in first net_pkt
* fragment, then this function will not do anything as your hdr parameter
* was pointing directly to net_pkt.
*
* @param pkt Network packet
* @param hdr Header data pointer that was returned by net_udp_get_hdr().
*
* @return Return pointer to header or NULL if something went wrong.
*/
#if defined(CONFIG_NET_UDP)
struct net_udp_hdr *net_udp_set_hdr(struct net_pkt *pkt,
struct net_udp_hdr *hdr);
#else
/* Stub used when CONFIG_NET_UDP is disabled: nothing to set, returns NULL.
 * NOTE(review): parameters are not marked ARG_UNUSED() here, unlike other
 * disabled-feature stubs in this tree -- confirm and align.
 */
static inline struct net_udp_hdr *net_udp_set_hdr(struct net_pkt *pkt,
						  struct net_udp_hdr *hdr)
{
	return NULL;
}
#endif /* CONFIG_NET_UDP */
/**
* @}
*/
/** @endcond */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_UDP_H_ */
``` | /content/code_sandbox/include/zephyr/net/udp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 597 |
```objective-c
/** @file
* @brief Network context definitions
*
* An API for applications to define a network connection.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_CONTEXT_H_
#define ZEPHYR_INCLUDE_NET_NET_CONTEXT_H_
/**
* @brief Application network context
* @defgroup net_context Application network context
* @since 1.0
* @version 0.8.0
* @ingroup networking
* @{
*/
#include <zephyr/kernel.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_stats.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Is this context used or not */
#define NET_CONTEXT_IN_USE BIT(0)
/** @cond INTERNAL_HIDDEN */
/** State of the context (bits 1 & 2 in the flags) */
enum net_context_state {
NET_CONTEXT_IDLE = 0,
NET_CONTEXT_UNCONNECTED = 0,
NET_CONTEXT_CONFIGURING = 1,
NET_CONTEXT_CONNECTING = 1,
NET_CONTEXT_READY = 2,
NET_CONTEXT_CONNECTED = 2,
NET_CONTEXT_LISTENING = 3,
};
/** @endcond */
/**
* The address family, connection type and IP protocol are
* stored into a bit field to save space.
*/
/** Protocol family of this connection */
#define NET_CONTEXT_FAMILY (BIT(3) | BIT(4) | BIT(5))
/** Type of the connection (datagram / stream / raw) */
#define NET_CONTEXT_TYPE (BIT(6) | BIT(7))
/** Remote address set */
#define NET_CONTEXT_REMOTE_ADDR_SET BIT(8)
/** Is the socket accepting connections */
#define NET_CONTEXT_ACCEPTING_SOCK BIT(9)
/** Is the socket closing / closed */
#define NET_CONTEXT_CLOSING_SOCK BIT(10)
/** Context is bound to a specific interface */
#define NET_CONTEXT_BOUND_TO_IFACE BIT(11)
struct net_context;
/**
* @typedef net_context_recv_cb_t
* @brief Network data receive callback.
*
* @details The recv callback is called after a network data packet is
* received. This callback is called by RX thread so its stack and execution
* context is used here. Keep processing in the callback minimal to reduce the
* time spent blocked while handling packets.
*
* @param context The context to use.
* @param pkt Network buffer that is received. If the pkt is not NULL,
* then the callback will own the buffer and it needs to unref the pkt
* as soon as it has finished working with it. On EOF, pkt will be NULL.
* @param ip_hdr a pointer to relevant IP (v4 or v6) header.
* @param proto_hdr a pointer to relevant protocol (udp or tcp) header.
* @param status Value is set to 0 if some data or the connection is
* at EOF, <0 if there was an error receiving data, in this case the
* pkt parameter is set to NULL.
* @param user_data The user data given in net_recv() call.
*/
typedef void (*net_context_recv_cb_t)(struct net_context *context,
struct net_pkt *pkt,
union net_ip_header *ip_hdr,
union net_proto_header *proto_hdr,
int status,
void *user_data);
/**
* @typedef net_context_send_cb_t
* @brief Network data send callback.
*
* @details The send callback is called after a network data packet is sent.
* This callback is called by TX thread so its stack and execution context is
* used here. Keep processing in the callback minimal to reduce the time spent
* blocked while handling packets.
*
* @param context The context to use.
* @param status Value is set to >= 0: amount of data that was sent,
* < 0 there was an error sending data.
* @param user_data The user data given in net_send() call.
*/
typedef void (*net_context_send_cb_t)(struct net_context *context,
int status,
void *user_data);
/**
* @typedef net_tcp_accept_cb_t
* @brief Accept callback
*
* @details The accept callback is called after a successful connection was
* established or if there was an error while we were waiting for a connection
* attempt. This callback is called by RX thread so its stack and execution
* context is used here. Keep processing in the callback minimal to reduce the
* time spent blocked while handling packets.
*
* @param new_context The context to use.
* @param addr The peer address.
* @param addrlen Length of the peer address.
* @param status The status code, 0 on success, < 0 otherwise
* @param user_data The user data given in net_context_accept() call.
*/
typedef void (*net_tcp_accept_cb_t)(struct net_context *new_context,
struct sockaddr *addr,
socklen_t addrlen,
int status,
void *user_data);
/**
* @typedef net_context_connect_cb_t
* @brief Connection callback.
*
* @details The connect callback is called after a connection is being
* established.
* For TCP connections, this callback is called by RX thread so its stack and
* execution context is used here. The callback is called after the TCP
* connection was established or if the connection failed. Keep processing in
* the callback minimal to reduce the time spent blocked while handling
* packets.
* For UDP connections, this callback is called immediately by
* net_context_connect() function. UDP is a connectionless protocol so the
* connection can be thought of being established immediately.
*
* @param context The context to use.
* @param status Status of the connection establishment. This is 0
* if the connection was established successfully, <0 if there was an
* error.
* @param user_data The user data given in net_context_connect() call.
*/
typedef void (*net_context_connect_cb_t)(struct net_context *context,
int status,
void *user_data);
/* The net_pkt_get_slab_func_t is here in order to avoid circular
* dependency between net_pkt.h and net_context.h
*/
/**
* @typedef net_pkt_get_slab_func_t
*
* @brief Function that is called to get the slab that is used
* for net_pkt allocations.
*
* @return Pointer to valid struct k_mem_slab instance.
*/
typedef struct k_mem_slab *(*net_pkt_get_slab_func_t)(void);
/* The net_pkt_get_pool_func_t is here in order to avoid circular
* dependency between net_pkt.h and net_context.h
*/
/**
* @typedef net_pkt_get_pool_func_t
*
* @brief Function that is called to get the pool that is used
* for net_buf allocations.
*
* @return Pointer to valid struct net_buf_pool instance.
*/
typedef struct net_buf_pool *(*net_pkt_get_pool_func_t)(void);
struct net_tcp;
struct net_conn_handle;
/**
* Note that we do not store the actual source IP address in the context
* because the address is already be set in the network interface struct.
* If there is no such source address there, the packet cannot be sent
* anyway. This saves 12 bytes / context in IPv6.
*/
__net_socket struct net_context {
	/** First member of the structure to allow to put contexts into a FIFO.
	 */
	void *fifo_reserved;

	/** User data associated with a context.
	 */
	void *user_data;

	/** Reference count
	 */
	atomic_t refcount;

	/** Internal lock for protecting this context from multiple access.
	 */
	struct k_mutex lock;

	/** Local endpoint address. Note that the values are in network byte
	 * order.
	 */
	struct sockaddr_ptr local;

	/** Remote endpoint address. Note that the values are in network byte
	 * order.
	 */
	struct sockaddr remote;

	/** Connection handle */
	struct net_conn_handle *conn_handler;

	/** Receive callback to be called when desired packet
	 * has been received.
	 */
	net_context_recv_cb_t recv_cb;

	/** Send callback to be called when the packet has been sent
	 * successfully.
	 */
	net_context_send_cb_t send_cb;

	/** Connect callback to be called when a connection has been
	 * established.
	 */
	net_context_connect_cb_t connect_cb;

#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
	/** Get TX net_buf pool for this context.
	 */
	net_pkt_get_slab_func_t tx_slab;

	/** Get DATA net_buf pool for this context.
	 */
	net_pkt_get_pool_func_t data_pool;
#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */

#if defined(CONFIG_NET_TCP)
	/** TCP connection information */
	void *tcp;
#endif /* CONFIG_NET_TCP */

#if defined(CONFIG_NET_CONTEXT_SYNC_RECV)
	/**
	 * Semaphore to signal synchronous recv call completion.
	 */
	struct k_sem recv_data_wait;
#endif /* CONFIG_NET_CONTEXT_SYNC_RECV */

#if defined(CONFIG_NET_SOCKETS)
	/** BSD socket private data */
	void *socket_data;

	/** Per-socket packet or connection queues */
	union {
		struct k_fifo recv_q;   /**< Queue for received data */
		struct k_fifo accept_q; /**< Queue for incoming connections */
	};

	struct {
		/** Condition variable used when receiving data */
		struct k_condvar recv;

		/** Mutex used by condition variable */
		struct k_mutex *lock;
	} cond;
#endif /* CONFIG_NET_SOCKETS */

#if defined(CONFIG_NET_OFFLOAD)
	/** context for use by offload drivers */
	void *offload_context;
#endif /* CONFIG_NET_OFFLOAD */

#if defined(CONFIG_NET_SOCKETS_CAN)
	/** CAN filter id for this socket.
	 * NOTE(review): semantics inferred from the field name -- confirm
	 * against the CAN socket implementation.
	 */
	int can_filter_id;
#endif /* CONFIG_NET_SOCKETS_CAN */

	/** Option values */
	struct {
#if defined(CONFIG_NET_CONTEXT_PRIORITY)
		/** Priority of the network data sent via this net_context */
		uint8_t priority;
#endif
#if defined(CONFIG_NET_CONTEXT_TXTIME)
		/** When to send the packet out */
		bool txtime;
#endif
#if defined(CONFIG_SOCKS)
		/** Socks proxy address */
		struct {
			struct sockaddr addr;
			socklen_t addrlen;
		} proxy;
#endif
#if defined(CONFIG_NET_CONTEXT_RCVTIMEO)
		/** Receive timeout */
		k_timeout_t rcvtimeo;
#endif
#if defined(CONFIG_NET_CONTEXT_SNDTIMEO)
		/** Send timeout */
		k_timeout_t sndtimeo;
#endif
#if defined(CONFIG_NET_CONTEXT_RCVBUF)
		/** Receive buffer maximum size */
		uint16_t rcvbuf;
#endif
#if defined(CONFIG_NET_CONTEXT_SNDBUF)
		/** Send buffer maximum size */
		uint16_t sndbuf;
#endif
#if defined(CONFIG_NET_CONTEXT_DSCP_ECN)
		/**
		 * DSCP (Differentiated Services Code point) and
		 * ECN (Explicit Congestion Notification) values.
		 */
		uint8_t dscp_ecn;
#endif
#if defined(CONFIG_NET_CONTEXT_REUSEADDR)
		/** Re-use address (SO_REUSEADDR) flag on a socket. */
		bool reuseaddr;
#endif
#if defined(CONFIG_NET_CONTEXT_REUSEPORT)
		/** Re-use port (SO_REUSEPORT) flag on a socket. */
		bool reuseport;
#endif
#if defined(CONFIG_NET_IPV4_MAPPING_TO_IPV6)
		/** Support v4-mapped-on-v6 addresses */
		bool ipv6_v6only;
#endif
#if defined(CONFIG_NET_CONTEXT_RECV_PKTINFO)
		/** Receive network packet information in recvmsg() call */
		bool recv_pktinfo;
#endif
#if defined(CONFIG_NET_IPV6)
		/**
		 * Source address selection preferences. Currently used only for IPv6,
		 * see RFC 5014 for details.
		 */
		uint16_t addr_preferences;
#endif
#if defined(CONFIG_NET_CONTEXT_TIMESTAMPING)
		/** Enable RX, TX or both timestamps of packets sent through sockets. */
		uint8_t timestamping;
#endif
	} options;

	/** Protocol (UDP, TCP or IEEE 802.3 protocol value) */
	uint16_t proto;

	/** Flags for the context */
	uint16_t flags;

	/** Network interface assigned to this context */
	int8_t iface;

	/** IPv6 hop limit or IPv4 ttl for packets sent via this context. */
	union {
		struct {
			uint8_t ipv6_hop_limit;       /**< IPv6 hop limit */
			uint8_t ipv6_mcast_hop_limit; /**< IPv6 multicast hop limit */
		};
		struct {
			uint8_t ipv4_ttl;       /**< IPv4 TTL */
			uint8_t ipv4_mcast_ttl; /**< IPv4 multicast TTL */
		};
	};

#if defined(CONFIG_SOCKS)
	/** Is socks proxy enabled */
	bool proxy_enabled;
#endif
};
/**
 * @brief Check whether this context is currently in use.
 *
 * @param context Network context.
 *
 * @return True if the context is currently in use, False otherwise.
 */
static inline bool net_context_is_used(struct net_context *context)
{
	NET_ASSERT(context);

	return (context->flags & NET_CONTEXT_IN_USE) != 0;
}
/**
 * @brief Check whether this context has been bound to a specific network
 * interface.
 *
 * @param context Network context.
 *
 * @return True if the context is bound to network interface, False otherwise.
 */
static inline bool net_context_is_bound_to_iface(struct net_context *context)
{
	NET_ASSERT(context);

	return (context->flags & NET_CONTEXT_BOUND_TO_IFACE) != 0;
}
/**
 * @brief Check whether this context is accepting connections.
 *
 * @param context Network context.
 *
 * @return True if the context is accepting connections, False otherwise.
 */
static inline bool net_context_is_accepting(struct net_context *context)
{
	NET_ASSERT(context);

	return (context->flags & NET_CONTEXT_ACCEPTING_SOCK) != 0;
}
/**
 * @brief Mark this context as accepting (or not accepting) connections.
 *
 * @param context Network context.
 * @param accepting True if accepting, False if not
 */
static inline void net_context_set_accepting(struct net_context *context,
					     bool accepting)
{
	NET_ASSERT(context);

	if (!accepting) {
		/* Clear the flag, keeping all other bits intact. */
		context->flags &= (uint16_t)~NET_CONTEXT_ACCEPTING_SOCK;
		return;
	}

	context->flags |= NET_CONTEXT_ACCEPTING_SOCK;
}
/**
 * @brief Check whether this context is closing (or already closed).
 *
 * @param context Network context.
 *
 * @return True if the context is closing, False otherwise.
 */
static inline bool net_context_is_closing(struct net_context *context)
{
	NET_ASSERT(context);

	return (context->flags & NET_CONTEXT_CLOSING_SOCK) != 0;
}
/**
 * @brief Mark this context as closing (or not closing).
 *
 * @param context Network context.
 * @param closing True if closing, False if not
 */
static inline void net_context_set_closing(struct net_context *context,
					   bool closing)
{
	NET_ASSERT(context);

	if (!closing) {
		/* Clear the flag, keeping all other bits intact. */
		context->flags &= (uint16_t)~NET_CONTEXT_CLOSING_SOCK;
		return;
	}

	context->flags |= NET_CONTEXT_CLOSING_SOCK;
}
/** @cond INTERNAL_HIDDEN */
#define NET_CONTEXT_STATE_SHIFT 1
#define NET_CONTEXT_STATE_MASK 0x03
/** @endcond */
/**
* @brief Get state for this network context.
*
* @details This function returns the state of the context.
*
* @param context Network context.
*
* @return Network state.
*/
static inline
enum net_context_state net_context_get_state(struct net_context *context)
{
NET_ASSERT(context);
return (enum net_context_state)
((context->flags >> NET_CONTEXT_STATE_SHIFT) &
NET_CONTEXT_STATE_MASK);
}
/**
* @brief Set state for this network context.
*
* @details This function sets the state of the context.
*
* @param context Network context.
* @param state New network context state.
*/
static inline void net_context_set_state(struct net_context *context,
enum net_context_state state)
{
NET_ASSERT(context);
context->flags &= ~(NET_CONTEXT_STATE_MASK << NET_CONTEXT_STATE_SHIFT);
context->flags |= ((state & NET_CONTEXT_STATE_MASK) <<
NET_CONTEXT_STATE_SHIFT);
}
/**
 * @brief Get address family for this network context.
 *
 * @details This function returns the address family (IPv4 or IPv6)
 * of the context.
 *
 * @param context Network context.
 *
 * @return Address family of the context (e.g. AF_INET or AF_INET6).
 */
static inline sa_family_t net_context_get_family(struct net_context *context)
{
	NET_ASSERT(context);
	/* The family value is stored shifted left by 3 bits inside the
	 * flags field; see net_context_set_family().
	 */
	return ((context->flags & NET_CONTEXT_FAMILY) >> 3);
}
/**
 * @brief Set address family for this network context.
 *
 * @details This function sets the address family (IPv4, IPv6 or AF_PACKET)
 * of the context. Families other than those listed below are silently
 * ignored (the flags field is left unchanged).
 *
 * @param context Network context.
 * @param family Address family (AF_INET, AF_INET6, AF_PACKET, AF_CAN)
 */
static inline void net_context_set_family(struct net_context *context,
					  sa_family_t family)
{
	uint8_t flag = 0U;
	NET_ASSERT(context);
	if (family == AF_UNSPEC || family == AF_INET || family == AF_INET6 ||
	    family == AF_PACKET || family == AF_CAN) {
		/* The family value is shifted left by 3 bits in the flags
		 * field. NOTE(review): a shift of 3 places the value at
		 * BIT(3)..BIT(5); the original comment claimed
		 * BIT(4)..BIT(6) - confirm against NET_CONTEXT_FAMILY.
		 */
		flag = (uint8_t)(family << 3);
	}
	/* NOTE(review): bits are OR'ed in without clearing any previously
	 * stored family, so callers are presumably expected to set the
	 * family only once per context - verify at call sites.
	 */
	context->flags |= flag;
}
/**
* @brief Get context type for this network context.
*
* @details This function returns the context type (stream, datagram or raw)
* of the context.
*
* @param context Network context.
*
* @return Network context type.
*/
static inline
enum net_sock_type net_context_get_type(struct net_context *context)
{
NET_ASSERT(context);
return (enum net_sock_type)((context->flags & NET_CONTEXT_TYPE) >> 6);
}
/**
 * @brief Set context type for this network context.
 *
 * @details This function sets the context type (stream or datagram)
 * of the context. Unknown types leave the flags untouched.
 *
 * @param context Network context.
 * @param type Context type (SOCK_STREAM or SOCK_DGRAM)
 */
static inline void net_context_set_type(struct net_context *context,
					enum net_sock_type type)
{
	NET_ASSERT(context);
	switch (type) {
	case SOCK_DGRAM:
	case SOCK_STREAM:
	case SOCK_RAW:
		/* The type value lives in BIT(6) and BIT(7) of the flags */
		context->flags |= (uint16_t)(type << 6);
		break;
	default:
		/* Other types are ignored, matching the previous behavior */
		break;
	}
}
/**
 * @brief Set CAN filter id for this network context.
 *
 * @details This function sets the CAN filter id of the context.
 *
 * @param context Network context.
 * @param filter_id CAN filter id
 */
#if defined(CONFIG_NET_SOCKETS_CAN)
static inline void net_context_set_can_filter_id(struct net_context *context,
						 int filter_id)
{
	NET_ASSERT(context);
	context->can_filter_id = filter_id;
}
#else
/* Stub used when CAN socket support is not compiled in; both arguments
 * are ignored.
 */
static inline void net_context_set_can_filter_id(struct net_context *context,
						 int filter_id)
{
	ARG_UNUSED(context);
	ARG_UNUSED(filter_id);
}
#endif
/**
 * @brief Get CAN filter id for this network context.
 *
 * @details This function gets the CAN filter id of the context.
 *
 * @param context Network context.
 *
 * @return Filter id of this network context, or -1 if CAN socket
 * support is not compiled in.
 */
#if defined(CONFIG_NET_SOCKETS_CAN)
static inline int net_context_get_can_filter_id(struct net_context *context)
{
	NET_ASSERT(context);
	return context->can_filter_id;
}
#else
/* Stub used when CAN socket support is not compiled in */
static inline int net_context_get_can_filter_id(struct net_context *context)
{
	ARG_UNUSED(context);
	return -1;
}
#endif
/**
 * @brief Get context IP protocol for this network context.
 *
 * @details This function returns the IP protocol (UDP / TCP /
 * IEEE 802.3 protocol value) of the context.
 *
 * @param context Network context.
 *
 * @return Network context IP protocol.
 */
static inline uint16_t net_context_get_proto(struct net_context *context)
{
	/* Validate the context pointer like the sibling accessors do */
	NET_ASSERT(context);
	return context->proto;
}
/**
 * @brief Set context IP protocol for this network context.
 *
 * @details This function sets the context IP protocol (UDP / TCP)
 * of the context.
 *
 * @param context Network context.
 * @param proto Context IP protocol (IPPROTO_UDP, IPPROTO_TCP or IEEE 802.3
 * protocol value)
 */
static inline void net_context_set_proto(struct net_context *context,
					 uint16_t proto)
{
	/* Validate the context pointer like the sibling accessors do */
	NET_ASSERT(context);
	context->proto = proto;
}
/**
 * @brief Get network interface for this context.
 *
 * @details This function returns the network interface the context is
 * currently attached to.
 *
 * @param context Network context.
 *
 * @return Context network interface if context is bind to interface,
 * NULL otherwise.
 */
static inline
struct net_if *net_context_get_iface(struct net_context *context)
{
	int iface_index;

	NET_ASSERT(context);
	iface_index = context->iface;
	return net_if_get_by_index(iface_index);
}
/**
 * @brief Set network interface for this context.
 *
 * @details This function binds network interface to this context.
 *
 * @param context Network context.
 * @param iface Network interface.
 */
static inline void net_context_set_iface(struct net_context *context,
					 struct net_if *iface)
{
	/* Also validate the context, matching the other accessors */
	NET_ASSERT(context);
	NET_ASSERT(iface);
	/* Only the interface index is stored, not the pointer itself */
	context->iface = (uint8_t)net_if_get_by_iface(iface);
}
/**
 * @brief Bind network interface to this context.
 *
 * @details This function binds network interface to this context and
 * marks the context as interface-bound.
 *
 * @param context Network context.
 * @param iface Network interface.
 */
static inline void net_context_bind_iface(struct net_context *context,
					  struct net_if *iface)
{
	/* Also validate the context, matching the other accessors */
	NET_ASSERT(context);
	NET_ASSERT(iface);
	context->flags |= NET_CONTEXT_BOUND_TO_IFACE;
	net_context_set_iface(context, iface);
}
/**
 * @brief Get IPv4 TTL (time-to-live) value for this context.
 *
 * @details This function returns the IPv4 TTL (time-to-live) value that is
 * set to this context. The TTL is used for unicast packets (see
 * NET_OPT_TTL); multicast packets use the separate multicast TTL.
 *
 * @param context Network context.
 *
 * @return IPv4 TTL value
 */
static inline uint8_t net_context_get_ipv4_ttl(struct net_context *context)
{
	return context->ipv4_ttl;
}
/**
 * @brief Set IPv4 TTL (time-to-live) value for this context.
 *
 * @details This function sets the IPv4 TTL (time-to-live) value for
 * this context.
 *
 * @param context Network context.
 * @param ttl IPv4 time-to-live value.
 */
static inline void net_context_set_ipv4_ttl(struct net_context *context,
					    uint8_t ttl)
{
	context->ipv4_ttl = ttl;
}
/**
 * @brief Get IPv4 multicast TTL (time-to-live) value for this context.
 *
 * @details This function returns the IPv4 multicast TTL (time-to-live) value
 * that is set to this context (see NET_OPT_MCAST_TTL).
 *
 * @param context Network context.
 *
 * @return IPv4 multicast TTL value
 */
static inline uint8_t net_context_get_ipv4_mcast_ttl(struct net_context *context)
{
	return context->ipv4_mcast_ttl;
}
/**
 * @brief Set IPv4 multicast TTL (time-to-live) value for this context.
 *
 * @details This function sets the IPv4 multicast TTL (time-to-live) value for
 * this context.
 *
 * @param context Network context.
 * @param ttl IPv4 multicast time-to-live value.
 */
static inline void net_context_set_ipv4_mcast_ttl(struct net_context *context,
						  uint8_t ttl)
{
	context->ipv4_mcast_ttl = ttl;
}
/**
 * @brief Get IPv6 hop limit value for this context.
 *
 * @details This function returns the IPv6 hop limit value that is set to this
 * context. The hop limit is the IPv6 equivalent of the IPv4 TTL (see
 * NET_OPT_UNICAST_HOP_LIMIT).
 *
 * @param context Network context.
 *
 * @return IPv6 hop limit value
 */
static inline uint8_t net_context_get_ipv6_hop_limit(struct net_context *context)
{
	return context->ipv6_hop_limit;
}
/**
 * @brief Set IPv6 hop limit value for this context.
 *
 * @details This function sets the IPv6 hop limit value for this context.
 *
 * @param context Network context.
 * @param hop_limit IPv6 hop limit value.
 */
static inline void net_context_set_ipv6_hop_limit(struct net_context *context,
						  uint8_t hop_limit)
{
	context->ipv6_hop_limit = hop_limit;
}
/**
 * @brief Get IPv6 multicast hop limit value for this context.
 *
 * @details This function returns the IPv6 multicast hop limit value
 * that is set to this context (see NET_OPT_MCAST_HOP_LIMIT).
 *
 * @param context Network context.
 *
 * @return IPv6 multicast hop limit value
 */
static inline uint8_t net_context_get_ipv6_mcast_hop_limit(struct net_context *context)
{
	return context->ipv6_mcast_hop_limit;
}
/**
 * @brief Set IPv6 multicast hop limit value for this context.
 *
 * @details This function sets the IPv6 multicast hop limit value for
 * this context.
 *
 * @param context Network context.
 * @param hop_limit IPv6 multicast hop limit value.
 */
static inline void net_context_set_ipv6_mcast_hop_limit(struct net_context *context,
							uint8_t hop_limit)
{
	context->ipv6_mcast_hop_limit = hop_limit;
}
/**
 * @brief Enable or disable socks proxy support for this context.
 *
 * @details This function either enables or disables socks proxy support for
 * this context.
 *
 * @param context Network context.
 * @param enable Enable socks proxy or disable it.
 */
#if defined(CONFIG_SOCKS)
static inline void net_context_set_proxy_enabled(struct net_context *context,
						 bool enable)
{
	context->proxy_enabled = enable;
}
#else
/* No-op stub used when CONFIG_SOCKS is disabled */
static inline void net_context_set_proxy_enabled(struct net_context *context,
						 bool enable)
{
	ARG_UNUSED(context);
	ARG_UNUSED(enable);
}
#endif
/**
 * @brief Is socks proxy support enabled or disabled for this context.
 *
 * @details This function returns current socks proxy status for
 * this context.
 *
 * @param context Network context.
 *
 * @return True if socks proxy is enabled for this context, False otherwise
 * (always False when CONFIG_SOCKS is disabled).
 */
#if defined(CONFIG_SOCKS)
static inline bool net_context_is_proxy_enabled(struct net_context *context)
{
	return context->proxy_enabled;
}
#else
/* Stub used when CONFIG_SOCKS is disabled */
static inline bool net_context_is_proxy_enabled(struct net_context *context)
{
	ARG_UNUSED(context);
	return false;
}
#endif
/**
* @brief Get network context.
*
* @details Network context is used to define the connection 5-tuple
* (protocol, remote address, remote port, source address and source
* port). Random free port number will be assigned to source port when
* context is created. This is similar as BSD socket() function.
* The context will be created with a reference count of 1.
*
* @param family IP address family (AF_INET or AF_INET6)
* @param type Type of the socket, SOCK_STREAM or SOCK_DGRAM
* @param ip_proto IP protocol, IPPROTO_UDP or IPPROTO_TCP. For raw socket
* access, the value is the L2 protocol value from IEEE 802.3 (see ethernet.h)
* @param context The allocated context is returned to the caller.
*
* @return 0 if ok, < 0 if error
*/
int net_context_get(sa_family_t family,
enum net_sock_type type,
uint16_t ip_proto,
struct net_context **context);
/**
* @brief Close and unref a network context.
*
* @details This releases the context. It is not possible to send or
* receive data via this context after this call. This is similar as
* BSD shutdown() function. For legacy compatibility, this function
* will implicitly decrement the reference count and possibly destroy
* the context either now or when it reaches a final state.
*
* @param context The context to be closed.
*
* @return 0 if ok, < 0 if error
*/
int net_context_put(struct net_context *context);
/**
* @brief Take a reference count to a net_context, preventing destruction
*
* @details Network contexts are not recycled until their reference
* count reaches zero. Note that this does not prevent any "close"
* behavior that results from errors or net_context_put. It simply
* prevents the context from being recycled for further use.
*
* @param context The context on which to increment the reference count
*
* @return The new reference count
*/
int net_context_ref(struct net_context *context);
/**
* @brief Decrement the reference count to a network context
*
* @details Decrements the refcount. If it reaches zero, the context
* will be recycled. Note that this does not cause any
* network-visible "close" behavior (i.e. future packets to this
* connection may see TCP RST or ICMP port unreachable responses). See
* net_context_put() for that.
*
* @param context The context on which to decrement the reference count
*
* @return The new reference count, zero if the context was destroyed
*/
int net_context_unref(struct net_context *context);
/**
* @brief Create IPv4 packet in provided net_pkt from context
*
* @param context Network context for a connection
* @param pkt Network packet
* @param src Source address, or NULL to choose a default
* @param dst Destination IPv4 address
*
* @return Return 0 on success, negative errno otherwise.
*/
#if defined(CONFIG_NET_IPV4)
int net_context_create_ipv4_new(struct net_context *context,
struct net_pkt *pkt,
const struct in_addr *src,
const struct in_addr *dst);
#else
static inline int net_context_create_ipv4_new(struct net_context *context,
struct net_pkt *pkt,
const struct in_addr *src,
const struct in_addr *dst)
{
return -1;
}
#endif /* CONFIG_NET_IPV4 */
/**
* @brief Create IPv6 packet in provided net_pkt from context
*
* @param context Network context for a connection
* @param pkt Network packet
* @param src Source address, or NULL to choose a default from context
* @param dst Destination IPv6 address
*
* @return Return 0 on success, negative errno otherwise.
*/
#if defined(CONFIG_NET_IPV6)
int net_context_create_ipv6_new(struct net_context *context,
struct net_pkt *pkt,
const struct in6_addr *src,
const struct in6_addr *dst);
#else
static inline int net_context_create_ipv6_new(struct net_context *context,
struct net_pkt *pkt,
const struct in6_addr *src,
const struct in6_addr *dst)
{
ARG_UNUSED(context);
ARG_UNUSED(pkt);
ARG_UNUSED(src);
ARG_UNUSED(dst);
return -1;
}
#endif /* CONFIG_NET_IPV6 */
/**
* @brief Assign a socket a local address.
*
* @details This is similar as BSD bind() function.
*
* @param context The context to be assigned.
 * @param addr Address to be assigned.
* @param addrlen Length of the address.
*
* @return 0 if ok, < 0 if error
*/
int net_context_bind(struct net_context *context,
const struct sockaddr *addr,
socklen_t addrlen);
/**
* @brief Mark the context as a listening one.
*
* @details This is similar as BSD listen() function.
*
* @param context The context to use.
* @param backlog The size of the pending connections backlog.
*
* @return 0 if ok, < 0 if error
*/
int net_context_listen(struct net_context *context,
int backlog);
/**
* @brief Create a network connection.
*
* @details The net_context_connect function creates a network
* connection to the host specified by addr. After the
* connection is established, the user-supplied callback (cb)
* is executed. cb is called even if the timeout was set to
* K_FOREVER. cb is not called if the timeout expires.
* For datagram sockets (SOCK_DGRAM), this function only sets
* the peer address.
* This function is similar to the BSD connect() function.
*
* @param context The network context.
* @param addr The peer address to connect to.
* @param addrlen Peer address length.
* @param cb Callback function. Set to NULL if not required.
* @param timeout The timeout value for the connection. Possible values:
* * K_NO_WAIT: this function will return immediately,
* * K_FOREVER: this function will block until the
* connection is established,
* * >0: this function will wait the specified ms.
* @param user_data Data passed to the callback function.
*
* @return 0 on success.
* @return -EINVAL if an invalid parameter is passed as an argument.
* @return -ENOTSUP if the operation is not supported or implemented.
* @return -ETIMEDOUT if the connect operation times out.
*/
int net_context_connect(struct net_context *context,
const struct sockaddr *addr,
socklen_t addrlen,
net_context_connect_cb_t cb,
k_timeout_t timeout,
void *user_data);
/**
* @brief Accept a network connection attempt.
*
* @details Accept a connection being established. This function
* will return immediately if the timeout is set to K_NO_WAIT.
 * In this case the context will call the supplied callback whenever
* there is a connection established to this context. This is "a register
* handler and forget" type of call (async).
* If the timeout is set to K_FOREVER, the function will wait
* until the connection is established. Timeout value > 0, will wait as
* many ms.
* After the connection is established a caller-supplied callback is called.
* The callback is called even if timeout was set to K_FOREVER, the
* callback is called before this function will return in this case.
* The callback is not called if the timeout expires.
* This is similar as BSD accept() function.
*
* @param context The context to use.
* @param cb Caller-supplied callback function.
* @param timeout Timeout for the connection. Possible values
* are K_FOREVER, K_NO_WAIT, >0.
* @param user_data Caller-supplied user data.
*
* @return 0 if ok, < 0 if error
*/
int net_context_accept(struct net_context *context,
net_tcp_accept_cb_t cb,
k_timeout_t timeout,
void *user_data);
/**
* @brief Send data to a peer.
*
* @details This function can be used to send network data to a peer
* connection. After the network buffer is sent, a caller-supplied
* callback is called. Note that the callback might be called after this
* function has returned. For context of type SOCK_DGRAM, the destination
* address must have been set by the call to net_context_connect().
* This is similar as BSD send() function.
*
* @param context The network context to use.
* @param buf The data buffer to send
* @param len Length of the buffer
* @param cb Caller-supplied callback function.
* @param timeout Currently this value is not used.
* @param user_data Caller-supplied user data.
*
* @return 0 if ok, < 0 if error
*/
int net_context_send(struct net_context *context,
const void *buf,
size_t len,
net_context_send_cb_t cb,
k_timeout_t timeout,
void *user_data);
/**
* @brief Send data to a peer specified by address.
*
* @details This function can be used to send network data to a peer
* specified by address. This variant can only be used for datagram
* connections of type SOCK_DGRAM. After the network buffer is sent,
* a caller-supplied callback is called. Note that the callback might be
* called after this function has returned.
* This is similar as BSD sendto() function.
*
* @param context The network context to use.
* @param buf The data buffer to send
* @param len Length of the buffer
* @param dst_addr Destination address.
* @param addrlen Length of the address.
* @param cb Caller-supplied callback function.
* @param timeout Currently this value is not used.
* @param user_data Caller-supplied user data.
*
 * @return number of bytes sent on success, a negative errno otherwise
*/
int net_context_sendto(struct net_context *context,
const void *buf,
size_t len,
const struct sockaddr *dst_addr,
socklen_t addrlen,
net_context_send_cb_t cb,
k_timeout_t timeout,
void *user_data);
/**
* @brief Send data in iovec to a peer specified in msghdr struct.
*
* @details This function has similar semantics as Posix sendmsg() call.
* For unconnected socket, the msg_name field in msghdr must be set. For
* connected socket the msg_name should be set to NULL, and msg_namelen to 0.
* After the network buffer is sent, a caller-supplied callback is called.
* Note that the callback might be called after this function has returned.
*
* @param context The network context to use.
* @param msghdr The data to send
* @param flags Flags for the sending.
* @param cb Caller-supplied callback function.
* @param timeout Currently this value is not used.
* @param user_data Caller-supplied user data.
*
 * @return number of bytes sent on success, a negative errno otherwise
*/
int net_context_sendmsg(struct net_context *context,
const struct msghdr *msghdr,
int flags,
net_context_send_cb_t cb,
k_timeout_t timeout,
void *user_data);
/**
* @brief Receive network data from a peer specified by context.
*
* @details This function can be used to register a callback function
* that is called by the network stack when network data has been received
* for this context. As this function registers a callback, then there
* is no need to call this function multiple times if timeout is set to
* K_NO_WAIT.
* If callback function or user data changes, then the function can be called
* multiple times to register new values.
* This function will return immediately if the timeout is set to K_NO_WAIT.
* If the timeout is set to K_FOREVER, the function will wait until the
* network buffer is received. Timeout value > 0 will wait as many ms.
* After the network buffer is received, a caller-supplied callback is
* called. The callback is called even if timeout was set to K_FOREVER,
* the callback is called before this function will return in this case.
* The callback is not called if the timeout expires. The timeout functionality
* can be compiled out if synchronous behavior is not needed. The sync call
* logic requires some memory that can be saved if only async way of call is
* used. If CONFIG_NET_CONTEXT_SYNC_RECV is not set, then the timeout parameter
* value is ignored.
* This is similar as BSD recv() function.
* Note that net_context_bind() should be called before net_context_recv().
* Default random port number is assigned to local port. Only bind() will
* update connection information from context. If recv() is called before
* bind() call, it may refuse to bind to a context which already has
* a connection associated.
*
* @param context The network context to use.
* @param cb Caller-supplied callback function.
* @param timeout Caller-supplied timeout. Possible values
* are K_FOREVER, K_NO_WAIT, >0.
* @param user_data Caller-supplied user data.
*
* @return 0 if ok, < 0 if error
*/
int net_context_recv(struct net_context *context,
net_context_recv_cb_t cb,
k_timeout_t timeout,
void *user_data);
/**
* @brief Update TCP receive window for context.
*
* @details This function should be used by an application which
* doesn't fully process incoming data in its receive callback,
* but for example, queues it. In this case, receive callback
* should decrease the window (call this function with a negative
* value) by the size of queued data, and function(s) which dequeue
* data - with positive value corresponding to the dequeued size.
* For example, if receive callback gets a packet with the data
* size of 256 and queues it, it should call this function with
* delta of -256. If a function extracts 10 bytes of the queued
* data, it should call it with delta of 10.
*
* @param context The TCP network context to use.
* @param delta Size, in bytes, by which to increase TCP receive
* window (negative value to decrease).
*
* @return 0 if ok, < 0 if error
*/
int net_context_update_recv_wnd(struct net_context *context,
int32_t delta);
/** @brief Network context options. These map to BSD socket option values. */
enum net_context_option {
	NET_OPT_PRIORITY = 1, /**< Context priority */
	NET_OPT_TXTIME = 2, /**< TX time */
	NET_OPT_SOCKS5 = 3, /**< SOCKS5 proxy */
	NET_OPT_RCVTIMEO = 4, /**< Receive timeout */
	NET_OPT_SNDTIMEO = 5, /**< Send timeout */
	NET_OPT_RCVBUF = 6, /**< Receive buffer */
	NET_OPT_SNDBUF = 7, /**< Send buffer */
	NET_OPT_DSCP_ECN = 8, /**< DSCP/ECN (differentiated services / explicit congestion notification) */
	NET_OPT_REUSEADDR = 9, /**< Re-use address */
	NET_OPT_REUSEPORT = 10, /**< Re-use port */
	NET_OPT_IPV6_V6ONLY = 11, /**< Share IPv4 and IPv6 port space */
	NET_OPT_RECV_PKTINFO = 12, /**< Receive packet information */
	NET_OPT_MCAST_TTL = 13, /**< IPv4 multicast TTL */
	NET_OPT_MCAST_HOP_LIMIT = 14, /**< IPv6 multicast hop limit */
	NET_OPT_UNICAST_HOP_LIMIT = 15, /**< IPv6 unicast hop limit */
	NET_OPT_TTL = 16, /**< IPv4 unicast TTL */
	NET_OPT_ADDR_PREFERENCES = 17, /**< IPv6 address preference */
	NET_OPT_TIMESTAMPING = 18, /**< Packet timestamping */
};
/**
 * @brief Set a connection option for this context.
*
* @param context The network context to use.
* @param option Option to set
* @param value Option value
* @param len Option length
*
* @return 0 if ok, <0 if error
*/
int net_context_set_option(struct net_context *context,
enum net_context_option option,
const void *value, size_t len);
/**
* @brief Get connection option value for this context.
*
* @param context The network context to use.
 * @param option Option to get
* @param value Option value
* @param len Option length (returned to caller)
*
* @return 0 if ok, <0 if error
*/
int net_context_get_option(struct net_context *context,
enum net_context_option option,
void *value, size_t *len);
/**
* @typedef net_context_cb_t
* @brief Callback used while iterating over network contexts
*
* @param context A valid pointer on current network context
* @param user_data A valid pointer on some user data or NULL
*/
typedef void (*net_context_cb_t)(struct net_context *context, void *user_data);
/**
* @brief Go through all the network connections and call callback
* for each network context.
*
* @param cb User-supplied callback function to call.
* @param user_data User specified data.
*/
void net_context_foreach(net_context_cb_t cb, void *user_data);
/**
 * @brief Set custom network buffer pools for context send operations
 *
 * Set custom network buffer pools used by the IP stack to allocate
 * network buffers used by the context when sending data to the
 * network. Using dedicated buffers may help make send operations on
 * a given context more reliable, e.g. not be subject to buffer
 * starvation due to operations on other network contexts. Buffer pools
 * are set per context, but several contexts may share the same buffers.
 * Note that there's no support for per-context custom receive packet
 * pools.
 *
 * @param context Context that will use the given net_buf pools.
 * @param tx_slab Pointer to the function that will return TX slab
 * to the caller. The TX slab is used when sending data to network.
 * There is one TX net_pkt for each network packet that is sent.
 * @param data_pool Pointer to the function that will return DATA pool
 * to the caller. The DATA pool is used to store data that is sent to
 * the network.
 */
#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
static inline void net_context_setup_pools(struct net_context *context,
					   net_pkt_get_slab_func_t tx_slab,
					   net_pkt_get_pool_func_t data_pool)
{
	NET_ASSERT(context);
	context->tx_slab = tx_slab;
	context->data_pool = data_pool;
}
#else
#define net_context_setup_pools(context, tx_pool, data_pool)
#endif
/**
* @brief Check if a port is in use (bound)
*
* This function checks if a port is bound with respect to the specified
* @p ip_proto and @p local_addr.
*
* @param ip_proto the IP protocol
* @param local_port the port to check
* @param local_addr the network address
*
* @return true if the port is bound
* @return false if the port is not bound
*/
bool net_context_port_in_use(enum net_ip_protocol ip_proto,
uint16_t local_port, const struct sockaddr *local_addr);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_NET_CONTEXT_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_context.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 9,679 |
```objective-c
/** @file
* @brief Websocket API
*
* An API for applications to setup websocket connections
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_WEBSOCKET_H_
#define ZEPHYR_INCLUDE_NET_WEBSOCKET_H_
#include <zephyr/kernel.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/http/parser.h>
#include <zephyr/net/http/client.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Websocket API
* @defgroup websocket Websocket API
* @since 1.12
* @version 0.1.0
* @ingroup networking
* @{
*/
/** Message type values. Returned in websocket_recv_msg() */
#define WEBSOCKET_FLAG_FINAL 0x00000001 /**< Final frame */
#define WEBSOCKET_FLAG_TEXT 0x00000002 /**< Textual data */
#define WEBSOCKET_FLAG_BINARY 0x00000004 /**< Binary data */
#define WEBSOCKET_FLAG_CLOSE 0x00000008 /**< Closing connection */
#define WEBSOCKET_FLAG_PING 0x00000010 /**< Ping message */
#define WEBSOCKET_FLAG_PONG 0x00000020 /**< Pong message */
/** @brief Websocket operation (opcode) values, see RFC 6455 chapter 5.2 */
enum websocket_opcode {
	WEBSOCKET_OPCODE_CONTINUE = 0x00, /**< Message continues */
	WEBSOCKET_OPCODE_DATA_TEXT = 0x01, /**< Textual data */
	WEBSOCKET_OPCODE_DATA_BINARY = 0x02, /**< Binary data */
	WEBSOCKET_OPCODE_CLOSE = 0x08, /**< Closing connection */
	WEBSOCKET_OPCODE_PING = 0x09, /**< Ping message */
	WEBSOCKET_OPCODE_PONG = 0x0A, /**< Pong message */
};
/**
* @typedef websocket_connect_cb_t
* @brief Callback called after Websocket connection is established.
*
* @param ws_sock Websocket id
* @param req HTTP handshake request
* @param user_data A valid pointer on some user data or NULL
*
* @return 0 if ok, <0 if there is an error and connection should be aborted
*/
typedef int (*websocket_connect_cb_t)(int ws_sock, struct http_request *req,
void *user_data);
/**
 * Websocket client connection request. This contains all the data that is
 * needed when doing a Websocket connection request.
 */
struct websocket_request {
	/** Host of the Websocket server when doing HTTP handshakes. */
	const char *host;
	/** URL of the Websocket. */
	const char *url;
	/** User supplied callback function to call when optional headers need
	 * to be sent. This can be NULL, in which case the optional_headers
	 * field in http_request is used. The idea of this optional_headers
	 * callback is to allow user to send more HTTP header data than is
	 * practical to store in allocated memory.
	 */
	http_header_cb_t optional_headers_cb;
	/** A NULL terminated list of any optional headers that
	 * should be added to the HTTP request. May be NULL.
	 * If the optional_headers_cb is specified, then this field is ignored.
	 */
	const char **optional_headers;
	/** User supplied callback function to call when a connection is
	 * established.
	 */
	websocket_connect_cb_t cb;
	/** User supplied list of callback functions if the calling application
	 * wants to know the parsing status or the HTTP fields during the
	 * handshake. This is optional parameter and normally not needed but
	 * is useful if the caller wants to know something about
	 * the fields that the server is sending.
	 */
	const struct http_parser_settings *http_cb;
	/** User supplied buffer where HTTP connection data is stored */
	uint8_t *tmp_buf;
	/** Length of the user supplied temp buffer */
	size_t tmp_buf_len;
};
/**
* @brief Connect to a server that provides Websocket service. The callback is
* called after connection is established. The returned value is a new socket
* descriptor that can be used to send / receive data using the BSD socket API.
*
* @param http_sock Socket id to the server. Note that this socket is used to do
* HTTP handshakes etc. The actual Websocket connectivity is done via the
* returned websocket id. Note that the http_sock must not be closed
* after this function returns as it is used to deliver the Websocket
* packets to the Websocket server.
* @param req Websocket request. User should allocate and fill the request
* data.
* @param timeout Max timeout to wait for the connection. The timeout value is
* in milliseconds. Value SYS_FOREVER_MS means to wait forever.
* @param user_data User specified data that is passed to the callback.
*
* @return Websocket id to be used when sending/receiving Websocket data.
*/
int websocket_connect(int http_sock, struct websocket_request *req,
int32_t timeout, void *user_data);
/**
* @brief Send websocket msg to peer.
*
* @details The function will automatically add websocket header to the
* message.
*
* @param ws_sock Websocket id returned by websocket_connect().
* @param payload Websocket data to send.
* @param payload_len Length of the data to be sent.
* @param opcode Operation code (text, binary, ping, pong, close)
* @param mask Mask the data, see RFC 6455 for details
* @param final Is this final message for this message send. If final == false,
* then the first message must have valid opcode and subsequent messages
* must have opcode WEBSOCKET_OPCODE_CONTINUE. If final == true and this
* is the only message, then opcode should have proper opcode (text or
* binary) set.
* @param timeout How long to try to send the message. The value is in
* milliseconds. Value SYS_FOREVER_MS means to wait forever.
*
* @return <0 if error, >=0 amount of bytes sent
*/
int websocket_send_msg(int ws_sock, const uint8_t *payload, size_t payload_len,
enum websocket_opcode opcode, bool mask, bool final,
int32_t timeout);
/**
* @brief Receive websocket msg from peer.
*
* @details The function will automatically remove websocket header from the
* message.
*
* @param ws_sock Websocket id returned by websocket_connect().
* @param buf Buffer where websocket data is read.
* @param buf_len Length of the data buffer.
* @param message_type Type of the message.
* @param remaining How much there is data left in the message after this read.
* @param timeout How long to try to receive the message.
* The value is in milliseconds. Value SYS_FOREVER_MS means to wait
* forever.
*
* @retval >=0 amount of bytes received.
* @retval -EAGAIN on timeout.
* @retval -ENOTCONN on socket close.
* @retval -errno other negative errno value in case of failure.
*/
int websocket_recv_msg(int ws_sock, uint8_t *buf, size_t buf_len,
uint32_t *message_type, uint64_t *remaining,
int32_t timeout);
/**
* @brief Close websocket.
*
* @details One must call websocket_connect() after this call to re-establish
* the connection.
*
* @param ws_sock Websocket id returned by websocket_connect().
*
* @return <0 if error, 0 the connection was closed successfully
*/
int websocket_disconnect(int ws_sock);
/**
* @brief Register a socket as websocket. This is called by HTTP server
* when a connection is upgraded to a websocket connection.
*
* @param http_sock Underlying socket connection socket.
* @param recv_buf Temporary receive buffer for websocket parsing. This must
* point to a memory area that is valid for the duration of the whole
* websocket session.
* @param recv_buf_len Length of the temporary receive buffer.
*
* @return <0 if error, >=0 the actual websocket to be used by application
*/
int websocket_register(int http_sock, uint8_t *recv_buf, size_t recv_buf_len);
/**
* @brief Unregister a websocket. This is called when we no longer need
* the underlying "real" socket. This will close first the websocket
* and then the original socket.
*
* @param ws_sock Websocket connection socket.
*
* @return <0 if error, 0 the websocket connection is now fully closed
*/
int websocket_unregister(int ws_sock);
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_WEBSOCKET_CLIENT)
/* Initialize the Websocket client library. Called by the network stack
 * during system bring-up; not part of the public application API.
 */
void websocket_init(void);
#else
/* No-op stub so callers compile when the Websocket client is disabled. */
static inline void websocket_init(void)
{
}
#endif
/** @endcond */
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_WEBSOCKET_H_ */
``` | /content/code_sandbox/include/zephyr/net/websocket.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,872 |
```objective-c
/** @file
* @brief IPv6 and IPv4 definitions
*
* Generic IPv6 and IPv4 address definitions.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_IP_H_
#define ZEPHYR_INCLUDE_NET_NET_IP_H_
/**
* @brief IPv4/IPv6 primitives and helpers
* @defgroup ip_4_6 IPv4/IPv6 primitives and helpers
* @since 1.0
* @version 1.0.0
* @ingroup networking
* @{
*/
#include <string.h>
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/toolchain.h>
#include <zephyr/net/net_linkaddr.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @cond INTERNAL_HIDDEN */
/* Specifying VLAN tag here in order to avoid circular dependencies */
#define NET_VLAN_TAG_UNSPEC 0x0fff
/** @endcond */
/* Protocol families. */
#define PF_UNSPEC 0 /**< Unspecified protocol family. */
#define PF_INET 1 /**< IP protocol family version 4. */
#define PF_INET6 2 /**< IP protocol family version 6. */
#define PF_PACKET 3 /**< Packet family. */
#define PF_CAN 4 /**< Controller Area Network. */
#define PF_NET_MGMT 5 /**< Network management info. */
#define PF_LOCAL 6 /**< Inter-process communication */
#define PF_UNIX PF_LOCAL /**< Inter-process communication */
/* Address families. */
#define AF_UNSPEC PF_UNSPEC /**< Unspecified address family. */
#define AF_INET PF_INET /**< IP protocol family version 4. */
#define AF_INET6 PF_INET6 /**< IP protocol family version 6. */
#define AF_PACKET PF_PACKET /**< Packet family. */
#define AF_CAN PF_CAN /**< Controller Area Network. */
#define AF_NET_MGMT PF_NET_MGMT /**< Network management info. */
#define AF_LOCAL PF_LOCAL /**< Inter-process communication */
#define AF_UNIX PF_UNIX /**< Inter-process communication */
/** Protocol numbers from IANA/BSD */
enum net_ip_protocol {
IPPROTO_IP = 0, /**< IP protocol (pseudo-val for setsockopt() */
IPPROTO_ICMP = 1, /**< ICMP protocol */
IPPROTO_IGMP = 2, /**< IGMP protocol */
IPPROTO_IPIP = 4, /**< IPIP tunnels */
IPPROTO_TCP = 6, /**< TCP protocol */
IPPROTO_UDP = 17, /**< UDP protocol */
IPPROTO_IPV6 = 41, /**< IPv6 protocol */
IPPROTO_ICMPV6 = 58, /**< ICMPv6 protocol */
IPPROTO_RAW = 255, /**< RAW IP packets */
};
/** Protocol numbers for TLS protocols */
enum net_ip_protocol_secure {
IPPROTO_TLS_1_0 = 256, /**< TLS 1.0 protocol */
IPPROTO_TLS_1_1 = 257, /**< TLS 1.1 protocol */
IPPROTO_TLS_1_2 = 258, /**< TLS 1.2 protocol */
IPPROTO_DTLS_1_0 = 272, /**< DTLS 1.0 protocol */
IPPROTO_DTLS_1_2 = 273, /**< DTLS 1.2 protocol */
};
/** Socket type */
enum net_sock_type {
SOCK_STREAM = 1, /**< Stream socket type */
SOCK_DGRAM, /**< Datagram socket type */
SOCK_RAW /**< RAW socket type */
};
/** @brief Convert 16-bit value from network to host byte order.
*
* @param x The network byte order value to convert.
*
* @return Host byte order value.
*/
#define ntohs(x) sys_be16_to_cpu(x)
/** @brief Convert 32-bit value from network to host byte order.
*
* @param x The network byte order value to convert.
*
* @return Host byte order value.
*/
#define ntohl(x) sys_be32_to_cpu(x)
/** @brief Convert 64-bit value from network to host byte order.
*
* @param x The network byte order value to convert.
*
* @return Host byte order value.
*/
#define ntohll(x) sys_be64_to_cpu(x)
/** @brief Convert 16-bit value from host to network byte order.
*
* @param x The host byte order value to convert.
*
* @return Network byte order value.
*/
#define htons(x) sys_cpu_to_be16(x)
/** @brief Convert 32-bit value from host to network byte order.
*
* @param x The host byte order value to convert.
*
* @return Network byte order value.
*/
#define htonl(x) sys_cpu_to_be32(x)
/** @brief Convert 64-bit value from host to network byte order.
*
* @param x The host byte order value to convert.
*
* @return Network byte order value.
*/
#define htonll(x) sys_cpu_to_be64(x)
/** IPv6 address struct */
struct in6_addr {
union {
uint8_t s6_addr[16]; /**< IPv6 address buffer */
uint16_t s6_addr16[8]; /**< In big endian */
uint32_t s6_addr32[4]; /**< In big endian */
};
};
/** Binary size of the IPv6 address */
#define NET_IPV6_ADDR_SIZE 16
/** IPv4 address struct */
struct in_addr {
union {
uint8_t s4_addr[4]; /**< IPv4 address buffer */
uint16_t s4_addr16[2]; /**< In big endian */
uint32_t s4_addr32[1]; /**< In big endian */
uint32_t s_addr; /**< In big endian, for POSIX compatibility. */
};
};
/** Binary size of the IPv4 address */
#define NET_IPV4_ADDR_SIZE 4
/** Socket address family type */
typedef unsigned short int sa_family_t;
/** Length of a socket address */
#ifndef __socklen_t_defined
typedef size_t socklen_t;
#define __socklen_t_defined
#endif
/*
* Note that the sin_port and sin6_port are in network byte order
* in various sockaddr* structs.
*/
/** Socket address struct for IPv6. */
struct sockaddr_in6 {
sa_family_t sin6_family; /**< AF_INET6 */
uint16_t sin6_port; /**< Port number */
struct in6_addr sin6_addr; /**< IPv6 address */
uint8_t sin6_scope_id; /**< Interfaces for a scope */
};
/** Socket address struct for IPv4. */
struct sockaddr_in {
sa_family_t sin_family; /**< AF_INET */
uint16_t sin_port; /**< Port number */
struct in_addr sin_addr; /**< IPv4 address */
};
/** Socket address struct for packet socket. */
struct sockaddr_ll {
sa_family_t sll_family; /**< Always AF_PACKET */
uint16_t sll_protocol; /**< Physical-layer protocol */
int sll_ifindex; /**< Interface number */
uint16_t sll_hatype; /**< ARP hardware type */
uint8_t sll_pkttype; /**< Packet type */
uint8_t sll_halen; /**< Length of address */
uint8_t sll_addr[8]; /**< Physical-layer address, big endian */
};
/** @cond INTERNAL_HIDDEN */
/** Socket address struct for IPv6 where address is a pointer */
struct sockaddr_in6_ptr {
sa_family_t sin6_family; /**< AF_INET6 */
uint16_t sin6_port; /**< Port number */
struct in6_addr *sin6_addr; /**< IPv6 address */
uint8_t sin6_scope_id; /**< interfaces for a scope */
};
/** Socket address struct for IPv4 where address is a pointer */
struct sockaddr_in_ptr {
sa_family_t sin_family; /**< AF_INET */
uint16_t sin_port; /**< Port number */
struct in_addr *sin_addr; /**< IPv4 address */
};
/** Socket address struct for packet socket where address is a pointer */
struct sockaddr_ll_ptr {
sa_family_t sll_family; /**< Always AF_PACKET */
uint16_t sll_protocol; /**< Physical-layer protocol */
int sll_ifindex; /**< Interface number */
uint16_t sll_hatype; /**< ARP hardware type */
uint8_t sll_pkttype; /**< Packet type */
uint8_t sll_halen; /**< Length of address */
uint8_t *sll_addr; /**< Physical-layer address, big endian */
};
struct sockaddr_can_ptr {
sa_family_t can_family;
int can_ifindex;
};
/** @endcond */
#if !defined(HAVE_IOVEC)
/** IO vector array element */
struct iovec {
void *iov_base; /**< Pointer to data */
size_t iov_len; /**< Length of the data */
};
#endif
/** Message struct */
struct msghdr {
void *msg_name; /**< Optional socket address, big endian */
socklen_t msg_namelen; /**< Size of socket address */
struct iovec *msg_iov; /**< Scatter/gather array */
size_t msg_iovlen; /**< Number of elements in msg_iov */
void *msg_control; /**< Ancillary data */
size_t msg_controllen; /**< Ancillary data buffer len */
int msg_flags; /**< Flags on received message */
};
/** Control message ancillary data */
struct cmsghdr {
socklen_t cmsg_len; /**< Number of bytes, including header */
int cmsg_level; /**< Originating protocol */
int cmsg_type; /**< Protocol-specific type */
z_max_align_t cmsg_data[]; /**< Flexible array member to force alignment of cmsghdr */
};
/** @cond INTERNAL_HIDDEN */
/* Alignment for headers and data. These are arch specific but define
* them here atm if not found already.
*/
#if !defined(ALIGN_H)
#define ALIGN_H(x) ROUND_UP(x, __alignof__(struct cmsghdr))
#endif
#if !defined(ALIGN_D)
#define ALIGN_D(x) ROUND_UP(x, __alignof__(z_max_align_t))
#endif
/** @endcond */
#if !defined(CMSG_FIRSTHDR)
/**
* Returns a pointer to the first cmsghdr in the ancillary data buffer
* associated with the passed msghdr. It returns NULL if there isn't
* enough space for a cmsghdr in the buffer.
*/
#define CMSG_FIRSTHDR(msghdr) \
((msghdr)->msg_controllen >= sizeof(struct cmsghdr) ? \
(struct cmsghdr *)((msghdr)->msg_control) : NULL)
#endif
#if !defined(CMSG_NXTHDR)
/**
* Returns the next valid cmsghdr after the passed cmsghdr. It returns NULL
* when there isn't enough space left in the buffer.
*/
#define CMSG_NXTHDR(msghdr, cmsg) \
(((cmsg) == NULL) ? CMSG_FIRSTHDR(msghdr) : \
(((uint8_t *)(cmsg) + ALIGN_H((cmsg)->cmsg_len) + \
ALIGN_D(sizeof(struct cmsghdr)) > \
(uint8_t *)((msghdr)->msg_control) + (msghdr)->msg_controllen) ? \
NULL : \
(struct cmsghdr *)((uint8_t *)(cmsg) + \
ALIGN_H((cmsg)->cmsg_len))))
#endif
#if !defined(CMSG_DATA)
/**
* Returns a pointer to the data portion of a cmsghdr. The pointer returned
* cannot be assumed to be suitably aligned for accessing arbitrary payload
* data types. Applications should not cast it to a pointer type matching
* the payload, but should instead use memcpy(3) to copy data to or from a
* suitably declared object.
*/
#define CMSG_DATA(cmsg) ((uint8_t *)(cmsg) + ALIGN_D(sizeof(struct cmsghdr)))
#endif
#if !defined(CMSG_SPACE)
/**
* Returns the number of bytes an ancillary element with payload of the passed
* data length occupies.
*/
#define CMSG_SPACE(length) (ALIGN_D(sizeof(struct cmsghdr)) + ALIGN_H(length))
#endif
#if !defined(CMSG_LEN)
/**
* Returns the value to store in the cmsg_len member of the cmsghdr structure,
* taking into account any necessary alignment.
* It takes the data length as an argument.
*/
#define CMSG_LEN(length) (ALIGN_D(sizeof(struct cmsghdr)) + length)
#endif
/** @cond INTERNAL_HIDDEN */
/* Packet types. */
#define PACKET_HOST 0 /* To us */
#define PACKET_BROADCAST 1 /* To all */
#define PACKET_MULTICAST 2 /* To group */
#define PACKET_OTHERHOST 3 /* To someone else */
#define PACKET_OUTGOING 4 /* Originated by us */
#define PACKET_LOOPBACK 5
#define PACKET_FASTROUTE 6
/* ARP protocol HARDWARE identifiers. */
#define ARPHRD_ETHER 1
/* Note: These macros are defined in a specific order.
* The largest sockaddr size is the last one.
*/
#if defined(CONFIG_NET_IPV4)
#undef NET_SOCKADDR_MAX_SIZE
#undef NET_SOCKADDR_PTR_MAX_SIZE
#define NET_SOCKADDR_MAX_SIZE (sizeof(struct sockaddr_in))
#define NET_SOCKADDR_PTR_MAX_SIZE (sizeof(struct sockaddr_in_ptr))
#endif
#if defined(CONFIG_NET_SOCKETS_PACKET)
#undef NET_SOCKADDR_MAX_SIZE
#undef NET_SOCKADDR_PTR_MAX_SIZE
#define NET_SOCKADDR_MAX_SIZE (sizeof(struct sockaddr_ll))
#define NET_SOCKADDR_PTR_MAX_SIZE (sizeof(struct sockaddr_ll_ptr))
#endif
#if defined(CONFIG_NET_IPV6)
#undef NET_SOCKADDR_MAX_SIZE
#define NET_SOCKADDR_MAX_SIZE (sizeof(struct sockaddr_in6))
#if !defined(CONFIG_NET_SOCKETS_PACKET)
#undef NET_SOCKADDR_PTR_MAX_SIZE
#define NET_SOCKADDR_PTR_MAX_SIZE (sizeof(struct sockaddr_in6_ptr))
#endif
#endif
#if !defined(CONFIG_NET_IPV4)
#if !defined(CONFIG_NET_IPV6)
#if !defined(CONFIG_NET_SOCKETS_PACKET)
#define NET_SOCKADDR_MAX_SIZE (sizeof(struct sockaddr_in6))
#define NET_SOCKADDR_PTR_MAX_SIZE (sizeof(struct sockaddr_in6_ptr))
#endif
#endif
#endif
/** @endcond */
/** Generic sockaddr struct. Must be cast to proper type. */
struct sockaddr {
sa_family_t sa_family; /**< Address family */
/** @cond INTERNAL_HIDDEN */
char data[NET_SOCKADDR_MAX_SIZE - sizeof(sa_family_t)];
/** @endcond */
};
/** @cond INTERNAL_HIDDEN */
struct sockaddr_ptr {
sa_family_t family;
char data[NET_SOCKADDR_PTR_MAX_SIZE - sizeof(sa_family_t)];
};
/* Same as sockaddr in our case */
struct sockaddr_storage {
sa_family_t ss_family;
char data[NET_SOCKADDR_MAX_SIZE - sizeof(sa_family_t)];
};
/* Socket address struct for UNIX domain sockets */
struct sockaddr_un {
sa_family_t sun_family; /* AF_UNIX */
char sun_path[NET_SOCKADDR_MAX_SIZE - sizeof(sa_family_t)];
};
struct net_addr {
sa_family_t family;
union {
struct in6_addr in6_addr;
struct in_addr in_addr;
};
};
/** A pointer to IPv6 any address (all values zero) */
extern const struct in6_addr in6addr_any;
/** A pointer to IPv6 loopback address (::1) */
extern const struct in6_addr in6addr_loopback;
/** @endcond */
/** IPv6 address initializer */
#define IN6ADDR_ANY_INIT { { { 0, 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0 } } }
/** IPv6 loopback address initializer */
#define IN6ADDR_LOOPBACK_INIT { { { 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 1 } } }
/** IPv4 any address */
#define INADDR_ANY 0
/** IPv4 address initializer */
#define INADDR_ANY_INIT { { { INADDR_ANY } } }
/** IPv4 loopback address initializer */
#define INADDR_LOOPBACK_INIT { { { 127, 0, 0, 1 } } }
/** Max length of the IPv4 address as a string. Defined by POSIX. */
#define INET_ADDRSTRLEN 16
/** Max length of the IPv6 address as a string. Takes into account possible
* mapped IPv4 addresses.
*/
#define INET6_ADDRSTRLEN 46
/** @cond INTERNAL_HIDDEN */
/* These are for internal usage of the stack */
#define NET_IPV6_ADDR_LEN sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx")
#define NET_IPV4_ADDR_LEN sizeof("xxx.xxx.xxx.xxx")
/** @endcond */
/** @brief IP Maximum Transfer Unit */
enum net_ip_mtu {
/** IPv6 MTU length. We must be able to receive this size IPv6 packet
* without fragmentation.
*/
#if defined(CONFIG_NET_NATIVE_IPV6)
NET_IPV6_MTU = CONFIG_NET_IPV6_MTU,
#else
NET_IPV6_MTU = 1280,
#endif
/** IPv4 MTU length. We must be able to receive this size IPv4 packet
* without fragmentation.
*/
NET_IPV4_MTU = 576,
};
/** @brief Network packet priority settings described in IEEE 802.1Q Annex I.1 */
enum net_priority {
NET_PRIORITY_BK = 1, /**< Background (lowest) */
NET_PRIORITY_BE = 0, /**< Best effort (default) */
NET_PRIORITY_EE = 2, /**< Excellent effort */
NET_PRIORITY_CA = 3, /**< Critical applications */
NET_PRIORITY_VI = 4, /**< Video, < 100 ms latency and jitter */
NET_PRIORITY_VO = 5, /**< Voice, < 10 ms latency and jitter */
NET_PRIORITY_IC = 6, /**< Internetwork control */
NET_PRIORITY_NC = 7 /**< Network control (highest) */
} __packed;
#define NET_MAX_PRIORITIES 8 /**< How many priority values there are */
/** @brief IPv6/IPv4 network connection tuple */
struct net_tuple {
struct net_addr *remote_addr; /**< IPv6/IPv4 remote address */
struct net_addr *local_addr; /**< IPv6/IPv4 local address */
uint16_t remote_port; /**< UDP/TCP remote port */
uint16_t local_port; /**< UDP/TCP local port */
enum net_ip_protocol ip_proto; /**< IP protocol */
};
/** @brief What is the current state of the network address */
enum net_addr_state {
NET_ADDR_ANY_STATE = -1, /**< Default (invalid) address type */
NET_ADDR_TENTATIVE = 0, /**< Tentative address */
NET_ADDR_PREFERRED, /**< Preferred address */
NET_ADDR_DEPRECATED, /**< Deprecated address */
} __packed;
/** @brief How the network address is assigned to network interface */
enum net_addr_type {
/** Default value. This is not a valid value. */
NET_ADDR_ANY = 0,
/** Auto configured address */
NET_ADDR_AUTOCONF,
/** Address is from DHCP */
NET_ADDR_DHCP,
/** Manually set address */
NET_ADDR_MANUAL,
/** Manually set address which is overridable by DHCP */
NET_ADDR_OVERRIDABLE,
} __packed;
/** @cond INTERNAL_HIDDEN */
/* IPv6 base header (RFC 8200, section 3). Multi-byte fields are stored
 * in network byte order.
 */
struct net_ipv6_hdr {
	uint8_t vtc;       /* Version (high nibble) + upper traffic class bits */
	uint8_t tcflow;    /* Lower traffic class bits + upper flow label bits */
	uint16_t flow;     /* Remaining 16 bits of the flow label */
	uint16_t len;      /* Payload length */
	uint8_t nexthdr;   /* Next header type */
	uint8_t hop_limit; /* Hop limit */
	uint8_t src[NET_IPV6_ADDR_SIZE]; /* Source address */
	uint8_t dst[NET_IPV6_ADDR_SIZE]; /* Destination address */
} __packed;
/* IPv6 fragment extension header (RFC 8200, section 4.5) */
struct net_ipv6_frag_hdr {
	uint8_t nexthdr;  /* Next header type */
	uint8_t reserved; /* Reserved, sent as zero */
	uint16_t offset;  /* Fragment offset (upper 13 bits) + M flag */
	uint32_t id;      /* Identification of the fragmented packet */
} __packed;
/* IPv4 header (RFC 791, section 3.1) */
struct net_ipv4_hdr {
	uint8_t vhl;       /* Version (high nibble) + header length in 32-bit words */
	uint8_t tos;       /* Type of service (DSCP + ECN) */
	uint16_t len;      /* Total length */
	uint8_t id[2];     /* Identification */
	uint8_t offset[2]; /* Flags + fragment offset */
	uint8_t ttl;       /* Time to live */
	uint8_t proto;     /* Upper layer protocol */
	uint16_t chksum;   /* Header checksum */
	uint8_t src[NET_IPV4_ADDR_SIZE]; /* Source address */
	uint8_t dst[NET_IPV4_ADDR_SIZE]; /* Destination address */
} __packed;
/* Common leading part of ICMPv4 and ICMPv6 headers */
struct net_icmp_hdr {
	uint8_t type;    /* Message type */
	uint8_t code;    /* Type sub-code */
	uint16_t chksum; /* Checksum */
} __packed;
/* UDP header (RFC 768) */
struct net_udp_hdr {
	uint16_t src_port; /* Source port */
	uint16_t dst_port; /* Destination port */
	uint16_t len;      /* Length of header plus data */
	uint16_t chksum;   /* Checksum */
} __packed;
/* TCP header (RFC 9293, section 3.1) */
struct net_tcp_hdr {
	uint16_t src_port; /* Source port */
	uint16_t dst_port; /* Destination port */
	uint8_t seq[4];    /* Sequence number */
	uint8_t ack[4];    /* Acknowledgment number */
	uint8_t offset;    /* Data offset (high nibble) + reserved bits */
	uint8_t flags;     /* Control flags */
	uint8_t wnd[2];    /* Receive window */
	uint16_t chksum;   /* Checksum */
	uint8_t urg[2];    /* Urgent pointer */
	uint8_t optdata[0]; /* Variable-length options, if any */
} __packed;
/* Map a net_addr_type value to a short human-readable label. */
static inline const char *net_addr_type2str(enum net_addr_type type)
{
	if (type == NET_ADDR_AUTOCONF) {
		return "AUTO";
	}

	if (type == NET_ADDR_DHCP) {
		return "DHCP";
	}

	if (type == NET_ADDR_MANUAL) {
		return "MANUAL";
	}

	if (type == NET_ADDR_OVERRIDABLE) {
		return "OVERRIDE";
	}

	/* NET_ADDR_ANY and any out-of-range value */
	return "<unknown>";
}
/* IPv6 extension headers types */
#define NET_IPV6_NEXTHDR_HBHO 0
#define NET_IPV6_NEXTHDR_DESTO 60
#define NET_IPV6_NEXTHDR_ROUTING 43
#define NET_IPV6_NEXTHDR_FRAG 44
#define NET_IPV6_NEXTHDR_NONE 59
/**
 * These two unions are here temporarily, for as long as net_context.h
 * remains public and is not yet part of the core only.
 */
union net_ip_header {
struct net_ipv4_hdr *ipv4;
struct net_ipv6_hdr *ipv6;
};
union net_proto_header {
struct net_udp_hdr *udp;
struct net_tcp_hdr *tcp;
};
#define NET_UDPH_LEN 8 /* Size of UDP header */
#define NET_TCPH_LEN 20 /* Size of TCP header */
#define NET_ICMPH_LEN 4 /* Size of ICMP header */
#define NET_IPV6H_LEN 40 /* Size of IPv6 header */
#define NET_ICMPV6H_LEN NET_ICMPH_LEN /* Size of ICMPv6 header */
#define NET_IPV6UDPH_LEN (NET_UDPH_LEN + NET_IPV6H_LEN) /* IPv6 + UDP */
#define NET_IPV6TCPH_LEN (NET_TCPH_LEN + NET_IPV6H_LEN) /* IPv6 + TCP */
#define NET_IPV6ICMPH_LEN (NET_IPV6H_LEN + NET_ICMPH_LEN) /* ICMPv6 + IPv6 */
#define NET_IPV6_FRAGH_LEN 8
#define NET_IPV4H_LEN 20 /* Size of IPv4 header */
#define NET_ICMPV4H_LEN NET_ICMPH_LEN /* Size of ICMPv4 header */
#define NET_IPV4UDPH_LEN (NET_UDPH_LEN + NET_IPV4H_LEN) /* IPv4 + UDP */
#define NET_IPV4TCPH_LEN (NET_TCPH_LEN + NET_IPV4H_LEN) /* IPv4 + TCP */
#define NET_IPV4ICMPH_LEN (NET_IPV4H_LEN + NET_ICMPH_LEN) /* ICMPv4 + IPv4 */
#define NET_IPV6H_LENGTH_OFFSET 0x04 /* Offset of the Length field in the IPv6 header */
#define NET_IPV6_FRAGH_OFFSET_MASK 0xfff8 /* Mask for the 13-bit Fragment Offset field */
#define NET_IPV4_FRAGH_OFFSET_MASK 0x1fff /* Mask for the 13-bit Fragment Offset field */
#define NET_IPV4_MORE_FRAG_MASK 0x2000 /* Mask for the 1-bit More Fragments field */
#define NET_IPV4_DO_NOT_FRAG_MASK 0x4000 /* Mask for the 1-bit Do Not Fragment field */
/** @endcond */
/**
* @brief Check if the IPv6 address is a loopback address (::1).
*
* @param addr IPv6 address
*
* @return True if address is a loopback address, False otherwise.
*/
/* ::1 — the first 15 address bytes are zero and the last byte is 1.
 * Byte-wise inspection is endian- and alignment-safe.
 */
static inline bool net_ipv6_is_addr_loopback(struct in6_addr *addr)
{
	const uint8_t *b = addr->s6_addr;
	int i;

	for (i = 0; i < 15; i++) {
		if (b[i] != 0U) {
			return false;
		}
	}

	return b[15] == 1U;
}
/**
* @brief Check if the IPv6 address is a multicast address.
*
* @param addr IPv6 address
*
* @return True if address is multicast address, False otherwise.
*/
/* IPv6 multicast addresses live in ff00::/8 (RFC 4291). */
static inline bool net_ipv6_is_addr_mcast(const struct in6_addr *addr)
{
	const uint8_t first_byte = addr->s6_addr[0];

	return first_byte == 0xff;
}
struct net_if;
struct net_if_config;
extern struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
struct net_if **iface);
/**
* @brief Check if IPv6 address is found in one of the network interfaces.
*
* @param addr IPv6 address
*
* @return True if address was found, False otherwise.
*/
/* The address is "ours" when any network interface has it assigned. */
static inline bool net_ipv6_is_my_addr(struct in6_addr *addr)
{
	struct net_if_addr *ifaddr = net_if_ipv6_addr_lookup(addr, NULL);

	return ifaddr != NULL;
}
extern struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(
const struct in6_addr *addr, struct net_if **iface);
/**
* @brief Check if IPv6 multicast address is found in one of the
* network interfaces.
*
* @param maddr Multicast IPv6 address
*
* @return True if address was found, False otherwise.
*/
/* True when some interface has joined the given multicast group. */
static inline bool net_ipv6_is_my_maddr(struct in6_addr *maddr)
{
	struct net_if_mcast_addr *group = net_if_ipv6_maddr_lookup(maddr, NULL);

	return group != NULL;
}
/**
* @brief Check if two IPv6 addresses are same when compared after prefix mask.
*
* @param addr1 First IPv6 address.
* @param addr2 Second IPv6 address.
* @param length Prefix length (max length is 128).
*
* @return True if IPv6 prefixes are the same, False otherwise.
*/
static inline bool net_ipv6_is_prefix(const uint8_t *addr1,
				      const uint8_t *addr2,
				      uint8_t length)
{
	/* Number of host (non-prefix) bits. A length > 128 makes this
	 * wrap, but that case is rejected below before "bits" is used.
	 */
	uint8_t bits = 128 - length;
	uint8_t bytes = length / 8U; /* whole bytes fully inside the prefix */
	uint8_t remain = bits % 8;   /* host bits within the partial byte */
	uint8_t mask;
	if (length > 128) {
		return false;
	}
	if (memcmp(addr1, addr2, bytes)) {
		return false;
	}
	if (!remain) {
		/* No remaining bits, the prefixes are the same as first
		 * bytes are the same.
		 */
		return true;
	}
	/* Create a mask that has remaining most significant bits set */
	mask = (uint8_t)((0xff << (8 - remain)) ^ 0xff) << remain;
	return (addr1[bytes] & mask) == (addr2[bytes] & mask);
}
/**
* @brief Check if the IPv4 address is a loopback address (127.0.0.0/8).
*
* @param addr IPv4 address
*
* @return True if address is a loopback address, False otherwise.
*/
/* Loopback is the whole 127.0.0.0/8 block — only the first octet matters. */
static inline bool net_ipv4_is_addr_loopback(struct in_addr *addr)
{
	const uint8_t *octets = (const uint8_t *)addr;

	return octets[0] == 127U;
}
/**
* @brief Check if the IPv4 address is unspecified (all bits zero)
*
* @param addr IPv4 address.
*
* @return True if the address is unspecified, false otherwise.
*/
/* 0.0.0.0 — every one of the four address octets is zero. Byte access
 * avoids any unaligned word load.
 */
static inline bool net_ipv4_is_addr_unspecified(const struct in_addr *addr)
{
	const uint8_t *octets = (const uint8_t *)addr;

	return (octets[0] | octets[1] | octets[2] | octets[3]) == 0;
}
/**
* @brief Check if the IPv4 address is a multicast address.
*
* @param addr IPv4 address
*
* @return True if address is multicast address, False otherwise.
*/
/* IPv4 multicast is 224.0.0.0/4, i.e. the top nibble of the first
 * octet is 0xE. The address bytes are in network (big-endian) order,
 * so the first octet is the most significant one.
 */
static inline bool net_ipv4_is_addr_mcast(const struct in_addr *addr)
{
	const uint8_t *octets = (const uint8_t *)addr;

	return (octets[0] & 0xF0) == 0xE0;
}
/**
* @brief Check if the given IPv4 address is a link local address.
*
* @param addr A valid pointer on an IPv4 address
*
* @return True if it is, false otherwise.
*/
/* Link-local IPv4 addresses are 169.254.0.0/16 (RFC 3927). */
static inline bool net_ipv4_is_ll_addr(const struct in_addr *addr)
{
	const uint8_t *octets = (const uint8_t *)addr;

	return octets[0] == 169U && octets[1] == 254U;
}
/**
 * @brief Check if the given IPv4 address is from a private address range.
 *
 * Covers the RFC 1918 private ranges, the RFC 6598 shared address space
 * and the RFC 5737 documentation (TEST-NET) ranges.
 *
 * @param addr A valid pointer on an IPv4 address
 *
 * @return True if it is, false otherwise.
 */
static inline bool net_ipv4_is_private_addr(const struct in_addr *addr)
{
	/* Inspect the address octet by octet; the bytes are stored in
	 * network (big-endian) order, so this is endian- and
	 * alignment-safe.
	 */
	const uint8_t *b = (const uint8_t *)addr;

	return b[0] == 10 ||                                 /* 10.0.0.0/8 (RFC 1918) */
	       (b[0] == 100 && (b[1] & 0xC0) == 64) ||       /* 100.64.0.0/10 (RFC 6598) */
	       (b[0] == 172 && (b[1] & 0xF0) == 16) ||       /* 172.16.0.0/12 (RFC 1918) */
	       (b[0] == 192 && b[1] == 168) ||               /* 192.168.0.0/16 (RFC 1918) */
	       (b[0] == 192 && b[1] == 0 && b[2] == 2) ||    /* 192.0.2.0/24 (TEST-NET-1) */
	       (b[0] == 198 && b[1] == 51 && b[2] == 100) || /* 198.51.100.0/24 (TEST-NET-2,
							      * was wrongly 192.51.100.0/24
							      */
	       (b[0] == 203 && b[1] == 0 && b[2] == 113);    /* 203.0.113.0/24 (TEST-NET-3) */
}
/**
* @brief Copy an IPv4 or IPv6 address
*
* @param dest Destination IP address.
* @param src Source IP address.
*
* @return Destination address.
*/
#define net_ipaddr_copy(dest, src) \
UNALIGNED_PUT(UNALIGNED_GET(src), dest)
/**
* @brief Copy an IPv4 address raw buffer
*
* @param dest Destination IP address.
* @param src Source IP address.
*/
/* Copy the four IPv4 address bytes. A plain byte copy is safe for
 * unaligned source and destination buffers, and mirrors the IPv6
 * raw-copy helper.
 */
static inline void net_ipv4_addr_copy_raw(uint8_t *dest,
					  const uint8_t *src)
{
	memcpy(dest, src, sizeof(struct in_addr));
}
/**
* @brief Copy an IPv6 address raw buffer
*
* @param dest Destination IP address.
* @param src Source IP address.
*/
/* Copy the 16 IPv6 address bytes one at a time; buffers may overlap
 * only if they are identical, as with the original memcpy form.
 */
static inline void net_ipv6_addr_copy_raw(uint8_t *dest,
					  const uint8_t *src)
{
	size_t i;

	for (i = 0; i < sizeof(struct in6_addr); i++) {
		dest[i] = src[i];
	}
}
/**
* @brief Compare two IPv4 addresses
*
* @param addr1 Pointer to IPv4 address.
* @param addr2 Pointer to IPv4 address.
*
* @return True if the addresses are the same, false otherwise.
*/
/* Equality of the four raw address bytes; byte comparison avoids any
 * unaligned 32-bit load and is endian-neutral.
 */
static inline bool net_ipv4_addr_cmp(const struct in_addr *addr1,
				     const struct in_addr *addr2)
{
	return memcmp(addr1, addr2, sizeof(struct in_addr)) == 0;
}
/**
* @brief Compare two raw IPv4 address buffers
*
* @param addr1 Pointer to IPv4 address buffer.
* @param addr2 Pointer to IPv4 address buffer.
*
* @return True if the addresses are the same, false otherwise.
*/
/* Raw-buffer variant: both buffers hold 4 address bytes. */
static inline bool net_ipv4_addr_cmp_raw(const uint8_t *addr1,
					 const uint8_t *addr2)
{
	const struct in_addr *a = (const struct in_addr *)addr1;
	const struct in_addr *b = (const struct in_addr *)addr2;

	return net_ipv4_addr_cmp(a, b);
}
/**
* @brief Compare two IPv6 addresses
*
* @param addr1 Pointer to IPv6 address.
* @param addr2 Pointer to IPv6 address.
*
* @return True if the addresses are the same, false otherwise.
*/
/* Two IPv6 addresses are equal when all 16 bytes match. */
static inline bool net_ipv6_addr_cmp(const struct in6_addr *addr1,
				     const struct in6_addr *addr2)
{
	const uint8_t *a = (const uint8_t *)addr1;
	const uint8_t *b = (const uint8_t *)addr2;
	size_t i;

	for (i = 0; i < sizeof(struct in6_addr); i++) {
		if (a[i] != b[i]) {
			return false;
		}
	}

	return true;
}
/**
* @brief Compare two raw IPv6 address buffers
*
* @param addr1 Pointer to IPv6 address buffer.
* @param addr2 Pointer to IPv6 address buffer.
*
* @return True if the addresses are the same, false otherwise.
*/
/* Raw-buffer variant: both buffers hold 16 address bytes. */
static inline bool net_ipv6_addr_cmp_raw(const uint8_t *addr1,
					 const uint8_t *addr2)
{
	const struct in6_addr *a = (const struct in6_addr *)addr1;
	const struct in6_addr *b = (const struct in6_addr *)addr2;

	return net_ipv6_addr_cmp(a, b);
}
/**
* @brief Check if the given IPv6 address is a link local address.
*
* @param addr A valid pointer on an IPv6 address
*
* @return True if it is, false otherwise.
*/
/* Matches the fe80::/16 pattern: first byte 0xfe, second byte 0x80
 * (equivalent to the original big-endian 16-bit compare against
 * htons(0xFE80), but endian- and alignment-independent).
 */
static inline bool net_ipv6_is_ll_addr(const struct in6_addr *addr)
{
	return addr->s6_addr[0] == 0xfe && addr->s6_addr[1] == 0x80;
}
/**
* @brief Check if the given IPv6 address is a site local address.
*
* @param addr A valid pointer on an IPv6 address
*
* @return True if it is, false otherwise.
*/
/* Matches the (deprecated) site-local fec0::/16 pattern: first byte
 * 0xfe, second byte 0xc0 — byte-wise form of the original 16-bit
 * compare against htons(0xFEC0).
 */
static inline bool net_ipv6_is_sl_addr(const struct in6_addr *addr)
{
	return addr->s6_addr[0] == 0xfe && addr->s6_addr[1] == 0xc0;
}
/**
* @brief Check if the given IPv6 address is a unique local address.
*
* @param addr A valid pointer on an IPv6 address
*
* @return True if it is, false otherwise.
*/
/* Unique local addresses as generated per RFC 4193 start with 0xfd
 * (the locally-assigned fd00::/8 half of fc00::/7).
 */
static inline bool net_ipv6_is_ula_addr(const struct in6_addr *addr)
{
	const uint8_t first_byte = addr->s6_addr[0];

	return first_byte == 0xfd;
}
/**
* @brief Check if the given IPv6 address is a global address.
*
* @param addr A valid pointer on an IPv6 address
*
* @return True if it is, false otherwise.
*/
/* Global unicast addresses are 2000::/3: the top three bits of the
 * first byte are 001.
 */
static inline bool net_ipv6_is_global_addr(const struct in6_addr *addr)
{
	return (addr->s6_addr[0] >> 5) == 0x01;
}
/**
 * @brief Check if the given IPv6 address is from a private/local address range.
 *
 * Covers the RFC 3849 documentation prefix (2001:db8::/32) and the
 * RFC 4193 unique local address block (fc00::/7).
 *
 * @param addr A valid pointer on an IPv6 address
 *
 * @return True if it is, false otherwise.
 */
static inline bool net_ipv6_is_private_addr(const struct in6_addr *addr)
{
	const uint8_t *b = addr->s6_addr;

	/* 2001:db8::/32 — documentation/example prefix */
	if (b[0] == 0x20 && b[1] == 0x01 && b[2] == 0x0d && b[3] == 0xb8) {
		return true;
	}

	/* fc00::/7 unique local addresses: first byte is 0xfc or 0xfd.
	 * The previous 0xfc000000 mask performed a /6 match, wrongly
	 * accepting fe00::/8 (incl. fe80:: link-local) and ff00::/8
	 * multicast as "private".
	 */
	return (b[0] & 0xfe) == 0xfc;
}
/**
* @brief Return pointer to any (all bits zeros) IPv6 address.
*
* @return Any IPv6 address.
*/
const struct in6_addr *net_ipv6_unspecified_address(void);
/**
* @brief Return pointer to any (all bits zeros) IPv4 address.
*
* @return Any IPv4 address.
*/
const struct in_addr *net_ipv4_unspecified_address(void);
/**
* @brief Return pointer to broadcast (all bits ones) IPv4 address.
*
* @return Broadcast IPv4 address.
*/
const struct in_addr *net_ipv4_broadcast_address(void);
struct net_if;
extern bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
const struct in_addr *addr);
/**
* @brief Check if the given address belongs to same subnet that
* has been configured for the interface.
*
* @param iface A valid pointer on an interface
* @param addr IPv4 address
*
* @return True if address is in same subnet, false otherwise.
*/
/* Thin convenience wrapper over the net_if implementation. */
static inline bool net_ipv4_addr_mask_cmp(struct net_if *iface,
					  const struct in_addr *addr)
{
	bool same_subnet = net_if_ipv4_addr_mask_cmp(iface, addr);

	return same_subnet;
}
extern bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
const struct in_addr *addr);
/**
* @brief Check if the given IPv4 address is a broadcast address.
*
* @param iface Interface to use. Must be a valid pointer to an interface.
* @param addr IPv4 address
*
* @return True if address is a broadcast address, false otherwise.
*/
#if defined(CONFIG_NET_NATIVE_IPV4)
static inline bool net_ipv4_is_addr_bcast(struct net_if *iface,
					  const struct in_addr *addr)
{
	/* 255.255.255.255 is broadcast regardless of the interface */
	if (net_ipv4_addr_cmp(addr, net_ipv4_broadcast_address())) {
		return true;
	}
	/* Otherwise check against the interface's subnet broadcast */
	return net_if_ipv4_is_addr_bcast(iface, addr);
}
#else
/* Without native IPv4 support nothing is treated as broadcast. */
static inline bool net_ipv4_is_addr_bcast(struct net_if *iface,
					  const struct in_addr *addr)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(addr);
	return false;
}
#endif
extern struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
struct net_if **iface);
/**
* @brief Check if the IPv4 address is assigned to any network interface
* in the system.
*
* @param addr A valid pointer on an IPv4 address
*
* @return True if IPv4 address is found in one of the network interfaces,
* False otherwise.
*/
/* First look the address up on the interfaces; failing that, it may
 * still be "ours" as a broadcast address.
 */
static inline bool net_ipv4_is_my_addr(const struct in_addr *addr)
{
	if (net_if_ipv4_addr_lookup(addr, NULL) != NULL) {
		return true;
	}

	return net_ipv4_is_addr_bcast(NULL, addr);
}
/**
* @brief Check if the IPv6 address is unspecified (all bits zero)
*
* @param addr IPv6 address.
*
* @return True if the address is unspecified, false otherwise.
*/
/* :: — every one of the 16 address bytes is zero. Byte access avoids
 * unaligned 32-bit loads.
 */
static inline bool net_ipv6_is_addr_unspecified(const struct in6_addr *addr)
{
	const uint8_t *b = addr->s6_addr;
	size_t i;

	for (i = 0; i < sizeof(addr->s6_addr); i++) {
		if (b[i] != 0U) {
			return false;
		}
	}

	return true;
}
/**
 * @brief Check if the IPv6 address is solicited node multicast address
 * FF02:0:0:0:0:1:FFXX:XXXX defined in RFC 3513
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is solicited node address, false otherwise.
 */
static inline bool net_ipv6_is_addr_solicited_node(const struct in6_addr *addr)
{
	/* ff02:0000:... */
	if (UNALIGNED_GET(&addr->s6_addr32[0]) != htonl(0xff020000)) {
		return false;
	}

	/* ...:0000:0000:... */
	if (UNALIGNED_GET(&addr->s6_addr32[1]) != 0x00000000) {
		return false;
	}

	/* ...:0000:0001:... */
	if (UNALIGNED_GET(&addr->s6_addr32[2]) != htonl(0x00000001)) {
		return false;
	}

	/* ...:ffXX:XXXX - only the leading ff byte of the last word
	 * is significant.
	 */
	return (UNALIGNED_GET(&addr->s6_addr32[3]) & htonl(0xff000000)) ==
	       htonl(0xff000000);
}
/**
 * @brief Check if the IPv6 address is a given scope multicast
 * address (FFyx::).
 *
 * @param addr IPv6 address
 * @param scope Scope to check
 *
 * @return True if the address is in given scope multicast address,
 * false otherwise.
 */
static inline bool net_ipv6_is_addr_mcast_scope(const struct in6_addr *addr,
						int scope)
{
	/* Must be a multicast address (ff00::/8) at all. */
	if (addr->s6_addr[0] != 0xff) {
		return false;
	}

	/* The scope is the low nibble of the second byte; the high
	 * nibble holds the multicast flags and is ignored here.
	 */
	return (addr->s6_addr[1] & 0xF) == scope;
}
/**
 * @brief Check if the IPv6 addresses have the same multicast scope (FFyx::).
 *
 * @param addr_1 IPv6 address 1
 * @param addr_2 IPv6 address 2
 *
 * @return True if both addresses have same multicast scope,
 * false otherwise.
 */
static inline bool net_ipv6_is_same_mcast_scope(const struct in6_addr *addr_1,
						const struct in6_addr *addr_2)
{
	/* Both must be multicast (ff00::/8)... */
	if (addr_1->s6_addr[0] != 0xff) {
		return false;
	}

	if (addr_2->s6_addr[0] != 0xff) {
		return false;
	}

	/* ...and carry an identical flags/scope byte. */
	return addr_1->s6_addr[1] == addr_2->s6_addr[1];
}
/**
 * @brief Check if the IPv6 address is a global multicast address (FFxE::/16).
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is global multicast address, false otherwise.
 */
static inline bool net_ipv6_is_addr_mcast_global(const struct in6_addr *addr)
{
	return net_ipv6_is_addr_mcast_scope(addr, 0x0e); /* RFC 7346 global scope */
}
/**
 * @brief Check if the IPv6 address is an interface scope multicast
 * address (FFx1::).
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is an interface scope multicast address,
 * false otherwise.
 */
static inline bool net_ipv6_is_addr_mcast_iface(const struct in6_addr *addr)
{
	return net_ipv6_is_addr_mcast_scope(addr, 0x01); /* interface-local */
}
/**
 * @brief Check if the IPv6 address is a link local scope multicast
 * address (FFx2::).
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is a link local scope multicast address,
 * false otherwise.
 */
static inline bool net_ipv6_is_addr_mcast_link(const struct in6_addr *addr)
{
	return net_ipv6_is_addr_mcast_scope(addr, 0x02); /* link-local */
}
/**
 * @brief Check if the IPv6 address is a mesh-local scope multicast
 * address (FFx3::).
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is a mesh-local scope multicast address,
 * false otherwise.
 */
static inline bool net_ipv6_is_addr_mcast_mesh(const struct in6_addr *addr)
{
	return net_ipv6_is_addr_mcast_scope(addr, 0x03); /* realm-local in RFC 7346 */
}
/**
 * @brief Check if the IPv6 address is a site scope multicast
 * address (FFx5::).
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is a site scope multicast address,
 * false otherwise.
 */
static inline bool net_ipv6_is_addr_mcast_site(const struct in6_addr *addr)
{
	return net_ipv6_is_addr_mcast_scope(addr, 0x05); /* site-local */
}
/**
 * @brief Check if the IPv6 address is an organization scope multicast
 * address (FFx8::).
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is an organization scope multicast address,
 * false otherwise.
 */
static inline bool net_ipv6_is_addr_mcast_org(const struct in6_addr *addr)
{
	return net_ipv6_is_addr_mcast_scope(addr, 0x08); /* organization-local */
}
/**
 * @brief Check if the IPv6 address belongs to certain multicast group
 *
 * @param addr IPv6 address.
 * @param group Group id IPv6 address, the values must be in network
 * byte order
 *
 * @return True if the IPv6 multicast address belongs to given multicast
 * group, false otherwise.
 */
static inline bool net_ipv6_is_addr_mcast_group(const struct in6_addr *addr,
						const struct in6_addr *group)
{
	/* Compare everything after the leading flags/scope byte pair:
	 * 16-bit words 1..3 cover bytes 2..7 and 32-bit words 1..3 cover
	 * bytes 4..15 (the overlap is harmless). Each pair of compared
	 * words must use matching indices; the original code compared
	 * addr word32[2] against group word32[1], which broke the check
	 * for bytes 8..11.
	 */
	return UNALIGNED_GET(&addr->s6_addr16[1]) == group->s6_addr16[1] &&
	       UNALIGNED_GET(&addr->s6_addr16[2]) == group->s6_addr16[2] &&
	       UNALIGNED_GET(&addr->s6_addr16[3]) == group->s6_addr16[3] &&
	       UNALIGNED_GET(&addr->s6_addr32[1]) == group->s6_addr32[1] &&
	       UNALIGNED_GET(&addr->s6_addr32[2]) == group->s6_addr32[2] &&
	       UNALIGNED_GET(&addr->s6_addr32[3]) == group->s6_addr32[3];
}
/**
 * @brief Check if the IPv6 address belongs to the all nodes multicast group
 *
 * @param addr IPv6 address
 *
 * @return True if the IPv6 multicast address belongs to the all nodes multicast
 * group, false otherwise
 */
static inline bool
net_ipv6_is_addr_mcast_all_nodes_group(const struct in6_addr *addr)
{
	/* Group id ::1 template. Only the group id bytes are compared by
	 * net_ipv6_is_addr_mcast_group(); the leading flags/scope byte
	 * pair is ignored, so one template serves all scopes.
	 */
	static const struct in6_addr all_nodes_mcast_group = {
		{ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		    0x00, 0x00, 0x00, 0x00, 0x00, 0x01 } }
	};

	return net_ipv6_is_addr_mcast_group(addr, &all_nodes_mcast_group);
}
/**
 * @brief Check if the IPv6 address is an interface scope all nodes multicast
 * address (FF01::1).
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is an interface scope all nodes multicast
 * address, false otherwise.
 */
static inline bool
net_ipv6_is_addr_mcast_iface_all_nodes(const struct in6_addr *addr)
{
	if (!net_ipv6_is_addr_mcast_iface(addr)) {
		return false;
	}

	return net_ipv6_is_addr_mcast_all_nodes_group(addr);
}
/**
 * @brief Check if the IPv6 address is a link local scope all nodes multicast
 * address (FF02::1).
 *
 * @param addr IPv6 address.
 *
 * @return True if the address is a link local scope all nodes multicast
 * address, false otherwise.
 */
static inline bool
net_ipv6_is_addr_mcast_link_all_nodes(const struct in6_addr *addr)
{
	if (!net_ipv6_is_addr_mcast_link(addr)) {
		return false;
	}

	return net_ipv6_is_addr_mcast_all_nodes_group(addr);
}
/**
 * @brief Create solicited node IPv6 multicast address
 * FF02:0:0:0:0:1:FFXX:XXXX defined in RFC 3513
 *
 * @param src IPv6 address the solicited node address is derived from.
 * @param dst Where the resulting ff02::1:ffXX:XXXX address is written.
 */
static inline
void net_ipv6_addr_create_solicited_node(const struct in6_addr *src,
					 struct in6_addr *dst)
{
	/* Fixed prefix ff02::1:ff00:0/104 ... */
	dst->s6_addr[0] = 0xFF;
	dst->s6_addr[1] = 0x02;
	UNALIGNED_PUT(0, &dst->s6_addr16[1]);
	UNALIGNED_PUT(0, &dst->s6_addr16[2]);
	UNALIGNED_PUT(0, &dst->s6_addr16[3]);
	UNALIGNED_PUT(0, &dst->s6_addr16[4]);
	dst->s6_addr[10] = 0U;
	dst->s6_addr[11] = 0x01;
	dst->s6_addr[12] = 0xFF;
	/* ...followed by the last 24 bits of src (bytes 13..15). */
	dst->s6_addr[13] = src->s6_addr[13];
	UNALIGNED_PUT(UNALIGNED_GET(&src->s6_addr16[7]), &dst->s6_addr16[7]);
}
/** @brief Construct an IPv6 address from eight 16-bit words.
 *
 * @param addr IPv6 address
 * @param addr0 16-bit word which is part of the address
 * @param addr1 16-bit word which is part of the address
 * @param addr2 16-bit word which is part of the address
 * @param addr3 16-bit word which is part of the address
 * @param addr4 16-bit word which is part of the address
 * @param addr5 16-bit word which is part of the address
 * @param addr6 16-bit word which is part of the address
 * @param addr7 16-bit word which is part of the address
 */
static inline void net_ipv6_addr_create(struct in6_addr *addr,
					uint16_t addr0, uint16_t addr1,
					uint16_t addr2, uint16_t addr3,
					uint16_t addr4, uint16_t addr5,
					uint16_t addr6, uint16_t addr7)
{
	const uint16_t words[] = {
		addr0, addr1, addr2, addr3, addr4, addr5, addr6, addr7
	};

	/* Words are given in host byte order; store them in network
	 * byte order as required for an IPv6 address.
	 */
	for (int i = 0; i < 8; i++) {
		UNALIGNED_PUT(htons(words[i]), &addr->s6_addr16[i]);
	}
}
/**
 * @brief Create link local allnodes multicast IPv6 address
 *
 * @param addr IPv6 address
 */
static inline void net_ipv6_addr_create_ll_allnodes_mcast(struct in6_addr *addr)
{
	/* ff02::1 (RFC 4291 link-local all nodes) */
	net_ipv6_addr_create(addr, 0xff02, 0, 0, 0, 0, 0, 0, 0x0001);
}
/**
 * @brief Create link local allrouters multicast IPv6 address
 *
 * @param addr IPv6 address
 */
static inline void net_ipv6_addr_create_ll_allrouters_mcast(struct in6_addr *addr)
{
	/* ff02::2 (RFC 4291 link-local all routers) */
	net_ipv6_addr_create(addr, 0xff02, 0, 0, 0, 0, 0, 0, 0x0002);
}
/**
 * @brief Create IPv4 mapped IPv6 address
 *
 * @param addr4 IPv4 address
 * @param addr6 IPv6 address to be created
 */
static inline void net_ipv6_addr_create_v4_mapped(const struct in_addr *addr4,
						  struct in6_addr *addr6)
{
	/* ::ffff:a.b.c.d - the IPv4 address forms the last 32 bits.
	 * net_ipv6_addr_create() takes host byte order words, hence the
	 * ntohs() on the two IPv4 halves.
	 */
	net_ipv6_addr_create(addr6, 0, 0, 0, 0, 0, 0xffff,
			     ntohs(addr4->s4_addr16[0]),
			     ntohs(addr4->s4_addr16[1]));
}
/**
 * @brief Is the IPv6 address an IPv4 mapped one. The v4 mapped addresses
 * look like \::ffff:a.b.c.d
 *
 * @param addr IPv6 address
 *
 * @return True if IPv6 address is an IPv4 mapped address, False otherwise.
 */
static inline bool net_ipv6_addr_is_v4_mapped(const struct in6_addr *addr)
{
	/* First 64 bits all zero, then the 0xffff marker word. Byte
	 * order does not matter for the compared constants (0, 0xffff).
	 */
	return UNALIGNED_GET(&addr->s6_addr32[0]) == 0 &&
	       UNALIGNED_GET(&addr->s6_addr32[1]) == 0 &&
	       UNALIGNED_GET(&addr->s6_addr16[5]) == 0xffff;
}
/**
 * @brief Create IPv6 address interface identifier
 *
 * @param addr IPv6 address
 * @param lladdr Link local address
 */
static inline void net_ipv6_addr_create_iid(struct in6_addr *addr,
					    struct net_linkaddr *lladdr)
{
	/* Link-local prefix fe80::/64, IID derived from the link layer
	 * address depending on its length.
	 */
	UNALIGNED_PUT(htonl(0xfe800000), &addr->s6_addr32[0]);
	UNALIGNED_PUT(0, &addr->s6_addr32[1]);

	switch (lladdr->len) {
	case 2:
		/* The generated IPv6 shall not toggle the
		 * Universal/Local bit. RFC 6282 ch 3.2.2
		 */
		if (lladdr->type == NET_LINK_IEEE802154) {
			UNALIGNED_PUT(0, &addr->s6_addr32[2]);
			addr->s6_addr[11] = 0xff;
			addr->s6_addr[12] = 0xfe;
			addr->s6_addr[13] = 0U;
			addr->s6_addr[14] = lladdr->addr[0];
			addr->s6_addr[15] = lladdr->addr[1];
		}
		break;
	case 6:
		/* EUI-64 expansion: ff:fe inserted in the middle of the
		 * 48-bit address. We do not toggle the Universal/Local bit
		 * in Bluetooth. See RFC 7668 ch 3.2.2
		 */
		memcpy(&addr->s6_addr[8], lladdr->addr, 3);
		addr->s6_addr[11] = 0xff;
		addr->s6_addr[12] = 0xfe;
		memcpy(&addr->s6_addr[13], lladdr->addr + 3, 3);
		if (lladdr->type == NET_LINK_ETHERNET) {
			/* Invert the Universal/Local bit (RFC 4291 app. A) */
			addr->s6_addr[8] ^= 0x02;
		}
		break;
	case 8:
		/* 64-bit address used as-is, Universal/Local bit inverted */
		memcpy(&addr->s6_addr[8], lladdr->addr, lladdr->len);
		addr->s6_addr[8] ^= 0x02;
		break;
	/* NOTE(review): other lengths leave bytes 8..15 untouched -
	 * presumably callers only pass the lengths above; confirm.
	 */
	}
}
/**
 * @brief Check if given address is based on link layer address
 *
 * @param addr IPv6 address to check
 * @param lladdr Link layer address the interface identifier is
 * expected to be derived from
 *
 * @return True if it is, False otherwise
 */
static inline bool net_ipv6_addr_based_on_ll(const struct in6_addr *addr,
					     const struct net_linkaddr *lladdr)
{
	if (!addr || !lladdr) {
		return false;
	}

	/* Mirror of net_ipv6_addr_create_iid(): check that the IID
	 * bytes (8..15) match what that function would generate.
	 */
	switch (lladdr->len) {
	case 2:
		/* 16-bit address padded as 00:00:00:ff:fe:.. */
		if (!memcmp(&addr->s6_addr[14], lladdr->addr, lladdr->len) &&
		    addr->s6_addr[8] == 0U &&
		    addr->s6_addr[9] == 0U &&
		    addr->s6_addr[10] == 0U &&
		    addr->s6_addr[11] == 0xff &&
		    addr->s6_addr[12] == 0xfe) {
			return true;
		}
		break;
	case 6:
		/* EUI-64 form with ff:fe in the middle; byte 8 carries
		 * the inverted Universal/Local bit for Ethernet.
		 */
		if (lladdr->type == NET_LINK_ETHERNET) {
			if (!memcmp(&addr->s6_addr[9], &lladdr->addr[1], 2) &&
			    !memcmp(&addr->s6_addr[13], &lladdr->addr[3], 3) &&
			    addr->s6_addr[11] == 0xff &&
			    addr->s6_addr[12] == 0xfe &&
			    (addr->s6_addr[8] ^ 0x02) == lladdr->addr[0]) {
				return true;
			}
		}
		break;
	case 8:
		/* Full 64-bit address with inverted Universal/Local bit */
		if (!memcmp(&addr->s6_addr[9], &lladdr->addr[1],
			    lladdr->len - 1) &&
		    (addr->s6_addr[8] ^ 0x02) == lladdr->addr[0]) {
			return true;
		}
		break;
	}

	return false;
}
/**
 * @brief Get sockaddr_in6 from sockaddr. This is a helper so that
 * the code calling this function can be made shorter.
 *
 * @param addr Socket address
 *
 * @return Pointer to IPv6 socket address
 */
static inline struct sockaddr_in6 *net_sin6(const struct sockaddr *addr)
{
	/* Note: the const qualifier is deliberately cast away. */
	return (struct sockaddr_in6 *)addr;
}
/**
 * @brief Get sockaddr_in from sockaddr. This is a helper so that
 * the code calling this function can be made shorter.
 *
 * @param addr Socket address
 *
 * @return Pointer to IPv4 socket address
 */
static inline struct sockaddr_in *net_sin(const struct sockaddr *addr)
{
	/* Note: the const qualifier is deliberately cast away. */
	return (struct sockaddr_in *)addr;
}
/**
 * @brief Get sockaddr_in6_ptr from sockaddr_ptr. This is a helper so that
 * the code calling this function can be made shorter.
 *
 * @param addr Socket address
 *
 * @return Pointer to IPv6 socket address
 */
static inline
struct sockaddr_in6_ptr *net_sin6_ptr(const struct sockaddr_ptr *addr)
{
	return (struct sockaddr_in6_ptr *)addr;
}
/**
 * @brief Get sockaddr_in_ptr from sockaddr_ptr. This is a helper so that
 * the code calling this function can be made shorter.
 *
 * @param addr Socket address
 *
 * @return Pointer to IPv4 socket address
 */
static inline
struct sockaddr_in_ptr *net_sin_ptr(const struct sockaddr_ptr *addr)
{
	return (struct sockaddr_in_ptr *)addr;
}
/**
 * @brief Get sockaddr_ll_ptr from sockaddr_ptr. This is a helper so that
 * the code calling this function can be made shorter.
 *
 * @param addr Socket address
 *
 * @return Pointer to linklayer socket address
 */
static inline
struct sockaddr_ll_ptr *net_sll_ptr(const struct sockaddr_ptr *addr)
{
	return (struct sockaddr_ll_ptr *)addr;
}
/**
 * @brief Get sockaddr_can_ptr from sockaddr_ptr. This is a helper so that
 * the code needing this functionality can be made shorter.
 *
 * @param addr Socket address
 *
 * @return Pointer to CAN socket address
 */
static inline
struct sockaddr_can_ptr *net_can_ptr(const struct sockaddr_ptr *addr)
{
	return (struct sockaddr_can_ptr *)addr;
}
/**
* @brief Convert a string to IP address.
*
* @param family IP address family (AF_INET or AF_INET6)
* @param src IP address in a null terminated string
* @param dst Pointer to struct in_addr if family is AF_INET or
* pointer to struct in6_addr if family is AF_INET6
*
* @note This function doesn't do precise error checking,
* do not use for untrusted strings.
*
* @return 0 if ok, < 0 if error
*/
__syscall int net_addr_pton(sa_family_t family, const char *src, void *dst);
/**
* @brief Convert IP address to string form.
*
* @param family IP address family (AF_INET or AF_INET6)
* @param src Pointer to struct in_addr if family is AF_INET or
* pointer to struct in6_addr if family is AF_INET6
* @param dst Buffer for IP address as a null terminated string
* @param size Number of bytes available in the buffer
*
* @return dst pointer if ok, NULL if error
*/
__syscall char *net_addr_ntop(sa_family_t family, const void *src,
char *dst, size_t size);
/**
* @brief Parse a string that contains either IPv4 or IPv6 address
* and optional port, and store the information in user supplied
* sockaddr struct.
*
* @details Syntax of the IP address string:
* 192.0.2.1:80
* 192.0.2.42
* [2001:db8::1]:8080
* [2001:db8::2]
* 2001:db::42
* Note that the str_len parameter is used to restrict the amount of
* characters that are checked. If the string does not contain port
* number, then the port number in sockaddr is not modified.
*
* @param str String that contains the IP address.
* @param str_len Length of the string to be parsed.
* @param addr Pointer to user supplied struct sockaddr.
*
* @return True if parsing could be done, false otherwise.
*/
bool net_ipaddr_parse(const char *str, size_t str_len,
struct sockaddr *addr);
/**
* @brief Set the default port in the sockaddr structure.
* If the port is already set, then do nothing.
*
* @param addr Pointer to user supplied struct sockaddr.
* @param default_port Default port number to set.
*
* @return 0 if ok, <0 if error
*/
int net_port_set_default(struct sockaddr *addr, uint16_t default_port);
/**
 * @brief Compare TCP sequence numbers.
 *
 * @details This function compares TCP sequence numbers,
 * accounting for wraparound effects.
 *
 * @param seq1 First sequence number
 * @param seq2 Second sequence number
 *
 * @return < 0 if seq1 < seq2, 0 if seq1 == seq2, > 0 if seq1 > seq2
 */
static inline int32_t net_tcp_seq_cmp(uint32_t seq1, uint32_t seq2)
{
	/* Unsigned subtraction wraps modulo 2^32; reinterpreting the
	 * result as signed yields the RFC 1982 style serial comparison.
	 */
	return (int32_t)(seq1 - seq2);
}
/**
 * @brief Check that one TCP sequence number is greater.
 *
 * @details This is convenience function on top of net_tcp_seq_cmp().
 *
 * @param seq1 First sequence number
 * @param seq2 Second sequence number
 *
 * @return True if seq1 > seq2
 */
static inline bool net_tcp_seq_greater(uint32_t seq1, uint32_t seq2)
{
	return net_tcp_seq_cmp(seq1, seq2) > 0;
}
/**
* @brief Convert a string of hex values to array of bytes.
*
* @details The syntax of the string is "ab:02:98:fa:42:01"
*
* @param buf Pointer to memory where the bytes are written.
* @param buf_len Length of the memory area.
* @param src String of bytes.
*
* @return 0 if ok, <0 if error
*/
int net_bytes_from_str(uint8_t *buf, int buf_len, const char *src);
/**
* @brief Convert Tx network packet priority to traffic class so we can place
* the packet into correct Tx queue.
*
* @param prio Network priority
*
* @return Tx traffic class that handles that priority network traffic.
*/
int net_tx_priority2tc(enum net_priority prio);
/**
* @brief Convert Rx network packet priority to traffic class so we can place
* the packet into correct Rx queue.
*
* @param prio Network priority
*
* @return Rx traffic class that handles that priority network traffic.
*/
int net_rx_priority2tc(enum net_priority prio);
/**
 * @brief Convert network packet VLAN priority to network packet priority so we
 * can place the packet into correct queue.
 *
 * @param priority VLAN priority
 *
 * @return Network priority
 */
static inline enum net_priority net_vlan2priority(uint8_t priority)
{
	/* PCP value -> network priority mapping per IEEE 802.1Q */
	static const uint8_t pcp_to_prio[] = {
		NET_PRIORITY_BE, NET_PRIORITY_BK, NET_PRIORITY_EE,
		NET_PRIORITY_CA, NET_PRIORITY_VI, NET_PRIORITY_VO,
		NET_PRIORITY_IC, NET_PRIORITY_NC
	};

	if (priority < ARRAY_SIZE(pcp_to_prio)) {
		return (enum net_priority)pcp_to_prio[priority];
	}

	/* Out-of-range PCP values fall back to Best Effort */
	return NET_PRIORITY_BE;
}
/**
 * @brief Convert network packet priority to network packet VLAN priority.
 *
 * @param priority Packet priority
 *
 * @return VLAN priority (PCP)
 */
static inline uint8_t net_priority2vlan(enum net_priority priority)
{
	/* The conversion works both ways: the IEEE 802.1Q mapping used
	 * by net_vlan2priority() is reused for the reverse direction.
	 */
	return (uint8_t)net_vlan2priority(priority);
}
/**
* @brief Return network address family value as a string. This is only usable
* for debugging.
*
* @param family Network address family code
*
* @return Network address family as a string, or NULL if family is unknown.
*/
const char *net_family2str(sa_family_t family);
/**
 * @brief Add IPv6 prefix as a privacy extension filter.
 *
 * @details Note that the filters can either allow or deny listing.
 *
 * @param addr IPv6 prefix
 * @param is_denylist Tells if this filter is for allowing or denying listing.
 *
 * @return 0 if ok, <0 if error
 */
#if defined(CONFIG_NET_IPV6_PE)
int net_ipv6_pe_add_filter(struct in6_addr *addr, bool is_denylist);
#else
/* Stub returning -ENOTSUP when IPv6 privacy extensions are disabled. */
static inline int net_ipv6_pe_add_filter(struct in6_addr *addr,
					 bool is_denylist)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(is_denylist);

	return -ENOTSUP;
}
#endif /* CONFIG_NET_IPV6_PE */
/**
 * @brief Delete IPv6 prefix from privacy extension filter list.
 *
 * @param addr IPv6 prefix
 *
 * @return 0 if ok, <0 if error
 */
#if defined(CONFIG_NET_IPV6_PE)
int net_ipv6_pe_del_filter(struct in6_addr *addr);
#else
/* Stub returning -ENOTSUP when IPv6 privacy extensions are disabled. */
static inline int net_ipv6_pe_del_filter(struct in6_addr *addr)
{
	ARG_UNUSED(addr);

	return -ENOTSUP;
}
#endif /* CONFIG_NET_IPV6_PE */
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/net_ip.h>
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_NET_IP_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_ip.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 14,176 |
```objective-c
/*
*
*/
/** @file
* @brief Dummy layer 2
*
* This is not to be included by the application.
*/
#ifndef ZEPHYR_INCLUDE_NET_DUMMY_H_
#define ZEPHYR_INCLUDE_NET_DUMMY_H_
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Dummy L2/driver support functions
* @defgroup dummy Dummy L2/driver Support Functions
* @since 1.14
* @version 0.8.0
* @ingroup networking
* @{
*/
/** Dummy L2 API operations. */
struct dummy_api {
	/**
	 * The net_if_api must be placed in first position in this
	 * struct so that we are compatible with network interface API.
	 */
	struct net_if_api iface_api;

	/** Send a network packet.
	 *
	 * NOTE(review): presumably returns 0 on success and a negative
	 * errno otherwise - confirm against the L2 implementation.
	 */
	int (*send)(const struct device *dev, struct net_pkt *pkt);

	/**
	 * Receive a network packet (only limited use for this, for example
	 * receiving capturing packets and post processing them).
	 */
	enum net_verdict (*recv)(struct net_if *iface, struct net_pkt *pkt);

	/** Start the device. Called when the bound network interface is brought up. */
	int (*start)(const struct device *dev);

	/** Stop the device. Called when the bound network interface is taken down. */
	int (*stop)(const struct device *dev);
};
/* Make sure that the network interface API is properly setup inside
* dummy API struct (it is the first one).
*/
BUILD_ASSERT(offsetof(struct dummy_api, iface_api) == 0);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_DUMMY_H_ */
``` | /content/code_sandbox/include/zephyr/net/dummy.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 375 |
```objective-c
/**
* @file
* @brief BSD Sockets compatible API definitions
*
* An API for applications to use BSD Sockets like API.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKET_H_
#define ZEPHYR_INCLUDE_NET_SOCKET_H_
/**
* @brief BSD Sockets compatible API
* @defgroup bsd_sockets BSD Sockets compatible API
* @since 1.9
* @version 1.0.0
* @ingroup networking
* @{
*/
#include <zephyr/kernel.h>
#include <sys/types.h>
#include <zephyr/types.h>
#include <zephyr/device.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/socket_select.h>
#include <zephyr/net/socket_poll.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/net/dns_resolve.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @name Options for poll()
* @{
*/
/* ZSOCK_POLL* values are compatible with Linux */
/** zsock_poll: Poll for readability */
#define ZSOCK_POLLIN 1
/** zsock_poll: Poll for exceptional condition */
#define ZSOCK_POLLPRI 2
/** zsock_poll: Poll for writability */
#define ZSOCK_POLLOUT 4
/** zsock_poll: Poll results in error condition (output value only) */
#define ZSOCK_POLLERR 8
/** zsock_poll: Poll detected closed connection (output value only) */
#define ZSOCK_POLLHUP 0x10
/** zsock_poll: Invalid socket (output value only) */
#define ZSOCK_POLLNVAL 0x20
/** @} */
/**
* @name Options for sending and receiving data
* @{
*/
/** zsock_recv: Read data without removing it from socket input queue */
#define ZSOCK_MSG_PEEK 0x02
/** zsock_recvmsg: Control data buffer too small.
*/
#define ZSOCK_MSG_CTRUNC 0x08
/** zsock_recv: return the real length of the datagram, even when it was longer
* than the passed buffer
*/
#define ZSOCK_MSG_TRUNC 0x20
/** zsock_recv/zsock_send: Override operation to non-blocking */
#define ZSOCK_MSG_DONTWAIT 0x40
/** zsock_recv: block until the full amount of data can be returned */
#define ZSOCK_MSG_WAITALL 0x100
/** @} */
/**
* @name Options for shutdown() function
* @{
*/
/* Well-known values, e.g. from Linux man 2 shutdown:
* "The constants SHUT_RD, SHUT_WR, SHUT_RDWR have the value 0, 1, 2,
* respectively". Some software uses numeric values.
*/
/** zsock_shutdown: Shut down for reading */
#define ZSOCK_SHUT_RD 0
/** zsock_shutdown: Shut down for writing */
#define ZSOCK_SHUT_WR 1
/** zsock_shutdown: Shut down for both reading and writing */
#define ZSOCK_SHUT_RDWR 2
/** @} */
/**
* @defgroup secure_sockets_options Socket options for TLS
* @since 1.13
* @version 0.8.0
* @{
*/
/**
* @name Socket options for TLS
* @{
*/
/** Protocol level for TLS.
* Here, the same socket protocol level for TLS as in Linux was used.
*/
#define SOL_TLS 282
/** Socket option to select TLS credentials to use. It accepts and returns an
* array of sec_tag_t that indicate which TLS credentials should be used with
* specific socket.
*/
#define TLS_SEC_TAG_LIST 1
/** Write-only socket option to set hostname. It accepts a string containing
* the hostname (may be NULL to disable hostname verification). By default,
* hostname check is enforced for TLS clients.
*/
#define TLS_HOSTNAME 2
/** Socket option to select ciphersuites to use. It accepts and returns an array
* of integers with IANA assigned ciphersuite identifiers.
* If not set, socket will allow all ciphersuites available in the system
* (mbedTLS default behavior).
*/
#define TLS_CIPHERSUITE_LIST 3
/** Read-only socket option to read a ciphersuite chosen during TLS handshake.
* It returns an integer containing an IANA assigned ciphersuite identifier
* of chosen ciphersuite.
*/
#define TLS_CIPHERSUITE_USED 4
/** Write-only socket option to set peer verification level for TLS connection.
* This option accepts an integer with a peer verification level, compatible
* with mbedTLS values:
* - 0 - none
* - 1 - optional
* - 2 - required
*
* If not set, socket will use mbedTLS defaults (none for servers, required
* for clients).
*/
#define TLS_PEER_VERIFY 5
/** Write-only socket option to set role for DTLS connection. This option
* is irrelevant for TLS connections, as for them role is selected based on
* connect()/listen() usage. By default, DTLS will assume client role.
* This option accepts an integer with a TLS role, compatible with
* mbedTLS values:
* - 0 - client
* - 1 - server
*/
#define TLS_DTLS_ROLE 6
/** Socket option for setting the supported Application Layer Protocols.
* It accepts and returns a const char array of NULL terminated strings
* representing the supported application layer protocols listed during
* the TLS handshake.
*/
#define TLS_ALPN_LIST 7
/** Socket option to set DTLS min handshake timeout. The timeout starts at min,
 * and upon retransmission the timeout is doubled until max is reached.
 * Min and max arguments are separate options. The time unit is ms.
 */
#define TLS_DTLS_HANDSHAKE_TIMEOUT_MIN 8
/** Socket option to set DTLS max handshake timeout. The timeout starts at min,
 * and upon retransmission the timeout is doubled until max is reached.
 * Min and max arguments are separate options. The time unit is ms.
 */
#define TLS_DTLS_HANDSHAKE_TIMEOUT_MAX 9
/** Socket option for preventing certificates from being copied to the mbedTLS
* heap if possible. The option is only effective for DER certificates and is
* ignored for PEM certificates.
*/
#define TLS_CERT_NOCOPY 10
/** TLS socket option to use with offloading. The option instructs the network
* stack only to offload underlying TCP/UDP communication. The TLS/DTLS
* operation is handled by a native TLS/DTLS socket implementation from Zephyr.
*
* Note, that this option is only applicable if socket dispatcher is used
* (CONFIG_NET_SOCKETS_OFFLOAD_DISPATCHER is enabled).
* In such case, it should be the first socket option set on a newly created
* socket. After that, the application may use SO_BINDTODEVICE to choose the
* dedicated network interface for the underlying TCP/UDP socket.
*/
#define TLS_NATIVE 11
/** Socket option to control TLS session caching on a socket. Accepted values:
* - 0 - Disabled.
* - 1 - Enabled.
*/
#define TLS_SESSION_CACHE 12
/** Write-only socket option to purge session cache immediately.
* This option accepts any value.
*/
#define TLS_SESSION_CACHE_PURGE 13
/** Write-only socket option to control DTLS CID.
* The option accepts an integer, indicating the setting.
* Accepted values for the option are: 0, 1 and 2.
* Effective when set before connecting to the socket.
* - 0 - DTLS CID will be disabled.
* - 1 - DTLS CID will be enabled, and a 0 length CID value to be sent to the
* peer.
* - 2 - DTLS CID will be enabled, and the most recent value set with
* TLS_DTLS_CID_VALUE will be sent to the peer. Otherwise, a random value
* will be used.
*/
#define TLS_DTLS_CID 14
/** Read-only socket option to get DTLS CID status.
* The option accepts a pointer to an integer, indicating the setting upon
* return.
* Returned values for the option are:
* - 0 - DTLS CID is disabled.
* - 1 - DTLS CID is received on the downlink.
* - 2 - DTLS CID is sent to the uplink.
* - 3 - DTLS CID is used in both directions.
*/
#define TLS_DTLS_CID_STATUS 15
/** Socket option to set or get the value of the DTLS connection ID to be
* used for the DTLS session.
* The option accepts a byte array, holding the CID value.
*/
#define TLS_DTLS_CID_VALUE 16
/** Read-only socket option to get the value of the DTLS connection ID
* received from the peer.
* The option accepts a pointer to a byte array, holding the CID value upon
* return. The optlen returned will be 0 if the peer did not provide a
* connection ID, otherwise will contain the length of the CID value.
*/
#define TLS_DTLS_PEER_CID_VALUE 17
/** Socket option to configure DTLS socket behavior on connect().
* If set, DTLS connect() will execute the handshake with the configured peer.
* This is the default behavior.
* Otherwise, DTLS connect() will only configure peer address (as with regular
* UDP socket) and will not attempt to execute DTLS handshake. The handshake
* will take place in consecutive send()/recv() call.
*/
#define TLS_DTLS_HANDSHAKE_ON_CONNECT 18
/* Valid values for @ref TLS_PEER_VERIFY option */
#define TLS_PEER_VERIFY_NONE 0 /**< Peer verification disabled. */
#define TLS_PEER_VERIFY_OPTIONAL 1 /**< Peer verification optional. */
#define TLS_PEER_VERIFY_REQUIRED 2 /**< Peer verification required. */
/* Valid values for @ref TLS_DTLS_ROLE option */
#define TLS_DTLS_ROLE_CLIENT 0 /**< Client role in a DTLS session. */
#define TLS_DTLS_ROLE_SERVER 1 /**< Server role in a DTLS session. */
/* Valid values for @ref TLS_CERT_NOCOPY option */
#define TLS_CERT_NOCOPY_NONE 0 /**< Cert duplicated in heap */
#define TLS_CERT_NOCOPY_OPTIONAL 1 /**< Cert not copied in heap if DER */
/* Valid values for @ref TLS_SESSION_CACHE option */
#define TLS_SESSION_CACHE_DISABLED 0 /**< Disable TLS session caching. */
#define TLS_SESSION_CACHE_ENABLED 1 /**< Enable TLS session caching. */
/* Valid values for @ref TLS_DTLS_CID (Connection ID) option */
#define TLS_DTLS_CID_DISABLED 0 /**< CID is disabled */
#define TLS_DTLS_CID_SUPPORTED 1 /**< CID is supported */
#define TLS_DTLS_CID_ENABLED 2 /**< CID is enabled */
/* Valid values for @ref TLS_DTLS_CID_STATUS option */
#define TLS_DTLS_CID_STATUS_DISABLED 0 /**< CID is disabled */
#define TLS_DTLS_CID_STATUS_DOWNLINK 1 /**< CID is in use by us */
#define TLS_DTLS_CID_STATUS_UPLINK 2 /**< CID is in use by peer */
#define TLS_DTLS_CID_STATUS_BIDIRECTIONAL 3 /**< CID is in use by us and peer */
/** @} */ /* for @name */
/** @} */ /* for @defgroup */
/**
 * @brief Definition used when querying address information.
 *
 * A linked list of these descriptors is returned by getaddrinfo(). The struct
 * is also passed as hints when calling the getaddrinfo() function.
 */
struct zsock_addrinfo {
	struct zsock_addrinfo *ai_next; /**< Pointer to next address entry */
	int ai_flags;             /**< Additional options */
	int ai_family;            /**< Address family of the returned addresses */
	int ai_socktype;          /**< Socket type, for example SOCK_STREAM or SOCK_DGRAM */
	int ai_protocol;          /**< Protocol for addresses, 0 means any protocol */
	int ai_eflags;            /**< Extended flags for special usage */
	socklen_t ai_addrlen;     /**< Length of the socket address */
	struct sockaddr *ai_addr; /**< Pointer to the address */
	char *ai_canonname;       /**< Optional official name of the host */

	/** @cond INTERNAL_HIDDEN */
	/* NOTE(review): presumably in-struct backing storage that
	 * ai_addr / ai_canonname point at - confirm in the resolver.
	 */
	struct sockaddr _ai_addr;
	char _ai_canonname[DNS_MAX_NAME_SIZE + 1];
	/** @endcond */
};
/**
* @brief Obtain a file descriptor's associated net context
*
* With CONFIG_USERSPACE enabled, the kernel's object permission system
* must apply to socket file descriptors. When a socket is opened, by default
* only the caller has permission, access by other threads will fail unless
* they have been specifically granted permission.
*
* This is achieved by tagging data structure definitions that implement the
* underlying object associated with a network socket file descriptor with
* '__net_socket`. All pointers to instances of these will be known to the
* kernel as kernel objects with type K_OBJ_NET_SOCKET.
*
* This API is intended for threads that need to grant access to the object
* associated with a particular file descriptor to another thread. The
* returned pointer represents the underlying K_OBJ_NET_SOCKET and
* may be passed to APIs like k_object_access_grant().
*
* In a system like Linux which has the notion of threads running in processes
* in a shared virtual address space, this sort of management is unnecessary as
* the scope of file descriptors is implemented at the process level.
*
* However in Zephyr the file descriptor scope is global, and MPU-based systems
* are not able to implement a process-like model due to the lack of memory
* virtualization hardware. They use discrete object permissions and memory
* domains instead to define thread access scope.
*
* User threads will have no direct access to the returned object
* and will fault if they try to access its memory; the pointer can only be
* used to make permission assignment calls, which follow exactly the rules
* for other kernel objects like device drivers and IPC.
*
* @param sock file descriptor
* @return pointer to associated network socket object, or NULL if the
* file descriptor wasn't valid or the caller had no access permission
*/
__syscall void *zsock_get_context_object(int sock);
/**
* @brief Create a network socket
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``socket()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*
* If CONFIG_USERSPACE is enabled, the caller will be granted access to the
* context object associated with the returned file descriptor.
* @see zsock_get_context_object()
*
*/
__syscall int zsock_socket(int family, int type, int proto);
/**
* @brief Create an unnamed pair of connected sockets
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``socketpair()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_socketpair(int family, int type, int proto, int *sv);
/**
* @brief Close a network socket
*
* @details
* @rst
* Close a network socket.
* This function is also exposed as ``close()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined (in which case it
* may conflict with generic POSIX ``close()`` function).
* @endrst
*/
__syscall int zsock_close(int sock);
/**
* @brief Shutdown socket send/receive operations
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description, but currently this function has no effect in
* Zephyr and provided solely for compatibility with existing code.
* This function is also exposed as ``shutdown()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_shutdown(int sock, int how);
/**
* @brief Bind a socket to a local network address
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``bind()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_bind(int sock, const struct sockaddr *addr,
socklen_t addrlen);
/**
* @brief Connect a socket to a peer network address
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``connect()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_connect(int sock, const struct sockaddr *addr,
socklen_t addrlen);
/**
* @brief Set up a STREAM socket to accept peer connections
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``listen()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_listen(int sock, int backlog);
/**
* @brief Accept a connection on listening socket
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``accept()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_accept(int sock, struct sockaddr *addr, socklen_t *addrlen);
/**
* @brief Send data to an arbitrary network address
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``sendto()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall ssize_t zsock_sendto(int sock, const void *buf, size_t len,
int flags, const struct sockaddr *dest_addr,
socklen_t addrlen);
/**
 * @brief Send data to a connected peer
 *
 * @details
 * @rst
 * See `POSIX.1-2017 article
 * <path_to_url`__
 * for normative description.
 * This function is also exposed as ``send()``
 * if :kconfig:option:`CONFIG_POSIX_API` is defined.
 * @endrst
 */
static inline ssize_t zsock_send(int sock, const void *buf, size_t len,
				 int flags)
{
	/* send() on a connected socket is sendto() with no explicit
	 * destination address.
	 */
	const struct sockaddr *dest = NULL;

	return zsock_sendto(sock, buf, len, flags, dest, 0);
}
/**
* @brief Send data to an arbitrary network address
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``sendmsg()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall ssize_t zsock_sendmsg(int sock, const struct msghdr *msg,
int flags);
/**
* @brief Receive data from an arbitrary network address
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``recvfrom()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall ssize_t zsock_recvfrom(int sock, void *buf, size_t max_len,
int flags, struct sockaddr *src_addr,
socklen_t *addrlen);
/**
* @brief Receive a message from an arbitrary network address
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``recvmsg()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall ssize_t zsock_recvmsg(int sock, struct msghdr *msg, int flags);
/**
* @brief Receive data from a connected peer
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``recv()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
static inline ssize_t zsock_recv(int sock, void *buf, size_t max_len,
int flags)
{
return zsock_recvfrom(sock, buf, max_len, flags, NULL, NULL);
}
/**
* @brief Control blocking/non-blocking mode of a socket
*
* @details
* @rst
 * This function only allows configuring a socket for blocking or
 * non-blocking operation (O_NONBLOCK).
* This function is also exposed as ``fcntl()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined (in which case
* it may conflict with generic POSIX ``fcntl()`` function).
* @endrst
*/
__syscall int zsock_fcntl_impl(int sock, int cmd, int flags);
/** @cond INTERNAL_HIDDEN */
/*
 * Variadic shim: recent GCC releases type-check even macro invocations,
 * so the optional third argument must be pulled out via va_arg before
 * calling the typed implementation.
 */
static inline int zsock_fcntl_wrapper(int sock, int cmd, ...)
{
	int flag_arg;
	va_list ap;

	va_start(ap, cmd);
	flag_arg = va_arg(ap, int);
	va_end(ap);

	return zsock_fcntl_impl(sock, cmd, flag_arg);
}
#define zsock_fcntl zsock_fcntl_wrapper
/** @endcond */
/**
* @brief Control underlying socket parameters
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function enables querying or manipulating underlying socket parameters.
* Currently supported @p request values include ``ZFD_IOCTL_FIONBIO``, and
* ``ZFD_IOCTL_FIONREAD``, to set non-blocking mode, and query the number of
* bytes available to read, respectively.
* This function is also exposed as ``ioctl()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined (in which case
* it may conflict with generic POSIX ``ioctl()`` function).
* @endrst
*/
__syscall int zsock_ioctl_impl(int sock, unsigned long request, va_list ap);
/** @cond INTERNAL_HIDDEN */
/* Variadic shim: forwards the whole va_list to the implementation, which
 * interprets it according to the request code.
 */
static inline int zsock_ioctl_wrapper(int sock, unsigned long request, ...)
{
	va_list ap;
	int res;

	va_start(ap, request);
	res = zsock_ioctl_impl(sock, request, ap);
	va_end(ap);

	return res;
}
#define zsock_ioctl zsock_ioctl_wrapper
/** @endcond */
/**
* @brief Efficiently poll multiple sockets for events
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``poll()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined (in which case
* it may conflict with generic POSIX ``poll()`` function).
* @endrst
*/
__syscall int zsock_poll(struct zsock_pollfd *fds, int nfds, int timeout);
/**
* @brief Get various socket options
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description. In Zephyr this function supports a subset of
* socket options described by POSIX, but also some additional options
* available in Linux (some options are dummy and provided to ease porting
* of existing code).
* This function is also exposed as ``getsockopt()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_getsockopt(int sock, int level, int optname,
void *optval, socklen_t *optlen);
/**
* @brief Set various socket options
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description. In Zephyr this function supports a subset of
* socket options described by POSIX, but also some additional options
* available in Linux (some options are dummy and provided to ease porting
* of existing code).
* This function is also exposed as ``setsockopt()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_setsockopt(int sock, int level, int optname,
const void *optval, socklen_t optlen);
/**
* @brief Get peer name
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``getpeername()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_getpeername(int sock, struct sockaddr *addr,
socklen_t *addrlen);
/**
* @brief Get socket name
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``getsockname()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_getsockname(int sock, struct sockaddr *addr,
socklen_t *addrlen);
/**
* @brief Get local host name
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``gethostname()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_gethostname(char *buf, size_t len);
/**
* @brief Convert network address from internal to numeric ASCII form
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``inet_ntop()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
static inline char *zsock_inet_ntop(sa_family_t family, const void *src,
				    char *dst, size_t size)
{
	/* Delegate directly to the native Zephyr address formatter. */
	char *result = net_addr_ntop(family, src, dst, size);

	return result;
}
/**
* @brief Convert network address from numeric ASCII form to internal representation
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``inet_pton()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
__syscall int zsock_inet_pton(sa_family_t family, const char *src, void *dst);
/** @cond INTERNAL_HIDDEN */
__syscall int z_zsock_getaddrinfo_internal(const char *host,
const char *service,
const struct zsock_addrinfo *hints,
struct zsock_addrinfo *res);
/** @endcond */
/* Flags for getaddrinfo() hints. */
/**
* @name Flags for getaddrinfo() hints
* @{
*/
/** Address for bind() (vs for connect()) */
#define AI_PASSIVE 0x1
/** Fill in ai_canonname */
#define AI_CANONNAME 0x2
/** Assume host address is in numeric notation, don't DNS lookup */
#define AI_NUMERICHOST 0x4
/** May return IPv4 mapped address for IPv6 */
#define AI_V4MAPPED 0x8
/** May return both native IPv6 and mapped IPv4 address for IPv6 */
#define AI_ALL 0x10
/** IPv4/IPv6 support depends on local system config */
#define AI_ADDRCONFIG 0x20
/** Assume service (port) is numeric */
#define AI_NUMERICSERV 0x400
/** Extra flags present (see RFC 5014) */
#define AI_EXTFLAGS 0x800
/** @} */
/**
* @brief Resolve a domain name to one or more network addresses
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``getaddrinfo()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
int zsock_getaddrinfo(const char *host, const char *service,
const struct zsock_addrinfo *hints,
struct zsock_addrinfo **res);
/**
* @brief Free results returned by zsock_getaddrinfo()
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``freeaddrinfo()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
void zsock_freeaddrinfo(struct zsock_addrinfo *ai);
/**
* @brief Convert zsock_getaddrinfo() error code to textual message
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``gai_strerror()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
const char *zsock_gai_strerror(int errcode);
/**
* @name Flags for getnameinfo()
* @{
*/
/** zsock_getnameinfo(): Resolve to numeric address. */
#define NI_NUMERICHOST 1
/** zsock_getnameinfo(): Resolve to numeric port number. */
#define NI_NUMERICSERV 2
/** zsock_getnameinfo(): Return only hostname instead of FQDN */
#define NI_NOFQDN 4
/** zsock_getnameinfo(): Dummy option for compatibility */
#define NI_NAMEREQD 8
/** zsock_getnameinfo(): Dummy option for compatibility */
#define NI_DGRAM 16
/* POSIX extensions */
/** zsock_getnameinfo(): Max supported hostname length */
#ifndef NI_MAXHOST
#define NI_MAXHOST 64
#endif
/** @} */
/**
* @brief Resolve a network address to a domain name or ASCII address
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``getnameinfo()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
int zsock_getnameinfo(const struct sockaddr *addr, socklen_t addrlen,
char *host, socklen_t hostlen,
char *serv, socklen_t servlen, int flags);
#if defined(CONFIG_NET_SOCKETS_POSIX_NAMES)
/**
* @name Socket APIs available if CONFIG_NET_SOCKETS_POSIX_NAMES is enabled
* @{
*/
/** POSIX wrapper for @ref zsock_pollfd */
#define pollfd zsock_pollfd
/* NOTE: The functions below are one-line forwarding shims. Each simply
 * delegates to its zsock_*() counterpart so applications built with
 * CONFIG_NET_SOCKETS_POSIX_NAMES can use the standard POSIX names.
 */
/** POSIX wrapper for @ref zsock_socket */
static inline int socket(int family, int type, int proto)
{
	return zsock_socket(family, type, proto);
}
/** POSIX wrapper for @ref zsock_socketpair */
static inline int socketpair(int family, int type, int proto, int sv[2])
{
	return zsock_socketpair(family, type, proto, sv);
}
/** POSIX wrapper for @ref zsock_close */
static inline int close(int sock)
{
	return zsock_close(sock);
}
/** POSIX wrapper for @ref zsock_shutdown */
static inline int shutdown(int sock, int how)
{
	return zsock_shutdown(sock, how);
}
/** POSIX wrapper for @ref zsock_bind */
static inline int bind(int sock, const struct sockaddr *addr, socklen_t addrlen)
{
	return zsock_bind(sock, addr, addrlen);
}
/** POSIX wrapper for @ref zsock_connect */
static inline int connect(int sock, const struct sockaddr *addr,
			  socklen_t addrlen)
{
	return zsock_connect(sock, addr, addrlen);
}
/** POSIX wrapper for @ref zsock_listen */
static inline int listen(int sock, int backlog)
{
	return zsock_listen(sock, backlog);
}
/** POSIX wrapper for @ref zsock_accept */
static inline int accept(int sock, struct sockaddr *addr, socklen_t *addrlen)
{
	return zsock_accept(sock, addr, addrlen);
}
/** POSIX wrapper for @ref zsock_send */
static inline ssize_t send(int sock, const void *buf, size_t len, int flags)
{
	return zsock_send(sock, buf, len, flags);
}
/** POSIX wrapper for @ref zsock_recv */
static inline ssize_t recv(int sock, void *buf, size_t max_len, int flags)
{
	return zsock_recv(sock, buf, max_len, flags);
}
/** POSIX wrapper for @ref zsock_sendto */
static inline ssize_t sendto(int sock, const void *buf, size_t len, int flags,
			     const struct sockaddr *dest_addr,
			     socklen_t addrlen)
{
	return zsock_sendto(sock, buf, len, flags, dest_addr, addrlen);
}
/** POSIX wrapper for @ref zsock_sendmsg */
static inline ssize_t sendmsg(int sock, const struct msghdr *message,
			      int flags)
{
	return zsock_sendmsg(sock, message, flags);
}
/** POSIX wrapper for @ref zsock_recvfrom */
static inline ssize_t recvfrom(int sock, void *buf, size_t max_len, int flags,
			       struct sockaddr *src_addr, socklen_t *addrlen)
{
	return zsock_recvfrom(sock, buf, max_len, flags, src_addr, addrlen);
}
/** POSIX wrapper for @ref zsock_recvmsg */
static inline ssize_t recvmsg(int sock, struct msghdr *msg, int flags)
{
	return zsock_recvmsg(sock, msg, flags);
}
/** POSIX wrapper for @ref zsock_poll */
static inline int poll(struct zsock_pollfd *fds, int nfds, int timeout)
{
	return zsock_poll(fds, nfds, timeout);
}
/* Forwarding shims, continued: each delegates to its zsock_*()
 * counterpart under the standard POSIX name
 * (CONFIG_NET_SOCKETS_POSIX_NAMES).
 */
/** POSIX wrapper for @ref zsock_getsockopt */
static inline int getsockopt(int sock, int level, int optname,
			     void *optval, socklen_t *optlen)
{
	return zsock_getsockopt(sock, level, optname, optval, optlen);
}
/** POSIX wrapper for @ref zsock_setsockopt */
static inline int setsockopt(int sock, int level, int optname,
			     const void *optval, socklen_t optlen)
{
	return zsock_setsockopt(sock, level, optname, optval, optlen);
}
/** POSIX wrapper for @ref zsock_getpeername */
static inline int getpeername(int sock, struct sockaddr *addr,
			      socklen_t *addrlen)
{
	return zsock_getpeername(sock, addr, addrlen);
}
/** POSIX wrapper for @ref zsock_getsockname */
static inline int getsockname(int sock, struct sockaddr *addr,
			      socklen_t *addrlen)
{
	return zsock_getsockname(sock, addr, addrlen);
}
/** POSIX wrapper for @ref zsock_getaddrinfo */
static inline int getaddrinfo(const char *host, const char *service,
			      const struct zsock_addrinfo *hints,
			      struct zsock_addrinfo **res)
{
	return zsock_getaddrinfo(host, service, hints, res);
}
/** POSIX wrapper for @ref zsock_freeaddrinfo */
static inline void freeaddrinfo(struct zsock_addrinfo *ai)
{
	zsock_freeaddrinfo(ai);
}
/** POSIX wrapper for @ref zsock_gai_strerror */
static inline const char *gai_strerror(int errcode)
{
	return zsock_gai_strerror(errcode);
}
/** POSIX wrapper for @ref zsock_getnameinfo */
static inline int getnameinfo(const struct sockaddr *addr, socklen_t addrlen,
			      char *host, socklen_t hostlen,
			      char *serv, socklen_t servlen, int flags)
{
	return zsock_getnameinfo(addr, addrlen, host, hostlen,
				 serv, servlen, flags);
}
/** POSIX wrapper for @ref zsock_addrinfo */
#define addrinfo zsock_addrinfo
/** POSIX wrapper for @ref zsock_gethostname */
static inline int gethostname(char *buf, size_t len)
{
	return zsock_gethostname(buf, len);
}
/** POSIX wrapper for @ref zsock_inet_pton */
static inline int inet_pton(sa_family_t family, const char *src, void *dst)
{
	return zsock_inet_pton(family, src, dst);
}
/** POSIX wrapper for @ref zsock_inet_ntop */
static inline char *inet_ntop(sa_family_t family, const void *src, char *dst,
			      size_t size)
{
	return zsock_inet_ntop(family, src, dst, size);
}
/** POSIX wrapper for @ref ZSOCK_POLLIN */
#define POLLIN ZSOCK_POLLIN
/** POSIX wrapper for @ref ZSOCK_POLLOUT */
#define POLLOUT ZSOCK_POLLOUT
/** POSIX wrapper for @ref ZSOCK_POLLERR */
#define POLLERR ZSOCK_POLLERR
/** POSIX wrapper for @ref ZSOCK_POLLHUP */
#define POLLHUP ZSOCK_POLLHUP
/** POSIX wrapper for @ref ZSOCK_POLLNVAL */
#define POLLNVAL ZSOCK_POLLNVAL
/** POSIX wrapper for @ref ZSOCK_MSG_PEEK */
#define MSG_PEEK ZSOCK_MSG_PEEK
/** POSIX wrapper for @ref ZSOCK_MSG_CTRUNC */
#define MSG_CTRUNC ZSOCK_MSG_CTRUNC
/** POSIX wrapper for @ref ZSOCK_MSG_TRUNC */
#define MSG_TRUNC ZSOCK_MSG_TRUNC
/** POSIX wrapper for @ref ZSOCK_MSG_DONTWAIT */
#define MSG_DONTWAIT ZSOCK_MSG_DONTWAIT
/** POSIX wrapper for @ref ZSOCK_MSG_WAITALL */
#define MSG_WAITALL ZSOCK_MSG_WAITALL
/** POSIX wrapper for @ref ZSOCK_SHUT_RD */
#define SHUT_RD ZSOCK_SHUT_RD
/** POSIX wrapper for @ref ZSOCK_SHUT_WR */
#define SHUT_WR ZSOCK_SHUT_WR
/** POSIX wrapper for @ref ZSOCK_SHUT_RDWR */
#define SHUT_RDWR ZSOCK_SHUT_RDWR
/** POSIX wrapper for @ref DNS_EAI_BADFLAGS */
#define EAI_BADFLAGS DNS_EAI_BADFLAGS
/** POSIX wrapper for @ref DNS_EAI_NONAME */
#define EAI_NONAME DNS_EAI_NONAME
/** POSIX wrapper for @ref DNS_EAI_AGAIN */
#define EAI_AGAIN DNS_EAI_AGAIN
/** POSIX wrapper for @ref DNS_EAI_FAIL */
#define EAI_FAIL DNS_EAI_FAIL
/** POSIX wrapper for @ref DNS_EAI_NODATA */
#define EAI_NODATA DNS_EAI_NODATA
/** POSIX wrapper for @ref DNS_EAI_MEMORY */
#define EAI_MEMORY DNS_EAI_MEMORY
/** POSIX wrapper for @ref DNS_EAI_SYSTEM */
#define EAI_SYSTEM DNS_EAI_SYSTEM
/** POSIX wrapper for @ref DNS_EAI_SERVICE */
#define EAI_SERVICE DNS_EAI_SERVICE
/** POSIX wrapper for @ref DNS_EAI_SOCKTYPE */
#define EAI_SOCKTYPE DNS_EAI_SOCKTYPE
/** POSIX wrapper for @ref DNS_EAI_FAMILY */
#define EAI_FAMILY DNS_EAI_FAMILY
/** @} */
#endif /* defined(CONFIG_NET_SOCKETS_POSIX_NAMES) */
/**
* @name Network interface name description
* @{
*/
/** Network interface name length */
#if defined(CONFIG_NET_INTERFACE_NAME)
#define IFNAMSIZ CONFIG_NET_INTERFACE_NAME_LEN
#else
#define IFNAMSIZ Z_DEVICE_MAX_NAME_LEN
#endif
/** Interface description structure */
struct ifreq {
	/* Only the interface-name member is provided here; the full
	 * POSIX/Linux ifreq carries additional request fields.
	 */
	char ifr_name[IFNAMSIZ]; /**< Network interface name */
};
/** @} */
/**
* @name Socket level options (SOL_SOCKET)
* @{
*/
/** Socket-level option */
#define SOL_SOCKET 1
/* Socket options for SOL_SOCKET level */
/** Recording debugging information (ignored, for compatibility) */
#define SO_DEBUG 1
/** address reuse */
#define SO_REUSEADDR 2
/** Type of the socket */
#define SO_TYPE 3
/** Async error */
#define SO_ERROR 4
/** Bypass normal routing and send directly to host (ignored, for compatibility) */
#define SO_DONTROUTE 5
/** Transmission of broadcast messages is supported (ignored, for compatibility) */
#define SO_BROADCAST 6
/** Size of socket send buffer */
#define SO_SNDBUF 7
/** Size of socket recv buffer */
#define SO_RCVBUF 8
/** Enable sending keep-alive messages on connections */
#define SO_KEEPALIVE 9
/** Place out-of-band data into receive stream (ignored, for compatibility) */
#define SO_OOBINLINE 10
/** Socket priority */
#define SO_PRIORITY 12
/** Socket lingers on close (ignored, for compatibility) */
#define SO_LINGER 13
/** Allow multiple sockets to reuse a single port */
#define SO_REUSEPORT 15
/** Receive low watermark (ignored, for compatibility) */
#define SO_RCVLOWAT 18
/** Send low watermark (ignored, for compatibility) */
#define SO_SNDLOWAT 19
/**
* Receive timeout
* Applies to receive functions like recv(), but not to connect()
*/
#define SO_RCVTIMEO 20
/** Send timeout */
#define SO_SNDTIMEO 21
/** Bind a socket to an interface */
#define SO_BINDTODEVICE 25
/** Socket accepts incoming connections (ignored, for compatibility) */
#define SO_ACCEPTCONN 30
/** Timestamp TX RX or both packets. Supports multiple timestamp sources. */
#define SO_TIMESTAMPING 37
/** Protocol used with the socket */
#define SO_PROTOCOL 38
/** Domain (address family) used with the socket */
#define SO_DOMAIN 39
/** Enable SOCKS5 for Socket */
#define SO_SOCKS5 60
/** Socket TX time (when the data should be sent) */
#define SO_TXTIME 61
/** Socket TX time (same as SO_TXTIME) */
#define SCM_TXTIME SO_TXTIME
/** Timestamp generation flags */
/** Request RX timestamps generated by network adapter. */
#define SOF_TIMESTAMPING_RX_HARDWARE BIT(0)
/**
* Request TX timestamps generated by network adapter.
* This can be enabled via socket option or control messages.
*/
#define SOF_TIMESTAMPING_TX_HARDWARE BIT(1)
/** */
/** @} */
/**
* @name TCP level options (IPPROTO_TCP)
* @{
*/
/* Socket options for IPPROTO_TCP level */
/** Disable TCP buffering (ignored, for compatibility) */
#define TCP_NODELAY 1
/** Start keepalives after this period (seconds) */
#define TCP_KEEPIDLE 2
/** Interval between keepalives (seconds) */
#define TCP_KEEPINTVL 3
/** Number of keepalives before dropping connection */
#define TCP_KEEPCNT 4
/** @} */
/**
* @name IPv4 level options (IPPROTO_IP)
* @{
*/
/* Socket options for IPPROTO_IP level */
/** Set or receive the Type-Of-Service value for an outgoing packet. */
#define IP_TOS 1
/** Set or receive the Time-To-Live value for an outgoing packet. */
#define IP_TTL 2
/** Pass an IP_PKTINFO ancillary message that contains a
* pktinfo structure that supplies some information about the
* incoming packet.
*/
#define IP_PKTINFO 8
/**
* @brief Incoming IPv4 packet information.
*
* Used as ancillary data when calling recvmsg() and IP_PKTINFO socket
* option is set.
*/
struct in_pktinfo {
	/* Delivered as ancillary data when the IP_PKTINFO option is set. */
	unsigned int ipi_ifindex; /**< Network interface index */
	struct in_addr ipi_spec_dst; /**< Local address */
	struct in_addr ipi_addr; /**< Header Destination address */
};
/** Set IPv4 multicast TTL value. */
#define IP_MULTICAST_TTL 33
/** Join IPv4 multicast group. */
#define IP_ADD_MEMBERSHIP 35
/** Leave IPv4 multicast group. */
#define IP_DROP_MEMBERSHIP 36
/**
* @brief Struct used when joining or leaving a IPv4 multicast group.
*/
struct ip_mreqn {
	/* Passed as option value with IP_ADD_MEMBERSHIP / IP_DROP_MEMBERSHIP. */
	struct in_addr imr_multiaddr; /**< IP multicast group address */
	struct in_addr imr_address; /**< IP address of local interface */
	int imr_ifindex; /**< Network interface index */
};
/** @} */
/**
* @name IPv6 level options (IPPROTO_IPV6)
* @{
*/
/* Socket options for IPPROTO_IPV6 level */
/** Set the unicast hop limit for the socket. */
#define IPV6_UNICAST_HOPS 16
/** Set the multicast hop limit for the socket. */
#define IPV6_MULTICAST_HOPS 18
/** Join IPv6 multicast group. */
#define IPV6_ADD_MEMBERSHIP 20
/** Leave IPv6 multicast group. */
#define IPV6_DROP_MEMBERSHIP 21
/**
* @brief Struct used when joining or leaving a IPv6 multicast group.
*/
struct ipv6_mreq {
	/* Passed as option value with IPV6_ADD_MEMBERSHIP /
	 * IPV6_DROP_MEMBERSHIP.
	 */
	/** IPv6 multicast address of group */
	struct in6_addr ipv6mr_multiaddr;
	/** Network interface index of the local IPv6 address */
	int ipv6mr_ifindex;
};
/** Don't support IPv4 access */
#define IPV6_V6ONLY 26
/** Pass an IPV6_RECVPKTINFO ancillary message that contains a
* in6_pktinfo structure that supplies some information about the
* incoming packet. See RFC 3542.
*/
#define IPV6_RECVPKTINFO 49
/** RFC5014: Source address selection. */
#define IPV6_ADDR_PREFERENCES 72
/** Prefer temporary address as source. */
#define IPV6_PREFER_SRC_TMP 0x0001
/** Prefer public address as source. */
#define IPV6_PREFER_SRC_PUBLIC 0x0002
/** Either public or temporary address is selected as a default source
* depending on the output interface configuration (this is the default value).
* This is Linux specific option not found in the RFC.
*/
#define IPV6_PREFER_SRC_PUBTMP_DEFAULT 0x0100
/** Prefer Care-of address as source. Ignored in Zephyr. */
#define IPV6_PREFER_SRC_COA 0x0004
/** Prefer Home address as source. Ignored in Zephyr. */
#define IPV6_PREFER_SRC_HOME 0x0400
/** Prefer CGA (Cryptographically Generated Address) address as source. Ignored in Zephyr. */
#define IPV6_PREFER_SRC_CGA 0x0008
/** Prefer non-CGA address as source. Ignored in Zephyr. */
#define IPV6_PREFER_SRC_NONCGA 0x0800
/**
* @brief Incoming IPv6 packet information.
*
* Used as ancillary data when calling recvmsg() and IPV6_RECVPKTINFO socket
* option is set.
*/
struct in6_pktinfo {
	/* Delivered as ancillary data when IPV6_RECVPKTINFO is enabled. */
	struct in6_addr ipi6_addr; /**< Destination IPv6 address */
	unsigned int ipi6_ifindex; /**< Receive interface index */
};
/** Set or receive the traffic class value for an outgoing packet. */
#define IPV6_TCLASS 67
/** @} */
/**
* @name Backlog size for listen()
* @{
*/
/** listen: The maximum backlog queue length */
#define SOMAXCONN 128
/** @} */
/** @cond INTERNAL_HIDDEN */
/**
* @brief Registration information for a given BSD socket family.
*/
struct net_socket_register {
	/* Address family this implementation registers for. */
	int family;
	/* True when the implementation is registered via
	 * NET_SOCKET_OFFLOAD_REGISTER (offloaded to a device/driver).
	 */
	bool is_offloaded;
	/* Predicate: can this implementation handle the given
	 * family/type/proto triplet?
	 */
	bool (*is_supported)(int family, int type, int proto);
	/* Invoked to create a socket of the given family/type/proto. */
	int (*handler)(int family, int type, int proto);
#if defined(CONFIG_NET_SOCKETS_OBJ_CORE)
	/* Store also the name of the socket type in order to be able to
	 * print it later.
	 */
	const char * const name;
#endif
};
#define NET_SOCKET_DEFAULT_PRIO CONFIG_NET_SOCKETS_PRIORITY_DEFAULT
#define NET_SOCKET_GET_NAME(socket_name, prio) \
__net_socket_register_##prio##_##socket_name
#if defined(CONFIG_NET_SOCKETS_OBJ_CORE)
#define K_OBJ_TYPE_SOCK K_OBJ_TYPE_ID_GEN("SOCK")
#define NET_SOCKET_REGISTER_NAME(_name) \
.name = STRINGIFY(_name),
#else
#define NET_SOCKET_REGISTER_NAME(_name)
#endif
#define _NET_SOCKET_REGISTER(socket_name, prio, _family, _is_supported, _handler, _is_offloaded) \
static const STRUCT_SECTION_ITERABLE(net_socket_register, \
NET_SOCKET_GET_NAME(socket_name, prio)) = { \
.family = _family, \
.is_offloaded = _is_offloaded, \
.is_supported = _is_supported, \
.handler = _handler, \
NET_SOCKET_REGISTER_NAME(socket_name) \
}
#define NET_SOCKET_REGISTER(socket_name, prio, _family, _is_supported, _handler) \
_NET_SOCKET_REGISTER(socket_name, prio, _family, _is_supported, _handler, false)
#define NET_SOCKET_OFFLOAD_REGISTER(socket_name, prio, _family, _is_supported, _handler) \
_NET_SOCKET_REGISTER(socket_name, prio, _family, _is_supported, _handler, true)
/** @endcond */
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/socket.h>
/**
* @}
*/
/* Avoid circular loops with POSIX socket headers.
* We have these includes here so that we do not need
* to change the applications that were only including
* zephyr/net/socket.h header file.
*
* Additionally, if non-zephyr-prefixed headers are used here,
* native_sim pulls in those from the host rather than Zephyr's.
*
* This should be removed when CONFIG_NET_SOCKETS_POSIX_NAMES is removed.
*/
#if defined(CONFIG_POSIX_API)
#if !defined(ZEPHYR_INCLUDE_POSIX_ARPA_INET_H_)
#include <zephyr/posix/arpa/inet.h>
#endif
#if !defined(ZEPHYR_INCLUDE_POSIX_NETDB_H_)
#include <zephyr/posix/netdb.h>
#endif
#if !defined(ZEPHYR_INCLUDE_POSIX_UNISTD_H_)
#include <zephyr/posix/unistd.h>
#endif
#if !defined(ZEPHYR_INCLUDE_POSIX_POLL_H_)
#include <zephyr/posix/poll.h>
#endif
#if !defined(ZEPHYR_INCLUDE_POSIX_SYS_SOCKET_H_)
#include <zephyr/posix/sys/socket.h>
#endif
#endif /* CONFIG_POSIX_API */
#endif /* ZEPHYR_INCLUDE_NET_SOCKET_H_ */
``` | /content/code_sandbox/include/zephyr/net/socket.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,951 |
```objective-c
/*
*
*/
/** @file socket_select.h
*
* @brief BSD select support functions.
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKET_SELECT_H_
#define ZEPHYR_INCLUDE_NET_SOCKET_SELECT_H_
/**
* @brief BSD Sockets compatible API
* @defgroup bsd_sockets BSD Sockets compatible API
* @ingroup networking
* @{
*/
#include <zephyr/toolchain.h>
#include <zephyr/net/socket_types.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Socket file descriptor set. */
typedef struct zsock_fd_set {
	/* One bit per file descriptor: sized to hold CONFIG_ZVFS_OPEN_MAX
	 * descriptors, rounded up to whole 32-bit words.
	 */
	uint32_t bitset[(CONFIG_ZVFS_OPEN_MAX + 31) / 32];
} zsock_fd_set;
/**
* @brief Legacy function to poll multiple sockets for events
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description. This function is provided to ease porting of
* existing code and not recommended for usage due to its inefficiency,
* use zsock_poll() instead. In Zephyr this function works only with
* sockets, not arbitrary file descriptors.
* This function is also exposed as ``select()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined (in which case
* it may conflict with generic POSIX ``select()`` function).
* @endrst
*/
__syscall int zsock_select(int nfds, zsock_fd_set *readfds,
zsock_fd_set *writefds,
zsock_fd_set *exceptfds,
struct zsock_timeval *timeout);
/** Number of file descriptors which can be added to zsock_fd_set */
#define ZSOCK_FD_SETSIZE (sizeof(((zsock_fd_set *)0)->bitset) * 8)
/**
* @brief Initialize (clear) fd_set
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``FD_ZERO()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
void ZSOCK_FD_ZERO(zsock_fd_set *set);
/**
* @brief Check whether socket is a member of fd_set
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``FD_ISSET()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
int ZSOCK_FD_ISSET(int fd, zsock_fd_set *set);
/**
* @brief Remove socket from fd_set
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``FD_CLR()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
void ZSOCK_FD_CLR(int fd, zsock_fd_set *set);
/**
* @brief Add socket to fd_set
*
* @details
* @rst
* See `POSIX.1-2017 article
* <path_to_url`__
* for normative description.
* This function is also exposed as ``FD_SET()``
* if :kconfig:option:`CONFIG_POSIX_API` is defined.
* @endrst
*/
void ZSOCK_FD_SET(int fd, zsock_fd_set *set);
/** @cond INTERNAL_HIDDEN */
#ifdef CONFIG_NET_SOCKETS_POSIX_NAMES
#define fd_set zsock_fd_set
#define FD_SETSIZE ZSOCK_FD_SETSIZE
static inline int select(int nfds, zsock_fd_set *readfds,
zsock_fd_set *writefds, zsock_fd_set *exceptfds,
struct timeval *timeout)
{
return zsock_select(nfds, readfds, writefds, exceptfds, timeout);
}
static inline void FD_ZERO(zsock_fd_set *set)
{
ZSOCK_FD_ZERO(set);
}
static inline int FD_ISSET(int fd, zsock_fd_set *set)
{
return ZSOCK_FD_ISSET(fd, set);
}
static inline void FD_CLR(int fd, zsock_fd_set *set)
{
ZSOCK_FD_CLR(fd, set);
}
static inline void FD_SET(int fd, zsock_fd_set *set)
{
ZSOCK_FD_SET(fd, set);
}
#endif /* CONFIG_NET_SOCKETS_POSIX_NAMES */
/** @endcond */
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/socket_select.h>
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_SOCKET_SELECT_H_ */
``` | /content/code_sandbox/include/zephyr/net/socket_select.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,000 |
```objective-c
/** @file
* @brief DHCPv4 Server API
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_DHCPV4_SERVER_H_
#define ZEPHYR_INCLUDE_NET_DHCPV4_SERVER_H_
#include <zephyr/net/net_ip.h>
#include <zephyr/sys_clock.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief DHCPv4 server
* @defgroup dhcpv4_server DHCPv4 server
* @since 3.6
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
struct net_if;
/* Maximum length (in bytes) of a client identifier stored per lease. */
#define DHCPV4_CLIENT_ID_MAX_SIZE 20
/* Lifecycle state of one address slot in the server's address pool. */
enum dhcpv4_server_addr_state {
	DHCPV4_SERVER_ADDR_FREE,      /* Not assigned to any client. */
	DHCPV4_SERVER_ADDR_RESERVED,  /* Set aside for a client, not yet allocated. */
	DHCPV4_SERVER_ADDR_ALLOCATED, /* Leased to a client. */
	DHCPV4_SERVER_ADDR_DECLINED,  /* Declined by a client. */
};
/* Client identifier used to associate a lease with a client. */
struct dhcpv4_client_id {
	uint8_t buf[DHCPV4_CLIENT_ID_MAX_SIZE]; /* Raw identifier bytes. */
	uint8_t len;                            /* Number of valid bytes in buf. */
};
/* One entry of the server's lease table. */
struct dhcpv4_addr_slot {
	enum dhcpv4_server_addr_state state; /* Current lifecycle state. */
	struct dhcpv4_client_id client_id;   /* Client owning this slot. */
	struct in_addr addr;                 /* IPv4 address managed by this slot. */
	uint32_t lease_time;                 /* Lease duration; presumably seconds
					      * - TODO confirm against server impl.
					      */
	k_timepoint_t expiry;                /* Time point at which the lease expires. */
};
/** @endcond */
/**
* @brief Start DHCPv4 server instance on an iface
*
* @details Start DHCPv4 server on a given interface. The server will start
* listening for DHCPv4 Discover/Request messages on the interface and assign
* IPv4 addresses from the configured address pool accordingly.
*
* @param iface A valid pointer on an interface
* @param base_addr First IPv4 address from the DHCPv4 address pool. The number
* of addresses in the pool is configured statically with Kconfig
* (CONFIG_NET_DHCPV4_SERVER_ADDR_COUNT).
*
* @return 0 on success, a negative error code otherwise.
*/
int net_dhcpv4_server_start(struct net_if *iface, struct in_addr *base_addr);
/**
* @brief Stop DHCPv4 server instance on an iface
*
* @details Stop DHCPv4 server on a given interface. DHCPv4 requests will no
* longer be handled on the interface, and all of the allocations are cleared.
*
* @param iface A valid pointer on an interface
*
* @return 0 on success, a negative error code otherwise.
*/
int net_dhcpv4_server_stop(struct net_if *iface);
/**
* @typedef net_dhcpv4_lease_cb_t
* @brief Callback used while iterating over active DHCPv4 address leases
*
* @param iface Pointer to the network interface
 * @param lease Pointer to the DHCPv4 address lease slot
* @param user_data A valid pointer to user data or NULL
*/
typedef void (*net_dhcpv4_lease_cb_t)(struct net_if *iface,
struct dhcpv4_addr_slot *lease,
void *user_data);
/**
* @brief Iterate over all DHCPv4 address leases on a given network interface
* and call callback for each lease. In case no network interface is provided
* (NULL interface pointer), will iterate over all interfaces running DHCPv4
* server instance.
*
* @param iface Pointer to the network interface, can be NULL
* @param cb User-supplied callback function to call
* @param user_data User specified data
*/
int net_dhcpv4_server_foreach_lease(struct net_if *iface,
net_dhcpv4_lease_cb_t cb,
void *user_data);
/**
* @typedef net_dhcpv4_server_provider_cb_t
* @brief Callback used to let application provide an address for a given
* client ID
* @details This function is called before assigning an address to a client,
* and lets the application override the address for a given client. If the
* callback returns 0, addr needs to be a valid address and will be assigned
* to the client. If the callback returns anything non-zero, the client will
* be assigned an address from the pool.
*
* @param iface Pointer to the network interface
* @param client_id Pointer to client requesting an address
* @param addr Address to be assigned to client
* @param user_data A valid pointer to user data or NULL
*/
typedef int (*net_dhcpv4_server_provider_cb_t)(struct net_if *iface,
const struct dhcpv4_client_id *client_id,
struct in_addr *addr,
void *user_data);
/**
* @brief Set the callback used to provide addresses to the DHCP server.
*
* @param cb User-supplied callback function to call
* @param user_data A valid pointer to user data or NULL
*/
void net_dhcpv4_server_set_provider_cb(net_dhcpv4_server_provider_cb_t cb,
void *user_data);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_DHCPV4_SERVER_H_ */
``` | /content/code_sandbox/include/zephyr/net/dhcpv4_server.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,059 |
```objective-c
/*
*
*/
/** @file
* @brief OpenThread L2 stack public header
*/
#ifndef ZEPHYR_INCLUDE_NET_OPENTHREAD_H_
#define ZEPHYR_INCLUDE_NET_OPENTHREAD_H_
/**
* @brief OpenThread Layer 2 abstraction layer
* @defgroup openthread OpenThread L2 abstraction layer
* @since 1.11
* @version 0.8.0
* @ingroup ieee802154
* @{
*/
#include <zephyr/kernel.h>
#include <zephyr/net/net_if.h>
#include <openthread/instance.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @cond INTERNAL_HIDDEN
*/
/**
* @brief Type of pkt_list
*/
struct pkt_list_elem {
struct net_pkt *pkt;
};
/**
* @brief OpenThread l2 private data.
*/
struct openthread_context {
/** Pointer to OpenThread stack instance */
otInstance *instance;
/** Pointer to OpenThread network interface */
struct net_if *iface;
/** Index indicates the head of pkt_list ring buffer */
uint16_t pkt_list_in_idx;
/** Index indicates the tail of pkt_list ring buffer */
uint16_t pkt_list_out_idx;
/** Flag indicates that pkt_list is full */
uint8_t pkt_list_full;
/** Array for storing net_pkt for OpenThread internal usage */
struct pkt_list_elem pkt_list[CONFIG_OPENTHREAD_PKT_LIST_SIZE];
/** A mutex to protect API calls from being preempted. */
struct k_mutex api_lock;
/** A work queue for all OpenThread activity */
struct k_work_q work_q;
/** Work object for OpenThread internal usage */
struct k_work api_work;
/** A list for state change callbacks */
sys_slist_t state_change_cbs;
};
/**
* INTERNAL_HIDDEN @endcond
*/
/** OpenThread state change callback */
/**
* @brief OpenThread state change callback structure
*
* Used to register a callback in the callback list. As many
* callbacks as needed can be added as long as each of them
* are unique pointers of struct openthread_state_changed_cb.
* Beware such structure should not be allocated on stack.
*/
struct openthread_state_changed_cb {
/**
* @brief Callback for notifying configuration or state changes.
*
* @param flags as per OpenThread otStateChangedCallback() aFlags parameter.
* See path_to_url#otstatechangedcallback
* @param ot_context the OpenThread context the callback is registered with.
* @param user_data Data to pass to the callback.
*/
void (*state_changed_cb)(otChangedFlags flags, struct openthread_context *ot_context,
void *user_data);
/** User data if required */
void *user_data;
/**
* Internally used field for list handling
* - user must not directly modify
*/
sys_snode_t node;
};
/**
* @brief Registers callbacks which will be called when certain configuration
* or state changes occur within OpenThread.
*
* @param ot_context the OpenThread context to register the callback with.
* @param cb callback struct to register.
*/
int openthread_state_changed_cb_register(struct openthread_context *ot_context,
struct openthread_state_changed_cb *cb);
/**
* @brief Unregisters OpenThread configuration or state changed callbacks.
*
* @param ot_context the OpenThread context to unregister the callback from.
* @param cb callback struct to unregister.
*/
int openthread_state_changed_cb_unregister(struct openthread_context *ot_context,
struct openthread_state_changed_cb *cb);
/**
* @brief Get OpenThread thread identification.
*/
k_tid_t openthread_thread_id_get(void);
/**
* @brief Get pointer to default OpenThread context.
*
* @retval !NULL On success.
* @retval NULL On failure.
*/
struct openthread_context *openthread_get_default_context(void);
/**
* @brief Get pointer to default OpenThread instance.
*
* @retval !NULL On success.
* @retval NULL On failure.
*/
struct otInstance *openthread_get_default_instance(void);
/**
* @brief Starts the OpenThread network.
*
* @details Depends on active settings: it uses stored network configuration,
* start joining procedure or uses default network configuration. Additionally
* when the device is MTD, it sets the SED mode to properly attach the network.
*
* @param ot_context
*/
int openthread_start(struct openthread_context *ot_context);
/**
* @brief Lock internal mutex before accessing OT API.
*
* @details OpenThread API is not thread-safe, therefore before accessing any
* API function, it's needed to lock the internal mutex, to prevent the
* OpenThread thread from preempting the API call.
*
* @param ot_context Context to lock.
*/
void openthread_api_mutex_lock(struct openthread_context *ot_context);
/**
* @brief Try to lock internal mutex before accessing OT API.
*
* @details This function behaves like openthread_api_mutex_lock() provided that
 * the internal mutex is unlocked. Otherwise, it exits immediately and returns
* a negative value.
*
* @param ot_context Context to lock.
* @retval 0 On success.
* @retval <0 On failure.
*/
int openthread_api_mutex_try_lock(struct openthread_context *ot_context);
/**
* @brief Unlock internal mutex after accessing OT API.
*
* @param ot_context Context to unlock.
*/
void openthread_api_mutex_unlock(struct openthread_context *ot_context);
/** @cond INTERNAL_HIDDEN */
#define OPENTHREAD_L2_CTX_TYPE struct openthread_context
/** @endcond */
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_OPENTHREAD_H_ */
``` | /content/code_sandbox/include/zephyr/net/openthread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,216 |
```objective-c
/** @file
* @brief DNS Service Discovery
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_DNS_SD_H_
#define ZEPHYR_INCLUDE_NET_DNS_SD_H_
#include <stdint.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief DNS Service Discovery
*
* @details This API enables services to be advertised via DNS. To
* advertise a service, system or application code should use
* @ref DNS_SD_REGISTER_TCP_SERVICE or
* @ref DNS_SD_REGISTER_UDP_SERVICE.
*
* @see <a href="path_to_url">RFC 6763</a>
*
* @defgroup dns_sd DNS Service Discovery
* @since 2.5
* @version 0.8.0
* @ingroup networking
* @{
*/
/** RFC 1034 Section 3.1 */
#define DNS_SD_INSTANCE_MIN_SIZE 1
/** RFC 1034 Section 3.1, RFC 6763 Section 7.2 */
#define DNS_SD_INSTANCE_MAX_SIZE 63
/** RFC 6763 Section 7.2 - inclusive of underscore */
#define DNS_SD_SERVICE_MIN_SIZE 2
/** RFC 6763 Section 7.2 - inclusive of underscore */
#define DNS_SD_SERVICE_MAX_SIZE 16
/** RFC 6763 Section 4.1.2 */
#define DNS_SD_SERVICE_PREFIX '_'
/** RFC 6763 Section 4.1.2 - either _tcp or _udp (case insensitive) */
#define DNS_SD_PROTO_SIZE 4
/** ICANN Rules for TLD naming */
#define DNS_SD_DOMAIN_MIN_SIZE 2
/** RFC 1034 Section 3.1, RFC 6763 Section 7.2 */
#define DNS_SD_DOMAIN_MAX_SIZE 63
/**
* Minimum number of segments in a fully-qualified name
*
* This represents FQN's of the form below
* ```
* <sn>._tcp.<domain>.
* ```
* Currently sub-types and service domains are unsupported and only the
* "local" domain is supported. Specifically, that excludes the following:
* ```
* <sub>._sub.<sn>._tcp.<servicedomain>.<parentdomain>.
* ```
* @see <a href="path_to_url">RFC 6763</a>, Section 7.2.
*/
#define DNS_SD_MIN_LABELS 3
/**
* Maximum number of segments in a fully-qualified name
*
* This represents FQN's of the form below
* ```
* <instance>.<sn>._tcp.<domain>.
* ```
*
* Currently sub-types and service domains are unsupported and only the
* "local" domain is supported. Specifically, that excludes the following:
* ```
* <sub>._sub.<sn>._tcp.<servicedomain>.<parentdomain>.
* ```
* @see <a href="path_to_url">RFC 6763</a>, Section 7.2.
*/
#define DNS_SD_MAX_LABELS 4
/**
* @brief Register a service for DNS Service Discovery
*
* This macro should be used for advanced use cases. Two simple use cases are
* when a custom @p _domain or a custom (non-standard) @p _proto is required.
*
* Another use case is when the port number is not preassigned. That could
* be for a number of reasons, but the most common use case would be for
* ephemeral port usage - i.e. when the service is bound using port number 0.
* In that case, Zephyr (like other OS's) will simply choose an unused port.
* When using ephemeral ports, it can be helpful to assign @p _port to the
* @ref sockaddr_in.sin_port field of an IPv4 @ref sockaddr_in, or to the
* @ref sockaddr_in6.sin6_port field of an IPv6 @ref sockaddr_in6.
*
* The service can be referenced using the @p _id variable.
*
* @param _id variable name for the DNS-SD service record
* @param _instance name of the service instance such as "My HTTP Server"
* @param _service name of the service, such as "_http"
* @param _proto protocol used by the service - either "_tcp" or "_udp"
* @param _domain the domain of the service, such as "local"
* @param _text information for the DNS TXT record
* @param _port a pointer to the port number that this service will use
*/
#define DNS_SD_REGISTER_SERVICE(_id, _instance, _service, _proto, \
_domain, _text, _port) \
static const STRUCT_SECTION_ITERABLE(dns_sd_rec, _id) = { \
.instance = _instance, \
.service = _service, \
.proto = _proto, \
.domain = _domain, \
.text = (const char *)_text, \
.text_size = sizeof(_text) - 1, \
.port = _port, \
}
/**
* @brief Register a TCP service for DNS Service Discovery
*
* This macro can be used for service advertisement using DNS-SD.
*
* The service can be referenced using the @p id variable.
*
* Example (with TXT):
* @code{.c}
* #include <zephyr/net/dns_sd.h>
* static const char bar_txt[] = {
* "\x06" "path=/"
* "\x0f" "this=is the way"
* "\x0e" "foo or=foo not"
* "\x17" "this=has\0embedded\0nulls"
* "\x04" "true"
* };
* DNS_SD_REGISTER_TCP_SERVICE(bar, CONFIG_NET_HOSTNAME,
* "_bar", "local", bar_txt, 4242);
* @endcode
*
* TXT records begin with a single length byte (hex-encoded)
* and contain key=value pairs. Thus, the length of the key-value pair
* must not exceed 255 bytes. Care must be taken to ensure that the
* encoded length value is correct.
*
* For additional rules on TXT encoding, see RFC 6763, Section 6.
* @param id variable name for the DNS-SD service record
* @param instance name of the service instance such as "My HTTP Server"
* @param service name of the service, such as "_http"
* @param domain the domain of the service, such as "local"
* @param text information for the DNS TXT record
* @param port the port number that this service will use
*
* @see <a href="path_to_url">RFC 6763</a>
*/
#define DNS_SD_REGISTER_TCP_SERVICE(id, instance, service, domain, text, \
port) \
static const uint16_t id ## _port = sys_cpu_to_be16(port); \
DNS_SD_REGISTER_SERVICE(id, instance, service, "_tcp", domain, \
text, &id ## _port)
/**
* @brief Register a UDP service for DNS Service Discovery
*
* This macro can be used for service advertisement using DNS-SD.
*
* The service can be referenced using the @p id variable.
*
* Example (no TXT):
* @code{.c}
* #include <zephyr/net/dns_sd.h>
* DNS_SD_REGISTER_UDP_SERVICE(foo, CONFIG_NET_HOSTNAME,
* "_foo", "local", DNS_SD_EMPTY_TXT, 4242);
* @endcode
*
* @param id variable name for the DNS-SD service record
* @param instance name of the service instance such as "My TFTP Server"
* @param service name of the service, such as "_tftp"
* @param domain the domain of the service, such as "local" or "zephyrproject.org"
* @param text information for the DNS TXT record
* @param port a pointer to the port number that this service will use
*
* @see <a href="path_to_url">RFC 6763</a>
*/
#define DNS_SD_REGISTER_UDP_SERVICE(id, instance, service, domain, text, \
port) \
static const uint16_t id ## _port = sys_cpu_to_be16(port); \
DNS_SD_REGISTER_SERVICE(id, instance, service, "_udp", domain, \
text, &id ## _port)
/** Empty DNS-SD TXT specifier */
#define DNS_SD_EMPTY_TXT dns_sd_empty_txt
/**
* @brief DNS Service Discovery record
*
* This structure used in the implementation of RFC 6763 and should not
* need to be accessed directly from application code.
*
* The @a port pointer must be non-NULL. When the value in @a port
* is non-zero, the service is advertised as being on that particular
* port. When the value in @a port is zero, then the service is not
* advertised.
*
* Thus, it is possible for multiple services to advertise on a
* particular port if they hard-code the port.
*
* @see <a href="path_to_url">RFC 6763</a>
*/
struct dns_sd_rec {
/** "<Instance>" - e.g. "My HTTP Server" */
const char *instance;
/** Top half of the "<Service>" such as "_http" */
const char *service;
/** Bottom half of the "<Service>" "_tcp" or "_udp" */
const char *proto;
/** "<Domain>" such as "local" or "zephyrproject.org" */
const char *domain;
/** DNS TXT record */
const char *text;
/** Size (in bytes) of the DNS TXT record */
size_t text_size;
/** A pointer to the port number used by the service */
const uint16_t *port;
};
/** @cond INTERNAL_HIDDEN */
/**
* @brief Empty TXT specifier for DNS-SD
*
* @internal
*/
extern const char dns_sd_empty_txt[1];
/**
* @brief Wildcard Port specifier for DNS-SD
*
* @internal
*/
extern const uint16_t dns_sd_port_zero;
/** @endcond */
/**
 * @brief Obtain the size of DNS-SD TXT data
 *
 * @param rec the record in question
 * @return the size (in bytes) of the record's TXT field
 */
static inline size_t dns_sd_txt_size(const struct dns_sd_rec *rec)
{
	return rec->text_size;
}
/**
* @brief Check if @a rec is a DNS-SD Service Type Enumeration
*
* DNS-SD Service Type Enumeration is used by network tooling to
* acquire a list of all mDNS-advertised services belonging to a
* particular host on a particular domain.
*
* For example, for the domain '.local', the equivalent query
* would be '_services._dns-sd._udp.local'.
*
* Currently, only the '.local' domain is supported.
*
* @see <a href="path_to_url#section-9">Service Type Enumeration, RFC 6763</a>.
*
 * @param rec the record in question
* @return true if @a rec is a DNS-SD Service Type Enumeration
*/
bool dns_sd_is_service_type_enumeration(const struct dns_sd_rec *rec);
/**
* @brief Create a wildcard filter for DNS-SD records
*
* @param filter a pointer to the filter to use
*/
void dns_sd_create_wildcard_filter(struct dns_sd_rec *filter);
/**
* @}
*/
#ifdef __cplusplus
};
#endif
#endif /* ZEPHYR_INCLUDE_NET_DNS_SD_H_ */
``` | /content/code_sandbox/include/zephyr/net/dns_sd.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,465 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for network link address
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_LINKADDR_H_
#define ZEPHYR_INCLUDE_NET_NET_LINKADDR_H_
#include <zephyr/types.h>
#include <stdbool.h>
#include <errno.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network link address library
* @defgroup net_linkaddr Network Link Address Library
* @since 1.0
* @version 1.0.0
* @ingroup networking
* @{
*/
/** Maximum length of the link address */
#ifdef CONFIG_NET_L2_IEEE802154
#define NET_LINK_ADDR_MAX_LENGTH 8
#else
#ifdef CONFIG_NET_L2_PPP
#define NET_LINK_ADDR_MAX_LENGTH 8
#else
#define NET_LINK_ADDR_MAX_LENGTH 6
#endif
#endif
/**
* Type of the link address. This indicates the network technology that this
* address is used in. Note that in order to save space we store the value
* into a uint8_t variable, so please do not introduce any values > 255 in
* this enum.
*/
enum net_link_type {
/** Unknown link address type. */
NET_LINK_UNKNOWN = 0,
/** IEEE 802.15.4 link address. */
NET_LINK_IEEE802154,
/** Bluetooth IPSP link address. */
NET_LINK_BLUETOOTH,
/** Ethernet link address. */
NET_LINK_ETHERNET,
/** Dummy link address. Used in testing apps and loopback support. */
NET_LINK_DUMMY,
/** CANBUS link address. */
NET_LINK_CANBUS_RAW,
} __packed;
/**
 * @brief Hardware link address structure
 *
 * Used to hold the link address information
 */
struct net_linkaddr {
	/** The array of bytes representing the address, stored in big endian */
	uint8_t *addr;
	/** Length of that address array */
	uint8_t len;
	/** What kind of address is this for (a value from enum net_link_type) */
	uint8_t type;
};
/**
 * @brief Hardware link address structure
 *
 * Used to hold the link address information. This variant is needed
 * when we have to store the link layer address.
 *
 * Note that you cannot cast this to net_linkaddr as uint8_t * is
 * handled differently than uint8_t addr[] and the fields are purposely
 * in different order.
 */
struct net_linkaddr_storage {
	/** What kind of address is this for */
	uint8_t type;
	/** The real length of the ll address. */
	uint8_t len;
	/** The array of bytes representing the address, stored in big endian */
	uint8_t addr[NET_LINK_ADDR_MAX_LENGTH];
};
/**
 * @brief Compare two link layer addresses.
 *
 * @param lladdr1 Pointer to a link layer address
 * @param lladdr2 Pointer to a link layer address
 *
 * @return True if the addresses are the same, false otherwise
 * (a NULL pointer for either argument compares as not equal).
 */
static inline bool net_linkaddr_cmp(struct net_linkaddr *lladdr1,
				    struct net_linkaddr *lladdr2)
{
	bool equal = false;

	if (lladdr1 != NULL && lladdr2 != NULL &&
	    lladdr1->len == lladdr2->len) {
		equal = (memcmp(lladdr1->addr, lladdr2->addr,
				lladdr1->len) == 0);
	}

	return equal;
}
/**
 * @brief Set the member data of a link layer address storage structure.
 *
 * @param lladdr_store The link address storage structure to change.
 * @param new_addr Array of bytes containing the link address.
 * @param new_len Length of the link address array.
 * This value should always be <= NET_LINK_ADDR_MAX_LENGTH.
 *
 * @return 0 on success, -EINVAL on a NULL pointer argument,
 * -EMSGSIZE if the address does not fit the storage.
 */
static inline int net_linkaddr_set(struct net_linkaddr_storage *lladdr_store,
				   uint8_t *new_addr, uint8_t new_len)
{
	if (lladdr_store == NULL || new_addr == NULL) {
		return -EINVAL;
	}

	if (new_len > NET_LINK_ADDR_MAX_LENGTH) {
		return -EMSGSIZE;
	}

	memcpy(lladdr_store->addr, new_addr, new_len);
	lladdr_store->len = new_len;

	return 0;
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_NET_LINKADDR_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_linkaddr.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 911 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public functions for the Precision Time Protocol Stack.
*
*/
#ifndef ZEPHYR_INCLUDE_NET_GPTP_H_
#define ZEPHYR_INCLUDE_NET_GPTP_H_
/**
* @brief generic Precision Time Protocol (gPTP) support
* @defgroup gptp gPTP support
* @since 1.13
* @version 0.1.0
* @ingroup networking
* @{
*/
#include <zephyr/net/net_core.h>
#include <zephyr/net/ptp_time.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @cond INTERNAL_HIDDEN */
#define GPTP_OFFSET_SCALED_LOG_VAR_UNKNOWN 0x436A
#define GPTP_PRIORITY1_NON_GM_CAPABLE 255
#define GPTP_PRIORITY1_GM_CAPABLE 248
#if defined(CONFIG_NET_GPTP_BMCA_PRIORITY2)
#define GPTP_PRIORITY2_DEFAULT CONFIG_NET_GPTP_BMCA_PRIORITY2
#else
#define GPTP_PRIORITY2_DEFAULT 248
#endif
/** @endcond */
/**
* @brief Scaled Nanoseconds.
*/
struct gptp_scaled_ns {
/** High half. */
int32_t high;
/** Low half. */
int64_t low;
} __packed;
/**
* @brief UScaled Nanoseconds.
*/
struct gptp_uscaled_ns {
/** High half. */
uint32_t high;
/** Low half. */
uint64_t low;
} __packed;
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_NEWLIB_LIBC)
#include <math.h>
#define GPTP_POW2(exp) pow(2, exp)
#else
/*
 * Fallback for computing 2^exp when <math.h> pow() is unavailable.
 *
 * The previous implementation used "res = 1 << exp" for non-negative
 * exponents, which is undefined behavior once exp >= 31 on a 32-bit
 * int (C11 6.5.7) and cannot represent exponents >= 31 at all.
 * Repeated doubling in a double avoids integer shifting entirely and
 * handles any exponent representable in a double.
 */
static inline double gptp_pow2(int exp)
{
	double res = 1.0;

	if (exp >= 0) {
		/* Multiply up: res becomes 2^exp. */
		while (exp-- > 0) {
			res *= 2.0;
		}
	} else {
		/* Divide down for negative exponents: res becomes 2^exp < 1. */
		while (exp++ < 0) {
			res /= 2.0;
		}
	}

	return res;
}
#define GPTP_POW2(exp) gptp_pow2(exp)
#endif
/* Pre-calculated constants */
/* 2^16 */
#define GPTP_POW2_16 65536.0
/* 2^41 */
#define GPTP_POW2_41 2199023255552.0
/* Message types. Event messages have BIT(3) set to 0, and general messages
* have that bit set to 1. IEEE 802.1AS chapter 10.5.2.2.2
*/
#define GPTP_SYNC_MESSAGE 0x00
#define GPTP_DELAY_REQ_MESSAGE 0x01
#define GPTP_PATH_DELAY_REQ_MESSAGE 0x02
#define GPTP_PATH_DELAY_RESP_MESSAGE 0x03
#define GPTP_FOLLOWUP_MESSAGE 0x08
#define GPTP_DELAY_RESP_MESSAGE 0x09
#define GPTP_PATH_DELAY_FOLLOWUP_MESSAGE 0x0a
#define GPTP_ANNOUNCE_MESSAGE 0x0b
#define GPTP_SIGNALING_MESSAGE 0x0c
#define GPTP_MANAGEMENT_MESSAGE 0x0d
#define GPTP_IS_EVENT_MSG(msg_type) (!((msg_type) & BIT(3)))
#define GPTP_CLOCK_ID_LEN 8
/** @endcond */
/**
* @brief Port Identity.
*/
struct gptp_port_identity {
/** Clock identity of the port. */
uint8_t clk_id[GPTP_CLOCK_ID_LEN];
/** Number of the port. */
uint16_t port_number;
} __packed;
/** gPTP message flags */
struct gptp_flags {
union {
/** Byte access. */
uint8_t octets[2];
/** Whole field access. */
uint16_t all;
};
} __packed;
/** gPTP message header */
struct gptp_hdr {
/** Type of the message. */
uint8_t message_type:4;
/** Transport specific, always 1. */
uint8_t transport_specific:4;
/** Version of the PTP, always 2. */
uint8_t ptp_version:4;
/** Reserved field. */
uint8_t reserved0:4;
/** Total length of the message from the header to the last TLV. */
uint16_t message_length;
/** Domain number, always 0. */
uint8_t domain_number;
/** Reserved field. */
uint8_t reserved1;
/** Message flags. */
struct gptp_flags flags;
/** Correction Field. The content depends of the message type. */
int64_t correction_field;
/** Reserved field. */
uint32_t reserved2;
/** Port Identity of the sender. */
struct gptp_port_identity port_id;
/** Sequence Id. */
uint16_t sequence_id;
/** Control value. Sync: 0, Follow-up: 2, Others: 5. */
uint8_t control;
/** Message Interval in Log2 for Sync and Announce messages. */
int8_t log_msg_interval;
} __packed;
/** @cond INTERNAL_HIDDEN */
#define GPTP_GET_CURRENT_TIME_USCALED_NS(port, uscaled_ns_ptr) \
do { \
(uscaled_ns_ptr)->low = \
gptp_get_current_time_nanosecond(port) << 16; \
(uscaled_ns_ptr)->high = 0; \
} while (false)
/** @endcond */
/**
* @typedef gptp_phase_dis_callback_t
* @brief Define callback that is called after a phase discontinuity has been
* sent by the grandmaster.
* @param gm_identity A pointer to first element of a ClockIdentity array.
* The size of the array is GPTP_CLOCK_ID_LEN.
* @param time_base A pointer to the value of timeBaseIndicator of the current
* grandmaster.
* @param last_gm_ph_change A pointer to the value of lastGmPhaseChange received
* from grandmaster.
* @param last_gm_freq_change A pointer to the value of lastGmFreqChange
* received from the grandmaster.
*/
typedef void (*gptp_phase_dis_callback_t)(
uint8_t *gm_identity,
uint16_t *time_base,
struct gptp_scaled_ns *last_gm_ph_change,
double *last_gm_freq_change);
/**
* @brief Phase discontinuity callback structure.
*
* Stores the phase discontinuity callback information. Caller must make sure
* that the variable pointed by this is valid during the lifetime of
* registration. Typically this means that the variable cannot be
* allocated from stack.
*/
struct gptp_phase_dis_cb {
/** Node information for the slist. */
sys_snode_t node;
/** Phase discontinuity callback. */
gptp_phase_dis_callback_t cb;
};
/**
* @brief ClockSourceTime.invoke function parameters
*
* Parameters passed by ClockSourceTime.invoke function.
*/
struct gptp_clk_src_time_invoke_params {
/** Frequency change on the last Time Base Indicator Change. */
double last_gm_freq_change;
/** The time this function is invoked. */
struct net_ptp_extended_time src_time;
/** Phase change on the last Time Base Indicator Change. */
struct gptp_scaled_ns last_gm_phase_change;
/** Time Base - changed only if Phase or Frequency changes. */
uint16_t time_base_indicator;
};
/**
* @brief Register a phase discontinuity callback.
*
* @param phase_dis Caller specified handler for the callback.
* @param cb Callback to register.
*/
void gptp_register_phase_dis_cb(struct gptp_phase_dis_cb *phase_dis,
gptp_phase_dis_callback_t cb);
/**
* @brief Unregister a phase discontinuity callback.
*
* @param phase_dis Caller specified handler for the callback.
*/
void gptp_unregister_phase_dis_cb(struct gptp_phase_dis_cb *phase_dis);
/**
* @brief Call a phase discontinuity callback function.
*/
void gptp_call_phase_dis_cb(void);
/**
* @brief Get gPTP time.
*
* @param slave_time A pointer to structure where timestamp will be saved.
* @param gm_present A pointer to a boolean where status of the
* presence of a grand master will be saved.
*
* @return Error code. 0 if no error.
*/
int gptp_event_capture(struct net_ptp_time *slave_time, bool *gm_present);
/**
* @brief Utility function to print clock id to a user supplied buffer.
*
* @param clk_id Clock id
* @param output Output buffer
* @param output_len Output buffer len
*
* @return Pointer to output buffer
*/
char *gptp_sprint_clock_id(const uint8_t *clk_id, char *output,
size_t output_len);
/**
* @typedef gptp_port_cb_t
* @brief Callback used while iterating over gPTP ports
*
* @param port Port number
* @param iface Pointer to network interface
* @param user_data A valid pointer to user data or NULL
*/
typedef void (*gptp_port_cb_t)(int port, struct net_if *iface,
void *user_data);
/**
* @brief Go through all the gPTP ports and call callback for each of them.
*
* @param cb User-supplied callback function to call
* @param user_data User specified data
*/
void gptp_foreach_port(gptp_port_cb_t cb, void *user_data);
/**
* @brief Get gPTP domain.
* @details This contains all the configuration / status of the gPTP domain.
*
* @return Pointer to domain or NULL if not found.
*/
struct gptp_domain *gptp_get_domain(void);
/**
* @brief This interface is used by the ClockSource entity to provide time to
* the ClockMaster entity of a time-aware system.
*
* @param arg Current state and parameters of the ClockSource entity.
*/
void gptp_clk_src_time_invoke(struct gptp_clk_src_time_invoke_params *arg);
/**
* @brief Return pointer to gPTP packet header in network packet.
*
* @param pkt Network packet (received or sent)
*
* @return Pointer to gPTP header.
*/
struct gptp_hdr *gptp_get_hdr(struct net_pkt *pkt);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_GPTP_H_ */
``` | /content/code_sandbox/include/zephyr/net/gptp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,224 |
```objective-c
/** @file
* @brief CoAP client API
*
* An API for applications to do CoAP requests
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_COAP_CLIENT_H_
#define ZEPHYR_INCLUDE_NET_COAP_CLIENT_H_
/**
* @brief CoAP client API
* @defgroup coap_client CoAP client API
* @since 3.4
* @version 0.1.0
* @ingroup networking
* @{
*/
#include <zephyr/net/coap.h>
#include <zephyr/kernel.h>
/** Maximum size of a CoAP message */
#define MAX_COAP_MSG_LEN (CONFIG_COAP_CLIENT_MESSAGE_HEADER_SIZE + \
CONFIG_COAP_CLIENT_MESSAGE_SIZE)
/**
* @typedef coap_client_response_cb_t
* @brief Callback for CoAP request.
*
* This callback is called for responses to CoAP client requests.
* It is used to indicate errors, response codes from server or to deliver payload.
* Blockwise transfers cause this callback to be called sequentially with increasing payload offset
* and only partial content in buffer pointed by payload parameter.
*
* @param result_code Result code of the response. Negative if there was a failure in send.
* @ref coap_response_code for positive.
* @param offset Payload offset from the beginning of a blockwise transfer.
* @param payload Buffer containing the payload from the response. NULL for empty payload.
* @param len Size of the payload.
* @param last_block Indicates the last block of the response.
* @param user_data User provided context.
*/
typedef void (*coap_client_response_cb_t)(int16_t result_code,
size_t offset, const uint8_t *payload, size_t len,
bool last_block, void *user_data);
/**
 * @brief Representation of a CoAP client request.
 *
 * @note NOTE(review): the buffers referenced by @c path, @c payload and
 * @c options presumably must stay valid until the final response callback —
 * confirm the lifetime requirements against the implementation.
 */
struct coap_client_request {
enum coap_method method; /**< Method of the request */
bool confirmable; /**< CoAP Confirmable/Non-confirmable message */
const char *path; /**< Path of the requested resource */
enum coap_content_format fmt; /**< Content format to be used */
uint8_t *payload; /**< User allocated buffer for send request */
size_t len; /**< Length of the payload */
coap_client_response_cb_t cb; /**< Callback when response received */
struct coap_client_option *options; /**< Extra options to be added to request */
uint8_t num_options; /**< Number of extra options */
void *user_data; /**< User provided context */
};
/**
 * @brief Representation of extra options for the CoAP client request
 */
struct coap_client_option {
/** Option code */
uint16_t code;
#if defined(CONFIG_COAP_EXTENDED_OPTIONS_LEN)
/** Option len */
uint16_t len;
/** Buffer for the option value */
uint8_t value[CONFIG_COAP_EXTENDED_OPTIONS_LEN_VALUE];
#else
/** Option len */
uint8_t len;
/** Buffer for the option value */
uint8_t value[12];
#endif
};
/** @cond INTERNAL_HIDDEN */
/* Internal book-keeping for a single in-flight CoAP exchange. */
struct coap_client_internal_request {
/* Token of the outgoing request */
uint8_t request_token[COAP_TOKEN_MAX_LEN];
/* Current payload offset of a blockwise transfer */
uint32_t offset;
uint32_t last_id;
/* Token length in bytes */
uint8_t request_tkl;
/* True while a response is still expected for this slot */
bool request_ongoing;
/* Set while the user response callback is running */
atomic_t in_callback;
struct coap_block_context recv_blk_ctx;
struct coap_block_context send_blk_ctx;
struct coap_pending pending;
struct coap_client_request coap_request;
struct coap_packet request;
uint8_t request_tag[COAP_TOKEN_MAX_LEN];
/* For GETs with observe option set */
bool is_observe;
int last_response_id;
};
/* Client instance state; applications should treat this as opaque and
 * use only the coap_client_* API functions declared below.
 */
struct coap_client {
int fd;
struct sockaddr address;
socklen_t socklen;
bool response_ready;
struct k_mutex lock;
uint8_t send_buf[MAX_COAP_MSG_LEN];
uint8_t recv_buf[MAX_COAP_MSG_LEN];
struct coap_client_internal_request requests[CONFIG_COAP_CLIENT_MAX_REQUESTS];
struct coap_option echo_option;
bool send_echo;
};
/** @endcond */
/**
 * @brief Initialize the CoAP client.
 *
 * @param[in] client Client instance.
 * @param[in] info Name for the receiving thread of the client. Setting this NULL will result as
 * default name of "coap_client".
 *
 * @return int Zero on success, otherwise a negative error code.
 */
int coap_client_init(struct coap_client *client, const char *info);
/**
 * @brief Send CoAP request
 *
 * Operation is handled asynchronously using a background thread.
 * If the socket isn't connected to a destination address, user must provide a destination address,
 * otherwise the address should be set as NULL.
 * Once the callback is called with last block set as true, socket can be closed or
 * used for another query.
 *
 * @param client Client instance.
 * @param sock Open socket file descriptor.
 * @param addr the destination address of the request, NULL if socket is already connected.
 * @param req CoAP request structure
 * @param params Pointer to transmission parameters structure or NULL to use default values.
 * @return zero when operation started successfully or negative error code otherwise.
 */
int coap_client_req(struct coap_client *client, int sock, const struct sockaddr *addr,
struct coap_client_request *req, struct coap_transmission_parameters *params);
/**
 * @brief Cancel all current requests.
 *
 * This is intended for canceling long-running requests (e.g. GETs with the OBSERVE option set)
 * which have gone stale for some reason.
 *
 * @param client Client instance.
 */
void coap_client_cancel_requests(struct coap_client *client);
/**
* @brief Initialise a Block2 option to be added to a request
*
* If the application expects a request to require a blockwise transfer, it may pre-emptively
* suggest a maximum block size to the server - see RFC7959 Figure 3: Block-Wise GET with Early
* Negotiation.
*
* This helper function returns a Block2 option to send with the initial request.
*
* @return CoAP client initial Block2 option structure
*/
static inline struct coap_client_option coap_client_option_initial_block2(void)
{
struct coap_client_option block2 = {
.code = COAP_OPTION_BLOCK2,
.len = 1,
.value[0] = coap_bytes_to_block_size(CONFIG_COAP_CLIENT_BLOCK_SIZE),
};
return block2;
}
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_COAP_CLIENT_H_ */
``` | /content/code_sandbox/include/zephyr/net/coap_client.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,359 |
```objective-c
/*
*
*/
/** @file mqtt.h
*
* @brief MQTT Client Implementation
*
* @note The implementation assumes TCP module is enabled.
*
* @note By default the implementation uses MQTT version 3.1.1.
*
* @defgroup mqtt_socket MQTT Client library
* @since 1.14
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifndef ZEPHYR_INCLUDE_NET_MQTT_H_
#define ZEPHYR_INCLUDE_NET_MQTT_H_
#include <stddef.h>
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/net/tls_credentials.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/sys/mutex.h>
#include <zephyr/net/websocket.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief MQTT Asynchronous Events notified to the application from the module
 * through the callback registered by the application.
 */
enum mqtt_evt_type {
/** Acknowledgment of connection request. Event result accompanying
 * the event indicates whether the connection failed or succeeded.
 */
MQTT_EVT_CONNACK,
/** Disconnection Event. MQTT Client Reference is no longer valid once
 * this event is received for the client.
 */
MQTT_EVT_DISCONNECT,
/** Publish event received when message is published on a topic client
 * is subscribed to.
 *
 * @note PUBLISH event structure only contains payload size, the payload
 * data parameter should be ignored. Payload content has to be
 * read manually with @ref mqtt_read_publish_payload function.
 */
MQTT_EVT_PUBLISH,
/** Acknowledgment for published message with QoS 1. */
MQTT_EVT_PUBACK,
/** Reception confirmation for published message with QoS 2. */
MQTT_EVT_PUBREC,
/** Release of published message with QoS 2. */
MQTT_EVT_PUBREL,
/** Confirmation to a publish release message with QoS 2. */
MQTT_EVT_PUBCOMP,
/** Acknowledgment to a subscribe request. */
MQTT_EVT_SUBACK,
/** Acknowledgment to an unsubscribe request. */
MQTT_EVT_UNSUBACK,
/** Ping Response from server. */
MQTT_EVT_PINGRESP,
};
/** @brief MQTT version protocol level. */
enum mqtt_version {
MQTT_VERSION_3_1_0 = 3, /**< Protocol level for 3.1.0. */
MQTT_VERSION_3_1_1 = 4 /**< Protocol level for 3.1.1. */
};
/** @brief MQTT Quality of Service types. */
enum mqtt_qos {
/** Lowest Quality of Service, no acknowledgment needed for published
 * message.
 */
MQTT_QOS_0_AT_MOST_ONCE = 0x00,
/** Medium Quality of Service, if acknowledgment expected for published
 * message, duplicate messages permitted.
 */
MQTT_QOS_1_AT_LEAST_ONCE = 0x01,
/** Highest Quality of Service, acknowledgment expected and message
 * shall be published only once. Message not published to interested
 * parties unless client issues a PUBREL.
 */
MQTT_QOS_2_EXACTLY_ONCE = 0x02
};
/** @brief MQTT CONNACK return codes.
 *
 * Values as defined by the MQTT 3.1.1 specification (CONNACK return codes).
 */
enum mqtt_conn_return_code {
/** Connection accepted. */
MQTT_CONNECTION_ACCEPTED = 0x00,
/** The Server does not support the level of the MQTT protocol
 * requested by the Client.
 */
MQTT_UNACCEPTABLE_PROTOCOL_VERSION = 0x01,
/** The Client identifier is correct UTF-8 but not allowed by the
 * Server.
 */
MQTT_IDENTIFIER_REJECTED = 0x02,
/** The Network Connection has been made but the MQTT service is
 * unavailable.
 */
MQTT_SERVER_UNAVAILABLE = 0x03,
/** The data in the user name or password is malformed. */
MQTT_BAD_USER_NAME_OR_PASSWORD = 0x04,
/** The Client is not authorized to connect. */
MQTT_NOT_AUTHORIZED = 0x05
};
/** @brief MQTT SUBACK return codes. */
enum mqtt_suback_return_code {
/** Subscription with QoS 0 succeeded. */
MQTT_SUBACK_SUCCESS_QoS_0 = 0x00,
/** Subscription with QoS 1 succeeded. */
MQTT_SUBACK_SUCCESS_QoS_1 = 0x01,
/** Subscription with QoS 2 succeeded. */
MQTT_SUBACK_SUCCESS_QoS_2 = 0x02,
/** Subscription for a topic failed. */
MQTT_SUBACK_FAILURE = 0x80
};
/** @brief Abstracts UTF-8 encoded strings. */
struct mqtt_utf8 {
const uint8_t *utf8; /**< Pointer to UTF-8 string. */
uint32_t size; /**< Size of UTF-8 string, in bytes. */
};
/**
 * @brief Initialize UTF-8 encoded string from C literal string.
 *
 * Use it as follows:
 *
 * struct mqtt_utf8 password = MQTT_UTF8_LITERAL("my_pass");
 *
 * @param[in] literal Literal string from which to generate mqtt_utf8 object.
 */
#define MQTT_UTF8_LITERAL(literal) \
((struct mqtt_utf8) {literal, sizeof(literal) - 1})
/** @brief Abstracts binary strings. */
struct mqtt_binstr {
uint8_t *data; /**< Pointer to binary stream. */
uint32_t len; /**< Length of binary stream. */
};
/** @brief Abstracts MQTT UTF-8 encoded topic that can be subscribed
 * to or published.
 */
struct mqtt_topic {
/** Topic on to be published or subscribed to. */
struct mqtt_utf8 topic;
/** Quality of service requested for the subscription.
 * @ref mqtt_qos for details.
 */
uint8_t qos;
};
/** @brief Parameters for a publish message. */
struct mqtt_publish_message {
struct mqtt_topic topic; /**< Topic on which data was published. */
struct mqtt_binstr payload; /**< Payload on the topic published. */
};
/** @brief Parameters for a connection acknowledgment (CONNACK). */
struct mqtt_connack_param {
/** The Session Present flag enables a Client to establish whether
 * the Client and Server have a consistent view about whether there
 * is already stored Session state.
 */
uint8_t session_present_flag;
/** The appropriate non-zero Connect return code indicates if the Server
 * is unable to process a connection request for some reason.
 */
enum mqtt_conn_return_code return_code;
};
/** @brief Parameters for MQTT publish acknowledgment (PUBACK). */
struct mqtt_puback_param {
/** Message id of the PUBLISH message being acknowledged */
uint16_t message_id;
};
/** @brief Parameters for MQTT publish receive (PUBREC). */
struct mqtt_pubrec_param {
/** Message id of the PUBLISH message being acknowledged */
uint16_t message_id;
};
/** @brief Parameters for MQTT publish release (PUBREL). */
struct mqtt_pubrel_param {
/** Message id of the PUBREC message being acknowledged */
uint16_t message_id;
};
/** @brief Parameters for MQTT publish complete (PUBCOMP). */
struct mqtt_pubcomp_param {
/** Message id of the PUBREL message being acknowledged */
uint16_t message_id;
};
/** @brief Parameters for MQTT subscription acknowledgment (SUBACK). */
struct mqtt_suback_param {
/** Message id of the SUBSCRIBE message being acknowledged */
uint16_t message_id;
/** Return codes indicating maximum QoS level granted for each topic
 * in the subscription list. See @ref mqtt_suback_return_code for
 * the possible per-topic values.
 */
struct mqtt_binstr return_codes;
};
/** @brief Parameters for MQTT unsubscribe acknowledgment (UNSUBACK). */
struct mqtt_unsuback_param {
/** Message id of the UNSUBSCRIBE message being acknowledged */
uint16_t message_id;
};
/** @brief Parameters for a publish message (PUBLISH). */
struct mqtt_publish_param {
/** Messages including topic, QoS and its payload (if any)
 * to be published.
 */
struct mqtt_publish_message message;
/** Message id used for the publish message. Redundant for QoS 0. */
uint16_t message_id;
/** Duplicate flag. If 1, it indicates the message is being
 * retransmitted. Has no meaning with QoS 0.
 */
uint8_t dup_flag : 1;
/** Retain flag. If 1, the message shall be stored persistently
 * by the broker.
 */
uint8_t retain_flag : 1;
};
/** @brief List of topics in a subscription request. */
struct mqtt_subscription_list {
/** Array containing topics along with QoS for each. */
struct mqtt_topic *list;
/** Number of topics in the subscription list */
uint16_t list_count;
/** Message id used to identify subscription request. */
uint16_t message_id;
};
/**
 * @brief Defines event parameters notified along with asynchronous events
 * to the application.
 */
union mqtt_evt_param {
/** Parameters accompanying MQTT_EVT_CONNACK event. */
struct mqtt_connack_param connack;
/** Parameters accompanying MQTT_EVT_PUBLISH event.
 *
 * @note PUBLISH event structure only contains payload size, the payload
 * data parameter should be ignored. Payload content has to be
 * read manually with @ref mqtt_read_publish_payload function.
 */
struct mqtt_publish_param publish;
/** Parameters accompanying MQTT_EVT_PUBACK event. */
struct mqtt_puback_param puback;
/** Parameters accompanying MQTT_EVT_PUBREC event. */
struct mqtt_pubrec_param pubrec;
/** Parameters accompanying MQTT_EVT_PUBREL event. */
struct mqtt_pubrel_param pubrel;
/** Parameters accompanying MQTT_EVT_PUBCOMP event. */
struct mqtt_pubcomp_param pubcomp;
/** Parameters accompanying MQTT_EVT_SUBACK event. */
struct mqtt_suback_param suback;
/** Parameters accompanying MQTT_EVT_UNSUBACK event. */
struct mqtt_unsuback_param unsuback;
};
/** @brief Defines MQTT asynchronous event notified to the application. */
struct mqtt_evt {
/** Identifies the event. */
enum mqtt_evt_type type;
/** Contains parameters (if any) accompanying the event.
 * Which union member is valid is determined by @c type.
 */
union mqtt_evt_param param;
/** Event result. 0 or a negative error code (errno.h) indicating
 * reason of failure.
 */
int result;
};
struct mqtt_client;
/**
 * @brief Asynchronous event notification callback registered by the
 * application.
 *
 * @param[in] client Identifies the client for which the event is notified.
 * @param[in] evt Event description along with result and associated
 * parameters (if any).
 */
typedef void (*mqtt_evt_cb_t)(struct mqtt_client *client,
const struct mqtt_evt *evt);
/** @brief TLS configuration for secure MQTT transports. */
struct mqtt_sec_config {
/** Indicates the preference for peer verification. */
int peer_verify;
/** Indicates the number of entries in the cipher list. */
uint32_t cipher_count;
/** Indicates the list of ciphers to be used for the session.
 * May be NULL to use the default ciphers.
 */
const int *cipher_list;
/** Indicates the number of entries in the sec tag list. */
uint32_t sec_tag_count;
/** Indicates the list of security tags to be used for the session. */
const sec_tag_t *sec_tag_list;
#if defined(CONFIG_MQTT_LIB_TLS_USE_ALPN)
/**
 * Pointer to array of string indicating the ALPN protocol name.
 * May be NULL to skip ALPN protocol negotiation.
 */
const char **alpn_protocol_name_list;
/**
 * Indicate number of ALPN protocol name in alpn protocol name list.
 */
uint32_t alpn_protocol_name_count;
#endif
/** Peer hostname for certificate verification.
 * May be NULL to skip hostname verification.
 */
const char *hostname;
/** Indicates the preference for copying certificates to the heap. */
int cert_nocopy;
};
/** @brief MQTT transport type.
 *
 * Which enumerators are available depends on the enabled Kconfig options.
 */
enum mqtt_transport_type {
/** Use non secure TCP transport for MQTT connection. */
MQTT_TRANSPORT_NON_SECURE,
#if defined(CONFIG_MQTT_LIB_TLS)
/** Use secure TCP transport (TLS) for MQTT connection. */
MQTT_TRANSPORT_SECURE,
#endif /* CONFIG_MQTT_LIB_TLS */
#if defined(CONFIG_MQTT_LIB_WEBSOCKET)
/** Use non secure Websocket transport for MQTT connection. */
MQTT_TRANSPORT_NON_SECURE_WEBSOCKET,
#if defined(CONFIG_MQTT_LIB_TLS)
/** Use secure Websocket transport (TLS) for MQTT connection. */
MQTT_TRANSPORT_SECURE_WEBSOCKET,
#endif
#endif /* CONFIG_MQTT_LIB_WEBSOCKET */
#if defined(CONFIG_MQTT_LIB_CUSTOM_TRANSPORT)
/** Use custom transport for MQTT connection. */
MQTT_TRANSPORT_CUSTOM,
#endif /* CONFIG_MQTT_LIB_CUSTOM_TRANSPORT */
/** Shall not be used as a transport type.
 * Indicator of maximum transport types possible.
 */
MQTT_TRANSPORT_NUM
};
/** @brief MQTT transport specific data. */
struct mqtt_transport {
/** Transport type selection for client instance.
 * @ref mqtt_transport_type for possible values. MQTT_TRANSPORT_NUM
 * is not a valid type.
 */
enum mqtt_transport_type type;
/** Use either unsecured TCP or secured TLS transport */
union {
/** TCP socket transport for MQTT */
struct {
/** Socket descriptor. */
int sock;
} tcp;
#if defined(CONFIG_MQTT_LIB_TLS)
/** TLS socket transport for MQTT */
struct {
/** Socket descriptor. */
int sock;
/** TLS configuration. See @ref mqtt_sec_config for
 * details.
 */
struct mqtt_sec_config config;
} tls;
#endif /* CONFIG_MQTT_LIB_TLS */
};
#if defined(CONFIG_MQTT_LIB_WEBSOCKET)
/** Websocket transport for MQTT */
struct {
/** Websocket configuration. */
struct websocket_request config;
/** Socket descriptor */
int sock;
/** Websocket timeout, in milliseconds. */
int32_t timeout;
} websocket;
#endif
#if defined(CONFIG_MQTT_LIB_CUSTOM_TRANSPORT)
/** User defined data for custom transport for MQTT. */
void *custom_transport_data;
#endif /* CONFIG_MQTT_LIB_CUSTOM_TRANSPORT */
#if defined(CONFIG_SOCKS)
/** SOCKS proxy configuration, see mqtt_client_set_proxy(). */
struct {
/** Proxy server address. */
struct sockaddr addr;
/** Length of the proxy server address. */
socklen_t addrlen;
} proxy;
#endif
};
/** @brief MQTT internal state. Applications should treat these fields as
 * opaque; they are managed by the library.
 */
struct mqtt_internal {
/** Internal. Mutex to protect access to the client instance. */
struct sys_mutex mutex;
/** Internal. Wall clock value (in milliseconds) of the last activity
 * that occurred. Needed for periodic PING.
 */
uint32_t last_activity;
/** Internal. Client's state in the connection. */
uint32_t state;
/** Internal. Packet length read so far. */
uint32_t rx_buf_datalen;
/** Internal. Remaining payload length to read. */
uint32_t remaining_payload;
};
/**
 * @brief MQTT Client definition to maintain information relevant to the
 * client.
 */
struct mqtt_client {
/** MQTT client internal state. */
struct mqtt_internal internal;
/** MQTT transport configuration and data. */
struct mqtt_transport transport;
/** Unique client identification to be used for the connection. */
struct mqtt_utf8 client_id;
/** Broker details, for example, address, port. Address type should
 * be compatible with transport used.
 */
const void *broker;
/** User name (if any) to be used for the connection. NULL indicates
 * no user name.
 */
struct mqtt_utf8 *user_name;
/** Password (if any) to be used for the connection. Note that if
 * password is provided, user name shall also be provided. NULL
 * indicates no password.
 */
struct mqtt_utf8 *password;
/** Will topic and QoS. Can be NULL. */
struct mqtt_topic *will_topic;
/** Will message. Can be NULL. Non NULL value valid only if will topic
 * is not NULL.
 */
struct mqtt_utf8 *will_message;
/** Application callback registered with the module to get MQTT events.
 */
mqtt_evt_cb_t evt_cb;
/** Receive buffer used for MQTT packet reception in RX path. */
uint8_t *rx_buf;
/** Size of receive buffer. */
uint32_t rx_buf_size;
/** Transmit buffer used for creating MQTT packet in TX path. */
uint8_t *tx_buf;
/** Size of transmit buffer. */
uint32_t tx_buf_size;
/** Keepalive interval for this client in seconds.
 * Default is CONFIG_MQTT_KEEPALIVE.
 */
uint16_t keepalive;
/** MQTT protocol version. */
uint8_t protocol_version;
/** Unanswered PINGREQ count on this connection.
 * NOTE(review): presumably maintained by the library to detect an
 * unresponsive broker — confirm in implementation.
 */
int8_t unacked_ping;
/** Will retain flag, 1 if will message shall be retained persistently.
 */
uint8_t will_retain : 1;
/** Clean session flag indicating a fresh (1) or a retained session (0).
 * Default is CONFIG_MQTT_CLEAN_SESSION.
 */
uint8_t clean_session : 1;
/** User specific opaque data */
void *user_data;
};
/**
 * @brief Initializes the client instance.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 *
 * @note Shall be called to initialize client structure, before setting any
 * client parameters and before connecting to broker.
 */
void mqtt_client_init(struct mqtt_client *client);
#if defined(CONFIG_SOCKS)
/**
 * @brief Set proxy server details
 *
 * @param[in] client Client instance for which the procedure is requested,
 * Shall not be NULL.
 * @param[in] proxy_addr Proxy server address.
 * @param[in] addrlen Proxy server address length.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 *
 * @note Must be called before calling mqtt_connect().
 */
int mqtt_client_set_proxy(struct mqtt_client *client,
struct sockaddr *proxy_addr,
socklen_t addrlen);
#endif
/**
 * @brief API to request new MQTT client connection.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 *
 * @note This memory is assumed to be resident until mqtt_disconnect is called.
 * @note Any subsequent changes to parameters like broker address, user name,
 * device id, etc. have no effect once MQTT connection is established.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 *
 * @note Default protocol revision used for connection request is 3.1.1. Please
 * set client.protocol_version = MQTT_VERSION_3_1_0 to use protocol 3.1.0.
 * @note Please modify @kconfig{CONFIG_MQTT_KEEPALIVE} time to override default
 * of 1 minute.
 */
int mqtt_connect(struct mqtt_client *client);
/**
 * @brief API to publish messages on topics.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 * @param[in] param Parameters to be used for the publish message.
 * Shall not be NULL.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_publish(struct mqtt_client *client,
const struct mqtt_publish_param *param);
/**
 * @brief API used by client to send acknowledgment on receiving QoS1 publish
 * message. Should be called on reception of @ref MQTT_EVT_PUBLISH with
 * QoS level @ref MQTT_QOS_1_AT_LEAST_ONCE.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 * @param[in] param Identifies message being acknowledged.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_publish_qos1_ack(struct mqtt_client *client,
const struct mqtt_puback_param *param);
/**
 * @brief API used by client to send acknowledgment on receiving QoS2 publish
 * message. Should be called on reception of @ref MQTT_EVT_PUBLISH with
 * QoS level @ref MQTT_QOS_2_EXACTLY_ONCE.
 *
 * @param[in] client Identifies client instance for which the procedure is
 * requested. Shall not be NULL.
 * @param[in] param Identifies message being acknowledged.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_publish_qos2_receive(struct mqtt_client *client,
const struct mqtt_pubrec_param *param);
/**
 * @brief API used by client to request release of QoS2 publish message.
 * Should be called on reception of @ref MQTT_EVT_PUBREC.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 * @param[in] param Identifies message being released.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_publish_qos2_release(struct mqtt_client *client,
const struct mqtt_pubrel_param *param);
/**
 * @brief API used by client to send acknowledgment on receiving QoS2 publish
 * release message. Should be called on reception of
 * @ref MQTT_EVT_PUBREL.
 *
 * @param[in] client Identifies client instance for which the procedure is
 * requested. Shall not be NULL.
 * @param[in] param Identifies message being completed.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_publish_qos2_complete(struct mqtt_client *client,
const struct mqtt_pubcomp_param *param);
/**
 * @brief API to request subscription of one or more topics on the connection.
 *
 * @param[in] client Identifies client instance for which the procedure
 * is requested. Shall not be NULL.
 * @param[in] param Subscription parameters. Shall not be NULL.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_subscribe(struct mqtt_client *client,
const struct mqtt_subscription_list *param);
/**
 * @brief API to request unsubscription of one or more topics on the connection.
 *
 * @param[in] client Identifies client instance for which the procedure is
 * requested. Shall not be NULL.
 * @param[in] param Parameters describing topics being unsubscribed from.
 * Shall not be NULL.
 *
 * @note QoS included in topic description is unused in this API.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_unsubscribe(struct mqtt_client *client,
const struct mqtt_subscription_list *param);
/**
 * @brief API to send MQTT ping. The use of this API is optional, as the library
 * handles the connection keep-alive on its own, see @ref mqtt_live.
 *
 * @param[in] client Identifies client instance for which procedure is
 * requested.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_ping(struct mqtt_client *client);
/**
 * @brief API to disconnect MQTT connection.
 *
 * @param[in] client Identifies client instance for which procedure is
 * requested.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_disconnect(struct mqtt_client *client);
/**
 * @brief API to abort MQTT connection. This will close the corresponding
 * transport without closing the connection gracefully at the MQTT level
 * (with disconnect message).
 *
 * @param[in] client Identifies client instance for which procedure is
 * requested.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_abort(struct mqtt_client *client);
/**
 * @brief This API should be called periodically for the client to be able
 * to keep the connection alive by sending Ping Requests if need be.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 *
 * @note Application shall ensure that the periodicity of calling this function
 * makes it possible to respect the Keep Alive time agreed with the
 * broker on connection. @ref mqtt_connect for details on Keep Alive
 * time.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_live(struct mqtt_client *client);
/**
 * @brief Helper function to determine when next keep alive message should be
 * sent. Can be used, for instance, as a source for a `poll` timeout.
 *
 * @param[in] client Client instance for which the procedure is requested.
 *
 * @return Time in milliseconds until next keep alive message is expected to
 * be sent. Function will return -1 if keep alive messages are
 * not enabled.
 */
int mqtt_keepalive_time_left(const struct mqtt_client *client);
/**
 * @brief Receive an incoming MQTT packet. The registered callback will be
 * called with the packet content.
 *
 * @note In case of PUBLISH message, the payload has to be read separately with
 * @ref mqtt_read_publish_payload function. The size of the payload to
 * read is provided in the publish event structure.
 *
 * @note This is a non-blocking call.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 *
 * @return 0 or a negative error code (errno.h) indicating reason of failure.
 */
int mqtt_input(struct mqtt_client *client);
/**
 * @brief Read the payload of the received PUBLISH message. This function should
 * be called within the MQTT event handler, when MQTT PUBLISH message is
 * notified.
 *
 * @note This is a non-blocking call.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 * @param[out] buffer Buffer where payload should be stored.
 * @param[in] length Length of the buffer, in bytes.
 *
 * @return Number of bytes read or a negative error code (errno.h) indicating
 * reason of failure.
 */
int mqtt_read_publish_payload(struct mqtt_client *client, void *buffer,
size_t length);
/**
 * @brief Blocking version of @ref mqtt_read_publish_payload function.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 * @param[out] buffer Buffer where payload should be stored.
 * @param[in] length Length of the buffer, in bytes.
 *
 * @return Number of bytes read or a negative error code (errno.h) indicating
 * reason of failure.
 */
int mqtt_read_publish_payload_blocking(struct mqtt_client *client, void *buffer,
size_t length);
/**
 * @brief Blocking version of @ref mqtt_read_publish_payload function which
 * runs until the required number of bytes are read.
 *
 * @param[in] client Client instance for which the procedure is requested.
 * Shall not be NULL.
 * @param[out] buffer Buffer where payload should be stored.
 * @param[in] length Number of bytes to read.
 *
 * @return 0 if success, otherwise a negative error code (errno.h) indicating
 * reason of failure.
 */
int mqtt_readall_publish_payload(struct mqtt_client *client, uint8_t *buffer,
size_t length);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_MQTT_H_ */
/**@} */
``` | /content/code_sandbox/include/zephyr/net/mqtt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,940 |
```objective-c
/*
*
*/
/** @file icmp.h
*
* @brief ICMP sending and receiving.
*
* @defgroup icmp Send and receive IPv4 or IPv6 ICMP Echo Request messages.
* @since 3.5
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifndef ZEPHYR_INCLUDE_NET_ICMP_H_
#define ZEPHYR_INCLUDE_NET_ICMP_H_
#include <stddef.h>
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#ifdef __cplusplus
extern "C" {
#endif
#define NET_ICMPV4_ECHO_REQUEST 8 /**< ICMPv4 Echo-Request */
#define NET_ICMPV4_ECHO_REPLY 0 /**< ICMPv4 Echo-Reply */
#define NET_ICMPV6_ECHO_REQUEST 128 /**< ICMPv6 Echo-Request */
#define NET_ICMPV6_ECHO_REPLY 129 /**< ICMPv6 Echo-Reply */
struct net_icmp_ctx;
struct net_icmp_ip_hdr;
struct net_icmp_ping_params;
/**
 * @typedef net_icmp_handler_t
 * @brief Handler function that is called when ICMP response is received.
 *
 * @param ctx ICMP context to use.
 * @param pkt Received ICMP response network packet.
 * @param ip_hdr IP header of the packet.
 * @param icmp_hdr ICMP header of the packet.
 * @param user_data A valid pointer to user data or NULL
 *
 * @return NOTE(review): presumably 0 on success and a negative errno
 * otherwise — confirm the expected convention in the ICMP implementation.
 */
typedef int (*net_icmp_handler_t)(struct net_icmp_ctx *ctx,
struct net_pkt *pkt,
struct net_icmp_ip_hdr *ip_hdr,
struct net_icmp_hdr *icmp_hdr,
void *user_data);
/**
 * @typedef net_icmp_offload_ping_handler_t
 * @brief Handler function that is called when an Echo-Request is sent
 * to offloaded device. This handler is typically setup by the
 * device driver so that it can catch the ping request and send
 * it to the offloaded device.
 *
 * @param ctx ICMP context used in this request.
 * @param iface Network interface, can be set to NULL in which case the
 * interface is selected according to destination address.
 * @param dst IP address of the target host.
 * @param params Echo-Request specific parameters. May be NULL in which case
 * suitable default parameters are used.
 * @param user_data User supplied opaque data passed to the handler. May be NULL.
 *
 * @return NOTE(review): presumably 0 on success and a negative errno
 * otherwise — confirm the expected convention in the ICMP implementation.
 */
typedef int (*net_icmp_offload_ping_handler_t)(struct net_icmp_ctx *ctx,
struct net_if *iface,
struct sockaddr *dst,
struct net_icmp_ping_params *params,
void *user_data);
/**
* @brief ICMP context structure.
*/
struct net_icmp_ctx {
	/** List node, used to chain this context into the system's handler list */
	sys_snode_t node;
	/** ICMP response handler */
	net_icmp_handler_t handler;
	/** Network interface where the ICMP request was sent */
	struct net_if *iface;
	/** Opaque user supplied data */
	void *user_data;
	/** ICMP type of the response we are waiting for */
	uint8_t type;
	/** ICMP code of the response type we are waiting for */
	uint8_t code;
};
/**
* @brief Struct presents either IPv4 or IPv6 header in ICMP response message.
*/
struct net_icmp_ip_hdr {
union {
/** IPv4 header in response message. */
struct net_ipv4_hdr *ipv4;
/** IPv6 header in response message. */
struct net_ipv6_hdr *ipv6;
};
/** Is the header IPv4 or IPv6 one. Value of either AF_INET or AF_INET6 */
sa_family_t family;
};
/**
* @brief Struct presents parameters that are needed when sending
* Echo-Request (ping) messages.
*/
struct net_icmp_ping_params {
/** An identifier to aid in matching Echo Replies to this Echo Request.
* May be zero.
*/
uint16_t identifier;
/** A sequence number to aid in matching Echo Replies to this
* Echo Request. May be zero.
*/
uint16_t sequence;
/** Can be either IPv4 Type-of-service field value, or IPv6 Traffic
* Class field value. Represents combined DSCP and ECN values.
*/
uint8_t tc_tos;
/** Network packet priority. */
int priority;
/** Arbitrary payload data that will be included in the Echo Reply
* verbatim. May be NULL.
*/
const void *data;
/** Size of the Payload Data in bytes. May be zero. In case data
* pointer is NULL, the function will generate the payload up to
* the requested size.
*/
size_t data_size;
};
/**
* @brief Initialize the ICMP context structure. Must be called before
* ICMP messages can be sent. This will register handler to the
* system.
*
* @param ctx ICMP context used in this request.
* @param type Type of ICMP message we are handling.
* @param code Code of ICMP message we are handling.
* @param handler Callback function that is called when a response is received.
*/
int net_icmp_init_ctx(struct net_icmp_ctx *ctx, uint8_t type, uint8_t code,
net_icmp_handler_t handler);
/**
* @brief Cleanup the ICMP context structure. This will unregister the ICMP handler
* from the system.
*
* @param ctx ICMP context used in this request.
*/
int net_icmp_cleanup_ctx(struct net_icmp_ctx *ctx);
/**
* @brief Send ICMP echo request message.
*
* @param ctx ICMP context used in this request.
* @param iface Network interface, can be set to NULL in which case the
* interface is selected according to destination address.
* @param dst IP address of the target host.
* @param params Echo-Request specific parameters. May be NULL in which case
* suitable default parameters are used.
* @param user_data User supplied opaque data passed to the handler. May be NULL.
*
* @return Return 0 if the sending succeed, <0 otherwise.
*/
int net_icmp_send_echo_request(struct net_icmp_ctx *ctx,
struct net_if *iface,
struct sockaddr *dst,
struct net_icmp_ping_params *params,
void *user_data);
/**
* @brief ICMP offload context structure.
*/
struct net_icmp_offload {
/** List node */
sys_snode_t node;
/**
* ICMP response handler. Currently there is only one handler.
* This means that one offloaded ping request/response can be going
* on at the same time.
*/
net_icmp_handler_t handler;
/** ICMP offloaded ping handler */
net_icmp_offload_ping_handler_t ping_handler;
/** Offloaded network interface */
struct net_if *iface;
};
/**
* @brief Register a handler function that is called when an Echo-Request
* is sent to the offloaded device. This function is typically
* called by a device driver so that it can do the actual offloaded
* ping call.
*
* @param ctx ICMP offload context used for this interface.
* @param iface Network interface of the offloaded device.
* @param ping_handler Function to be called when offloaded ping request is done.
*
* @return Return 0 if the register succeed, <0 otherwise.
*/
int net_icmp_register_offload_ping(struct net_icmp_offload *ctx,
struct net_if *iface,
net_icmp_offload_ping_handler_t ping_handler);
/**
* @brief Unregister the offload handler.
*
* @param ctx ICMP offload context used for this interface.
*
* @return Return 0 if the call succeed, <0 otherwise.
*/
int net_icmp_unregister_offload_ping(struct net_icmp_offload *ctx);
/**
* @brief Get a ICMP response handler function for an offloaded device.
* When a ping response is received by the driver, it should call
* the handler function with proper parameters so that the ICMP response
* is received by the net stack.
*
* @param ctx ICMP offload context used in this request.
* @param resp_handler Function to be called when offloaded ping response
* is received by the offloaded driver. The ICMP response handler
* function is returned and the caller should call it when appropriate.
*
* @return Return 0 if the call succeed, <0 otherwise.
*/
int net_icmp_get_offload_rsp_handler(struct net_icmp_offload *ctx,
net_icmp_handler_t *resp_handler);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_ICMP_H_ */
/**@} */
``` | /content/code_sandbox/include/zephyr/net/icmp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,847 |
```objective-c
/** @file
* @brief Virtual Network Interface
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_VIRTUAL_H_
#define ZEPHYR_INCLUDE_NET_VIRTUAL_H_
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/sys/util.h>
#include <zephyr/net/net_if.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Virtual network interface support functions
* @defgroup virtual Virtual Network Interface Support Functions
* @since 2.6
* @version 0.8.0
* @ingroup networking
* @{
*/
/** Virtual interface capabilities */
enum virtual_interface_caps {
/** IPIP tunnel */
VIRTUAL_INTERFACE_IPIP = BIT(1),
/** Virtual LAN interface (VLAN) */
VIRTUAL_INTERFACE_VLAN = BIT(2),
/** @cond INTERNAL_HIDDEN */
/* Marker for capabilities - must be at the end of the enum.
* It is here because the capability list cannot be empty.
*/
VIRTUAL_INTERFACE_NUM_CAPS
/** @endcond */
};
/** @cond INTERNAL_HIDDEN */
enum virtual_interface_config_type {
VIRTUAL_INTERFACE_CONFIG_TYPE_PEER_ADDRESS,
VIRTUAL_INTERFACE_CONFIG_TYPE_MTU,
VIRTUAL_INTERFACE_CONFIG_TYPE_LINK_TYPE,
};
struct virtual_interface_link_types {
int count;
uint16_t type[COND_CODE_1(CONFIG_NET_CAPTURE_COOKED_MODE,
(CONFIG_NET_CAPTURE_COOKED_MODE_MAX_LINK_TYPES),
(1))];
};
struct virtual_interface_config {
sa_family_t family;
union {
struct in_addr peer4addr;
struct in6_addr peer6addr;
int mtu;
struct virtual_interface_link_types link_types;
};
};
#if defined(CONFIG_NET_L2_VIRTUAL)
#define VIRTUAL_MAX_NAME_LEN CONFIG_NET_L2_VIRTUAL_MAX_NAME_LEN
#else
#define VIRTUAL_MAX_NAME_LEN 0
#endif
/** @endcond */
/** Virtual L2 API operations. */
struct virtual_interface_api {
/**
* The net_if_api must be placed in first position in this
* struct so that we are compatible with network interface API.
*/
struct net_if_api iface_api;
/** Get the virtual interface capabilities */
enum virtual_interface_caps (*get_capabilities)(struct net_if *iface);
/** Start the device */
int (*start)(const struct device *dev);
/** Stop the device */
int (*stop)(const struct device *dev);
/** Send a network packet */
int (*send)(struct net_if *iface, struct net_pkt *pkt);
/**
* Receive a network packet.
* The callback returns NET_OK if this interface will accept the
* packet and pass it upper layers, NET_DROP if the packet is to be
* dropped and NET_CONTINUE to pass it to next interface.
*/
enum net_verdict (*recv)(struct net_if *iface, struct net_pkt *pkt);
/** Pass the attachment information to virtual interface */
int (*attach)(struct net_if *virtual_iface, struct net_if *iface);
/** Set specific L2 configuration */
int (*set_config)(struct net_if *iface,
enum virtual_interface_config_type type,
const struct virtual_interface_config *config);
/** Get specific L2 configuration */
int (*get_config)(struct net_if *iface,
enum virtual_interface_config_type type,
struct virtual_interface_config *config);
};
/* Make sure that the network interface API is properly setup inside
* Virtual API struct (it is the first one).
*/
BUILD_ASSERT(offsetof(struct virtual_interface_api, iface_api) == 0);
/** Virtual L2 context that is needed to binding to the real network interface
*/
struct virtual_interface_context {
/** @cond INTERNAL_HIDDEN */
/* Keep track of contexts */
sys_snode_t node;
/* My virtual network interface */
struct net_if *virtual_iface;
/** @endcond */
/**
* Other network interface this virtual network interface is
* attached to. These values can be chained so virtual network
* interfaces can run on top of other virtual interfaces.
*/
struct net_if *iface;
/**
* This tells what L2 features does virtual support.
*/
enum net_l2_flags virtual_l2_flags;
/** Is this context already initialized */
bool is_init;
/** Link address for this network interface */
struct net_linkaddr_storage lladdr;
/** User friendly name of this L2 layer. */
char name[VIRTUAL_MAX_NAME_LEN];
};
/**
* @brief Attach virtual network interface to the given network interface.
*
* @param virtual_iface Virtual network interface.
* @param iface Network interface we are attached to. This can be NULL,
* if we want to detach.
*
* @return 0 if ok, <0 if attaching failed
*/
int net_virtual_interface_attach(struct net_if *virtual_iface,
struct net_if *iface);
/**
* @brief Return network interface related to this virtual network interface.
* The returned network interface is below this virtual network interface.
*
* @param iface Virtual network interface.
*
* @return Network interface related to this virtual interface or
* NULL if no such interface exists.
*/
struct net_if *net_virtual_get_iface(struct net_if *iface);
/**
* @brief Return the name of the virtual network interface L2.
*
* @param iface Virtual network interface.
* @param buf Buffer to store the name
* @param len Max buffer length
*
* @return Name of the virtual network interface.
*/
char *net_virtual_get_name(struct net_if *iface, char *buf, size_t len);
/**
* @brief Set the name of the virtual network interface L2.
*
* @param iface Virtual network interface.
* @param name Name of the virtual L2 layer.
*/
void net_virtual_set_name(struct net_if *iface, const char *name);
/**
* @brief Set the L2 flags of the virtual network interface.
*
* @param iface Virtual network interface.
* @param flags L2 flags to set.
*
* @return Previous flags that were set.
*/
enum net_l2_flags net_virtual_set_flags(struct net_if *iface,
enum net_l2_flags flags);
/**
* @brief Feed the IP pkt to stack if tunneling is enabled.
*
* @param input_iface Network interface receiving the pkt.
* @param remote_addr IP address of the sender.
* @param pkt Network packet.
*
* @return Verdict what to do with the packet.
*/
enum net_verdict net_virtual_input(struct net_if *input_iface,
struct net_addr *remote_addr,
struct net_pkt *pkt);
/** @cond INTERNAL_HIDDEN */
/**
* @brief Initialize the network interface so that a virtual
* interface can be attached to it.
*
* @param iface Network interface
*/
#if defined(CONFIG_NET_L2_VIRTUAL)
void net_virtual_init(struct net_if *iface);
#else
static inline void net_virtual_init(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
/**
* @brief Update the carrier state of the virtual network interface.
* This is called if the underlying interface is going down.
*
* @param iface Network interface
*/
#if defined(CONFIG_NET_L2_VIRTUAL)
void net_virtual_disable(struct net_if *iface);
#else
static inline void net_virtual_disable(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
/**
* @brief Update the carrier state of the virtual network interface.
* This is called if the underlying interface is going up.
*
* @param iface Network interface
*/
#if defined(CONFIG_NET_L2_VIRTUAL)
void net_virtual_enable(struct net_if *iface);
#else
static inline void net_virtual_enable(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
#define VIRTUAL_L2_CTX_TYPE struct virtual_interface_context
/**
* @brief Return virtual device hardware capability information.
*
* @param iface Network interface
*
* @return Hardware capabilities
*/
static inline enum virtual_interface_caps
net_virtual_get_iface_capabilities(struct net_if *iface)
{
const struct virtual_interface_api *virt =
(struct virtual_interface_api *)net_if_get_device(iface)->api;
if (!virt->get_capabilities) {
return (enum virtual_interface_caps)0;
}
return virt->get_capabilities(iface);
}
#define Z_NET_VIRTUAL_INTERFACE_INIT(node_id, dev_id, name, init_fn, \
pm, data, config, prio, api, mtu) \
Z_NET_DEVICE_INIT(node_id, dev_id, name, init_fn, pm, data, \
config, prio, api, VIRTUAL_L2, \
NET_L2_GET_CTX_TYPE(VIRTUAL_L2), mtu)
#define Z_NET_VIRTUAL_INTERFACE_INIT_INSTANCE(node_id, dev_id, name, \
inst, init_fn, pm, data, \
config, prio, api, mtu) \
Z_NET_DEVICE_INIT_INSTANCE(node_id, dev_id, name, inst, \
init_fn, pm, data, \
config, prio, api, VIRTUAL_L2, \
NET_L2_GET_CTX_TYPE(VIRTUAL_L2), mtu)
/** @endcond */
/**
* @brief Create a virtual network interface. Binding to another interface
* is done at runtime by calling net_virtual_interface_attach().
* The attaching is done automatically when setting up tunneling
* when peer IP address is set in IP tunneling driver.
*
* @param dev_id Network device id.
* @param name The name this instance of the driver exposes to
* the system.
* @param init_fn Address to the init function of the driver.
* @param pm Reference to struct pm_device associated with the device.
* (optional).
* @param data Pointer to the device's private data.
* @param config The address to the structure containing the
* configuration information for this instance of the driver.
* @param prio The initialization level at which configuration occurs.
* @param api Provides an initial pointer to the API function struct
* used by the driver. Can be NULL.
* @param mtu Maximum transfer unit in bytes for this network interface.
* This is the default value and its value can be tweaked at runtime.
*/
#define NET_VIRTUAL_INTERFACE_INIT(dev_id, name, init_fn, pm, data, \
config, prio, api, mtu) \
Z_NET_VIRTUAL_INTERFACE_INIT(DT_INVALID_NODE, dev_id, name, \
init_fn, pm, data, config, prio, \
api, mtu)
/**
* @brief Create a virtual network interface. Binding to another interface
* is done at runtime by calling net_virtual_interface_attach().
* The attaching is done automatically when setting up tunneling
* when peer IP address is set in IP tunneling driver.
*
* @param dev_id Network device id.
* @param name The name this instance of the driver exposes to
* the system.
* @param inst instance number
* @param init_fn Address to the init function of the driver.
* @param pm Reference to struct pm_device associated with the device.
* (optional).
* @param data Pointer to the device's private data.
* @param config The address to the structure containing the
* configuration information for this instance of the driver.
* @param prio The initialization level at which configuration occurs.
* @param api Provides an initial pointer to the API function struct
* used by the driver. Can be NULL.
* @param mtu Maximum transfer unit in bytes for this network interface.
* This is the default value and its value can be tweaked at runtime.
*/
#define NET_VIRTUAL_INTERFACE_INIT_INSTANCE(dev_id, name, inst, \
init_fn, pm, data, \
config, prio, api, mtu) \
Z_NET_VIRTUAL_INTERFACE_INIT_INSTANCE(DT_INVALID_NODE, dev_id, \
name, inst, \
init_fn, pm, data, config, \
prio, api, mtu)
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_VIRTUAL_H_ */
``` | /content/code_sandbox/include/zephyr/net/virtual.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,538 |
```objective-c
/** @file
* @brief VLAN specific definitions.
*
* Virtual LAN specific definitions.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_ETHERNET_VLAN_H_
#define ZEPHYR_INCLUDE_NET_ETHERNET_VLAN_H_
/**
* @brief VLAN definitions and helpers
* @defgroup vlan_api Virtual LAN definitions and helpers
* @since 1.12
* @version 0.8.0
* @ingroup networking
* @{
*/
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Unspecified VLAN tag value */
#define NET_VLAN_TAG_UNSPEC 0x0fff
/**
* @brief Get VLAN identifier from TCI.
*
* @param tci VLAN tag control information.
*
* @return VLAN identifier.
*/
static inline uint16_t net_eth_vlan_get_vid(uint16_t tci)
{
	/* The VLAN identifier occupies the low 12 bits of the TCI. */
	const uint16_t vid_mask = 0x0fff;

	return (uint16_t)(tci & vid_mask);
}
/**
* @brief Get Drop Eligible Indicator from TCI.
*
* @param tci VLAN tag control information.
*
* @return Drop eligible indicator.
*/
static inline uint8_t net_eth_vlan_get_dei(uint16_t tci)
{
	/* DEI is the single bit between the VID (bits 0-11) and PCP (bits 13-15). */
	return (uint8_t)((tci & 0x1000) ? 1 : 0);
}
/**
* @brief Get Priority Code Point from TCI.
*
* @param tci VLAN tag control information.
*
* @return Priority code point.
*/
static inline uint8_t net_eth_vlan_get_pcp(uint16_t tci)
{
	/* PCP occupies the top three bits of the TCI. */
	const unsigned int pcp_shift = 13U;

	return (uint8_t)((tci >> pcp_shift) & 0x07);
}
/**
* @brief Set VLAN identifier to TCI.
*
* @param tci VLAN tag control information.
* @param vid VLAN identifier.
*
* @return New TCI value.
*/
static inline uint16_t net_eth_vlan_set_vid(uint16_t tci, uint16_t vid)
{
	/* Preserve PCP and DEI (bits 12-15) and replace the 12-bit VID. */
	uint16_t upper = tci & 0xf000;
	uint16_t lower = vid & 0x0fff;

	return (uint16_t)(upper | lower);
}
/**
* @brief Set Drop Eligible Indicator to TCI.
*
* @param tci VLAN tag control information.
* @param dei Drop eligible indicator.
*
* @return New TCI value.
*/
static inline uint16_t net_eth_vlan_set_dei(uint16_t tci, bool dei)
{
	/* Bit 12 of the TCI holds the drop eligible indicator. */
	const uint16_t dei_bit = 0x1000;

	return (uint16_t)(dei ? (tci | dei_bit) : (tci & (uint16_t)~dei_bit));
}
/**
* @brief Set Priority Code Point to TCI.
*
* @param tci VLAN tag control information.
* @param pcp Priority code point.
*
* @return New TCI value.
*/
static inline uint16_t net_eth_vlan_set_pcp(uint16_t tci, uint8_t pcp)
{
	/* Replace the top three bits (PCP) while keeping DEI and VID intact. */
	uint16_t keep = tci & 0x1fff;
	uint16_t prio = (uint16_t)((pcp & 0x07) << 13);

	return (uint16_t)(keep | prio);
}
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_ETHERNET_VLAN_H_ */
``` | /content/code_sandbox/include/zephyr/net/ethernet_vlan.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 608 |
```objective-c
/*
*
*/
/**
* @file
* @brief PPP (Point-to-Point Protocol)
*/
#ifndef ZEPHYR_INCLUDE_NET_PPP_H_
#define ZEPHYR_INCLUDE_NET_PPP_H_
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_mgmt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Point-to-point (PPP) L2/driver support functions
* @defgroup ppp PPP L2/driver Support Functions
* @since 2.0
* @version 0.8.0
* @ingroup networking
* @{
*/
/** PPP maximum receive unit (MRU) */
#define PPP_MRU CONFIG_NET_PPP_MTU_MRU
/** PPP maximum transfer unit (MTU) */
#define PPP_MTU PPP_MRU
/** Max length of terminate description string */
#define PPP_MAX_TERMINATE_REASON_LEN 32
/** Length of network interface identifier */
#define PPP_INTERFACE_IDENTIFIER_LEN 8
/** PPP L2 API */
struct ppp_api {
/**
* The net_if_api must be placed in first position in this
* struct so that we are compatible with network interface API.
*/
struct net_if_api iface_api;
/** Start the device */
int (*start)(const struct device *dev);
/** Stop the device */
int (*stop)(const struct device *dev);
/** Send a network packet */
int (*send)(const struct device *dev, struct net_pkt *pkt);
#if defined(CONFIG_NET_STATISTICS_PPP)
/** Collect optional PPP specific statistics. This pointer
* should be set by driver if statistics needs to be collected
* for that driver.
*/
struct net_stats_ppp *(*get_stats)(const struct device *dev);
#endif
};
/* Make sure that the network interface API is properly setup inside
* PPP API struct (it is the first one).
*/
BUILD_ASSERT(offsetof(struct ppp_api, iface_api) == 0);
/**
* PPP protocol types.
 * See https://www.iana.org/assignments/ppp-numbers/ppp-numbers.xhtml
* for details.
*/
enum ppp_protocol_type {
PPP_IP = 0x0021, /**< RFC 1332 */
PPP_IPV6 = 0x0057, /**< RFC 5072 */
PPP_IPCP = 0x8021, /**< RFC 1332 */
PPP_ECP = 0x8053, /**< RFC 1968 */
PPP_IPV6CP = 0x8057, /**< RFC 5072 */
PPP_CCP = 0x80FD, /**< RFC 1962 */
PPP_LCP = 0xc021, /**< RFC 1661 */
PPP_PAP = 0xc023, /**< RFC 1334 */
PPP_CHAP = 0xc223, /**< RFC 1334 */
PPP_EAP = 0xc227, /**< RFC 2284 */
};
/**
* PPP phases
*/
enum ppp_phase {
/** Physical-layer not ready */
PPP_DEAD,
/** Link is being established */
PPP_ESTABLISH,
/** Link authentication with peer */
PPP_AUTH,
/** Network connection establishment */
PPP_NETWORK,
/** Network running */
PPP_RUNNING,
/** Link termination */
PPP_TERMINATE,
};
/** @cond INTERNAL_HIDDEN */
/**
* PPP states, RFC 1661 ch. 4.2
*/
enum ppp_state {
PPP_INITIAL,
PPP_STARTING,
PPP_CLOSED,
PPP_STOPPED,
PPP_CLOSING,
PPP_STOPPING,
PPP_REQUEST_SENT,
PPP_ACK_RECEIVED,
PPP_ACK_SENT,
PPP_OPENED
};
/**
* PPP protocol operations from RFC 1661
*/
enum ppp_packet_type {
PPP_CONFIGURE_REQ = 1,
PPP_CONFIGURE_ACK = 2,
PPP_CONFIGURE_NACK = 3,
PPP_CONFIGURE_REJ = 4,
PPP_TERMINATE_REQ = 5,
PPP_TERMINATE_ACK = 6,
PPP_CODE_REJ = 7,
PPP_PROTOCOL_REJ = 8,
PPP_ECHO_REQ = 9,
PPP_ECHO_REPLY = 10,
PPP_DISCARD_REQ = 11
};
/** @endcond */
/**
* LCP option types from RFC 1661 ch. 6
*/
enum lcp_option_type {
/** Reserved option value (do not use) */
LCP_OPTION_RESERVED = 0,
/** Maximum-Receive-Unit */
LCP_OPTION_MRU = 1,
/** Async-Control-Character-Map */
LCP_OPTION_ASYNC_CTRL_CHAR_MAP = 2,
/** Authentication-Protocol */
LCP_OPTION_AUTH_PROTO = 3,
/** Quality-Protocol */
LCP_OPTION_QUALITY_PROTO = 4,
/** Magic-Number */
LCP_OPTION_MAGIC_NUMBER = 5,
/** Protocol-Field-Compression */
LCP_OPTION_PROTO_COMPRESS = 7,
/** Address-and-Control-Field-Compression */
LCP_OPTION_ADDR_CTRL_COMPRESS = 8
} __packed;
/**
* IPCP option types from RFC 1332
*/
enum ipcp_option_type {
/** Reserved IPCP option value (do not use) */
IPCP_OPTION_RESERVED = 0,
/** IP Addresses */
IPCP_OPTION_IP_ADDRESSES = 1,
/** IP Compression Protocol */
IPCP_OPTION_IP_COMP_PROTO = 2,
/** IP Address */
IPCP_OPTION_IP_ADDRESS = 3,
/* RFC 1877 */
/** Primary DNS Server Address */
IPCP_OPTION_DNS1 = 129,
/** Primary NBNS Server Address */
IPCP_OPTION_NBNS1 = 130,
/** Secondary DNS Server Address */
IPCP_OPTION_DNS2 = 131,
/** Secondary NBNS Server Address */
IPCP_OPTION_NBNS2 = 132,
} __packed;
/**
* IPV6CP option types from RFC 5072
*/
enum ipv6cp_option_type {
/** Reserved IPV6CP option value (do not use) */
IPV6CP_OPTION_RESERVED = 0,
/** Interface identifier */
IPV6CP_OPTION_INTERFACE_IDENTIFIER = 1,
} __packed;
/**
* @typedef net_ppp_lcp_echo_reply_cb_t
* @brief A callback function that can be called if a Echo-Reply needs to
* be received.
* @param user_data User settable data that is passed to the callback
* function.
* @param user_data_len Length of the user data.
*/
typedef void (*net_ppp_lcp_echo_reply_cb_t)(void *user_data,
size_t user_data_len);
struct ppp_my_option_data;
struct ppp_my_option_info;
/**
* Generic PPP Finite State Machine
*/
struct ppp_fsm {
	/** Timeout timer */
	struct k_work_delayable timer;
	/** FSM callbacks */
	struct {
		/** Acknowledge Configuration Information */
		int (*config_info_ack)(struct ppp_fsm *fsm,
				       struct net_pkt *pkt,
				       uint16_t length);
		/** Add Configuration Information */
		struct net_pkt *(*config_info_add)(struct ppp_fsm *fsm);
		/** Length of Configuration Information */
		int (*config_info_len)(struct ppp_fsm *fsm);
		/** Negative Acknowledge Configuration Information */
		int (*config_info_nack)(struct ppp_fsm *fsm,
					struct net_pkt *pkt,
					uint16_t length,
					bool rejected);
		/** Request peer's Configuration Information */
		int (*config_info_req)(struct ppp_fsm *fsm,
				       struct net_pkt *pkt,
				       uint16_t length,
				       struct net_pkt *ret_pkt);
		/** Reject Configuration Information */
		int (*config_info_rej)(struct ppp_fsm *fsm,
				       struct net_pkt *pkt,
				       uint16_t length);
		/** Reset Configuration Information */
		void (*config_info_reset)(struct ppp_fsm *fsm);
		/** FSM goes to OPENED state */
		void (*up)(struct ppp_fsm *fsm);
		/** FSM leaves OPENED state */
		void (*down)(struct ppp_fsm *fsm);
		/** Starting this protocol */
		void (*starting)(struct ppp_fsm *fsm);
		/** Quitting this protocol */
		void (*finished)(struct ppp_fsm *fsm);
		/** We received Protocol-Reject */
		void (*proto_reject)(struct ppp_fsm *fsm);
		/** Retransmit */
		void (*retransmit)(struct ppp_fsm *fsm);
		/** Any code that is not understood by PPP is passed to
		 * this FSM for further processing.
		 */
		enum net_verdict (*proto_extension)(struct ppp_fsm *fsm,
						    enum ppp_packet_type code,
						    uint8_t id,
						    struct net_pkt *pkt);
	} cb;
	/** My options */
	struct {
		/** Options information */
		const struct ppp_my_option_info *info;
		/** Options negotiation data */
		struct ppp_my_option_data *data;
		/** Number of negotiated options */
		size_t count;
	} my_options;
	/** Option bits */
	uint32_t flags;
	/** Number of re-transmissions left */
	uint32_t retransmits;
	/** Number of NACK loops since last ACK */
	uint32_t nack_loops;
	/** Number of NACKs received */
	uint32_t recv_nack_loops;
	/** Reason for closing protocol */
	char terminate_reason[PPP_MAX_TERMINATE_REASON_LEN];
	/** PPP protocol number for this FSM */
	uint16_t protocol;
	/** Current state of PPP link */
	enum ppp_state state;
	/** Protocol/layer name of this FSM (for debugging) */
	const char *name;
	/** Current id */
	uint8_t id;
	/** Current request id */
	uint8_t req_id;
	/** Have received valid Ack, Nack or Reject to a Request */
	uint8_t ack_received : 1;
};
/** @cond INTERNAL_HIDDEN */
#define PPP_MY_OPTION_ACKED BIT(0)
#define PPP_MY_OPTION_REJECTED BIT(1)
struct ppp_my_option_data {
uint32_t flags;
};
#define IPCP_NUM_MY_OPTIONS 3
#define IPV6CP_NUM_MY_OPTIONS 1
enum ppp_flags {
PPP_CARRIER_UP,
};
/** @endcond */
/** Link control protocol options */
struct lcp_options {
/** Magic number */
uint32_t magic;
/** Async char map */
uint32_t async_map;
/** Maximum Receive Unit value */
uint16_t mru;
/** Which authentication protocol was negotiated (0 means none) */
uint16_t auth_proto;
};
#if defined(CONFIG_NET_L2_PPP_OPTION_MRU)
#define LCP_NUM_MY_OPTIONS 1
#endif
/** IPv4 control protocol options */
struct ipcp_options {
/** IPv4 address */
struct in_addr address;
/** Primary DNS server address */
struct in_addr dns1_address;
/** Secondary DNS server address */
struct in_addr dns2_address;
};
/** IPv6 control protocol options */
struct ipv6cp_options {
/** Interface identifier */
uint8_t iid[PPP_INTERFACE_IDENTIFIER_LEN];
};
/** PPP L2 context specific to certain network interface */
struct ppp_context {
/** Flags representing PPP state, which are accessed from multiple
* threads.
*/
atomic_t flags;
/** PPP startup worker. */
struct k_work_delayable startup;
/** LCP options */
struct {
/** Finite state machine for LCP */
struct ppp_fsm fsm;
/** Options that we want to request */
struct lcp_options my_options;
/** Options that peer want to request */
struct lcp_options peer_options;
/** Magic-Number value */
uint32_t magic;
#if defined(CONFIG_NET_L2_PPP_OPTION_MRU)
struct ppp_my_option_data my_options_data[LCP_NUM_MY_OPTIONS];
#endif
} lcp;
#if defined(CONFIG_NET_IPV4)
/** ICMP options */
struct {
/** Finite state machine for IPCP */
struct ppp_fsm fsm;
/** Options that we want to request */
struct ipcp_options my_options;
/** Options that peer want to request */
struct ipcp_options peer_options;
/** My options runtime data */
struct ppp_my_option_data my_options_data[IPCP_NUM_MY_OPTIONS];
} ipcp;
#endif
#if defined(CONFIG_NET_IPV6)
/** IPV6CP options */
struct {
/** Finite state machine for IPV6CP */
struct ppp_fsm fsm;
/** Options that we want to request */
struct ipv6cp_options my_options;
/** Options that peer want to request */
struct ipv6cp_options peer_options;
/** My options runtime data */
struct ppp_my_option_data my_options_data[IPV6CP_NUM_MY_OPTIONS];
} ipv6cp;
#endif
#if defined(CONFIG_NET_L2_PPP_PAP)
/** PAP options */
struct {
/** Finite state machine for PAP */
struct ppp_fsm fsm;
} pap;
#endif
#if defined(CONFIG_NET_SHELL)
/** Network shell PPP command internal data */
struct {
/** Ping command internal data */
struct {
/** Callback to be called when Echo-Reply is received.
*/
net_ppp_lcp_echo_reply_cb_t cb;
/** User specific data for the callback */
void *user_data;
/** User data length */
size_t user_data_len;
} echo_reply;
/** Used when waiting Echo-Reply */
struct k_sem wait_echo_reply;
/** Echo-Req data value */
uint32_t echo_req_data;
/** Echo-Reply data value */
uint32_t echo_reply_data;
} shell;
#endif
/** Network interface related to this PPP connection */
struct net_if *iface;
/** Network management callback structure */
struct net_mgmt_event_callback mgmt_evt_cb;
/** Current phase of PPP link */
enum ppp_phase phase;
/** Signal when PPP link is terminated */
struct k_sem wait_ppp_link_terminated;
/** Signal when PPP link is down */
struct k_sem wait_ppp_link_down;
/** This tells what features the PPP supports. */
enum net_l2_flags ppp_l2_flags;
/** This tells how many network protocols are open */
int network_protos_open;
/** This tells how many network protocols are up */
int network_protos_up;
/** Is PPP ready to receive packets */
uint16_t is_ready_to_serve : 1;
/** Is PPP L2 enabled or not */
uint16_t is_enabled : 1;
/** PPP enable pending */
uint16_t is_enable_done : 1;
/** IPCP status (up / down) */
uint16_t is_ipcp_up : 1;
/** IPCP open status (open / closed) */
uint16_t is_ipcp_open : 1;
/** IPV6CP status (up / down) */
uint16_t is_ipv6cp_up : 1;
/** IPV6CP open status (open / closed) */
uint16_t is_ipv6cp_open : 1;
/** PAP status (up / down) */
uint16_t is_pap_up : 1;
/** PAP open status (open / closed) */
uint16_t is_pap_open : 1;
};
/**
* @brief Initialize PPP L2 stack for a given interface
*
* @param iface A valid pointer to a network interface
*/
void net_ppp_init(struct net_if *iface);
/* Management API for PPP */
/** @cond INTERNAL_HIDDEN */
#define PPP_L2_CTX_TYPE struct ppp_context
#define _NET_PPP_LAYER NET_MGMT_LAYER_L2
#define _NET_PPP_CODE 0x209
#define _NET_PPP_BASE (NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_PPP_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_PPP_CODE))
#define _NET_PPP_EVENT (_NET_PPP_BASE | NET_MGMT_EVENT_BIT)
enum net_event_ppp_cmd {
NET_EVENT_PPP_CMD_CARRIER_ON = 1,
NET_EVENT_PPP_CMD_CARRIER_OFF,
NET_EVENT_PPP_CMD_PHASE_RUNNING,
NET_EVENT_PPP_CMD_PHASE_DEAD,
};
struct net_if;
/** @endcond */
/** Event emitted when PPP carrier is on */
#define NET_EVENT_PPP_CARRIER_ON \
(_NET_PPP_EVENT | NET_EVENT_PPP_CMD_CARRIER_ON)
/** Event emitted when PPP carrier is off */
#define NET_EVENT_PPP_CARRIER_OFF \
(_NET_PPP_EVENT | NET_EVENT_PPP_CMD_CARRIER_OFF)
/** Event emitted when PPP goes into running phase */
#define NET_EVENT_PPP_PHASE_RUNNING \
(_NET_PPP_EVENT | NET_EVENT_PPP_CMD_PHASE_RUNNING)
/** Event emitted when PPP goes into dead phase */
#define NET_EVENT_PPP_PHASE_DEAD \
(_NET_PPP_EVENT | NET_EVENT_PPP_CMD_PHASE_DEAD)
/**
* @brief Raise CARRIER_ON event when PPP is connected.
*
* @param iface PPP network interface.
*/
#if defined(CONFIG_NET_L2_PPP_MGMT)
void ppp_mgmt_raise_carrier_on_event(struct net_if *iface);
#else
static inline void ppp_mgmt_raise_carrier_on_event(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
/**
* @brief Raise CARRIER_OFF event when PPP is disconnected.
*
* @param iface PPP network interface.
*/
#if defined(CONFIG_NET_L2_PPP_MGMT)
void ppp_mgmt_raise_carrier_off_event(struct net_if *iface);
#else
static inline void ppp_mgmt_raise_carrier_off_event(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
/**
* @brief Raise PHASE_RUNNING event when PPP reaching RUNNING phase
*
* @param iface PPP network interface.
*/
#if defined(CONFIG_NET_L2_PPP_MGMT)
void ppp_mgmt_raise_phase_running_event(struct net_if *iface);
#else
static inline void ppp_mgmt_raise_phase_running_event(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
/**
* @brief Raise PHASE_DEAD event when PPP reaching DEAD phase
*
* @param iface PPP network interface.
*/
#if defined(CONFIG_NET_L2_PPP_MGMT)
void ppp_mgmt_raise_phase_dead_event(struct net_if *iface);
#else
static inline void ppp_mgmt_raise_phase_dead_event(struct net_if *iface)
{
ARG_UNUSED(iface);
}
#endif
/**
* @brief Send PPP Echo-Request to peer. We expect to receive Echo-Reply back.
*
* @param idx PPP network interface index
* @param timeout Amount of time to wait Echo-Reply. The value is in
* milliseconds.
*
* @return 0 if Echo-Reply was received, < 0 if there is a timeout or network
* index is not a valid PPP network index.
*/
#if defined(CONFIG_NET_L2_PPP)
int net_ppp_ping(int idx, int32_t timeout);
#else
static inline int net_ppp_ping(int idx, int32_t timeout)
{
ARG_UNUSED(idx);
ARG_UNUSED(timeout);
return -ENOTSUP;
}
#endif
/**
* @brief Get PPP context information. This is only used by net-shell to
* print information about PPP.
*
* @param idx PPP network interface index
*
* @return PPP context or NULL if idx is invalid.
*/
#if defined(CONFIG_NET_L2_PPP) && defined(CONFIG_NET_SHELL)
struct ppp_context *net_ppp_context_get(int idx);
#else
static inline struct ppp_context *net_ppp_context_get(int idx)
{
ARG_UNUSED(idx);
return NULL;
}
#endif
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_PPP_H_ */
``` | /content/code_sandbox/include/zephyr/net/ppp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,215 |
```objective-c
/** @file
* @brief Trickle timer library
*
* This implements Trickle timer as specified in RFC 6206
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_TRICKLE_H_
#define ZEPHYR_INCLUDE_NET_TRICKLE_H_
#include <stdbool.h>
#include <zephyr/types.h>
#include <zephyr/kernel.h>
#include <zephyr/net/net_core.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Trickle algorithm library
* @defgroup trickle Trickle Algorithm Library
* @since 1.7
* @version 0.8.0
* @ingroup networking
* @{
*/
struct net_trickle;
/**
* @typedef net_trickle_cb_t
* @brief Trickle timer callback.
*
* @details The callback is called after Trickle timeout expires.
*
* @param trickle The trickle context to use.
* @param do_suppress Is TX allowed (true) or not (false).
* @param user_data The user data given in net_trickle_start() call.
*/
typedef void (*net_trickle_cb_t)(struct net_trickle *trickle,
bool do_suppress, void *user_data);
/**
* The variable names are taken directly from RFC 6206 when applicable.
* Note that the struct members should not be accessed directly but
* only via the Trickle API.
*/
struct net_trickle {
	uint32_t I; /**< Current interval size */
	uint32_t Imin; /**< Min interval size in ms */
	uint32_t Istart; /**< Start of the interval in ms */
	uint32_t Imax_abs; /**< Max interval size in ms (not doublings) */
	uint8_t Imax; /**< Max number of doublings */
	uint8_t k; /**< Redundancy constant */
	uint8_t c; /**< Consistency counter */
	bool double_to; /**< Flag telling if the interval is doubled */
	struct k_work_delayable timer; /**< Internal timer struct */
	net_trickle_cb_t cb; /**< Callback to be called when timer expires */
	void *user_data; /**< User specific opaque data */
};
/** @cond INTERNAL_HIDDEN */
#define NET_TRICKLE_INFINITE_REDUNDANCY 0
/** @endcond */
/**
* @brief Create a Trickle timer.
*
* @param trickle Pointer to Trickle struct.
* @param Imin Imin configuration parameter in ms.
* @param Imax Max number of doublings.
* @param k Redundancy constant parameter. See RFC 6206 for details.
*
* @return Return 0 if ok and <0 if error.
*/
int net_trickle_create(struct net_trickle *trickle,
uint32_t Imin,
uint8_t Imax,
uint8_t k);
/**
* @brief Start a Trickle timer.
*
* @param trickle Pointer to Trickle struct.
* @param cb User callback to call at time T within the current trickle
* interval
* @param user_data User pointer that is passed to callback.
*
* @return Return 0 if ok and <0 if error.
*/
int net_trickle_start(struct net_trickle *trickle,
net_trickle_cb_t cb,
void *user_data);
/**
* @brief Stop a Trickle timer.
*
* @param trickle Pointer to Trickle struct.
*
* @return Return 0 if ok and <0 if error.
*/
int net_trickle_stop(struct net_trickle *trickle);
/**
* @brief To be called by the protocol handler when it hears a consistent
* network transmission.
*
* @param trickle Pointer to Trickle struct.
*/
void net_trickle_consistency(struct net_trickle *trickle);
/**
* @brief To be called by the protocol handler when it hears an inconsistent
* network transmission.
*
* @param trickle Pointer to Trickle struct.
*/
void net_trickle_inconsistency(struct net_trickle *trickle);
/**
* @brief Check if the Trickle timer is running or not.
*
* @param trickle Pointer to Trickle struct.
*
* @return Return True if timer is running and False if not.
*/
static inline bool net_trickle_is_running(struct net_trickle *trickle)
{
	NET_ASSERT(trickle);

	/* The interval size I is non-zero only while the timer runs. */
	if (trickle->I == 0U) {
		return false;
	}

	return true;
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_TRICKLE_H_ */
``` | /content/code_sandbox/include/zephyr/net/trickle.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 933 |
```objective-c
/*
*
*/
/** @file
* @brief CoAP Service API
*
* An API for applications to respond to CoAP requests
*/
#ifndef ZEPHYR_INCLUDE_NET_COAP_SERVICE_H_
#define ZEPHYR_INCLUDE_NET_COAP_SERVICE_H_
#include <zephyr/net/coap.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief CoAP Service API
* @defgroup coap_service CoAP service API
* @since 3.6
* @version 0.1.0
* @ingroup networking
* @{
*/
/**
* @name CoAP Service configuration flags
* @anchor COAP_SERVICE_FLAGS
* @{
*/
/** Start the service on boot. */
#define COAP_SERVICE_AUTOSTART BIT(0)
/** @} */
/** @cond INTERNAL_HIDDEN */
/* Mutable per-service runtime state, kept separate from the const
 * service descriptor below.
 */
struct coap_service_data {
	int sock_fd; /* Socket descriptor; initialized to -1 by __z_coap_service_define() */
	struct coap_observer observers[CONFIG_COAP_SERVICE_OBSERVERS]; /* Registered observers */
	struct coap_pending pending[CONFIG_COAP_SERVICE_PENDING_MESSAGES]; /* Pending confirmable messages */
};
/* Constant descriptor of a CoAP service, placed in an iterable section
 * by COAP_SERVICE_DEFINE().
 */
struct coap_service {
	const char *name; /* Stringified service name */
	const char *host; /* Address or hostname to bind to, may be NULL (any address) */
	uint16_t *port; /* Pointer to port number; written back if an ephemeral port is assigned */
	uint8_t flags; /* Configuration flags, see COAP_SERVICE_FLAGS */
	struct coap_resource *res_begin; /* Start of the service's resource section */
	struct coap_resource *res_end; /* End (one past the last) of the resource section */
	struct coap_service_data *data; /* Pointer to the mutable runtime state */
};
#define __z_coap_service_define(_name, _host, _port, _flags, _res_begin, _res_end) \
static struct coap_service_data coap_service_data_##_name = { \
.sock_fd = -1, \
}; \
const STRUCT_SECTION_ITERABLE(coap_service, _name) = { \
.name = STRINGIFY(_name), \
.host = _host, \
.port = (uint16_t *)(_port), \
.flags = _flags, \
.res_begin = (_res_begin), \
.res_end = (_res_end), \
.data = &coap_service_data_##_name, \
}
/** @endcond */
/**
* @brief Define a static CoAP resource owned by the service named @p _service .
*
* @note The handlers registered with the resource can return a CoAP response code to reply with
* an acknowledge without any payload, nothing is sent if the return value is 0 or negative.
* As seen in the example.
*
* @code{.c}
* static const struct gpio_dt_spec led = GPIO_DT_SPEC_GET(DT_ALIAS(led0), gpios);
*
* static int led_put(struct coap_resource *resource, struct coap_packet *request,
* struct sockaddr *addr, socklen_t addr_len)
* {
* const uint8_t *payload;
* uint16_t payload_len;
*
* payload = coap_packet_get_payload(request, &payload_len);
* if (payload_len != 1) {
* return COAP_RESPONSE_CODE_BAD_REQUEST;
* }
*
* if (gpio_pin_set_dt(&led, payload[0]) < 0) {
* return COAP_RESPONSE_CODE_INTERNAL_ERROR;
* }
*
* return COAP_RESPONSE_CODE_CHANGED;
* }
*
* COAP_RESOURCE_DEFINE(my_resource, my_service, {
* .put = led_put,
* });
* @endcode
*
* @param _name Name of the resource.
* @param _service Name of the associated service.
*/
#define COAP_RESOURCE_DEFINE(_name, _service, ...) \
STRUCT_SECTION_ITERABLE_ALTERNATE(coap_resource_##_service, coap_resource, _name) \
= __VA_ARGS__
/**
* @brief Define a CoAP service with static resources.
*
* @note The @p _host parameter can be `NULL`. If not, it is used to specify an IP address either in
* IPv4 or IPv6 format a fully-qualified hostname or a virtual host, otherwise the any address is
* used.
*
* @note The @p _port parameter must be non-`NULL`. It points to a location that specifies the port
* number to use for the service. If the specified port number is zero, then an ephemeral port
* number will be used and the actual port number assigned will be written back to memory. For
* ephemeral port numbers, the memory pointed to by @p _port must be writeable.
*
* @param _name Name of the service.
* @param _host IP address or hostname associated with the service.
* @param[inout] _port Pointer to port associated with the service.
* @param _flags Configuration flags @see @ref COAP_SERVICE_FLAGS.
*/
#define COAP_SERVICE_DEFINE(_name, _host, _port, _flags) \
extern struct coap_resource _CONCAT(_coap_resource_##_name, _list_start)[]; \
extern struct coap_resource _CONCAT(_coap_resource_##_name, _list_end)[]; \
__z_coap_service_define(_name, _host, _port, _flags, \
&_CONCAT(_coap_resource_##_name, _list_start)[0], \
&_CONCAT(_coap_resource_##_name, _list_end)[0])
/**
* @brief Count the number of CoAP services.
*
* @param[out] _dst Pointer to location where result is written.
*/
#define COAP_SERVICE_COUNT(_dst) STRUCT_SECTION_COUNT(coap_service, _dst)
/**
* @brief Count CoAP service static resources.
*
* @param _service Pointer to a service.
*/
#define COAP_SERVICE_RESOURCE_COUNT(_service) ((_service)->res_end - (_service)->res_begin)
/**
* @brief Check if service has the specified resource.
*
* @param _service Pointer to a service.
* @param _resource Pointer to a resource.
*/
#define COAP_SERVICE_HAS_RESOURCE(_service, _resource) \
((_service)->res_begin <= _resource && _resource < (_service)->res_end)
/**
* @brief Iterate over all CoAP services.
*
* @param _it Name of iterator (of type @ref coap_service)
*/
#define COAP_SERVICE_FOREACH(_it) STRUCT_SECTION_FOREACH(coap_service, _it)
/**
* @brief Iterate over static CoAP resources associated with a given @p _service.
*
* @note This macro requires that @p _service is defined with @ref COAP_SERVICE_DEFINE.
*
* @param _service Name of CoAP service
* @param _it Name of iterator (of type @ref coap_resource)
*/
#define COAP_RESOURCE_FOREACH(_service, _it) \
STRUCT_SECTION_FOREACH_ALTERNATE(coap_resource_##_service, coap_resource, _it)
/**
* @brief Iterate over all static resources associated with @p _service .
*
* @note This macro is suitable for a @p _service defined with @ref COAP_SERVICE_DEFINE.
*
* @param _service Pointer to COAP service
* @param _it Name of iterator (of type @ref coap_resource)
*/
#define COAP_SERVICE_FOREACH_RESOURCE(_service, _it) \
for (struct coap_resource *_it = (_service)->res_begin; ({ \
__ASSERT(_it <= (_service)->res_end, "unexpected list end location"); \
_it < (_service)->res_end; \
}); _it++)
/**
* @brief Start the provided @p service .
*
* @note This function is suitable for a @p service defined with @ref COAP_SERVICE_DEFINE.
*
* @param service Pointer to CoAP service
* @retval 0 in case of success.
* @retval -EALREADY in case of an already running service.
* @retval -ENOTSUP in case the server has no valid host and port configuration.
*/
int coap_service_start(const struct coap_service *service);
/**
* @brief Stop the provided @p service .
*
* @note This function is suitable for a @p service defined with @ref COAP_SERVICE_DEFINE.
*
* @param service Pointer to CoAP service
* @retval 0 in case of success.
* @retval -EALREADY in case the service isn't running.
*/
int coap_service_stop(const struct coap_service *service);
/**
* @brief Query the provided @p service running state.
*
* @note This function is suitable for a @p service defined with @ref COAP_SERVICE_DEFINE.
*
* @param service Pointer to CoAP service
* @retval 1 if the service is running
* @retval 0 if the service is stopped
* @retval negative in case of an error.
*/
int coap_service_is_running(const struct coap_service *service);
/**
* @brief Send a CoAP message from the provided @p service .
*
* @note This function is suitable for a @p service defined with @ref COAP_SERVICE_DEFINE.
*
* @param service Pointer to CoAP service
* @param cpkt CoAP Packet to send
* @param addr Peer address
* @param addr_len Peer address length
* @param params Pointer to transmission parameters structure or NULL to use default values.
* @return 0 in case of success or negative in case of error.
*/
int coap_service_send(const struct coap_service *service, const struct coap_packet *cpkt,
const struct sockaddr *addr, socklen_t addr_len,
const struct coap_transmission_parameters *params);
/**
* @brief Send a CoAP message from the provided @p resource .
*
* @note This function is suitable for a @p resource defined with @ref COAP_RESOURCE_DEFINE.
*
* @param resource Pointer to CoAP resource
* @param cpkt CoAP Packet to send
* @param addr Peer address
* @param addr_len Peer address length
* @param params Pointer to transmission parameters structure or NULL to use default values.
* @return 0 in case of success or negative in case of error.
*/
int coap_resource_send(const struct coap_resource *resource, const struct coap_packet *cpkt,
const struct sockaddr *addr, socklen_t addr_len,
const struct coap_transmission_parameters *params);
/**
* @brief Parse a CoAP observe request for the provided @p resource .
*
* @note This function is suitable for a @p resource defined with @ref COAP_RESOURCE_DEFINE.
*
* If the observe option value is equal to 0, an observer will be added, if the value is equal
* to 1, an existing observer will be removed.
*
* @param resource Pointer to CoAP resource
* @param request CoAP request to parse
* @param addr Peer address
* @return the observe option value in case of success or negative in case of error.
*/
int coap_resource_parse_observe(struct coap_resource *resource, const struct coap_packet *request,
const struct sockaddr *addr);
/**
* @brief Lookup an observer by address and remove it from the @p resource .
*
* @note This function is suitable for a @p resource defined with @ref COAP_RESOURCE_DEFINE.
*
* @param resource Pointer to CoAP resource
* @param addr Peer address
* @return 0 in case of success or negative in case of error.
*/
int coap_resource_remove_observer_by_addr(struct coap_resource *resource,
const struct sockaddr *addr);
/**
* @brief Lookup an observer by token and remove it from the @p resource .
*
* @note This function is suitable for a @p resource defined with @ref COAP_RESOURCE_DEFINE.
*
* @param resource Pointer to CoAP resource
* @param token Pointer to the token
* @param token_len Length of valid bytes in the token
* @return 0 in case of success or negative in case of error.
*/
int coap_resource_remove_observer_by_token(struct coap_resource *resource,
const uint8_t *token, uint8_t token_len);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_COAP_SERVICE_H_ */
``` | /content/code_sandbox/include/zephyr/net/coap_service.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,573 |
```objective-c
/** @file
@brief Ethernet
This is not to be included by the application.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_ETHERNET_H_
#define ZEPHYR_INCLUDE_NET_ETHERNET_H_
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/lldp.h>
#include <zephyr/sys/util.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet_vlan.h>
#include <zephyr/net/ptp_time.h>
#if defined(CONFIG_NET_DSA)
#include <zephyr/net/dsa.h>
#endif
#if defined(CONFIG_NET_ETHERNET_BRIDGE)
#include <zephyr/net/ethernet_bridge.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Ethernet support functions
* @defgroup ethernet Ethernet Support Functions
* @since 1.0
* @version 0.8.0
* @ingroup networking
* @{
*/
#define NET_ETH_ADDR_LEN 6U /**< Ethernet MAC address length */
/** Ethernet address */
struct net_eth_addr {
uint8_t addr[NET_ETH_ADDR_LEN]; /**< Buffer storing the address */
};
/** @cond INTERNAL_HIDDEN */
#define NET_ETH_HDR(pkt) ((struct net_eth_hdr *)net_pkt_data(pkt))
#define NET_ETH_PTYPE_CAN 0x000C /* CAN: Controller Area Network */
#define NET_ETH_PTYPE_CANFD 0x000D /* CANFD: CAN flexible data rate*/
#define NET_ETH_PTYPE_HDLC 0x0019 /* HDLC frames (like in PPP) */
#define NET_ETH_PTYPE_ARP 0x0806
#define NET_ETH_PTYPE_IP 0x0800
#define NET_ETH_PTYPE_TSN 0x22f0 /* TSN (IEEE 1722) packet */
#define NET_ETH_PTYPE_IPV6 0x86dd
#define NET_ETH_PTYPE_VLAN 0x8100
#define NET_ETH_PTYPE_PTP 0x88f7
#define NET_ETH_PTYPE_LLDP 0x88cc
#define NET_ETH_PTYPE_ALL 0x0003 /* from linux/if_ether.h */
#define NET_ETH_PTYPE_ECAT 0x88a4
#define NET_ETH_PTYPE_EAPOL 0x888e
#define NET_ETH_PTYPE_IEEE802154 0x00F6 /* from linux/if_ether.h: IEEE802.15.4 frame */
#if !defined(ETH_P_ALL)
#define ETH_P_ALL NET_ETH_PTYPE_ALL
#endif
#if !defined(ETH_P_IP)
#define ETH_P_IP NET_ETH_PTYPE_IP
#endif
#if !defined(ETH_P_ARP)
#define ETH_P_ARP NET_ETH_PTYPE_ARP
#endif
#if !defined(ETH_P_IPV6)
#define ETH_P_IPV6 NET_ETH_PTYPE_IPV6
#endif
#if !defined(ETH_P_8021Q)
#define ETH_P_8021Q NET_ETH_PTYPE_VLAN
#endif
#if !defined(ETH_P_TSN)
#define ETH_P_TSN NET_ETH_PTYPE_TSN
#endif
#if !defined(ETH_P_ECAT)
#define ETH_P_ECAT NET_ETH_PTYPE_ECAT
#endif
#if !defined(ETH_P_EAPOL)
#define ETH_P_EAPOL NET_ETH_PTYPE_EAPOL
#endif
#if !defined(ETH_P_IEEE802154)
#define ETH_P_IEEE802154 NET_ETH_PTYPE_IEEE802154
#endif
#if !defined(ETH_P_CAN)
#define ETH_P_CAN NET_ETH_PTYPE_CAN
#endif
#if !defined(ETH_P_CANFD)
#define ETH_P_CANFD NET_ETH_PTYPE_CANFD
#endif
#if !defined(ETH_P_HDLC)
#define ETH_P_HDLC NET_ETH_PTYPE_HDLC
#endif
/** @endcond */
#define NET_ETH_MINIMAL_FRAME_SIZE 60 /**< Minimum Ethernet frame size */
#define NET_ETH_MTU 1500 /**< Ethernet MTU size */
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_NET_VLAN)
#define _NET_ETH_MAX_HDR_SIZE (sizeof(struct net_eth_vlan_hdr))
#else
#define _NET_ETH_MAX_HDR_SIZE (sizeof(struct net_eth_hdr))
#endif
#define _NET_ETH_MAX_FRAME_SIZE (NET_ETH_MTU + _NET_ETH_MAX_HDR_SIZE)
/*
* Extend the max frame size for DSA (KSZ8794) by one byte (to 1519) to
* store tail tag.
*/
#if defined(CONFIG_NET_DSA)
#define NET_ETH_MAX_FRAME_SIZE (_NET_ETH_MAX_FRAME_SIZE + DSA_TAG_SIZE)
#define NET_ETH_MAX_HDR_SIZE (_NET_ETH_MAX_HDR_SIZE + DSA_TAG_SIZE)
#else
#define NET_ETH_MAX_FRAME_SIZE (_NET_ETH_MAX_FRAME_SIZE)
#define NET_ETH_MAX_HDR_SIZE (_NET_ETH_MAX_HDR_SIZE)
#endif
#define NET_ETH_VLAN_HDR_SIZE 4
/** @endcond */
/** @brief Ethernet hardware capabilities */
enum ethernet_hw_caps {
/** TX Checksum offloading supported for all of IPv4, UDP, TCP */
ETHERNET_HW_TX_CHKSUM_OFFLOAD = BIT(0),
/** RX Checksum offloading supported for all of IPv4, UDP, TCP */
ETHERNET_HW_RX_CHKSUM_OFFLOAD = BIT(1),
/** VLAN supported */
ETHERNET_HW_VLAN = BIT(2),
/** Enabling/disabling auto negotiation supported */
ETHERNET_AUTO_NEGOTIATION_SET = BIT(3),
/** 10 Mbits link supported */
ETHERNET_LINK_10BASE_T = BIT(4),
/** 100 Mbits link supported */
ETHERNET_LINK_100BASE_T = BIT(5),
/** 1 Gbits link supported */
ETHERNET_LINK_1000BASE_T = BIT(6),
/** Changing duplex (half/full) supported */
ETHERNET_DUPLEX_SET = BIT(7),
/** IEEE 802.1AS (gPTP) clock supported */
ETHERNET_PTP = BIT(8),
/** IEEE 802.1Qav (credit-based shaping) supported */
ETHERNET_QAV = BIT(9),
/** Promiscuous mode supported */
ETHERNET_PROMISC_MODE = BIT(10),
/** Priority queues available */
ETHERNET_PRIORITY_QUEUES = BIT(11),
/** MAC address filtering supported */
ETHERNET_HW_FILTERING = BIT(12),
/** Link Layer Discovery Protocol supported */
ETHERNET_LLDP = BIT(13),
/** VLAN Tag stripping */
ETHERNET_HW_VLAN_TAG_STRIP = BIT(14),
/** DSA switch slave port */
ETHERNET_DSA_SLAVE_PORT = BIT(15),
/** DSA switch master port */
ETHERNET_DSA_MASTER_PORT = BIT(16),
/** IEEE 802.1Qbv (scheduled traffic) supported */
ETHERNET_QBV = BIT(17),
/** IEEE 802.1Qbu (frame preemption) supported */
ETHERNET_QBU = BIT(18),
/** TXTIME supported */
ETHERNET_TXTIME = BIT(19),
/** TX-Injection supported */
ETHERNET_TXINJECTION_MODE = BIT(20),
};
/** @cond INTERNAL_HIDDEN */
enum ethernet_config_type {
ETHERNET_CONFIG_TYPE_AUTO_NEG,
ETHERNET_CONFIG_TYPE_LINK,
ETHERNET_CONFIG_TYPE_DUPLEX,
ETHERNET_CONFIG_TYPE_MAC_ADDRESS,
ETHERNET_CONFIG_TYPE_QAV_PARAM,
ETHERNET_CONFIG_TYPE_QBV_PARAM,
ETHERNET_CONFIG_TYPE_QBU_PARAM,
ETHERNET_CONFIG_TYPE_TXTIME_PARAM,
ETHERNET_CONFIG_TYPE_PROMISC_MODE,
ETHERNET_CONFIG_TYPE_PRIORITY_QUEUES_NUM,
ETHERNET_CONFIG_TYPE_FILTER,
ETHERNET_CONFIG_TYPE_PORTS_NUM,
ETHERNET_CONFIG_TYPE_T1S_PARAM,
ETHERNET_CONFIG_TYPE_TXINJECTION_MODE,
ETHERNET_CONFIG_TYPE_RX_CHECKSUM_SUPPORT,
ETHERNET_CONFIG_TYPE_TX_CHECKSUM_SUPPORT
};
enum ethernet_qav_param_type {
ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH,
ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE,
ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE,
ETHERNET_QAV_PARAM_TYPE_TRAFFIC_CLASS,
ETHERNET_QAV_PARAM_TYPE_STATUS,
};
enum ethernet_t1s_param_type {
ETHERNET_T1S_PARAM_TYPE_PLCA_CONFIG,
};
/** @endcond */
/** Ethernet T1S specific parameters */
struct ethernet_t1s_param {
/** Type of T1S parameter */
enum ethernet_t1s_param_type type;
union {
/**
* PLCA is the Physical Layer (PHY) Collision
* Avoidance technique employed with multidrop
* 10Base-T1S standard.
*
* The PLCA parameters are described in standard [1]
* as registers in memory map 4 (MMS = 4) (point 9.6).
*
* IDVER (PLCA ID Version)
* CTRL0 (PLCA Control 0)
* CTRL1 (PLCA Control 1)
* STATUS (PLCA Status)
* TOTMR (PLCA TO Control)
* BURST (PLCA Burst Control)
*
* Those registers are implemented by each OA TC6
* compliant vendor (like for e.g. LAN865x - e.g. [2]).
*
* Documents:
* [1] - "OPEN Alliance 10BASE-T1x MAC-PHY Serial
* Interface" (ver. 1.1)
* [2] - "DS60001734C" - LAN865x data sheet
*/
struct {
/** T1S PLCA enabled */
bool enable;
/** T1S PLCA node id range: 0 to 254 */
uint8_t node_id;
/** T1S PLCA node count range: 1 to 255 */
uint8_t node_count;
/** T1S PLCA burst count range: 0x0 to 0xFF */
uint8_t burst_count;
/** T1S PLCA burst timer */
uint8_t burst_timer;
/** T1S PLCA TO value */
uint8_t to_timer;
} plca;
};
};
/** Ethernet Qav specific parameters */
struct ethernet_qav_param {
/** ID of the priority queue to use */
int queue_id;
/** Type of Qav parameter */
enum ethernet_qav_param_type type;
union {
/** True if Qav is enabled for queue */
bool enabled;
/** Delta Bandwidth (percentage of bandwidth) */
unsigned int delta_bandwidth;
/** Idle Slope (bits per second) */
unsigned int idle_slope;
/** Oper Idle Slope (bits per second) */
unsigned int oper_idle_slope;
/** Traffic class the queue is bound to */
unsigned int traffic_class;
};
};
/** @cond INTERNAL_HIDDEN */
enum ethernet_qbv_param_type {
ETHERNET_QBV_PARAM_TYPE_STATUS,
ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST,
ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST_LEN,
ETHERNET_QBV_PARAM_TYPE_TIME,
};
enum ethernet_qbv_state_type {
ETHERNET_QBV_STATE_TYPE_ADMIN,
ETHERNET_QBV_STATE_TYPE_OPER,
};
enum ethernet_gate_state_operation {
ETHERNET_SET_GATE_STATE,
ETHERNET_SET_AND_HOLD_MAC_STATE,
ETHERNET_SET_AND_RELEASE_MAC_STATE,
};
/** @endcond */
/** Ethernet Qbv specific parameters */
struct ethernet_qbv_param {
/** Port id */
int port_id;
/** Type of Qbv parameter */
enum ethernet_qbv_param_type type;
/** What state (Admin/Oper) parameters are these */
enum ethernet_qbv_state_type state;
union {
/** True if Qbv is enabled or not */
bool enabled;
/** Gate control information */
struct {
/** True = open, False = closed */
bool gate_status[NET_TC_TX_COUNT];
/** GateState operation */
enum ethernet_gate_state_operation operation;
/** Time interval ticks (nanoseconds) */
uint32_t time_interval;
/** Gate control list row */
uint16_t row;
} gate_control;
/** Number of entries in gate control list */
uint32_t gate_control_list_len;
/**
* The time values are set in one go when type is set to
* ETHERNET_QBV_PARAM_TYPE_TIME
*/
struct {
/** Base time */
struct net_ptp_extended_time base_time;
/** Cycle time */
struct net_ptp_time cycle_time;
/** Extension time (nanoseconds) */
uint32_t extension_time;
};
};
};
/** @cond INTERNAL_HIDDEN */
enum ethernet_qbu_param_type {
ETHERNET_QBU_PARAM_TYPE_STATUS,
ETHERNET_QBU_PARAM_TYPE_RELEASE_ADVANCE,
ETHERNET_QBU_PARAM_TYPE_HOLD_ADVANCE,
ETHERNET_QBU_PARAM_TYPE_PREEMPTION_STATUS_TABLE,
/* Some preemption settings are from Qbr spec. */
ETHERNET_QBR_PARAM_TYPE_LINK_PARTNER_STATUS,
ETHERNET_QBR_PARAM_TYPE_ADDITIONAL_FRAGMENT_SIZE,
};
enum ethernet_qbu_preempt_status {
ETHERNET_QBU_STATUS_EXPRESS,
ETHERNET_QBU_STATUS_PREEMPTABLE
} __packed;
/** @endcond */
/** Ethernet Qbu specific parameters */
struct ethernet_qbu_param {
/** Port id */
int port_id;
/** Type of Qbu parameter */
enum ethernet_qbu_param_type type;
union {
/** Hold advance (nanoseconds) */
uint32_t hold_advance;
/** Release advance (nanoseconds) */
uint32_t release_advance;
/** sequence of framePreemptionAdminStatus values */
enum ethernet_qbu_preempt_status
frame_preempt_statuses[NET_TC_TX_COUNT];
/** True if Qbu is enabled or not */
bool enabled;
/** Link partner status (from Qbr) */
bool link_partner_status;
/**
* Additional fragment size (from Qbr). The minimum non-final
* fragment size is (additional_fragment_size + 1) * 64 octets
*/
uint8_t additional_fragment_size : 2;
};
};
/** @cond INTERNAL_HIDDEN */
enum ethernet_filter_type {
ETHERNET_FILTER_TYPE_SRC_MAC_ADDRESS,
ETHERNET_FILTER_TYPE_DST_MAC_ADDRESS,
};
/** @endcond */
/** Types of Ethernet L2 */
enum ethernet_if_types {
/** IEEE 802.3 Ethernet (default) */
L2_ETH_IF_TYPE_ETHERNET,
/** IEEE 802.11 Wi-Fi*/
L2_ETH_IF_TYPE_WIFI,
} __packed;
/** Ethernet filter description */
struct ethernet_filter {
/** Type of filter */
enum ethernet_filter_type type;
/** MAC address to filter */
struct net_eth_addr mac_address;
/** Set (true) or unset (false) the filter */
bool set;
};
/** @cond INTERNAL_HIDDEN */
enum ethernet_txtime_param_type {
ETHERNET_TXTIME_PARAM_TYPE_ENABLE_QUEUES,
};
/** @endcond */
/** Ethernet TXTIME specific parameters */
struct ethernet_txtime_param {
/** Type of TXTIME parameter */
enum ethernet_txtime_param_type type;
/** Queue number for configuring TXTIME */
int queue_id;
/** Enable or disable TXTIME per queue */
bool enable_txtime;
};
/** Protocols that are supported by checksum offloading */
enum ethernet_checksum_support {
/** Device does not support any L3/L4 checksum offloading */
ETHERNET_CHECKSUM_SUPPORT_NONE = NET_IF_CHECKSUM_NONE_BIT,
/** Device supports checksum offloading for the IPv4 header */
ETHERNET_CHECKSUM_SUPPORT_IPV4_HEADER = NET_IF_CHECKSUM_IPV4_HEADER_BIT,
/** Device supports checksum offloading for ICMPv4 payload (implies IPv4 header) */
ETHERNET_CHECKSUM_SUPPORT_IPV4_ICMP = NET_IF_CHECKSUM_IPV4_ICMP_BIT,
/** Device supports checksum offloading for the IPv6 header */
ETHERNET_CHECKSUM_SUPPORT_IPV6_HEADER = NET_IF_CHECKSUM_IPV6_HEADER_BIT,
/** Device supports checksum offloading for ICMPv6 payload (implies IPv6 header) */
ETHERNET_CHECKSUM_SUPPORT_IPV6_ICMP = NET_IF_CHECKSUM_IPV6_ICMP_BIT,
/** Device supports TCP checksum offloading for all supported IP protocols */
ETHERNET_CHECKSUM_SUPPORT_TCP = NET_IF_CHECKSUM_TCP_BIT,
/** Device supports UDP checksum offloading for all supported IP protocols */
ETHERNET_CHECKSUM_SUPPORT_UDP = NET_IF_CHECKSUM_UDP_BIT,
};
/** @cond INTERNAL_HIDDEN */
struct ethernet_config {
union {
bool auto_negotiation;
bool full_duplex;
bool promisc_mode;
bool txinjection_mode;
struct {
bool link_10bt;
bool link_100bt;
bool link_1000bt;
} l;
struct net_eth_addr mac_address;
struct ethernet_t1s_param t1s_param;
struct ethernet_qav_param qav_param;
struct ethernet_qbv_param qbv_param;
struct ethernet_qbu_param qbu_param;
struct ethernet_txtime_param txtime_param;
int priority_queues_num;
int ports_num;
enum ethernet_checksum_support chksum_support;
struct ethernet_filter filter;
};
};
/** @endcond */
/** Ethernet L2 API operations. */
struct ethernet_api {
/**
* The net_if_api must be placed in first position in this
* struct so that we are compatible with network interface API.
*/
struct net_if_api iface_api;
/** Collect optional ethernet specific statistics. This pointer
* should be set by driver if statistics needs to be collected
* for that driver.
*/
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
struct net_stats_eth *(*get_stats)(const struct device *dev);
#endif
/** Start the device */
int (*start)(const struct device *dev);
/** Stop the device */
int (*stop)(const struct device *dev);
/** Get the device capabilities */
enum ethernet_hw_caps (*get_capabilities)(const struct device *dev);
/** Set specific hardware configuration */
int (*set_config)(const struct device *dev,
enum ethernet_config_type type,
const struct ethernet_config *config);
/** Get hardware specific configuration */
int (*get_config)(const struct device *dev,
enum ethernet_config_type type,
struct ethernet_config *config);
/** The IP stack will call this function when a VLAN tag is enabled
* or disabled. If enable is set to true, then the VLAN tag was added,
* if it is false then the tag was removed. The driver can utilize
* this information if needed.
*/
#if defined(CONFIG_NET_VLAN)
int (*vlan_setup)(const struct device *dev, struct net_if *iface,
uint16_t tag, bool enable);
#endif /* CONFIG_NET_VLAN */
/** Return ptp_clock device that is tied to this ethernet device */
#if defined(CONFIG_PTP_CLOCK)
const struct device *(*get_ptp_clock)(const struct device *dev);
#endif /* CONFIG_PTP_CLOCK */
/** Return PHY device that is tied to this ethernet device */
const struct device *(*get_phy)(const struct device *dev);
/** Send a network packet */
int (*send)(const struct device *dev, struct net_pkt *pkt);
};
/** @cond INTERNAL_HIDDEN */
/* Make sure that the network interface API is properly setup inside
* Ethernet API struct (it is the first one).
*/
BUILD_ASSERT(offsetof(struct ethernet_api, iface_api) == 0);
/* Ethernet frame header (14 octets when packed). */
struct net_eth_hdr {
	struct net_eth_addr dst; /* Destination MAC address */
	struct net_eth_addr src; /* Source MAC address */
	uint16_t type; /* Protocol type (EtherType), see NET_ETH_PTYPE_* */
} __packed;
struct ethernet_vlan {
/** Network interface that has VLAN enabled */
struct net_if *iface;
/** VLAN tag */
uint16_t tag;
};
#if defined(CONFIG_NET_VLAN_COUNT)
#define NET_VLAN_MAX_COUNT CONFIG_NET_VLAN_COUNT
#else
/* Even though there is no VLAN support, the minimum count must be set to 1.
 */
#define NET_VLAN_MAX_COUNT 1
#endif
/** @endcond */
/** Ethernet LLDP specific parameters */
struct ethernet_lldp {
/** Used for track timers */
sys_snode_t node;
/** LLDP Data Unit mandatory TLVs for the interface. */
const struct net_lldpdu *lldpdu;
/** LLDP Data Unit optional TLVs for the interface */
const uint8_t *optional_du;
/** Length of the optional Data Unit TLVs */
size_t optional_len;
/** Network interface that has LLDP supported. */
struct net_if *iface;
/** LLDP TX timer start time */
int64_t tx_timer_start;
/** LLDP TX timeout */
uint32_t tx_timer_timeout;
/** LLDP RX callback function */
net_lldp_recv_cb_t cb;
};
/** @cond INTERNAL_HIDDEN */
enum ethernet_flags {
ETH_CARRIER_UP,
};
/** Ethernet L2 context that is needed for VLAN */
struct ethernet_context {
	/** Flags representing ethernet state, which are accessed from multiple
	 * threads.
	 */
	atomic_t flags;
#if defined(CONFIG_NET_ETHERNET_BRIDGE)
	/** Ethernet bridging state for this interface. */
	struct eth_bridge_iface_context bridge;
#endif
	/** Carrier ON/OFF handler worker. This is used to create
	 * network interface UP/DOWN event when ethernet L2 driver
	 * notices carrier ON/OFF situation. We must not create another
	 * network management event from inside management handler thus
	 * we use worker thread to trigger the UP/DOWN event.
	 */
	struct k_work carrier_work;
	/** Network interface. */
	struct net_if *iface;
#if defined(CONFIG_NET_LLDP)
	/** LLDP state, one entry per possible VLAN. */
	struct ethernet_lldp lldp[NET_VLAN_MAX_COUNT];
#endif
	/**
	 * This tells what L2 features does ethernet support.
	 */
	enum net_l2_flags ethernet_l2_flags;
#if defined(CONFIG_NET_L2_PTP)
	/** The PTP port number for this network device. We need to store the
	 * port number here so that we do not need to fetch it for every
	 * incoming PTP packet.
	 */
	int port;
#endif
#if defined(CONFIG_NET_DSA)
	/** DSA RX callback function - for custom processing - like e.g.
	 * redirecting packets when MAC address is caught
	 */
	dsa_net_recv_cb_t dsa_recv_cb;
	/** Switch physical port number */
	uint8_t dsa_port_idx;
	/** DSA context pointer */
	struct dsa_context *dsa_ctx;
	/** Send a network packet via DSA master port */
	dsa_send_t dsa_send;
#endif
	/** Is network carrier up */
	bool is_net_carrier_up : 1;
	/** Is this context already initialized */
	bool is_init : 1;
	/** Types of Ethernet network interfaces */
	enum ethernet_if_types eth_if_type;
};
/**
 * @brief Initialize Ethernet L2 stack for a given interface
 *
 * @param iface A valid pointer to a network interface
 */
void ethernet_init(struct net_if *iface);
/** L2 context type stored per network interface for Ethernet. */
#define ETHERNET_L2_CTX_TYPE struct ethernet_context
/* Separate header for VLAN as some of device interfaces might not
 * support VLAN.
 */
struct net_eth_vlan_hdr {
	struct net_eth_addr dst; /* destination MAC address */
	struct net_eth_addr src; /* source MAC address */
	struct {
		uint16_t tpid; /* tag protocol id */
		uint16_t tci; /* tag control info */
	} vlan;
	uint16_t type; /* EtherType of the encapsulated frame */
} __packed;
/** @endcond */
/**
* @brief Check if the Ethernet MAC address is a broadcast address.
*
* @param addr A valid pointer to a Ethernet MAC address.
*
* @return true if address is a broadcast address, false if not
*/
static inline bool net_eth_is_addr_broadcast(struct net_eth_addr *addr)
{
if (addr->addr[0] == 0xff &&
addr->addr[1] == 0xff &&
addr->addr[2] == 0xff &&
addr->addr[3] == 0xff &&
addr->addr[4] == 0xff &&
addr->addr[5] == 0xff) {
return true;
}
return false;
}
/**
* @brief Check if the Ethernet MAC address is a all zeroes address.
*
* @param addr A valid pointer to an Ethernet MAC address.
*
* @return true if address is an all zeroes address, false if not
*/
static inline bool net_eth_is_addr_all_zeroes(struct net_eth_addr *addr)
{
if (addr->addr[0] == 0x00 &&
addr->addr[1] == 0x00 &&
addr->addr[2] == 0x00 &&
addr->addr[3] == 0x00 &&
addr->addr[4] == 0x00 &&
addr->addr[5] == 0x00) {
return true;
}
return false;
}
/**
* @brief Check if the Ethernet MAC address is unspecified.
*
* @param addr A valid pointer to a Ethernet MAC address.
*
* @return true if address is unspecified, false if not
*/
static inline bool net_eth_is_addr_unspecified(struct net_eth_addr *addr)
{
if (addr->addr[0] == 0x00 &&
addr->addr[1] == 0x00 &&
addr->addr[2] == 0x00 &&
addr->addr[3] == 0x00 &&
addr->addr[4] == 0x00 &&
addr->addr[5] == 0x00) {
return true;
}
return false;
}
/**
 * @brief Check if the Ethernet MAC address is a multicast address.
 *
 * Only the IP-derived multicast prefixes enabled in the build are
 * recognized; other group addresses are not matched here.
 *
 * @param addr A valid pointer to a Ethernet MAC address.
 *
 * @return true if address is a multicast address, false if not
 */
static inline bool net_eth_is_addr_multicast(const struct net_eth_addr *addr)
{
#if defined(CONFIG_NET_IPV6)
	/* 33:33:xx:xx:xx:xx is the IPv6 multicast MAC prefix. */
	if (addr->addr[0] == 0x33 && addr->addr[1] == 0x33) {
		return true;
	}
#endif

#if defined(CONFIG_NET_IPV4)
	/* 01:00:5e:xx:xx:xx is the IPv4 multicast MAC prefix. */
	if (addr->addr[0] == 0x01 && addr->addr[1] == 0x00 &&
	    addr->addr[2] == 0x5e) {
		return true;
	}
#endif

	return false;
}
/**
* @brief Check if the Ethernet MAC address is a group address.
*
* @param addr A valid pointer to a Ethernet MAC address.
*
* @return true if address is a group address, false if not
*/
static inline bool net_eth_is_addr_group(struct net_eth_addr *addr)
{
return addr->addr[0] & 0x01;
}
/**
 * @brief Check if the Ethernet MAC address is valid.
 *
 * A valid unicast source address is neither the unspecified (all-zero)
 * address nor a group (multicast/broadcast) address.
 *
 * @param addr A valid pointer to a Ethernet MAC address.
 *
 * @return true if address is valid, false if not
 */
static inline bool net_eth_is_addr_valid(struct net_eth_addr *addr)
{
	if (net_eth_is_addr_unspecified(addr)) {
		return false;
	}

	return !net_eth_is_addr_group(addr);
}
/**
 * @brief Check if the Ethernet MAC address is a LLDP multicast address.
 *
 * @param addr A valid pointer to a Ethernet MAC address.
 *
 * @return true if address is a LLDP multicast address, false if not
 */
static inline bool net_eth_is_addr_lldp_multicast(const struct net_eth_addr *addr)
{
#if defined(CONFIG_NET_GPTP) || defined(CONFIG_NET_LLDP)
	/* 01:80:c2:00:00:0e is the LLDP/gPTP nearest-bridge group address. */
	if (addr->addr[0] == 0x01 && addr->addr[1] == 0x80 &&
	    addr->addr[2] == 0xc2 && addr->addr[3] == 0x00 &&
	    addr->addr[4] == 0x00 && addr->addr[5] == 0x0e) {
		return true;
	}
#else
	ARG_UNUSED(addr);
#endif

	return false;
}
/**
 * @brief Check if the Ethernet MAC address is a PTP multicast address.
 *
 * @param addr A valid pointer to a Ethernet MAC address.
 *
 * @return true if address is a PTP multicast address, false if not
 */
static inline bool net_eth_is_addr_ptp_multicast(const struct net_eth_addr *addr)
{
#if defined(CONFIG_NET_GPTP)
	/* 01:1b:19:00:00:00 is the PTP primary multicast address. */
	if (addr->addr[0] == 0x01 && addr->addr[1] == 0x1b &&
	    addr->addr[2] == 0x19 && addr->addr[3] == 0x00 &&
	    addr->addr[4] == 0x00 && addr->addr[5] == 0x00) {
		return true;
	}
#else
	ARG_UNUSED(addr);
#endif

	return false;
}
/**
 * @brief Return Ethernet broadcast address.
 *
 * @return Pointer to the constant Ethernet broadcast address
 * (ff:ff:ff:ff:ff:ff).
 */
const struct net_eth_addr *net_eth_broadcast_addr(void);
/**
 * @brief Convert IPv4 multicast address to Ethernet address.
 *
 * @param ipv4_addr IPv4 multicast address
 * @param mac_addr Output buffer for Ethernet address; filled by this call
 */
void net_eth_ipv4_mcast_to_mac_addr(const struct in_addr *ipv4_addr,
				    struct net_eth_addr *mac_addr);
/**
 * @brief Convert IPv6 multicast address to Ethernet address.
 *
 * @param ipv6_addr IPv6 multicast address
 * @param mac_addr Output buffer for Ethernet address; filled by this call
 */
void net_eth_ipv6_mcast_to_mac_addr(const struct in6_addr *ipv6_addr,
				    struct net_eth_addr *mac_addr);
/**
* @brief Return ethernet device hardware capability information.
*
* @param iface Network interface
*
* @return Hardware capabilities
*/
static inline
enum ethernet_hw_caps net_eth_get_hw_capabilities(struct net_if *iface)
{
const struct ethernet_api *eth =
(struct ethernet_api *)net_if_get_device(iface)->api;
if (!eth->get_capabilities) {
return (enum ethernet_hw_caps)0;
}
return eth->get_capabilities(net_if_get_device(iface));
}
/**
* @brief Return ethernet device hardware configuration information.
*
* @param iface Network interface
* @param type configuration type
* @param config Ethernet configuration
*
* @return 0 if ok, <0 if error
*/
static inline
int net_eth_get_hw_config(struct net_if *iface, enum ethernet_config_type type,
struct ethernet_config *config)
{
const struct ethernet_api *eth =
(struct ethernet_api *)net_if_get_device(iface)->api;
if (!eth->get_config) {
return -ENOTSUP;
}
return eth->get_config(net_if_get_device(iface), type, config);
}
/**
 * @brief Add VLAN tag to the interface.
 *
 * @param iface Interface to use.
 * @param tag VLAN tag to add
 *
 * @return 0 if ok, <0 if error
 */
#if defined(CONFIG_NET_VLAN)
int net_eth_vlan_enable(struct net_if *iface, uint16_t tag);
#else
static inline int net_eth_vlan_enable(struct net_if *iface, uint16_t tag)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(tag);
	/* VLAN support not compiled in. */
	return -EINVAL;
}
#endif
/**
 * @brief Remove VLAN tag from the interface.
 *
 * @param iface Interface to use.
 * @param tag VLAN tag to remove
 *
 * @return 0 if ok, <0 if error
 */
#if defined(CONFIG_NET_VLAN)
int net_eth_vlan_disable(struct net_if *iface, uint16_t tag);
#else
static inline int net_eth_vlan_disable(struct net_if *iface, uint16_t tag)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(tag);
	/* VLAN support not compiled in. */
	return -EINVAL;
}
#endif
/**
 * @brief Return VLAN tag specified to network interface.
 *
 * Note that the interface parameter must be the VLAN interface,
 * and not the Ethernet one.
 *
 * @param iface VLAN network interface.
 *
 * @return VLAN tag for this interface or NET_VLAN_TAG_UNSPEC if VLAN
 * is not configured for that interface.
 */
#if defined(CONFIG_NET_VLAN)
uint16_t net_eth_get_vlan_tag(struct net_if *iface);
#else
static inline uint16_t net_eth_get_vlan_tag(struct net_if *iface)
{
	ARG_UNUSED(iface);
	return NET_VLAN_TAG_UNSPEC;
}
#endif
/**
 * @brief Return network interface related to this VLAN tag
 *
 * @param iface Main network interface (not the VLAN one).
 * @param tag VLAN tag
 *
 * @return Network interface related to this tag or NULL if no such interface
 * exists.
 */
#if defined(CONFIG_NET_VLAN)
struct net_if *net_eth_get_vlan_iface(struct net_if *iface, uint16_t tag);
#else
static inline
struct net_if *net_eth_get_vlan_iface(struct net_if *iface, uint16_t tag)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(tag);
	return NULL;
}
#endif
/**
 * @brief Return main network interface that is attached to this VLAN tag.
 *
 * @param iface VLAN network interface. This is used to get the
 * pointer to ethernet L2 context
 *
 * @return Network interface related to this tag or NULL if no such interface
 * exists.
 */
#if defined(CONFIG_NET_VLAN)
struct net_if *net_eth_get_vlan_main(struct net_if *iface);
#else
static inline
struct net_if *net_eth_get_vlan_main(struct net_if *iface)
{
	ARG_UNUSED(iface);
	return NULL;
}
#endif
/**
 * @brief Check if there are any VLAN interfaces enabled to this specific
 * Ethernet network interface.
 *
 * Note that the iface must be the actual Ethernet interface and not the
 * virtual VLAN interface.
 *
 * @param ctx Ethernet context
 * @param iface Ethernet network interface
 *
 * @return True if there are enabled VLANs for this network interface,
 * false if not.
 */
#if defined(CONFIG_NET_VLAN)
bool net_eth_is_vlan_enabled(struct ethernet_context *ctx,
			     struct net_if *iface);
#else
static inline bool net_eth_is_vlan_enabled(struct ethernet_context *ctx,
					   struct net_if *iface)
{
	ARG_UNUSED(ctx);
	ARG_UNUSED(iface);
	return false;
}
#endif
/**
 * @brief Get VLAN status for a given network interface (enabled or not).
 *
 * @param iface Network interface
 *
 * @return True if VLAN is enabled for this network interface, false if not.
 */
#if defined(CONFIG_NET_VLAN)
bool net_eth_get_vlan_status(struct net_if *iface);
#else
static inline bool net_eth_get_vlan_status(struct net_if *iface)
{
	ARG_UNUSED(iface);
	return false;
}
#endif
/**
 * @brief Check if the given interface is a VLAN interface.
 *
 * @param iface Network interface
 *
 * @return True if this network interface is VLAN one, false if not.
 */
#if defined(CONFIG_NET_VLAN)
bool net_eth_is_vlan_interface(struct net_if *iface);
#else
static inline bool net_eth_is_vlan_interface(struct net_if *iface)
{
	ARG_UNUSED(iface);
	return false;
}
#endif
/** @cond INTERNAL_HIDDEN */
/* With the full network stack, bind the device to a net_if with the
 * Ethernet L2; in raw driver mode only the bare device is created.
 */
#if !defined(CONFIG_ETH_DRIVER_RAW_MODE)
#define Z_ETH_NET_DEVICE_INIT_INSTANCE(node_id, dev_id, name, instance,	\
				       init_fn, pm, data, config, prio,	\
				       api, mtu)			\
	Z_NET_DEVICE_INIT_INSTANCE(node_id, dev_id, name, instance,	\
				   init_fn, pm, data, config, prio,	\
				   api, ETHERNET_L2,			\
				   NET_L2_GET_CTX_TYPE(ETHERNET_L2), mtu)
#else /* CONFIG_ETH_DRIVER_RAW_MODE */
#define Z_ETH_NET_DEVICE_INIT_INSTANCE(node_id, dev_id, name, instance,	\
				       init_fn, pm, data, config, prio,	\
				       api, mtu)			\
	Z_DEVICE_STATE_DEFINE(dev_id);					\
	Z_DEVICE_DEFINE(node_id, dev_id, name, init_fn, pm, data,	\
			config, POST_KERNEL, prio, api,			\
			&Z_DEVICE_STATE_NAME(dev_id));
#endif /* CONFIG_ETH_DRIVER_RAW_MODE */
/* Single-instance convenience wrapper (instance number 0). */
#define Z_ETH_NET_DEVICE_INIT(node_id, dev_id, name, init_fn, pm, data,	\
			      config, prio, api, mtu)			\
	Z_ETH_NET_DEVICE_INIT_INSTANCE(node_id, dev_id, name, 0,	\
				       init_fn, pm, data, config, prio,	\
				       api, mtu)
/** @endcond */
/**
 * @brief Create an Ethernet network interface and bind it to network device.
 *
 * @param dev_id Network device id.
 * @param name The name this instance of the driver exposes to
 * the system.
 * @param init_fn Address to the init function of the driver.
 * @param pm Reference to struct pm_device associated with the device.
 * (optional).
 * @param data Pointer to the device's private data.
 * @param config The address to the structure containing the
 * configuration information for this instance of the driver.
 * @param prio The initialization level at which configuration occurs.
 * @param api Provides an initial pointer to the API function struct
 * used by the driver. Can be NULL.
 * @param mtu Maximum transfer unit in bytes for this network interface.
 */
#define ETH_NET_DEVICE_INIT(dev_id, name, init_fn, pm, data, config,	\
			    prio, api, mtu)				\
	Z_ETH_NET_DEVICE_INIT(DT_INVALID_NODE, dev_id, name, init_fn,	\
			      pm, data, config, prio, api, mtu)
/**
 * @brief Create multiple Ethernet network interfaces and bind them to network
 * devices.
 * If your network device needs more than one instance of a network interface,
 * use this macro below and provide a different instance suffix each time
 * (0, 1, 2, ... or a, b, c ... whatever works for you)
 *
 * @param dev_id Network device id.
 * @param name The name this instance of the driver exposes to
 * the system.
 * @param instance Instance identifier.
 * @param init_fn Address to the init function of the driver.
 * @param pm Reference to struct pm_device associated with the device.
 * (optional).
 * @param data Pointer to the device's private data.
 * @param config The address to the structure containing the
 * configuration information for this instance of the driver.
 * @param prio The initialization level at which configuration occurs.
 * @param api Provides an initial pointer to the API function struct
 * used by the driver. Can be NULL.
 * @param mtu Maximum transfer unit in bytes for this network interface.
 */
#define ETH_NET_DEVICE_INIT_INSTANCE(dev_id, name, instance, init_fn,	\
				     pm, data, config, prio, api, mtu)	\
	Z_ETH_NET_DEVICE_INIT_INSTANCE(DT_INVALID_NODE, dev_id, name,	\
				       instance, init_fn, pm, data,	\
				       config, prio, api, mtu)
/**
 * @brief Like ETH_NET_DEVICE_INIT but taking metadata from a devicetree.
 * Create an Ethernet network interface and bind it to network device.
 *
 * @param node_id The devicetree node identifier.
 * @param init_fn Address to the init function of the driver.
 * @param pm Reference to struct pm_device associated with the device.
 * (optional).
 * @param data Pointer to the device's private data.
 * @param config The address to the structure containing the
 * configuration information for this instance of the driver.
 * @param prio The initialization level at which configuration occurs.
 * @param api Provides an initial pointer to the API function struct
 * used by the driver. Can be NULL.
 * @param mtu Maximum transfer unit in bytes for this network interface.
 */
#define ETH_NET_DEVICE_DT_DEFINE(node_id, init_fn, pm, data, config,	\
				 prio, api, mtu)			\
	Z_ETH_NET_DEVICE_INIT(node_id, Z_DEVICE_DT_DEV_ID(node_id),	\
			      DEVICE_DT_NAME(node_id), init_fn, pm,	\
			      data, config, prio, api, mtu)
/**
 * @brief Like ETH_NET_DEVICE_DT_DEFINE for an instance of a DT_DRV_COMPAT
 * compatible
 *
 * @param inst instance number. This is replaced by
 * <tt>DT_DRV_COMPAT(inst)</tt> in the call to ETH_NET_DEVICE_DT_DEFINE.
 *
 * @param ... other parameters as expected by ETH_NET_DEVICE_DT_DEFINE.
 */
#define ETH_NET_DEVICE_DT_INST_DEFINE(inst, ...) \
	ETH_NET_DEVICE_DT_DEFINE(DT_DRV_INST(inst), __VA_ARGS__)
/**
 * @brief Inform ethernet L2 driver that ethernet carrier is detected.
 * This happens when cable is connected.
 *
 * @param iface Network interface
 */
void net_eth_carrier_on(struct net_if *iface);
/**
 * @brief Inform ethernet L2 driver that ethernet carrier was lost.
 * This happens when cable is disconnected.
 *
 * @param iface Network interface
 */
void net_eth_carrier_off(struct net_if *iface);
/**
 * @brief Set promiscuous mode either ON or OFF.
 *
 * @param iface Network interface
 *
 * @param enable on (true) or off (false)
 *
 * @return 0 if mode set or unset was successful, <0 otherwise.
 */
int net_eth_promisc_mode(struct net_if *iface, bool enable);
/**
 * @brief Set TX-Injection mode either ON or OFF.
 *
 * @param iface Network interface
 *
 * @param enable on (true) or off (false)
 *
 * @return 0 if mode set or unset was successful, <0 otherwise.
 */
int net_eth_txinjection_mode(struct net_if *iface, bool enable);
/**
 * @brief Set or unset HW filtering for MAC address @p mac.
 *
 * @param iface Network interface
 * @param mac Pointer to an ethernet MAC address
 * @param type Filter type, either source or destination
 * @param enable Set (true) or unset (false)
 *
 * @return 0 if filter set or unset was successful, <0 otherwise.
 */
int net_eth_mac_filter(struct net_if *iface, struct net_eth_addr *mac,
		       enum ethernet_filter_type type, bool enable);
/**
 * @brief Return the PHY device that is tied to this ethernet network interface.
 *
 * @param iface Network interface
 *
 * @return Pointer to PHY device if found, NULL if not found.
 */
const struct device *net_eth_get_phy(struct net_if *iface);
/**
 * @brief Return PTP clock that is tied to this ethernet network interface.
 *
 * @param iface Network interface
 *
 * @return Pointer to PTP clock if found, NULL if not found or if this
 * ethernet interface does not support PTP.
 */
#if defined(CONFIG_PTP_CLOCK)
const struct device *net_eth_get_ptp_clock(struct net_if *iface);
#else
static inline const struct device *net_eth_get_ptp_clock(struct net_if *iface)
{
	ARG_UNUSED(iface);
	/* PTP clock support not compiled in. */
	return NULL;
}
#endif
/**
 * @brief Return PTP clock that is tied to this ethernet network interface
 * index.
 *
 * @param index Network interface index
 *
 * @return Pointer to PTP clock if found, NULL if not found or if this
 * ethernet interface index does not support PTP.
 */
__syscall const struct device *net_eth_get_ptp_clock_by_index(int index);
/**
 * @brief Return PTP port number attached to this interface.
 *
 * @param iface Network interface
 *
 * @return Port number, no such port if < 0
 */
#if defined(CONFIG_NET_L2_PTP)
int net_eth_get_ptp_port(struct net_if *iface);
#else
static inline int net_eth_get_ptp_port(struct net_if *iface)
{
	ARG_UNUSED(iface);
	return -ENODEV;
}
#endif /* CONFIG_NET_L2_PTP */
/**
 * @brief Set PTP port number attached to this interface.
 *
 * @param iface Network interface
 * @param port Port number to set
 */
#if defined(CONFIG_NET_L2_PTP)
void net_eth_set_ptp_port(struct net_if *iface, int port);
#else
static inline void net_eth_set_ptp_port(struct net_if *iface, int port)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(port);
}
#endif /* CONFIG_NET_L2_PTP */
/**
 * @brief Check if the Ethernet L2 network interface can perform Wi-Fi.
 *
 * @param iface Pointer to network interface
 *
 * @return True if interface supports Wi-Fi, False otherwise.
 */
static inline bool net_eth_type_is_wifi(struct net_if *iface)
{
	const struct ethernet_context *context =
		(const struct ethernet_context *)net_if_l2_data(iface);

	return context->eth_if_type == L2_ETH_IF_TYPE_WIFI;
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/ethernet.h>
#endif /* ZEPHYR_INCLUDE_NET_ETHERNET_H_ */
``` | /content/code_sandbox/include/zephyr/net/ethernet.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 9,552 |
```objective-c
/** @file
@brief LLDP definitions and handler
This is not to be included by the application.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_LLDP_H_
#define ZEPHYR_INCLUDE_NET_LLDP_H_
/**
* @brief LLDP definitions and helpers
* @defgroup lldp Link Layer Discovery Protocol definitions and helpers
* @since 1.13
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/** @cond INTERNAL_HIDDEN */
/* Extract the 9-bit length / 7-bit type fields from a TLV header word.
 * The argument is parenthesized so callers can safely pass expressions
 * (e.g. LLDP_TLV_GET_TYPE(a | b) previously bound ">>" incorrectly).
 */
#define LLDP_TLV_GET_LENGTH(type_length)	((type_length) & BIT_MASK(9))
#define LLDP_TLV_GET_TYPE(type_length)	((uint8_t)((type_length) >> 9))
/* LLDP Definitions */
/* According to the spec, End of LLDPDU TLV value is constant. */
#define NET_LLDP_END_LLDPDU_VALUE 0x0000
/*
 * For the Chassis ID TLV Value, if subtype is a MAC address then we must
 * use values from CONFIG_NET_LLDP_CHASSIS_ID_MAC0 through
 * CONFIG_NET_LLDP_CHASSIS_ID_MAC5. If not, we use CONFIG_NET_LLDP_CHASSIS_ID.
 *
 * FIXME: implement a similar scheme for subtype 5 (network address).
 */
#if defined(CONFIG_NET_LLDP_CHASSIS_ID_SUBTYPE)
#if (CONFIG_NET_LLDP_CHASSIS_ID_SUBTYPE == 4)
/* Subtype 4: chassis identified by its MAC address. */
#define NET_LLDP_CHASSIS_ID_VALUE \
	{ \
	  CONFIG_NET_LLDP_CHASSIS_ID_MAC0, \
	  CONFIG_NET_LLDP_CHASSIS_ID_MAC1, \
	  CONFIG_NET_LLDP_CHASSIS_ID_MAC2, \
	  CONFIG_NET_LLDP_CHASSIS_ID_MAC3, \
	  CONFIG_NET_LLDP_CHASSIS_ID_MAC4, \
	  CONFIG_NET_LLDP_CHASSIS_ID_MAC5  \
	}
#define NET_LLDP_CHASSIS_ID_VALUE_LEN (6)
#else
/* Other subtypes: chassis identified by the configured string
 * (length excludes the terminating NUL).
 */
#define NET_LLDP_CHASSIS_ID_VALUE CONFIG_NET_LLDP_CHASSIS_ID
#define NET_LLDP_CHASSIS_ID_VALUE_LEN (sizeof(CONFIG_NET_LLDP_CHASSIS_ID) - 1)
#endif
#else
#define NET_LLDP_CHASSIS_ID_VALUE 0
#define NET_LLDP_CHASSIS_ID_VALUE_LEN 0
#endif
/*
 * For the Port ID TLV Value, if subtype is a MAC address then we must
 * use values from CONFIG_NET_LLDP_PORT_ID_MAC0 through
 * CONFIG_NET_LLDP_PORT_ID_MAC5. If not, we use CONFIG_NET_LLDP_PORT_ID.
 *
 * FIXME: implement a similar scheme for subtype 4 (network address).
 */
#if defined(CONFIG_NET_LLDP_PORT_ID_SUBTYPE)
#if (CONFIG_NET_LLDP_PORT_ID_SUBTYPE == 3)
/* Subtype 3: port identified by its MAC address. */
#define NET_LLDP_PORT_ID_VALUE \
	{ \
	  CONFIG_NET_LLDP_PORT_ID_MAC0, \
	  CONFIG_NET_LLDP_PORT_ID_MAC1, \
	  CONFIG_NET_LLDP_PORT_ID_MAC2, \
	  CONFIG_NET_LLDP_PORT_ID_MAC3, \
	  CONFIG_NET_LLDP_PORT_ID_MAC4, \
	  CONFIG_NET_LLDP_PORT_ID_MAC5  \
	}
#define NET_LLDP_PORT_ID_VALUE_LEN (6)
#else
/* Other subtypes: port identified by the configured string
 * (length excludes the terminating NUL).
 */
#define NET_LLDP_PORT_ID_VALUE CONFIG_NET_LLDP_PORT_ID
#define NET_LLDP_PORT_ID_VALUE_LEN (sizeof(CONFIG_NET_LLDP_PORT_ID) - 1)
#endif
#else
#define NET_LLDP_PORT_ID_VALUE 0
#define NET_LLDP_PORT_ID_VALUE_LEN 0
#endif
/*
 * TLVs Length.
 * Note that TLVs that have a subtype must have a byte added to their length.
 */
#define NET_LLDP_CHASSIS_ID_TLV_LEN (NET_LLDP_CHASSIS_ID_VALUE_LEN + 1)
#define NET_LLDP_PORT_ID_TLV_LEN (NET_LLDP_PORT_ID_VALUE_LEN + 1)
#define NET_LLDP_TTL_TLV_LEN (2)
/*
 * Time to Live value.
 * Calculate based on section 9.2.5.22 from LLDP spec.
 *
 * FIXME: when the network interface is about to be disabled TTL shall be set
 * to zero so LLDP Rx agents can invalidate the entry related to this node.
 */
#if defined(CONFIG_NET_LLDP_TX_INTERVAL) && defined(CONFIG_NET_LLDP_TX_HOLD)
#define NET_LLDP_TTL \
	MIN((CONFIG_NET_LLDP_TX_INTERVAL * CONFIG_NET_LLDP_TX_HOLD) + 1, 65535)
#endif
/* Forward declaration only; full definition lives in net_if.h. */
struct net_if;
/** @endcond */
/** TLV Types. Please refer to table 8-1 from IEEE 802.1AB standard.
 * Numeric values are on-the-wire TLV type codes; do not renumber.
 */
enum net_lldp_tlv_type {
	LLDP_TLV_END_LLDPDU          = 0, /**< End Of LLDPDU (optional) */
	LLDP_TLV_CHASSIS_ID          = 1, /**< Chassis ID (mandatory) */
	LLDP_TLV_PORT_ID             = 2, /**< Port ID (mandatory) */
	LLDP_TLV_TTL                 = 3, /**< Time To Live (mandatory) */
	LLDP_TLV_PORT_DESC           = 4, /**< Port Description (optional) */
	LLDP_TLV_SYSTEM_NAME         = 5, /**< System Name (optional) */
	LLDP_TLV_SYSTEM_DESC         = 6, /**< System Description (optional) */
	LLDP_TLV_SYSTEM_CAPABILITIES = 7, /**< System Capability (optional) */
	LLDP_TLV_MANAGEMENT_ADDR     = 8, /**< Management Address (optional) */
	/* Types 9 - 126 are reserved. */
	LLDP_TLV_ORG_SPECIFIC        = 127, /**< Org specific TLVs (optional) */
};
/** Chassis ID TLV, see chapter 8.5.2 in IEEE 802.1AB */
struct net_lldp_chassis_tlv {
	/** 7 bits for type, 9 bits for length */
	/* NOTE(review): presumably stored in network byte order on the wire;
	 * confirm against the LLDP TX/RX implementation.
	 */
	uint16_t type_length;
	/** ID subtype */
	uint8_t subtype;
	/** Chassis ID value */
	uint8_t value[NET_LLDP_CHASSIS_ID_VALUE_LEN];
} __packed;
/** Port ID TLV, see chapter 8.5.3 in IEEE 802.1AB */
struct net_lldp_port_tlv {
	/** 7 bits for type, 9 bits for length */
	uint16_t type_length;
	/** ID subtype */
	uint8_t subtype;
	/** Port ID value */
	uint8_t value[NET_LLDP_PORT_ID_VALUE_LEN];
} __packed;
/** Time To Live TLV, see chapter 8.5.4 in IEEE 802.1AB */
struct net_lldp_time_to_live_tlv {
	/** 7 bits for type, 9 bits for length */
	uint16_t type_length;
	/** Time To Live (TTL) value */
	uint16_t ttl;
} __packed;
/**
 * LLDP Data Unit (LLDPDU) shall contain the following ordered TLVs
 * as stated in "8.2 LLDPDU format" from the IEEE 802.1AB
 */
struct net_lldpdu {
	struct net_lldp_chassis_tlv chassis_id; /**< Mandatory Chassis TLV */
	struct net_lldp_port_tlv port_id;       /**< Mandatory Port TLV */
	struct net_lldp_time_to_live_tlv ttl;   /**< Mandatory TTL TLV */
} __packed;
/**
 * @brief Set the LLDP data unit for a network interface.
 *
 * @param iface Network interface
 * @param lldpdu LLDP data unit struct (caller keeps ownership; must stay
 * valid while configured)
 *
 * @return 0 if ok, <0 if error
 */
int net_lldp_config(struct net_if *iface, const struct net_lldpdu *lldpdu);
/**
 * @brief Set the Optional LLDP TLVs for a network interface.
 *
 * @param iface Network interface
 * @param tlv LLDP optional TLVs following mandatory part
 * @param len Length of the optional TLVs
 *
 * @return 0 if ok, <0 if error
 */
int net_lldp_config_optional(struct net_if *iface, const uint8_t *tlv,
			     size_t len);
/**
 * @brief Initialize LLDP engine.
 */
void net_lldp_init(void);
/**
 * @brief LLDP Receive packet callback
 *
 * Callback gets called upon receiving packet. It is responsible for
 * freeing packet or indicating to the stack that it needs to free packet
 * by returning correct net_verdict.
 *
 * Returns:
 *  - NET_DROP, if packet was invalid, rejected or we want the stack to free it.
 *    In this case the core stack will free the packet.
 *  - NET_OK, if the packet was accepted, in this case the ownership of the
 *    net_pkt goes to callback and core network stack will forget it.
 */
typedef enum net_verdict (*net_lldp_recv_cb_t)(struct net_if *iface,
					       struct net_pkt *pkt);
/**
 * @brief Register LLDP Rx callback function
 *
 * @param iface Network interface
 * @param cb Callback function
 *
 * @return 0 if ok, < 0 if error
 */
int net_lldp_register_callback(struct net_if *iface, net_lldp_recv_cb_t cb);
/**
 * @brief Parse LLDP packet
 *
 * @param iface Network interface
 * @param pkt Network packet
 *
 * @return Return the policy for network buffer
 */
enum net_verdict net_lldp_recv(struct net_if *iface, struct net_pkt *pkt);
/**
 * @brief Set LLDP protocol data unit (LLDPDU) for the network interface.
 *
 * @param iface Network interface
 *
 * @return <0 if error, index in lldp array if iface is found there
 */
#if defined(CONFIG_NET_LLDP)
int net_lldp_set_lldpdu(struct net_if *iface);
#else
/* Expands to nothing when LLDP is disabled; do not use the return value. */
#define net_lldp_set_lldpdu(iface)
#endif
/**
 * @brief Unset LLDP protocol data unit (LLDPDU) for the network interface.
 *
 * @param iface Network interface
 */
#if defined(CONFIG_NET_LLDP)
void net_lldp_unset_lldpdu(struct net_if *iface);
#else
/* Expands to nothing when LLDP is disabled. */
#define net_lldp_unset_lldpdu(iface)
#endif
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_LLDP_H_ */
``` | /content/code_sandbox/include/zephyr/net/lldp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,200 |
```objective-c
/** @file
* @brief HTTP request methods
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_METHOD_H_
#define ZEPHYR_INCLUDE_NET_HTTP_METHOD_H_
/**
* @brief HTTP request methods
* @defgroup http_methods HTTP request methods
* @since 3.3
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/** @brief HTTP Request Methods
 *
 * Numeric values are part of the API; only append new methods before
 * HTTP_METHOD_END_VALUE (see the "keep this the last value" marker).
 */
enum http_method {
	HTTP_DELETE = 0,        /**< DELETE */
	HTTP_GET = 1,           /**< GET */
	HTTP_HEAD = 2,          /**< HEAD */
	HTTP_POST = 3,          /**< POST */
	HTTP_PUT = 4,           /**< PUT */
	HTTP_CONNECT = 5,       /**< CONNECT */
	HTTP_OPTIONS = 6,       /**< OPTIONS */
	HTTP_TRACE = 7,         /**< TRACE */
	HTTP_COPY = 8,          /**< COPY */
	HTTP_LOCK = 9,          /**< LOCK */
	HTTP_MKCOL = 10,        /**< MKCOL */
	HTTP_MOVE = 11,         /**< MOVE */
	HTTP_PROPFIND = 12,     /**< PROPFIND */
	HTTP_PROPPATCH = 13,    /**< PROPPATCH */
	HTTP_SEARCH = 14,       /**< SEARCH */
	HTTP_UNLOCK = 15,       /**< UNLOCK */
	HTTP_BIND = 16,         /**< BIND */
	HTTP_REBIND = 17,       /**< REBIND */
	HTTP_UNBIND = 18,       /**< UNBIND */
	HTTP_ACL = 19,          /**< ACL */
	HTTP_REPORT = 20,       /**< REPORT */
	HTTP_MKACTIVITY = 21,   /**< MKACTIVITY */
	HTTP_CHECKOUT = 22,     /**< CHECKOUT */
	HTTP_MERGE = 23,        /**< MERGE */
	HTTP_MSEARCH = 24,      /**< MSEARCH */
	HTTP_NOTIFY = 25,       /**< NOTIFY */
	HTTP_SUBSCRIBE = 26,    /**< SUBSCRIBE */
	HTTP_UNSUBSCRIBE = 27,  /**< UNSUBSCRIBE */
	HTTP_PATCH = 28,        /**< PATCH */
	HTTP_PURGE = 29,        /**< PURGE */
	HTTP_MKCALENDAR = 30,   /**< MKCALENDAR */
	HTTP_LINK = 31,         /**< LINK */
	HTTP_UNLINK = 32,       /**< UNLINK */
	/** @cond INTERNAL_HIDDEN */
	HTTP_METHOD_END_VALUE   /* keep this the last value */
	/** @endcond */
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif
``` | /content/code_sandbox/include/zephyr/net/http/method.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 515 |
```objective-c
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_PARSER_H_
#define ZEPHYR_INCLUDE_NET_HTTP_PARSER_H_
/* Also update SONAME in the Makefile whenever you change these. */
#define HTTP_PARSER_VERSION_MAJOR 2
#define HTTP_PARSER_VERSION_MINOR 7
#define HTTP_PARSER_VERSION_PATCH 1
#include <sys/types.h>
#if defined(_WIN32) && !defined(__MINGW32__) && \
(!defined(_MSC_VER) || _MSC_VER < 1600) && !defined(__WINE__)
#include <BaseTsd.h>
#include <stddef.h>
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else
#include <zephyr/types.h>
#include <stddef.h>
#endif
#include <zephyr/net/http/method.h>
#include <zephyr/net/http/parser_state.h>
#include <zephyr/net/http/parser_url.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Maximum header size allowed. If the macro is not defined
* before including this header then the default is used. To
* change the maximum header size, define the macro in the build
* environment (e.g. -DHTTP_MAX_HEADER_SIZE=<value>). To remove
* the effective limit on the size of the header, define the macro
* to a very large number (e.g. -DHTTP_MAX_HEADER_SIZE=0x7fffffff)
*/
#ifndef HTTP_MAX_HEADER_SIZE
# define HTTP_MAX_HEADER_SIZE (80 * 1024)
#endif
struct http_parser;
struct http_parser_settings;
/* Callbacks should return non-zero to indicate an error. The parser will
* then halt execution.
*
* The one exception is on_headers_complete. In a HTTP_RESPONSE parser
* returning '1' from on_headers_complete will tell the parser that it
* should not expect a body. This is used when receiving a response to a
* HEAD request which may contain 'Content-Length' or 'Transfer-Encoding:
* chunked' headers that indicate the presence of a body.
*
* Returning `2` from on_headers_complete will tell parser that it should not
* expect neither a body nor any further responses on this connection. This is
* useful for handling responses to a CONNECT request which may not contain
* `Upgrade` or `Connection: upgrade` headers.
*
* http_data_cb does not return data chunks. It will be called arbitrarily
* many times for each string. E.G. you might get 10 callbacks for "on_url"
* each providing just a few characters more data.
*/
/* Data callback: receives a pointer into the caller's input buffer plus a
 * length; may be invoked several times for a single element (see comment
 * above about on_url).
 */
typedef int (*http_data_cb)(struct http_parser *, const char *at,
			    size_t length);
/* Notification callback: no payload, only the parser instance. */
typedef int (*http_cb)(struct http_parser *);
/* What kind of messages this parser instance accepts. */
enum http_parser_type { HTTP_REQUEST, HTTP_RESPONSE, HTTP_BOTH };
/* Flag values for http_parser.flags field (private parser state bits). */
enum flags {
	F_CHUNKED = 1 << 0,
	F_CONNECTION_KEEP_ALIVE = 1 << 1,
	F_CONNECTION_CLOSE = 1 << 2,
	F_CONNECTION_UPGRADE = 1 << 3,
	F_TRAILING = 1 << 4,
	F_UPGRADE = 1 << 5,
	F_SKIPBODY = 1 << 6,
	F_CONTENTLENGTH = 1 << 7
};
/* Parser error codes. The HPE_CB_* values mean the corresponding user
 * callback returned non-zero (see the callback contract above).
 */
enum http_errno {
	HPE_OK,
	HPE_CB_message_begin,
	HPE_CB_url,
	HPE_CB_header_field,
	HPE_CB_header_value,
	HPE_CB_headers_complete,
	HPE_CB_body,
	HPE_CB_message_complete,
	HPE_CB_status,
	HPE_CB_chunk_header,
	HPE_CB_chunk_complete,
	HPE_INVALID_EOF_STATE,
	HPE_HEADER_OVERFLOW,
	HPE_CLOSED_CONNECTION,
	HPE_INVALID_VERSION,
	HPE_INVALID_STATUS,
	HPE_INVALID_METHOD,
	HPE_INVALID_URL,
	HPE_INVALID_HOST,
	HPE_INVALID_PORT,
	HPE_INVALID_PATH,
	HPE_INVALID_QUERY_STRING,
	HPE_INVALID_FRAGMENT,
	HPE_LF_EXPECTED,
	HPE_INVALID_HEADER_TOKEN,
	HPE_INVALID_CONTENT_LENGTH,
	HPE_UNEXPECTED_CONTENT_LENGTH,
	HPE_INVALID_CHUNK_SIZE,
	HPE_INVALID_CONSTANT,
	HPE_INVALID_INTERNAL_STATE,
	HPE_STRICT,
	HPE_PAUSED,
	HPE_UNKNOWN
};
/* Get an http_errno value from an http_parser */
#define HTTP_PARSER_ERRNO(p) ((enum http_errno) (p)->http_errno)
/* Parser instance. Initialize with http_parser_init() before use. */
struct http_parser {
	/** PRIVATE **/
	unsigned int type : 2;         /* enum http_parser_type */
	unsigned int flags : 8;        /* F_xxx values from 'flags' enum;
					* semi-public
					*/
	unsigned int state : 7;        /* enum state from http_parser.c */
	unsigned int header_state : 7; /* enum header_state from http_parser.c
					*/
	unsigned int index : 7;        /* index into current matcher */
	unsigned int lenient_http_headers : 1;
	uint32_t nread;          /* # bytes read in various scenarios */
	uint64_t content_length; /* # bytes in body (0 if no Content-Length
				  * header)
				  */
	/** READ-ONLY **/
	unsigned short http_major;
	unsigned short http_minor;
	unsigned int status_code : 16; /* responses only */
	unsigned int method : 8;       /* requests only */
	unsigned int http_errno : 7;   /* read via HTTP_PARSER_ERRNO() */
	/* 1 = Upgrade header was present and the parser has exited because of
	 * that.
	 * 0 = No upgrade header present.
	 * Should be checked when http_parser_execute() returns in addition to
	 * error checking.
	 */
	unsigned int upgrade : 1;
	/** PUBLIC **/
	void *data; /* A pointer to get hook to the "connection" or "socket"
		     * object
		     */
	/* Remote socket address of http connection, where parser can initiate
	 * replies if necessary.
	 */
	const struct sockaddr *addr;
};
/**
 * Callback set passed to http_parser_execute(). Zero all members with
 * http_parser_settings_init() before assigning the callbacks of interest.
 */
struct http_parser_settings {
	http_cb on_message_begin;     /* start of a new message */
	http_data_cb on_url;          /* URL data (requests only) */
	http_data_cb on_status;       /* status string data (responses only) */
	http_data_cb on_header_field; /* header field name data */
	http_data_cb on_header_value; /* header field value data */
	http_cb on_headers_complete;  /* all headers parsed; may return 1 or 2, see above */
	http_data_cb on_body;         /* body data */
	http_cb on_message_complete;  /* end of the message */
	/* When on_chunk_header is called, the current chunk length is stored
	 * in parser->content_length.
	 */
	http_cb on_chunk_header;
	http_cb on_chunk_complete;
};
/* Returns the library version. Bits 16-23 contain the major version number,
* bits 8-15 the minor version number and bits 0-7 the patch level.
* Usage example:
*
* unsigned long version = http_parser_version();
* unsigned major = (version >> 16) & 255;
* unsigned minor = (version >> 8) & 255;
* unsigned patch = version & 255;
* printf("http_parser v%u.%u.%u\n", major, minor, patch);
*/
unsigned long http_parser_version(void);
void http_parser_init(struct http_parser *parser, enum http_parser_type type);
/* Initialize http_parser_settings members to 0
*/
void http_parser_settings_init(struct http_parser_settings *settings);
/* Executes the parser. Returns number of parsed bytes. Sets
* `parser->http_errno` on error.
*/
size_t http_parser_execute(struct http_parser *parser,
const struct http_parser_settings *settings,
const char *data, size_t len);
/* If http_should_keep_alive() in the on_headers_complete or
* on_message_complete callback returns 0, then this should be
* the last message on the connection.
* If you are the server, respond with the "Connection: close" header.
* If you are the client, close the connection.
*/
int http_should_keep_alive(const struct http_parser *parser);
/* Returns a string version of the HTTP method. */
const char *http_method_str(enum http_method m);
/* Return a string name of the given error */
const char *http_errno_name(enum http_errno err);
/* Return a string description of the given error */
const char *http_errno_description(enum http_errno err);
/* Pause or un-pause the parser; a nonzero value pauses */
void http_parser_pause(struct http_parser *parser, int paused);
/* Checks if this is the final chunk of the body. */
int http_body_is_final(const struct http_parser *parser);
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/net/http/parser.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,995 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_SERVICE_H_
#define ZEPHYR_INCLUDE_NET_HTTP_SERVICE_H_
/**
* @file service.h
*
* @brief HTTP service API
*
* @defgroup http_service HTTP service API
* @since 3.4
* @version 0.1.0
* @ingroup networking
* @{
*/
#include <stdint.h>
#include <stddef.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/net/tls_credentials.h>
#ifdef __cplusplus
extern "C" {
#endif
/** HTTP resource description */
struct http_resource_desc {
/** Resource name */
const char *resource;
/** Detail associated with this resource */
void *detail;
};
/**
* @brief Define a static HTTP resource
*
* A static HTTP resource is one that is known prior to system initialization. In contrast,
* dynamic resources may be discovered upon system initialization. Dynamic resources may also be
* inserted, or removed by events originating internally or externally to the system at runtime.
*
* @note The @p _resource is the URL without the associated protocol, host, or URL parameters. E.g.
 * the resource for `http://www.foo.com/bar/baz.html#param1=value1` would be `/bar/baz.html`. It
* is often referred to as the "path" of the URL. Every `(service, resource)` pair should be
* unique. The @p _resource must be non-NULL.
*
* @param _name Name of the resource.
* @param _service Name of the associated service.
* @param _resource Pathname-like string identifying the resource.
* @param _detail Implementation-specific detail associated with the resource.
*/
#define HTTP_RESOURCE_DEFINE(_name, _service, _resource, _detail) \
const STRUCT_SECTION_ITERABLE_ALTERNATE(http_resource_desc_##_service, http_resource_desc, \
_name) = { \
.resource = _resource, \
.detail = (void *)(_detail), \
}
/** @cond INTERNAL_HIDDEN */
/* Internal descriptor of a single HTTP(S) service. Instances are created by
 * the HTTP_SERVICE_DEFINE* / HTTPS_SERVICE_DEFINE* macros in an iterable
 * section.
 */
struct http_service_desc {
	/* IP address or hostname the service is associated with. */
	const char *host;
	/* Port of the service; written back for ephemeral (zero) ports. */
	uint16_t *port;
	/* Implementation-specific detail associated with the service. */
	void *detail;
	/* Maximum number of concurrent clients. */
	size_t concurrent;
	/* Maximum number of queued connections. */
	size_t backlog;
	/* Start of the service's static resource array (NULL if none). */
	struct http_resource_desc *res_begin;
	/* One past the last entry of the static resource array. */
	struct http_resource_desc *res_end;
#if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
	/* TLS security tag list used to set up an HTTPS socket. */
	const sec_tag_t *sec_tag_list;
	/* Number of entries in sec_tag_list. */
	size_t sec_tag_list_size;
#endif
};
/* Internal helper: instantiate a struct http_service_desc in its iterable
 * section. With TLS enabled, up to two optional trailing arguments are
 * consumed: sec_tag_list and sec_tag_list_size.
 */
#define __z_http_service_define(_name, _host, _port, _concurrent, _backlog, _detail, _res_begin,   \
				_res_end, ...)                                                     \
	static const STRUCT_SECTION_ITERABLE(http_service_desc, _name) = {                         \
		.host = _host,                                                                     \
		.port = (uint16_t *)(_port),                                                       \
		.detail = (void *)(_detail),                                                       \
		.concurrent = (_concurrent),                                                       \
		.backlog = (_backlog),                                                             \
		.res_begin = (_res_begin),                                                         \
		.res_end = (_res_end),                                                             \
		COND_CODE_1(CONFIG_NET_SOCKETS_SOCKOPT_TLS,                                        \
			    (.sec_tag_list = COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__), (NULL),  \
							 (GET_ARG_N(1, __VA_ARGS__))),), ())       \
		COND_CODE_1(CONFIG_NET_SOCKETS_SOCKOPT_TLS,                                        \
			    (.sec_tag_list_size = COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__), (0),\
							      (GET_ARG_N(1, GET_ARGS_LESS_N(1, __VA_ARGS__))))), ())\
	}
/** @endcond */
/**
* @brief Define an HTTP service without static resources.
*
 * @note The @p _host parameter must be non-`NULL`. It is used to specify an IP address either in
 * IPv4 or IPv6 format, a fully-qualified hostname, or a virtual host.
 *
 * @note The @p _port parameter must be non-`NULL`. It points to a location that specifies the port
 * number to use for the service. If the specified port number is zero, then an ephemeral port
 * number will be used and the actual port number assigned will be written back to memory. For
 * ephemeral port numbers, the memory pointed to by @p _port must be writeable.
 *
 * @param _name Name of the service.
 * @param _host IP address or hostname associated with the service.
 * @param[inout] _port Pointer to port associated with the service.
 * @param _concurrent Maximum number of concurrent clients.
 * @param _backlog Maximum number of queued connections.
 * @param _detail Implementation-specific detail associated with the service.
*/
#define HTTP_SERVICE_DEFINE_EMPTY(_name, _host, _port, _concurrent, _backlog, _detail) \
__z_http_service_define(_name, _host, _port, _concurrent, _backlog, _detail, NULL, NULL)
/**
* @brief Define an HTTPS service without static resources.
*
 * @note The @p _host parameter must be non-`NULL`. It is used to specify an IP address either in
 * IPv4 or IPv6 format, a fully-qualified hostname, or a virtual host.
 *
 * @note The @p _port parameter must be non-`NULL`. It points to a location that specifies the port
 * number to use for the service. If the specified port number is zero, then an ephemeral port
 * number will be used and the actual port number assigned will be written back to memory. For
 * ephemeral port numbers, the memory pointed to by @p _port must be writeable.
 *
 * @param _name Name of the service.
 * @param _host IP address or hostname associated with the service.
 * @param[inout] _port Pointer to port associated with the service.
 * @param _concurrent Maximum number of concurrent clients.
 * @param _backlog Maximum number of queued connections.
* @param _detail Implementation-specific detail associated with the service.
* @param _sec_tag_list TLS security tag list used to setup a HTTPS socket.
* @param _sec_tag_list_size TLS security tag list size used to setup a HTTPS socket.
*/
#define HTTPS_SERVICE_DEFINE_EMPTY(_name, _host, _port, _concurrent, _backlog, _detail, \
_sec_tag_list, _sec_tag_list_size) \
__z_http_service_define(_name, _host, _port, _concurrent, _backlog, _detail, NULL, NULL, \
_sec_tag_list, _sec_tag_list_size); \
BUILD_ASSERT(IS_ENABLED(CONFIG_NET_SOCKETS_SOCKOPT_TLS), \
"TLS is required for HTTP secure (CONFIG_NET_SOCKETS_SOCKOPT_TLS)")
/**
* @brief Define an HTTP service with static resources.
*
 * @note The @p _host parameter must be non-`NULL`. It is used to specify an IP address either in
 * IPv4 or IPv6 format, a fully-qualified hostname, or a virtual host.
 *
 * @note The @p _port parameter must be non-`NULL`. It points to a location that specifies the port
 * number to use for the service. If the specified port number is zero, then an ephemeral port
 * number will be used and the actual port number assigned will be written back to memory. For
 * ephemeral port numbers, the memory pointed to by @p _port must be writeable.
 *
 * @param _name Name of the service.
 * @param _host IP address or hostname associated with the service.
 * @param[inout] _port Pointer to port associated with the service.
 * @param _concurrent Maximum number of concurrent clients.
 * @param _backlog Maximum number of queued connections.
* @param _detail Implementation-specific detail associated with the service.
*/
#define HTTP_SERVICE_DEFINE(_name, _host, _port, _concurrent, _backlog, _detail) \
extern struct http_resource_desc _CONCAT(_http_resource_desc_##_name, _list_start)[]; \
extern struct http_resource_desc _CONCAT(_http_resource_desc_##_name, _list_end)[]; \
__z_http_service_define(_name, _host, _port, _concurrent, _backlog, _detail, \
&_CONCAT(_http_resource_desc_##_name, _list_start)[0], \
&_CONCAT(_http_resource_desc_##_name, _list_end)[0])
/**
* @brief Define an HTTPS service with static resources.
*
 * @note The @p _host parameter must be non-`NULL`. It is used to specify an IP address either in
 * IPv4 or IPv6 format, a fully-qualified hostname, or a virtual host.
 *
 * @note The @p _port parameter must be non-`NULL`. It points to a location that specifies the port
 * number to use for the service. If the specified port number is zero, then an ephemeral port
 * number will be used and the actual port number assigned will be written back to memory. For
 * ephemeral port numbers, the memory pointed to by @p _port must be writeable.
 *
 * @param _name Name of the service.
 * @param _host IP address or hostname associated with the service.
 * @param[inout] _port Pointer to port associated with the service.
 * @param _concurrent Maximum number of concurrent clients.
 * @param _backlog Maximum number of queued connections.
* @param _detail Implementation-specific detail associated with the service.
* @param _sec_tag_list TLS security tag list used to setup a HTTPS socket.
* @param _sec_tag_list_size TLS security tag list size used to setup a HTTPS socket.
*/
#define HTTPS_SERVICE_DEFINE(_name, _host, _port, _concurrent, _backlog, _detail, \
_sec_tag_list, _sec_tag_list_size) \
extern struct http_resource_desc _CONCAT(_http_resource_desc_##_name, _list_start)[]; \
extern struct http_resource_desc _CONCAT(_http_resource_desc_##_name, _list_end)[]; \
__z_http_service_define(_name, _host, _port, _concurrent, _backlog, _detail, \
&_CONCAT(_http_resource_desc_##_name, _list_start)[0], \
&_CONCAT(_http_resource_desc_##_name, _list_end)[0], \
_sec_tag_list, _sec_tag_list_size); \
BUILD_ASSERT(IS_ENABLED(CONFIG_NET_SOCKETS_SOCKOPT_TLS), \
"TLS is required for HTTP secure (CONFIG_NET_SOCKETS_SOCKOPT_TLS)")
/**
* @brief Count the number of HTTP services.
*
* @param[out] _dst Pointer to location where result is written.
*/
#define HTTP_SERVICE_COUNT(_dst) STRUCT_SECTION_COUNT(http_service_desc, _dst)
/**
* @brief Count HTTP service static resources.
*
* @param _service Pointer to a service.
*/
#define HTTP_SERVICE_RESOURCE_COUNT(_service) ((_service)->res_end - (_service)->res_begin)
/**
* @brief Iterate over all HTTP services.
*
* @param _it Name of http_service_desc iterator
*/
#define HTTP_SERVICE_FOREACH(_it) STRUCT_SECTION_FOREACH(http_service_desc, _it)
/**
* @brief Iterate over static HTTP resources associated with a given @p _service.
*
* @note This macro requires that @p _service is defined with @ref HTTP_SERVICE_DEFINE.
*
* @param _service Name of HTTP service
* @param _it Name of iterator (of type @ref http_resource_desc)
*/
#define HTTP_RESOURCE_FOREACH(_service, _it) \
STRUCT_SECTION_FOREACH_ALTERNATE(http_resource_desc_##_service, http_resource_desc, _it)
/**
* @brief Iterate over all static resources associated with @p _service .
*
* @note This macro is suitable for a @p _service defined with either @ref HTTP_SERVICE_DEFINE
* or @ref HTTP_SERVICE_DEFINE_EMPTY.
*
* @param _service Pointer to HTTP service
* @param _it Name of iterator (of type @ref http_resource_desc)
*/
#define HTTP_SERVICE_FOREACH_RESOURCE(_service, _it) \
for (struct http_resource_desc *_it = (_service)->res_begin; ({ \
__ASSERT(_it <= (_service)->res_end, "unexpected list end location"); \
_it < (_service)->res_end; \
}); \
_it++)
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_HTTP_SERVICE_H_ */
``` | /content/code_sandbox/include/zephyr/net/http/service.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,683 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_SERVER_H_
#define ZEPHYR_INCLUDE_NET_HTTP_SERVER_H_
/**
* @file server.h
*
* @brief HTTP server API
*
* @defgroup http_server HTTP server API
* @since 3.7
* @version 0.1.0
* @ingroup networking
* @{
*/
#include <stdint.h>
#include <zephyr/kernel.h>
#include <zephyr/net/http/parser.h>
#include <zephyr/net/http/hpack.h>
#include <zephyr/net/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_HTTP_SERVER)
#define HTTP_SERVER_CLIENT_BUFFER_SIZE CONFIG_HTTP_SERVER_CLIENT_BUFFER_SIZE
#define HTTP_SERVER_MAX_STREAMS CONFIG_HTTP_SERVER_MAX_STREAMS
#define HTTP_SERVER_MAX_CONTENT_TYPE_LEN CONFIG_HTTP_SERVER_MAX_CONTENT_TYPE_LENGTH
#define HTTP_SERVER_MAX_URL_LENGTH CONFIG_HTTP_SERVER_MAX_URL_LENGTH
#else
#define HTTP_SERVER_CLIENT_BUFFER_SIZE 0
#define HTTP_SERVER_MAX_STREAMS 0
#define HTTP_SERVER_MAX_CONTENT_TYPE_LEN 0
#define HTTP_SERVER_MAX_URL_LENGTH 0
#endif
/* Maximum header field name / value length. This is only used to detect Upgrade and
* websocket header fields and values in the http1 server so the value is quite short.
*/
#define HTTP_SERVER_MAX_HEADER_LEN 32
#define HTTP2_PREFACE "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
/** @endcond */
/**
* @brief HTTP server resource type.
*/
enum http_resource_type {
	/** Static resource, cannot be modified at runtime. */
	HTTP_RESOURCE_TYPE_STATIC,
	/** Serves static gzipped files from a filesystem. */
	HTTP_RESOURCE_TYPE_STATIC_FS,
	/** Dynamic resource, server interacts with the application via registered
	 * @ref http_resource_dynamic_cb_t.
	 */
	HTTP_RESOURCE_TYPE_DYNAMIC,
	/** Websocket resource, application takes control over Websocket connection
	 * after an upgrade.
	 */
	HTTP_RESOURCE_TYPE_WEBSOCKET,
};
/**
* @brief Representation of a server resource, common for all resource types.
*/
struct http_resource_detail {
/** Bitmask of supported HTTP methods (@ref http_method). */
uint32_t bitmask_of_supported_http_methods;
/** Resource type. */
enum http_resource_type type;
/** Length of the URL path. */
int path_len;
/** Content encoding of the resource. */
const char *content_encoding;
/** Content type of the resource. */
const char *content_type;
};
/** @cond INTERNAL_HIDDEN */
BUILD_ASSERT(NUM_BITS(
sizeof(((struct http_resource_detail *)0)->bitmask_of_supported_http_methods))
>= (HTTP_METHOD_END_VALUE - 1));
/** @endcond */
/**
* @brief Representation of a static server resource.
*/
struct http_resource_detail_static {
/** Common resource details. */
struct http_resource_detail common;
/** Content of the static resource. */
const void *static_data;
/** Size of the static resource. */
size_t static_data_len;
};
/** @cond INTERNAL_HIDDEN */
/* Make sure that the common is the first in the struct. */
BUILD_ASSERT(offsetof(struct http_resource_detail_static, common) == 0);
/** @endcond */
/**
* @brief Representation of a static filesystem server resource.
*/
struct http_resource_detail_static_fs {
/** Common resource details. */
struct http_resource_detail common;
/** Path in the local filesystem */
const char *fs_path;
};
/** @cond INTERNAL_HIDDEN */
/* Make sure that the common is the first in the struct. */
BUILD_ASSERT(offsetof(struct http_resource_detail_static_fs, common) == 0);
/** @endcond */
/* Mapping from a filename extension to an HTTP Content-Type value. */
struct http_content_type {
	/* File extension (an identifier, so without the leading dot). */
	const char *extension;
	/* Length of the extension string, excluding the NUL terminator. */
	size_t extension_len;
	/* Content-Type string to report for files with this extension. */
	const char *content_type;
};
/* Register an extension-to-content-type mapping in an iterable section. */
#define HTTP_SERVER_CONTENT_TYPE(_extension, _content_type) \
	const STRUCT_SECTION_ITERABLE(http_content_type, _extension) = { \
		.extension = STRINGIFY(_extension), \
		.extension_len = sizeof(STRINGIFY(_extension)) - 1, \
		.content_type = _content_type, \
	};
/* Iterate over all registered content type mappings. */
#define HTTP_SERVER_CONTENT_TYPE_FOREACH(_it) STRUCT_SECTION_FOREACH(http_content_type, _it)
struct http_client_ctx;
/** Indicates the status of the currently processed piece of data. */
enum http_data_status {
/** Transaction aborted, data incomplete. */
HTTP_SERVER_DATA_ABORTED = -1,
/** Transaction incomplete, more data expected. */
HTTP_SERVER_DATA_MORE = 0,
/** Final data fragment in current transaction. */
HTTP_SERVER_DATA_FINAL = 1,
};
/**
* @typedef http_resource_dynamic_cb_t
* @brief Callback used when data is received. Data to be sent to client
* can be specified.
*
* @param client HTTP context information for this client connection.
* @param status HTTP data status, indicate whether more data is expected or not.
* @param data_buffer Data received.
* @param data_len Amount of data received.
* @param user_data User specified data.
*
* @return >0 amount of data to be sent to client, let server to call this
* function again when new data is received.
* 0 nothing to sent to client, close the connection
* <0 error, close the connection.
*/
typedef int (*http_resource_dynamic_cb_t)(struct http_client_ctx *client,
enum http_data_status status,
uint8_t *data_buffer,
size_t data_len,
void *user_data);
/**
* @brief Representation of a dynamic server resource.
*/
struct http_resource_detail_dynamic {
/** Common resource details. */
struct http_resource_detail common;
/** Resource callback used by the server to interact with the
* application.
*/
http_resource_dynamic_cb_t cb;
	/** Data buffer used to exchange data between the server and the
	 * application.
	 */
uint8_t *data_buffer;
/** Length of the data in the data buffer. */
size_t data_buffer_len;
/** A pointer to the client currently processing resource, used to
* prevent concurrent access to the resource from multiple clients.
*/
struct http_client_ctx *holder;
/** A pointer to the user data registered by the application. */
void *user_data;
};
/** @cond INTERNAL_HIDDEN */
BUILD_ASSERT(offsetof(struct http_resource_detail_dynamic, common) == 0);
/** @endcond */
/**
* @typedef http_resource_websocket_cb_t
* @brief Callback used when a Websocket connection is setup. The application
* will need to handle all functionality related to the connection like
* reading and writing websocket data, and closing the connection.
*
* @param ws_socket A socket for the Websocket data.
* @param user_data User specified data.
*
* @return 0 Accepting the connection, HTTP server library will no longer
* handle data to/from the socket and it is application responsibility
* to send and receive data to/from the supplied socket.
* <0 error, close the connection.
*/
typedef int (*http_resource_websocket_cb_t)(int ws_socket,
void *user_data);
/** @brief Representation of a websocket server resource */
struct http_resource_detail_websocket {
/** Common resource details. */
struct http_resource_detail common;
/** Websocket socket value */
int ws_sock;
/** Resource callback used by the server to interact with the
* application.
*/
http_resource_websocket_cb_t cb;
	/** Data buffer used to exchange data between the server and the
	 * application.
	 */
uint8_t *data_buffer;
/** Length of the data in the data buffer. */
size_t data_buffer_len;
/** A pointer to the user data registered by the application. */
void *user_data;
};
/** @cond INTERNAL_HIDDEN */
BUILD_ASSERT(offsetof(struct http_resource_detail_websocket, common) == 0);
/** @endcond */
/** @cond INTERNAL_HIDDEN */
/* HTTP/2 stream lifecycle states (cf. RFC 9113, Section 5.1). */
enum http2_stream_state {
	HTTP2_STREAM_IDLE,
	HTTP2_STREAM_RESERVED_LOCAL,
	HTTP2_STREAM_RESERVED_REMOTE,
	HTTP2_STREAM_OPEN,
	HTTP2_STREAM_HALF_CLOSED_LOCAL,
	HTTP2_STREAM_HALF_CLOSED_REMOTE,
	HTTP2_STREAM_CLOSED
};
/* Per-connection state of the server's receive processing. */
enum http_server_state {
	HTTP_SERVER_FRAME_HEADER_STATE,
	HTTP_SERVER_PREFACE_STATE,
	HTTP_SERVER_REQUEST_STATE,
	HTTP_SERVER_FRAME_DATA_STATE,
	HTTP_SERVER_FRAME_HEADERS_STATE,
	HTTP_SERVER_FRAME_SETTINGS_STATE,
	HTTP_SERVER_FRAME_PRIORITY_STATE,
	HTTP_SERVER_FRAME_WINDOW_UPDATE_STATE,
	HTTP_SERVER_FRAME_CONTINUATION_STATE,
	HTTP_SERVER_FRAME_PING_STATE,
	HTTP_SERVER_FRAME_RST_STREAM_STATE,
	HTTP_SERVER_FRAME_GOAWAY_STATE,
	HTTP_SERVER_FRAME_PADDING_STATE,
	HTTP_SERVER_DONE_STATE,
};
/* Phases of HTTP/1 request parsing. */
enum http1_parser_state {
	HTTP1_INIT_HEADER_STATE,
	HTTP1_WAITING_HEADER_STATE,
	HTTP1_RECEIVING_HEADER_STATE,
	HTTP1_RECEIVED_HEADER_STATE,
	HTTP1_RECEIVING_DATA_STATE,
	HTTP1_MESSAGE_COMPLETE_STATE,
};
#define HTTP_SERVER_INITIAL_WINDOW_SIZE 65536
#define HTTP_SERVER_WS_MAX_SEC_KEY_LEN 32
/** @endcond */
/** @brief HTTP/2 stream representation. */
struct http2_stream_ctx {
int stream_id; /**< Stream identifier. */
enum http2_stream_state stream_state; /**< Stream state. */
int window_size; /**< Stream-level window size. */
/** Flag indicating that headers were sent in the reply. */
bool headers_sent : 1;
/** Flag indicating that END_STREAM flag was sent. */
bool end_stream_sent : 1;
};
/** @brief HTTP/2 frame representation. */
struct http2_frame {
uint32_t length; /**< Frame payload length. */
uint32_t stream_identifier; /**< Stream ID the frame belongs to. */
uint8_t type; /**< Frame type. */
uint8_t flags; /**< Frame flags. */
uint8_t padding_len; /**< Frame padding length. */
};
/**
* @brief Representation of an HTTP client connected to the server.
*/
struct http_client_ctx {
/** Socket descriptor associated with the server. */
int fd;
/** Client data buffer. */
unsigned char buffer[HTTP_SERVER_CLIENT_BUFFER_SIZE];
/** Cursor indicating currently processed byte. */
unsigned char *cursor;
/** Data left to process in the buffer. */
size_t data_len;
/** Connection-level window size. */
int window_size;
/** Server state for the associated client. */
enum http_server_state server_state;
/** Currently processed HTTP/2 frame. */
struct http2_frame current_frame;
/** Currently processed resource detail. */
struct http_resource_detail *current_detail;
/** Currently processed stream. */
struct http2_stream_ctx *current_stream;
/** HTTP/2 header parser context. */
struct http_hpack_header_buf header_field;
/** HTTP/2 streams context. */
struct http2_stream_ctx streams[HTTP_SERVER_MAX_STREAMS];
/** HTTP/1 parser configuration. */
struct http_parser_settings parser_settings;
/** HTTP/1 parser context. */
struct http_parser parser;
/** Request URL. */
unsigned char url_buffer[HTTP_SERVER_MAX_URL_LENGTH];
/** Request content type. */
unsigned char content_type[HTTP_SERVER_MAX_CONTENT_TYPE_LEN];
/** Temp buffer for currently processed header (HTTP/1 only). */
unsigned char header_buffer[HTTP_SERVER_MAX_HEADER_LEN];
/** Request content length. */
size_t content_len;
/** Request method. */
enum http_method method;
/** HTTP/1 parser state. */
enum http1_parser_state parser_state;
/** Length of the payload length in the currently processed request
* fragment (HTTP/1 only).
*/
int http1_frag_data_len;
/** Client inactivity timer. The client connection is closed by the
* server when it expires.
*/
struct k_work_delayable inactivity_timer;
/** @cond INTERNAL_HIDDEN */
/** Websocket security key. */
IF_ENABLED(CONFIG_WEBSOCKET, (uint8_t ws_sec_key[HTTP_SERVER_WS_MAX_SEC_KEY_LEN]));
/** @endcond */
/** Flag indicating that HTTP2 preface was sent. */
bool preface_sent : 1;
/** Flag indicating that HTTP1 headers were sent. */
bool http1_headers_sent : 1;
/** Flag indicating that upgrade header was present in the request. */
bool has_upgrade_header : 1;
/** Flag indicating HTTP/2 upgrade takes place. */
bool http2_upgrade : 1;
/** Flag indicating Websocket upgrade takes place. */
bool websocket_upgrade : 1;
/** Flag indicating Websocket key is being processed. */
bool websocket_sec_key_next : 1;
	/** The next frame on the stream is expected to be a continuation frame. */
bool expect_continuation : 1;
};
/** @brief Start the HTTP2 server.
*
* The server runs in a background thread. Once started, the server will create
* a server socket for all HTTP services registered in the system and accept
* connections from clients (see @ref HTTP_SERVICE_DEFINE).
*/
int http_server_start(void);
/** @brief Stop the HTTP2 server.
*
* All server sockets are closed and the server thread is suspended.
*/
int http_server_stop(void);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif
``` | /content/code_sandbox/include/zephyr/net/http/server.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,767 |
```objective-c
/** @file
* @brief HTTP HPACK
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_SERVER_HPACK_H_
#define ZEPHYR_INCLUDE_NET_HTTP_SERVER_HPACK_H_
#include <stddef.h>
#include <stdint.h>
/**
* @brief HTTP HPACK
* @defgroup http_hpack HTTP HPACK
* @since 3.7
* @version 0.1.0
* @ingroup networking
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/** @cond INTERNAL_HIDDEN */
/* Indices into the HPACK static header table, as defined in RFC 7541,
 * Appendix A. Index 0 is not a valid static table entry.
 */
enum http_hpack_static_key {
	HTTP_SERVER_HPACK_INVALID = 0,
	HTTP_SERVER_HPACK_AUTHORITY = 1,
	HTTP_SERVER_HPACK_METHOD_GET = 2,
	HTTP_SERVER_HPACK_METHOD_POST = 3,
	HTTP_SERVER_HPACK_PATH_ROOT = 4,
	HTTP_SERVER_HPACK_PATH_INDEX = 5,
	HTTP_SERVER_HPACK_SCHEME_HTTP = 6,
	HTTP_SERVER_HPACK_SCHEME_HTTPS = 7,
	HTTP_SERVER_HPACK_STATUS_200 = 8,
	HTTP_SERVER_HPACK_STATUS_204 = 9,
	HTTP_SERVER_HPACK_STATUS_206 = 10,
	HTTP_SERVER_HPACK_STATUS_304 = 11,
	HTTP_SERVER_HPACK_STATUS_400 = 12,
	HTTP_SERVER_HPACK_STATUS_404 = 13,
	HTTP_SERVER_HPACK_STATUS_500 = 14,
	HTTP_SERVER_HPACK_ACCEPT_CHARSET = 15,
	HTTP_SERVER_HPACK_ACCEPT_ENCODING = 16,
	HTTP_SERVER_HPACK_ACCEPT_LANGUAGE = 17,
	HTTP_SERVER_HPACK_ACCEPT_RANGES = 18,
	HTTP_SERVER_HPACK_ACCEPT = 19,
	HTTP_SERVER_HPACK_ACCESS_CONTROL_ALLOW_ORIGIN = 20,
	HTTP_SERVER_HPACK_AGE = 21,
	HTTP_SERVER_HPACK_ALLOW = 22,
	HTTP_SERVER_HPACK_AUTHORIZATION = 23,
	HTTP_SERVER_HPACK_CACHE_CONTROL = 24,
	HTTP_SERVER_HPACK_CONTENT_DISPOSITION = 25,
	HTTP_SERVER_HPACK_CONTENT_ENCODING = 26,
	HTTP_SERVER_HPACK_CONTENT_LANGUAGE = 27,
	HTTP_SERVER_HPACK_CONTENT_LENGTH = 28,
	HTTP_SERVER_HPACK_CONTENT_LOCATION = 29,
	HTTP_SERVER_HPACK_CONTENT_RANGE = 30,
	HTTP_SERVER_HPACK_CONTENT_TYPE = 31,
	HTTP_SERVER_HPACK_COOKIE = 32,
	HTTP_SERVER_HPACK_DATE = 33,
	HTTP_SERVER_HPACK_ETAG = 34,
	HTTP_SERVER_HPACK_EXPECT = 35,
	HTTP_SERVER_HPACK_EXPIRES = 36,
	HTTP_SERVER_HPACK_FROM = 37,
	HTTP_SERVER_HPACK_HOST = 38,
	HTTP_SERVER_HPACK_IF_MATCH = 39,
	HTTP_SERVER_HPACK_IF_MODIFIED_SINCE = 40,
	HTTP_SERVER_HPACK_IF_NONE_MATCH = 41,
	HTTP_SERVER_HPACK_IF_RANGE = 42,
	HTTP_SERVER_HPACK_IF_UNMODIFIED_SINCE = 43,
	HTTP_SERVER_HPACK_LAST_MODIFIED = 44,
	HTTP_SERVER_HPACK_LINK = 45,
	HTTP_SERVER_HPACK_LOCATION = 46,
	HTTP_SERVER_HPACK_MAX_FORWARDS = 47,
	HTTP_SERVER_HPACK_PROXY_AUTHENTICATE = 48,
	HTTP_SERVER_HPACK_PROXY_AUTHORIZATION = 49,
	HTTP_SERVER_HPACK_RANGE = 50,
	HTTP_SERVER_HPACK_REFERER = 51,
	HTTP_SERVER_HPACK_REFRESH = 52,
	HTTP_SERVER_HPACK_RETRY_AFTER = 53,
	HTTP_SERVER_HPACK_SERVER = 54,
	HTTP_SERVER_HPACK_SET_COOKIE = 55,
	HTTP_SERVER_HPACK_STRICT_TRANSPORT_SECURITY = 56,
	HTTP_SERVER_HPACK_TRANSFER_ENCODING = 57,
	HTTP_SERVER_HPACK_USER_AGENT = 58,
	HTTP_SERVER_HPACK_VARY = 59,
	HTTP_SERVER_HPACK_VIA = 60,
	HTTP_SERVER_HPACK_WWW_AUTHENTICATE = 61,
};
/* TODO Kconfig */
#define HTTP2_HEADER_FIELD_MAX_LEN 256
#if defined(CONFIG_HTTP_SERVER)
#define HTTP_SERVER_HUFFMAN_DECODE_BUFFER_SIZE CONFIG_HTTP_SERVER_HUFFMAN_DECODE_BUFFER_SIZE
#else
#define HTTP_SERVER_HUFFMAN_DECODE_BUFFER_SIZE 0
#endif
/** @endcond */
/** HTTP2 header field with decoding buffer. */
struct http_hpack_header_buf {
/** A pointer to the decoded header field name. */
const char *name;
/** A pointer to the decoded header field value. */
const char *value;
/** Length of the decoded header field name. */
size_t name_len;
/** Length of the decoded header field value. */
size_t value_len;
/** Encoding/Decoding buffer. Used with Huffman encoding/decoding. */
uint8_t buf[HTTP_SERVER_HUFFMAN_DECODE_BUFFER_SIZE];
/** Length of the data in the decoding buffer. */
size_t datalen;
};
/** @cond INTERNAL_HIDDEN */
/* Huffman-decode encoded_buf (RFC 7541, Section 5.2) into buf. */
int http_hpack_huffman_decode(const uint8_t *encoded_buf, size_t encoded_len,
			      uint8_t *buf, size_t buflen);
/* Huffman-encode str into buf. */
int http_hpack_huffman_encode(const uint8_t *str, size_t str_len,
			      uint8_t *buf, size_t buflen);
/* Decode a single HPACK header field from buf into header. */
int http_hpack_decode_header(const uint8_t *buf, size_t datalen,
			     struct http_hpack_header_buf *header);
/* Encode a single HPACK header field from header into buf. */
int http_hpack_encode_header(uint8_t *buf, size_t buflen,
			     struct http_hpack_header_buf *header);
/** @endcond */
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif
``` | /content/code_sandbox/include/zephyr/net/http/hpack.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,159 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public IEEE 802.15.4 Driver API
*
* @note All references to the standard in this file cite IEEE 802.15.4-2020.
*/
#ifndef ZEPHYR_INCLUDE_NET_IEEE802154_RADIO_H_
#define ZEPHYR_INCLUDE_NET_IEEE802154_RADIO_H_
#include <zephyr/device.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_time.h>
#include <zephyr/net/ieee802154.h>
#include <zephyr/net/ieee802154_ie.h>
#include <zephyr/sys/util.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup ieee802154_driver IEEE 802.15.4 Drivers
* @since 1.0
* @version 0.8.0
* @ingroup ieee802154
*
* @brief IEEE 802.15.4 driver API
*
* @details This API provides a common representation of vendor-specific
* hardware and firmware to the native IEEE 802.15.4 L2 and OpenThread stacks.
* **Application developers should never interface directly with this API.** It
* is of interest to driver maintainers only.
*
* The IEEE 802.15.4 driver API consists of two separate parts:
* - a basic, mostly PHY-level driver API to be implemented by all drivers,
* - several optional MAC-level extension points to offload performance
* critical or timing sensitive aspects at MAC level to the driver hardware
* or firmware ("hard" MAC).
*
* Implementing the basic driver API will ensure integration with the native L2
* stack as well as basic support for OpenThread. Depending on the hardware,
* offloading to vendor-specific hardware or firmware features may be required
* to achieve full compliance with the Thread protocol or IEEE 802.15.4
* subprotocols (e.g. fast enough ACK packages, precise timing of timed TX/RX in
* the TSCH or CSL subprotocols).
*
* Whether or not MAC-level offloading extension points need to be implemented
* is to be decided by individual driver maintainers. Upper layers SHOULD
* provide a "soft" MAC fallback whenever possible.
*
* @note All section, table and figure references are to the IEEE 802.15.4-2020
* standard.
*
* @{
*/
/**
 * @name IEEE 802.15.4-2020, Section 6: MAC functional description
 * @{
 */

/**
 * The symbol period (and therefore symbol rate) is defined in section 6.1: "Some
 * of the timing parameters in definition of the MAC are in units of PHY symbols.
 * For PHYs that have multiple symbol periods, the duration to be used for the
 * MAC parameters is defined in that PHY clause."
 *
 * This is not necessarily the true physical symbol period, so take care to use
 * this macro only when either the symbol period used for MAC timing is the same
 * as the physical symbol period or if you actually mean the MAC timing symbol
 * period.
 *
 * PHY specific symbol periods are defined in PHY specific sections below.
 *
 * @param symbol_period_ns the symbol period in nanoseconds, must be non-zero;
 * parenthesized in the expansion so expression arguments evaluate correctly
 */
#define IEEE802154_PHY_SYMBOLS_PER_SECOND(symbol_period_ns) (NSEC_PER_SEC / (symbol_period_ns))

/** @} */
/**
 * @name IEEE 802.15.4-2020, Section 8: MAC services
 * @{
 */

/**
 * The number of PHY symbols forming a superframe slot when the superframe order
 * is equal to zero, see sections 8.4.2, table 8-93, aBaseSlotDuration and
 * section 6.2.1.
 */
#define IEEE802154_MAC_A_BASE_SLOT_DURATION 60U

/**
 * The number of slots contained in any superframe, see section 8.4.2,
 * table 8-93, aNumSuperframeSlots.
 */
#define IEEE802154_MAC_A_NUM_SUPERFRAME_SLOTS 16U

/**
 * The number of PHY symbols forming a superframe when the superframe order is
 * equal to zero, see section 8.4.2, table 8-93, aBaseSuperframeDuration
 * (i.e. 60 * 16 = 960 symbol periods).
 */
#define IEEE802154_MAC_A_BASE_SUPERFRAME_DURATION \
	(IEEE802154_MAC_A_BASE_SLOT_DURATION * IEEE802154_MAC_A_NUM_SUPERFRAME_SLOTS)
/**
 * MAC PIB attribute aUnitBackoffPeriod, see section 8.4.2, table 8-93, in symbol
 * periods, valid for all PHYs except SUN PHY in the 920 MHz band.
 *
 * @param turnaround_time the PHY's aTurnaroundTime attribute, in symbol
 * periods; parenthesized in the expansion so expression arguments evaluate
 * correctly
 */
#define IEEE802154_MAC_A_UNIT_BACKOFF_PERIOD(turnaround_time) \
	((turnaround_time) + IEEE802154_PHY_A_CCA_TIME)
/**
 * Default macResponseWaitTime in multiples of aBaseSuperframeDuration as
 * defined in section 8.4.3.1, table 8-94.
 *
 * @note The unit is aBaseSuperframeDuration (see @ref
 * IEEE802154_MAC_A_BASE_SUPERFRAME_DURATION), not symbol periods.
 */
#define IEEE802154_MAC_RESPONSE_WAIT_TIME_DEFAULT 32U

/** @} */
/**
 * @name IEEE 802.15.4-2020, Section 10: General PHY requirements
 * @{
 */

/**
 * @brief PHY channel pages, see section 10.1.3
 *
 * @details A device driver must support the mandatory channel pages, frequency
 * bands and channels of at least one IEEE 802.15.4 PHY.
 *
 * Channel page and number assignments have developed over several versions of
 * the standard and are not particularly well documented. Therefore some notes
 * about peculiarities of channel pages and channel numbering:
 * - The 2006 version of the standard had a read-only phyChannelsSupported PHY
 *   PIB attribute that represented channel page/number combinations as a
 *   bitmap. This attribute was removed in later versions of the standard as the
 *   number of channels increased beyond what could be represented by a bit map.
 *   That's the reason why it was decided to represent supported channels as a
 *   combination of channel pages and ranges instead.
 * - In the 2020 version of the standard, 13 channel pages are explicitly
 *   defined, but up to 32 pages could in principle be supported. This was a
 *   hard requirement in the 2006 standard. In later standards it is implicit
 *   from field specifications, e.g. the MAC PIB attribute macChannelPage
 *   (section 8.4.3.4, table 8-100) or channel page fields used in the SRM
 *   protocol (see section 8.2.26.5).
 * - ASK PHY (channel page one) was deprecated in the 2015 version of the
 *   standard. The 2020 version of the standard is a bit ambivalent whether
 *   channel page one disappeared as well or should be interpreted as O-QPSK now
 *   (see section 10.1.3.3). In Zephyr this ambivalence is resolved by
 *   deprecating channel page one.
 * - For some PHYs the standard doesn't clearly specify a channel page, namely
 *   the GFSK, RS-GFSK, CMB and TASK PHYs. These are all rather new and left out
 *   in our list as long as no driver wants to implement them.
 *
 * @warning The bit numbers are not arbitrary but represent the channel
 * page numbers as defined by the standard. Therefore do not change the
 * bit numbering.
 */
enum ieee802154_phy_channel_page {
	/**
	 * Channel page zero supports the 2.4G channels of the O-QPSK PHY and
	 * all channels from the BPSK PHYs initially defined in the 2003
	 * editions of the standard. For channel page zero, 16 channels are
	 * available in the 2450 MHz band (channels 11-26, O-QPSK), 10 in the
	 * 915 MHz band (channels 1-10, BPSK), and 1 in the 868 MHz band
	 * (channel 0, BPSK).
	 *
	 * You can retrieve the channels supported by a specific driver on this
	 * page via @ref IEEE802154_ATTR_PHY_SUPPORTED_CHANNEL_RANGES attribute.
	 *
	 * see section 10.1.3.3
	 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_ZERO_OQPSK_2450_BPSK_868_915 = BIT(0),

	/** Formerly ASK PHY - deprecated in IEEE 802.15.4-2015 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_ONE_DEPRECATED = BIT(1),

	/** O-QPSK PHY - 868 MHz and 915 MHz bands, see section 10.1.3.3 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_TWO_OQPSK_868_915 = BIT(2),

	/** CSS PHY - 2450 MHz band, see section 10.1.3.4 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_THREE_CSS = BIT(3),

	/** HRP UWB PHY - sub-GHz, low and high bands, see section 10.1.3.5 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_FOUR_HRP_UWB = BIT(4),

	/** O-QPSK PHY - 780 MHz band, see section 10.1.3.2 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_FIVE_OQPSK_780 = BIT(5),

	/** reserved - not currently assigned */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_SIX_RESERVED = BIT(6),

	/** MSK PHY - 780 MHz and 2450 MHz bands, see sections 10.1.3.6, 10.1.3.7 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_SEVEN_MSK = BIT(7),

	/** LRP UWB PHY, see section 10.1.3.8 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_EIGHT_LRP_UWB = BIT(8),

	/**
	 * SUN FSK/OFDM/O-QPSK PHYs - predefined bands, operating modes and
	 * channels, see section 10.1.3.9
	 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_NINE_SUN_PREDEFINED = BIT(9),

	/**
	 * SUN FSK/OFDM/O-QPSK PHYs - generic modulation and channel
	 * description, see sections 10.1.3.9, 7.4.4.11
	 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_TEN_SUN_FSK_GENERIC = BIT(10),

	/** O-QPSK PHY - 2380 MHz band, see section 10.1.3.10 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_ELEVEN_OQPSK_2380 = BIT(11),

	/** LECIM DSSS/FSK PHYs, see section 10.1.3.11 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_TWELVE_LECIM = BIT(12),

	/** RCC PHY, see section 10.1.3.12 */
	IEEE802154_ATTR_PHY_CHANNEL_PAGE_THIRTEEN_RCC = BIT(13),
};
/**
 * Represents a supported channel range, see @ref
 * ieee802154_phy_supported_channels.
 */
struct ieee802154_phy_channel_range {
	uint16_t from_channel; /**< First channel of the range (inclusive) */
	uint16_t to_channel;   /**< Last channel of the range (inclusive) */
};
/**
 * Represents a list of channel ranges supported by a driver for a given
 * interface, see @ref IEEE802154_ATTR_PHY_SUPPORTED_CHANNEL_RANGES.
 */
struct ieee802154_phy_supported_channels {
	/**
	 * @brief Pointer to an array of channel range structures.
	 *
	 * @warning The pointer must be valid and constant throughout the life
	 * of the interface.
	 */
	const struct ieee802154_phy_channel_range *const ranges;

	/** @brief The number of currently available channel ranges. */
	const uint8_t num_ranges;
};
/**
* @brief Allocate memory for the supported channels driver attribute with a
* single channel range constant across all driver instances. This is what most
* IEEE 802.15.4 drivers need.
*
* @details Example usage:
*
* @code{.c}
* IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 11, 26);
* @endcode
*
* The attribute may then be referenced like this:
*
* @code{.c}
* ... &drv_attr.phy_supported_channels ...
* @endcode
*
* See @ref ieee802154_attr_get_channel_page_and_range() for a further shortcut
* that can be combined with this macro.
*
* @param drv_attr name of the local static variable to be declared for the
* local attributes structure
* @param from the first channel to be supported
* @param to the last channel to be supported
*/
#define IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, from, to) \
static const struct { \
const struct ieee802154_phy_channel_range phy_channel_range; \
const struct ieee802154_phy_supported_channels phy_supported_channels; \
} drv_attr = { \
.phy_channel_range = {.from_channel = (from), .to_channel = (to)}, \
.phy_supported_channels = \
{ \
.ranges = &drv_attr.phy_channel_range, \
.num_ranges = 1U, \
}, \
}
/** @} */
/**
 * @name IEEE 802.15.4-2020, Section 11: PHY services
 * @{
 */

/**
 * Default PHY PIB attribute aTurnaroundTime, in PHY symbols, see section 11.3,
 * table 11-1.
 *
 * @note For the SUN, RS-GFSK, TVWS and LECIM FSK PHYs use @ref
 * IEEE802154_PHY_A_TURNAROUND_TIME_1MS instead.
 */
#define IEEE802154_PHY_A_TURNAROUND_TIME_DEFAULT 12U
/**
 * PHY PIB attribute aTurnaroundTime for SUN, RS-GFSK, TVWS, and LECIM FSK PHY,
 * in PHY symbols, see section 11.3, table 11-1.
 *
 * @param symbol_period_ns the symbol period in nanoseconds, must be non-zero;
 * parenthesized here so that expression arguments evaluate correctly without
 * relying on DIV_ROUND_UP's internal argument hygiene
 */
#define IEEE802154_PHY_A_TURNAROUND_TIME_1MS(symbol_period_ns) \
	DIV_ROUND_UP(NSEC_PER_MSEC, (symbol_period_ns))
/**
 * PHY PIB attribute aCcaTime, in PHY symbols, all PHYs except for SUN O-QPSK,
 * see section 11.3, table 11-1.
 *
 * @note Also used to derive aUnitBackoffPeriod, see @ref
 * IEEE802154_MAC_A_UNIT_BACKOFF_PERIOD.
 */
#define IEEE802154_PHY_A_CCA_TIME 8U

/** @} */
/**
 * @name IEEE 802.15.4-2020, Section 12: O-QPSK PHY
 * @{
 */

/** O-QPSK 868 MHz band symbol period, in nanoseconds, see section 12.3.3 */
#define IEEE802154_PHY_OQPSK_868MHZ_SYMBOL_PERIOD_NS 40000LL

/**
 * O-QPSK 780 MHz, 915 MHz, 2380 MHz and 2450 MHz bands symbol period,
 * in nanoseconds, see section 12.3.3
 */
#define IEEE802154_PHY_OQPSK_780_TO_2450MHZ_SYMBOL_PERIOD_NS 16000LL

/** @} */

/**
 * @name IEEE 802.15.4-2020, Section 13: BPSK PHY
 * @{
 */

/** BPSK 868 MHz band symbol period, in nanoseconds, see section 13.3.3 */
#define IEEE802154_PHY_BPSK_868MHZ_SYMBOL_PERIOD_NS 50000LL

/** BPSK 915 MHz band symbol period, in nanoseconds, see section 13.3.3 */
#define IEEE802154_PHY_BPSK_915MHZ_SYMBOL_PERIOD_NS 25000LL

/** @} */
/**
 * @name IEEE 802.15.4-2020, Section 15: HRP UWB PHY
 *
 * @details For HRP UWB the symbol period is derived from the preamble symbol period
 * (T_psym), see section 11.3, table 11-1 and section 15.2.5, table 15-4
 * (confirmed in IEEE 802.15.4z, section 15.1). Choosing among those periods
 * cannot be done based on channel page and channel alone. The mean pulse
 * repetition frequency must also be known, see the 'UwbPrf' parameter of the
 * MCPS-DATA.request primitive (section 8.3.2, table 8-88) and the preamble
 * parameters for HRP-ERDEV length 91 codes (IEEE 802.15.4z, section 15.2.6.2,
 * table 15-7b).
 * @{
 */

/** Nominal PRF 4 MHz preamble symbol period (T_psym), in nanoseconds */
#define IEEE802154_PHY_HRP_UWB_PRF4_TPSYM_SYMBOL_PERIOD_NS 3974.36F

/** Nominal PRF 16 MHz preamble symbol period (T_psym), in nanoseconds */
#define IEEE802154_PHY_HRP_UWB_PRF16_TPSYM_SYMBOL_PERIOD_NS 993.59F

/** Nominal PRF 64 MHz preamble symbol period (T_psym), in nanoseconds */
#define IEEE802154_PHY_HRP_UWB_PRF64_TPSYM_SYMBOL_PERIOD_NS 1017.63F

/** ERDEV preamble symbol period (T_psym), in nanoseconds */
#define IEEE802154_PHY_HRP_UWB_ERDEV_TPSYM_SYMBOL_PERIOD_NS 729.17F

/** @brief represents the nominal pulse repetition frequency (PRF) of an HRP UWB PHY */
enum ieee802154_phy_hrp_uwb_nominal_prf {
	/** standard modes, see section 8.3.2, table 8-88. */
	IEEE802154_PHY_HRP_UWB_PRF_OFF = 0,
	IEEE802154_PHY_HRP_UWB_NOMINAL_4_M = BIT(0),
	IEEE802154_PHY_HRP_UWB_NOMINAL_16_M = BIT(1),
	IEEE802154_PHY_HRP_UWB_NOMINAL_64_M = BIT(2),

	/**
	 * enhanced ranging device (ERDEV) modes not specified in table 8-88,
	 * see IEEE 802.15.4z, section 15.1, section 15.2.6.2, table 15-7b,
	 * section 15.3.4.2 and section 15.3.4.3.
	 */
	IEEE802154_PHY_HRP_UWB_NOMINAL_64_M_BPRF = BIT(3),
	IEEE802154_PHY_HRP_UWB_NOMINAL_128_M_HPRF = BIT(4),
	IEEE802154_PHY_HRP_UWB_NOMINAL_256_M_HPRF = BIT(5),
};

/** RDEV device mask - all nominal PRF modes of a (non-enhanced) ranging device */
#define IEEE802154_PHY_HRP_UWB_RDEV \
	(IEEE802154_PHY_HRP_UWB_NOMINAL_4_M | IEEE802154_PHY_HRP_UWB_NOMINAL_16_M | \
	 IEEE802154_PHY_HRP_UWB_NOMINAL_64_M)

/** ERDEV device mask - all nominal PRF modes of an enhanced ranging device */
#define IEEE802154_PHY_HRP_UWB_ERDEV \
	(IEEE802154_PHY_HRP_UWB_NOMINAL_64_M_BPRF | IEEE802154_PHY_HRP_UWB_NOMINAL_128_M_HPRF | \
	 IEEE802154_PHY_HRP_UWB_NOMINAL_256_M_HPRF)

/** @} */
/**
 * @name IEEE 802.15.4-2020, Section 19: SUN FSK PHY
 * @{
 */

/** SUN FSK 863 MHz and 915 MHz band symbol periods, in nanoseconds, see section 19.1, table 19-1 */
#define IEEE802154_PHY_SUN_FSK_863MHZ_915MHZ_SYMBOL_PERIOD_NS 20000LL

/** SUN FSK PHY header (PHR) length, in bytes, see section 19.2.4 */
#define IEEE802154_PHY_SUN_FSK_PHR_LEN 2

/** @} */
/**
 * @name IEEE 802.15.4 Driver API
 * @{
 */

/**
 * IEEE 802.15.4 driver capabilities
 *
 * Any driver properties that can be represented in binary form should be
 * modeled as capabilities. These are called "hardware" capabilities for
 * historical reasons but may also represent driver firmware capabilities (e.g.
 * MAC offloading features).
 */
enum ieee802154_hw_caps {
	/*
	 * PHY capabilities
	 *
	 * The following capabilities describe features of the underlying radio
	 * hardware (PHY/L1).
	 */

	/** Energy detection (ED) supported (optional) */
	IEEE802154_HW_ENERGY_SCAN = BIT(0),

	/*
	 * MAC offloading capabilities (optional)
	 *
	 * The following MAC/L2 features may optionally be offloaded to
	 * specialized hardware or proprietary driver firmware ("hard MAC").
	 *
	 * L2 implementations will have to provide a "soft MAC" fallback for
	 * these features in case the driver does not support them natively.
	 *
	 * Note: Some of these offloading capabilities may be mandatory in
	 * practice to stay within timing requirements of certain IEEE 802.15.4
	 * protocols, e.g. CPUs may not be fast enough to send ACKs within the
	 * required delays in the 2.4 GHz band without hard MAC support.
	 */

	/** Frame checksum (FCS) verification supported */
	IEEE802154_HW_FCS = BIT(1),

	/** Filtering of PAN ID, extended and short address supported */
	IEEE802154_HW_FILTER = BIT(2),

	/** Promiscuous mode supported */
	IEEE802154_HW_PROMISC = BIT(3),

	/** CSMA-CA procedure supported on TX */
	IEEE802154_HW_CSMA = BIT(4),

	/** Waits for ACK on TX if AR bit is set in TX pkt */
	IEEE802154_HW_TX_RX_ACK = BIT(5),

	/** Supports retransmission on TX ACK timeout */
	IEEE802154_HW_RETRANSMISSION = BIT(6),

	/** Sends ACK on RX if AR bit is set in RX pkt */
	IEEE802154_HW_RX_TX_ACK = BIT(7),

	/** TX at specified time supported */
	IEEE802154_HW_TXTIME = BIT(8),

	/** TX directly from sleep supported
	 *
	 * @note This HW capability does not conform to the requirements
	 * specified in #61227 as it closely couples the driver to OpenThread's
	 * capability and device model which is different from Zephyr's:
	 * - "Sleeping" is a well defined term in Zephyr related to internal
	 *   power and thread management and different from "RX off" as
	 *   defined in OT.
	 * - Currently all OT-capable drivers have the "sleep to TX"
	 *   capability anyway plus we expect future drivers to implement it
	 *   ootb as well, so no information is actually conveyed by this
	 *   capability.
	 * - The `start()`/`stop()` API of a net device controls the
	 *   interface's operational state. Drivers MUST respond with
	 *   -ENETDOWN when calling `tx()` while their operational state is
	 *   "DOWN", only devices in the "UP" state MAY transmit packets (RFC
	 *   2863).
	 * - A migration path has been defined in #63670 for actual removal of
	 *   this capability in favor of a standard compliant
	 *   `configure(rx_on/rx_off)` call, see there for details.
	 *
	 * @deprecated Drivers and L2 SHALL not introduce additional references
	 * to this capability and remove existing ones as outlined in #63670.
	 */
	IEEE802154_HW_SLEEP_TO_TX = BIT(9),

	/** Timed RX window scheduling supported */
	IEEE802154_HW_RXTIME = BIT(10),

	/** TX security supported (key management, encryption and authentication) */
	IEEE802154_HW_TX_SEC = BIT(11),

	/** RxOnWhenIdle handling supported */
	IEEE802154_RX_ON_WHEN_IDLE = BIT(12),

	/* Note: Update also IEEE802154_HW_CAPS_BITS_COMMON_COUNT when changing
	 * the ieee802154_hw_caps type.
	 */
};

/** @brief Number of bits used by ieee802154_hw_caps type. */
#define IEEE802154_HW_CAPS_BITS_COMMON_COUNT (13)

/** @brief This and higher values are specific to the protocol- or driver-specific extensions. */
#define IEEE802154_HW_CAPS_BITS_PRIV_START IEEE802154_HW_CAPS_BITS_COMMON_COUNT
/** Filter type, selects which member of @ref ieee802154_filter is valid, see
 *  @ref ieee802154_radio_api::filter
 */
enum ieee802154_filter_type {
	IEEE802154_FILTER_TYPE_IEEE_ADDR,      /**< Extended (IEEE) address filter */
	IEEE802154_FILTER_TYPE_SHORT_ADDR,     /**< Short address filter */
	IEEE802154_FILTER_TYPE_PAN_ID,         /**< PAN ID filter */
	IEEE802154_FILTER_TYPE_SRC_IEEE_ADDR,  /**< Source extended (IEEE) address filter */
	IEEE802154_FILTER_TYPE_SRC_SHORT_ADDR, /**< Source short address filter */
};
/** Driver events, see @ref IEEE802154_CONFIG_EVENT_HANDLER */
enum ieee802154_event {
	/** Data transmission started */
	IEEE802154_EVENT_TX_STARTED,

	/** Data reception failed, reasons in @ref ieee802154_rx_fail_reason */
	IEEE802154_EVENT_RX_FAILED,

	/**
	 * An RX slot ended, requires @ref IEEE802154_HW_RXTIME.
	 *
	 * @note This event SHALL not be triggered by drivers when RX is
	 * synchronously switched off due to a call to `stop()` or an RX slot
	 * being configured.
	 */
	IEEE802154_EVENT_RX_OFF,
};
/** RX failed event reasons, see @ref IEEE802154_EVENT_RX_FAILED */
enum ieee802154_rx_fail_reason {
	/** Nothing received */
	IEEE802154_RX_FAIL_NOT_RECEIVED,

	/** Frame had invalid checksum (FCS) */
	IEEE802154_RX_FAIL_INVALID_FCS,

	/** Address did not match */
	IEEE802154_RX_FAIL_ADDR_FILTERED,

	/** General reason, none of the above */
	IEEE802154_RX_FAIL_OTHER
};
/**
 * Energy scan callback, reports the result of an energy detection (ED) scan.
 *
 * @param dev pointer to the driver instance that performed the scan
 * @param max_ed maximum energy detected during the scan
 *               (NOTE(review): units/scale not visible here - confirm against
 *               the driver's ED implementation)
 */
typedef void (*energy_scan_done_cb_t)(const struct device *dev,
				      int16_t max_ed);

/**
 * Driver event callback, see @ref IEEE802154_CONFIG_EVENT_HANDLER.
 *
 * @param dev pointer to the driver instance signalling the event
 * @param evt the event being signalled, see @ref ieee802154_event
 * @param event_params event-specific parameters
 *                     (NOTE(review): layout depends on the event type -
 *                     confirm against the event's documentation)
 */
typedef void (*ieee802154_event_cb_t)(const struct device *dev,
				      enum ieee802154_event evt,
				      void *event_params);
/** Filter value, see @ref ieee802154_radio_api::filter */
struct ieee802154_filter {
	/* Anonymous union (C11): the valid member is selected by the
	 * accompanying @ref ieee802154_filter_type.
	 */
	union {
		/** Extended address, in little endian */
		uint8_t *ieee_addr;
		/** Short address, in CPU byte order */
		uint16_t short_addr;
		/** PAN ID, in CPU byte order */
		uint16_t pan_id;
	};
};
/**
 * Key configuration for transmit security offloading, see @ref
 * IEEE802154_CONFIG_MAC_KEYS.
 */
struct ieee802154_key {
	/** Key material
	 *  (NOTE(review): key length is not encoded here - presumably implied
	 *  by the cipher in use; confirm against the driver implementation)
	 */
	uint8_t *key_value;
	/** Initial value of frame counter associated with the key, see section 9.4.3 */
	uint32_t key_frame_counter;
	/** Indicates if per-key frame counter should be used, see section 9.4.3 */
	bool frame_counter_per_key;
	/** Key Identifier Mode, see section 9.4.2.3, Table 9-7 */
	uint8_t key_id_mode;
	/** Key Identifier, see section 9.4.4 */
	uint8_t *key_id;
};
/** IEEE 802.15.4 Transmission mode. */
enum ieee802154_tx_mode {
	/** Transmit packet immediately, no CCA. */
	IEEE802154_TX_MODE_DIRECT,

	/** Perform CCA (clear channel assessment) before packet transmission. */
	IEEE802154_TX_MODE_CCA,

	/**
	 * Perform full CSMA/CA procedure before packet transmission.
	 *
	 * @note requires IEEE802154_HW_CSMA capability.
	 */
	IEEE802154_TX_MODE_CSMA_CA,

	/**
	 * Transmit packet in the future, at the specified time, no CCA.
	 *
	 * @note requires IEEE802154_HW_TXTIME capability.
	 */
	IEEE802154_TX_MODE_TXTIME,

	/**
	 * Transmit packet in the future, perform CCA before transmission.
	 *
	 * @note requires IEEE802154_HW_TXTIME capability.
	 *
	 * @note Required for Thread 1.2 Coordinated Sampled Listening feature
	 * (see Thread specification 1.2.0, ch. 3.2.6.3).
	 */
	IEEE802154_TX_MODE_TXTIME_CCA,

	/** Number of modes defined in ieee802154_tx_mode. */
	IEEE802154_TX_MODE_COMMON_COUNT,

	/** This and higher values are specific to the protocol- or driver-specific extensions. */
	IEEE802154_TX_MODE_PRIV_START = IEEE802154_TX_MODE_COMMON_COUNT,
};
/** IEEE 802.15.4 Frame Pending Bit table address matching mode. */
enum ieee802154_fpb_mode {
	/** The pending bit shall be set only for addresses found in the list
	 *  (Thread-style allow list).
	 */
	IEEE802154_FPB_ADDR_MATCH_THREAD,

	/** The pending bit shall be cleared for short addresses found in the
	 *  list (Zigbee-style deny list).
	 */
	IEEE802154_FPB_ADDR_MATCH_ZIGBEE,
};
/** IEEE 802.15.4 driver configuration types. */
enum ieee802154_config_type {
/**
* Indicates how the driver should set the Frame Pending bit in ACK
* responses for Data Requests. If enabled, the driver should determine
* whether to set the bit or not based on the information provided with
* @ref IEEE802154_CONFIG_ACK_FPB config and FPB address matching mode
* specified. Otherwise, Frame Pending bit should be set to ``1`` (see
* section 6.7.3).
*
* @note requires @ref IEEE802154_HW_TX_RX_ACK capability and is
* available in any interface operational state.
*/
IEEE802154_CONFIG_AUTO_ACK_FPB,
/**
* Indicates whether to set ACK Frame Pending bit for specific address
* or not. Disabling the Frame Pending bit with no address provided
* (NULL pointer) should disable it for all enabled addresses.
*
* @note requires @ref IEEE802154_HW_TX_RX_ACK capability and is
* available in any interface operational state.
*/
IEEE802154_CONFIG_ACK_FPB,
/**
* Indicates whether the device is a PAN coordinator. This influences
* packet filtering.
*
* @note Available in any interface operational state.
*/
IEEE802154_CONFIG_PAN_COORDINATOR,
/**
* Enable/disable promiscuous mode.
*
* @note Available in any interface operational state.
*/
IEEE802154_CONFIG_PROMISCUOUS,
/**
* Specifies new IEEE 802.15.4 driver event handler. Specifying NULL as
* a handler will disable events notification.
*
* @note Available in any interface operational state.
*/
IEEE802154_CONFIG_EVENT_HANDLER,
/**
* Updates MAC keys, key index and the per-key frame counter for drivers
* supporting transmit security offloading, see section 9.5, tables 9-9
* and 9-10. The key configuration SHALL NOT be accepted if the frame
* counter (in case frame counter per key is true) is not strictly
* larger than the current frame counter associated with the same key,
* see sections 8.2.2, 9.2.4 g/h) and 9.4.3.
*
* @note Requires @ref IEEE802154_HW_TX_SEC capability and is available
* in any interface operational state.
*/
IEEE802154_CONFIG_MAC_KEYS,
/**
* Sets the current MAC frame counter value associated with the
* interface for drivers supporting transmit security offloading, see
* section 9.5, table 9-8, secFrameCounter.
*
* @warning The frame counter MUST NOT be accepted if it is not
* strictly greater than the current frame counter associated with the
* interface, see sections 8.2.2, 9.2.4 g/h) and 9.4.3. Otherwise the
* replay protection provided by the frame counter may be compromised.
* Drivers SHALL return -EINVAL in case the configured frame counter
* does not conform to this requirement.
*
* @note Requires @ref IEEE802154_HW_TX_SEC capability and is available
* in any interface operational state.
*/
IEEE802154_CONFIG_FRAME_COUNTER,
/**
* Sets the current MAC frame counter value if the provided value is greater than
* the current one.
*
* @note Requires @ref IEEE802154_HW_TX_SEC capability and is available
* in any interface operational state.
*
* @warning This configuration option does not conform to the
* requirements specified in #61227 as it is redundant with @ref
* IEEE802154_CONFIG_FRAME_COUNTER, and will therefore be deprecated in
* the future.
*/
IEEE802154_CONFIG_FRAME_COUNTER_IF_LARGER,
/**
* Set or unset a radio reception window (RX slot). This can be used for
* any scheduled reception, e.g.: Zigbee GP device, CSL, TSCH, etc.
*
* @details The start and duration parameters of the RX slot are
* relative to the network subsystem's local clock. If the start
* parameter of the RX slot is -1 then any previously configured RX
* slot SHALL be canceled immediately. If the start parameter is any
* value in the past (including 0) or the duration parameter is zero
* then the receiver SHALL remain off forever until the RX slot has
* either been removed or re-configured to point to a future start
* time. If an RX slot is configured while the previous RX slot is
* still scheduled, then the previous slot SHALL be cancelled and the
* new slot scheduled instead.
*
* RX slots MAY be programmed while the driver is "DOWN". If any past
* or future RX slot is configured when calling `start()` then the
* interface SHALL be placed in "UP" state but the receiver SHALL not
* be started.
*
* The driver SHALL take care to start/stop the receiver autonomously,
* asynchronously and automatically around the RX slot. The driver
* SHALL resume power just before the RX slot and suspend it again
* after the slot unless another programmed event forces the driver not
* to suspend. The driver SHALL switch to the programmed channel
* before the RX slot and back to the channel set with set_channel()
* after the RX slot. If the driver interface is "DOWN" when the start
* time of an RX slot arrives, then the RX slot SHALL not be observed
* and the receiver SHALL remain off.
*
* If the driver is "UP" while configuring an RX slot, the driver SHALL
* turn off the receiver immediately and (possibly asynchronously) put
* the driver into the lowest possible power saving mode until the
* start of the RX slot. If the driver is "UP" while the RX slot is
* deleted, then the driver SHALL enable the receiver immediately. The
* receiver MUST be ready to receive packets before returning from the
* `configure()` operation in this case.
*
* This behavior means that setting an RX slot implicitly sets the MAC
* PIB attribute macRxOnWhenIdle (see section 8.4.3.1, table 8-94) to
* "false" while deleting the RX slot implicitly sets macRxOnWhenIdle to
* "true".
*
* @note requires @ref IEEE802154_HW_RXTIME capability and is available
* in any interface operational state.
*
* @note Required for Thread 1.2 Coordinated Sampled Listening feature
* (see Thread specification 1.2.0, ch. 3.2.6.3).
*/
IEEE802154_CONFIG_RX_SLOT,
/**
* Enables or disables a device as a CSL receiver and configures its CSL
* period.
*
* @details Configures the CSL period in units of 10 symbol periods.
* Values greater than zero enable CSL if the driver supports it and the
* device starts to operate as a CSL receiver. Setting this to zero
* disables CSL on the device. If the driver does not support CSL, the
* configuration call SHALL return -ENOTSUP.
*
* See section 7.4.2.3 and section 8.4.3.6, table 8-104, macCslPeriod.
*
* @note Confusingly the standard calls the CSL receiver "CSL
* coordinator" (i.e. "coordinating the CSL protocol timing", see
* section 6.12.2.2), although, typically, a CSL coordinator is NOT also
	 * an IEEE 802.15.4 FFD coordinator or PAN coordinator but a simple RFD
* end device (compare the device roles outlined in sections 5.1, 5.3,
* 5.5 and 6.1). To avoid confusion we therefore prefer calling CSL
* coordinators (typically an RFD end device) "CSL receivers" and CSL
* peer devices (typically FFD coordinators or PAN coordinators) "CSL
* transmitters". Also note that at this time, we do NOT support
* unsynchronized transmission with CSL wake up frames as specified in
* section 6.12.2.4.4.
*
* To offload CSL receiver timing to the driver the upper layer SHALL
* combine several configuration options in the following way:
*
* 1. Use @ref IEEE802154_CONFIG_ENH_ACK_HEADER_IE once with an
* appropriate pre-filled CSL IE and the CSL phase set to an
* arbitrary value or left uninitialized. The CSL phase SHALL be
* injected on-the-fly by the driver at runtime as outlined in 2.
* below. Adding a short and extended address will inform the driver
* of the specific CSL receiver to which it SHALL inject CSL IEs. If
* no addresses are given then the CSL IE will be injected into all
* enhanced ACK frames as soon as CSL is enabled. This configuration
* SHALL be done before enabling CSL by setting a CSL period greater
* than zero.
*
* 2. Configure @ref IEEE802154_CONFIG_EXPECTED_RX_TIME immediately
* followed by @ref IEEE802154_CONFIG_CSL_PERIOD. To prevent race
* conditions, the upper layer SHALL ensure that the receiver is not
* enabled during or between the two calls (e.g. by a previously
* configured RX slot) nor SHALL a frame be transmitted concurrently.
*
* The expected RX time SHALL point to the end of SFD of an ideally
* timed RX frame in an arbitrary past or future CSL channel sample,
* i.e. whose "end of SFD" arrives exactly at the locally predicted
* time inside the CSL channel sample.
*
* The driver SHALL derive CSL anchor points and the CSL phase from
* the given expected RX time as follows:
*
* cslAnchorPointNs = last expected RX time
* + PHY-specific PHR duration in ns
*
* startOfMhrNs = start of MHR of the frame containing the
* CSL IE relative to the local network clock
*
* cslPhase = (startOfMhrNs - cslAnchorPointNs)
* / (10 * PHY specific symbol period in ns)
* % cslPeriod
*
* The driver SHALL set the CSL phase in the IE configured in 1. and
* inject that IE on-the-fly into outgoing enhanced ACK frames if the
* destination address conforms to the IE's address filter.
*
* 3. Use @ref IEEE802154_CONFIG_RX_SLOT periodically to schedule
* each CSL channel sample early enough before its start time. The
* size of the CSL channel sample SHALL take relative clock drift and
* scheduling uncertainties with respect to CSL transmitters into
* account as specified by the standard such that at least the full
* SHR of a legitimate RX frame is guaranteed to land inside the
* channel sample.
*
* To this avail, the last configured expected RX time plus an
* integer number of CSL periods SHALL point to a fixed offset of the
* RX slot (not necessarily its center):
*
* expectedRxTimeNs_N = last expected RX time
* + N * (cslPeriod * 10 * PHY-specific symbol period in ns)
*
* expectedRxTimeNs_N - rxSlot_N.start == const for all N
*
* While the configured CSL period is greater than zero, drivers
* SHOULD validate the offset of the expected RX time inside each RX
* slot accordingly. If the driver finds that the offset varies from
* slot to slot, drivers SHOULD log the difference but SHALL
* nevertheless accept and schedule the RX slot with a zero success
* value to work around minor implementation or rounding errors in
* upper layers.
*
* Configure and start a CSL receiver:
*
* ENH_ACK_HEADER_IE
* |
* | EXPECTED_RX_TIME (end of SFD of a perfectly timed RX frame
* | | in any past or future channel sample)
* | |
* | | CSL_PERIOD (>0) RX_SLOT
* | | | |
* v v v v
* -----------------------------------------------[-CSL channel sample ]----+
* ^ |
* | |
* +--------------------- loop ---------+
*
* Disable CSL on the receiver:
*
* CSL_PERIOD (=0)
* |
* v
* ---------------------
*
* Update the CSL period to a new value:
*
* EXPECTED_RX_TIME (based on updated period)
* |
* | CSL_PERIOD (>0, updated) RX_SLOT
* | | |
* v v v
* -----------------------------------------------[-CSL channel sample ]----+
* ^ |
* | |
* +--------------------- loop ---------+
*
* @note Available in any interface operational state.
*
* @note Required for Thread 1.2 Coordinated Sampled Listening feature
* (see Thread specification 1.2.0, ch. 3.2.6.3).
*/
IEEE802154_CONFIG_CSL_PERIOD,
/**
* Configure a timepoint at which an RX frame is expected to arrive.
*
* @details Configure the nanosecond resolution timepoint relative to
* the network subsystem's local clock at which an RX frame's end of SFD
* (i.e. equivalently its end of SHR, start of PHR, or in the case of
* PHYs with RDEV or ERDEV capability the RMARKER) is expected to arrive
* at the local antenna assuming perfectly synchronized local and remote
* network clocks and zero distance between antennas.
*
* This parameter MAY be used to offload parts of timing sensitive TDMA
* (e.g. TSCH, beacon-enabled PAN including DSME), low-energy (e.g.
* CSL, RIT) or ranging (TDoA) protocols to the driver. In these
* protocols, medium access is tightly controlled such that the expected
* arrival time of a frame can be predicted within a well-defined time
* window. This feature will typically be combined with @ref
* IEEE802154_CONFIG_RX_SLOT although this is not a hard requirement.
*
* The "expected RX time" MAY be interpreted slightly differently
* depending on the protocol context:
* - CSL phase (i.e. time to the next expected CSL transmission) or anchor
* time (i.e. any arbitrary timepoint with "zero CSL phase") SHALL be
* derived by adding the PHY header duration to the expected RX time
* to calculate the "start of MHR" ("first symbol of MAC", see section
* 6.12.2.1) required by the CSL protocol, compare @ref
* IEEE802154_CONFIG_CSL_PERIOD.
* - In TSCH the expected RX time MAY be set to macTsRxOffset +
* macTsRxWait / 2. Then the time correction SHALL be calculated as
* the expected RX time minus actual arrival timestamp, see section
* 6.5.4.3.
* - In ranging applications, time difference of arrival (TDOA) MAY be
* calculated inside the driver comparing actual RMARKER timestamps
* against the assumed synchronized time at which the ranging frame
* was sent, see IEEE 802.15.4z.
*
* In case of periodic protocols (e.g. CSL channel samples, periodic
* beacons of a single PAN, periodic ranging "blinks"), a single
* timestamp at any time in the past or in the future may be given from
* which other expected timestamps can be derived by adding or
* subtracting multiples of the RX period. See e.g. the CSL
* documentation in this API.
*
* Additionally this parameter MAY be used by drivers to discipline
* their local representation of a distributed network clock by deriving
* synchronization instants related to a remote representation of the
* same clock (as in PTP).
*
* @note Available in any interface operational state.
*
* @note Required for Thread 1.2 Coordinated Sampled Listening feature
* (see Thread specification 1.2.0, ch. 3.2.6.3).
*/
IEEE802154_CONFIG_EXPECTED_RX_TIME,
/**
* Adds a header information element (IE) to be injected into enhanced
* ACK frames generated by the driver if the given destination address
* filter matches.
*
* @details Drivers implementing the @ref IEEE802154_HW_RX_TX_ACK
* capability generate ACK frames autonomously. Setting this
* configuration will ask the driver to inject the given preconfigured
* header IE when generating enhanced ACK frames where appropriate by
* the standard. IEs for all other frame types SHALL be provided by L2.
*
* The driver shall return -ENOTSUP in the following cases:
* - It does not support the @ref IEEE802154_HW_RX_TX_ACK,
* - It does not support header IE injection,
* - It cannot inject the runtime fields on-the-fly required for the
* given IE element ID (see list below).
*
* Enhanced ACK header IEs (element IDs in parentheses) that either
* need to be rejected or explicitly supported and parsed by the driver
* because they require on-the-fly timing information injection are:
* - CSL IE (0x1a)
* - Rendezvous Time IE (0x1d)
* - Time Correction IE (0x1e)
*
* Drivers accepting this configuration option SHALL check the list of
* configured IEs for each outgoing enhanced ACK frame, select the ones
* appropriate for the received frame based on their element ID, inject
* any required runtime information on-the-fly and include the selected
* IEs into the enhanced ACK frame's MAC header.
*
* Drivers supporting enhanced ACK header IE injection SHALL
* autonomously inject header termination IEs as required by the
* standard.
*
* A destination short address and extended address MAY be given by L2
* to filter the devices to which the given IE is included. Setting the
* short address to the broadcast address and the extended address to
* NULL will inject the given IE into all ACK frames unless a more
* specific filter is also present for any given destination device
* (fallback configuration). L2 SHALL take care to either set both
* address fields to valid device addresses or none.
*
* This configuration type may be called several times with distinct
* element IDs and/or addresses. The driver SHALL either store all
* configured IE/address combinations or return -ENOMEM if no
* additional configuration can be stored.
*
* Configuring a header IE with a previously configured element ID and
* address filter SHALL override the previous configuration. This
* implies that repetition of the same header IE/address combination is
* NOT supported.
*
	 * Configuring an existing element ID/address filter combination with
	 * the header IE's length field set to zero SHALL remove that
	 * configuration. If no address is given, this SHALL remove the
	 * fallback configuration.
*
	 * Configuring a header IE for an address filter with the header IE
	 * pointer set to NULL SHALL remove all header IEs for that address
	 * filter. If no address is given, this SHALL remove ALL header IE
	 * configuration (including but not limited to fallbacks).
*
* If any of the deleted configurations didn't previously exist, then
* the call SHALL be ignored. Whenever the length field is set to zero,
* the content fields MUST NOT be accessed by the driver.
*
* L2 SHALL minimize the space required to keep IE configuration inside
* the driver by consolidating address filters and by removing
* configuration that is no longer required.
*
* @note requires @ref IEEE802154_HW_RX_TX_ACK capability and is
* available in any interface operational state. Currently we only
* support header IEs but that may change in the future.
*
* @note Required for Thread 1.2 Coordinated Sampled Listening feature
* (see Thread specification 1.2.0, ch. 3.2.6.3).
*
* @note Required for Thread 1.2 Link Metrics feature (see Thread
* specification 1.2.0, ch. 4.11.3.3).
*/
IEEE802154_CONFIG_ENH_ACK_HEADER_IE,
/**
* Enable/disable RxOnWhenIdle MAC PIB attribute (Table 8-94).
*
* Since there is no clear guidance in IEEE 802.15.4 specification about the definition of
* an "idle period", this implementation expects that drivers use the RxOnWhenIdle attribute
* to determine next radio state (false --> off, true --> receive) in the following
* scenarios:
* - Finalization of a regular frame reception task, provided that:
	 *   - The frame is received without errors, passes the filtering and is
	 *     not a spurious ACK.
* - ACK is not requested or transmission of ACK is not possible due to internal
* conditions.
* - Finalization of a frame transmission or transmission of an ACK frame, when ACK is not
* requested in the transmitted frame.
* - Finalization of the reception operation of a requested ACK due to:
* - ACK timeout expiration.
* - Reception of an invalid ACK or not an ACK frame.
* - Reception of the proper ACK, unless the transmitted frame was a Data Request Command
* and the frame pending bit on the received ACK is set to true. In this case the radio
* platform implementation SHOULD keep the receiver on until a determined timeout which
* triggers an idle period start.
* - Finalization of a stand alone CCA task.
* - Finalization of a CCA operation with busy result during CSMA/CA procedure.
* - Finalization of an Energy Detection task.
* - Finalization of a scheduled radio reception window
* (see @ref IEEE802154_CONFIG_RX_SLOT).
*/
IEEE802154_CONFIG_RX_ON_WHEN_IDLE,
/** Number of types defined in ieee802154_config_type. */
IEEE802154_CONFIG_COMMON_COUNT,
	/** This and higher values are reserved for protocol- or driver-specific extensions. */
IEEE802154_CONFIG_PRIV_START = IEEE802154_CONFIG_COMMON_COUNT,
};
/**
 * Configuring an RX slot with the start parameter set to this value will cancel
 * and delete any previously configured RX slot.
 */
#define IEEE802154_CONFIG_RX_SLOT_NONE -1LL
/**
 * Configuring an RX slot with this start parameter while the driver is "down",
 * will keep RX off when the driver is being started. Configuring an RX slot
 * with this start value while the driver is "up" will immediately switch RX off
 * until either the slot is deleted, see @ref IEEE802154_CONFIG_RX_SLOT_NONE or
 * a slot with a future start parameter is configured and that start time
 * arrives.
 */
#define IEEE802154_CONFIG_RX_SLOT_OFF 0LL
/** IEEE 802.15.4 driver configuration data. */
struct ieee802154_config {
	/** Configuration data. */
	union {
		/** see @ref IEEE802154_CONFIG_AUTO_ACK_FPB */
		struct {
			bool enabled; /**< Is auto ACK FPB enabled */
			enum ieee802154_fpb_mode mode; /**< Auto ACK FPB mode */
		} auto_ack_fpb;
		/** see @ref IEEE802154_CONFIG_ACK_FPB */
		struct {
			uint8_t *addr; /**< little endian for both short and extended address */
			bool extended; /**< Is extended address */
			bool enabled; /**< Is enabled */
		} ack_fpb;
		/** see @ref IEEE802154_CONFIG_PAN_COORDINATOR */
		bool pan_coordinator;
		/** see @ref IEEE802154_CONFIG_PROMISCUOUS */
		bool promiscuous;
		/** see @ref IEEE802154_CONFIG_RX_ON_WHEN_IDLE */
		bool rx_on_when_idle;
		/** see @ref IEEE802154_CONFIG_EVENT_HANDLER */
		ieee802154_event_cb_t event_handler;
		/**
		 * @brief see @ref IEEE802154_CONFIG_MAC_KEYS
		 *
		 * @details Pointer to an array containing a list of keys used
		 * for MAC encryption. Refer to secKeyIdLookupDescriptor and
		 * secKeyDescriptor in IEEE 802.15.4
		 *
		 * The key_value field points to a buffer containing the 16 byte
		 * key. The buffer SHALL be copied by the driver before
		 * returning from the call.
		 *
		 * The variable length array is terminated by key_value field
		 * set to NULL.
		 */
		struct ieee802154_key *mac_keys;
		/** see @ref IEEE802154_CONFIG_FRAME_COUNTER */
		uint32_t frame_counter;
		/** see @ref IEEE802154_CONFIG_RX_SLOT */
		struct {
			/**
			 * Nanosecond resolution timestamp relative to the
			 * network subsystem's local clock defining the start of
			 * the RX window during which the receiver is expected
			 * to be listening (i.e. not including any driver
			 * startup times).
			 *
			 * Configuring an rx_slot with the start attribute set
			 * to @ref IEEE802154_CONFIG_RX_SLOT_NONE (i.e. -1)
			 * will cancel and delete any previously active rx
			 * slot.
			 */
			net_time_t start;
			/**
			 * Nanosecond resolution duration of the RX window
			 * relative to the above RX window start time during
			 * which the receiver is expected to be listening (i.e.
			 * not including any shutdown times). Only values
			 * greater than or equal to zero are allowed.
			 *
			 * Setting the duration to zero will disable the
			 * receiver, no matter what the start parameter.
			 */
			net_time_t duration;
			/**
			 * Channel on which the receiver listens during the
			 * configured RX slot.
			 */
			uint8_t channel;
		} rx_slot;
		/**
		 * see @ref IEEE802154_CONFIG_CSL_PERIOD
		 *
		 * in CPU byte order
		 */
		uint32_t csl_period;
		/**
		 * see @ref IEEE802154_CONFIG_EXPECTED_RX_TIME
		 */
		net_time_t expected_rx_time;
		/** see @ref IEEE802154_CONFIG_ENH_ACK_HEADER_IE */
		struct {
			/**
			 * Pointer to the header IE, see section 7.4.2.1,
			 * figure 7-21
			 *
			 * Certain header IEs may be incomplete if they require
			 * timing information to be injected at runtime
			 * on-the-fly, see the list in @ref
			 * IEEE802154_CONFIG_ENH_ACK_HEADER_IE.
			 */
			struct ieee802154_header_ie *header_ie;
			/**
			 * Filters the devices that will receive this IE by
			 * extended address. MAY be set to NULL to configure a
			 * fallback for all devices (implies that short_addr
			 * MUST also be set to @ref
			 * IEEE802154_BROADCAST_ADDRESS).
			 *
			 * in big endian
			 */
			const uint8_t *ext_addr;
			/**
			 * Filters the devices that will receive this IE by
			 * short address. MAY be set to @ref
			 * IEEE802154_BROADCAST_ADDRESS to configure a fallback
			 * for all devices (implies that ext_addr MUST also set
			 * to NULL in this case).
			 *
			 * in CPU byte order
			 */
			uint16_t short_addr;
			/**
			 * Flag for purging enh ACK header IEs.
			 * When flag is set to true, driver should remove all existing
			 * header IEs, and all other entries in config should be ignored.
			 * This means that purging current header IEs and
			 * configuring a new one in the same call is not allowed.
			 */
			bool purge_ie;
		} ack_ie;
	};
};
/**
 * @brief IEEE 802.15.4 driver attributes.
 *
 * See @ref ieee802154_attr_value and @ref ieee802154_radio_api for usage
 * details.
 */
enum ieee802154_attr {
	/**
	 * Retrieves a bit field with supported channel pages. This attribute
	 * SHALL be implemented by all drivers.
	 */
	IEEE802154_ATTR_PHY_SUPPORTED_CHANNEL_PAGES,
	/**
	 * Retrieves a pointer to the array of supported channel ranges within
	 * the currently configured channel page. This attribute SHALL be
	 * implemented by all drivers.
	 */
	IEEE802154_ATTR_PHY_SUPPORTED_CHANNEL_RANGES,
	/**
	 * Retrieves a bit field with supported HRP UWB nominal pulse repetition
	 * frequencies. This attribute SHALL be implemented by all devices that
	 * support channel page four (HRP UWB).
	 */
	IEEE802154_ATTR_PHY_HRP_UWB_SUPPORTED_PRFS,
	/** Number of attributes defined in ieee802154_attr. */
	IEEE802154_ATTR_COMMON_COUNT,
	/** This and higher values are reserved for protocol- or
	 * driver-specific extensions.
	 */
	IEEE802154_ATTR_PRIV_START = IEEE802154_ATTR_COMMON_COUNT,
};
/**
 * @brief IEEE 802.15.4 driver attribute values.
 *
 * @details This structure is reserved to scalar and structured attributes that
 * originate in the driver implementation and can neither be implemented as
 * boolean @ref ieee802154_hw_caps nor be derived directly or indirectly by the
 * MAC (L2) layer. In particular this structure MUST NOT be used to return
 * configuration data that originate from L2.
 *
 * @note To keep this union reasonably small, any attribute requiring a large
 * memory area, SHALL be provided pointing to static memory allocated by the
 * driver and valid throughout the lifetime of the driver instance.
 */
struct ieee802154_attr_value {
	union {
		/* TODO: Implement configuration of phyCurrentPage once drivers
		 * need to support channel page switching at runtime.
		 */
		/**
		 * @brief A bit field that represents the supported channel
		 * pages, see @ref ieee802154_phy_channel_page.
		 *
		 * @note To keep the API extensible as required by the standard,
		 * supported pages are modeled as a bitmap to support drivers
		 * that implement runtime switching between multiple channel
		 * pages.
		 *
		 * @note Currently none of the Zephyr drivers implements more
		 * than one channel page at runtime, therefore only one bit will
		 * be set and the current channel page (see the PHY PIB
		 * attribute phyCurrentPage, section 11.3, table 11-2) is
		 * considered to be read-only, fixed and "well known" via the
		 * supported channel pages attribute.
		 */
		uint32_t phy_supported_channel_pages;
		/**
		 * @brief Pointer to a structure representing channel ranges
		 * currently available on the selected channel page.
		 *
		 * @warning The pointer must be valid and constant throughout
		 * the life of the interface.
		 *
		 * @details The selected channel page corresponds to the
		 * phyCurrentPage PHY PIB attribute, see the description of
		 * phy_supported_channel_pages above. Currently it can be
		 * retrieved via the @ref
		 * IEEE802154_ATTR_PHY_SUPPORTED_CHANNEL_PAGES attribute.
		 *
		 * Most drivers will expose a single channel page with a single,
		 * often zero-based, fixed channel range.
		 *
		 * Some notable exceptions:
		 * * The legacy channel page (zero) exposes ranges in different
		 *   bands and even PHYs that are usually not implemented by a
		 *   single driver.
		 * * SUN and LECIM PHYs specify a large number of bands and
		 *   operating modes on a single page with overlapping channel
		 *   ranges each. Some of these ranges are not zero-based or
		 *   contain "holes". This explains why several ranges may be
		 *   necessary to represent all available channels.
		 * * UWB PHYs often support partial channel ranges on the same
		 *   channel page depending on the supported bands.
		 *
		 * In these cases, drivers may expose custom configuration
		 * attributes (Kconfig, devicetree, runtime, ...) that allow
		 * switching between sub-ranges within the same channel page
		 * (e.g. switching between SubG and 2.4G bands on channel page
		 * zero or switching between multiple operating modes in the SUN
		 * or LECIM PHYs).
		 */
		const struct ieee802154_phy_supported_channels *phy_supported_channels;
		/* TODO: Allow the PRF to be configured for each TX call once
		 * drivers need to support PRF switching at runtime.
		 */
		/**
		 * @brief A bit field representing supported HRP UWB pulse
		 * repetition frequencies (PRF), see enum
		 * ieee802154_phy_hrp_uwb_nominal_prf.
		 *
		 * @note Currently none of the Zephyr HRP UWB drivers implements
		 * more than one nominal PRF at runtime, therefore only one bit
		 * will be set and the current PRF (UwbPrf, MCPS-DATA.request,
		 * section 8.3.2, table 8-88) is considered to be read-only,
		 * fixed and "well known" via the supported PRF attribute.
		 */
		uint32_t phy_hrp_uwb_supported_nominal_prfs;
	};
};
/**
 * @brief Helper function to handle channel page and range to be called from
 * drivers' attr_get() implementation. This only applies to drivers with a
 * single channel page.
 *
 * @param attr The attribute to be retrieved.
 * @param phy_supported_channel_page The driver's unique channel page.
 * @param phy_supported_channels Pointer to the structure that contains the
 * driver's channel range or ranges.
 * @param value The pointer to the value struct provided by the user.
 *
 * @retval 0 if the attribute could be resolved
 * @retval -ENOENT if the attribute could not be resolved
 */
static inline int ieee802154_attr_get_channel_page_and_range(
	enum ieee802154_attr attr,
	const enum ieee802154_phy_channel_page phy_supported_channel_page,
	const struct ieee802154_phy_supported_channels *phy_supported_channels,
	struct ieee802154_attr_value *value)
{
	/* Guard-clause style: resolve each known attribute and bail out,
	 * otherwise report that the attribute is unknown to this helper.
	 */
	if (attr == IEEE802154_ATTR_PHY_SUPPORTED_CHANNEL_PAGES) {
		value->phy_supported_channel_pages = phy_supported_channel_page;
		return 0;
	}

	if (attr == IEEE802154_ATTR_PHY_SUPPORTED_CHANNEL_RANGES) {
		value->phy_supported_channels = phy_supported_channels;
		return 0;
	}

	return -ENOENT;
}
/**
* @brief IEEE 802.15.4 driver interface API.
*
* @note This structure is called "radio" API for backwards compatibility. A
* better name would be "IEEE 802.15.4 driver API" as typical drivers will not
* only implement L1/radio (PHY) features but also L2 (MAC) features if the
* vendor-specific driver hardware or firmware offers offloading opportunities.
*
* @details While L1-level driver features are exclusively implemented by
* drivers and MAY be mandatory to support certain application requirements, L2
* features SHOULD be optional by default and only need to be implemented for
* performance optimization or precise timing as deemed necessary by driver
* maintainers. Fallback implementations ("Soft MAC") SHOULD be provided in the
* driver-independent L2 layer for all L2/MAC features especially if these
* features are not implemented in vendor hardware/firmware by a majority of
* existing in-tree drivers. If, however, a driver offers offloading
* opportunities then L2 implementations SHALL delegate performance critical or
* resource intensive tasks to the driver.
*
* All drivers SHALL support two externally observable interface operational
* states: "UP" and "DOWN". Drivers MAY additionally support a "TESTING"
* interface state (see `continuous_carrier()`).
*
* The following rules apply:
* * An interface is considered "UP" when it is able to transmit and receive
* packets, "DOWN" otherwise (see precise definitions of the corresponding
* ifOperStatus values in RFC 2863, section 3.1.14, @ref net_if_oper_state and
* the `continuous_carrier()` exception below). A device that has its receiver
* temporarily disabled during "UP" state due to an active receive window
* configuration is still considered "UP".
* * Upper layers will assume that the interface managed by the driver is "UP"
* after a call to `start()` returned zero or `-EALREADY`. Upper layers assume
* that the interface is "DOWN" after calling `stop()` returned zero or
* `-EALREADY`.
* * The driver SHALL block `start()`/`stop()` calls until the interface fully
* transitioned to the new state (e.g. the receiver is operational, ongoing
* transmissions were finished, etc.). Drivers SHOULD yield the calling thread
* (i.e. "sleep") if waiting for the new state without CPU interaction is
* possible.
* * Drivers are responsible of guaranteeing atomicity of state changes.
* Appropriate means of synchronization SHALL be implemented (locking, atomic
* flags, ...).
* * While the interface is "DOWN", the driver SHALL be placed in the lowest
* possible power state. The driver MAY return from a call to `stop()` before
* it reaches the lowest possible power state, i.e. manage power
* asynchronously. While the interface is "UP", the driver SHOULD
* autonomously and asynchronously transition to lower power states whenever
* possible. If the driver claims to support timed RX/TX capabilities and the
* upper layers configure an RX slot, then the driver SHALL immediately
* transition (asynchronously) to the lowest possible power state until the
* start of the RX slot or until a scheduled packet needs to be transmitted.
* * The driver SHALL NOT change the interface's "UP"/"DOWN" state on its own.
* Initially, the interface SHALL be in the "DOWN" state.
* * Drivers that implement the optional `continuous_carrier()` operation will
* be considered to be in the RFC 2863 "testing" ifOperStatus state if that
* operation returns zero. This state is active until either `start()` or
* `stop()` is called. If `continuous_carrier()` returns a non-zero value then
* the previous state is assumed by upper layers.
* * If calls to `start()`/`stop()` return any other value than zero or
* `-EALREADY`, upper layers will consider the interface to be in a
* "lowerLayerDown" state as defined in RFC 2863.
 * * The RFC 2863 "dormant", "unknown" and "notPresent" ifOperStatus states are
 *   currently not supported. The same applies to a distinct "lowerLayerUp"
 *   state.
* * The `ed_scan()`, `cca()` and `tx()` operations SHALL only be supported in
* the "UP" state and return `-ENETDOWN` in any other state. See the
* function-level API documentation below for further details.
*
* @note In case of devices that support timed RX/TX, the "UP" state is not
* equal to "receiver enabled". If a receive window (i.e. RX slot, see @ref
* IEEE802154_CONFIG_RX_SLOT) is configured before calling `start()` then the
* receiver will not be enabled when transitioning to the "UP" state.
* Configuring a receive window while the interface is "UP" will cause the
* receiver to be disabled immediately until the configured reception time has
* arrived.
*/
struct ieee802154_radio_api {
/**
* @brief network interface API
*
* @note Network devices must extend the network interface API. It is
* therefore mandatory to place it at the top of the driver API struct so
* that it can be cast to a network interface.
*/
struct net_if_api iface_api;
/**
* @brief Get the device driver capabilities.
*
* @note Implementations SHALL be **isr-ok** and MUST NOT **sleep**. MAY
* be called in any interface state once the driver is fully initialized
* ("ready").
*
* @param dev pointer to IEEE 802.15.4 driver device
*
* @return Bit field with all supported device driver capabilities.
*/
enum ieee802154_hw_caps (*get_capabilities)(const struct device *dev);
/**
* @brief Clear Channel Assessment - Check channel's activity
*
* @note Implementations SHALL be **isr-ok** and MAY **sleep**. SHALL
* return -ENETDOWN unless the interface is "UP".
*
* @param dev pointer to IEEE 802.15.4 driver device
*
* @retval 0 the channel is available
* @retval -EBUSY The channel is busy.
* @retval -EWOULDBLOCK The operation is called from ISR context but
* temporarily cannot be executed without blocking.
* @retval -ENETDOWN The interface is not "UP".
* @retval -ENOTSUP CCA is not supported by this driver.
* @retval -EIO The CCA procedure could not be executed.
*/
int (*cca)(const struct device *dev);
/**
* @brief Set current channel
*
* @note Implementations SHALL be **isr-ok** and MAY **sleep**. SHALL
* return -EIO unless the interface is either "UP" or "DOWN".
*
* @param dev pointer to IEEE 802.15.4 driver device
* @param channel the number of the channel to be set in CPU byte order
*
* @retval 0 channel was successfully set
* @retval -EALREADY The previous channel is the same as the requested
* channel.
* @retval -EINVAL The given channel is not within the range of valid
* channels of the driver's current channel page, see the
* IEEE802154_ATTR_PHY_SUPPORTED_CHANNEL_RANGES driver attribute.
* @retval -EWOULDBLOCK The operation is called from ISR context but
* temporarily cannot be executed without blocking.
* @retval -ENOTSUP The given channel is within the range of valid
* channels of the driver's current channel page but unsupported by the
* current driver.
* @retval -EIO The channel could not be set.
*/
int (*set_channel)(const struct device *dev, uint16_t channel);
/**
* @brief Set/Unset PAN ID, extended or short address filters.
*
* @note requires IEEE802154_HW_FILTER capability.
*
* @note Implementations SHALL be **isr-ok** and MAY **sleep**. SHALL
* return -EIO unless the interface is either "UP" or "DOWN".
*
* @param dev pointer to IEEE 802.15.4 driver device
* @param set true to set the filter, false to remove it
* @param type the type of entity to be added/removed from the filter
* list (a PAN ID or a source/destination address)
* @param filter the entity to be added/removed from the filter list
*
* @retval 0 The filter was successfully added/removed.
* @retval -EINVAL The given filter entity or filter entity type
* was not valid.
* @retval -EWOULDBLOCK The operation is called from ISR context but
* temporarily cannot be executed without blocking.
* @retval -ENOTSUP Setting/removing this filter or filter type
* is not supported by this driver.
* @retval -EIO Error while setting/removing the filter.
*/
int (*filter)(const struct device *dev,
bool set,
enum ieee802154_filter_type type,
const struct ieee802154_filter *filter);
/**
* @brief Set TX power level in dbm
*
* @note Implementations SHALL be **isr-ok** and MAY **sleep**. SHALL
* return -EIO unless the interface is either "UP" or "DOWN".
*
* @param dev pointer to IEEE 802.15.4 driver device
* @param dbm TX power in dbm
*
* @retval 0 The TX power was successfully set.
* @retval -EINVAL The given dbm value is invalid or not supported by
* the driver.
* @retval -EWOULDBLOCK The operation is called from ISR context but
* temporarily cannot be executed without blocking.
* @retval -EIO The TX power could not be set.
*/
int (*set_txpower)(const struct device *dev, int16_t dbm);
/**
* @brief Transmit a packet fragment as a single frame
*
* @details Depending on the level of offloading features supported by
* the driver, the frame MAY not be fully encrypted/authenticated or it
* MAY not contain an FCS. It is the responsibility of L2
* implementations to prepare the frame according to the offloading
* capabilities announced by the driver and to decide whether CCA,
* CSMA/CA, ACK or retransmission procedures need to be executed outside
* ("soft MAC") or inside ("hard MAC") the driver .
*
* All frames originating from L2 SHALL have all required IEs
* pre-allocated and pre-filled such that the driver does not have to
* parse and manipulate IEs at all. This includes ACK packets if the
* driver does not have the @ref IEEE802154_HW_RX_TX_ACK capability.
* Also see @ref IEEE802154_CONFIG_ENH_ACK_HEADER_IE for drivers that
* have the @ref IEEE802154_HW_RX_TX_ACK capability.
*
* IEs that cannot be prepared by L2 unless the TX time is known (e.g.
* CSL IE, Rendezvous Time IE, Time Correction IE, ...) SHALL be sent in
* any of the timed TX modes with appropriate timing information
* pre-filled in the IE such that drivers do not have to parse and
* manipulate IEs at all unless the frame is generated by the driver
* itself.
*
* In case any of the timed TX modes is supported and used (see @ref
* ieee802154_hw_caps and @ref ieee802154_tx_mode), the driver SHALL
* take responsibility of scheduling and sending the packet at the
* precise programmed time autonomously without further interaction by
* upper layers. The call to `tx()` will block until the package has
* either been sent successfully (possibly including channel acquisition
* and packet acknowledgment) or a terminal transmission error occurred.
* The driver SHALL sleep and keep power consumption to the lowest
* possible level until the scheduled transmission time arrives or
* during any other idle waiting time.
*
* @warning The driver SHALL NOT take ownership of the given network
* packet and frame (fragment) buffer. Any data required by the driver
* including the actual frame content must be read synchronously and
* copied internally if needed at a later time (e.g. the contents of IEs
* required for protocol configuration, states of frame counters,
* sequence numbers, etc). Both, the packet and the buffer MAY be
* re-used or released by upper layers immediately after the function
* returns.
*
* @note Implementations MAY **sleep** and will usually NOT be
* **isr-ok** - especially when timed TX, CSMA/CA, retransmissions,
* auto-ACK or any other offloading feature is supported that implies
* considerable idle waiting time. SHALL return `-ENETDOWN` unless the
* interface is "UP".
*
* @param dev pointer to IEEE 802.15.4 driver device
* @param mode the transmission mode, some of which require specific
* offloading capabilities.
* @param pkt pointer to the network packet to be transmitted.
* @param frag pointer to a network buffer containing a single fragment
* with the frame data to be transmitted
*
* @retval 0 The frame was successfully sent or scheduled. If the driver
* supports ACK offloading and the frame requested acknowledgment (AR bit
* set), this means that the packet was successfully acknowledged by its
* peer.
* @retval -EINVAL Invalid packet (e.g. an expected IE is missing or the
* encryption/authentication state is not as expected).
* @retval -EBUSY The frame could not be sent because the medium was
* busy (CSMA/CA or CCA offloading feature only).
* @retval -ENOMSG The frame was not confirmed by an ACK packet (TX ACK
* offloading feature only) or the received ACK packet was invalid.
* @retval -ENOBUFS The frame could not be scheduled due to missing
* internal resources (timed TX offloading feature only).
* @retval -ENETDOWN The interface is not "UP".
* @retval -ENOTSUP The given TX mode is not supported.
* @retval -EIO The frame could not be sent due to some unspecified
* driver error (e.g. the driver being busy).
*/
int (*tx)(const struct device *dev, enum ieee802154_tx_mode mode,
struct net_pkt *pkt, struct net_buf *frag);
/**
* @brief Start the device.
*
* @details Upper layers will assume the interface is "UP" if this
* operation returns with zero or `-EALREADY`. The interface is placed
* in receive mode before returning from this operation unless an RX
* slot has been configured (even if it lies in the past, see @ref
* IEEE802154_CONFIG_RX_SLOT).
*
* @note Implementations SHALL be **isr-ok** and MAY **sleep**. MAY be
* called in any interface state once the driver is fully initialized
* ("ready").
*
* @param dev pointer to IEEE 802.15.4 driver device
*
* @retval 0 The driver was successfully started.
* @retval -EALREADY The driver was already "UP".
* @retval -EWOULDBLOCK The operation is called from ISR context but
* temporarily cannot be executed without blocking.
* @retval -EIO The driver could not be started.
*/
int (*start)(const struct device *dev);
/**
* @brief Stop the device.
*
* @details Upper layers will assume the interface is "DOWN" if this
* operation returns with zero or `-EALREADY`. The driver switches off
* the receiver before returning if it was previously on. The driver
* enters the lowest possible power mode after this operation is called.
* This MAY happen asynchronously (i.e. after the operation already
* returned control).
*
* @note Implementations SHALL be **isr-ok** and MAY **sleep**. MAY be
* called in any interface state once the driver is fully initialized
* ("ready").
*
* @param dev pointer to IEEE 802.15.4 driver device
*
* @retval 0 The driver was successfully stopped.
* @retval -EWOULDBLOCK The operation is called from ISR context but
* temporarily cannot be executed without blocking.
* @retval -EALREADY The driver was already "DOWN".
* @retval -EIO The driver could not be stopped.
*/
int (*stop)(const struct device *dev);
/**
* @brief Start continuous carrier wave transmission.
*
* @details The method blocks until the interface has started to emit a
* continuous carrier. To leave this mode, `start()` or `stop()` should
* be called, which will put the driver back into the "UP" or "DOWN"
* states, respectively.
*
* @note Implementations MAY **sleep** and will usually NOT be
* **isr-ok**. MAY be called in any interface state once the driver is
* fully initialized ("ready").
*
* @param dev pointer to IEEE 802.15.4 driver device
*
* @retval 0 continuous carrier wave transmission started
* @retval -EALREADY The driver was already in "TESTING" state and
* emitting a continuous carrier.
* @retval -EIO not started
*/
int (*continuous_carrier)(const struct device *dev);
/**
* @brief Set or update driver configuration.
*
* @details The method blocks until the interface has been reconfigured
* atomically with respect to ongoing package reception, transmission or
* any other ongoing driver operation.
*
* @note Implementations SHALL be **isr-ok** and MAY **sleep**. MAY be
* called in any interface state once the driver is fully initialized
* ("ready"). Some configuration options may not be supported in all
* interface operational states, see the detailed specifications in @ref
* ieee802154_config_type. In this case the operation returns `-EACCES`.
*
* @param dev pointer to IEEE 802.15.4 driver device
* @param type the configuration type to be set
* @param config the configuration parameters to be set for the given
* configuration type
*
* @retval 0 configuration successful
* @retval -EINVAL The configuration parameters are invalid for the
* given configuration type.
* @retval -ENOTSUP The given configuration type is not supported by
* this driver.
* @retval -EACCES The given configuration type is supported by this
* driver but cannot be configured in the current interface operational
* state.
* @retval -ENOMEM The configuration cannot be saved due to missing
* memory resources.
* @retval -ENOENT The resource referenced in the configuration
* parameters cannot be found in the configuration.
* @retval -EWOULDBLOCK The operation is called from ISR context but
* temporarily cannot be executed without blocking.
* @retval -EIO An internal error occurred while trying to configure the
* given configuration parameter.
*/
int (*configure)(const struct device *dev,
enum ieee802154_config_type type,
const struct ieee802154_config *config);
/**
* @brief Run an energy detection scan.
*
* @note requires IEEE802154_HW_ENERGY_SCAN capability
*
* @note The radio channel must be set prior to calling this function.
*
* @note Implementations SHALL be **isr-ok** and MAY **sleep**. SHALL
* return `-ENETDOWN` unless the interface is "UP".
*
* @param dev pointer to IEEE 802.15.4 driver device
* @param duration duration of energy scan in ms
* @param done_cb function called when the energy scan has finished
*
* @retval 0 the energy detection scan was successfully scheduled
*
* @retval -EBUSY the energy detection scan could not be scheduled at
* this time
* @retval -EALREADY a previous energy detection scan has not finished
* yet.
* @retval -ENETDOWN The interface is not "UP".
* @retval -ENOTSUP This driver does not support energy scans.
* @retval -EIO The energy detection procedure could not be executed.
*/
int (*ed_scan)(const struct device *dev,
uint16_t duration,
energy_scan_done_cb_t done_cb);
/**
* @brief Get the current time in nanoseconds relative to the network
* subsystem's local uptime clock as represented by this network
* interface.
*
* See @ref net_time_t for semantic details.
*
* @note requires IEEE802154_HW_TXTIME and/or IEEE802154_HW_RXTIME
* capabilities. Implementations SHALL be **isr-ok** and MUST NOT
* **sleep**. MAY be called in any interface state once the driver is
* fully initialized ("ready").
*
* @param dev pointer to IEEE 802.15.4 driver device
*
* @return nanoseconds relative to the network subsystem's local clock,
* -1 if an error occurred or the operation is not supported
*/
net_time_t (*get_time)(const struct device *dev);
/**
* @brief Get the current estimated worst case accuracy (maximum
* deviation from the nominal frequency) of the network subsystem's
* local clock used to calculate tolerances and guard times when
* scheduling delayed receive or transmit radio operations.
*
* The deviation is given in units of PPM (parts per million).
*
* @note requires IEEE802154_HW_TXTIME and/or IEEE802154_HW_RXTIME
* capabilities.
*
* @note Implementations may estimate this value based on current
* operating conditions (e.g. temperature). Implementations SHALL be
* **isr-ok** and MUST NOT **sleep**. MAY be called in any interface
* state once the driver is fully initialized ("ready").
*
* @param dev pointer to IEEE 802.15.4 driver device
*
* @return current estimated clock accuracy in PPM
*/
uint8_t (*get_sch_acc)(const struct device *dev);
/**
* @brief Get the value of a driver specific attribute.
*
* @note This function SHALL NOT return any values configurable by the
* MAC (L2) layer. It is reserved to non-boolean (i.e. scalar or
* structured) attributes that originate from the driver implementation
* and cannot be directly or indirectly derived by L2. Boolean
* attributes SHALL be implemented as @ref ieee802154_hw_caps.
*
* @note Implementations SHALL be **isr-ok** and MUST NOT **sleep**. MAY
* be called in any interface state once the driver is fully initialized
* ("ready").
*
* @retval 0 The requested attribute is supported by the driver and the
* value can be retrieved from the corresponding @ref ieee802154_attr_value
* member.
*
* @retval -ENOENT The driver does not provide the requested attribute.
* The value structure has not been updated with attribute data. The
* content of the value attribute is undefined.
*/
int (*attr_get)(const struct device *dev,
enum ieee802154_attr attr,
struct ieee802154_attr_value *value);
};
/* Make sure that the network interface API is properly setup inside
* IEEE 802.15.4 driver API struct (it is the first one).
*/
BUILD_ASSERT(offsetof(struct ieee802154_radio_api, iface_api) == 0);
/** @} */
/**
* @name IEEE 802.15.4 driver utils
* @{
*/
/** @cond INTERNAL_HIDDEN */
#define IEEE802154_AR_FLAG_SET (0x20)
/** INTERNAL_HIDDEN @endcond */
/**
 * @brief Test whether the frame contained in the given fragment has its
 * AR (acknowledgment request) flag set.
 *
 * @param frag A valid pointer on a net_buf structure, must not be NULL,
 * and its length should be at least 1 byte (ImmAck frames are the
 * shortest supported frames with 3 bytes excluding FCS).
 *
 * @return true if AR flag is set, false otherwise
 */
static inline bool ieee802154_is_ar_flag_set(struct net_buf *frag)
{
	/* The AR bit lives in the first byte of the MHR frame control field. */
	return (frag->data[0] & IEEE802154_AR_FLAG_SET) != 0;
}
/** @} */
/**
* @name IEEE 802.15.4 driver callbacks
* @{
*/
/* TODO: Fix drivers to either unref the packet before they return NET_OK or to
* return NET_CONTINUE instead. See note below.
*/
/**
 * @brief IEEE 802.15.4 driver ACK handling callback into L2 that drivers must
 * call when receiving an ACK packet.
 *
 * @details The IEEE 802.15.4 standard prescribes generic procedures for ACK
 * handling on L2 (MAC) level. L2 stacks therefore have to provide a
 * fast and re-usable generic implementation of this callback for
 * drivers to call when receiving an ACK packet.
*
* Note: This function is part of Zephyr's 802.15.4 stack driver -> L2
* "inversion-of-control" adaptation API and must be implemented by all
* IEEE 802.15.4 L2 stacks.
*
* @param iface A valid pointer on a network interface that received the packet
* @param pkt A valid pointer on a packet to check
*
 * @return NET_OK if L2 handles the ACK packet, NET_CONTINUE or NET_DROP otherwise.
 *
 * @warning Deviating from other functions in the net stack returning
 * net_verdict, this function will not unref the packet even if it returns
 * NET_OK.
*/
extern enum net_verdict ieee802154_handle_ack(struct net_if *iface, struct net_pkt *pkt);
/**
* @brief IEEE 802.15.4 driver initialization callback into L2 called by drivers
* to initialize the active L2 stack for a given interface.
*
* @details Drivers must call this function as part of their own initialization
* routine.
*
* Note: This function is part of Zephyr's 802.15.4 stack driver -> L2
* "inversion-of-control" adaptation API and must be implemented by all
* IEEE 802.15.4 L2 stacks.
*
* @param iface A valid pointer on a network interface
*/
#ifndef CONFIG_IEEE802154_RAW_MODE
extern void ieee802154_init(struct net_if *iface);
#else
#define ieee802154_init(_iface_)
#endif /* CONFIG_IEEE802154_RAW_MODE */
/** @} */
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_IEEE802154_RADIO_H_ */
``` | /content/code_sandbox/include/zephyr/net/ieee802154_radio.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 19,833 |
```objective-c
/** @file
* @brief HTTP client API
*
 * An API for applications to perform HTTP requests
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_CLIENT_H_
#define ZEPHYR_INCLUDE_NET_HTTP_CLIENT_H_
/**
* @brief HTTP client API
* @defgroup http_client HTTP client API
* @since 2.1
* @version 0.2.0
* @ingroup networking
* @{
*/
#include <zephyr/kernel.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/http/parser.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @cond INTERNAL_HIDDEN */
#if !defined(HTTP_CRLF)
#define HTTP_CRLF "\r\n"
#endif
#if !defined(HTTP_STATUS_STR_SIZE)
#define HTTP_STATUS_STR_SIZE 32
#endif
/** @endcond */
/** Tells the response callback whether more response data is expected */
enum http_final_call {
	HTTP_DATA_MORE = 0,  /**< More data will come */
	HTTP_DATA_FINAL = 1, /**< End of data */
};
struct http_request;
struct http_response;
/**
* @typedef http_payload_cb_t
* @brief Callback used when data needs to be sent to the server.
*
* @param sock Socket id of the connection
* @param req HTTP request information
* @param user_data User specified data specified in http_client_req()
*
* @return >=0 amount of data sent, in this case http_client_req() should
* continue sending data,
* <0 if http_client_req() should return the error code to the
* caller.
*/
typedef int (*http_payload_cb_t)(int sock,
struct http_request *req,
void *user_data);
/**
* @typedef http_header_cb_t
* @brief Callback can be used if application wants to construct additional
* HTTP headers when the HTTP request is sent. Usage of this is optional.
*
* @param sock Socket id of the connection
* @param req HTTP request information
* @param user_data User specified data specified in http_client_req()
*
* @return >=0 amount of data sent, in this case http_client_req() should
* continue sending data,
* <0 if http_client_req() should return the error code to the
* caller.
*/
typedef int (*http_header_cb_t)(int sock,
struct http_request *req,
void *user_data);
/**
* @typedef http_response_cb_t
* @brief Callback used when data is received from the server.
*
* @param rsp HTTP response information
* @param final_data Does this data buffer contain all the data or
* is there still more data to come.
* @param user_data User specified data specified in http_client_req()
*/
typedef void (*http_response_cb_t)(struct http_response *rsp,
enum http_final_call final_data,
void *user_data);
/**
 * HTTP response from the server.
 */
struct http_response {
	/** HTTP parser settings for the application usage */
	const struct http_parser_settings *http_cb;
	/** User provided HTTP response callback which is
	 * called when a response is received to a sent HTTP
	 * request.
	 */
	http_response_cb_t cb;
	/**
	 * Layout of recv_buf.
	 *
	 * When the buffer holds trailing header data plus body:
	 *
	 *                          |<----- body_frag_len ----->|
	 * |<--------------------- data_len ------------------->|
	 * ------------------------------------------------------
	 * ..header | header        | body                      | body..
	 * ------------------------------------------------------
	 * ^recv_buf                ^body_frag_start
	 *
	 * When the buffer holds body data only:
	 *
	 * |<------------------ body_frag_len ----------------->|
	 * |<--------------------- data_len ------------------->|
	 * ------------------------------------------------------
	 * | body                                               |
	 * ------------------------------------------------------
	 * ^recv_buf
	 * ^body_frag_start
	 *
	 * Invariants:
	 *   body_frag_start >= recv_buf
	 *   body_frag_len == data_len - (body_frag_start - recv_buf)
	 */
	/** Start address of the body fragment contained in the recv_buf */
	uint8_t *body_frag_start;
	/** Length of the body fragment contained in the recv_buf */
	size_t body_frag_len;
	/** Where the response is stored, this is to be
	 * provided by the user.
	 */
	uint8_t *recv_buf;
	/** Response buffer maximum length */
	size_t recv_buf_len;
	/** Length of the data in the result buf. If the value
	 * is larger than recv_buf_len, then it means that
	 * the data is truncated and could not be fully copied
	 * into recv_buf. This can only happen if the user
	 * did not set the response callback. If the callback
	 * is set, then the HTTP client API will call response
	 * callback many times so that all the data is
	 * delivered to the user. Will be zero in the event of
	 * a null response.
	 */
	size_t data_len;
	/** HTTP Content-Length field value. Will be set to zero
	 * in the event of a null response.
	 */
	size_t content_length;
	/** Amount of data given to the response callback so far, including the
	 * current data given to the callback. This should be equal to the
	 * content_length field once the entire body has been received. Will be
	 * zero if a null response is given.
	 */
	size_t processed;
	/** See RFC 9112, section 4 ("status line") for more information.
	 * The status-code element is a 3-digit integer code
	 *
	 * The reason-phrase element exists for the sole
	 * purpose of providing a textual description
	 * associated with the numeric status code. A client
	 * SHOULD ignore the reason-phrase content.
	 *
	 * Will be blank if a null HTTP response is given.
	 */
	char http_status[HTTP_STATUS_STR_SIZE];
	/** Numeric HTTP status code which corresponds to the
	 * textual description. Set to zero if null response is
	 * given. Otherwise, will be a 3-digit integer code if
	 * valid HTTP response is given.
	 */
	uint16_t http_status_code;
	uint8_t cl_present : 1;       /**< Is Content-Length field present */
	uint8_t body_found : 1;       /**< Is message body found */
	uint8_t message_complete : 1; /**< Is HTTP message parsing complete */
};
/** HTTP client internal data that the application should not touch
 */
struct http_client_internal_data {
	/** HTTP parser context */
	struct http_parser parser;
	/** HTTP parser settings */
	struct http_parser_settings parser_settings;
	/** HTTP response specific data (filled by http_client_req() when
	 * data is received)
	 */
	struct http_response response;
	/** User data (as passed to http_client_req(), handed back to the
	 * request callbacks)
	 */
	void *user_data;
	/** HTTP socket (descriptor of the connection being used) */
	int sock;
};
/**
 * HTTP client request. This contains all the data that is needed when doing
 * a HTTP request.
 */
struct http_request {
	/** HTTP client request internal data */
	struct http_client_internal_data internal;
	/* User should fill in following parameters */
	/** The HTTP method: GET, HEAD, OPTIONS, POST, ... */
	enum http_method method;
	/** User supplied callback function to call when response is
	 * received.
	 */
	http_response_cb_t response;
	/** User supplied list of HTTP callback functions if the
	 * calling application wants to know the parsing status or the HTTP
	 * fields. This is optional and normally not needed.
	 */
	const struct http_parser_settings *http_cb;
	/** User supplied buffer where received data is stored */
	uint8_t *recv_buf;
	/** Length of the user supplied receive buffer */
	size_t recv_buf_len;
	/** The URL for this request, for example: /index.html */
	const char *url;
	/** The HTTP protocol, for example "HTTP/1.1" */
	const char *protocol;
	/** The HTTP header fields (application specific)
	 * The Content-Type may be specified here or in the next field.
	 * Depending on your application, the Content-Type may vary, however
	 * some header fields may remain constant through the application's
	 * life cycle. This is a NULL terminated list of header fields.
	 */
	const char **header_fields;
	/** The value of the Content-Type header field, may be NULL */
	const char *content_type_value;
	/** Hostname to be used in the request */
	const char *host;
	/** Port number to be used in the request */
	const char *port;
	/** User supplied callback function to call when payload
	 * needs to be sent. This can be NULL in which case the payload field
	 * in http_request is used. The idea of this payload callback is to
	 * allow the user to send more data than is practical to store in
	 * allocated memory.
	 */
	http_payload_cb_t payload_cb;
	/** Payload, may be NULL */
	const char *payload;
	/** Payload length is used to calculate Content-Length. Set to 0
	 * for chunked transfers.
	 */
	size_t payload_len;
	/** User supplied callback function to call when optional headers need
	 * to be sent. This can be NULL, in which case the optional_headers
	 * field in http_request is used. The idea of this optional_headers
	 * callback is to allow the user to send more HTTP header data than is
	 * practical to store in allocated memory.
	 */
	http_header_cb_t optional_headers_cb;
	/** A NULL terminated list of any optional headers that
	 * should be added to the HTTP request. May be NULL.
	 * If the optional_headers_cb is specified, then this field is ignored.
	 * Note that there are two similar fields that contain headers,
	 * the header_fields above and this optional_headers. This is done
	 * like this to support the Websocket use case where Websocket will use
	 * the header_fields variable and any optional application specific
	 * headers will be placed into this field.
	 */
	const char **optional_headers;
};
/**
* @brief Do a HTTP request. The callback is called when data is received
* from the HTTP server. The caller must have created a connection to the
* server before calling this function so connect() call must have be done
* successfully for the socket.
*
* @param sock Socket id of the connection.
* @param req HTTP request information
* @param timeout Max timeout to wait for the data. The timeout value cannot be
* 0 as there would be no time to receive the data.
* The timeout value is in milliseconds.
* @param user_data User specified data that is passed to the callback.
*
* @return <0 if error, >=0 amount of data sent to the server
*/
int http_client_req(int sock, struct http_request *req,
int32_t timeout, void *user_data);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_HTTP_CLIENT_H_ */
``` | /content/code_sandbox/include/zephyr/net/http/client.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,331 |
```objective-c
/** @file
* @brief HTTP response status codes
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_STATUS_H_
#define ZEPHYR_INCLUDE_NET_HTTP_STATUS_H_
/**
* @brief HTTP response status codes
* @defgroup http_status_codes HTTP response status codes
* @since 3.3
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief HTTP response status codes
*
* @note HTTP response status codes are subject to IANA approval.
*
 * @see <a href="https://www.iana.org/assignments/http-status-codes">Hypertext Transfer Protocol (HTTP) Status Code Registry</a>
 * @see <a href="https://datatracker.ietf.org/doc/html/rfc9110#section-15">RFC9110</a>
 * @see <a href="https://developer.mozilla.org/en-US/docs/Web/HTTP/Status">HTTP response status codes</a>
*/
enum http_status {
	/* 1xx: Informational */
	HTTP_100_CONTINUE = 100, /**< Continue */
	HTTP_101_SWITCHING_PROTOCOLS = 101, /**< Switching Protocols */
	HTTP_102_PROCESSING = 102, /**< Processing */
	HTTP_103_EARLY_HINTS = 103, /**< Early Hints */
	/* 2xx: Successful */
	HTTP_200_OK = 200, /**< OK */
	HTTP_201_CREATED = 201, /**< Created */
	HTTP_202_ACCEPTED = 202, /**< Accepted */
	HTTP_203_NON_AUTHORITATIVE_INFORMATION = 203, /**< Non-Authoritative Information */
	HTTP_204_NO_CONTENT = 204, /**< No Content */
	HTTP_205_RESET_CONTENT = 205, /**< Reset Content */
	HTTP_206_PARTIAL_CONTENT = 206, /**< Partial Content */
	HTTP_207_MULTI_STATUS = 207, /**< Multi-Status */
	HTTP_208_ALREADY_REPORTED = 208, /**< Already Reported */
	HTTP_226_IM_USED = 226, /**< IM Used */
	/* 3xx: Redirection */
	HTTP_300_MULTIPLE_CHOICES = 300, /**< Multiple Choices */
	HTTP_301_MOVED_PERMANENTLY = 301, /**< Moved Permanently */
	HTTP_302_FOUND = 302, /**< Found */
	HTTP_303_SEE_OTHER = 303, /**< See Other */
	HTTP_304_NOT_MODIFIED = 304, /**< Not Modified */
	HTTP_305_USE_PROXY = 305, /**< Use Proxy */
	HTTP_306_UNUSED = 306, /**< unused */
	HTTP_307_TEMPORARY_REDIRECT = 307, /**< Temporary Redirect */
	HTTP_308_PERMANENT_REDIRECT = 308, /**< Permanent Redirect */
	/* 4xx: Client error */
	HTTP_400_BAD_REQUEST = 400, /**< Bad Request */
	HTTP_401_UNAUTHORIZED = 401, /**< Unauthorized */
	HTTP_402_PAYMENT_REQUIRED = 402, /**< Payment Required */
	HTTP_403_FORBIDDEN = 403, /**< Forbidden */
	HTTP_404_NOT_FOUND = 404, /**< Not Found */
	HTTP_405_METHOD_NOT_ALLOWED = 405, /**< Method Not Allowed */
	HTTP_406_NOT_ACCEPTABLE = 406, /**< Not Acceptable */
	HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407, /**< Proxy Authentication Required */
	HTTP_408_REQUEST_TIMEOUT = 408, /**< Request Timeout */
	HTTP_409_CONFLICT = 409, /**< Conflict */
	HTTP_410_GONE = 410, /**< Gone */
	HTTP_411_LENGTH_REQUIRED = 411, /**< Length Required */
	HTTP_412_PRECONDITION_FAILED = 412, /**< Precondition Failed */
	HTTP_413_PAYLOAD_TOO_LARGE = 413, /**< Payload Too Large */
	HTTP_414_URI_TOO_LONG = 414, /**< URI Too Long */
	HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415, /**< Unsupported Media Type */
	HTTP_416_RANGE_NOT_SATISFIABLE = 416, /**< Range Not Satisfiable */
	HTTP_417_EXPECTATION_FAILED = 417, /**< Expectation Failed */
	HTTP_418_IM_A_TEAPOT = 418, /**< I'm a teapot */
	HTTP_421_MISDIRECTED_REQUEST = 421, /**< Misdirected Request */
	HTTP_422_UNPROCESSABLE_ENTITY = 422, /**< Unprocessable Entity */
	HTTP_423_LOCKED = 423, /**< Locked */
	HTTP_424_FAILED_DEPENDENCY = 424, /**< Failed Dependency */
	HTTP_425_TOO_EARLY = 425, /**< Too Early */
	HTTP_426_UPGRADE_REQUIRED = 426, /**< Upgrade Required */
	HTTP_428_PRECONDITION_REQUIRED = 428, /**< Precondition Required */
	HTTP_429_TOO_MANY_REQUESTS = 429, /**< Too Many Requests */
	HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431, /**< Request Header Fields Too Large */
	HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS = 451, /**< Unavailable For Legal Reasons */
	/* 5xx: Server error */
	HTTP_500_INTERNAL_SERVER_ERROR = 500, /**< Internal Server Error */
	HTTP_501_NOT_IMPLEMENTED = 501, /**< Not Implemented */
	HTTP_502_BAD_GATEWAY = 502, /**< Bad Gateway */
	HTTP_503_SERVICE_UNAVAILABLE = 503, /**< Service Unavailable */
	HTTP_504_GATEWAY_TIMEOUT = 504, /**< Gateway Timeout */
	HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505, /**< HTTP Version Not Supported */
	HTTP_506_VARIANT_ALSO_NEGOTIATES = 506, /**< Variant Also Negotiates */
	HTTP_507_INSUFFICIENT_STORAGE = 507, /**< Insufficient Storage */
	HTTP_508_LOOP_DETECTED = 508, /**< Loop Detected */
	HTTP_510_NOT_EXTENDED = 510, /**< Not Extended */
	HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511, /**< Network Authentication Required */
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif
``` | /content/code_sandbox/include/zephyr/net/http/status.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,185 |
```objective-c
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_PARSER_STATE_H_
#define ZEPHYR_INCLUDE_NET_HTTP_PARSER_STATE_H_
#ifdef __cplusplus
extern "C" {
#endif
enum state {
	s_dead = 1, /* important that this is > 0 */

	s_start_req_or_res,
	s_res_or_resp_H,

	/* Response status-line states */
	s_start_res,
	s_res_H,
	s_res_HT,
	s_res_HTT,
	s_res_HTTP,
	s_res_first_http_major,
	s_res_http_major,
	s_res_first_http_minor,
	s_res_http_minor,
	s_res_first_status_code,
	s_res_status_code,
	s_res_status_start,
	s_res_status,
	s_res_line_almost_done,

	/* Request-line states (method, URL, protocol version) */
	s_start_req,
	s_req_method,
	s_req_spaces_before_url,
	s_req_schema,
	s_req_schema_slash,
	s_req_schema_slash_slash,
	s_req_server_start,
	s_req_server,
	s_req_server_with_at,
	s_req_path,
	s_req_query_string_start,
	s_req_query_string,
	s_req_fragment_start,
	s_req_fragment,
	s_req_http_start,
	s_req_http_H,
	s_req_http_HT,
	s_req_http_HTT,
	s_req_http_HTTP,
	s_req_first_http_major,
	s_req_http_major,
	s_req_first_http_minor,
	s_req_http_minor,
	s_req_line_almost_done,

	/* Header field/value states */
	s_header_field_start,
	s_header_field,
	s_header_value_discard_ws,
	s_header_value_discard_ws_almost_done,
	s_header_value_discard_lws,
	s_header_value_start,
	s_header_value,
	s_header_value_lws,

	s_header_almost_done,

	/* Chunked transfer-coding chunk-size line states */
	s_chunk_size_start,
	s_chunk_size,
	s_chunk_parameters,
	s_chunk_size_almost_done,

	s_headers_almost_done,
	s_headers_done,

	/* Important: 's_headers_done' must be the last 'header' state. All
	 * states beyond this must be 'body' states. It is used for overflow
	 * checking. See the PARSING_HEADER() macro.
	 */

	/* Body states */
	s_chunk_data,
	s_chunk_data_almost_done,
	s_chunk_data_done,

	s_body_identity,
	s_body_identity_eof,

	s_message_done
};
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/net/http/parser_state.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 655 |
```objective-c
/*
*
*/
/**
* @file
* @brief HTTP2 frame information.
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_SERVER_FRAME_H_
#define ZEPHYR_INCLUDE_NET_HTTP_SERVER_FRAME_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/** HTTP2 frame types (frame type codes as defined in RFC 9113, section 6) */
enum http2_frame_type {
	/** Data frame */
	HTTP2_DATA_FRAME = 0x00,
	/** Headers frame */
	HTTP2_HEADERS_FRAME = 0x01,
	/** Priority frame */
	HTTP2_PRIORITY_FRAME = 0x02,
	/** Reset stream frame */
	HTTP2_RST_STREAM_FRAME = 0x03,
	/** Settings frame */
	HTTP2_SETTINGS_FRAME = 0x04,
	/** Push promise frame */
	HTTP2_PUSH_PROMISE_FRAME = 0x05,
	/** Ping frame */
	HTTP2_PING_FRAME = 0x06,
	/** Goaway frame */
	HTTP2_GOAWAY_FRAME = 0x07,
	/** Window update frame */
	HTTP2_WINDOW_UPDATE_FRAME = 0x08,
	/** Continuation frame */
	HTTP2_CONTINUATION_FRAME = 0x09
};
/** @cond INTERNAL_HIDDEN */
#define HTTP2_FLAG_SETTINGS_ACK 0x01
#define HTTP2_FLAG_END_HEADERS 0x04
#define HTTP2_FLAG_END_STREAM 0x01
#define HTTP2_FLAG_PADDED 0x08
#define HTTP2_FLAG_PRIORITY 0x20
#define HTTP2_FRAME_HEADER_SIZE 9
#define HTTP2_FRAME_LENGTH_OFFSET 0
#define HTTP2_FRAME_TYPE_OFFSET 3
#define HTTP2_FRAME_FLAGS_OFFSET 4
#define HTTP2_FRAME_STREAM_ID_OFFSET 5
#define HTTP2_FRAME_STREAM_ID_MASK 0x7FFFFFFF
#define HTTP2_HEADERS_FRAME_PRIORITY_LEN 5
#define HTTP2_PRIORITY_FRAME_LEN 5
#define HTTP2_RST_STREAM_FRAME_LEN 4
/** @endcond */
/** HTTP2 settings field. Matches the on-the-wire layout of a single
 * SETTINGS entry (16-bit identifier followed by a 32-bit value, RFC 9113,
 * section 6.5.1), hence the __packed attribute.
 */
struct http2_settings_field {
	uint16_t id; /**< Field id */
	uint32_t value; /**< Field value */
} __packed;
/** @brief HTTP2 settings (SETTINGS frame parameter identifiers) */
enum http2_settings {
	/** Header table size */
	HTTP2_SETTINGS_HEADER_TABLE_SIZE = 1,
	/** Enable push */
	HTTP2_SETTINGS_ENABLE_PUSH = 2,
	/** Maximum number of concurrent streams */
	HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS = 3,
	/** Initial window size */
	HTTP2_SETTINGS_INITIAL_WINDOW_SIZE = 4,
	/** Max frame size */
	HTTP2_SETTINGS_MAX_FRAME_SIZE = 5,
	/** Max header list size */
	HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE = 6,
};
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/net/http/frame.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 571 |
```objective-c
/*
*
*/
/**
* @file
* @brief Connectivity implementation for drivers exposing the wifi_mgmt API
*/
#ifndef ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_WIFI_MGMT_H_
#define ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_WIFI_MGMT_H_
#include <zephyr/net/conn_mgr_connectivity_impl.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* Context type for generic WIFI_MGMT connectivity backend.
*/
#define CONNECTIVITY_WIFI_MGMT_CTX_TYPE void *
/**
* @brief Associate the generic WIFI_MGMT implementation with a network device
*
* @param dev_id Network device id.
*/
#define CONNECTIVITY_WIFI_MGMT_BIND(dev_id) \
IF_ENABLED(CONFIG_NET_CONNECTION_MANAGER_CONNECTIVITY_WIFI_MGMT, \
(CONN_MGR_CONN_DECLARE_PUBLIC(CONNECTIVITY_WIFI_MGMT); \
CONN_MGR_BIND_CONN(dev_id, CONNECTIVITY_WIFI_MGMT)))
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_WIFI_MGMT_H_ */
``` | /content/code_sandbox/include/zephyr/net/conn_mgr/connectivity_wifi_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 214 |
```objective-c
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef ZEPHYR_INCLUDE_NET_HTTP_PARSER_URL_H_
#define ZEPHYR_INCLUDE_NET_HTTP_PARSER_URL_H_
#include <sys/types.h>
#include <zephyr/types.h>
#include <stddef.h>
#include <zephyr/net/http/parser_state.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Indices into http_parser_url.field_data[] for each component of a
 * parsed URL. A field is present only if the corresponding (1 << UF_*)
 * bit is set in http_parser_url.field_set.
 */
enum http_parser_url_fields {
	  UF_SCHEMA = 0
	, UF_HOST = 1
	, UF_PORT = 2
	, UF_PATH = 3
	, UF_QUERY = 4
	, UF_FRAGMENT = 5
	, UF_USERINFO = 6
	, UF_MAX = 7
};
/* Result structure for http_parser_parse_url().
 *
 * Callers should index into field_data[] with UF_* values iff field_set
 * has the relevant (1 << UF_*) bit set. As a courtesy to clients (and
 * because we probably have padding left over), we convert any port to
 * a uint16_t.
 */
struct http_parser_url {
	uint16_t field_set; /* Bitmask of (1 << UF_*) values */
	uint16_t port; /* Converted UF_PORT string */

	struct {
		uint16_t off; /* Offset into buffer in which field
			       * starts
			       */
		uint16_t len; /* Length of run in buffer */
	} field_data[UF_MAX];
};
enum state parse_url_char(enum state s, const char ch);
/* Initialize all http_parser_url members to 0 */
void http_parser_url_init(struct http_parser_url *u);
/* Parse a URL; return nonzero on failure */
int http_parser_parse_url(const char *buf, size_t buflen,
int is_connect, struct http_parser_url *u);
#ifdef __cplusplus
}
#endif
#endif
``` | /content/code_sandbox/include/zephyr/net/http/parser_url.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 601 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_RTIO_WORKQ_H_
#define ZEPHYR_INCLUDE_RTIO_WORKQ_H_
#include <stdint.h>
#include <zephyr/device.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/p4wq.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Callback API to execute work operation.
*
* @param iodev_sqe Associated SQE operation.
*/
typedef void (*rtio_work_submit_t)(struct rtio_iodev_sqe *iodev_sqe);
/**
 * @brief RTIO Work request.
 *
 * Describes a unit of work performing an RTIO operation, decoupled
 * from its submission, to be executed on the RTIO work-queues.
 */
struct rtio_work_req {
	/** Work item used to submit the unit of work. */
	struct k_p4wq_work work;
	/** Handle to IODEV SQE containing the operation.
	 * This is filled inside @ref rtio_work_req_submit.
	 */
	struct rtio_iodev_sqe *iodev_sqe;
	/** Callback handler where the synchronous operation may be executed.
	 * This is filled inside @ref rtio_work_req_submit.
	 */
	rtio_work_submit_t handler;
};
/**
* @brief Allocate item to perform an RTIO work request.
*
* @details This allocation utilizes its internal memory slab with
* pre-allocated elements.
*
* @return Pointer to allocated item if successful.
* @return NULL if allocation failed.
*/
struct rtio_work_req *rtio_work_req_alloc(void);
/**
* @brief Submit RTIO work request.
*
* @param req Item to fill with request information.
* @param iodev_sqe RTIO Operation information.
* @param handler Callback to handler where work operation is performed.
*/
void rtio_work_req_submit(struct rtio_work_req *req,
struct rtio_iodev_sqe *iodev_sqe,
rtio_work_submit_t handler);
/**
* @brief Obtain number of currently used items from the pre-allocated pool.
*
* @return Number of used items.
*/
uint32_t rtio_work_req_used_count_get(void);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_RTIO_WORKQ_H_ */
``` | /content/code_sandbox/include/zephyr/rtio/work.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 480 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for ISO-TP (ISO 15765-2:2016)
*
* ISO-TP is a transport protocol for CAN (Controller Area Network)
*/
#ifndef ZEPHYR_INCLUDE_CANBUS_ISOTP_H_
#define ZEPHYR_INCLUDE_CANBUS_ISOTP_H_
/**
* @brief CAN ISO-TP Protocol
* @defgroup can_isotp CAN ISO-TP Protocol
* @ingroup connectivity
* @{
*/
#include <zephyr/drivers/can.h>
#include <zephyr/types.h>
#include <zephyr/net/buf.h>
/*
* Abbreviations
* BS Block Size
* CAN_DL CAN LL data size
* CF Consecutive Frame
* CTS Continue to send
* DLC Data length code
* FC Flow Control
* FF First Frame
* SF Single Frame
* FS Flow Status
* AE Address Extension
* SN Sequence Number
* ST Separation time
* SA Source Address
* TA Target Address
* RX_DL CAN RX LL data size
* TX_DL CAN TX LL data size
* PCI Process Control Information
*/
/*
* N_Result according to ISO 15765-2:2016
* ISOTP_ prefix is used to be zephyr conform
*/
/** Completed successfully */
#define ISOTP_N_OK 0
/** Ar/As has timed out */
#define ISOTP_N_TIMEOUT_A -1
/** Reception of next FC has timed out */
#define ISOTP_N_TIMEOUT_BS -2
/** Cr has timed out */
#define ISOTP_N_TIMEOUT_CR -3
/** Unexpected sequence number */
#define ISOTP_N_WRONG_SN -4
/** Invalid flow status received */
#define ISOTP_N_INVALID_FS -5
/** Unexpected PDU received */
#define ISOTP_N_UNEXP_PDU -6
/** Maximum number of WAIT flowStatus PDUs exceeded */
#define ISOTP_N_WFT_OVRN -7
/** FlowStatus OVFLW PDU was received */
#define ISOTP_N_BUFFER_OVERFLW -8
/** General error */
#define ISOTP_N_ERROR -9
/** Implementation specific errors */
/** Can't bind or send because the CAN device has no filter left*/
#define ISOTP_NO_FREE_FILTER -10
/** No net buffer left to allocate */
#define ISOTP_NO_NET_BUF_LEFT -11
/** Not sufficient space in the buffer left for the data */
#define ISOTP_NO_BUF_DATA_LEFT -12
/** No context buffer left to allocate */
#define ISOTP_NO_CTX_LEFT -13
/** Timeout for recv */
#define ISOTP_RECV_TIMEOUT -14
/*
* CAN ID filtering for ISO-TP fixed addressing according to SAE J1939
*
* Format of 29-bit CAN identifier:
* ------------------------------------------------------
* | 28 .. 26 | 25 | 24 | 23 .. 16 | 15 .. 8 | 7 .. 0 |
* ------------------------------------------------------
* | Priority | EDP | DP | N_TAtype | N_TA | N_SA |
* ------------------------------------------------------
*/
/** Position of fixed source address (SA) */
#define ISOTP_FIXED_ADDR_SA_POS (CONFIG_ISOTP_FIXED_ADDR_SA_POS)
/** Mask to obtain fixed source address (SA) */
#define ISOTP_FIXED_ADDR_SA_MASK (CONFIG_ISOTP_FIXED_ADDR_SA_MASK)
/** Position of fixed target address (TA) */
#define ISOTP_FIXED_ADDR_TA_POS (CONFIG_ISOTP_FIXED_ADDR_TA_POS)
/** Mask to obtain fixed target address (TA) */
#define ISOTP_FIXED_ADDR_TA_MASK (CONFIG_ISOTP_FIXED_ADDR_TA_MASK)
/** Position of priority in fixed addressing mode */
#define ISOTP_FIXED_ADDR_PRIO_POS (CONFIG_ISOTP_FIXED_ADDR_PRIO_POS)
/** Mask for priority in fixed addressing mode */
#define ISOTP_FIXED_ADDR_PRIO_MASK (CONFIG_ISOTP_FIXED_ADDR_PRIO_MASK)
/** CAN filter RX mask to match any priority and source address (SA) */
#define ISOTP_FIXED_ADDR_RX_MASK (CONFIG_ISOTP_FIXED_ADDR_RX_MASK)
#ifdef __cplusplus
extern "C" {
#endif
/**
* @name ISO-TP message ID flags
* @anchor ISOTP_MSG_FLAGS
*
* @{
*/
/** Message uses ISO-TP extended addressing (first payload byte of CAN frame) */
#define ISOTP_MSG_EXT_ADDR BIT(0)
/**
* Message uses ISO-TP fixed addressing (according to SAE J1939). Only valid in combination with
* ``ISOTP_MSG_IDE``.
*/
#define ISOTP_MSG_FIXED_ADDR BIT(1)
/** Message uses extended (29-bit) CAN ID */
#define ISOTP_MSG_IDE BIT(2)
/** Message uses CAN FD format (FDF) */
#define ISOTP_MSG_FDF BIT(3)
/** Message uses CAN FD Baud Rate Switch (BRS). Only valid in combination with ``ISOTP_MSG_FDF``. */
#define ISOTP_MSG_BRS BIT(4)
/** @} */
/**
* @brief ISO-TP message id struct
*
* Used to pass addresses to the bind and send functions.
*/
struct isotp_msg_id {
	/**
	 * CAN identifier (11-bit or 29-bit depending on ISOTP_MSG_IDE)
	 *
	 * If ISO-TP fixed addressing is used, isotp_bind ignores SA and
	 * priority sections and modifies TA section in flow control frames.
	 */
	union {
		uint32_t std_id : 11; /**< Standard (11-bit) CAN identifier */
		uint32_t ext_id : 29; /**< Extended (29-bit) CAN identifier */
	};
	/** ISO-TP extended address (only used when ISOTP_MSG_EXT_ADDR is set) */
	uint8_t ext_addr;
	/**
	 * ISO-TP frame data length (TX_DL for TX address or RX_DL for RX address).
	 *
	 * Valid values are 8 for classical CAN or 8, 12, 16, 20, 24, 32, 48 and 64 for CAN FD.
	 *
	 * 0 will be interpreted as 8 or 64 (if ISOTP_MSG_FDF is set).
	 *
	 * The value for incoming transmissions (RX_DL) is determined automatically based on the
	 * received first frame and does not need to be set during initialization.
	 */
	uint8_t dl;
	/** Flags; a bitwise OR of the @ref ISOTP_MSG_FLAGS values. */
	uint8_t flags;
};
/*
* STmin is split in two valid ranges:
* 0-127: 0ms-127ms
* 128-240: Reserved
* 241-249: 100us-900us (multiples of 100us)
* 250- : Reserved
*/
/**
* @brief ISO-TP frame control options struct
*
* Used to pass the options to the bind and send functions.
*/
struct isotp_fc_opts {
	/** Block size: number of CF PDUs the peer may send before waiting for the next FC */
	uint8_t bs;
	/**
	 * Minimum separation time between consecutive frames.
	 * Encoding (see STmin note above): 0-127 is 0 ms - 127 ms;
	 * 241-249 is 100 us - 900 us in multiples of 100 us.
	 */
	uint8_t stmin;
};
/**
* @brief Transmission callback
*
* This callback is called when a transmission is completed.
*
* @param error_nr ISOTP_N_OK on success, ISOTP_N_* on error
* @param arg Callback argument passed to the send function
*/
typedef void (*isotp_tx_callback_t)(int error_nr, void *arg);
struct isotp_send_ctx;
struct isotp_recv_ctx;
/**
* @brief Bind an address to a receiving context.
*
* This function binds an RX and TX address combination to an RX context.
* When data arrives from the specified address, it is buffered and can be read
* by calling isotp_recv.
* When calling this routine, a filter is applied in the CAN device, and the
* context is initialized. The context must be valid until calling unbind.
*
* @param rctx Context to store the internal states.
* @param can_dev The CAN device to be used for sending and receiving.
* @param rx_addr Identifier for incoming data.
* @param tx_addr Identifier for FC frames.
* @param opts Flow control options.
* @param timeout Timeout for FF SF buffer allocation.
*
* @retval ISOTP_N_OK on success
* @retval ISOTP_NO_FREE_FILTER if CAN device has no filters left.
*/
int isotp_bind(struct isotp_recv_ctx *rctx, const struct device *can_dev,
const struct isotp_msg_id *rx_addr,
const struct isotp_msg_id *tx_addr,
const struct isotp_fc_opts *opts,
k_timeout_t timeout);
/**
* @brief Unbind a context from the interface
*
* This function removes the binding from isotp_bind.
* The filter is detached from the CAN device, and if a transmission is ongoing,
* buffers are freed.
* The context can be discarded safely after calling this function.
*
* @param rctx Context that should be unbound.
*/
void isotp_unbind(struct isotp_recv_ctx *rctx);
/**
* @brief Read out received data from fifo.
*
* This function reads the data from the receive FIFO of the context.
* It blocks if the FIFO is empty.
* If an error occurs, the function returns a negative number and leaves the
* data buffer unchanged.
*
* @param rctx Context that is already bound.
* @param data Pointer to a buffer where the data is copied to.
* @param len Size of the buffer.
* @param timeout Timeout for incoming data.
*
* @retval Number of bytes copied on success
* @retval ISOTP_RECV_TIMEOUT when "timeout" timed out
* @retval ISOTP_N_* on error
*/
int isotp_recv(struct isotp_recv_ctx *rctx, uint8_t *data, size_t len, k_timeout_t timeout);
/**
* @brief Get the net buffer on data reception
*
* This function reads incoming data into net-buffers.
* It blocks until the entire packet is received, BS is reached, or an error
* occurred. If BS was zero, the data is in a single net_buf. Otherwise,
* the data is fragmented in chunks of BS size.
* The net-buffers are referenced and must be freed with net_buf_unref after the
* data is processed.
*
* @param rctx Context that is already bound.
* @param buffer Pointer where the net_buf pointer is written to.
* @param timeout Timeout for incoming data.
*
* @retval Remaining data length for this transfer if BS > 0, 0 for BS = 0
* @retval ISOTP_RECV_TIMEOUT when "timeout" timed out
* @retval ISOTP_N_* on error
*/
int isotp_recv_net(struct isotp_recv_ctx *rctx, struct net_buf **buffer, k_timeout_t timeout);
/**
* @brief Send data
*
* This function is used to send data to a peer that listens to the tx_addr.
* An internal work-queue is used to transfer the segmented data.
* Data and context must be valid until the transmission has finished.
* If a complete_cb is given, this function is non-blocking, and the callback
* is called on completion with the return value as a parameter.
*
* @param sctx Context to store the internal states.
* @param can_dev The CAN device to be used for sending and receiving.
* @param data Data to be sent.
* @param len Length of the data to be sent.
* @param tx_addr Identifier for outgoing frames the receiver listens on.
* @param rx_addr Identifier for FC frames.
* @param complete_cb Function called on completion or NULL.
* @param cb_arg Argument passed to the complete callback.
*
* @retval ISOTP_N_OK on success
* @retval ISOTP_N_* on error
*/
int isotp_send(struct isotp_send_ctx *sctx, const struct device *can_dev,
const uint8_t *data, size_t len,
const struct isotp_msg_id *tx_addr,
const struct isotp_msg_id *rx_addr,
isotp_tx_callback_t complete_cb, void *cb_arg);
#ifdef CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS
/**
* @brief Send data with buffered context
*
* This function is similar to isotp_send, but the context is automatically
* allocated from an internal pool.
*
* @param can_dev The CAN device to be used for sending and receiving.
* @param data Data to be sent.
* @param len Length of the data to be sent.
* @param tx_addr Identifier for outgoing frames the receiver listens on.
* @param rx_addr Identifier for FC frames.
* @param complete_cb Function called on completion or NULL.
* @param cb_arg Argument passed to the complete callback.
* @param timeout Timeout for buffer allocation.
*
* @retval ISOTP_N_OK on success
* @retval ISOTP_N_* on error
*/
int isotp_send_ctx_buf(const struct device *can_dev,
const uint8_t *data, size_t len,
const struct isotp_msg_id *tx_addr,
const struct isotp_msg_id *rx_addr,
isotp_tx_callback_t complete_cb, void *cb_arg,
k_timeout_t timeout);
/**
* @brief Send data with buffered context
*
* This function is similar to isotp_send_ctx_buf, but the data is carried in
* a net_buf. net_buf_unref is called on the net_buf when sending is completed.
*
* @param can_dev The CAN device to be used for sending and receiving.
* @param data net_buf carrying the data to be sent; net_buf_unref is
*             called on it when sending is completed.
* @param tx_addr Identifier for outgoing frames the receiver listens on.
* @param rx_addr Identifier for FC frames.
* @param complete_cb Function called on completion or NULL.
* @param cb_arg Argument passed to the complete callback.
* @param timeout Timeout for buffer allocation.
*
* @retval ISOTP_N_OK on success
* @retval ISOTP_* on error
*/
int isotp_send_net_ctx_buf(const struct device *can_dev,
struct net_buf *data,
const struct isotp_msg_id *tx_addr,
const struct isotp_msg_id *rx_addr,
isotp_tx_callback_t complete_cb, void *cb_arg,
k_timeout_t timeout);
#endif /*CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS*/
#if defined(CONFIG_ISOTP_USE_TX_BUF) && \
defined(CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS)
/**
* @brief Send data with buffered context
*
* This function is similar to isotp_send, but the context is automatically
* allocated from an internal pool and the data to be sent is buffered in an
* internal net_buf.
*
* @param can_dev The CAN device to be used for sending and receiving.
* @param data Data to be sent.
* @param len Length of the data to be sent.
* @param tx_addr Identifier for outgoing frames the receiver listens on.
* @param rx_addr Identifier for FC frames.
* @param complete_cb Function called on completion or NULL.
* @param cb_arg Argument passed to the complete callback.
* @param timeout Timeout for buffer allocation.
*
* @retval ISOTP_N_OK on success
* @retval ISOTP_* on error
*/
int isotp_send_buf(const struct device *can_dev,
const uint8_t *data, size_t len,
const struct isotp_msg_id *tx_addr,
const struct isotp_msg_id *rx_addr,
isotp_tx_callback_t complete_cb, void *cb_arg,
k_timeout_t timeout);
#endif
/** @cond INTERNAL_HIDDEN */
/* Completion callback and its user argument, kept while a send is in flight. */
struct isotp_callback {
	isotp_tx_callback_t cb; /* invoked with the ISOTP_N_* result on completion */
	void *arg;              /* opaque user argument passed through to cb */
};
/* Internal state for one outgoing ISO-TP transmission (see isotp_send). */
struct isotp_send_ctx {
	/* CAN filter id used to receive FC frames from the peer */
	int filter_id;
	/* Transfer result; one of the ISOTP_N_* codes */
	uint32_t error_nr;
	/* CAN device used for this transfer */
	const struct device *can_dev;
	union {
		/* Payload carried in a net_buf (when is_net_buf is set) */
		struct net_buf *buf;
		struct {
			/* Payload as a flat buffer with explicit length */
			const uint8_t *data;
			size_t len;
		};
	};
	/* Work item driving the segmented transmission */
	struct k_work work;
	/* Protocol timeout timer (As/Bs timeouts) */
	struct k_timer timer;
	union {
		/* Completion callback (when has_callback is set) */
		struct isotp_callback fin_cb;
		/* Semaphore signaled on completion for blocking sends */
		struct k_sem fin_sem;
	};
	/* Flow control options received from the peer */
	struct isotp_fc_opts opts;
	/* Protocol state machine state */
	uint8_t state;
	/* Frames handed to the CAN driver but not yet confirmed sent */
	uint8_t tx_backlog;
	struct k_sem tx_sem;
	/* Identifier the peer sends FC frames on */
	struct isotp_msg_id rx_addr;
	/* Identifier used for outgoing frames */
	struct isotp_msg_id tx_addr;
	/* WAIT flow-status counter; presumably bounded by CONFIG to yield
	 * ISOTP_N_WFT_OVRN -- confirm against implementation
	 */
	uint8_t wft;
	/* CFs remaining in the current block (from the FC block size) */
	uint8_t bs;
	uint8_t sn : 4;           /* next consecutive frame sequence number */
	uint8_t is_net_buf : 1;   /* payload is carried in a net_buf */
	uint8_t is_ctx_slab : 1;  /* context was allocated from the internal slab */
	uint8_t has_callback: 1;  /* fin_cb (not fin_sem) is active */
};
/* Internal state for one bound ISO-TP receive context (see isotp_bind). */
struct isotp_recv_ctx {
	/* CAN filter id attached for incoming frames */
	int filter_id;
	/* CAN device this context is bound to */
	const struct device *can_dev;
	/* Head of the buffer chain for the ongoing transfer */
	struct net_buf *buf;
	/* Fragment currently being filled */
	struct net_buf *act_frag;
	/* buffer currently processed in isotp_recv */
	struct net_buf *recv_buf;
	/* Node for the buffer-allocation wait list -- presumably; confirm */
	sys_snode_t alloc_node;
	/* Total transfer length (announced in the first frame) */
	uint32_t length;
	/* Error reported to the receiver; one of the ISOTP_N_* codes */
	int error_nr;
	/* Work item for deferred RX processing */
	struct k_work work;
	/* Protocol timeout timer (e.g. Cr timeout) */
	struct k_timer timer;
	/* FIFO of completed buffers consumed by isotp_recv / isotp_recv_net */
	struct k_fifo fifo;
	/* Identifier of incoming data frames */
	struct isotp_msg_id rx_addr;
	/* Identifier used for outgoing FC frames */
	struct isotp_msg_id tx_addr;
	/* Flow control options advertised to the sender */
	struct isotp_fc_opts opts;
	/* Protocol state machine state */
	uint8_t state;
	/* CFs remaining in the current block */
	uint8_t bs;
	/* WAIT flow-status PDU counter -- presumably; confirm */
	uint8_t wft;
	uint8_t sn_expected : 4; /* sequence number expected in the next CF */
};
/** @endcond */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_CANBUS_ISOTP_H_ */
``` | /content/code_sandbox/include/zephyr/canbus/isotp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,603 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for Shared Multi-Heap framework
*/
#ifndef ZEPHYR_INCLUDE_MULTI_HEAP_MANAGER_SMH_H_
#define ZEPHYR_INCLUDE_MULTI_HEAP_MANAGER_SMH_H_
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Heap Management
* @defgroup heaps Heap Management
* @ingroup os_services
* @{
* @}
*/
/**
* @brief Shared Multi-Heap (SMH) interface
* @defgroup shared_multi_heap Shared multi-heap interface
* @ingroup heaps
* @{
*
* The shared multi-heap manager uses the multi-heap allocator to manage a set
* of memory regions with different capabilities / attributes (cacheable,
* non-cacheable, etc...).
*
* All the different regions can be added at run-time to the shared multi-heap
* pool providing an opaque "attribute" value (an integer or enum value) that
* can be used by drivers or applications to request memory with certain
* capabilities.
*
* This framework is commonly used as follows:
*
* - At boot time some platform code initialize the shared multi-heap
* framework using @ref shared_multi_heap_pool_init and add the memory
* regions to the pool with @ref shared_multi_heap_add, possibly gathering
* the needed information for the regions from the DT.
*
* - Each memory region is encoded in a @ref shared_multi_heap_region structure.
* This structure is also carrying an opaque and user-defined integer value
* that is used to define the region capabilities (for example:
* cacheability, cpu affinity, etc...)
*
* - When a driver or application needs some dynamic memory with a certain
* capability, it can use @ref shared_multi_heap_alloc (or the aligned
* version) to request the memory by using the opaque parameter to select
* the correct set of attributes for the needed memory. The framework will
* take care of selecting the correct heap (thus memory region) to carve
* memory from, based on the opaque parameter and the runtime state of the
* heaps (available memory, heap state, etc...)
*/
/**
* @brief SMH region attributes enumeration type.
*
* Enumeration type for some common memory region attributes.
*
*/
enum shared_multi_heap_attr {
	/** Region is cacheable */
	SMH_REG_ATTR_CACHEABLE,
	/** Region is non-cacheable */
	SMH_REG_ATTR_NON_CACHEABLE,
	/** Number of standard attributes; must be the last item */
	SMH_REG_ATTR_NUM,
};
/** Maximum number of standard attributes. */
#define MAX_SHARED_MULTI_HEAP_ATTR SMH_REG_ATTR_NUM
/**
* @brief SMH region struct
*
* This struct is carrying information about the memory region to be added in
* the multi-heap pool.
*/
struct shared_multi_heap_region {
	/** Memory heap attribute: user-defined capability value used to select
	 * the heap at allocation time (see @ref shared_multi_heap_attr for
	 * common values)
	 */
	uint32_t attr;
	/** Memory heap starting virtual address */
	uintptr_t addr;
	/** Memory heap size in bytes */
	size_t size;
};
/**
* @brief Init the pool
*
* This must be the first function to be called to initialize the shared
* multi-heap pool. All the individual heaps must be added later with @ref
* shared_multi_heap_add.
*
* @note As for the generic multi-heap allocator the expectation is that this
* function will be called at soc- or board-level.
*
* @retval 0 on success.
* @retval -EALREADY when the pool was already initialized.
* @retval other errno codes
*/
int shared_multi_heap_pool_init(void);
/**
* @brief Allocate memory from the memory shared multi-heap pool
*
* Allocates a block of memory of the specified size in bytes and with a
* specified capability / attribute. The opaque attribute parameter is used
* by the backend to select the correct heap to allocate memory from.
*
* @param attr capability / attribute requested for the memory block.
* @param bytes requested size of the allocation in bytes.
*
* @retval ptr a valid pointer to heap memory.
* @retval err NULL if no memory is available.
*/
void *shared_multi_heap_alloc(enum shared_multi_heap_attr attr, size_t bytes);
/**
* @brief Allocate aligned memory from the memory shared multi-heap pool
*
* Allocates a block of memory of the specified size in bytes and with a
* specified capability / attribute. Takes an additional parameter specifying a
* power of two alignment in bytes.
*
* @param attr capability / attribute requested for the memory block.
* @param align power of two alignment for the returned pointer, in bytes.
* @param bytes requested size of the allocation in bytes.
*
* @retval ptr a valid pointer to heap memory.
* @retval err NULL if no memory is available.
*/
void *shared_multi_heap_aligned_alloc(enum shared_multi_heap_attr attr,
size_t align, size_t bytes);
/**
* @brief Free memory from the shared multi-heap pool
*
* Used to free the passed block of memory that must be the return value of a
* previously call to @ref shared_multi_heap_alloc or @ref
* shared_multi_heap_aligned_alloc.
*
* @param block block to free, must be a pointer to a block allocated
* by shared_multi_heap_alloc or
* shared_multi_heap_aligned_alloc.
*/
void shared_multi_heap_free(void *block);
/**
* @brief Add an heap region to the shared multi-heap pool
*
* This adds a shared multi-heap region to the multi-heap pool.
*
* @param region pointer to the memory region to be added.
* @param user_data pointer to any data for the heap.
*
* @retval 0 on success.
* @retval -EINVAL when the region attribute is out-of-bound.
* @retval -ENOMEM when there are no more heaps available.
* @retval other errno codes
*/
int shared_multi_heap_add(struct shared_multi_heap_region *region, void *user_data);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_MULTI_HEAP_MANAGER_SMH_H_ */
``` | /content/code_sandbox/include/zephyr/multi_heap/shared_multi_heap.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,284 |
```objective-c
/*
*
*/
/**
* @file
* @brief Crypto Hash APIs
*
* This file contains the Crypto Abstraction layer APIs.
*/
#ifndef ZEPHYR_INCLUDE_CRYPTO_HASH_H_
#define ZEPHYR_INCLUDE_CRYPTO_HASH_H_
/**
* @addtogroup crypto_hash
* @{
*/
/**
* Hash algorithm
*/
enum hash_algo {
	CRYPTO_HASH_ALGO_SHA224 = 1, /**< SHA-224 (224-bit digest) */
	CRYPTO_HASH_ALGO_SHA256 = 2, /**< SHA-256 (256-bit digest) */
	CRYPTO_HASH_ALGO_SHA384 = 3, /**< SHA-384 (384-bit digest) */
	CRYPTO_HASH_ALGO_SHA512 = 4, /**< SHA-512 (512-bit digest) */
};
/* Forward declarations */
struct hash_ctx;
struct hash_pkt;
typedef int (*hash_op_t)(struct hash_ctx *ctx, struct hash_pkt *pkt,
bool finish);
/**
* Structure encoding session parameters.
*
* Refer to comments for individual fields to know the contract
* in terms of who fills what and when w.r.t begin_session() call.
*/
struct hash_ctx {
	/** The device driver instance this crypto context relates to. Will be
	 * populated by the begin_session() API.
	 */
	const struct device *device;
	/** If the driver supports multiple simultaneous crypto sessions, this
	 * will identify the specific driver state this crypto session relates
	 * to. Since dynamic memory allocation is not possible, it is
	 * suggested that at build time drivers allocate space for the
	 * max simultaneous sessions they intend to support. To be populated
	 * by the driver on return from begin_session().
	 */
	void *drv_sessn_state;
	/**
	 * Hash handler set up when the session begins.
	 */
	hash_op_t hash_hndlr;
	/**
	 * Whether a multipart hash operation has been started and not yet
	 * finished.
	 */
	bool started;
	/** How certain fields are to be interpreted for this session.
	 * (A bitmask of CAP_* below.)
	 * To be populated by the app before calling hash_begin_session().
	 * An app can obtain the capability flags supported by a hw/driver
	 * by calling crypto_query_hwcaps().
	 */
	uint16_t flags;
};
/**
* Structure encoding IO parameters of a hash
* operation.
*
* The fields which have not been explicitly called out have to
* be filled up by the app before calling hash_compute().
*/
struct hash_pkt {
	/** Start address of input buffer */
	uint8_t *in_buf;
	/** Bytes to be operated upon */
	size_t in_len;
	/**
	 * Start of the output buffer, to be allocated by
	 * the application. Can be NULL for in-place ops. To be populated
	 * with contents (the digest) by the driver on return from op /
	 * async callback.
	 */
	uint8_t *out_buf;
	/**
	 * Context this packet relates to. This can be useful to get the
	 * session details, especially for async ops.
	 */
	struct hash_ctx *ctx;
};
/* Prototype for the application function to be invoked by the crypto driver
* on completion of an async request. The app may get the session context
* via the pkt->ctx field.
*/
typedef void (*hash_completion_cb)(struct hash_pkt *completed, int status);
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_CRYPTO_HASH_H_ */
``` | /content/code_sandbox/include/zephyr/crypto/hash.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 663 |
```objective-c
/*
*
*/
/**
* @file
* @brief Crypto Cipher structure definitions
*
* This file contains the Crypto Abstraction layer structures.
*
* [Experimental] Users should note that the Structures can change
* as a part of ongoing development.
*/
#ifndef ZEPHYR_INCLUDE_CRYPTO_CIPHER_H_
#define ZEPHYR_INCLUDE_CRYPTO_CIPHER_H_
#include <zephyr/device.h>
#include <zephyr/sys/util.h>
/**
* @addtogroup crypto_cipher
* @{
*/
/** Cipher Algorithm */
enum cipher_algo {
	CRYPTO_CIPHER_ALGO_AES = 1, /**< Advanced Encryption Standard (AES) */
};
/** Cipher Operation */
enum cipher_op {
	CRYPTO_CIPHER_OP_DECRYPT = 0, /**< Decryption operation */
	CRYPTO_CIPHER_OP_ENCRYPT = 1, /**< Encryption operation */
};
/**
* Possible cipher mode options.
*
* More to be added as required.
*/
enum cipher_mode {
	CRYPTO_CIPHER_MODE_ECB = 1, /**< Electronic Code Book */
	CRYPTO_CIPHER_MODE_CBC = 2, /**< Cipher Block Chaining */
	CRYPTO_CIPHER_MODE_CTR = 3, /**< Counter mode */
	CRYPTO_CIPHER_MODE_CCM = 4, /**< Counter with CBC-MAC (AEAD) */
	CRYPTO_CIPHER_MODE_GCM = 5, /**< Galois/Counter Mode (AEAD) */
};
/* Forward declarations */
struct cipher_aead_pkt;
struct cipher_ctx;
struct cipher_pkt;
typedef int (*block_op_t)(struct cipher_ctx *ctx, struct cipher_pkt *pkt);
/* Function signatures for encryption/ decryption using standard cipher modes
* like CBC, CTR, CCM.
*/
typedef int (*cbc_op_t)(struct cipher_ctx *ctx, struct cipher_pkt *pkt,
uint8_t *iv);
typedef int (*ctr_op_t)(struct cipher_ctx *ctx, struct cipher_pkt *pkt,
uint8_t *ctr);
typedef int (*ccm_op_t)(struct cipher_ctx *ctx, struct cipher_aead_pkt *pkt,
uint8_t *nonce);
typedef int (*gcm_op_t)(struct cipher_ctx *ctx, struct cipher_aead_pkt *pkt,
uint8_t *nonce);
/* Mode-specific operation handler, selected by the driver at session setup. */
struct cipher_ops {
	/* Mode this session was set up for; selects the active union member */
	enum cipher_mode cipher_mode;
	union {
		block_op_t block_crypt_hndlr; /* ECB (single block) */
		cbc_op_t cbc_crypt_hndlr;     /* CBC */
		ctr_op_t ctr_crypt_hndlr;     /* CTR */
		ccm_op_t ccm_crypt_hndlr;     /* CCM (AEAD) */
		gcm_op_t gcm_crypt_hndlr;     /* GCM (AEAD) */
	};
};
/* CCM mode session parameters. */
struct ccm_params {
	uint16_t tag_len;   /* authentication tag length -- presumably bytes; confirm with driver */
	uint16_t nonce_len; /* nonce length -- presumably bytes; confirm with driver */
};
/* CTR mode session parameters. */
struct ctr_params {
	/* CTR mode counter is a split counter composed of iv and counter
	 * such that ivlen + ctr_len = keylen
	 */
	uint32_t ctr_len; /* length of the counter portion -- units per the formula above */
};
/* GCM mode session parameters. */
struct gcm_params {
	uint16_t tag_len;   /* authentication tag length -- presumably bytes; confirm with driver */
	uint16_t nonce_len; /* nonce length -- presumably bytes; confirm with driver */
};
/**
* Structure encoding session parameters.
*
* Refer to comments for individual fields to know the contract
* in terms of who fills what and when w.r.t begin_session() call.
*/
struct cipher_ctx {
	/** Place for driver to return function pointers to be invoked per
	 * cipher operation. To be populated by crypto driver on return from
	 * begin_session() based on the algo/mode chosen by the app.
	 */
	struct cipher_ops ops;
	/** To be populated by the app before calling begin_session() */
	union {
		/* Cryptographic key to be used in this session */
		const uint8_t *bit_stream;
		/* For cases where the key is protected and is not
		 * available to the caller
		 */
		void *handle;
	} key;
	/** The device driver instance this crypto context relates to. Will be
	 * populated by the begin_session() API.
	 */
	const struct device *device;
	/** If the driver supports multiple simultaneous crypto sessions, this
	 * will identify the specific driver state this crypto session relates
	 * to. Since dynamic memory allocation is not possible, it is
	 * suggested that at build time drivers allocate space for the
	 * max simultaneous sessions they intend to support. To be populated
	 * by the driver on return from begin_session().
	 */
	void *drv_sessn_state;
	/** Place for the user app to put info relevant for resuming when the
	 * completion callback happens for async ops. Totally managed by the
	 * app.
	 */
	void *app_sessn_state;
	/** Cipher mode parameters, which remain constant for all ops
	 * in a session. To be populated by the app before calling
	 * begin_session().
	 */
	union {
		struct ccm_params ccm_info;
		struct ctr_params ctr_info;
		struct gcm_params gcm_info;
	} mode_params;
	/** Cryptographic key length in bytes. To be populated by the app
	 * before calling begin_session()
	 */
	uint16_t keylen;
	/** How certain fields are to be interpreted for this session.
	 * (A bitmask of CAP_* below.)
	 * To be populated by the app before calling begin_session().
	 * An app can obtain the capability flags supported by a hw/driver
	 * by calling crypto_query_hwcaps().
	 */
	uint16_t flags;
};
/**
* Structure encoding IO parameters of one cryptographic
* operation like encrypt/decrypt.
*
* The fields which have not been explicitly called out have to
* be filled up by the app before making the cipher_xxx_op()
* call.
*/
struct cipher_pkt {
	/** Start address of input buffer */
	uint8_t *in_buf;
	/** Bytes to be operated upon */
	int in_len;
	/** Start of the output buffer, to be allocated by
	 * the application. Can be NULL for in-place ops. To be populated
	 * with contents by the driver on return from op / async callback.
	 */
	uint8_t *out_buf;
	/** Size of the out_buf area allocated by the application. Drivers
	 * should not write past the size of the output buffer.
	 */
	int out_buf_max;
	/** To be populated by the driver on return from cipher_xxx_op() and
	 * holds the size of the actual result.
	 */
	int out_len;
	/** Context this packet relates to. This can be useful to get the
	 * session details, especially for async ops. Will be populated by the
	 * cipher_xxx_op() API based on the ctx parameter.
	 */
	struct cipher_ctx *ctx;
};
/**
* Structure encoding IO parameters in AEAD (Authenticated Encryption
* with Associated Data) scenario like in CCM.
*
* App has to furnish valid contents prior to making cipher_ccm_op() call.
*/
struct cipher_aead_pkt {
	/** IO buffers for encryption. This has to be supplied by the app. */
	struct cipher_pkt *pkt;
	/**
	 * Start address for the Associated Data. This has to be supplied by the app.
	 */
	uint8_t *ad;
	/** Size of the Associated Data. This has to be supplied by the app. */
	uint32_t ad_len;
	/** Start address for the auth tag. For an encryption op this will
	 * be populated by the driver when it returns from the cipher_ccm_op
	 * call. For a decryption op this has to be supplied by the app.
	 */
	uint8_t *tag;
};
/* Prototype for the application function to be invoked by the crypto driver
* on completion of an async request. The app may get the session context
* via the pkt->ctx field. For CCM ops the encompassing AEAD packet may be
* accessed via container_of(). The type of a packet can be determined via
* pkt->ctx.ops.mode .
*/
typedef void (*cipher_completion_cb)(struct cipher_pkt *completed, int status);
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_CRYPTO_CIPHER_H_ */
``` | /content/code_sandbox/include/zephyr/crypto/cipher.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,588 |
```objective-c
/*
*
*/
/**
* @file
* @brief Real-Time IO device API for moving bytes with low effort
*
* RTIO is a context for asynchronous batch operations using a submission and completion queue.
*
* Asynchronous I/O operation are setup in a submission queue. Each entry in the queue describes
* the operation it wishes to perform with some understood semantics.
*
* These operations may be chained in a such a way that only when the current
* operation is complete the next will be executed. If the current operation fails
* all chained operations will also fail.
*
* Operations may also be submitted as a transaction where a set of operations are considered
* to be one operation.
*
* The completion of these operations typically provide one or more completion queue events.
*/
#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
#include <string.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/mem_blocks.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/sys/mpsc_lockfree.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief RTIO
* @defgroup rtio RTIO
* @since 3.2
* @version 0.1.0
* @ingroup os_services
* @{
*/
/**
* @brief RTIO Predefined Priorities
* @defgroup rtio_sqe_prio RTIO Priorities
* @ingroup rtio
* @{
*/
/**
* @brief Low priority
*/
#define RTIO_PRIO_LOW 0U
/**
* @brief Normal priority
*/
#define RTIO_PRIO_NORM 127U
/**
* @brief High priority
*/
#define RTIO_PRIO_HIGH 255U
/**
* @}
*/
/**
* @brief RTIO SQE Flags
* @defgroup rtio_sqe_flags RTIO SQE Flags
* @ingroup rtio
* @{
*/
/**
* @brief The next request in the queue should wait on this one.
*
* Chained SQEs are individual units of work describing patterns of
* ordering and failure cascading. A chained SQE must be started only
* after the one before it. They are given to the iodevs one after another.
*/
#define RTIO_SQE_CHAINED BIT(0)
/**
* @brief The next request in the queue is part of a transaction.
*
* Transactional SQEs are sequential parts of a unit of work.
* Only the first transactional SQE is submitted to an iodev, the
* remaining SQEs are never individually submitted but instead considered
* to be part of the transaction to the single iodev. The first sqe in the
* sequence holds the iodev that will be used and the last holds the userdata
* that will be returned in a single completion on failure/success.
*/
#define RTIO_SQE_TRANSACTION BIT(1)
/**
* @brief The buffer should be allocated by the RTIO mempool
*
* This flag can only exist if the CONFIG_RTIO_SYS_MEM_BLOCKS Kconfig was
* enabled and the RTIO context was created via the RTIO_DEFINE_WITH_MEMPOOL()
* macro. If set, the buffer associated with the entry was allocated by the
* internal memory pool and should be released as soon as it is no longer
* needed via a call to rtio_release_mempool().
*/
#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
/**
* @brief The SQE should not execute if possible
*
* If possible (not yet executed), the SQE should be canceled by flagging it as failed and returning
* -ECANCELED as the result.
*/
#define RTIO_SQE_CANCELED BIT(3)
/**
* @brief The SQE should continue producing CQEs until canceled
*
* This flag must be used together with @ref RTIO_SQE_MEMPOOL_BUFFER and signals that when a
* read completes, the SQE should be placed back in the queue until canceled.
*/
#define RTIO_SQE_MULTISHOT BIT(4)
/**
* @brief The SQE does not produce a CQE.
*/
#define RTIO_SQE_NO_RESPONSE BIT(5)
/**
* @}
*/
/**
* @brief RTIO CQE Flags
* @defgroup rtio_cqe_flags RTIO CQE Flags
* @ingroup rtio
* @{
*/
/**
* @brief The entry's buffer was allocated from the RTIO's mempool
*
* If this bit is set, the buffer was allocated from the memory pool and should be recycled as
* soon as the application is done with it.
*/
#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
/**
* @brief Get the block index of a mempool flags
*
* @param flags The CQE flags value
* @return The block index portion of the flags field.
*/
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
/**
* @brief Get the block count of a mempool flags
*
* @param flags The CQE flags value
* @return The block count portion of the flags field.
*/
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
/**
* @brief Prepare CQE flags for a mempool read.
*
* @param blk_idx The mempool block index
* @param blk_cnt The mempool block count
* @return A shifted and masked value that can be added to the flags field with an OR operator.
*/
#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
(FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
/**
* @}
*/
/**
* @brief Equivalent to the I2C_MSG_STOP flag
*/
#define RTIO_IODEV_I2C_STOP BIT(1)
/**
* @brief Equivalent to the I2C_MSG_RESTART flag
*/
#define RTIO_IODEV_I2C_RESTART BIT(2)
/**
* @brief Equivalent to the I2C_MSG_ADDR_10_BITS
*/
#define RTIO_IODEV_I2C_10_BITS BIT(3)
/** @cond ignore */
struct rtio;
struct rtio_cqe;
struct rtio_sqe;
struct rtio_sqe_pool;
struct rtio_cqe_pool;
struct rtio_iodev;
struct rtio_iodev_sqe;
/** @endcond */
/**
* @typedef rtio_callback_t
* @brief Callback signature for RTIO_OP_CALLBACK
* @param r RTIO context being used with the callback
* @param sqe Submission for the callback op
* @param arg0 Argument option as part of the sqe
*/
typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
/**
* @brief A submission queue event
*/
struct rtio_sqe {
	uint8_t op; /**< Op code */
	uint8_t prio; /**< Op priority */
	uint16_t flags; /**< Op Flags */
	uint16_t iodev_flags; /**< Op iodev flags */
	uint16_t _resv0; /**< Reserved for future use */
	const struct rtio_iodev *iodev; /**< Device to operation on */
	/**
	 * User provided data which is returned upon operation completion. Could be a pointer or
	 * integer.
	 *
	 * If unique identification of completions is desired this should be
	 * unique as well.
	 */
	void *userdata;
	/* Op-specific arguments; the valid member is selected by the op field */
	union {
		/** OP_TX */
		struct {
			uint32_t buf_len; /**< Length of buffer */
			const uint8_t *buf; /**< Buffer to write from */
		} tx;
		/** OP_RX */
		struct {
			uint32_t buf_len; /**< Length of buffer */
			uint8_t *buf; /**< Buffer to read into */
		} rx;
		/** OP_TINY_TX */
		struct {
			uint8_t buf_len; /**< Length of tiny buffer */
			uint8_t buf[7]; /**< Tiny buffer, data is copied in rather than referenced */
		} tiny_tx;
		/** OP_CALLBACK */
		struct {
			rtio_callback_t callback; /**< Function to invoke when the op is serviced */
			void *arg0; /**< Last argument given to callback */
		} callback;
		/** OP_TXRX */
		struct {
			uint32_t buf_len; /**< Length of tx and rx buffers */
			const uint8_t *tx_buf; /**< Buffer to write from */
			uint8_t *rx_buf; /**< Buffer to read into */
		} txrx;
		/** OP_I2C_CONFIGURE */
		uint32_t i2c_config;
	};
};
/** @cond ignore */
/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
/** @endcond */
/**
* @brief A completion queue event
*/
struct rtio_cqe {
	struct mpsc_node q; /**< Intrusive node used to link the CQE into the CQ or free list */
	int32_t result; /**< Result from operation */
	void *userdata; /**< Associated userdata with operation */
	uint32_t flags; /**< Flags associated with the operation */
};
/** Pool of pre-allocated submission queue entries with an MPSC free list */
struct rtio_sqe_pool {
	struct mpsc free_q; /**< Free list of available entries */
	const uint16_t pool_size; /**< Total number of entries backing the pool */
	uint16_t pool_free; /**< Number of entries currently on the free list */
	struct rtio_iodev_sqe *pool; /**< Backing array of entries */
};

/** Pool of pre-allocated completion queue entries with an MPSC free list */
struct rtio_cqe_pool {
	struct mpsc free_q; /**< Free list of available entries */
	const uint16_t pool_size; /**< Total number of entries backing the pool */
	uint16_t pool_free; /**< Number of entries currently on the free list */
	struct rtio_cqe *pool; /**< Backing array of entries */
};
/**
* @brief An RTIO context containing what can be viewed as a pair of queues.
*
* A queue for submissions (available and in queue to be produced) as well as a queue
* of completions (available and ready to be consumed).
*
* The rtio executor along with any objects implementing the rtio_iodev interface are
* the consumers of submissions and producers of completions.
*
* No work is started until rtio_submit() is called.
*/
struct rtio {
#ifdef CONFIG_RTIO_SUBMIT_SEM
	/* A wait semaphore which may suspend the calling thread
	 * to wait for some number of completions when calling submit
	 */
	struct k_sem *submit_sem;

	/* Number of completions still outstanding before submit_sem is given
	 * (set by rtio_submit(), decremented by rtio_cqe_submit())
	 */
	uint32_t submit_count;
#endif

#ifdef CONFIG_RTIO_CONSUME_SEM
	/* A wait semaphore which may suspend the calling thread
	 * to wait for some number of completions while consuming
	 * them from the completion queue
	 */
	struct k_sem *consume_sem;
#endif

	/* Total number of completions */
	atomic_t cq_count;

	/* Number of completions that were unable to be submitted with results
	 * due to the cq spsc being full
	 */
	atomic_t xcqcnt;

	/* Submission queue object pool with free list */
	struct rtio_sqe_pool *sqe_pool;

	/* Complete queue object pool with free list */
	struct rtio_cqe_pool *cqe_pool;

#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	/* Mem block pool */
	struct sys_mem_blocks *block_pool;
#endif

	/* Submission queue */
	struct mpsc sq;

	/* Completion queue */
	struct mpsc cq;
};
/** The memory partition associated with all RTIO context information */
extern struct k_mem_partition rtio_partition;
/**
* @brief Get the mempool block size of the RTIO context
*
* @param[in] r The RTIO context
* @return The size of each block in the context's mempool
* @return 0 if the context doesn't have a mempool
*/
static inline size_t rtio_mempool_block_size(const struct rtio *r)
{
#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	/* Contexts without a mempool report a zero block size */
	if ((r != NULL) && (r->block_pool != NULL)) {
		return BIT(r->block_pool->info.blk_sz_shift);
	}

	return 0;
#else
	ARG_UNUSED(r);
	return 0;
#endif
}
/**
* @brief Compute the mempool block index for a given pointer
*
* @param[in] r RTIO context
* @param[in] ptr Memory pointer in the mempool
* @return Index of the mempool block associated with the pointer. Or UINT16_MAX if invalid.
*/
#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;
	struct sys_mem_blocks *mem_pool = r->block_pool;
	uint32_t block_size = rtio_mempool_block_size(r);

	uintptr_t buff = (uintptr_t)mem_pool->buffer;
	uint32_t buff_size = mem_pool->info.num_blocks * block_size;

	/* Reject pointers outside the pool's backing buffer */
	if (addr < buff || addr >= buff + buff_size) {
		return UINT16_MAX;
	}
	/* Offset within the buffer, truncated down to a whole block index */
	return (addr - buff) / block_size;
}
#endif
/**
* @brief IO device submission queue entry
*
* May be cast safely to and from a rtio_sqe as they occupy the same memory provided by the pool
*/
struct rtio_iodev_sqe {
	struct rtio_sqe sqe; /**< Embedded SQE; must be first so casts to/from rtio_sqe work */
	struct mpsc_node q; /**< Intrusive node for the submission queue / free list */
	struct rtio_iodev_sqe *next; /**< Next entry in a chain or transaction */
	struct rtio *r; /**< Owning RTIO context */
};
/**
* @brief API that an RTIO IO device should implement
*/
struct rtio_iodev_api {
	/**
	 * @brief Submit to the iodev an entry to work on
	 *
	 * This call should be short in duration and most likely
	 * either enqueue or kick off an entry with the hardware.
	 *
	 * @param iodev_sqe Submission queue entry
	 */
	void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
};

/**
 * @brief An IO device with a function table for submitting requests
 */
struct rtio_iodev {
	/* Function pointer table */
	const struct rtio_iodev_api *api;

	/* Data associated with this iodev */
	void *data;
};
/* RTIO op codes: sequential values starting from RTIO_OP_NOP = 0, stored in rtio_sqe::op */
/** An operation that does nothing and will complete immediately */
#define RTIO_OP_NOP 0
/** An operation that receives (reads) */
#define RTIO_OP_RX (RTIO_OP_NOP+1)
/** An operation that transmits (writes) */
#define RTIO_OP_TX (RTIO_OP_RX+1)
/** An operation that transmits tiny writes by copying the data to write */
#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
/** An operation that calls a given function (callback) */
#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
/** An operation that transceives (reads and writes simultaneously) */
#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
/** An operation to recover I2C buses */
#define RTIO_OP_I2C_RECOVER (RTIO_OP_TXRX+1)
/** An operation to configure I2C buses */
#define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
/**
* @brief Prepare a nop (no op) submission
*/
static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
				     const struct rtio_iodev *iodev,
				     void *userdata)
{
	/* Zero everything first so unused fields and flags are cleared */
	memset(sqe, 0, sizeof(*sqe));

	sqe->op = RTIO_OP_NOP;
	sqe->userdata = userdata;
	sqe->iodev = iodev;
}
/**
* @brief Prepare a read op submission
*/
static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
				      const struct rtio_iodev *iodev,
				      int8_t prio,
				      uint8_t *buf,
				      uint32_t len,
				      void *userdata)
{
	/* Zero everything first so unused fields and flags are cleared */
	memset(sqe, 0, sizeof(*sqe));

	sqe->op = RTIO_OP_RX;
	sqe->prio = prio;
	sqe->iodev = iodev;
	sqe->userdata = userdata;
	sqe->rx.buf = buf;
	sqe->rx.buf_len = len;
}
/**
* @brief Prepare a read op submission with context's mempool
*
* @see rtio_sqe_prep_read()
*/
static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
						const struct rtio_iodev *iodev, int8_t prio,
						void *userdata)
{
	/* Buffer is left NULL/0; it is allocated from the context's mempool
	 * when the request is serviced (see rtio_sqe_rx_buf())
	 */
	rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
	sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
}

/**
 * @brief Prepare a multishot read op submission with context's mempool
 *
 * The entry keeps producing completions until canceled, with each read
 * buffer allocated from the context's mempool.
 *
 * @see rtio_sqe_prep_read_with_pool()
 */
static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
						const struct rtio_iodev *iodev, int8_t prio,
						void *userdata)
{
	rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
	sqe->flags |= RTIO_SQE_MULTISHOT;
}
/**
* @brief Prepare a write op submission
*/
static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
				       const struct rtio_iodev *iodev,
				       int8_t prio,
				       const uint8_t *buf,
				       uint32_t len,
				       void *userdata)
{
	/* Zero everything first so unused fields and flags are cleared */
	memset(sqe, 0, sizeof(*sqe));

	sqe->op = RTIO_OP_TX;
	sqe->prio = prio;
	sqe->iodev = iodev;
	sqe->userdata = userdata;
	sqe->tx.buf = buf;
	sqe->tx.buf_len = len;
}
/**
* @brief Prepare a tiny write op submission
*
* Unlike the normal write operation where the source buffer must outlive the call
* the tiny write data in this case is copied to the sqe. It must be tiny to fit
* within the specified size of a rtio_sqe.
*
* This is useful in many scenarios with RTL logic where a write of the register to
* subsequently read must be done.
*/
static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
					    const struct rtio_iodev *iodev,
					    int8_t prio,
					    const uint8_t *tiny_write_data,
					    uint8_t tiny_write_len,
					    void *userdata)
{
	/* The data is copied in, so it must fit the embedded buffer */
	__ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));

	memset(sqe, 0, sizeof(*sqe));

	sqe->op = RTIO_OP_TINY_TX;
	sqe->prio = prio;
	sqe->iodev = iodev;
	sqe->userdata = userdata;
	sqe->tiny_tx.buf_len = tiny_write_len;
	memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
}
/**
* @brief Prepare a callback op submission
*
* A somewhat special operation in that it may only be done in kernel mode.
*
* Used where general purpose logic is required in a queue of io operations to do
* transforms or logic.
*/
static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
					  rtio_callback_t callback,
					  void *arg0,
					  void *userdata)
{
	/* Zero everything first so unused fields and flags are cleared */
	memset(sqe, 0, sizeof(*sqe));

	sqe->op = RTIO_OP_CALLBACK;
	sqe->prio = 0;
	sqe->iodev = NULL; /* callbacks are serviced by the executor, not an iodev */
	sqe->userdata = userdata;
	sqe->callback.callback = callback;
	sqe->callback.arg0 = arg0;
}
/**
* @brief Prepare a transceive op submission
*/
static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
					    const struct rtio_iodev *iodev,
					    int8_t prio,
					    const uint8_t *tx_buf,
					    uint8_t *rx_buf,
					    uint32_t buf_len,
					    void *userdata)
{
	/* Zero everything first so unused fields and flags are cleared */
	memset(sqe, 0, sizeof(*sqe));

	sqe->op = RTIO_OP_TXRX;
	sqe->prio = prio;
	sqe->iodev = iodev;
	sqe->userdata = userdata;
	sqe->txrx.tx_buf = tx_buf;
	sqe->txrx.rx_buf = rx_buf;
	sqe->txrx.buf_len = buf_len; /* single length shared by both buffers */
}
/* Take an iodev SQE off the pool's free list; NULL when the pool is exhausted */
static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
{
	struct mpsc_node *node = mpsc_pop(&pool->free_q);

	if (node == NULL) {
		return NULL;
	}

	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);

	pool->pool_free--;

	return iodev_sqe;
}
/* Return an iodev SQE to the pool's free list */
static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
{
	mpsc_push(&pool->free_q, &iodev_sqe->q);

	pool->pool_free++;
}
/* Take a CQE off the pool's free list, zeroed; NULL when the pool is exhausted */
static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
{
	struct mpsc_node *node = mpsc_pop(&pool->free_q);

	if (node == NULL) {
		return NULL;
	}

	struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);

	/* Clear any stale result/userdata/flags from a previous use */
	memset(cqe, 0, sizeof(struct rtio_cqe));

	pool->pool_free--;

	return cqe;
}
/* Return a CQE to the pool's free list */
static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
{
	mpsc_push(&pool->free_q, &cqe->q);

	pool->pool_free++;
}
/* Allocate a contiguous buffer of between min_sz and max_sz bytes from the
 * context's block pool, rounding up to whole blocks. On success the actual
 * allocated size is stored in *buf_len.
 *
 * Returns 0 on success, -ENOMEM when no suitable run of blocks is free,
 * -ENOTSUP when mem blocks support is not compiled in.
 */
static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
					size_t max_sz, uint8_t **buf, uint32_t *buf_len)
{
#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
	ARG_UNUSED(r);
	ARG_UNUSED(min_sz);
	ARG_UNUSED(max_sz);
	ARG_UNUSED(buf);
	ARG_UNUSED(buf_len);
	return -ENOTSUP;
#else
	const uint32_t block_size = rtio_mempool_block_size(r);
	uint32_t bytes = max_sz;

	/* Not every context has a block pool and the block size may return 0 in
	 * that case
	 */
	if (block_size == 0) {
		return -ENOMEM;
	}

	/* Best effort: start with the largest request and shrink by one block
	 * per attempt until an allocation succeeds or we would go below min_sz
	 */
	do {
		size_t num_blks = DIV_ROUND_UP(bytes, block_size);
		int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);

		if (rc == 0) {
			*buf_len = num_blks * block_size;
			return 0;
		}

		if (bytes <= block_size) {
			break;
		}

		bytes -= block_size;
	} while (bytes >= min_sz);

	return -ENOMEM;
#endif
}
/* Release a buffer previously obtained from rtio_block_pool_alloc();
 * buf_len is converted back to a block count by shifting, so it is expected
 * to be a multiple of the pool's block size
 */
static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
{
#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
	ARG_UNUSED(r);
	ARG_UNUSED(buf);
	ARG_UNUSED(buf_len);
#else
	size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;

	sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
#endif
}
/* Do not try and reformat the macros */
/* clang-format off */
/**
* @brief Statically define and initialize an RTIO IODev
*
* @param name Name of the iodev
* @param iodev_api Pointer to struct rtio_iodev_api
* @param iodev_data Data pointer
*/
#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
.api = (iodev_api), \
.data = (iodev_data), \
}
/* Statically allocate an SQE pool of @p sz entries plus its backing array,
 * with every entry initially on the free list
 */
#define Z_RTIO_SQE_POOL_DEFINE(name, sz)			\
	static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz];	\
	STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = {	\
		.free_q = MPSC_INIT((name.free_q)),		\
		.pool_size = sz,				\
		.pool_free = sz,				\
		.pool = CONCAT(_sqe_pool_, name),		\
	}

/* Statically allocate a CQE pool of @p sz entries plus its backing array,
 * with every entry initially on the free list
 */
#define Z_RTIO_CQE_POOL_DEFINE(name, sz)			\
	static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz];	\
	STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = {	\
		.free_q = MPSC_INIT((name.free_q)),		\
		.pool_size = sz,				\
		.pool_free = sz,				\
		.pool = CONCAT(_cqe_pool_, name),		\
	}
/**
* @brief Allocate to bss if available
*
* If CONFIG_USERSPACE is selected, allocate to the rtio_partition bss. Maps to:
* K_APP_BMEM(rtio_partition) static
*
* If CONFIG_USERSPACE is disabled, allocate as plain static:
* static
*/
#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
/**
* @brief Allocate as initialized memory if available
*
* If CONFIG_USERSPACE is selected, allocate to the rtio_partition init. Maps to:
* K_APP_DMEM(rtio_partition) static
*
* If CONFIG_USERSPACE is disabled, allocate as plain static:
* static
*/
#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
/* Statically allocate an aligned byte buffer and wrap it in a sys_mem_blocks
 * allocator of @p blk_cnt blocks of WB_UP(blk_sz) bytes each
 */
#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align)	\
	RTIO_BMEM uint8_t __aligned(WB_UP(blk_align))			\
		CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)];	\
	_SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, \
					    CONCAT(_block_pool_, name), RTIO_DMEM)
/* Instantiate a struct rtio in its iterable section, wiring in the given
 * SQE/CQE pools and optional block pool, plus the submit/consume semaphores
 * when the corresponding Kconfig options are enabled
 */
#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool)	\
	IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM,			\
		   (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT)))	\
	IF_ENABLED(CONFIG_RTIO_CONSUME_SEM,			\
		   (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT)))	\
	STRUCT_SECTION_ITERABLE(rtio, name) = {			\
		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),)) \
		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,))	\
		IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
		.cq_count = ATOMIC_INIT(0),			\
		.xcqcnt = ATOMIC_INIT(0),			\
		.sqe_pool = _sqe_pool,				\
		.cqe_pool = _cqe_pool,				\
		IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,))	\
		.sq = MPSC_INIT((name.sq)),			\
		.cq = MPSC_INIT((name.cq)),			\
	}
/**
* @brief Statically define and initialize an RTIO context
*
* @param name Name of the RTIO
* @param sq_sz Size of the submission queue entry pool
* @param cq_sz Size of the completion queue entry pool
*/
#define RTIO_DEFINE(name, sq_sz, cq_sz) \
Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz); \
Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz); \
Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool), \
&CONCAT(name, _cqe_pool), NULL)
/* clang-format on */
/**
* @brief Statically define and initialize an RTIO context
*
* @param name Name of the RTIO
 * @param sq_sz Size of the submission queue entry pool
 * @param cq_sz Size of the completion queue entry pool
* @param num_blks Number of blocks in the memory pool
* @param blk_size The number of bytes in each block
* @param balign The block alignment
*/
#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
/* clang-format on */
/**
* @brief Count of acquirable submission queue events
*
* @param r RTIO context
*
* @return Count of acquirable submission queue events
*/
static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
{
	/* Snapshot of the pool's free count at the time of the call */
	return r->sqe_pool->pool_free;
}
/**
* @brief Get the next sqe in the transaction
*
* @param iodev_sqe Submission queue entry
*
* @retval NULL if current sqe is last in transaction
* @retval struct rtio_sqe * if available
*/
static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
{
	/* The next entry is only valid while the transaction flag is set */
	return (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) ? iodev_sqe->next : NULL;
}
/**
* @brief Get the next sqe in the chain
*
* @param iodev_sqe Submission queue entry
*
* @retval NULL if current sqe is last in chain
* @retval struct rtio_sqe * if available
*/
static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
{
	/* The next entry is only valid while the chained flag is set */
	return (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) ? iodev_sqe->next : NULL;
}
/**
* @brief Get the next sqe in the chain or transaction
*
* @param iodev_sqe Submission queue entry
*
* @retval NULL if current sqe is last in chain
* @retval struct rtio_iodev_sqe * if available
*/
static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
{
	/* Follows next unconditionally; use rtio_txn_next()/rtio_chain_next()
	 * for the flag-checked variants
	 */
	return iodev_sqe->next;
}
/**
* @brief Acquire a single submission queue event if available
*
* @param r RTIO context
*
* @retval sqe A valid submission queue event acquired from the submission queue
* @retval NULL No subsmission queue event available
*/
static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
{
	struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);

	if (iodev_sqe == NULL) {
		return NULL;
	}

	/* The entry is placed on the submission queue immediately; it will be
	 * consumed by the executor on the next rtio_submit()
	 */
	mpsc_push(&r->sq, &iodev_sqe->q);

	return &iodev_sqe->sqe;
}
/**
* @brief Drop all previously acquired sqe
*
* @param r RTIO context
*/
static inline void rtio_sqe_drop_all(struct rtio *r)
{
struct rtio_iodev_sqe *iodev_sqe;
struct mpsc_node *node = mpsc_pop(&r->sq);
while (node != NULL) {
iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
node = mpsc_pop(&r->sq);
}
}
/**
* @brief Acquire a complete queue event if available
*/
static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
{
	struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);

	if (cqe == NULL) {
		return NULL;
	}

	/* No memset needed here: rtio_cqe_pool_alloc() already zeroes the
	 * entry before handing it out, so a second clear was redundant work.
	 */
	return cqe;
}
/**
* @brief Produce a complete queue event if available
*/
static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
{
	/* Push the filled-in CQE onto the completion queue */
	mpsc_push(&r->cq, &cqe->q);
}
/**
* @brief Consume a single completion queue event if available
*
* If a completion queue event is returned rtio_cq_release(r) must be called
* at some point to release the cqe spot for the cqe producer.
*
* @param r RTIO context
*
* @retval cqe A valid completion queue event consumed from the completion queue
* @retval NULL No completion queue event available
*/
static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
{
	struct mpsc_node *node;
	struct rtio_cqe *cqe = NULL;

#ifdef CONFIG_RTIO_CONSUME_SEM
	/* Non-blocking take: the semaphore count mirrors the number of
	 * produced completions, so failure means nothing to consume
	 */
	if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
		return NULL;
	}
#endif

	node = mpsc_pop(&r->cq);
	if (node == NULL) {
		return NULL;
	}
	cqe = CONTAINER_OF(node, struct rtio_cqe, q);

	return cqe;
}
/**
* @brief Wait for and consume a single completion queue event
*
* If a completion queue event is returned rtio_cq_release(r) must be called
* at some point to release the cqe spot for the cqe producer.
*
* @param r RTIO context
*
* @retval cqe A valid completion queue event consumed from the completion queue
*/
static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
{
	struct mpsc_node *node;
	struct rtio_cqe *cqe;

#ifdef CONFIG_RTIO_CONSUME_SEM
	/* Block until at least one completion has been produced */
	k_sem_take(r->consume_sem, K_FOREVER);
#endif

	/* Spin until the CQE is visible on the queue (without the consume
	 * semaphore this is the only wait mechanism)
	 */
	node = mpsc_pop(&r->cq);
	while (node == NULL) {
		Z_SPIN_DELAY(1);
		node = mpsc_pop(&r->cq);
	}
	cqe = CONTAINER_OF(node, struct rtio_cqe, q);

	return cqe;
}
/**
* @brief Release consumed completion queue event
*
* @param r RTIO context
* @param cqe Completion queue entry
*/
static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
{
	/* Return the entry to the CQE pool so a producer can reuse it */
	rtio_cqe_pool_free(r->cqe_pool, cqe);
}
/**
* @brief Compute the CQE flags from the rtio_iodev_sqe entry
*
* @param iodev_sqe The SQE entry in question.
* @return The value that should be set for the CQE's flags field.
*/
static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
{
	uint32_t flags = 0;

#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	/* Only mempool-backed reads carry buffer info in the CQE flags */
	if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
		struct rtio *r = iodev_sqe->r;
		struct sys_mem_blocks *mem_pool = r->block_pool;
		/* Translate the buffer pointer/length into block index/count
		 * so the consumer can reconstruct them from the flags
		 */
		int blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
				mem_pool->info.blk_sz_shift;
		int blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;

		flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
	}
#else
	ARG_UNUSED(iodev_sqe);
#endif

	return flags;
}
/**
* @brief Retrieve the mempool buffer that was allocated for the CQE.
*
* If the RTIO context contains a memory pool, and the SQE was created by calling
* rtio_sqe_read_with_pool(), this function can be used to retrieve the memory associated with the
* read. Once processing is done, it should be released by calling rtio_release_buffer().
*
* @param[in] r RTIO context
* @param[in] cqe The CQE handling the event.
* @param[out] buff Pointer to the mempool buffer
* @param[out] buff_len Length of the allocated buffer
* @return 0 on success
* @return -EINVAL if the buffer wasn't allocated for this cqe
* @return -ENOTSUP if memory blocks are disabled
*/
__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
					  uint8_t **buff, uint32_t *buff_len);

static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
						     uint8_t **buff, uint32_t *buff_len)
{
#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
		/* Reconstruct the buffer address and length from the block
		 * index/count packed into the CQE flags by
		 * rtio_cqe_compute_flags()
		 */
		int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
		int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
		uint32_t blk_size = rtio_mempool_block_size(r);

		*buff = r->block_pool->buffer + blk_idx * blk_size;
		*buff_len = blk_count * blk_size;
		__ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
		__ASSERT_NO_MSG(*buff <
				r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
		return 0;
	}
	return -EINVAL;
#else
	ARG_UNUSED(r);
	ARG_UNUSED(cqe);
	ARG_UNUSED(buff);
	ARG_UNUSED(buff_len);

	return -ENOTSUP;
#endif
}
/* Executor entry points; called by rtio_submit() and the rtio_iodev_sqe_ok()/
 * rtio_iodev_sqe_err() wrappers below
 */
void rtio_executor_submit(struct rtio *r);
void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
/**
* @brief Inform the executor of a submission completion with success
*
* This may start the next asynchronous request if one is available.
*
* @param iodev_sqe IODev Submission that has succeeded
* @param result Result of the request
*/
static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
{
	/* Thin wrapper over the executor's success path */
	rtio_executor_ok(iodev_sqe, result);
}
/**
* @brief Inform the executor of a submissions completion with error
*
* This SHALL fail the remaining submissions in the chain.
*
* @param iodev_sqe Submission that has failed
* @param result Result of the request
*/
static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
{
	/* Thin wrapper over the executor's failure path */
	rtio_executor_err(iodev_sqe, result);
}
/**
* Submit a completion queue event with a given result and userdata
*
* Called by the executor to produce a completion queue event, no inherent
* locking is performed and this is not safe to do from multiple callers.
*
* @param r RTIO context
* @param result Integer result code (could be -errno)
* @param userdata Userdata to pass along to completion
* @param flags Flags to use for the CEQ see RTIO_CQE_FLAG_*
*/
static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
{
	struct rtio_cqe *cqe = rtio_cqe_acquire(r);

	if (cqe == NULL) {
		/* CQE pool exhausted: the completion is dropped but counted
		 * so the loss is observable
		 */
		atomic_inc(&r->xcqcnt);
	} else {
		cqe->result = result;
		cqe->userdata = userdata;
		cqe->flags = flags;
		rtio_cqe_produce(r, cqe);
	}

	/* cq_count advances even for dropped completions so waiters in
	 * rtio_submit() are not stranded
	 */
	atomic_inc(&r->cq_count);
#ifdef CONFIG_RTIO_SUBMIT_SEM
	if (r->submit_count > 0) {
		r->submit_count--;
		if (r->submit_count == 0) {
			/* Last awaited completion: wake the submitter */
			k_sem_give(r->submit_sem);
		}
	}
#endif
#ifdef CONFIG_RTIO_CONSUME_SEM
	k_sem_give(r->consume_sem);
#endif
}
/* Number of blocks needed to hold @p num_bytes, rounding up */
#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
/**
* @brief Get the buffer associate with the RX submission
*
* @param[in] iodev_sqe The submission to probe
* @param[in] min_buf_len The minimum number of bytes needed for the operation
* @param[in] max_buf_len The maximum number of bytes needed for the operation
* @param[out] buf Where to store the pointer to the buffer
* @param[out] buf_len Where to store the size of the buffer
*
* @return 0 if @p buf and @p buf_len were successfully filled
* @return -ENOMEM Not enough memory for @p min_buf_len
*/
static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
				  uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
{
	struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;

#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
		struct rtio *r = iodev_sqe->r;

		/* A buffer may already be attached (e.g. multishot reuse);
		 * validate it instead of allocating a new one
		 */
		if (sqe->rx.buf != NULL) {
			if (sqe->rx.buf_len < min_buf_len) {
				return -ENOMEM;
			}
			*buf = sqe->rx.buf;
			*buf_len = sqe->rx.buf_len;
			return 0;
		}

		/* Allocate from the context's block pool and record the
		 * result in the SQE so the completion can reference it
		 */
		int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
		if (rc == 0) {
			sqe->rx.buf = *buf;
			sqe->rx.buf_len = *buf_len;
			return 0;
		}

		return -ENOMEM;
	}
#else
	ARG_UNUSED(max_buf_len);
#endif

	/* Caller-provided buffer path: just check it is large enough */
	if (sqe->rx.buf_len < min_buf_len) {
		return -ENOMEM;
	}

	*buf = sqe->rx.buf;
	*buf_len = sqe->rx.buf_len;
	return 0;
}
/**
* @brief Release memory that was allocated by the RTIO's memory pool
*
* If the RTIO context was created by a call to RTIO_DEFINE_WITH_MEMPOOL(), then the cqe data might
* contain a buffer that's owned by the RTIO context. In those cases (if the read request was
* configured via rtio_sqe_read_with_pool()) the buffer must be returned back to the pool.
*
* Call this function when processing is complete. This function will validate that the memory
* actually belongs to the RTIO context and will ignore invalid arguments.
*
* @param r RTIO context
* @param buff Pointer to the buffer to be released.
* @param buff_len Number of bytes to free (will be rounded up to nearest memory block).
*/
__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);

static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
{
#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	/* Silently ignore invalid arguments, per the API contract above */
	if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
		return;
	}

	rtio_block_pool_free(r, buff, buff_len);
#else
	ARG_UNUSED(r);
	ARG_UNUSED(buff);
	ARG_UNUSED(buff_len);
#endif
}
/**
* Grant access to an RTIO context to a user thread
*/
static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
{
	k_object_access_grant(r, t);

#ifdef CONFIG_RTIO_SUBMIT_SEM
	/* The thread also needs access to the kernel objects the context
	 * references, or submit/consume would fault from user mode
	 */
	k_object_access_grant(r->submit_sem, t);
#endif

#ifdef CONFIG_RTIO_CONSUME_SEM
	k_object_access_grant(r->consume_sem, t);
#endif
}
/**
* @brief Attempt to cancel an SQE
*
* If possible (not currently executing), cancel an SQE and generate a failure with -ECANCELED
* result.
*
* @param[in] sqe The SQE to cancel
* @return 0 if the SQE was flagged for cancellation
* @return <0 on error
*/
__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);

static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
{
	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);

	/* Flag this SQE and every entry linked after it, so the whole
	 * chain/transaction is canceled rather than just one entry
	 */
	do {
		iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
		iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
	} while (iodev_sqe != NULL);

	return 0;
}
/**
* @brief Copy an array of SQEs into the queue and get resulting handles back
*
* Copies one or more SQEs into the RTIO context and optionally returns their generated SQE handles.
* Handles can be used to cancel events via the rtio_sqe_cancel() call.
*
* @param[in] r RTIO context
* @param[in] sqes Pointer to an array of SQEs
* @param[out] handle Optional pointer to @ref rtio_sqe pointer to store the handle of the
* first generated SQE. Use NULL to ignore.
* @param[in] sqe_count Count of sqes in array
*
* @retval 0 success
* @retval -ENOMEM not enough room in the queue
*/
__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
					   struct rtio_sqe **handle, size_t sqe_count);

static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
						      struct rtio_sqe **handle,
						      size_t sqe_count)
{
	struct rtio_sqe *sqe;
	uint32_t acquirable = rtio_sqe_acquirable(r);

	/* All-or-nothing: partial copies would break chained submissions */
	if (acquirable < sqe_count) {
		return -ENOMEM;
	}

	/* Use size_t for the index: sqe_count is a size_t and unsigned long
	 * is narrower than size_t on LLP64 targets, which could truncate
	 */
	for (size_t i = 0; i < sqe_count; i++) {
		sqe = rtio_sqe_acquire(r);
		__ASSERT_NO_MSG(sqe != NULL);
		if (handle != NULL && i == 0) {
			/* Hand back the first entry for later cancellation */
			*handle = sqe;
		}
		*sqe = sqes[i];
	}

	return 0;
}
/**
* @brief Copy an array of SQEs into the queue
*
* Useful if a batch of submissions is stored in ROM or
* RTIO is used from user mode where a copy must be made.
*
* Partial copying is not done as chained SQEs need to be submitted
* as a whole set.
*
* @param r RTIO context
* @param sqes Pointer to an array of SQEs
* @param sqe_count Count of sqes in array
*
* @retval 0 success
* @retval -ENOMEM not enough room in the queue
*/
static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
{
	/* Same as rtio_sqe_copy_in_get_handles() but discards the handles */
	return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
}
/**
* @brief Copy an array of CQEs from the queue
*
* Copies from the RTIO context and its queue completion queue
* events, waiting for the given time period to gather the number
* of completions requested.
*
* @param r RTIO context
* @param cqes Pointer to an array of SQEs
* @param cqe_count Count of sqes in array
* @param timeout Timeout to wait for each completion event. Total wait time is
* potentially timeout*cqe_count at maximum.
*
* @retval copy_count Count of copied CQEs (0 to cqe_count)
*/
__syscall int rtio_cqe_copy_out(struct rtio *r,
				struct rtio_cqe *cqes,
				size_t cqe_count,
				k_timeout_t timeout);
static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
					   struct rtio_cqe *cqes,
					   size_t cqe_count,
					   k_timeout_t timeout)
{
	size_t copied = 0;
	struct rtio_cqe *cqe;
	k_timepoint_t end = sys_timepoint_calc(timeout);

	do {
		/* K_FOREVER blocks per completion; otherwise poll and spin
		 * briefly between attempts until the deadline expires
		 */
		cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
						       : rtio_cqe_consume(r);
		if (cqe == NULL) {
			Z_SPIN_DELAY(25);
			continue;
		}
		/* Copy by value, then return the entry to the pool */
		cqes[copied++] = *cqe;
		rtio_cqe_release(r, cqe);
	} while (copied < cqe_count && !sys_timepoint_expired(end));

	return copied;
}
/**
* @brief Submit I/O requests to the underlying executor
*
* Submits the queue of submission queue events to the executor.
* The executor will do the work of managing tasks representing each
* submission chain, freeing submission queue events when done, and
* producing completion queue events as submissions are completed.
*
* @param r RTIO context
* @param wait_count Number of submissions to wait for completion of.
*
* @retval 0 On success
*/
__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);

static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
{
	int res = 0;

#ifdef CONFIG_RTIO_SUBMIT_SEM
	/* TODO undefined behavior if another thread calls submit of course
	 */
	if (wait_count > 0) {
		__ASSERT(!k_is_in_isr(),
			 "expected rtio submit with wait count to be called from a thread");

		/* Arm the semaphore before submitting so a completion that
		 * arrives immediately is not missed
		 */
		k_sem_reset(r->submit_sem);
		r->submit_count = wait_count;
	}
#else
	/* Without the semaphore, remember the completion count to poll for */
	uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count;
#endif

	/* Submit the queue to the executor which consumes submissions
	 * and produces completions through ISR chains or other means.
	 */
	rtio_executor_submit(r);

	/* TODO could be nicer if we could suspend the thread and not
	 * wake up on each completion here.
	 */
#ifdef CONFIG_RTIO_SUBMIT_SEM

	if (wait_count > 0) {
		res = k_sem_take(r->submit_sem, K_FOREVER);
		__ASSERT(res == 0,
			 "semaphore was reset or timed out while waiting on completions!");
	}
#else
	/* Busy-wait (with yields) until enough completions have landed */
	while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) {
		Z_SPIN_DELAY(10);
		k_yield();
	}
#endif

	return res;
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/rtio.h>
#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
``` | /content/code_sandbox/include/zephyr/rtio/rtio.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 11,224 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_MEM_ATTR_H_
#define ZEPHYR_INCLUDE_MEM_ATTR_H_
/**
* @brief Memory-Attr Interface
* @defgroup memory_attr_interface Memory-Attr Interface
* @ingroup mem_mgmt
* @{
*/
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr/dt-bindings/memory-attr/memory-attr.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @cond INTERNAL_HIDDEN */
#define __MEM_ATTR zephyr_memory_attr
#define _FILTER(node_id, fn) \
COND_CODE_1(DT_NODE_HAS_PROP(node_id, __MEM_ATTR), \
(fn(node_id)), \
())
/** @endcond */
/**
* @brief Invokes @p fn for every status `okay` node in the tree with property
* `zephyr,memory-attr`
*
* The macro @p fn must take one parameter, which will be a node identifier
* with the `zephyr,memory-attr` property. The macro is expanded once for each
* node in the tree with status `okay`. The order that nodes are visited in is
* not specified.
*
* @param fn macro to invoke
*/
#define DT_MEMORY_ATTR_FOREACH_STATUS_OKAY_NODE(fn) \
DT_FOREACH_STATUS_OKAY_NODE_VARGS(_FILTER, fn)
/**
 * @brief memory-attr region structure.
 *
 * This structure represents the data gathered from DT about a memory-region
 * marked with memory attributes.
 */
struct mem_attr_region_t {
	/** Memory node full name (DT node path) */
	const char *dt_name;
	/** Memory region physical address */
	uintptr_t dt_addr;
	/** Memory region size */
	size_t dt_size;
	/** Memory region attributes (see zephyr/dt-bindings/memory-attr/memory-attr.h) */
	uint32_t dt_attr;
};
/**
* @brief Get the list of memory regions.
*
* Get the list of enabled memory regions with their memory-attribute as
* gathered by DT.
*
* @param region Pointer to pointer to the list of memory regions.
*
* @retval Number of memory regions returned in the parameter.
*/
size_t mem_attr_get_regions(const struct mem_attr_region_t **region);
/**
* @brief Check if a buffer has correct size and attributes.
*
* This function is used to check if a given buffer with a given set of
* attributes fully match a memory region in terms of size and attributes.
*
* This is usually used to verify that a buffer has the expected attributes
* (for example the buffer is cacheable / non-cacheable or belongs to RAM /
* FLASH, etc...) and it has been correctly allocated.
*
* The expected set of attributes for the buffer is and-matched against the
* full set of attributes for the memory region it belongs to (bitmask). So the
* buffer is considered matching when at least that set of attributes are valid
* for the memory region (but the region can be marked also with other
* attributes besides the one passed as parameter).
*
* @param addr Virtual address of the user buffer.
* @param size Size of the user buffer.
* @param attr Expected / desired attribute for the buffer.
*
* @retval 0 if the buffer has the correct size and attribute.
* @retval -ENOSYS if the operation is not supported (for example if the MMU is enabled).
* @retval -ENOTSUP if the wrong parameters were passed.
* @retval -EINVAL if the buffer has the wrong set of attributes.
* @retval -ENOSPC if the buffer is too big for the region it belongs to.
* @retval -ENOBUFS if the buffer is entirely allocated outside a memory region.
*/
int mem_attr_check_buf(void *addr, size_t size, uint32_t attr);
#ifdef __cplusplus
}
#endif
/** @} */
#endif /* ZEPHYR_INCLUDE_MEM_ATTR_H_ */
``` | /content/code_sandbox/include/zephyr/mem_mgmt/mem_attr.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 806 |
```objective-c
/*
*
*/
/**
* @file
* @brief Crypto Cipher APIs
*
* This file contains the Crypto Abstraction layer APIs.
*
* [Experimental] Users should note that the APIs can change
* as a part of ongoing development.
*/
#ifndef ZEPHYR_INCLUDE_CRYPTO_H_
#define ZEPHYR_INCLUDE_CRYPTO_H_
#include <zephyr/device.h>
#include <errno.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/crypto/hash.h>
#include "cipher.h"
/**
* @brief Crypto APIs
* @defgroup crypto Crypto
* @since 1.7
* @version 1.0.0
* @ingroup os_services
* @{
*/
/* ctx.flags values. Not all drivers support all flags.
* A user app can query the supported hw / driver
* capabilities via provided API (crypto_query_hwcaps()), and choose a
* supported config during the session setup.
*/
#define CAP_OPAQUE_KEY_HNDL BIT(0)
#define CAP_RAW_KEY BIT(1)
/* TBD to define */
#define CAP_KEY_LOADING_API BIT(2)
/** Whether the output is placed in separate buffer or not */
#define CAP_INPLACE_OPS BIT(3)
#define CAP_SEPARATE_IO_BUFS BIT(4)
/**
* These denotes if the output (completion of a cipher_xxx_op) is conveyed
* by the op function returning, or it is conveyed by an async notification
*/
#define CAP_SYNC_OPS BIT(5)
#define CAP_ASYNC_OPS BIT(6)
/** Whether the hardware/driver supports autononce feature */
#define CAP_AUTONONCE BIT(7)
/** Don't prefix IV to cipher blocks */
#define CAP_NO_IV_PREFIX BIT(8)
/* More flags to be added as necessary */
/** @brief Crypto driver API definition. */
__subsystem struct crypto_driver_api {
int (*query_hw_caps)(const struct device *dev);
/* Setup a crypto session */
int (*cipher_begin_session)(const struct device *dev, struct cipher_ctx *ctx,
enum cipher_algo algo, enum cipher_mode mode,
enum cipher_op op_type);
/* Tear down an established session */
int (*cipher_free_session)(const struct device *dev, struct cipher_ctx *ctx);
/* Register async crypto op completion callback with the driver */
int (*cipher_async_callback_set)(const struct device *dev,
cipher_completion_cb cb);
/* Setup a hash session */
int (*hash_begin_session)(const struct device *dev, struct hash_ctx *ctx,
enum hash_algo algo);
/* Tear down an established hash session */
int (*hash_free_session)(const struct device *dev, struct hash_ctx *ctx);
/* Register async hash op completion callback with the driver */
int (*hash_async_callback_set)(const struct device *dev,
hash_completion_cb cb);
};
/* Following are the public API a user app may call.
* The first two relate to crypto "session" setup / teardown. Further we
* have four cipher mode specific (CTR, CCM, CBC ...) calls to perform the
* actual crypto operation in the context of a session. Also we have an
* API to provide the callback for async operations.
*/
/**
 * @brief Query the crypto hardware capabilities
 *
 * Lets the app discover which capabilities the crypto device supports, so a
 * compatible subset can be requested later via cipher_begin_session().
 *
 * @param dev Pointer to the device structure for the driver instance.
 *
 * @return bitmask of supported options.
 */
static inline int crypto_query_hwcaps(const struct device *dev)
{
	struct crypto_driver_api *api = (struct crypto_driver_api *) dev->api;
	int caps = api->query_hw_caps(dev);

	/* Sanity-check that the driver reports a usable combination */
	__ASSERT((caps & (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY)) != 0,
		 "Driver should support at least one key type: RAW/Opaque");
	__ASSERT((caps & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS)) != 0,
		 "Driver should support at least one IO buf type: Inplace/separate");
	__ASSERT((caps & (CAP_SYNC_OPS | CAP_ASYNC_OPS)) != 0,
		 "Driver should support at least one op-type: sync/async");

	return caps;
}
/**
* @}
*/
/**
* @brief Crypto Cipher APIs
* @defgroup crypto_cipher Cipher
* @ingroup crypto
* @{
*/
/**
 * @brief Setup a crypto session
 *
 * Establishes the one-time session parameters — key, algorithm and cipher
 * mode — which stay constant for every operation in the session. Drivers
 * and/or hardware may cache this state.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param ctx Pointer to the context structure. One-time parameters such
 *            as key and key length are supplied through it; the structure
 *            documentation lists the fields the app must populate before
 *            this call.
 * @param algo The crypto algorithm to be used in this session. e.g AES
 * @param mode The cipher mode to be used in this session. e.g CBC, CTR
 * @param optype Whether we should encrypt or decrypt in this session
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_begin_session(const struct device *dev,
				       struct cipher_ctx *ctx,
				       enum cipher_algo algo,
				       enum cipher_mode mode,
				       enum cipher_op optype)
{
	struct crypto_driver_api *api = (struct crypto_driver_api *) dev->api;
	uint32_t key_flags;
	uint32_t buf_flags;
	uint32_t op_flags;

	ctx->device = dev;
	ctx->ops.cipher_mode = mode;

	/* Exactly one key type must be selected in ctx->flags */
	key_flags = ctx->flags & (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY);
	__ASSERT(key_flags != 0U, "Keytype missing: RAW Key or OPAQUE handle");
	__ASSERT(key_flags != (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY),
		 "conflicting options for keytype");

	/* Exactly one IO buffer scheme must be selected */
	buf_flags = ctx->flags & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS);
	__ASSERT(buf_flags != 0U, "IO buffer type missing");
	__ASSERT(buf_flags != (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS),
		 "conflicting options for IO buffer type");

	/* Exactly one completion model must be selected */
	op_flags = ctx->flags & (CAP_SYNC_OPS | CAP_ASYNC_OPS);
	__ASSERT(op_flags != 0U, "sync/async type missing");
	__ASSERT(op_flags != (CAP_SYNC_OPS | CAP_ASYNC_OPS),
		 "conflicting options for sync/async");

	return api->cipher_begin_session(dev, ctx, algo, mode, optype);
}
/**
 * @brief Cleanup a crypto session
 *
 * Tears down the hardware and/or driver state established by a prior
 * cipher_begin_session() call.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param ctx Crypto context of the session being released.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_free_session(const struct device *dev,
				      struct cipher_ctx *ctx)
{
	struct crypto_driver_api *api = (struct crypto_driver_api *) dev->api;

	return api->cipher_free_session(dev, ctx);
}
/**
 * @brief Registers an async crypto op completion callback with the driver
 *
 * Registers a handler the driver invokes when a request previously submitted
 * via cipher_do_op() completes. Depending on the hardware, the callback is
 * likely to run in ISR context.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param cb Pointer to application callback to be called by the driver.
 *
 * @return 0 on success, -ENOTSUP if the driver does not support async op,
 *	    negative errno code on other error.
 */
static inline int cipher_callback_set(const struct device *dev,
				      cipher_completion_cb cb)
{
	struct crypto_driver_api *api = (struct crypto_driver_api *) dev->api;

	if (api->cipher_async_callback_set == NULL) {
		/* Driver does not implement async completion */
		return -ENOTSUP;
	}

	return api->cipher_async_callback_set(dev, cb);
}
/**
 * @brief Run a single-block crypto operation (ECB cipher mode).
 *
 * Deliberately limited to one block per call; it should not be extended to
 * chain multiple blocks, for security reasons.
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output buffer pointers.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_block_op(struct cipher_ctx *ctx,
				  struct cipher_pkt *pkt)
{
	pkt->ctx = ctx;

	/* Guard against an ECB handler being invoked from a non-ECB session */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_ECB, "ECB mode "
		 "session invoking a different mode handler");

	return ctx->ops.block_crypt_hndlr(ctx, pkt);
}
/**
 * @brief Perform Cipher Block Chaining (CBC) crypto operation.
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output buffer pointers.
 * @param iv Initialization Vector (IV) for the operation. For security,
 *	     an IV value must not be reused across operations within a
 *	     session context.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_cbc_op(struct cipher_ctx *ctx,
				struct cipher_pkt *pkt, uint8_t *iv)
{
	pkt->ctx = ctx;

	/* Guard against a CBC handler being invoked from a non-CBC session */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CBC, "CBC mode "
		 "session invoking a different mode handler");

	return ctx->ops.cbc_crypt_hndlr(ctx, pkt, iv);
}
/**
 * @brief Perform Counter (CTR) mode crypto operation.
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output buffer pointers.
 * @param iv Initialization Vector (IV) for the operation. A split counter
 *	     is formed by appending the IV and ctr, so ivlen = keylen -
 *	     ctrlen, where 'ctrlen' was given at session setup via
 *	     'ctx.mode_params.ctr_params.ctr_len'. For security, an IV must
 *	     not be reused across operations within a session context. The
 *	     non-IV part of the split counter is transparent to the caller
 *	     and fully managed by the crypto provider.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_ctr_op(struct cipher_ctx *ctx,
				struct cipher_pkt *pkt, uint8_t *iv)
{
	pkt->ctx = ctx;

	/* Guard against a CTR handler being invoked from a non-CTR session */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CTR, "CTR mode "
		 "session invoking a different mode handler");

	return ctx->ops.ctr_crypt_hndlr(ctx, pkt, iv);
}
/**
 * @brief Perform Counter with CBC-MAC (CCM) mode crypto operation
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output, Associated Data (AD) and
 *	      auth tag buffer pointers.
 * @param nonce Nonce for the operation. For security, a nonce value must
 *		not be reused across operations within a session context.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_ccm_op(struct cipher_ctx *ctx,
				struct cipher_aead_pkt *pkt, uint8_t *nonce)
{
	/* AEAD packet wraps a plain cipher packet; tag that inner packet */
	pkt->pkt->ctx = ctx;

	/* Guard against a CCM handler being invoked from a non-CCM session */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CCM, "CCM mode "
		 "session invoking a different mode handler");

	return ctx->ops.ccm_crypt_hndlr(ctx, pkt, nonce);
}
/**
 * @brief Perform Galois/Counter Mode (GCM) crypto operation
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output, Associated Data (AD) and
 *	      auth tag buffer pointers.
 * @param nonce Nonce for the operation. For security, a nonce value must
 *		not be reused across operations within a session context.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_gcm_op(struct cipher_ctx *ctx,
				struct cipher_aead_pkt *pkt, uint8_t *nonce)
{
	/* AEAD packet wraps a plain cipher packet; tag that inner packet */
	pkt->pkt->ctx = ctx;

	/* Guard against a GCM handler being invoked from a non-GCM session */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_GCM, "GCM mode "
		 "session invoking a different mode handler");

	return ctx->ops.gcm_crypt_hndlr(ctx, pkt, nonce);
}
/**
* @}
*/
/**
* @brief Crypto Hash APIs
* @defgroup crypto_hash Hash
* @ingroup crypto
* @{
*/
/**
 * @brief Setup a hash session
 *
 * Establishes the one-time session parameters — notably the algorithm —
 * which stay constant for every operation in the session. Drivers and/or
 * hardware may cache this state.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param ctx Pointer to the context structure. One-time parameters such as
 *            session capabilities and algorithm are supplied through it;
 *            the structure documentation lists the fields the app must
 *            populate before this call.
 * @param algo The hash algorithm to be used in this session. e.g sha256
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int hash_begin_session(const struct device *dev,
				     struct hash_ctx *ctx,
				     enum hash_algo algo)
{
	struct crypto_driver_api *api = (struct crypto_driver_api *) dev->api;
	uint32_t buf_flags;
	uint32_t op_flags;

	ctx->device = dev;

	/* Exactly one IO buffer scheme must be selected in ctx->flags */
	buf_flags = ctx->flags & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS);
	__ASSERT(buf_flags != 0U, "IO buffer type missing");
	__ASSERT(buf_flags != (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS),
		 "conflicting options for IO buffer type");

	/* Exactly one completion model must be selected */
	op_flags = ctx->flags & (CAP_SYNC_OPS | CAP_ASYNC_OPS);
	__ASSERT(op_flags != 0U, "sync/async type missing");
	__ASSERT(op_flags != (CAP_SYNC_OPS | CAP_ASYNC_OPS),
		 "conflicting options for sync/async");

	return api->hash_begin_session(dev, ctx, algo);
}
/**
 * @brief Cleanup a hash session
 *
 * Tears down the hardware and/or driver state established by a prior
 * hash_begin_session() call. @see hash_begin_session
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param ctx Hash context of the session being released.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int hash_free_session(const struct device *dev,
				    struct hash_ctx *ctx)
{
	struct crypto_driver_api *api = (struct crypto_driver_api *) dev->api;

	return api->hash_free_session(dev, ctx);
}
/**
 * @brief Registers an async hash completion callback with the driver
 *
 * Registers a handler the driver invokes when a request previously submitted
 * via hash_compute() completes. Depending on the hardware, the callback is
 * likely to run in ISR context.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param cb Pointer to application callback to be called by the driver.
 *
 * @return 0 on success, -ENOTSUP if the driver does not support async op,
 *	    negative errno code on other error.
 */
static inline int hash_callback_set(const struct device *dev,
				    hash_completion_cb cb)
{
	struct crypto_driver_api *api = (struct crypto_driver_api *) dev->api;

	if (api->hash_async_callback_set == NULL) {
		/* Driver does not implement async completion */
		return -ENOTSUP;
	}

	return api->hash_async_callback_set(dev, cb);
}
/**
 * @brief Perform a cryptographic hash function.
 *
 * @param ctx Pointer to the hash context of this op.
 * @param pkt Structure holding the input/output.
 * @return 0 on success, negative errno code on fail.
 */
static inline int hash_compute(struct hash_ctx *ctx, struct hash_pkt *pkt)
{
	pkt->ctx = ctx;

	/* Final argument 'true' finishes the hash (contrast hash_update()) */
	return ctx->hash_hndlr(ctx, pkt, true);
}
/**
 * @brief Perform a cryptographic multipart hash operation.
 *
 * May be called zero or more times, each call passing one slice of the data;
 * all supplied pieces contribute to the hash. Call @c hash_compute() to
 * produce the final result.
 *
 * @param ctx Pointer to the hash context of this op.
 * @param pkt Structure holding the input.
 * @return 0 on success, negative errno code on fail.
 */
static inline int hash_update(struct hash_ctx *ctx, struct hash_pkt *pkt)
{
	pkt->ctx = ctx;

	/* Final argument 'false' accumulates without finishing the hash */
	return ctx->hash_hndlr(ctx, pkt, false);
}
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_CRYPTO_H_ */
``` | /content/code_sandbox/include/zephyr/crypto/crypto.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,765 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_MEM_ATTR_HEAP_H_
#define ZEPHYR_INCLUDE_MEM_ATTR_HEAP_H_
/**
* @brief Memory heaps based on memory attributes
* @defgroup memory_attr_heap Memory heaps based on memory attributes
* @ingroup mem_mgmt
* @{
*/
#include <zephyr/mem_mgmt/mem_attr.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Init the memory pool
*
 * This must be the first function to be called to initialize the memory pools
 * from all the memory regions with a software attribute.
*
* @retval 0 on success.
* @retval -EALREADY if the pool was already initialized.
* @retval -ENOMEM too many regions already allocated.
*/
int mem_attr_heap_pool_init(void);
/**
* @brief Allocate memory with a specified attribute and size.
*
* Allocates a block of memory of the specified size in bytes and with a
* specified capability / attribute. The attribute is used to select the
* correct memory heap to allocate memory from.
*
* @param attr capability / attribute requested for the memory block.
* @param bytes requested size of the allocation in bytes.
*
* @retval ptr a valid pointer to the allocated memory.
* @retval NULL if no memory is available with that attribute and size.
*/
void *mem_attr_heap_alloc(uint32_t attr, size_t bytes);
/**
* @brief Allocate aligned memory with a specified attribute, size and alignment.
*
* Allocates a block of memory of the specified size in bytes and with a
* specified capability / attribute. Takes an additional parameter specifying a
* power of two alignment in bytes.
*
* @param attr capability / attribute requested for the memory block.
* @param align power of two alignment for the returned pointer in bytes.
* @param bytes requested size of the allocation in bytes.
*
* @retval ptr a valid pointer to the allocated memory.
* @retval NULL if no memory is available with that attribute and size.
*/
void *mem_attr_heap_aligned_alloc(uint32_t attr, size_t align, size_t bytes);
/**
* @brief Free the allocated memory
*
 * Used to free the passed block of memory that must be the return value of a
 * previous call to @ref mem_attr_heap_alloc or @ref
 * mem_attr_heap_aligned_alloc.
*
* @param block block to free, must be a pointer to a block allocated by
* @ref mem_attr_heap_alloc or @ref mem_attr_heap_aligned_alloc.
*/
void mem_attr_heap_free(void *block);
/**
* @brief Get a specific memory region descriptor for a provided address
*
* Finds the memory region descriptor struct controlling the provided pointer.
*
* @param addr address to be found, must be a pointer to a block allocated by
* @ref mem_attr_heap_alloc or @ref mem_attr_heap_aligned_alloc.
*
* @retval str pointer to a memory region structure the address belongs to.
*/
const struct mem_attr_region_t *mem_attr_heap_get_region(void *addr);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_MEM_ATTR_HEAP_H_ */
``` | /content/code_sandbox/include/zephyr/mem_mgmt/mem_attr_heap.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 651 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_APP_MEMORY_APP_MEMDOMAIN_H_
#define ZEPHYR_INCLUDE_APP_MEMORY_APP_MEMDOMAIN_H_
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/kernel.h>
/**
* @brief Application memory domain APIs
* @defgroup mem_domain_apis_app Application memory domain APIs
* @ingroup mem_domain_apis
* @{
*/
#ifdef CONFIG_USERSPACE
/**
* @brief Name of the data section for a particular partition
*
* Useful for defining memory pools, or any other macro that takes a
* section name as a parameter.
*
* @param id Partition name
*/
#define K_APP_DMEM_SECTION(id) data_smem_##id##_data
/**
* @brief Name of the bss section for a particular partition
*
* Useful for defining memory pools, or any other macro that takes a
* section name as a parameter.
*
* @param id Partition name
*/
#define K_APP_BMEM_SECTION(id) data_smem_##id##_bss
/**
* @brief Place data in a partition's data section
*
* Globals tagged with this will end up in the data section for the
* specified memory partition. This data should be initialized to some
* desired value.
*
* @param id Name of the memory partition to associate this data
*/
#define K_APP_DMEM(id) Z_GENERIC_SECTION(K_APP_DMEM_SECTION(id))
/**
* @brief Place data in a partition's bss section
*
* Globals tagged with this will end up in the bss section for the
* specified memory partition. This data will be zeroed at boot.
*
* @param id Name of the memory partition to associate this data
*/
#define K_APP_BMEM(id) Z_GENERIC_SECTION(K_APP_BMEM_SECTION(id))
struct z_app_region {
void *bss_start;
size_t bss_size;
};
#define Z_APP_START(id) z_data_smem_##id##_part_start
#define Z_APP_SIZE(id) z_data_smem_##id##_part_size
#define Z_APP_BSS_START(id) z_data_smem_##id##_bss_start
#define Z_APP_BSS_SIZE(id) z_data_smem_##id##_bss_size
/* If a partition is declared with K_APPMEM_PARTITION, but never has any
* data assigned to its contents, then no symbols with its prefix will end
* up in the symbol table. This prevents gen_app_partitions.py from detecting
* that the partition exists, and the linker symbols which specify partition
* bounds will not be generated, resulting in build errors.
*
* What this inline assembly code does is define a symbol with no data.
* This should work for all arches that produce ELF binaries, see
* path_to_url
*
* We don't know what active flags/type of the pushed section were, so we are
* specific: "aw" indicates section is allocatable and writable,
* and "@progbits" indicates the section has data.
*/
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* ARM has a quirk in that '@' denotes a comment, so we have to send
* %progbits to the assembler instead.
*/
#define Z_PROGBITS_SYM "%"
#else
#define Z_PROGBITS_SYM "@"
#endif
#if defined(CONFIG_ARC) && defined(__CCAC__)
/* ARC MWDT assembler has slightly different pushsection/popsection directives
* names.
*/
#define Z_PUSHSECTION_DIRECTIV ".pushsect"
#define Z_POPSECTION_DIRECTIVE ".popsect"
#else
#define Z_PUSHSECTION_DIRECTIV ".pushsection"
#define Z_POPSECTION_DIRECTIVE ".popsection"
#endif
#define Z_APPMEM_PLACEHOLDER(name) \
__asm__ ( \
Z_PUSHSECTION_DIRECTIV " " STRINGIFY(K_APP_DMEM_SECTION(name)) \
",\"aw\"," Z_PROGBITS_SYM "progbits\n\t" \
".global " STRINGIFY(name) "_placeholder\n\t" \
STRINGIFY(name) "_placeholder:\n\t" \
Z_POPSECTION_DIRECTIVE "\n\t")
/**
* @brief Define an application memory partition with linker support
*
 * Defines a k_mem_partition with the provided name.
* This name may be used with the K_APP_DMEM and K_APP_BMEM macros to
* place globals automatically in this partition.
*
* NOTE: placeholder char variable is defined here to prevent build errors
* if a partition is defined but nothing ever placed in it.
*
* @param name Name of the k_mem_partition to declare
*/
#define K_APPMEM_PARTITION_DEFINE(name) \
extern char Z_APP_START(name)[]; \
extern char Z_APP_SIZE(name)[]; \
struct k_mem_partition name = { \
.start = (uintptr_t) &Z_APP_START(name)[0], \
.size = (size_t) &Z_APP_SIZE(name)[0], \
.attr = K_MEM_PARTITION_P_RW_U_RW \
}; \
extern char Z_APP_BSS_START(name)[]; \
extern char Z_APP_BSS_SIZE(name)[]; \
Z_GENERIC_SECTION(.app_regions.name) \
const struct z_app_region name##_region = { \
.bss_start = &Z_APP_BSS_START(name)[0], \
.bss_size = (size_t) &Z_APP_BSS_SIZE(name)[0] \
}; \
Z_APPMEM_PLACEHOLDER(name)
#else
#define K_APP_BMEM(ptn)
#define K_APP_DMEM(ptn)
#define K_APP_DMEM_SECTION(ptn) .data
#define K_APP_BMEM_SECTION(ptn) .bss
#define K_APPMEM_PARTITION_DEFINE(name)
#endif /* CONFIG_USERSPACE */
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_APP_MEMORY_APP_MEMDOMAIN_H_ */
``` | /content/code_sandbox/include/zephyr/app_memory/app_memdomain.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,218 |
```objective-c
/*
*
*/
#ifndef INCLUDE_APP_MEMORY_MEM_DOMAIN_H
#define INCLUDE_APP_MEMORY_MEM_DOMAIN_H
#include <stdint.h>
#include <stddef.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/toolchain.h>
#include <zephyr/kernel/thread.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup mem_domain_apis Memory domain APIs
* @ingroup kernel_apis
* @{
*/
#ifdef CONFIG_USERSPACE
/**
* @def K_MEM_PARTITION_DEFINE
*
* @brief Statically declare a memory partition
*/
#ifdef _ARCH_MEM_PARTITION_ALIGN_CHECK
#define K_MEM_PARTITION_DEFINE(name, start, size, attr) \
_ARCH_MEM_PARTITION_ALIGN_CHECK(start, size); \
struct k_mem_partition name =\
{ (uintptr_t)start, size, attr}
#else
#define K_MEM_PARTITION_DEFINE(name, start, size, attr) \
struct k_mem_partition name =\
{ (uintptr_t)start, size, attr}
#endif /* _ARCH_MEM_PARTITION_ALIGN_CHECK */
/**
 * @brief Memory Partition
 *
 * A memory partition is a region of memory in the linear address space
 * with a specific access policy.
 *
 * The alignment of the starting address, and the alignment of the size
 * value may have varying requirements based on the capabilities of the
 * underlying memory management hardware; arbitrary values are unlikely
 * to work.
 */
struct k_mem_partition {
	/** start address of memory partition */
	uintptr_t start;
	/** size of memory partition */
	size_t size;
	/** attribute of memory partition (architecture-defined access policy type) */
	k_mem_partition_attr_t attr;
};
/**
 * @brief Memory Domain
 *
 * A memory domain is a collection of memory partitions, used to represent
 * a user thread's access policy for the linear address space. A thread
 * may be a member of only one memory domain, but any memory domain may
 * have multiple threads that are members.
 *
 * Supervisor threads may also be a member of a memory domain; this has
 * no implications on their memory access but can be useful as any child
 * threads inherit the memory domain membership of the parent.
 *
 * A user thread belonging to a memory domain with no active partitions
 * will have guaranteed access to its own stack buffer, program text,
 * and read-only data.
 */
struct k_mem_domain {
#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
	/* Architecture-specific per-domain bookkeeping (e.g. page tables) —
	 * only present when the arch opts in via CONFIG_ARCH_MEM_DOMAIN_DATA.
	 */
	struct arch_mem_domain arch;
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
	/** partitions in the domain (capacity fixed by CONFIG_MAX_DOMAIN_PARTITIONS) */
	struct k_mem_partition partitions[CONFIG_MAX_DOMAIN_PARTITIONS];
	/** Doubly linked list of member threads */
	sys_dlist_t mem_domain_q;
	/** number of active partitions in the domain */
	uint8_t num_partitions;
};
/**
* Default memory domain
*
* All threads are a member of some memory domain, even if running in
* supervisor mode. Threads belong to this default memory domain if they
* haven't been added to or inherited membership from some other domain.
*
* This memory domain has the z_libc_partition partition for the C library
* added to it if exists.
*/
extern struct k_mem_domain k_mem_domain_default;
#else
/* To support use of IS_ENABLED for the APIs below */
struct k_mem_domain;
struct k_mem_partition;
#endif /* CONFIG_USERSPACE */
/**
* @brief Initialize a memory domain.
*
* Initialize a memory domain with given name and memory partitions.
*
* See documentation for k_mem_domain_add_partition() for details about
* partition constraints.
*
* Do not call k_mem_domain_init() on the same memory domain more than once,
* doing so is undefined behavior.
*
* @param domain The memory domain to be initialized.
* @param num_parts The number of array items of "parts" parameter.
* @param parts An array of pointers to the memory partitions. Can be NULL
* if num_parts is zero.
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
* @retval -ENOMEM if insufficient memory
*/
int k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
struct k_mem_partition *parts[]);
/**
* @brief Add a memory partition into a memory domain.
*
* Add a memory partition into a memory domain. Partitions must conform to
* the following constraints:
*
* - Partitions in the same memory domain may not overlap each other.
* - Partitions must not be defined which expose private kernel
* data structures or kernel objects.
* - The starting address alignment, and the partition size must conform to
* the constraints of the underlying memory management hardware, which
* varies per architecture.
* - Memory domain partitions are only intended to control access to memory
* from user mode threads.
* - If CONFIG_EXECUTE_XOR_WRITE is enabled, the partition must not allow
* both writes and execution.
*
* Violating these constraints may lead to CPU exceptions or undefined
* behavior.
*
* @param domain The memory domain to be added a memory partition.
* @param part The memory partition to be added
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
* @retval -ENOSPC if no free partition slots available
*/
int k_mem_domain_add_partition(struct k_mem_domain *domain,
struct k_mem_partition *part);
/**
* @brief Remove a memory partition from a memory domain.
*
* Remove a memory partition from a memory domain.
*
 * @param domain The memory domain from which the memory partition will be removed.
* @param part The memory partition to be removed
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
* @retval -ENOENT if no matching partition found
*/
int k_mem_domain_remove_partition(struct k_mem_domain *domain,
struct k_mem_partition *part);
/**
* @brief Add a thread into a memory domain.
*
* Add a thread into a memory domain. It will be removed from whatever
* memory domain it previously belonged to.
*
* @param domain The memory domain that the thread is going to be added into.
* @param thread ID of thread going to be added into the memory domain.
*
* @return 0 if successful, fails otherwise.
*/
int k_mem_domain_add_thread(struct k_mem_domain *domain,
k_tid_t thread);
#ifdef __cplusplus
}
#endif
/** @} */
#endif /* INCLUDE_APP_MEMORY_MEM_DOMAIN_H */
``` | /content/code_sandbox/include/zephyr/app_memory/mem_domain.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,323 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_APP_MEMORY_PARTITIONS_H
#define ZEPHYR_APP_MEMORY_PARTITIONS_H
#ifdef CONFIG_USERSPACE
#include <zephyr/kernel.h> /* For struct k_mem_partition */
#if defined(CONFIG_MBEDTLS)
extern struct k_mem_partition k_mbedtls_partition;
#endif /* CONFIG_MBEDTLS */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_APP_MEMORY_PARTITIONS_H */
``` | /content/code_sandbox/include/zephyr/app_memory/partitions.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 88 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for stream writes to flash
*/
#ifndef ZEPHYR_INCLUDE_STORAGE_STREAM_FLASH_H_
#define ZEPHYR_INCLUDE_STORAGE_STREAM_FLASH_H_
/**
* @brief Abstraction over stream writes to flash
*
* @defgroup stream_flash Stream to flash interface
* @since 2.3
* @version 0.1.0
* @ingroup storage_apis
* @{
*/
#include <stdbool.h>
#include <zephyr/drivers/flash.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @typedef stream_flash_callback_t
*
* @brief Signature for callback invoked after flash write completes.
*
* @details Functions of this type are invoked with a buffer containing
* data read back from the flash after a flash write has completed.
* This enables verifying that the data has been correctly stored (for
* instance by using a SHA function). The write buffer 'buf' provided in
* stream_flash_init is used as a read buffer for this purpose.
*
* @param buf Pointer to the data read.
* @param len The length of the data read.
* @param offset The offset the data was read from.
*/
typedef int (*stream_flash_callback_t)(uint8_t *buf, size_t len, size_t offset);
/**
* @brief Structure for stream flash context
*
* Users should treat these structures as opaque values and only interact
* with them through the below API.
*/
struct stream_flash_ctx {
	uint8_t *buf; /* Write buffer */
	size_t buf_len; /* Length of write buffer */
	size_t buf_bytes; /* Number of bytes currently stored in write buf */
	const struct device *fdev; /* Flash device */
	size_t bytes_written; /* Number of bytes written to flash */
	size_t offset; /* Offset from base of flash device to write area */
	size_t available; /* Available bytes in write area */
	stream_flash_callback_t callback; /* Callback invoked after write op */
#ifdef CONFIG_STREAM_FLASH_ERASE
	off_t last_erased_page_start_offset; /* Last erased offset */
#endif
	size_t write_block_size; /* Offset/size device write alignment */
	uint8_t erase_value; /* Erased-byte value of the device -- presumably
			      * taken from the flash parameters; verify against
			      * the stream_flash implementation.
			      */
};
/**
* @brief Initialize context needed for stream writes to flash.
*
* @param ctx context to be initialized
* @param fdev Flash device to operate on
* @param buf Write buffer
* @param buf_len Length of write buffer. Can not be larger than the page size.
* Must be multiple of the flash device write-block-size.
* @param offset Offset within flash device to start writing to
* @param size Number of bytes available for performing buffered write.
* If this is '0', the size will be set to the total size
* of the flash device minus the offset.
* @param cb Callback to be invoked on completed flash write operations.
*
* @return non-negative on success, negative errno code on fail
*/
int stream_flash_init(struct stream_flash_ctx *ctx, const struct device *fdev,
uint8_t *buf, size_t buf_len, size_t offset, size_t size,
stream_flash_callback_t cb);
/**
* @brief Read number of bytes written to the flash.
*
* @note api-tags: pre-kernel-ok isr-ok
*
* @param ctx context
*
* @return Number of payload bytes written to flash.
*/
size_t stream_flash_bytes_written(struct stream_flash_ctx *ctx);
/**
* @brief Process input buffers to be written to flash device in single blocks.
* Will store remainder between calls.
*
* A write with the @p flush set to true has to be issued as the last
* write request for a given context, as it concludes write of a stream,
* and flushes buffers to storage device.
*
* @warning There must not be any additional write requests issued for a flushed context,
* unless it is re-initialized, as such write attempts may result in the function
* failing and returning error.
* Once context has been flushed, it can be re-initialized and re-used for new
* stream flash session.
*
* @param ctx context
* @param data data to write
* @param len Number of bytes to write
* @param flush when true this forces any buffered data to be written to flash
*
* @return non-negative on success, negative errno code on fail
*/
int stream_flash_buffered_write(struct stream_flash_ctx *ctx, const uint8_t *data,
size_t len, bool flush);
/**
* @brief Erase the flash page to which a given offset belongs.
*
* This function erases a flash page to which an offset belongs if this page
* is not the page previously erased by the provided ctx
* (ctx->last_erased_page_start_offset).
*
* @param ctx context
* @param off offset from the base address of the flash device
*
* @return non-negative on success, negative errno code on fail
*/
int stream_flash_erase_page(struct stream_flash_ctx *ctx, off_t off);
/**
* @brief Load persistent stream write progress stored with key
* @p settings_key .
*
* This function should be called directly after @ref stream_flash_init to
* load previous stream write progress before writing any data. If the loaded
* progress has fewer bytes written than @p ctx then it will be ignored.
*
* @param ctx context
* @param settings_key key to use with the settings module for loading
* the stream write progress
*
* @return non-negative on success, negative errno code on fail
*/
int stream_flash_progress_load(struct stream_flash_ctx *ctx,
const char *settings_key);
/**
* @brief Save persistent stream write progress using key @p settings_key .
*
* @param ctx context
* @param settings_key key to use with the settings module for storing
* the stream write progress
*
* @return non-negative on success, negative errno code on fail
*/
int stream_flash_progress_save(struct stream_flash_ctx *ctx,
const char *settings_key);
/**
* @brief Clear persistent stream write progress stored with key
* @p settings_key .
*
* @param ctx context
* @param settings_key key previously used for storing the stream write progress
*
* @return non-negative on success, negative errno code on fail
*/
int stream_flash_progress_clear(struct stream_flash_ctx *ctx,
const char *settings_key);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_STORAGE_STREAM_FLASH_H_ */
``` | /content/code_sandbox/include/zephyr/storage/stream_flash.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,360 |
```objective-c
/*
*
*/
/**
* @file
* @brief Disk Access layer API
*
* This file contains APIs for disk access.
*/
#ifndef ZEPHYR_INCLUDE_STORAGE_DISK_ACCESS_H_
#define ZEPHYR_INCLUDE_STORAGE_DISK_ACCESS_H_
/**
* @brief Storage APIs
* @defgroup storage_apis Storage APIs
* @ingroup os_services
* @{
* @}
*/
/**
* @brief Disk Access APIs
* @defgroup disk_access_interface Disk Access Interface
* @ingroup storage_apis
* @{
*/
#include <zephyr/drivers/disk.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief perform any initialization
*
* This call is made by the consumer before doing any IO calls so that the
* disk or the backing device can do any initialization. Although still
* supported for legacy compatibility, users should instead call
* @ref disk_access_ioctl with the IOCTL @ref DISK_IOCTL_CTRL_INIT.
*
* Disk initialization is reference counted, so only the first successful call
* to initialize a uninitialized (or previously de-initialized) disk will
* actually initialize the disk
*
* @param[in] pdrv Disk name
*
* @return 0 on success, negative errno code on fail
*/
int disk_access_init(const char *pdrv);
/**
* @brief Get the status of disk
*
* This call is used to get the status of the disk
*
* @param[in] pdrv Disk name
*
* @return DISK_STATUS_OK or other DISK_STATUS_*s
*/
int disk_access_status(const char *pdrv);
/**
* @brief read data from disk
*
* Function to read data from disk to a memory buffer.
*
 * Note: if the disk is of NVMe type, user will need to ensure data_buf
* pointer is 4-bytes aligned.
*
* @param[in] pdrv Disk name
* @param[in] data_buf Pointer to the memory buffer to put data.
* @param[in] start_sector Start disk sector to read from
* @param[in] num_sector Number of disk sectors to read
*
* @return 0 on success, negative errno code on fail
*/
int disk_access_read(const char *pdrv, uint8_t *data_buf,
uint32_t start_sector, uint32_t num_sector);
/**
* @brief write data to disk
*
* Function write data from memory buffer to disk.
*
 * Note: if the disk is of NVMe type, user will need to ensure data_buf
* pointer is 4-bytes aligned.
*
* @param[in] pdrv Disk name
* @param[in] data_buf Pointer to the memory buffer
* @param[in] start_sector Start disk sector to write to
* @param[in] num_sector Number of disk sectors to write
*
* @return 0 on success, negative errno code on fail
*/
int disk_access_write(const char *pdrv, const uint8_t *data_buf,
uint32_t start_sector, uint32_t num_sector);
/**
* @brief Get/Configure disk parameters
*
* Function to get disk parameters and make any special device requests.
*
* @param[in] pdrv Disk name
* @param[in] cmd DISK_IOCTL_* code describing the request
* @param[in] buff Command data buffer
*
* @return 0 on success, negative errno code on fail
*/
int disk_access_ioctl(const char *pdrv, uint8_t cmd, void *buff);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_STORAGE_DISK_ACCESS_H_ */
``` | /content/code_sandbox/include/zephyr/storage/disk_access.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 766 |
```objective-c
/*
*
*/
/**
* @file
* @brief Statistics.
*
* Statistics are per-module event counters for troubleshooting, maintenance,
* and usage monitoring. Statistics are organized into named "groups", with
* each group consisting of a set of "entries". An entry corresponds to an
* individual counter. Each entry can optionally be named if the STATS_NAMES
* setting is enabled. Statistics can be retrieved with the mcumgr management
* subsystem.
*
* There are two, largely duplicated, statistics sections here, in order to
* provide the optional ability to name statistics.
*
* STATS_SECT_START/END actually declare the statistics structure definition,
* STATS_SECT_DECL() creates the structure declaration so you can declare
* these statistics as a global structure, and STATS_NAME_START/END are how
* you name the statistics themselves.
*
* Statistics entries can be declared as any of several integer types.
* However, all statistics in a given structure must be of the same size, and
* they are all unsigned.
*
* - STATS_SECT_ENTRY(): default statistic entry, 32-bits.
*
* - STATS_SECT_ENTRY16(): 16-bits. Smaller statistics if you need to fit into
* specific RAM or code size numbers.
*
* - STATS_SECT_ENTRY32(): 32-bits.
*
* - STATS_SECT_ENTRY64(): 64-bits. Useful for storing chunks of data.
*
* Following the static entry declaration is the statistic names declaration.
 * This is compiled out when the CONFIG_STATS_NAMES setting is undefined.
*
* When CONFIG_STATS_NAMES is defined, the statistics names are stored and
* returned to the management APIs. When the setting is undefined, temporary
* names are generated as needed with the following format:
*
* s<stat-idx>
*
* E.g., "s0", "s1", etc.
*/
#ifndef ZEPHYR_INCLUDE_STATS_STATS_H_
#define ZEPHYR_INCLUDE_STATS_STATS_H_
#include <stddef.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @brief Mapping of a statistic entry's offset to its human-readable name.
 *
 * Arrays of these are generated by the STATS_NAME()/STATS_NAME_START()
 * macros and are only present when CONFIG_STATS_NAMES is enabled.
 */
struct stats_name_map {
	uint16_t snm_off;     /* Entry offset from the start of the group struct */
	const char *snm_name; /* Entry name (stringified field name) */
} __attribute__((packed));
/** @brief Header embedded at the start of every statistics group struct.
 *
 * Describes the group (name, entry size and count, optional name map) and
 * links the group into the global list of registered groups.
 */
struct stats_hdr {
	const char *s_name; /* Group name; unique among registered groups */
	uint8_t s_size;     /* Size of each entry in bytes: 2, 4 or 8 */
	uint16_t s_cnt;     /* Number of entries in the group */
	uint8_t s_pad1;     /* Unused; presumably explicit padding -- confirm */
#ifdef CONFIG_STATS_NAMES
	const struct stats_name_map *s_map; /* Offset-to-name map for entries */
	int s_map_cnt;                      /* Number of items in s_map */
#endif
	struct stats_hdr *s_next; /* Next group in the registered-group list */
};
/**
* @brief Declares a stat group struct.
*
* @param group__ The name to assign to the structure tag.
*/
#define STATS_SECT_DECL(group__) \
struct stats_ ## group__
/**
* @brief Ends a stats group struct definition.
*/
#define STATS_SECT_END }
/* The following macros depend on whether CONFIG_STATS is defined. If it is
* not defined, then invocations of these macros get compiled out.
*/
#ifdef CONFIG_STATS
/**
* @brief Begins a stats group struct definition.
*
* @param group__ The stats group struct name.
*/
#define STATS_SECT_START(group__) \
STATS_SECT_DECL(group__) { \
struct stats_hdr s_hdr;
/**
* @brief Declares a 32-bit stat entry inside a group struct.
*
* @param var__ The name to assign to the entry.
*/
#define STATS_SECT_ENTRY(var__) uint32_t var__;
/**
* @brief Declares a 16-bit stat entry inside a group struct.
*
* @param var__ The name to assign to the entry.
*/
#define STATS_SECT_ENTRY16(var__) uint16_t var__;
/**
* @brief Declares a 32-bit stat entry inside a group struct.
*
* @param var__ The name to assign to the entry.
*/
#define STATS_SECT_ENTRY32(var__) uint32_t var__;
/**
* @brief Declares a 64-bit stat entry inside a group struct.
*
* @param var__ The name to assign to the entry.
*/
#define STATS_SECT_ENTRY64(var__) uint64_t var__;
/**
* @brief Increases a statistic entry by the specified amount.
*
* Increases a statistic entry by the specified amount. Compiled out if
* CONFIG_STATS is not defined.
*
* @param group__ The group containing the entry to increase.
* @param var__ The statistic entry to increase.
* @param n__ The amount to increase the statistic entry by.
*/
#define STATS_INCN(group__, var__, n__) \
((group__).var__ += (n__))
/**
* @brief Increments a statistic entry.
*
* Increments a statistic entry by one. Compiled out if CONFIG_STATS is not
* defined.
*
* @param group__ The group containing the entry to increase.
* @param var__ The statistic entry to increase.
*/
#define STATS_INC(group__, var__) \
STATS_INCN(group__, var__, 1)
/**
* @brief Set a statistic entry to the specified amount.
*
* Set a statistic entry to the specified amount. Compiled out if
* CONFIG_STATS is not defined.
*
* @param group__ The group containing the entry to increase.
* @param var__ The statistic entry to increase.
* @param n__ The amount to set the statistic entry to.
*/
#define STATS_SET(group__, var__, n__) \
((group__).var__ = (n__))
/**
* @brief Sets a statistic entry to zero.
*
* Sets a statistic entry to zero. Compiled out if CONFIG_STATS is not
* defined.
*
* @param group__ The group containing the entry to clear.
* @param var__ The statistic entry to clear.
*/
#define STATS_CLEAR(group__, var__) \
((group__).var__ = 0)
#define STATS_SIZE_16 (sizeof(uint16_t))
#define STATS_SIZE_32 (sizeof(uint32_t))
#define STATS_SIZE_64 (sizeof(uint64_t))
#define STATS_SIZE_INIT_PARMS(group__, size__) \
(size__), \
((sizeof(group__)) - sizeof(struct stats_hdr)) / (size__)
/**
* @brief Initializes and registers a statistics group.
*
* @param group__ The statistics group to initialize and
* register.
* @param size__ The size of each entry in the statistics group,
* in bytes. Must be one of: 2 (16-bits), 4
* (32-bits) or 8 (64-bits).
* @param name__ The name of the statistics group to register.
* This name must be unique among all
* statistics groups.
*
* @return 0 on success; negative error code on failure.
*/
#define STATS_INIT_AND_REG(group__, size__, name__) \
stats_init_and_reg( \
&(group__).s_hdr, \
(size__), \
(sizeof(group__) - sizeof(struct stats_hdr)) / (size__), \
STATS_NAME_INIT_PARMS(group__), \
(name__))
/**
* @brief Initializes a statistics group.
*
 * @param shdr			The header of the statistics structure,
* contains things like statistic section
* name, size of statistics entries, number of
* statistics, etc.
* @param size The size of each individual statistics
* element, in bytes. Must be one of: 2
* (16-bits), 4 (32-bits) or 8 (64-bits).
* @param cnt The number of elements in the stats group.
* @param map The mapping of stat offset to name.
* @param map_cnt The number of items in the statistics map
 */
void stats_init(struct stats_hdr *shdr, uint8_t size, uint16_t cnt,
const struct stats_name_map *map, uint16_t map_cnt);
/**
* @brief Registers a statistics group to be managed.
*
* @param name The name of the statistics group to register.
* This name must be unique among all
* statistics groups. If the name is a
* duplicate, this function will return
* -EALREADY.
* @param shdr The statistics group to register.
*
* @return 0 on success, non-zero error code on failure.
*/
int stats_register(const char *name, struct stats_hdr *shdr);
/**
* @brief Initializes and registers a statistics group.
*
* Initializes and registers a statistics group. Note: it is recommended to
* use the STATS_INIT_AND_REG macro instead of this function.
*
* @param hdr The header of the statistics group to
* initialize and register.
* @param size The size of each individual statistics
* element, in bytes. Must be one of: 2
* (16-bits), 4 (32-bits) or 8 (64-bits).
* @param cnt The number of elements in the stats group.
* @param map The mapping of stat offset to name.
* @param map_cnt The number of items in the statistics map
* @param name The name of the statistics group to register.
* This name must be unique among all
* statistics groups. If the name is a
* duplicate, this function will return
* -EALREADY.
*
* @return 0 on success; negative error code on failure.
*
* @see STATS_INIT_AND_REG
*/
int stats_init_and_reg(struct stats_hdr *hdr, uint8_t size, uint16_t cnt,
const struct stats_name_map *map, uint16_t map_cnt,
const char *name);
/**
* Zeroes the specified statistics group.
*
* @param shdr The statistics group to clear.
*/
void stats_reset(struct stats_hdr *shdr);
/** @typedef stats_walk_fn
* @brief Function that gets applied to every stat entry during a walk.
*
* @param hdr The group containing the stat entry being
* walked.
* @param arg Optional argument.
* @param name The name of the statistic entry to process
* @param off The offset of the entry, from `hdr`.
*
* @return 0 if the walk should proceed;
* nonzero to abort the walk.
*/
typedef int stats_walk_fn(struct stats_hdr *hdr, void *arg,
const char *name, uint16_t off);
/**
* @brief Applies a function to every stat entry in a group.
*
* @param hdr The stats group to operate on.
* @param walk_cb The function to apply to each stat entry.
* @param arg Optional argument to pass to the callback.
*
* @return 0 if the walk completed;
* nonzero if the walk was aborted.
*/
int stats_walk(struct stats_hdr *hdr, stats_walk_fn *walk_cb, void *arg);
/** @typedef stats_group_walk_fn
* @brief Function that gets applied to every registered stats group.
*
* @param hdr The stats group being walked.
* @param arg Optional argument.
*
* @return 0 if the walk should proceed;
* nonzero to abort the walk.
*/
typedef int stats_group_walk_fn(struct stats_hdr *hdr, void *arg);
/**
* @brief Applies a function every registered statistics group.
*
* @param walk_cb The function to apply to each stat group.
* @param arg Optional argument to pass to the callback.
*
* @return 0 if the walk completed;
* nonzero if the walk was aborted.
*/
int stats_group_walk(stats_group_walk_fn *walk_cb, void *arg);
/**
* @brief Retrieves the next registered statistics group.
*
* @param cur The group whose successor is being retrieved, or
* NULL to retrieve the first group.
*
* @return Pointer to the retrieved group on success;
* NULL if no more groups remain.
*/
struct stats_hdr *stats_group_get_next(const struct stats_hdr *cur);
/**
* @brief Retrieves the statistics group with the specified name.
*
* @param name The name of the statistics group to look up.
*
* @return Pointer to the retrieved group on success;
* NULL if there is no matching registered group.
*/
struct stats_hdr *stats_group_find(const char *name);
#else /* CONFIG_STATS */
#define STATS_SECT_START(group__) \
STATS_SECT_DECL(group__) {
#define STATS_SECT_ENTRY(var__)
#define STATS_SECT_ENTRY16(var__)
#define STATS_SECT_ENTRY32(var__)
#define STATS_SECT_ENTRY64(var__)
#define STATS_RESET(var__)
#define STATS_SIZE_INIT_PARMS(group__, size__)
#define STATS_INCN(group__, var__, n__)
#define STATS_INC(group__, var__)
#define STATS_SET(group__, var__)
#define STATS_CLEAR(group__, var__)
#define STATS_INIT_AND_REG(group__, size__, name__) (0)
#endif /* !CONFIG_STATS */
#ifdef CONFIG_STATS_NAMES
#define STATS_NAME_MAP_NAME(sectname__) stats_map_ ## sectname__
#define STATS_NAME_START(sectname__) \
static const struct stats_name_map STATS_NAME_MAP_NAME(sectname__)[] = {
#define STATS_NAME(sectname__, entry__) \
{ offsetof(STATS_SECT_DECL(sectname__), entry__), #entry__ },
#define STATS_NAME_END(sectname__) }
#define STATS_NAME_INIT_PARMS(name__) \
&(STATS_NAME_MAP_NAME(name__)[0]), \
(sizeof(STATS_NAME_MAP_NAME(name__)) / sizeof(struct stats_name_map))
#else /* CONFIG_STATS_NAMES */
#define STATS_NAME_START(name__)
#define STATS_NAME(name__, entry__)
#define STATS_NAME_END(name__)
#define STATS_NAME_INIT_PARMS(name__) NULL, 0
#endif /* CONFIG_STATS_NAMES */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_STATS_STATS_H_ */
``` | /content/code_sandbox/include/zephyr/stats/stats.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,928 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_INTERNAL_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_INTERNAL_H_
#include <zephyr/types.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/logging/log_core.h>
#include <zephyr/sys/mpsc_pbuf.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Header contains declarations of functions used internally in the logging,
* shared between various portions of logging subsystem. Functions are internal
* not intended to be used outside, including logging backends.
*/
/** @brief Structure wrapper to be used for memory section. */
struct log_mpsc_pbuf {
struct mpsc_pbuf_buffer buf;
};
/** @brief Structure wrapper to be used for memory section. */
struct log_msg_ptr {
union log_msg_generic *msg;
};
/** @brief Indicate to the log core that one log message has been dropped.
*
* @param buffered True if dropped message was already buffered and it is being
* dropped to free space for another message. False if message is being dropped
* because allocation failed.
*/
void z_log_dropped(bool buffered);
/** @brief Read and clear current drop indications counter.
*
* @return Dropped count.
*/
uint32_t z_log_dropped_read_and_clear(void);
/** @brief Check if there are any pending drop notifications.
*
* @retval true Pending unreported drop indications.
* @retval false No pending unreported drop indications.
*/
bool z_log_dropped_pending(void);
/** @brief Free allocated buffer.
*
* @param buf Buffer.
*/
void z_log_free(void *buf);
/* Initialize runtime filters */
void z_log_runtime_filters_init(void);
/* Initialize links. */
void z_log_links_initiate(void);
/* Activate links.
* Attempt to activate links,
*
* @param active_mask Mask with links to activate. N bit set indicates that Nth
* link should be activated.
*
* @param[in, out] offset Offset assigned to domains. Initialize to 0 before first use.
*
* @return Mask with links that still remain inactive.
*/
uint32_t z_log_links_activate(uint32_t active_mask, uint8_t *offset);
/* Notify log_core that a backend was enabled. */
void z_log_notify_backend_enabled(void);
/** @brief Get pointer to the filter set of the log source.
*
* @param source_id Source ID.
*
* @return Pointer to the filter set.
*/
static inline uint32_t *z_log_dynamic_filters_get(uint32_t source_id)
{
	/* Dynamic filter words live in the log_dynamic linker section,
	 * one element per log source, indexed by source ID.
	 */
	return &TYPE_SECTION_START(log_dynamic)[source_id].filters;
}
/** @brief Get number of registered sources. */
static inline uint32_t z_log_sources_count(void)
{
	/* The source ID of the one-past-the-end element of the log_const
	 * section equals the number of registered sources.
	 */
	return log_const_source_id(TYPE_SECTION_END(log_const));
}
/** @brief Return number of external domains.
*
* @return Number of external domains.
*/
uint8_t z_log_ext_domain_count(void);
/** @brief Initialize module for handling logging message. */
void z_log_msg_init(void);
/** @brief Commit log message.
*
* @param msg Message.
*/
void z_log_msg_commit(struct log_msg *msg);
/** @brief Get pending log message.
*
* @param[out] backoff Recommended backoff needed to maintain ordering of processed
* messages. Used only when links are using dedicated buffers.
*/
union log_msg_generic *z_log_msg_claim(k_timeout_t *backoff);
/** @brief Free message.
*
* @param msg Message.
*/
void z_log_msg_free(union log_msg_generic *msg);
/** @brief Check if there are any message pending.
*
* @retval true if at least one message is pending.
* @retval false if no message is pending.
*/
bool z_log_msg_pending(void);
/* Drop-notification callback matching the mpsc_pbuf signature. Invoked when
 * a buffered message is overwritten to make room; counts it as a dropped,
 * already-buffered message (hence z_log_dropped(true)). Both arguments are
 * unused.
 */
static inline void z_log_notify_drop(const struct mpsc_pbuf_buffer *buffer,
				     const union mpsc_pbuf_generic *item)
{
	ARG_UNUSED(buffer);
	ARG_UNUSED(item);

	z_log_dropped(true);
}
/** @brief Get tag.
*
* @return Tag. Null if feature is disabled.
*/
const char *z_log_get_tag(void);
/** @brief Check if domain is local.
*
* @param domain_id Domain ID.
*
* @return True if domain is local.
*/
static inline bool z_log_is_local_domain(uint8_t domain_id)
{
	if (!IS_ENABLED(CONFIG_LOG_MULTIDOMAIN)) {
		/* Single-domain build: every message is local. */
		return true;
	}

	return domain_id == Z_LOG_LOCAL_DOMAIN_ID;
}
/** @brief Get timestamp.
*
* @return Timestamp.
*/
log_timestamp_t z_log_timestamp(void);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_INTERNAL_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_internal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 952 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_LOG_BACKEND_BLE_H_
#define ZEPHYR_LOG_BACKEND_BLE_H_
#include <stdbool.h>
/**
* @brief Raw adv UUID data to add the ble backend for the use with apps
* such as the NRF Toolbox
*
*/
#define LOGGER_BACKEND_BLE_ADV_UUID_DATA \
0x9E, 0xCA, 0xDC, 0x24, 0x0E, 0xE5, 0xA9, 0xE0, 0x93, 0xF3, 0xA3, 0xB5, 0x01, 0x00, 0x40, \
0x6E
/**
* @brief Hook for application to know when the ble backend
* is enabled or disabled.
* @param backend_status True if the backend is enabled or false if disabled
* @param ctx User context
*
*/
typedef void (*logger_backend_ble_hook)(bool backend_status, void *ctx);
/**
* @brief Allows application to add a hook for the status of the BLE
* logger backend.
 * @details The BLE logger backend is enabled or disabled automatically by
* the subscription of the notification characteristic of this BLE
* Logger backend service.
*
* @param hook The hook that will be called when the status of the backend changes
* @param ctx User context for whenever the hook is called
*/
void logger_backend_ble_set_hook(logger_backend_ble_hook hook, void *ctx);
#endif /* ZEPHYR_LOG_BACKEND_BLE_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_backend_ble.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 330 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for flash map
*/
#ifndef ZEPHYR_INCLUDE_STORAGE_FLASH_MAP_H_
#define ZEPHYR_INCLUDE_STORAGE_FLASH_MAP_H_
/**
* @brief Abstraction over flash partitions/areas and their drivers
*
* @defgroup flash_area_api flash area Interface
* @since 1.11
* @version 1.0.0
* @ingroup storage_apis
* @{
*/
/*
* This API makes it possible to operate on flash areas easily and
* effectively.
*
* The system contains global data about flash areas. Every area
* contains an ID number, offset, and length.
*/
/**
*
*/
#include <zephyr/types.h>
#include <stddef.h>
#include <sys/types.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Provided for compatibility with MCUboot */
#define SOC_FLASH_0_ID 0
/** Provided for compatibility with MCUboot */
#define SPI_FLASH_0_ID 1
/**
* @brief Flash partition
*
* This structure represents a fixed-size partition on a flash device.
* Each partition contains one or more flash sectors.
*/
/**
 * @brief Flash partition
 *
 * This structure represents a fixed-size partition on a flash device.
 * Each partition contains one or more flash sectors.
 */
struct flash_area {
	/** ID number */
	uint8_t fa_id;
	/** Unused; name suggests explicit padding -- confirm before relying on it */
	uint16_t pad16;
	/** Start offset from the beginning of the flash device */
	off_t fa_off;
	/** Total size */
	size_t fa_size;
	/** Backing flash device */
	const struct device *fa_dev;
#if CONFIG_FLASH_MAP_LABELS
	/** Partition label if defined in DTS. Otherwise NULL. */
	const char *fa_label;
#endif
};
/**
* @brief Structure for transfer flash sector boundaries
*
* This template is used for presentation of flash memory structure. It
* consumes much less RAM than @ref flash_area
*/
struct flash_sector {
/** Sector offset from the beginning of the flash device */
off_t fs_off;
/** Sector size in bytes */
size_t fs_size;
};
#if defined(CONFIG_FLASH_AREA_CHECK_INTEGRITY)
/**
* @brief Structure for verify flash region integrity
*
* This is used to pass data to be used to check flash integrity using SHA-256
* algorithm.
*/
struct flash_area_check {
	const uint8_t *match;	/**< 256 bits match vector (expected SHA-256) */
	size_t clen;		/**< Content len to be compared */
	size_t off;		/**< Start Offset */
	uint8_t *rbuf;		/**< Temporary read buffer */
	size_t rblen;		/**< Size of read buffer */
};
/**
* Verify flash memory length bytes integrity from a flash area. The start
* point is indicated by an offset value.
*
* @param[in] fa Flash area
 * @param[in] fac Flash area check integrity data
*
* @return 0 on success, negative errno code on fail
*/
int flash_area_check_int_sha256(const struct flash_area *fa,
const struct flash_area_check *fac);
#endif
/**
* @brief Retrieve partitions flash area from the flash_map.
*
* Function Retrieves flash_area from flash_map for given partition.
*
* @param[in] id ID of the flash partition.
* @param[out] fa Pointer which has to reference flash_area. If
* @p ID is unknown, it will be NULL on output.
*
* @return 0 on success, -EACCES if the flash_map is not available ,
* -ENOENT if @p ID is unknown, -ENODEV if there is no driver attached
* to the area.
*/
int flash_area_open(uint8_t id, const struct flash_area **fa);
/**
* @brief Close flash_area
*
* Reserved for future usage and external projects compatibility reason.
* Currently is NOP.
*
* @param[in] fa Flash area to be closed.
*/
void flash_area_close(const struct flash_area *fa);
/**
* @brief Read flash area data
*
* Read data from flash area. Area readout boundaries are asserted before read
* request. API has the same limitation regard read-block alignment and size
* as wrapped flash driver.
*
* @param[in] fa Flash area
* @param[in] off Offset relative from beginning of flash area to read
* @param[out] dst Buffer to store read data
* @param[in] len Number of bytes to read
*
* @return 0 on success, negative errno code on fail.
*/
int flash_area_read(const struct flash_area *fa, off_t off, void *dst,
size_t len);
/**
 * @brief Write data to flash area
 *
 * Write data to flash area. Area write boundaries are asserted before write
 * request. API has the same limitation regarding write-block alignment and size
 * as the wrapped flash driver.
 *
 * @param[in] fa  Flash area
 * @param[in] off Offset relative from beginning of flash area to write
 * @param[in] src Buffer with data to be written
 * @param[in] len Number of bytes to write
 *
 * @return 0 on success, negative errno code on fail.
 */
int flash_area_write(const struct flash_area *fa, off_t off, const void *src,
		     size_t len);
/**
 * @brief Erase flash area
 *
 * Erase given flash area range. Area boundaries are asserted before erase
 * request. API has the same limitation regarding erase-block alignment and size
 * as the wrapped flash driver.
 *
 * @param[in] fa  Flash area
 * @param[in] off Offset relative from beginning of flash area.
 * @param[in] len Number of bytes to be erased
 *
 * @return 0 on success, negative errno code on fail.
 */
int flash_area_erase(const struct flash_area *fa, off_t off, size_t len);
/**
 * @brief Erase flash area or fill with erase-value
 *
 * On program-erase devices this function behaves exactly like flash_area_erase.
 * On RAM non-volatile devices it will call erase, if the driver provides such
 * a callback, or will fill the given range with the erase-value defined by the
 * driver.
 * This function should only be used by code that has not been written
 * to directly support devices that do not require erase and that relies on
 * the device being erased prior to some operations.
 * Note that emulated erase, on devices that do not require it, is done
 * via write, which affects the endurance of the device.
 *
 * @see flash_area_erase()
 * @see flash_flatten()
 *
 * @param[in] fa  Flash area
 * @param[in] off Offset relative from beginning of flash area.
 * @param[in] len Number of bytes to be erased
 *
 * @return 0 on success, negative errno code on fail.
 */
int flash_area_flatten(const struct flash_area *fa, off_t off, size_t len);
/**
 * @brief Get write block size of the flash area
 *
 * Currently the write block size might be treated as the read block size,
 * although most drivers support unaligned readout.
 *
 * @param[in] fa Flash area
 *
 * @return Alignment restriction for flash writes in [B].
 */
uint32_t flash_area_align(const struct flash_area *fa);
/**
 * Retrieve info about sectors within the area.
 *
 * @param[in]     fa_id   Given flash area ID
 * @param[in,out] count   On input capacity of @p sectors, on output number of
 *                        sectors retrieved.
 * @param[out]    sectors Buffer for sectors data
 *
 * @return 0 on success, negative errno code on fail. Especially returns
 * -ENOMEM if there are too many flash pages on the flash_area to fit in the
 * array.
 */
int flash_area_get_sectors(int fa_id, uint32_t *count,
			   struct flash_sector *sectors);
/**
* Flash map iteration callback
*
* @param fa flash area
* @param user_data User supplied data
*
*/
typedef void (*flash_area_cb_t)(const struct flash_area *fa,
void *user_data);
/**
* Iterate over flash map
*
* @param user_cb User callback
* @param user_data User supplied data
*/
void flash_area_foreach(flash_area_cb_t user_cb, void *user_data);
/**
 * Check whether the given flash area has a supporting flash driver
 * in the system.
 *
 * @param[in] fa Flash area.
 *
 * @return 1 on success, -ENODEV if no driver match.
 */
int flash_area_has_driver(const struct flash_area *fa);
/**
* Get driver for given flash area.
*
* @param[in] fa Flash area.
*
* @return device driver.
*/
const struct device *flash_area_get_device(const struct flash_area *fa);
#if CONFIG_FLASH_MAP_LABELS
/**
* Get the label property from the device tree
*
* @param[in] fa Flash area.
*
* @return The label property if it is defined, otherwise NULL
*/
const char *flash_area_label(const struct flash_area *fa);
#endif
/**
* Get the value expected to be read when accessing any erased
* flash byte.
* This API is compatible with the MCUBoot's porting layer.
*
* @param fa Flash area.
*
* @return Byte value of erase memory.
*/
uint8_t flash_area_erased_val(const struct flash_area *fa);
/**
* Returns non-0 value if fixed-partition of given DTS node label exists.
*
* @param label DTS node label
*
* @return non-0 if fixed-partition node exists and is enabled;
* 0 if node does not exist, is not enabled or is not fixed-partition.
*/
#define FIXED_PARTITION_EXISTS(label) DT_FIXED_PARTITION_EXISTS(DT_NODELABEL(label))
/**
* Get flash area ID from fixed-partition DTS node label
*
* @param label DTS node label of a partition
*
* @return flash area ID
*/
#define FIXED_PARTITION_ID(label) DT_FIXED_PARTITION_ID(DT_NODELABEL(label))
/**
* Get fixed-partition offset from DTS node label
*
* @param label DTS node label of a partition
*
* @return fixed-partition offset, as defined for the partition in DTS.
*/
#define FIXED_PARTITION_OFFSET(label) DT_REG_ADDR(DT_NODELABEL(label))
/**
* Get fixed-partition offset from DTS node
*
* @param node DTS node of a partition
*
* @return fixed-partition offset, as defined for the partition in DTS.
*/
#define FIXED_PARTITION_NODE_OFFSET(node) DT_REG_ADDR(node)
/**
 * Get fixed-partition size for DTS node label
 *
 * @param label DTS node label
 *
 * @return fixed-partition size, as defined for the partition in DTS.
 */
#define FIXED_PARTITION_SIZE(label) DT_REG_SIZE(DT_NODELABEL(label))
/**
* Get fixed-partition size for DTS node
*
* @param node DTS node of a partition
*
* @return fixed-partition size, as defined for the partition in DTS.
*/
#define FIXED_PARTITION_NODE_SIZE(node) DT_REG_SIZE(node)
/**
* Get device pointer for device the area/partition resides on
*
* @param label DTS node label of a partition
*
* @return const struct device type pointer
*/
#define FLASH_AREA_DEVICE(label) \
DEVICE_DT_GET(DT_MTD_FROM_FIXED_PARTITION(DT_NODE_BY_FIXED_PARTITION_LABEL(label)))
/**
* Get device pointer for device the area/partition resides on
*
* @param label DTS node label of a partition
*
* @return Pointer to a device.
*/
#define FIXED_PARTITION_DEVICE(label) \
DEVICE_DT_GET(DT_MTD_FROM_FIXED_PARTITION(DT_NODELABEL(label)))
/**
* Get device pointer for device the area/partition resides on
*
* @param node DTS node of a partition
*
* @return Pointer to a device.
*/
#define FIXED_PARTITION_NODE_DEVICE(node) \
DEVICE_DT_GET(DT_MTD_FROM_FIXED_PARTITION(node))
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_STORAGE_FLASH_MAP_H_ */
``` | /content/code_sandbox/include/zephyr/storage/flash_map.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,531 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_CUSTOM_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_CUSTOM_H_
#include <zephyr/logging/log_output.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @brief Custom logging output formatting.
* @ingroup log_output
* @{
*/
/** @brief Process log messages from an external output function set with
* log_custom_output_msg_set
*
* Function is using provided context with the buffer and output function to
* process formatted string and output the data.
*
* @param log_output Pointer to the log output instance.
* @param msg Log message.
* @param flags Optional flags.
*/
void log_custom_output_msg_process(const struct log_output *log_output,
struct log_msg *msg, uint32_t flags);
/** @brief Set the formatting log function that will be applied with LOG_OUTPUT_CUSTOM
*
* @param format Pointer to the external formatter function
*/
void log_custom_output_msg_set(log_format_func_t format);
/**
* @brief Prototype of a printer function that can print the given timestamp
* into a specific logger instance.
*
* Example usage:
* @code{.c}
* log_timestamp_printer_t *printer = ...;
* printer(log_instance, "%02u:%02u", hours, minutes);
* @endcode
*
* @param output The logger instance to write to
* @param fmt The format string
* @param ... optional arguments for the format string
*/
typedef int (*log_timestamp_printer_t)(const struct log_output *output, const char *fmt, ...);
/**
 * @brief Prototype of the function that will apply custom formatting
 * to a timestamp when LOG_OUTPUT_FORMAT_CUSTOM_TIMESTAMP
 *
 * Example function:
 * @code{.c}
 * int custom_timestamp_formatter(const struct log_output* output,
 *                                const log_timestamp_t timestamp,
 *                                const log_timestamp_printer_t printer) {
 *     return printer(output, "%d ", timestamp);
 * }
 * @endcode
 *
 * @param output The logger instance to write to
 * @param timestamp The timestamp value to be formatted
 * @param printer The printing function to use when formatting the timestamp.
 */
typedef int (*log_timestamp_format_func_t)(const struct log_output *output,
					   const log_timestamp_t timestamp,
					   const log_timestamp_printer_t printer);
/** @brief Format the timestamp with an external function.
 *
 * Function is using provided context with the buffer and output function to
 * process formatted string and output the data.
 *
 * @param output Pointer to the log output instance.
 * @param timestamp The timestamp value to be formatted.
 * @param printer The printing function to use when formatting the timestamp.
 */
int log_custom_timestamp_print(const struct log_output *output, const log_timestamp_t timestamp,
			       const log_timestamp_printer_t printer);
/** @brief Set the timestamp formatting function that will be applied
* when LOG_OUTPUT_FORMAT_CUSTOM_TIMESTAMP
*
* @param format Pointer to the external formatter function
*/
void log_custom_timestamp_set(log_timestamp_format_func_t format);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_CUSTOM_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_output_custom.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 641 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_INSTANCE_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_INSTANCE_H_
#include <zephyr/types.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @brief Constant data associated with the source of log messages. */
struct log_source_const_data {
const char *name;
uint8_t level;
#ifdef CONFIG_NIOS2
/* Workaround alert! Dummy data to ensure that structure is >8 bytes.
* Nios2 uses global pointer register for structures <=8 bytes and
* apparently does not handle well variables placed in custom sections.
*/
uint32_t dummy;
#endif
};
/** @brief Dynamic data associated with the source of log messages. */
struct log_source_dynamic_data {
uint32_t filters;
#ifdef CONFIG_NIOS2
/* Workaround alert! Dummy data to ensure that structure is >8 bytes.
* Nios2 uses global pointer register for structures <=8 bytes and
* apparently does not handle well variables placed in custom sections.
*/
uint32_t dummy[2];
#endif
#if defined(CONFIG_64BIT)
/* Workaround: Ensure that structure size is a multiple of 8 bytes. */
uint32_t dummy_64;
#endif
};
/** @internal
*
* Creates name of variable and section for constant log data.
*
* @param _name Name.
*/
#define Z_LOG_ITEM_CONST_DATA(_name) UTIL_CAT(log_const_, _name)
/** @internal
*
* Create static logging instance in read only memory.
*
* @param _name name of the module. With added prefix forms name of variable and
* memory section.
*
* @param _str_name Name of the module that will be used when message is formatted.
*
* @param _level Messages up to this level are compiled in.
*/
#define Z_LOG_CONST_ITEM_REGISTER(_name, _str_name, _level) \
const STRUCT_SECTION_ITERABLE_ALTERNATE(log_const, \
log_source_const_data, \
Z_LOG_ITEM_CONST_DATA(_name)) = \
{ \
.name = _str_name, \
.level = (_level), \
}
/** @brief Initialize pointer to logger instance with explicitly provided object.
*
* Macro can be used to initialized a pointer with object that is not unique to
* the given instance, thus not created with @ref LOG_INSTANCE_REGISTER.
*
* @param _name Name of the structure element for holding logging object.
* @param _object Pointer to a logging instance object.
*/
#define LOG_OBJECT_PTR_INIT(_name, _object) \
IF_ENABLED(CONFIG_LOG, (._name = _object,))
/** @internal
*
* Create a name for which contains module and instance names.
*/
#define Z_LOG_INSTANCE_FULL_NAME(_module_name, _inst_name) \
UTIL_CAT(_module_name, UTIL_CAT(_, _inst_name))
/** @internal
*
* Returns a pointer associated with given logging instance. When runtime filtering
* is enabled then dynamic instance is returned.
*
* @param _name Name of the instance.
*
* @return Pointer to the instance object (static or dynamic).
*/
#define Z_LOG_OBJECT_PTR(_name) \
COND_CODE_1(CONFIG_LOG_RUNTIME_FILTERING, \
(&LOG_ITEM_DYNAMIC_DATA(_name)), \
(&Z_LOG_ITEM_CONST_DATA(_name))) \
/** @brief Get pointer to a logging instance.
*
* Instance is identified by @p _module_name and @p _inst_name.
*
* @param _module_name Module name.
* @param _inst_name Instance name.
*
* @return Pointer to a logging instance.
*/
#define LOG_INSTANCE_PTR(_module_name, _inst_name) \
Z_LOG_OBJECT_PTR(Z_LOG_INSTANCE_FULL_NAME(_module_name, _inst_name))
/** @brief Macro for initializing a pointer to the logger instance.
*
* @p _module_name and @p _inst_name are concatenated to form a name of the object.
*
* Macro is intended to be used in user structure initializer to initialize a field
* in the structure that holds pointer to the logging instance. Structure field
* should be declared using @p LOG_INSTANCE_PTR_DECLARE.
*
* @param _name Name of a structure element that have a pointer to logging instance object.
* @param _module_name Module name.
* @param _inst_name Instance name.
*/
#define LOG_INSTANCE_PTR_INIT(_name, _module_name, _inst_name) \
LOG_OBJECT_PTR_INIT(_name, LOG_INSTANCE_PTR(_module_name, _inst_name))
#define Z_LOG_INSTANCE_STRUCT \
COND_CODE_1(CONFIG_LOG_RUNTIME_FILTERING, \
(struct log_source_dynamic_data), \
(const struct log_source_const_data))
/**
* @brief Declare a logger instance pointer in the module structure.
*
* If logging is disabled then element in the structure is still declared to avoid
* compilation issues. If compiler supports zero length arrays then it is utilized
* to not use any space, else a byte array is created.
*
* @param _name Name of a structure element that will have a pointer to logging
* instance object.
*/
#define LOG_INSTANCE_PTR_DECLARE(_name) \
COND_CODE_1(CONFIG_LOG, (Z_LOG_INSTANCE_STRUCT * _name), \
(int _name[TOOLCHAIN_HAS_ZLA ? 0 : 1]))
#define Z_LOG_RUNTIME_INSTANCE_REGISTER(_module_name, _inst_name) \
STRUCT_SECTION_ITERABLE_ALTERNATE(log_dynamic, log_source_dynamic_data, \
LOG_INSTANCE_DYNAMIC_DATA(_module_name, _inst_name))
#define Z_LOG_INSTANCE_REGISTER(_module_name, _inst_name, _level) \
Z_LOG_CONST_ITEM_REGISTER( \
Z_LOG_INSTANCE_FULL_NAME(_module_name, _inst_name), \
STRINGIFY(_module_name._inst_name), \
_level); \
IF_ENABLED(CONFIG_LOG_RUNTIME_FILTERING, \
(Z_LOG_RUNTIME_INSTANCE_REGISTER(_module_name, _inst_name)))
/**
* @brief Macro for registering instance for logging with independent filtering.
*
* Module instance provides filtering of logs on instance level instead of
* module level. Instance create using this macro can later on be used with
* @ref LOG_INSTANCE_PTR_INIT or referenced by @ref LOG_INSTANCE_PTR.
*
* @param _module_name Module name.
* @param _inst_name Instance name.
* @param _level Initial static filtering.
*/
#define LOG_INSTANCE_REGISTER(_module_name, _inst_name, _level) \
IF_ENABLED(CONFIG_LOG, (Z_LOG_INSTANCE_REGISTER(_module_name, _inst_name, _level)))
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_INSTANCE_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_instance.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,402 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_LOG_BACKEND_ADSP_MTRACE_H_
#define ZEPHYR_LOG_BACKEND_ADSP_MTRACE_H_
#include <stdint.h>
#include <stddef.h>
/**
 * @brief mtrace logger requires a hook for IPC messages
 *
 * When new log data is added to the SRAM buffer, an IPC message
 * should be sent to the host. This hook function pointer allows
 * for that.
 */
typedef void(*adsp_mtrace_log_hook_t)(size_t written, size_t space_left);
/**
* @brief Initialize the Intel ADSP mtrace logger
*
* @param hook Function is called after each write to the SRAM buffer
* It is up to the author of the hook to serialize if needed.
*/
void adsp_mtrace_log_init(adsp_mtrace_log_hook_t hook);
const struct log_backend *log_backend_adsp_mtrace_get(void);
#endif /* ZEPHYR_LOG_BACKEND_ADSP_MTRACE_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_backend_adsp_mtrace.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 204 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_DICT_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_DICT_H_
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_msg.h>
#include <stdarg.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* Log message type
*/
enum log_dict_output_msg_type {
MSG_NORMAL = 0,
MSG_DROPPED_MSG = 1,
};
/**
* Output header for one dictionary based log message.
*/
struct log_dict_output_normal_msg_hdr_t {
uint8_t type;
uint32_t domain:4;
uint32_t level:4;
uint32_t package_len:16;
uint32_t data_len:16;
uintptr_t source;
log_timestamp_t timestamp;
} __packed;
/**
* Output for one dictionary based log message about
* dropped messages.
*/
struct log_dict_output_dropped_msg_t {
uint8_t type;
uint16_t num_dropped_messages;
} __packed;
/** @brief Process log messages v2 for dictionary-based logging.
*
* Function is using provided context with the buffer and output function to
* process formatted string and output the data.
*
* @param log_output Pointer to the log output instance.
* @param msg Log message.
* @param flags Optional flags.
*/
void log_dict_output_msg_process(const struct log_output *log_output,
struct log_msg *msg, uint32_t flags);
/** @brief Process dropped messages indication for dictionary-based logging.
*
* Function prints error message indicating lost log messages.
*
* @param output Pointer to the log output instance.
* @param cnt Number of dropped messages.
*/
void log_dict_output_dropped_process(const struct log_output *output, uint32_t cnt);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_DICT_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_output_dict.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 407 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_H_
#include <zephyr/logging/log_instance.h>
#include <zephyr/logging/log_core.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Logging
* @defgroup logging Logging
* @since 1.13
* @version 1.0.0
* @ingroup os_services
* @{
* @}
*/
/**
* @brief Logger API
* @defgroup log_api Logging API
* @ingroup logger
* @{
*/
/**
* @brief Writes an ERROR level message to the log.
*
* @details It's meant to report severe errors, such as those from which it's
* not possible to recover.
*
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_ERR(...) Z_LOG(LOG_LEVEL_ERR, __VA_ARGS__)
/**
* @brief Writes a WARNING level message to the log.
*
* @details It's meant to register messages related to unusual situations that
* are not necessarily errors.
*
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_WRN(...) Z_LOG(LOG_LEVEL_WRN, __VA_ARGS__)
/**
* @brief Writes an INFO level message to the log.
*
* @details It's meant to write generic user oriented messages.
*
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_INF(...) Z_LOG(LOG_LEVEL_INF, __VA_ARGS__)
/**
* @brief Writes a DEBUG level message to the log.
*
* @details It's meant to write developer oriented information.
*
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_DBG(...) Z_LOG(LOG_LEVEL_DBG, __VA_ARGS__)
/**
* @brief Writes a WARNING level message to the log on the first execution only.
*
* @details It's meant for situations that warrant investigation but could clutter
* the logs if output on every execution.
*
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_WRN_ONCE(...) \
do { \
static uint8_t __warned; \
if (unlikely(__warned == 0)) { \
Z_LOG(LOG_LEVEL_WRN, __VA_ARGS__); \
__warned = 1; \
} \
} while (0)
/**
* @brief Unconditionally print raw log message.
*
* The result is same as if printk was used but it goes through logging
* infrastructure thus utilizes logging mode, e.g. deferred mode.
*
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_PRINTK(...) Z_LOG_PRINTK(0, __VA_ARGS__)
/**
* @brief Unconditionally print raw log message.
*
* Provided string is printed as is without appending any characters (e.g., color or newline).
*
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_RAW(...) Z_LOG_PRINTK(1, __VA_ARGS__)
/**
* @brief Writes an ERROR level message associated with the instance to the log.
*
* Message is associated with specific instance of the module which has
* independent filtering settings (if runtime filtering is enabled) and
* message prefix (\<module_name\>.\<instance_name\>). It's meant to report
* severe errors, such as those from which it's not possible to recover.
*
* @param _log_inst Pointer to the log structure associated with the instance.
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_INST_ERR(_log_inst, ...) \
Z_LOG_INSTANCE(LOG_LEVEL_ERR, _log_inst, __VA_ARGS__)
/**
* @brief Writes a WARNING level message associated with the instance to the
* log.
*
* Message is associated with specific instance of the module which has
* independent filtering settings (if runtime filtering is enabled) and
* message prefix (\<module_name\>.\<instance_name\>). It's meant to register
* messages related to unusual situations that are not necessarily errors.
*
* @param _log_inst Pointer to the log structure associated with the instance.
* @param ... A string optionally containing printk valid conversion
* specifier, followed by as many values as specifiers.
*/
#define LOG_INST_WRN(_log_inst, ...) \
Z_LOG_INSTANCE(LOG_LEVEL_WRN, _log_inst, __VA_ARGS__)
/**
* @brief Writes an INFO level message associated with the instance to the log.
*
* Message is associated with specific instance of the module which has
* independent filtering settings (if runtime filtering is enabled) and
* message prefix (\<module_name\>.\<instance_name\>). It's meant to write
* generic user oriented messages.
*
* @param _log_inst Pointer to the log structure associated with the instance.
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_INST_INF(_log_inst, ...) \
Z_LOG_INSTANCE(LOG_LEVEL_INF, _log_inst, __VA_ARGS__)
/**
* @brief Writes a DEBUG level message associated with the instance to the log.
*
* Message is associated with specific instance of the module which has
* independent filtering settings (if runtime filtering is enabled) and
* message prefix (\<module_name\>.\<instance_name\>). It's meant to write
* developer oriented information.
*
* @param _log_inst Pointer to the log structure associated with the instance.
* @param ... A string optionally containing printk valid conversion specifier,
* followed by as many values as specifiers.
*/
#define LOG_INST_DBG(_log_inst, ...) \
Z_LOG_INSTANCE(LOG_LEVEL_DBG, _log_inst, __VA_ARGS__)
/**
* @brief Writes an ERROR level hexdump message to the log.
*
* @details It's meant to report severe errors, such as those from which it's
* not possible to recover.
*
* @param _data Pointer to the data to be logged.
* @param _length Length of data (in bytes).
* @param _str Persistent, raw string.
*/
#define LOG_HEXDUMP_ERR(_data, _length, _str) \
Z_LOG_HEXDUMP(LOG_LEVEL_ERR, _data, _length, (_str))
/**
* @brief Writes a WARNING level message to the log.
*
* @details It's meant to register messages related to unusual situations that
* are not necessarily errors.
*
* @param _data Pointer to the data to be logged.
* @param _length Length of data (in bytes).
* @param _str Persistent, raw string.
*/
#define LOG_HEXDUMP_WRN(_data, _length, _str) \
Z_LOG_HEXDUMP(LOG_LEVEL_WRN, _data, _length, (_str))
/**
* @brief Writes an INFO level message to the log.
*
* @details It's meant to write generic user oriented messages.
*
* @param _data Pointer to the data to be logged.
* @param _length Length of data (in bytes).
* @param _str Persistent, raw string.
*/
#define LOG_HEXDUMP_INF(_data, _length, _str) \
Z_LOG_HEXDUMP(LOG_LEVEL_INF, _data, _length, (_str))
/**
* @brief Writes a DEBUG level message to the log.
*
* @details It's meant to write developer oriented information.
*
* @param _data Pointer to the data to be logged.
* @param _length Length of data (in bytes).
* @param _str Persistent, raw string.
*/
#define LOG_HEXDUMP_DBG(_data, _length, _str) \
Z_LOG_HEXDUMP(LOG_LEVEL_DBG, _data, _length, (_str))
/**
* @brief Writes an ERROR hexdump message associated with the instance to the
* log.
*
* Message is associated with specific instance of the module which has
* independent filtering settings (if runtime filtering is enabled) and
* message prefix (\<module_name\>.\<instance_name\>). It's meant to report
* severe errors, such as those from which it's not possible to recover.
*
* @param _log_inst Pointer to the log structure associated with the instance.
* @param _data Pointer to the data to be logged.
* @param _length Length of data (in bytes).
* @param _str Persistent, raw string.
*/
#define LOG_INST_HEXDUMP_ERR(_log_inst, _data, _length, _str) \
Z_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_ERR, _log_inst, _data, _length, _str)
/**
* @brief Writes a WARNING level hexdump message associated with the instance to
* the log.
*
* @details It's meant to register messages related to unusual situations that
* are not necessarily errors.
*
* @param _log_inst Pointer to the log structure associated with the instance.
* @param _data Pointer to the data to be logged.
* @param _length Length of data (in bytes).
* @param _str Persistent, raw string.
*/
#define LOG_INST_HEXDUMP_WRN(_log_inst, _data, _length, _str) \
Z_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_WRN, _log_inst, _data, _length, _str)
/**
* @brief Writes an INFO level hexdump message associated with the instance to
* the log.
*
* @details It's meant to write generic user oriented messages.
*
* @param _log_inst Pointer to the log structure associated with the instance.
* @param _data Pointer to the data to be logged.
* @param _length Length of data (in bytes).
* @param _str Persistent, raw string.
*/
#define LOG_INST_HEXDUMP_INF(_log_inst, _data, _length, _str) \
Z_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_INF, _log_inst, _data, _length, _str)
/**
* @brief Writes a DEBUG level hexdump message associated with the instance to
* the log.
*
* @details It's meant to write developer oriented information.
*
* @param _log_inst Pointer to the log structure associated with the instance.
* @param _data Pointer to the data to be logged.
* @param _length Length of data (in bytes).
* @param _str Persistent, raw string.
*/
#define LOG_INST_HEXDUMP_DBG(_log_inst, _data, _length, _str) \
Z_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_DBG, _log_inst, _data, _length, _str)
/**
 * @brief Writes a formatted string to the log.
 *
 * @details Conditionally compiled (see CONFIG_LOG_PRINTK). Function provides
 * printk functionality.
 *
 * It is less efficient compared to standard logging because static packaging
 * cannot be used.
 *
 * @param fmt Formatted string to output.
 * @param ap Variable parameters.
 */
void z_log_vprintk(const char *fmt, va_list ap);
#ifdef __cplusplus
}
#define LOG_IN_CPLUSPLUS 1
#endif
/* Macro expects that optionally on second argument local log level is provided.
* If provided it is returned, otherwise default log level is returned or
* LOG_LEVEL, if it was locally defined.
*/
#if !defined(CONFIG_LOG)
#define _LOG_LEVEL_RESOLVE(...) LOG_LEVEL_NONE
#else
#define _LOG_LEVEL_RESOLVE(...) \
Z_LOG_EVAL(COND_CODE_0(LOG_LEVEL, (1), (LOG_LEVEL)), \
(GET_ARG_N(2, __VA_ARGS__, LOG_LEVEL)), \
(GET_ARG_N(2, __VA_ARGS__, CONFIG_LOG_DEFAULT_LEVEL)))
#endif
/* Return first argument */
#define _LOG_ARG1(arg1, ...) arg1
#define _LOG_MODULE_CONST_DATA_CREATE(_name, _level) \
IF_ENABLED(CONFIG_LOG_FMT_SECTION, ( \
static const char UTIL_CAT(_name, _str)[] \
__in_section(_log_strings, static, _CONCAT(_name, _)) __used __noasan = \
STRINGIFY(_name);)) \
IF_ENABLED(LOG_IN_CPLUSPLUS, (extern)) \
const STRUCT_SECTION_ITERABLE_ALTERNATE(log_const, \
log_source_const_data, \
Z_LOG_ITEM_CONST_DATA(_name)) = \
{ \
.name = COND_CODE_1(CONFIG_LOG_FMT_SECTION, \
(UTIL_CAT(_name, _str)), (STRINGIFY(_name))), \
.level = (_level) \
}
#define _LOG_MODULE_DYNAMIC_DATA_CREATE(_name) \
STRUCT_SECTION_ITERABLE_ALTERNATE(log_dynamic, log_source_dynamic_data, \
LOG_ITEM_DYNAMIC_DATA(_name))
#define _LOG_MODULE_DYNAMIC_DATA_COND_CREATE(_name) \
IF_ENABLED(CONFIG_LOG_RUNTIME_FILTERING, \
(_LOG_MODULE_DYNAMIC_DATA_CREATE(_name);))
#define _LOG_MODULE_DATA_CREATE(_name, _level) \
_LOG_MODULE_CONST_DATA_CREATE(_name, _level); \
_LOG_MODULE_DYNAMIC_DATA_COND_CREATE(_name)
/* Determine if data for the module shall be created. It is created if logging
* is enabled, override level is set or module specific level is set (not off).
*/
#define Z_DO_LOG_MODULE_REGISTER(...) \
COND_CODE_1(CONFIG_LOG, \
(Z_LOG_EVAL(CONFIG_LOG_OVERRIDE_LEVEL, \
(1), \
(Z_LOG_EVAL(_LOG_LEVEL_RESOLVE(__VA_ARGS__), (1), (0))) \
)), (0))
/**
* @brief Create module-specific state and register the module with Logger.
*
* This macro normally must be used after including <zephyr/logging/log.h> to
* complete the initialization of the module.
*
* Module registration can be skipped in two cases:
*
* - The module consists of more than one file, and another file
* invokes this macro. (LOG_MODULE_DECLARE() should be used instead
* in all of the module's other files.)
* - Instance logging is used and there is no need to create module entry. In
* that case LOG_LEVEL_SET() should be used to set log level used within the
* file.
*
* Macro accepts one or two parameters:
* - module name
* - optional log level. If not provided then default log level is used in
* the file.
*
* Example usage:
* - LOG_MODULE_REGISTER(foo, CONFIG_FOO_LOG_LEVEL)
* - LOG_MODULE_REGISTER(foo)
*
*
* @note The module's state is defined, and the module is registered,
* only if LOG_LEVEL for the current source file is non-zero or
* it is not defined and CONFIG_LOG_DEFAULT_LEVEL is non-zero.
* In other cases, this macro has no effect.
* @see LOG_MODULE_DECLARE
*/
#define LOG_MODULE_REGISTER(...) \
COND_CODE_1( \
Z_DO_LOG_MODULE_REGISTER(__VA_ARGS__), \
(_LOG_MODULE_DATA_CREATE(GET_ARG_N(1, __VA_ARGS__), \
_LOG_LEVEL_RESOLVE(__VA_ARGS__))),\
() \
) \
LOG_MODULE_DECLARE(__VA_ARGS__)
/**
* @brief Macro for declaring a log module (not registering it).
*
* Modules which are split up over multiple files must have exactly
* one file use LOG_MODULE_REGISTER() to create module-specific state
* and register the module with the logger core.
*
* The other files in the module should use this macro instead to
* declare that same state. (Otherwise, LOG_INF() etc. will not be
* able to refer to module-specific state variables.)
*
* Macro accepts one or two parameters:
* - module name
* - optional log level. If not provided then default log level is used in
* the file.
*
* Example usage:
* - LOG_MODULE_DECLARE(foo, CONFIG_FOO_LOG_LEVEL)
* - LOG_MODULE_DECLARE(foo)
*
* @note The module's state is declared only if LOG_LEVEL for the
* current source file is non-zero or it is not defined and
* CONFIG_LOG_DEFAULT_LEVEL is non-zero. In other cases,
* this macro has no effect.
* @see LOG_MODULE_REGISTER
*/
/* Declares the extern source-data symbols created by LOG_MODULE_REGISTER()
 * and defines three per-file helpers used by the LOG_* macros:
 * - __log_current_const_data: points to the module's const data, or NULL
 *   when the module is compiled out (Z_DO_LOG_MODULE_REGISTER() == 0).
 * - __log_current_dynamic_data: non-NULL only when the module is compiled
 *   in AND CONFIG_LOG_RUNTIME_FILTERING is enabled.
 * - __log_level: level resolved from the optional second macro argument.
 */
#define LOG_MODULE_DECLARE(...)						      \
	extern const struct log_source_const_data			      \
			Z_LOG_ITEM_CONST_DATA(GET_ARG_N(1, __VA_ARGS__));     \
	extern struct log_source_dynamic_data				      \
			LOG_ITEM_DYNAMIC_DATA(GET_ARG_N(1, __VA_ARGS__));     \
									      \
	static const struct log_source_const_data *			      \
		__log_current_const_data __unused =			      \
			Z_DO_LOG_MODULE_REGISTER(__VA_ARGS__) ?		      \
			&Z_LOG_ITEM_CONST_DATA(GET_ARG_N(1, __VA_ARGS__)) :   \
			NULL;						      \
									      \
	static struct log_source_dynamic_data *				      \
		__log_current_dynamic_data __unused =			      \
			(Z_DO_LOG_MODULE_REGISTER(__VA_ARGS__) &&	      \
			IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) ?	      \
			&LOG_ITEM_DYNAMIC_DATA(GET_ARG_N(1, __VA_ARGS__)) :   \
			NULL;						      \
									      \
	static const uint32_t __log_level __unused =			      \
		_LOG_LEVEL_RESOLVE(__VA_ARGS__)
/**
* @brief Macro for setting log level in the file or function where instance
* logging API is used.
*
* @param level Level used in file or in function.
*
*/
/* Defines only the per-file __log_level (no module registration); the value
 * is resolved at compile time by Z_LOG_RESOLVED_LEVEL().
 */
#define LOG_LEVEL_SET(level) static const uint32_t __log_level __unused = \
	Z_LOG_RESOLVED_LEVEL(level, 0)
#ifdef CONFIG_LOG_CUSTOM_HEADER
/* This include must always be at the end of log.h */
#include <zephyr_custom_log.h>
#endif
/*
* Eclipse CDT or JetBrains Clion parser is sometimes confused by logging API
* code and freezes the whole IDE. Following lines hides LOG_x macros from them.
*/
#if defined(__CDT_PARSER__) || defined(__JETBRAINS_IDE__)
#undef LOG_ERR
#undef LOG_WRN
#undef LOG_INF
#undef LOG_DBG
#undef LOG_HEXDUMP_ERR
#undef LOG_HEXDUMP_WRN
#undef LOG_HEXDUMP_INF
#undef LOG_HEXDUMP_DBG
#define LOG_ERR(...) (void) 0
#define LOG_WRN(...) (void) 0
#define LOG_DBG(...) (void) 0
#define LOG_INF(...) (void) 0
#define LOG_HEXDUMP_ERR(...) (void) 0
#define LOG_HEXDUMP_WRN(...) (void) 0
#define LOG_HEXDUMP_DBG(...) (void) 0
#define LOG_HEXDUMP_INF(...) (void) 0
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,057 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_BACKEND_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_BACKEND_H_
#include <zephyr/logging/log_msg.h>
#include <stdarg.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Logger backend interface
* @defgroup log_backend Logger backend interface
* @ingroup logger
* @{
*/
/* Forward declaration of the log_backend type. */
struct log_backend;
/**
* @brief Backend events
*/
enum log_backend_evt {
	/**
	 * @brief Event when process thread finishes processing.
	 *
	 * This event is emitted when the process thread finishes
	 * processing pending log messages.
	 *
	 * @note This is not emitted when there are no pending
	 * log messages being processed.
	 *
	 * @note Deferred mode only.
	 */
	LOG_BACKEND_EVT_PROCESS_THREAD_DONE,

	/** @brief Maximum number of backend events */
	LOG_BACKEND_EVT_MAX,	/* Sentinel, not an actual event. Keep last. */
};
/**
* @brief Argument(s) for backend events.
*/
union log_backend_evt_arg {
	/** @brief Unspecified argument(s). */
	/* NOTE(review): interpretation presumably depends on the event from
	 * enum log_backend_evt - confirm with emitting code.
	 */
	void *raw;
};
/**
* @brief Logger backend API.
*/
struct log_backend_api {
	/* Required: called unconditionally by log_backend_msg_process(). */
	void (*process)(const struct log_backend *const backend,
			union log_msg_generic *msg);

	/* Optional (NULL-checked): report @p cnt dropped messages. */
	void (*dropped)(const struct log_backend *const backend, uint32_t cnt);

	/* Required: called unconditionally by log_backend_panic(). */
	void (*panic)(const struct log_backend *const backend);

	/* Optional (NULL-checked): one-time backend initialization. */
	void (*init)(const struct log_backend *const backend);

	/* Optional (NULL-checked): poll readiness; 0 ready, -EBUSY not yet. */
	int (*is_ready)(const struct log_backend *const backend);

	/* Optional: runtime output-format switch; absence means -ENOTSUP. */
	int (*format_set)(const struct log_backend *const backend,
				uint32_t log_type);

	/* Optional (NULL-checked): receive enum log_backend_evt events. */
	void (*notify)(const struct log_backend *const backend,
		       enum log_backend_evt event,
		       union log_backend_evt_arg *arg);
};
/**
* @brief Logger backend control block.
*/
struct log_backend_control_block {
	/* User context set via log_backend_activate(). */
	void *ctx;
	/* Backend ID assigned by the logger core (log_backend_id_set()). */
	uint8_t id;
	/* True between log_backend_activate() and log_backend_deactivate(). */
	bool active;

	/* Initialization level. */
	uint8_t level;
};
/**
* @brief Logger backend structure.
*/
struct log_backend {
	/* Backend implementation callbacks. */
	const struct log_backend_api *api;
	/* Mutable state; the backend struct itself is const
	 * (see LOG_BACKEND_DEFINE()).
	 */
	struct log_backend_control_block *cb;
	/* Stringified instance name from LOG_BACKEND_DEFINE(). */
	const char *name;
	/* If true, backend is initialized/activated with the logger. */
	bool autostart;
};
/**
* @brief Macro for creating a logger backend instance.
*
* @param _name Name of the backend instance.
* @param _api Logger backend API.
* @param _autostart If true backend is initialized and activated together
* with the logger subsystem.
* @param ... Optional context.
*/
/* Instantiates a static (RAM) control block plus a const entry in the
 * log_backend iterable section. The optional variadic argument, when
 * present, initializes the control block's .ctx field.
 */
#define LOG_BACKEND_DEFINE(_name, _api, _autostart, ...)		       \
	static struct log_backend_control_block UTIL_CAT(backend_cb_, _name) = \
	{								       \
		COND_CODE_0(NUM_VA_ARGS_LESS_1(_, ##__VA_ARGS__),	       \
				(), (.ctx = __VA_ARGS__,))		       \
		.id = 0,						       \
		.active = false,					       \
	};								       \
	static const STRUCT_SECTION_ITERABLE(log_backend, _name) =	       \
	{								       \
		.api = &_api,						       \
		.cb = &UTIL_CAT(backend_cb_, _name),			       \
		.name = STRINGIFY(_name),				       \
		.autostart = _autostart					       \
	}
/**
* @brief Initialize or initiate the logging backend.
*
* If backend initialization takes longer time it could block logging thread
* if backend is autostarted. That is because all backends are initialized in
* the context of the logging thread. In that case, backend shall provide
* function for polling for readiness (@ref log_backend_is_ready).
*
* @param[in] backend Pointer to the backend instance.
*/
static inline void log_backend_init(const struct log_backend *const backend)
{
__ASSERT_NO_MSG(backend != NULL);
if (backend->api->init) {
backend->api->init(backend);
}
}
/**
* @brief Poll for backend readiness.
*
* If backend is ready immediately after initialization then backend may not
* provide this function.
*
* @param[in] backend Pointer to the backend instance.
*
* @retval 0 if backend is ready.
* @retval -EBUSY if backend is not yet ready.
*/
static inline int log_backend_is_ready(const struct log_backend *const backend)
{
	__ASSERT_NO_MSG(backend != NULL);

	int (*ready_fn)(const struct log_backend *const) = backend->api->is_ready;

	/* A backend without a readiness hook is considered always ready. */
	return (ready_fn == NULL) ? 0 : ready_fn(backend);
}
/**
* @brief Process message.
*
* Function is used in deferred and immediate mode. On return, message content
* is processed by the backend and memory can be freed.
*
* @param[in] backend Pointer to the backend instance.
* @param[in] msg Pointer to message with log entry.
*/
static inline void log_backend_msg_process(const struct log_backend *const backend,
union log_msg_generic *msg)
{
__ASSERT_NO_MSG(backend != NULL);
__ASSERT_NO_MSG(msg != NULL);
backend->api->process(backend, msg);
}
/**
* @brief Notify backend about dropped log messages.
*
* Function is optional.
*
* @param[in] backend Pointer to the backend instance.
* @param[in] cnt Number of dropped logs since last notification.
*/
static inline void log_backend_dropped(const struct log_backend *const backend,
				       uint32_t cnt)
{
	__ASSERT_NO_MSG(backend != NULL);

	void (*dropped_fn)(const struct log_backend *const, uint32_t) =
		backend->api->dropped;

	/* Dropped-notification is an optional callback. */
	if (dropped_fn != NULL) {
		dropped_fn(backend, cnt);
	}
}
/**
* @brief Reconfigure backend to panic mode.
*
* @param[in] backend Pointer to the backend instance.
*/
static inline void log_backend_panic(const struct log_backend *const backend)
{
__ASSERT_NO_MSG(backend != NULL);
backend->api->panic(backend);
}
/**
* @brief Set backend id.
*
* @note It is used internally by the logger.
*
* @param backend Pointer to the backend instance.
* @param id ID.
*/
static inline void log_backend_id_set(const struct log_backend *const backend,
uint8_t id)
{
__ASSERT_NO_MSG(backend != NULL);
backend->cb->id = id;
}
/**
* @brief Get backend id.
*
* @note It is used internally by the logger.
*
* @param[in] backend Pointer to the backend instance.
* @return Id.
*/
static inline uint8_t log_backend_id_get(const struct log_backend *const backend)
{
__ASSERT_NO_MSG(backend != NULL);
return backend->cb->id;
}
/**
* @brief Get backend.
*
* @param[in] idx Pointer to the backend instance.
*
* @return Pointer to the backend instance.
*/
static inline const struct log_backend *log_backend_get(uint32_t idx)
{
	const struct log_backend *ret;

	/* Backends live in an iterable section; fetch the idx-th entry. */
	STRUCT_SECTION_GET(log_backend, idx, &ret);

	return ret;
}
/**
* @brief Get number of backends.
*
* @return Number of backends.
*/
static inline int log_backend_count_get(void)
{
	int count;

	/* Number of entries in the log_backend iterable section. */
	STRUCT_SECTION_COUNT(log_backend, &count);

	return count;
}
/**
* @brief Activate backend.
*
* @param[in] backend Pointer to the backend instance.
* @param[in] ctx User context.
*/
static inline void log_backend_activate(const struct log_backend *const backend,
void *ctx)
{
__ASSERT_NO_MSG(backend != NULL);
backend->cb->ctx = ctx;
backend->cb->active = true;
}
/**
* @brief Deactivate backend.
*
* @param[in] backend Pointer to the backend instance.
*/
static inline void log_backend_deactivate(
const struct log_backend *const backend)
{
__ASSERT_NO_MSG(backend != NULL);
backend->cb->active = false;
}
/**
* @brief Check state of the backend.
*
* @param[in] backend Pointer to the backend instance.
*
* @return True if backend is active, false otherwise.
*/
static inline bool log_backend_is_active(
const struct log_backend *const backend)
{
__ASSERT_NO_MSG(backend != NULL);
return backend->cb->active;
}
/** @brief Set logging format.
*
* @param backend Pointer to the backend instance.
* @param log_type Log format.
*
* @retval -ENOTSUP If the backend does not support changing format types.
* @retval -EINVAL If the input is invalid.
* @retval 0 for success.
*/
static inline int log_backend_format_set(const struct log_backend *backend, uint32_t log_type)
{
	extern size_t log_format_table_size(void);

	/* Validate the backend pointer first so no format-table lookups are
	 * performed for a NULL backend. Return values are unchanged: every
	 * invalid input still yields -EINVAL.
	 */
	if (backend == NULL) {
		return -EINVAL;
	}

	/* Reject format IDs outside the table. */
	if ((size_t)log_type >= log_format_table_size()) {
		return -EINVAL;
	}

	/* Reject format IDs with no formatter compiled in. */
	if (log_format_func_t_get(log_type) == NULL) {
		return -EINVAL;
	}

	/* Backends that cannot switch formats omit the callback. */
	if (backend->api->format_set == NULL) {
		return -ENOTSUP;
	}

	return backend->api->format_set(backend, log_type);
}
/**
* @brief Notify a backend of an event.
*
* @param backend Pointer to the backend instance.
* @param event Event to be notified.
* @param arg Pointer to the argument(s).
*/
static inline void log_backend_notify(const struct log_backend *const backend,
enum log_backend_evt event,
union log_backend_evt_arg *arg)
{
__ASSERT_NO_MSG(backend != NULL);
if (backend->api->notify) {
backend->api->notify(backend, event, arg);
}
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_BACKEND_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_backend.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,088 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_CTRL_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_CTRL_H_
#include <zephyr/kernel.h>
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log_msg.h>
#include <zephyr/logging/log_internal.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Logger
* @defgroup logger Logger system
* @since 1.13
* @ingroup logging
* @{
* @}
*/
/**
* @brief Logger control API
* @defgroup log_ctrl Logger control API
* @since 1.13
* @ingroup logger
* @{
*/
typedef log_timestamp_t (*log_timestamp_get_t)(void);
/** @brief Function system initialization of the logger.
*
* Function is called during start up to allow logging before user can
* explicitly initialize the logger.
*/
void log_core_init(void);
/**
* @brief Function for user initialization of the logger.
*
*/
void log_init(void);
/** @brief Trigger the log processing thread to process logs immediately.
*
* @note Function has no effect when CONFIG_LOG_MODE_IMMEDIATE is set.
*/
void log_thread_trigger(void);
/**
* @brief Function for providing thread which is processing logs.
*
* See CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD.
*
* @note Function has asserts and has no effect when CONFIG_LOG_PROCESS_THREAD is set.
*
* @param process_tid Process thread id. Used to wake up the thread.
*/
void log_thread_set(k_tid_t process_tid);
/**
* @brief Function for providing timestamp function.
*
* @param timestamp_getter Timestamp function.
* @param freq Timestamping frequency.
*
* @return 0 on success or error.
*/
int log_set_timestamp_func(log_timestamp_get_t timestamp_getter,
uint32_t freq);
/**
* @brief Switch the logger subsystem to the panic mode.
*
* Returns immediately if the logger is already in the panic mode.
*
* @details On panic the logger subsystem informs all backends about panic mode.
* Backends must switch to blocking mode or halt. All pending logs
* are flushed after switching to panic mode. In panic mode, all log
* messages must be processed in the context of the call.
*/
__syscall void log_panic(void);
/**
* @brief Process one pending log message.
*
* @retval true There are more messages pending to be processed.
* @retval false No messages pending.
*/
__syscall bool log_process(void);
/**
* @brief Return number of buffered log messages.
*
* @return Number of currently buffered log messages.
*/
__syscall uint32_t log_buffered_cnt(void);
/** @brief Get number of independent logger sources (modules and instances)
*
* @param domain_id Domain ID.
*
* @return Number of sources.
*/
uint32_t log_src_cnt_get(uint32_t domain_id);
/** @brief Get name of the source (module or instance).
*
* @param domain_id Domain ID.
* @param source_id Source ID.
*
* @return Source name or NULL if invalid arguments.
*/
const char *log_source_name_get(uint32_t domain_id, uint32_t source_id);
/** @brief Return number of domains present in the system.
*
* There will be at least one local domain.
*
* @return Number of domains.
*/
static inline uint8_t log_domains_count(void)
{
	/* One local domain always exists; remote domains are counted only
	 * when multi-domain logging is compiled in.
	 */
	uint8_t remote = IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) ?
			 z_log_ext_domain_count() : 0;

	return 1 + remote;
}
/** @brief Get name of the domain.
*
* @param domain_id Domain ID.
*
* @return Domain name.
*/
const char *log_domain_name_get(uint32_t domain_id);
/**
* @brief Function for finding source ID based on source name.
*
* @param name Source name
*
* @return Source ID or negative number when source ID is not found.
*/
int log_source_id_get(const char *name);
/**
* @brief Get source filter for the provided backend.
*
* @param backend Backend instance.
* @param domain_id ID of the domain.
* @param source_id Source (module or instance) ID.
* @param runtime True for runtime filter or false for compiled in.
*
* @return Severity level.
*/
uint32_t log_filter_get(struct log_backend const *const backend,
uint32_t domain_id, int16_t source_id, bool runtime);
/**
* @brief Set filter on given source for the provided backend.
*
* @param backend Backend instance. NULL for all backends (and frontend).
* @param domain_id ID of the domain.
* @param source_id Source (module or instance) ID.
* @param level Severity level.
*
* @return Actual level set which may be limited by compiled level. If filter
* was set for all backends then maximal level that was set is returned.
*/
__syscall uint32_t log_filter_set(struct log_backend const *const backend,
uint32_t domain_id, int16_t source_id,
uint32_t level);
/**
* @brief Get source filter for the frontend.
*
* @param source_id Source (module or instance) ID.
* @param runtime True for runtime filter or false for compiled in.
*
* @return Severity level.
*/
uint32_t log_frontend_filter_get(int16_t source_id, bool runtime);
/**
* @brief Set filter on given source for the frontend.
*
* @param source_id Source (module or instance) ID.
* @param level Severity level.
*
* @return Actual level set which may be limited by compiled level.
*/
__syscall uint32_t log_frontend_filter_set(int16_t source_id, uint32_t level);
/**
*
* @brief Enable backend with initial maximum filtering level.
*
* @param backend Backend instance.
* @param ctx User context.
* @param level Severity level.
*/
void log_backend_enable(struct log_backend const *const backend,
void *ctx,
uint32_t level);
/**
*
* @brief Disable backend.
*
* @param backend Backend instance.
*/
void log_backend_disable(struct log_backend const *const backend);
/**
* @brief Get backend by name.
*
* @param[in] backend_name Name of the backend as defined by the LOG_BACKEND_DEFINE.
*
* @retval Pointer to the backend instance if found, NULL if backend is not found.
*/
const struct log_backend *log_backend_get_by_name(const char *backend_name);
/** @brief Sets logging format for all active backends.
*
* @param log_type Log format.
*
* @retval Pointer to the last backend that failed, NULL for success.
*/
const struct log_backend *log_format_set_all_active_backends(size_t log_type);
/**
* @brief Check if there is pending data to be processed by the logging subsystem.
*
* Function can be used to determine if all logs have been flushed. Function
* returns false when deferred mode is not enabled.
*
* @retval true There is pending data.
* @retval false No pending data to process.
*/
static inline bool log_data_pending(void)
{
	/* Only deferred mode buffers messages; otherwise nothing can be
	 * pending by construction.
	 */
	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return z_log_msg_pending();
	}

	return false;
}
/**
* @brief Configure tag used to prefix each message.
*
* @param tag Tag.
*
* @retval 0 on successful operation.
* @retval -ENOTSUP if feature is disabled.
* @retval -ENOMEM if string is longer than the buffer capacity. Tag will be trimmed.
*/
int log_set_tag(const char *tag);
/**
* @brief Get current memory usage.
*
* @param[out] buf_size Capacity of the buffer used for storing log messages.
* @param[out] usage Number of bytes currently containing pending log messages.
*
* @retval -EINVAL if logging mode does not use the buffer.
* @retval 0 successfully collected usage data.
*/
int log_mem_get_usage(uint32_t *buf_size, uint32_t *usage);
/**
* @brief Get maximum memory usage.
*
* Requires CONFIG_LOG_MEM_UTILIZATION option.
*
* @param[out] max Maximum number of bytes used for pending log messages.
*
* @retval -EINVAL if logging mode does not use the buffer.
 * @retval -ENOTSUP if instrumentation (CONFIG_LOG_MEM_UTILIZATION) has
 * not been enabled.
*
* @retval 0 successfully collected usage data.
*/
int log_mem_get_max_usage(uint32_t *max);
#if defined(CONFIG_LOG) && !defined(CONFIG_LOG_MODE_MINIMAL)
/* Full logging enabled: control hooks map to the real implementation. */
#define LOG_CORE_INIT() log_core_init()
#define LOG_PANIC() log_panic()
#if defined(CONFIG_LOG_FRONTEND_ONLY)
/* Frontend-only mode: no core buffering, so nothing to init or process. */
#define LOG_INIT() 0
#define LOG_PROCESS() false
#else /* !CONFIG_LOG_FRONTEND_ONLY */
#define LOG_INIT() log_init()
#define LOG_PROCESS() log_process()
#endif /* !CONFIG_LOG_FRONTEND_ONLY */
#else
/* Logging disabled or minimal mode: all hooks compile to no-ops. */
#define LOG_CORE_INIT() do { } while (false)
#define LOG_INIT() 0
#define LOG_PANIC() /* Empty */
#define LOG_PROCESS() false
#endif
#include <zephyr/syscalls/log_ctrl.h>
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_CTRL_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_ctrl.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,912 |
```objective-c
/*
*
*/
#ifndef LOG_FRONTEND_H_
#define LOG_FRONTEND_H_
#include <zephyr/logging/log_core.h>
/** @brief Initialize frontend.
*/
void log_frontend_init(void);
/** @brief Log generic message.
*
* Message details does not contain timestamp. Since function is called in the
* context of log message call, implementation can use its own timestamping scheme.
*
* @param source Pointer to a structure associated with given source. It points to
* static structure or dynamic structure if runtime filtering is enabled.
* @ref log_const_source_id or @ref log_dynamic_source_id can be used to determine
* source id.
*
* @param desc Message descriptor.
*
 * @param package Cbprintf package containing logging formatted string. Length is in @p desc.
*
* @param data Hexdump data. Length is in @p desc.
*/
void log_frontend_msg(const void *source,
const struct log_msg_desc desc,
uint8_t *package, const void *data);
/** @brief Log message with 0 arguments.
*
* Optimized version for log message which does not have arguments (only string).
* This API is optional and is used only if optimizing common log messages is enabled.
*
* @param source Pointer to a structure associated with given source. It points to
* static structure or dynamic structure if runtime filtering is enabled.
* @ref log_const_source_id or @ref log_dynamic_source_id can be used to determine
* source id.
* @param level Severity level.
* @param fmt String.
*/
void log_frontend_simple_0(const void *source, uint32_t level, const char *fmt);
/** @brief Log message with 1 argument.
*
* Optimized version for log message which has one argument that fits in a 32 bit word.
* This API is optional and is used only if optimizing common log messages is enabled.
*
* @param source Pointer to a structure associated with given source. It points to
* static structure or dynamic structure if runtime filtering is enabled.
* @ref log_const_source_id or @ref log_dynamic_source_id can be used to determine
* source id.
* @param level Severity level.
* @param fmt String.
* @param arg Argument passed to the string.
*/
void log_frontend_simple_1(const void *source, uint32_t level, const char *fmt, uint32_t arg);
/** @brief Log message with 2 arguments.
*
* Optimized version for log message which has two arguments that fit in a 32 bit word.
* This API is optional and is used only if optimizing common log messages is enabled.
*
* @param source Pointer to a structure associated with given source. It points to
* static structure or dynamic structure if runtime filtering is enabled.
* @ref log_const_source_id or @ref log_dynamic_source_id can be used to determine
* source id.
* @param level Severity level.
* @param fmt String.
* @param arg0 First argument passed to the string.
* @param arg1 Second argument passed to the string.
*/
void log_frontend_simple_2(const void *source, uint32_t level,
const char *fmt, uint32_t arg0, uint32_t arg1);
/** @brief Panic state notification. */
void log_frontend_panic(void);
#endif /* LOG_FRONTEND_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_frontend.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 695 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_LOG_BACKEND_NET_H_
#define ZEPHYR_LOG_BACKEND_NET_H_
#include <stdbool.h>
#include <zephyr/net/net_ip.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Allows user to set a server IP address, provided as string, at runtime
*
* @details This function allows the user to set an IPv4 or IPv6 address at runtime. It can be
* called either before or after the backend has been initialized. If it gets called when
* the net logger backend context is running, it'll release it and create another one with
* the new address next time process() gets called.
*
* @param addr String that contains the IP address.
*
* @return True if parsing could be done, false otherwise.
*/
bool log_backend_net_set_addr(const char *addr);
/**
* @brief Allows user to set a server IP address, provided as sockaddr structure, at runtime
*
* @details This function allows the user to set an IPv4 or IPv6 address at runtime. It can be
* called either before or after the backend has been initialized. If it gets called when
* the net logger backend context is running, it'll release it and create another one with
* the new address next time process() gets called.
*
* @param addr Pointer to the sockaddr structure that contains the IP address.
*
* @return True if address could be set, false otherwise.
*/
bool log_backend_net_set_ip(const struct sockaddr *addr);
/**
* @brief update the hostname
*
 * @details This function allows updating the hostname displayed by the logging backend. It will be
 * called by the network stack if the hostname is set with net_hostname_set().
*
* @param hostname new hostname as char array.
* @param len Length of the hostname array.
*/
#if defined(CONFIG_NET_HOSTNAME_ENABLE)
void log_backend_net_hostname_set(char *hostname, size_t len);
#else
static inline void log_backend_net_hostname_set(const char *hostname, size_t len)
{
	/* Hostname support disabled: accept and ignore the arguments.
	 * NOTE(review): this stub takes const char * while the
	 * CONFIG_NET_HOSTNAME_ENABLE prototype above takes char * -
	 * confirm the const-qualification mismatch is intentional.
	 */
	ARG_UNUSED(hostname);
	ARG_UNUSED(len);
}
#endif
/**
* @brief Get the net logger backend
*
* @details This function returns the net logger backend.
*
* @return Pointer to the net logger backend.
*/
const struct log_backend *log_backend_net_get(void);
/**
* @brief Start the net logger backend
*
* @details This function starts the net logger backend.
*/
void log_backend_net_start(void);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_LOG_BACKEND_NET_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_backend_net.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 543 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_H_
#include <zephyr/logging/log_msg.h>
#include <zephyr/sys/util.h>
#include <stdarg.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/kernel.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Log output API
* @defgroup log_output Log output API
* @ingroup logger
* @{
*/
/**@defgroup LOG_OUTPUT_FLAGS Log output formatting flags.
* @{
*/
/** @brief Flag forcing ANSI escape code colors, red (errors), yellow
* (warnings).
*/
#define LOG_OUTPUT_FLAG_COLORS BIT(0)
/** @brief Flag forcing timestamp */
#define LOG_OUTPUT_FLAG_TIMESTAMP BIT(1)
/** @brief Flag forcing timestamp formatting. */
#define LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP BIT(2)
/** @brief Flag forcing severity level prefix. */
#define LOG_OUTPUT_FLAG_LEVEL BIT(3)
/** @brief Flag preventing the logger from adding CR and LF characters. */
#define LOG_OUTPUT_FLAG_CRLF_NONE BIT(4)
/** @brief Flag forcing a single LF character for line breaks. */
#define LOG_OUTPUT_FLAG_CRLF_LFONLY BIT(5)
/** @brief Flag forcing syslog format specified in RFC 5424
*/
#define LOG_OUTPUT_FLAG_FORMAT_SYSLOG BIT(6)
/** @brief Flag thread id or name prefix. */
#define LOG_OUTPUT_FLAG_THREAD BIT(7)
/** @brief Flag forcing to skip logging the source. */
#define LOG_OUTPUT_FLAG_SKIP_SOURCE BIT(8)
/**@} */
/** @brief Supported backend logging format types for use
* with log_format_set() API to switch log format at runtime.
*/
#define LOG_OUTPUT_TEXT 0
#define LOG_OUTPUT_SYST 1
#define LOG_OUTPUT_DICT 2
#define LOG_OUTPUT_CUSTOM 3
/**
* @brief Prototype of the function processing output data.
*
* @param buf The buffer data.
* @param size The buffer size.
* @param ctx User context.
*
* @return Number of bytes processed, dropped or discarded.
*
* @note If the log output function cannot process all of the data, it is
* its responsibility to mark them as dropped or discarded by returning
* the corresponding number of bytes dropped or discarded to the caller.
*/
typedef int (*log_output_func_t)(uint8_t *buf, size_t size, void *ctx);
/** @brief Control block structure for log_output instance. */
struct log_output_control_block {
	/* Current offset in the output buffer (managed internally). */
	atomic_t offset;
	/* User context passed to the output function (log_output_ctx_set()). */
	void *ctx;
	/* Hostname of this device (log_output_hostname_set()). */
	const char *hostname;
};
/** @brief Log_output instance structure. */
struct log_output {
log_output_func_t func;
struct log_output_control_block *control_block;
uint8_t *buf;
size_t size;
};
/**
* @brief Typedef of the function pointer table "format_table".
*
* @param output Pointer to log_output struct.
* @param msg Pointer to log_msg struct.
* @param flags Flags used for text formatting options.
*
* @return Function pointer based on Kconfigs defined for backends.
*/
typedef void (*log_format_func_t)(const struct log_output *output,
struct log_msg *msg, uint32_t flags);
/**
* @brief Declaration of the get routine for function pointer table format_table.
*/
log_format_func_t log_format_func_t_get(uint32_t log_type);
/** @brief Create log_output instance.
*
* @param _name Instance name.
* @param _func Function for processing output data.
* @param _buf Pointer to the output buffer.
* @param _size Size of the output buffer.
*/
/* Instantiates a static (RAM) control block and a const log_output
 * instance bound to the given output function and buffer.
 */
#define LOG_OUTPUT_DEFINE(_name, _func, _buf, _size)			\
	static struct log_output_control_block _name##_control_block;	\
	static const struct log_output _name = {			\
		.func = _func,						\
		.control_block = &_name##_control_block,		\
		.buf = _buf,						\
		.size = _size,						\
	}
/** @brief Process log messages v2 to readable strings.
*
* Function is using provided context with the buffer and output function to
* process formatted string and output the data.
*
* @param log_output Pointer to the log output instance.
* @param msg Log message.
* @param flags Optional flags. See @ref LOG_OUTPUT_FLAGS.
*/
void log_output_msg_process(const struct log_output *log_output,
struct log_msg *msg, uint32_t flags);
/** @brief Process input data to a readable string.
*
* @param log_output Pointer to the log output instance.
* @param timestamp Timestamp.
* @param domain Domain name string. Can be NULL.
* @param source Source name string. Can be NULL.
* @param tid Thread ID.
* @param level Criticality level.
* @param package Cbprintf package with a logging message string.
* @param data Data passed to hexdump API. Can be NULL.
* @param data_len Data length.
* @param flags Formatting flags. See @ref LOG_OUTPUT_FLAGS.
*/
void log_output_process(const struct log_output *log_output,
log_timestamp_t timestamp,
const char *domain,
const char *source,
k_tid_t tid,
uint8_t level,
const uint8_t *package,
const uint8_t *data,
size_t data_len,
uint32_t flags);
/** @brief Process log messages v2 to SYS-T format.
*
* Function is using provided context with the buffer and output function to
* process formatted string and output the data in sys-t log output format.
*
* @param log_output Pointer to the log output instance.
* @param msg Log message.
* @param flags Optional flags. See @ref LOG_OUTPUT_FLAGS.
*/
void log_output_msg_syst_process(const struct log_output *log_output,
struct log_msg *msg, uint32_t flags);
/** @brief Process dropped messages indication.
*
* Function prints error message indicating lost log messages.
*
* @param output Pointer to the log output instance.
* @param cnt Number of dropped messages.
*/
void log_output_dropped_process(const struct log_output *output, uint32_t cnt);
/** @brief Flush output buffer.
*
* @param output Pointer to the log output instance.
*/
void log_output_flush(const struct log_output *output);
/** @brief Function for setting user context passed to the output function.
*
* @param output Pointer to the log output instance.
* @param ctx User context.
*/
static inline void log_output_ctx_set(const struct log_output *output,
void *ctx)
{
output->control_block->ctx = ctx;
}
/** @brief Function for setting hostname of this device
*
* @param output Pointer to the log output instance.
* @param hostname Hostname of this device
*/
static inline void log_output_hostname_set(const struct log_output *output,
const char *hostname)
{
output->control_block->hostname = hostname;
}
/** @brief Set timestamp frequency.
*
* @param freq Frequency in Hz.
*/
void log_output_timestamp_freq_set(uint32_t freq);
/** @brief Convert timestamp of the message to us.
*
* @param timestamp Message timestamp
*
* @return Timestamp value in us.
*/
uint64_t log_output_timestamp_to_us(log_timestamp_t timestamp);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_OUTPUT_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_output.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,570 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_LOG_BACKEND_ADSP_HDA_H_
#define ZEPHYR_LOG_BACKEND_ADSP_HDA_H_
#include <stdint.h>
/**
*@brief HDA logger requires a hook for IPC messages
*
* When the log is flushed and written with DMA an IPC message should
* be sent to inform the host. This hook function pointer allows for that
*/
typedef void(*adsp_hda_log_hook_t)(uint32_t written);
/**
* @brief Initialize the Intel ADSP HDA logger
*
* @param hook Function is called after each HDA flush in order to
* inform the Host of DMA log data. This hook may be called
* from multiple CPUs and multiple calling contexts concurrently.
* It is up to the author of the hook to serialize if needed.
* It is guaranteed to be called once for every flush.
* @param channel HDA stream (DMA Channel) to use for logging
*/
void adsp_hda_log_init(adsp_hda_log_hook_t hook, uint32_t channel);
#endif /* ZEPHYR_LOG_BACKEND_ADSP_HDA_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_backend_adsp_hda.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 236 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_MSG_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_MSG_H_
#include <zephyr/logging/log_instance.h>
#include <zephyr/sys/mpsc_packet.h>
#include <zephyr/sys/cbprintf.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/sys/util.h>
#include <string.h>
#include <zephyr/toolchain.h>
#ifdef __GNUC__
#ifndef alloca
#define alloca __builtin_alloca
#endif
#else
#include <alloca.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define LOG_MSG_DEBUG 0
#define LOG_MSG_DBG(...) IF_ENABLED(LOG_MSG_DEBUG, (printk(__VA_ARGS__)))
#ifdef CONFIG_LOG_TIMESTAMP_64BIT
typedef uint64_t log_timestamp_t;
#else
typedef uint32_t log_timestamp_t;
#endif
/**
* @brief Log message API
* @defgroup log_msg Log message API
* @ingroup logger
* @{
*/
#define Z_LOG_MSG_LOG 0
#define Z_LOG_MSG_PACKAGE_BITS 11
#define Z_LOG_MSG_MAX_PACKAGE BIT_MASK(Z_LOG_MSG_PACKAGE_BITS)
#define LOG_MSG_GENERIC_HDR \
MPSC_PBUF_HDR;\
uint32_t type:1
/* Descriptor of a log message. Holds the generic item header plus the
 * message source/severity information and the lengths of the cbprintf
 * package and hexdump data that follow the message header.
 */
struct log_msg_desc {
	LOG_MSG_GENERIC_HDR;
	uint32_t domain:3;	/* Domain ID of the message origin. */
	uint32_t level:3;	/* Severity level. */
	uint32_t package_len:Z_LOG_MSG_PACKAGE_BITS;	/* cbprintf package length in bytes. */
	uint32_t data_len:12;	/* Hexdump data length in bytes. */
};
union log_msg_source {
const struct log_source_const_data *fixed;
struct log_source_dynamic_data *dynamic;
void *raw;
};
struct log_msg_hdr {
struct log_msg_desc desc;
/* Attempting to keep best alignment. When address is 64 bit and timestamp 32
* swap the order to have 16 byte header instead of 24 byte.
*/
#if (INTPTR_MAX > INT32_MAX) && !defined(CONFIG_LOG_TIMESTAMP_64BIT)
log_timestamp_t timestamp;
const void *source;
#else
const void *source;
log_timestamp_t timestamp;
#endif
#if defined(CONFIG_LOG_THREAD_ID_PREFIX)
void *tid;
#endif
};
/* Messages are aligned to alignment required by cbprintf package. */
#define Z_LOG_MSG_ALIGNMENT CBPRINTF_PACKAGE_ALIGNMENT
#define Z_LOG_MSG_PADDING \
((sizeof(struct log_msg_hdr) % Z_LOG_MSG_ALIGNMENT) > 0 ? \
(Z_LOG_MSG_ALIGNMENT - (sizeof(struct log_msg_hdr) % Z_LOG_MSG_ALIGNMENT)) : \
0)
struct log_msg {
struct log_msg_hdr hdr;
/* Adding padding to ensure that cbprintf package that follows is
* properly aligned.
*/
uint8_t padding[Z_LOG_MSG_PADDING];
uint8_t data[];
};
/**
* @cond INTERNAL_HIDDEN
*/
/* Header (including padding) must be a multiple of the package alignment so
 * that the flexible data[] member is properly aligned for a cbprintf package.
 */
BUILD_ASSERT(sizeof(struct log_msg) % Z_LOG_MSG_ALIGNMENT == 0,
	     "Log msg size must be aligned");
/**
* @endcond
*/
struct log_msg_generic_hdr {
LOG_MSG_GENERIC_HDR;
};
union log_msg_generic {
union mpsc_pbuf_generic buf;
struct log_msg_generic_hdr generic;
struct log_msg log;
};
/** @brief Method used for creating a log message.
*
* It is used for testing purposes to validate that expected mode was used.
*/
enum z_log_msg_mode {
	/* Runtime mode is least efficient but supports all cases thus it is
	 * treated as a fallback method when others cannot be used.
	 */
	Z_LOG_MSG_MODE_RUNTIME,
	/* Mode creates statically a string package on stack and calls a
	 * function for creating a message. It takes less code size than
	 * Z_LOG_MSG_MODE_ZERO_COPY but is a bit slower.
	 */
	Z_LOG_MSG_MODE_FROM_STACK,
	/* Mode calculates size of the message and allocates it and writes
	 * directly to the message space. It is the fastest method but requires
	 * more code size.
	 */
	Z_LOG_MSG_MODE_ZERO_COPY,
	/* Mode optimized for simple messages with 0 to 2 32 bit word arguments. */
	Z_LOG_MSG_MODE_SIMPLE,
};
#define Z_LOG_MSG_DESC_INITIALIZER(_domain_id, _level, _plen, _dlen) \
{ \
.valid = 0, \
.busy = 0, \
.type = Z_LOG_MSG_LOG, \
.domain = (_domain_id), \
.level = (_level), \
.package_len = (_plen), \
.data_len = (_dlen), \
}
#define Z_LOG_MSG_CBPRINTF_FLAGS(_cstr_cnt) \
(CBPRINTF_PACKAGE_FIRST_RO_STR_CNT(_cstr_cnt) | \
(IS_ENABLED(CONFIG_LOG_MSG_APPEND_RO_STRING_LOC) ? \
CBPRINTF_PACKAGE_ADD_STRING_IDXS : 0))
#ifdef CONFIG_LOG_USE_VLA
#define Z_LOG_MSG_ON_STACK_ALLOC(ptr, len) \
long long _ll_buf[DIV_ROUND_UP(len, sizeof(long long))]; \
long double _ld_buf[DIV_ROUND_UP(len, sizeof(long double))]; \
(ptr) = (sizeof(long double) == Z_LOG_MSG_ALIGNMENT) ? \
(struct log_msg *)_ld_buf : (struct log_msg *)_ll_buf; \
if (IS_ENABLED(CONFIG_LOG_TEST_CLEAR_MESSAGE_SPACE)) { \
/* During test fill with 0's to simplify message comparison */ \
memset((ptr), 0, (len)); \
}
#else /* Z_LOG_MSG_USE_VLA */
/* When VLA cannot be used we need to trick compiler a bit and create multiple
* fixed size arrays and take the smallest one that will fit the message.
* Compiler will remove unused arrays and stack usage will be kept similar
* to vla case, rounded to the size of the used buffer.
*/
#define Z_LOG_MSG_ON_STACK_ALLOC(ptr, len) \
long long _ll_buf32[32 / sizeof(long long)]; \
long long _ll_buf48[48 / sizeof(long long)]; \
long long _ll_buf64[64 / sizeof(long long)]; \
long long _ll_buf128[128 / sizeof(long long)]; \
long long _ll_buf256[256 / sizeof(long long)]; \
long double _ld_buf32[32 / sizeof(long double)]; \
long double _ld_buf48[48 / sizeof(long double)]; \
long double _ld_buf64[64 / sizeof(long double)]; \
long double _ld_buf128[128 / sizeof(long double)]; \
long double _ld_buf256[256 / sizeof(long double)]; \
if (sizeof(long double) == Z_LOG_MSG_ALIGNMENT) { \
ptr = (len > 128) ? (struct log_msg *)_ld_buf256 : \
((len > 64) ? (struct log_msg *)_ld_buf128 : \
((len > 48) ? (struct log_msg *)_ld_buf64 : \
((len > 32) ? (struct log_msg *)_ld_buf48 : \
(struct log_msg *)_ld_buf32)));\
} else { \
ptr = (len > 128) ? (struct log_msg *)_ll_buf256 : \
((len > 64) ? (struct log_msg *)_ll_buf128 : \
((len > 48) ? (struct log_msg *)_ll_buf64 : \
((len > 32) ? (struct log_msg *)_ll_buf48 : \
(struct log_msg *)_ll_buf32)));\
} \
if (IS_ENABLED(CONFIG_LOG_TEST_CLEAR_MESSAGE_SPACE)) { \
/* During test fill with 0's to simplify message comparison */ \
memset((ptr), 0, (len)); \
}
#endif /* Z_LOG_MSG_USE_VLA */
#define Z_LOG_MSG_ALIGN_OFFSET \
offsetof(struct log_msg, data)
#define Z_LOG_MSG_LEN(pkg_len, data_len) \
(offsetof(struct log_msg, data) + (pkg_len) + (data_len))
#define Z_LOG_MSG_ALIGNED_WLEN(pkg_len, data_len) \
DIV_ROUND_UP(ROUND_UP(Z_LOG_MSG_LEN(pkg_len, data_len), \
Z_LOG_MSG_ALIGNMENT), \
sizeof(uint32_t))
/*
* With Zephyr SDK 0.14.2, aarch64-zephyr-elf-gcc (10.3.0) fails to ensure $sp
* is below the active memory during message construction. As a result,
* interrupts happening in the middle of that process can end up smashing active
* data and causing a logging fault. Work around this by inserting a compiler
* barrier after the allocation and before any use to make sure GCC moves the
* stack pointer soon enough
*/
#define Z_LOG_ARM64_VLA_PROTECT() compiler_barrier()
#define _LOG_MSG_SIMPLE_XXXX0 1
#define _LOG_MSG_SIMPLE_XXXX1 1
#define _LOG_MSG_SIMPLE_XXXX2 1
/* Determine if amount of arguments (less than 3) qualifies to simple message. */
#define LOG_MSG_SIMPLE_ARG_CNT_CHECK(...) \
COND_CODE_1(UTIL_CAT(_LOG_MSG_SIMPLE_XXXX, NUM_VA_ARGS_LESS_1(__VA_ARGS__)), (1), (0))
/* Set of macros used to determine if argument types allow the simplified message creation mode. */
#define LOG_MSG_SIMPLE_ARG_TYPE_CHECK_0(fmt) 1
#define LOG_MSG_SIMPLE_ARG_TYPE_CHECK_1(fmt, arg) Z_CBPRINTF_IS_WORD_NUM(arg)
#define LOG_MSG_SIMPLE_ARG_TYPE_CHECK_2(fmt, arg0, arg1) \
Z_CBPRINTF_IS_WORD_NUM(arg0) && Z_CBPRINTF_IS_WORD_NUM(arg1)
/** @brief Determine if string argument types allow using the simplified message creation mode.
*
* @param ... String with arguments.
*/
#define LOG_MSG_SIMPLE_ARG_TYPE_CHECK(...) \
UTIL_CAT(LOG_MSG_SIMPLE_ARG_TYPE_CHECK_, NUM_VA_ARGS_LESS_1(__VA_ARGS__))(__VA_ARGS__)
/** @brief Check if message can be handled using simplified method.
*
* Following conditions must be met:
* - 32 bit platform
* - Number of arguments from 0 to 2
* - Type of an argument must be a numeric value that fits in 32 bit word.
*
* @param ... String with arguments.
*
* @retval 1 if message qualifies.
* @retval 0 if message does not qualify.
*/
#define LOG_MSG_SIMPLE_CHECK(...) \
COND_CODE_1(CONFIG_64BIT, (0), (\
COND_CODE_1(LOG_MSG_SIMPLE_ARG_CNT_CHECK(__VA_ARGS__), ( \
LOG_MSG_SIMPLE_ARG_TYPE_CHECK(__VA_ARGS__)), (0))))
/* Helper macro for handing log with one argument. Macro casts the first argument to uint32_t. */
#define Z_LOG_MSG_SIMPLE_CREATE_1(_source, _level, ...) \
z_log_msg_simple_create_1(_source, _level, GET_ARG_N(1, __VA_ARGS__), \
(uint32_t)(uintptr_t)GET_ARG_N(2, __VA_ARGS__))
/* Helper macro for handing log with two arguments. Macro casts arguments to uint32_t.
*/
#define Z_LOG_MSG_SIMPLE_CREATE_2(_source, _level, ...) \
z_log_msg_simple_create_2(_source, _level, GET_ARG_N(1, __VA_ARGS__), \
(uint32_t)(uintptr_t)GET_ARG_N(2, __VA_ARGS__), \
(uint32_t)(uintptr_t)GET_ARG_N(3, __VA_ARGS__))
/* Call a specific function based on the number of arguments.
 * Since up to 2 arguments are supported, COND_CODE_0 and COND_CODE_1 can be used to
 * handle all cases (0, 1 and 2 arguments). When tracing is enabled, a macro is
 * created for each function. The difference between a function and a macro is that
 * a macro is applied to whatever input arguments are present, so we need to make
 * sure that it is always called with the proper number of arguments. For that it is
 * wrapped in another macro with dummy arguments to cover the cases when a log call
 * has fewer arguments.
 */
#define Z_LOG_MSG_SIMPLE_FUNC2(arg_cnt, _source, _level, ...) \
COND_CODE_0(arg_cnt, \
(z_log_msg_simple_create_0(_source, _level, GET_ARG_N(1, __VA_ARGS__))), \
(COND_CODE_1(arg_cnt, ( \
Z_LOG_MSG_SIMPLE_CREATE_1(_source, _level, __VA_ARGS__, dummy) \
), ( \
Z_LOG_MSG_SIMPLE_CREATE_2(_source, _level, __VA_ARGS__, dummy, dummy) \
) \
)))
/** @brief Call specific function to create a log message.
*
* Macro picks matching function (based on number of arguments) and calls it.
* String arguments are casted to uint32_t.
*
* @param _source Source.
* @param _level Severity level.
* @param ... String with arguments.
*/
#define LOG_MSG_SIMPLE_FUNC(_source, _level, ...) \
Z_LOG_MSG_SIMPLE_FUNC2(NUM_VA_ARGS_LESS_1(__VA_ARGS__), _source, _level, __VA_ARGS__)
/** @brief Create log message using simplified method.
*
* Macro is gated by the argument count check to run @ref LOG_MSG_SIMPLE_FUNC only
* on entries with 2 or less arguments.
*
* @param _domain_id Domain ID.
* @param _source Pointer to the source structure.
* @param _level Severity level.
* @param ... String with arguments.
*/
#define Z_LOG_MSG_SIMPLE_ARGS_CREATE(_domain_id, _source, _level, ...) \
IF_ENABLED(LOG_MSG_SIMPLE_ARG_CNT_CHECK(__VA_ARGS__), (\
LOG_MSG_SIMPLE_FUNC(_source, _level, __VA_ARGS__); \
))
/* Create a log message using a stack buffer.
 *
 * The cbprintf package is first sized (a NULL format string yields an empty
 * package), then built in a stack buffer obtained from
 * Z_LOG_MSG_ON_STACK_ALLOC, and finally handed to z_log_msg_static_create()
 * together with the message descriptor and the optional hexdump data.
 */
#define Z_LOG_MSG_STACK_CREATE(_cstr_cnt, _domain_id, _source, _level, _data, _dlen, ...) \
do { \
	int _plen; \
	uint32_t _options = Z_LOG_MSG_CBPRINTF_FLAGS(_cstr_cnt) | \
			CBPRINTF_PACKAGE_ADD_RW_STR_POS; \
	if (GET_ARG_N(1, __VA_ARGS__) == NULL) { \
		_plen = 0; \
	} else { \
		CBPRINTF_STATIC_PACKAGE(NULL, 0, _plen, Z_LOG_MSG_ALIGN_OFFSET, _options, \
					__VA_ARGS__); \
	} \
	TOOLCHAIN_IGNORE_WSHADOW_BEGIN \
	struct log_msg *_msg; \
	TOOLCHAIN_IGNORE_WSHADOW_END \
	Z_LOG_MSG_ON_STACK_ALLOC(_msg, Z_LOG_MSG_LEN(_plen, 0)); \
	Z_LOG_ARM64_VLA_PROTECT(); \
	if (_plen != 0) { \
		CBPRINTF_STATIC_PACKAGE(_msg->data, _plen, \
					_plen, Z_LOG_MSG_ALIGN_OFFSET, _options, \
					__VA_ARGS__);\
	} \
	struct log_msg_desc _desc = \
		Z_LOG_MSG_DESC_INITIALIZER(_domain_id, _level, \
					   (uint32_t)_plen, _dlen); \
	LOG_MSG_DBG("creating message on stack: package len: %d, data len: %d\n", \
		    _plen, (int)(_dlen)); \
	z_log_msg_static_create((void *)(_source), _desc, _msg->data, (_data)); \
} while (false)
#ifdef CONFIG_LOG_SPEED
#define Z_LOG_MSG_SIMPLE_CREATE(_cstr_cnt, _domain_id, _source, _level, ...) do { \
int _plen; \
CBPRINTF_STATIC_PACKAGE(NULL, 0, _plen, Z_LOG_MSG_ALIGN_OFFSET, \
Z_LOG_MSG_CBPRINTF_FLAGS(_cstr_cnt), \
__VA_ARGS__); \
size_t _msg_wlen = Z_LOG_MSG_ALIGNED_WLEN(_plen, 0); \
struct log_msg *_msg = z_log_msg_alloc(_msg_wlen); \
struct log_msg_desc _desc = \
Z_LOG_MSG_DESC_INITIALIZER(_domain_id, _level, (uint32_t)_plen, 0); \
LOG_MSG_DBG("creating message zero copy: package len: %d, msg: %p\n", \
_plen, _msg); \
if (_msg) { \
CBPRINTF_STATIC_PACKAGE(_msg->data, _plen, _plen, \
Z_LOG_MSG_ALIGN_OFFSET, \
Z_LOG_MSG_CBPRINTF_FLAGS(_cstr_cnt), \
__VA_ARGS__); \
} \
z_log_msg_finalize(_msg, (void *)_source, _desc, NULL); \
} while (false)
#else
/* Alternative empty macro created to speed up compilation when LOG_SPEED is
* disabled (default).
*/
#define Z_LOG_MSG_SIMPLE_CREATE(...)
#endif
/* Macro handles case when local variable with log message string is created. It
* replaces original string literal with that variable.
*/
#define Z_LOG_FMT_ARGS_2(_name, ...) \
COND_CODE_1(CONFIG_LOG_FMT_SECTION, \
(COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__), \
(_name), (_name, GET_ARGS_LESS_N(1, __VA_ARGS__)))), \
(__VA_ARGS__))
/** @brief Wrapper for log message string with arguments.
*
* Wrapper is replacing first argument with a variable from a dedicated memory
* section if option is enabled. Macro handles the case when there is no
* log message provided.
*
* @param _name Name of the variable with log message string. It is optionally used.
* @param ... Optional log message with arguments (may be empty).
*/
#define Z_LOG_FMT_ARGS(_name, ...) \
COND_CODE_0(NUM_VA_ARGS_LESS_1(_, ##__VA_ARGS__), \
(NULL), \
(Z_LOG_FMT_ARGS_2(_name, ##__VA_ARGS__)))
#if defined(CONFIG_LOG_USE_TAGGED_ARGUMENTS)
#define Z_LOG_FMT_TAGGED_ARGS_2(_name, ...) \
COND_CODE_1(CONFIG_LOG_FMT_SECTION, \
(_name, Z_CBPRINTF_TAGGED_ARGS(NUM_VA_ARGS_LESS_1(__VA_ARGS__), \
GET_ARGS_LESS_N(1, __VA_ARGS__))), \
(GET_ARG_N(1, __VA_ARGS__), \
Z_CBPRINTF_TAGGED_ARGS(NUM_VA_ARGS_LESS_1(__VA_ARGS__), \
GET_ARGS_LESS_N(1, __VA_ARGS__))))
/** @brief Wrapper for log message string with tagged arguments.
*
* Wrapper is replacing first argument with a variable from a dedicated memory
* section if option is enabled. Macro handles the case when there is no
* log message provided. Each subsequent arguments are tagged by preceding
* each argument with its type value.
*
* @param _name Name of the variable with log message string. It is optionally used.
* @param ... Optional log message with arguments (may be empty).
*/
#define Z_LOG_FMT_TAGGED_ARGS(_name, ...) \
COND_CODE_0(NUM_VA_ARGS_LESS_1(_, ##__VA_ARGS__), \
(Z_CBPRINTF_TAGGED_ARGS(0)), \
(Z_LOG_FMT_TAGGED_ARGS_2(_name, ##__VA_ARGS__)))
#define Z_LOG_FMT_RUNTIME_ARGS(...) \
Z_LOG_FMT_TAGGED_ARGS(__VA_ARGS__)
#else
#define Z_LOG_FMT_RUNTIME_ARGS(...) \
Z_LOG_FMT_ARGS(__VA_ARGS__)
#endif /* CONFIG_LOG_USE_TAGGED_ARGUMENTS */
/* Macro handles case when there is no string provided, in that case variable
* is not created.
*/
#define Z_LOG_MSG_STR_VAR_IN_SECTION(_name, ...) \
COND_CODE_0(NUM_VA_ARGS_LESS_1(_, ##__VA_ARGS__), \
(/* No args provided, no variable */), \
(static const char _name[] \
__in_section(_log_strings, static, _CONCAT(_name, _)) __used __noasan = \
GET_ARG_N(1, __VA_ARGS__);))
/** @brief Create variable in the dedicated memory section (if enabled).
*
* Variable is initialized with a format string from the log message.
*
* @param _name Variable name.
* @param ... Optional log message with arguments (may be empty).
*/
#define Z_LOG_MSG_STR_VAR(_name, ...) \
IF_ENABLED(CONFIG_LOG_FMT_SECTION, \
(Z_LOG_MSG_STR_VAR_IN_SECTION(_name, ##__VA_ARGS__)))
/** @brief Create log message and write it into the logger buffer.
*
* Macro handles creation of log message which includes storing log message
* description, timestamp, arguments, copying string arguments into message and
* copying user data into the message space. There are 3 modes of message
* creation:
* - at compile time message size is determined, message is allocated and
* content is written directly to the message. It is the fastest but cannot
* be used in user mode. Message size cannot be determined at compile time if
* it contains data or string arguments which are string pointers.
* - at compile time message size is determined, string package is created on
* stack, message is created in function call. String package can only be
* created on stack if it does not contain unexpected pointers to strings.
* - string package is created at runtime. This mode has no limitations but
* it is significantly slower.
*
* @param _try_0cpy If positive then, if possible, message content is written
* directly to message. If 0 then, if possible, string package is created on
* the stack and message is created in the function call.
*
* @param _mode Used for testing. It is set according to message creation mode
* used.
*
* @param _cstr_cnt Number of constant strings present in the string. It is
* used to help detect messages which must be runtime processed, compared to
* message which can be prebuilt at compile time.
*
* @param _domain_id Domain ID.
*
* @param _source Pointer to the constant descriptor of the log message source.
*
* @param _level Log message level.
*
* @param _data Pointer to the data. Can be null.
*
* @param _dlen Number of data bytes. 0 if data is not provided.
*
* @param ... Optional string with arguments (fmt, ...). It may be empty.
*/
#if defined(CONFIG_LOG_ALWAYS_RUNTIME) || !defined(CONFIG_LOG)
#define Z_LOG_MSG_CREATE2(_try_0cpy, _mode, _cstr_cnt, _domain_id, _source,\
_level, _data, _dlen, ...) \
do {\
Z_LOG_MSG_STR_VAR(_fmt, ##__VA_ARGS__) \
z_log_msg_runtime_create((_domain_id), (void *)(_source), \
(_level), (uint8_t *)(_data), (_dlen),\
Z_LOG_MSG_CBPRINTF_FLAGS(_cstr_cnt) | \
(IS_ENABLED(CONFIG_LOG_USE_TAGGED_ARGUMENTS) ? \
CBPRINTF_PACKAGE_ARGS_ARE_TAGGED : 0), \
Z_LOG_FMT_RUNTIME_ARGS(_fmt, ##__VA_ARGS__));\
(_mode) = Z_LOG_MSG_MODE_RUNTIME; \
} while (false)
#else /* CONFIG_LOG_ALWAYS_RUNTIME || !CONFIG_LOG */
#define Z_LOG_MSG_CREATE3(_try_0cpy, _mode, _cstr_cnt, _domain_id, _source,\
_level, _data, _dlen, ...) \
do { \
Z_LOG_MSG_STR_VAR(_fmt, ##__VA_ARGS__); \
bool has_rw_str = CBPRINTF_MUST_RUNTIME_PACKAGE( \
Z_LOG_MSG_CBPRINTF_FLAGS(_cstr_cnt), \
__VA_ARGS__); \
if (IS_ENABLED(CONFIG_LOG_SPEED) && (_try_0cpy) && ((_dlen) == 0) && !has_rw_str) {\
LOG_MSG_DBG("create zero-copy message\n");\
Z_LOG_MSG_SIMPLE_CREATE(_cstr_cnt, _domain_id, _source, \
_level, Z_LOG_FMT_ARGS(_fmt, ##__VA_ARGS__)); \
(_mode) = Z_LOG_MSG_MODE_ZERO_COPY; \
} else { \
IF_ENABLED(UTIL_AND(IS_ENABLED(CONFIG_LOG_SIMPLE_MSG_OPTIMIZE), \
UTIL_AND(UTIL_NOT(_domain_id), UTIL_NOT(_cstr_cnt))), \
( \
bool can_simple = LOG_MSG_SIMPLE_CHECK(__VA_ARGS__); \
if (can_simple && ((_dlen) == 0) && !k_is_user_context()) { \
LOG_MSG_DBG("create fast message\n");\
Z_LOG_MSG_SIMPLE_ARGS_CREATE(_domain_id, _source, _level, \
Z_LOG_FMT_ARGS(_fmt, ##__VA_ARGS__)); \
_mode = Z_LOG_MSG_MODE_SIMPLE; \
break; \
} \
) \
) \
LOG_MSG_DBG("create on stack message\n");\
Z_LOG_MSG_STACK_CREATE(_cstr_cnt, _domain_id, _source, _level, _data, \
_dlen, Z_LOG_FMT_ARGS(_fmt, ##__VA_ARGS__)); \
(_mode) = Z_LOG_MSG_MODE_FROM_STACK; \
} \
(void)(_mode); \
} while (false)
#if defined(__cplusplus)
#define Z_AUTO_TYPE auto
#else
#define Z_AUTO_TYPE __auto_type
#endif
/* Macro for getting name of a local variable with the exception of the first argument
* which is a formatted string in log message.
*/
#define Z_LOG_LOCAL_ARG_NAME(idx, arg) COND_CODE_0(idx, (arg), (_v##idx))
/* Create local variable from input variable (expect for the first (fmt) argument). */
#define Z_LOG_LOCAL_ARG_CREATE(idx, arg) \
COND_CODE_0(idx, (), (Z_AUTO_TYPE Z_LOG_LOCAL_ARG_NAME(idx, arg) = (arg) + 0))
/* First level of processing creates stack variables to be passed for further processing.
* This is done to prevent multiple evaluations of input arguments (in case argument
* evaluation has side effects, e.g. it is a non-pure function call).
*/
#define Z_LOG_MSG_CREATE2(_try_0cpy, _mode, _cstr_cnt, _domain_id, _source, \
_level, _data, _dlen, ...) \
do { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wpointer-arith\"") \
FOR_EACH_IDX(Z_LOG_LOCAL_ARG_CREATE, (;), __VA_ARGS__); \
_Pragma("GCC diagnostic pop") \
Z_LOG_MSG_CREATE3(_try_0cpy, _mode, _cstr_cnt, _domain_id, _source,\
_level, _data, _dlen, \
FOR_EACH_IDX(Z_LOG_LOCAL_ARG_NAME, (,), __VA_ARGS__)); \
} while (false)
#endif /* CONFIG_LOG_ALWAYS_RUNTIME || !CONFIG_LOG */
#define Z_LOG_MSG_CREATE(_try_0cpy, _mode, _domain_id, _source,\
_level, _data, _dlen, ...) \
Z_LOG_MSG_CREATE2(_try_0cpy, _mode, UTIL_CAT(Z_LOG_FUNC_PREFIX_, _level), \
_domain_id, _source, _level, _data, _dlen, \
Z_LOG_STR(_level, __VA_ARGS__))
/** @brief Allocate log message.
*
* @param wlen Length in 32 bit words.
*
* @return allocated space or null if cannot be allocated.
*/
struct log_msg *z_log_msg_alloc(uint32_t wlen);
/** @brief Finalize message.
*
* Finalization includes setting source, copying data and timestamp in the
* message followed by committing the message.
*
* @param msg Message.
*
* @param source Address of the source descriptor.
*
* @param desc Message descriptor.
*
* @param data Data.
*/
void z_log_msg_finalize(struct log_msg *msg, const void *source,
const struct log_msg_desc desc, const void *data);
/** @brief Create log message using simplified method for string with no arguments.
*
* @param source Pointer to the source structure.
* @param level Severity level.
* @param fmt String pointer.
*/
__syscall void z_log_msg_simple_create_0(const void *source, uint32_t level,
const char *fmt);
/** @brief Create log message using simplified method for string with a one argument.
*
* @param source Pointer to the source structure.
* @param level Severity level.
* @param fmt String pointer.
* @param arg String argument.
*/
__syscall void z_log_msg_simple_create_1(const void *source, uint32_t level,
const char *fmt, uint32_t arg);
/** @brief Create log message using simplified method for string with two arguments.
*
* @param source Pointer to the source structure.
* @param level Severity level.
* @param fmt String pointer.
* @param arg0 String argument.
* @param arg1 String argument.
*/
__syscall void z_log_msg_simple_create_2(const void *source, uint32_t level,
const char *fmt, uint32_t arg0, uint32_t arg1);
/** @brief Create a logging message from message details and string package.
*
* @param source Source.
*
* @param desc Message descriptor.
*
* @param package Package.
*
* @param data Data.
*/
__syscall void z_log_msg_static_create(const void *source,
const struct log_msg_desc desc,
uint8_t *package, const void *data);
/** @brief Create message at runtime.
*
 * Function allows building any log message based on input data. Processing
 * time is significantly higher than for static message creation.
*
* @param domain_id Domain ID.
*
* @param source Source.
*
* @param level Log level.
*
* @param data Data.
*
* @param dlen Data length.
*
* @param package_flags Package flags.
*
* @param fmt String.
*
* @param ap Variable list of string arguments.
*/
void z_log_msg_runtime_vcreate(uint8_t domain_id, const void *source,
uint8_t level, const void *data,
size_t dlen, uint32_t package_flags,
const char *fmt,
va_list ap);
/** @brief Create message at runtime.
*
 * Function allows building any log message based on input data. Processing
 * time is significantly higher than for static message creation.
*
* @param domain_id Domain ID.
*
* @param source Source.
*
* @param level Log level.
*
* @param data Data.
*
* @param dlen Data length.
*
* @param package_flags Package flags.
*
* @param fmt String.
*
* @param ... String arguments.
*/
static inline void z_log_msg_runtime_create(uint8_t domain_id,
					    const void *source,
					    uint8_t level, const void *data,
					    size_t dlen, uint32_t package_flags,
					    const char *fmt, ...)
{
	va_list ap;
	/* Forward the variadic arguments to the va_list based implementation. */
	va_start(ap, fmt);
	z_log_msg_runtime_vcreate(domain_id, source, level,
				  data, dlen, package_flags, fmt, ap);
	va_end(ap);
}
/** @brief Check whether a generic log item is a standard log message.
 *
 * @param msg Generic log item.
 *
 * @return True if the item carries a log message, false otherwise.
 */
static inline bool z_log_item_is_msg(const union log_msg_generic *msg)
{
	return (msg->generic.type == Z_LOG_MSG_LOG) ? true : false;
}
/** @brief Get the total length (in 32 bit words) of a log message.
 *
 * @param desc Log message descriptor.
 *
 * @return Total message length in 32 bit words.
 */
static inline uint32_t log_msg_get_total_wlen(const struct log_msg_desc desc)
{
	uint32_t wlen = Z_LOG_MSG_ALIGNED_WLEN(desc.package_len, desc.data_len);

	return wlen;
}
/** @brief Get the length of a log item.
 *
 * @param item Item.
 *
 * @return Length in 32 bit words, or 0 if the item is not a log message.
 */
static inline uint32_t log_msg_generic_get_wlen(const union mpsc_pbuf_generic *item)
{
	const union log_msg_generic *gmsg = (const union log_msg_generic *)item;

	if (!z_log_item_is_msg(gmsg)) {
		return 0;
	}

	return log_msg_get_total_wlen(((const struct log_msg *)gmsg)->hdr.desc);
}
/** @brief Get log message domain ID.
*
* @param msg Log message.
*
* @return Domain ID
*/
static inline uint8_t log_msg_get_domain(struct log_msg *msg)
{
return msg->hdr.desc.domain;
}
/** @brief Get log message level.
*
* @param msg Log message.
*
* @return Log level.
*/
static inline uint8_t log_msg_get_level(struct log_msg *msg)
{
return msg->hdr.desc.level;
}
/** @brief Get the source data of a log message.
 *
 * @param msg Log message.
 *
 * @return Pointer to the source data.
 */
static inline const void *log_msg_get_source(struct log_msg *msg)
{
	const void *src = msg->hdr.source;

	return src;
}
/** @brief Get log message source ID.
*
* @param msg Log message.
*
* @return Source ID, or -1 if not available.
*/
int16_t log_msg_get_source_id(struct log_msg *msg);
/** @brief Get the timestamp of a log message.
 *
 * @param msg Log message.
 *
 * @return Timestamp.
 */
static inline log_timestamp_t log_msg_get_timestamp(struct log_msg *msg)
{
	log_timestamp_t ts = msg->hdr.timestamp;

	return ts;
}
/** @brief Get Thread ID.
 *
 * @param msg Log message.
 *
 * @return Thread ID, or NULL when thread ID prefixing is disabled.
 */
static inline void *log_msg_get_tid(struct log_msg *msg)
{
#if defined(CONFIG_LOG_THREAD_ID_PREFIX)
	return msg->hdr.tid;
#else
	/* tid field only exists in the header when the prefix option is on. */
	ARG_UNUSED(msg);
	return NULL;
#endif
}
/** @brief Get data buffer.
*
* @param msg log message.
*
* @param len location where data length is written.
*
* @return pointer to the data buffer.
*/
static inline uint8_t *log_msg_get_data(struct log_msg *msg, size_t *len)
{
*len = msg->hdr.desc.data_len;
return msg->data + msg->hdr.desc.package_len;
}
/** @brief Get the string package of a log message.
 *
 * The package is stored at the beginning of the message payload.
 *
 * @param msg Log message.
 * @param len Location where the package length is written.
 *
 * @return Pointer to the package.
 */
static inline uint8_t *log_msg_get_package(struct log_msg *msg, size_t *len)
{
	*len = (size_t)msg->hdr.desc.package_len;

	return &msg->data[0];
}
/**
* @}
*/
#include <zephyr/syscalls/log_msg.h>
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_MSG_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_msg.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,240 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_LOG_BACKEND_STD_H_
#define ZEPHYR_LOG_BACKEND_STD_H_
#include <zephyr/logging/log_msg.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/kernel.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Logger backend interface for forwarding to standard backend
* @defgroup log_backend_std Logger backend standard interface
* @ingroup logger
* @{
*/
/** @brief Get output flags reflecting the active standard backend options.
 *
 * @return Combination of LOG_OUTPUT_FLAG_* values.
 */
static inline uint32_t log_backend_std_get_flags(void)
{
	uint32_t flags = LOG_OUTPUT_FLAG_LEVEL | LOG_OUTPUT_FLAG_TIMESTAMP;

	flags |= IS_ENABLED(CONFIG_LOG_BACKEND_SHOW_COLOR) ? LOG_OUTPUT_FLAG_COLORS : 0;
	flags |= IS_ENABLED(CONFIG_LOG_BACKEND_FORMAT_TIMESTAMP) ?
		 LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP : 0;
	flags |= IS_ENABLED(CONFIG_LOG_THREAD_ID_PREFIX) ? LOG_OUTPUT_FLAG_THREAD : 0;

	return flags;
}
/** @brief Put a standard logger backend into panic mode.
 *
 * @param output Log output instance.
 */
static inline void
log_backend_std_panic(const struct log_output *const output)
{
	/* Flush any data buffered in the output instance. */
	log_output_flush(output);
}
/** @brief Report dropped messages to a standard logger backend.
 *
 * @param output Log output instance.
 * @param cnt Number of dropped messages.
 */
static inline void
log_backend_std_dropped(const struct log_output *const output, uint32_t cnt)
{
	/* Delegate to the log output module for formatting/reporting. */
	log_output_dropped_process(output, cnt);
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_LOG_BACKEND_STD_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_backend_std.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 328 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_LINK_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_LINK_H_
#include <zephyr/types.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/logging/log_msg.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Log link API
* @defgroup log_link Log link API
* @ingroup logger
* @{
*/
struct log_link;
typedef void (*log_link_callback_t)(const struct log_link *link,
union log_msg_generic *msg);
typedef void (*log_link_dropped_cb_t)(const struct log_link *link,
uint32_t dropped);
struct log_link_config {
log_link_callback_t msg_cb;
log_link_dropped_cb_t dropped_cb;
};
struct log_link_api {
int (*initiate)(const struct log_link *link, struct log_link_config *config);
int (*activate)(const struct log_link *link);
int (*get_domain_name)(const struct log_link *link, uint32_t domain_id,
char *buf, size_t *length);
int (*get_source_name)(const struct log_link *link, uint32_t domain_id,
uint16_t source_id, char *buf, size_t *length);
int (*get_levels)(const struct log_link *link, uint32_t domain_id,
uint16_t source_id, uint8_t *level,
uint8_t *runtime_level);
int (*set_runtime_level)(const struct log_link *link, uint32_t domain_id,
uint16_t source_id, uint8_t level);
};
struct log_link_ctrl_blk {
uint32_t domain_cnt;
uint16_t source_cnt[1 + COND_CODE_1(CONFIG_LOG_MULTIDOMAIN,
(CONFIG_LOG_REMOTE_DOMAIN_MAX_COUNT),
(0))];
uint32_t domain_offset;
uint32_t *filters;
};
struct log_link {
const struct log_link_api *api;
const char *name;
struct log_link_ctrl_blk *ctrl_blk;
void *ctx;
struct mpsc_pbuf_buffer *mpsc_pbuf;
const struct mpsc_pbuf_buffer_config *mpsc_pbuf_config;
};
/** @brief Create instance of a log link.
*
* Link can have dedicated buffer for messages if @p _buf_len is positive. In
 * that case messages will be processed in order, since the logging core will
* attempt to fetch message from all available buffers (default and links) and
* process the one with the earliest timestamp. If strict ordering is not needed
* then dedicated buffer may be omitted (@p _buf_len set to 0). That results in
* better memory utilization but unordered messages passed to backends.
*
* @param _name Instance name.
* @param _api API list. See @ref log_link_api.
* @param _buf_wlen Size (in words) of dedicated buffer for messages from this buffer.
* If 0 default buffer is used.
* @param _ctx Context (void *) associated with the link.
*/
#define LOG_LINK_DEF(_name, _api, _buf_wlen, _ctx) \
static uint32_t __aligned(Z_LOG_MSG_ALIGNMENT) _name##_buf32[_buf_wlen]; \
static const struct mpsc_pbuf_buffer_config _name##_mpsc_pbuf_config = { \
.buf = (uint32_t *)_name##_buf32, \
.size = _buf_wlen, \
.notify_drop = z_log_notify_drop, \
.get_wlen = log_msg_generic_get_wlen, \
.flags = IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ? \
MPSC_PBUF_MODE_OVERWRITE : 0 \
}; \
COND_CODE_0(_buf_wlen, (), (static STRUCT_SECTION_ITERABLE(log_msg_ptr, \
_name##_log_msg_ptr);)) \
static STRUCT_SECTION_ITERABLE_ALTERNATE(log_mpsc_pbuf, \
mpsc_pbuf_buffer, \
_name##_log_mpsc_pbuf); \
static struct log_link_ctrl_blk _name##_ctrl_blk; \
static const STRUCT_SECTION_ITERABLE(log_link, _name) = \
{ \
.api = &_api, \
.name = STRINGIFY(_name), \
.ctrl_blk = &_name##_ctrl_blk, \
.ctx = _ctx, \
.mpsc_pbuf = _buf_wlen ? &_name##_log_mpsc_pbuf : NULL, \
.mpsc_pbuf_config = _buf_wlen ? &_name##_mpsc_pbuf_config : NULL \
}
/** @brief Initiate log link.
*
* Function initiates the link. Since initialization procedure may be time
* consuming, function returns before link is ready to not block logging
* initialization. @ref log_link_activate is called to complete link initialization.
*
* @param link Log link instance.
* @param config Configuration.
*
* @return 0 on success or error code.
*/
static inline int log_link_initiate(const struct log_link *link,
				    struct log_link_config *config)
{
	__ASSERT_NO_MSG(link);
	/* Dispatch to the link implementation. */
	return link->api->initiate(link, config);
}
/** @brief Activate log link.
*
* Function checks if link is initialized and completes initialization process.
* When successfully returns, link is ready with domain and sources count fetched
* and timestamp details updated.
*
* @param link Log link instance.
*
* @retval 0 When successfully activated.
* @retval -EINPROGRESS Activation in progress.
*/
static inline int log_link_activate(const struct log_link *link)
{
__ASSERT_NO_MSG(link);
return link->api->activate(link);
}
/** @brief Check if link is activated.
 *
 * @param link Log link instance.
 *
 * @retval 0 When link is activated.
 * @retval -EINPROGRESS Activation still in progress.
 */
static inline int log_link_is_active(const struct log_link *link)
{
	/* Every sibling accessor in this API asserts the link pointer;
	 * previously this one did not -- made consistent here.
	 */
	__ASSERT_NO_MSG(link);

	/* A non-zero domain offset is presumably assigned during activation,
	 * so it doubles here as the "activated" flag.
	 */
	return link->ctrl_blk->domain_offset > 0 ? 0 : -EINPROGRESS;
}
/** @brief Get number of domains in the link.
*
* @param[in] link Log link instance.
*
* @return Number of domains.
*/
static inline uint8_t log_link_domains_count(const struct log_link *link)
{
__ASSERT_NO_MSG(link);
return link->ctrl_blk->domain_cnt;
}
/** @brief Get number of sources in the domain.
 *
 * @param[in] link Log link instance.
 * @param[in] domain_id Relative domain ID. NOTE(review): not range-checked
 *                      against the link's domain count; callers must pass a
 *                      valid ID.
 *
 * @return Source count.
 */
static inline uint16_t log_link_sources_count(const struct log_link *link,
					      uint32_t domain_id)
{
	__ASSERT_NO_MSG(link != NULL);

	return link->ctrl_blk->source_cnt[domain_id];
}
/** @brief Get domain name.
 *
 * @param[in] link Log link instance.
 * @param[in] domain_id Relative domain ID.
 * @param[out] buf Output buffer filled with the domain name. If NULL,
 *                 only the name length is reported via @p length.
 * @param[in,out] length On entry, the buffer size; on exit, the actual
 *                       name length. The name is trimmed if it does not
 *                       fit in the buffer.
 *
 * @return 0 on success or error code.
 */
static inline int log_link_get_domain_name(const struct log_link *link,
					   uint32_t domain_id, char *buf,
					   size_t *length)
{
	__ASSERT_NO_MSG(link != NULL);

	return link->api->get_domain_name(link, domain_id, buf, length);
}
/** @brief Get source name.
 *
 * @param[in] link Log link instance.
 * @param[in] domain_id Relative domain ID.
 * @param[in] source_id Source ID.
 * @param[out] buf Output buffer filled with the source name. Must not be
 *                 NULL (unlike @ref log_link_get_domain_name).
 * @param[in,out] length On entry, the buffer size; on exit, the actual
 *                       name length. The name is trimmed if it does not
 *                       fit in the buffer.
 *
 * @return 0 on success or error code.
 */
static inline int log_link_get_source_name(const struct log_link *link,
					   uint32_t domain_id, uint16_t source_id,
					   char *buf, size_t *length)
{
	__ASSERT_NO_MSG(link != NULL);
	__ASSERT_NO_MSG(buf != NULL);

	return link->api->get_source_name(link, domain_id, source_id, buf,
					  length);
}
/** @brief Get level settings of the given source.
 *
 * @param[in] link Log link instance.
 * @param[in] domain_id Relative domain ID.
 * @param[in] source_id Source ID.
 * @param[out] level Location to store the compile time level.
 * @param[out] runtime_level Location to store the runtime level.
 *
 * @return 0 on success or error code.
 */
static inline int log_link_get_levels(const struct log_link *link,
				      uint32_t domain_id, uint16_t source_id,
				      uint8_t *level, uint8_t *runtime_level)
{
	__ASSERT_NO_MSG(link != NULL);

	return link->api->get_levels(link, domain_id, source_id, level,
				     runtime_level);
}
/** @brief Set runtime level of the given source.
 *
 * @param[in] link Log link instance.
 * @param[in] domain_id Relative domain ID.
 * @param[in] source_id Source ID.
 * @param[in] level Requested runtime level. Must not be 0 (LOG_LEVEL_NONE).
 *
 * @return 0 on success or error code.
 */
static inline int log_link_set_runtime_level(const struct log_link *link,
					     uint32_t domain_id, uint16_t source_id,
					     uint8_t level)
{
	__ASSERT_NO_MSG(link);
	/* NOTE(review): rejects LOG_LEVEL_NONE (0); confirm that disabling a
	 * source entirely through a link is intentionally unsupported.
	 */
	__ASSERT_NO_MSG(level);
	return link->api->set_runtime_level(link, domain_id, source_id, level);
}
/**
* @brief Enqueue external log message.
*
 * Add log message to processing queue. Log message is created outside the
 * local core; for example, it may be coming from an external domain.
*
* @param link Log link instance.
* @param data Message from remote domain.
* @param len Length in bytes.
*/
void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_LINK_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_link.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,190 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_CORE_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_CORE_H_
#include <zephyr/logging/log_msg.h>
#include <zephyr/logging/log_instance.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdarg.h>
#include <zephyr/sys/util.h>
/* This header file keeps all macros and functions needed for creating logging
* messages (macros like @ref LOG_ERR).
*/
/** @brief Logging disabled for the module/instance. */
#define LOG_LEVEL_NONE 0U
/** @brief Error severity level. */
#define LOG_LEVEL_ERR 1U
/** @brief Warning severity level. */
#define LOG_LEVEL_WRN 2U
/** @brief Informational severity level. */
#define LOG_LEVEL_INF 3U
/** @brief Debug severity level (most verbose). */
#define LOG_LEVEL_DBG 4U
#ifdef __cplusplus
extern "C" {
#endif
#ifndef CONFIG_LOG
#define CONFIG_LOG_DEFAULT_LEVEL 0U
#define CONFIG_LOG_MAX_LEVEL 0U
#endif
/* Id of local domain. */
#define Z_LOG_LOCAL_DOMAIN_ID 0
#define LOG_FUNCTION_PREFIX_MASK \
(((uint32_t)IS_ENABLED(CONFIG_LOG_FUNC_NAME_PREFIX_ERR) << \
LOG_LEVEL_ERR) | \
((uint32_t)IS_ENABLED(CONFIG_LOG_FUNC_NAME_PREFIX_WRN) << \
LOG_LEVEL_WRN) | \
((uint32_t)IS_ENABLED(CONFIG_LOG_FUNC_NAME_PREFIX_INF) << \
LOG_LEVEL_INF) | \
((uint32_t)IS_ENABLED(CONFIG_LOG_FUNC_NAME_PREFIX_DBG) << LOG_LEVEL_DBG))
/** @brief Macro for returning local level value if defined or default.
*
* Check @ref IS_ENABLED macro for detailed explanation of the trick.
*/
#define Z_LOG_RESOLVED_LEVEL(_level, _default) \
Z_LOG_RESOLVED_LEVEL1(_level, _default)
#define Z_LOG_RESOLVED_LEVEL1(_level, _default) \
__COND_CODE(_LOG_XXXX##_level, (_level), (_default))
#define _LOG_XXXX0 _LOG_YYYY,
#define _LOG_XXXX0U _LOG_YYYY,
#define _LOG_XXXX1 _LOG_YYYY,
#define _LOG_XXXX1U _LOG_YYYY,
#define _LOG_XXXX2 _LOG_YYYY,
#define _LOG_XXXX2U _LOG_YYYY,
#define _LOG_XXXX3 _LOG_YYYY,
#define _LOG_XXXX3U _LOG_YYYY,
#define _LOG_XXXX4 _LOG_YYYY,
#define _LOG_XXXX4U _LOG_YYYY,
/**
* @brief Macro for conditional code generation if provided log level allows.
*
* Macro behaves similarly to standard \#if \#else \#endif clause. The
* difference is that it is evaluated when used and not when header file is
* included.
*
 * @param _eval_level Evaluated level. If the level evaluates to one of the
 *        existing log levels (1-4) then the macro evaluates to _iftrue.
* @param _iftrue Code that should be inserted when evaluated to true. Note,
* that parameter must be provided in brackets.
* @param _iffalse Code that should be inserted when evaluated to false.
* Note, that parameter must be provided in brackets.
*/
#define Z_LOG_EVAL(_eval_level, _iftrue, _iffalse) \
Z_LOG_EVAL1(_eval_level, _iftrue, _iffalse)
#define Z_LOG_EVAL1(_eval_level, _iftrue, _iffalse) \
__COND_CODE(_LOG_ZZZZ##_eval_level, _iftrue, _iffalse)
#define _LOG_ZZZZ1 _LOG_YYYY,
#define _LOG_ZZZZ1U _LOG_YYYY,
#define _LOG_ZZZZ2 _LOG_YYYY,
#define _LOG_ZZZZ2U _LOG_YYYY,
#define _LOG_ZZZZ3 _LOG_YYYY,
#define _LOG_ZZZZ3U _LOG_YYYY,
#define _LOG_ZZZZ4 _LOG_YYYY,
#define _LOG_ZZZZ4U _LOG_YYYY,
/**
*
* @brief Macro for getting ID of current module.
*/
#define LOG_CURRENT_MODULE_ID() (__log_level != 0 ? \
log_const_source_id(__log_current_const_data) : 0U)
/* Set of defines that are set to 1 if function name prefix is enabled for given level. */
#define Z_LOG_FUNC_PREFIX_0U 0
#define Z_LOG_FUNC_PREFIX_1U COND_CODE_1(CONFIG_LOG_FUNC_NAME_PREFIX_ERR, (1), (0))
#define Z_LOG_FUNC_PREFIX_2U COND_CODE_1(CONFIG_LOG_FUNC_NAME_PREFIX_WRN, (1), (0))
#define Z_LOG_FUNC_PREFIX_3U COND_CODE_1(CONFIG_LOG_FUNC_NAME_PREFIX_INF, (1), (0))
#define Z_LOG_FUNC_PREFIX_4U COND_CODE_1(CONFIG_LOG_FUNC_NAME_PREFIX_DBG, (1), (0))
/**
* @brief Macro for optional injection of function name as first argument of
* formatted string. COND_CODE_0() macro is used to handle no arguments
* case.
*
* The purpose of this macro is to prefix string literal with format specifier
* for function name and inject function name as first argument. In order to
* handle string with no arguments _LOG_Z_EVAL is used.
*/
#define Z_LOG_STR_WITH_PREFIX2(...) \
"%s: " GET_ARG_N(1, __VA_ARGS__), (const char *)__func__\
COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__),\
(),\
(, GET_ARGS_LESS_N(1, __VA_ARGS__))\
)
/* Macro handles case when no format string is provided: e.g. LOG_DBG().
* Handling of format string is deferred to the next level macro.
*/
#define Z_LOG_STR_WITH_PREFIX(...) \
COND_CODE_0(NUM_VA_ARGS_LESS_1(_, ##__VA_ARGS__), \
("%s", (const char *)__func__), \
(Z_LOG_STR_WITH_PREFIX2(__VA_ARGS__)))
/**
* @brief Handle optional injection of function name as the first argument.
*
* Additionally, macro is handling the empty message case.
*/
#define Z_LOG_STR(_level, ...) \
COND_CODE_1(UTIL_CAT(Z_LOG_FUNC_PREFIX_##_level), \
(Z_LOG_STR_WITH_PREFIX(__VA_ARGS__)), (__VA_ARGS__))
#define Z_LOG_LEVEL_CHECK(_level, _check_level, _default_level) \
((_level) <= Z_LOG_RESOLVED_LEVEL(_check_level, _default_level))
#define Z_LOG_CONST_LEVEL_CHECK(_level) \
(IS_ENABLED(CONFIG_LOG) && \
(Z_LOG_LEVEL_CHECK(_level, CONFIG_LOG_OVERRIDE_LEVEL, LOG_LEVEL_NONE) \
|| \
((IS_ENABLED(CONFIG_LOG_OVERRIDE_LEVEL) == false) && \
((_level) <= __log_level) && \
((_level) <= CONFIG_LOG_MAX_LEVEL) \
) \
))
/*****************************************************************************/
/****************** Definitions used by minimal logging *********************/
/*****************************************************************************/
void z_log_minimal_hexdump_print(int level, const void *data, size_t size);
void z_log_minimal_vprintk(const char *fmt, va_list ap);
void z_log_minimal_printk(const char *fmt, ...);
#define Z_LOG_TO_PRINTK(_level, fmt, ...) do { \
z_log_minimal_printk("%c: " fmt "\n", \
z_log_minimal_level_to_char(_level), \
##__VA_ARGS__); \
} while (false)
#define Z_LOG_TO_VPRINTK(_level, fmt, valist) do { \
z_log_minimal_printk("%c: ", z_log_minimal_level_to_char(_level)); \
z_log_minimal_vprintk(fmt, valist); \
z_log_minimal_printk("\n"); \
} while (false)
/* Translate a log severity level into the single-character prefix used by
 * minimal-mode output ('E', 'W', 'I', 'D'); anything else maps to '?'.
 */
static inline char z_log_minimal_level_to_char(int level)
{
	char marker = '?';

	if (level == LOG_LEVEL_ERR) {
		marker = 'E';
	} else if (level == LOG_LEVEL_WRN) {
		marker = 'W';
	} else if (level == LOG_LEVEL_INF) {
		marker = 'I';
	} else if (level == LOG_LEVEL_DBG) {
		marker = 'D';
	}

	return marker;
}
#define Z_LOG_INST(_inst) COND_CODE_1(CONFIG_LOG, (_inst), NULL)
/* If strings are removed from the binary then there is a risk of creating invalid
* cbprintf package if %p is used with character pointer which is interpreted as
* string. A compile time check is performed (since format string is known at
* compile time) and check fails logging message is not created but error is
* emitted instead. String check may increase compilation time so it is not
* always performed (could significantly increase CI time).
*/
#ifdef CONFIG_LOG_FMT_STRING_VALIDATE
#define LOG_STRING_WARNING(_mode, _src, ...) \
Z_LOG_MSG_CREATE(UTIL_NOT(IS_ENABLED(CONFIG_USERSPACE)), _mode, \
Z_LOG_LOCAL_DOMAIN_ID, _src, LOG_LEVEL_ERR, NULL, 0, \
"char pointer used for %%p, cast to void *:\"%s\"", \
GET_ARG_N(1, __VA_ARGS__))
#define LOG_POINTERS_VALIDATE(string_ok, ...) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wpointer-arith\"") \
string_ok = Z_CBPRINTF_POINTERS_VALIDATE(__VA_ARGS__); \
_Pragma("GCC diagnostic pop")
#else
#define LOG_POINTERS_VALIDATE(string_ok, ...) string_ok = true
#define LOG_STRING_WARNING(_mode, _src, ...)
#endif
/*****************************************************************************/
/****************** Macros for standard logging ******************************/
/*****************************************************************************/
/** @internal
* @brief Generic logging macro.
*
* It checks against static levels (resolved at compile timer), runtime levels
* and modes and dispatch to relevant processing path.
*
* @param _level Log message severity level.
*
* @param _inst Set to 1 for instance specific log message. 0 otherwise.
*
* @param _source Pointer to static source descriptor object. NULL when runtime filtering
* is enabled.
*
* @param _dsource Pointer to dynamic source descriptor. NULL when runtime filtering
* is disabled.
*
* @param ... String with arguments.
*/
#define Z_LOG2(_level, _inst, _source, _dsource, ...) do { \
if (!Z_LOG_CONST_LEVEL_CHECK(_level)) { \
break; \
} \
if (IS_ENABLED(CONFIG_LOG_MODE_MINIMAL)) { \
Z_LOG_TO_PRINTK(_level, __VA_ARGS__); \
break; \
} \
/* For instance logging check instance specific static level */ \
if (_inst != 0 && !IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) { \
if (_level > ((struct log_source_const_data *)_source)->level) { \
break; \
} \
} \
\
bool is_user_context = k_is_user_context(); \
if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) && \
!is_user_context && _level > Z_LOG_RUNTIME_FILTER((_dsource)->filters)) { \
break; \
} \
int _mode; \
void *_src = IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ? \
(void *)(_dsource) : (void *)(_source); \
bool string_ok; \
LOG_POINTERS_VALIDATE(string_ok, __VA_ARGS__); \
if (!string_ok) { \
LOG_STRING_WARNING(_mode, _src, __VA_ARGS__); \
break; \
} \
Z_LOG_MSG_CREATE(UTIL_NOT(IS_ENABLED(CONFIG_USERSPACE)), _mode, \
Z_LOG_LOCAL_DOMAIN_ID, _src, _level, NULL,\
0, __VA_ARGS__); \
(void)_mode; \
if (false) { \
/* Arguments checker present but never evaluated.*/ \
/* Placed here to ensure that __VA_ARGS__ are*/ \
/* evaluated once when log is enabled.*/ \
z_log_printf_arg_checker(__VA_ARGS__); \
} \
} while (false)
#define Z_LOG(_level, ...) \
Z_LOG2(_level, 0, __log_current_const_data, __log_current_dynamic_data, __VA_ARGS__)
#define Z_LOG_INSTANCE(_level, _inst, ...) do { \
(void)_inst; \
Z_LOG2(_level, 1, \
COND_CODE_1(CONFIG_LOG_RUNTIME_FILTERING, (NULL), (Z_LOG_INST(_inst))), \
(struct log_source_dynamic_data *)COND_CODE_1( \
CONFIG_LOG_RUNTIME_FILTERING, \
(Z_LOG_INST(_inst)), (NULL)), \
__VA_ARGS__); \
} while (0)
/*****************************************************************************/
/****************** Macros for hexdump logging *******************************/
/*****************************************************************************/
/** @internal
* @brief Generic logging macro.
*
* It checks against static levels (resolved at compile timer), runtime levels
* and modes and dispatch to relevant processing path.
*
* @param _level Log message severity level.
*
* @param _inst Set to 1 for instance specific log message. 0 otherwise.
*
* @param _source Pointer to static source descriptor object. NULL when runtime filtering
* is enabled.
*
* @param _dsource Pointer to dynamic source descriptor. NULL when runtime filtering
* is disabled.
*
* @param _data Hexdump data;
*
* @param _len Hexdump data length.
*
* @param ... String.
*/
#define Z_LOG_HEXDUMP2(_level, _inst, _source, _dsource, _data, _len, ...) do { \
const char *_str = GET_ARG_N(1, __VA_ARGS__); \
if (!Z_LOG_CONST_LEVEL_CHECK(_level)) { \
break; \
} \
/* For instance logging check instance specific static level */ \
if (_inst && !IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) { \
if (_level > ((struct log_source_const_data *)_source)->level) { \
break; \
} \
} \
bool is_user_context = k_is_user_context(); \
uint32_t filters = IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ? \
(_dsource)->filters : 0;\
\
if (IS_ENABLED(CONFIG_LOG_MODE_MINIMAL)) { \
Z_LOG_TO_PRINTK(_level, "%s", _str); \
z_log_minimal_hexdump_print((_level), \
(const char *)(_data), (_len));\
break; \
} \
if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) && \
!is_user_context && (_level) > Z_LOG_RUNTIME_FILTER(filters)) { \
break; \
} \
int mode; \
void *_src = IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ? \
(void *)(_dsource) : (void *)(_source); \
Z_LOG_MSG_CREATE(UTIL_NOT(IS_ENABLED(CONFIG_USERSPACE)), mode, \
Z_LOG_LOCAL_DOMAIN_ID, _src, _level, \
_data, _len, \
COND_CODE_0(NUM_VA_ARGS_LESS_1(_, ##__VA_ARGS__), \
(), \
(COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__), \
("%s", __VA_ARGS__), (__VA_ARGS__)))));\
} while (false)
#define Z_LOG_HEXDUMP(_level, _data, _length, ...) \
Z_LOG_HEXDUMP2(_level, 0, \
__log_current_const_data, \
__log_current_dynamic_data, \
_data, _length, __VA_ARGS__)
#define Z_LOG_HEXDUMP_INSTANCE(_level, _inst, _data, _length, _str) \
Z_LOG_HEXDUMP2(_level, 1, \
COND_CODE_1(CONFIG_LOG_RUNTIME_FILTERING, (NULL), (Z_LOG_INST(_inst))), \
(struct log_source_dynamic_data *)COND_CODE_1( \
CONFIG_LOG_RUNTIME_FILTERING, \
(Z_LOG_INST(_inst)), (NULL)), \
_data, _length, _str)
/*****************************************************************************/
/****************** Filtering macros *****************************************/
/*****************************************************************************/
/** @brief Number of bits used to encode log level. */
#define LOG_LEVEL_BITS 3U
/** @brief Filter slot size. */
#define LOG_FILTER_SLOT_SIZE LOG_LEVEL_BITS
/** @brief Number of slots in one word. */
#define LOG_FILTERS_NUM_OF_SLOTS (32 / LOG_FILTER_SLOT_SIZE)
/** @brief Maximum number of backends supported when runtime filtering is enabled. */
#define LOG_FILTERS_MAX_BACKENDS \
(LOG_FILTERS_NUM_OF_SLOTS - (1 + IS_ENABLED(CONFIG_LOG_FRONTEND)))
/** @brief Slot reserved for the frontend. Last slot is used. */
#define LOG_FRONTEND_SLOT_ID (LOG_FILTERS_NUM_OF_SLOTS - 1)
/** @brief Slot mask. */
#define LOG_FILTER_SLOT_MASK (BIT(LOG_FILTER_SLOT_SIZE) - 1U)
/** @brief Bit offset of a slot.
*
* @param _id Slot ID.
*/
#define LOG_FILTER_SLOT_SHIFT(_id) (LOG_FILTER_SLOT_SIZE * (_id))
/** @brief Read the level stored in one filter slot.
 *
 * @param _filters Pointer to the 32-bit filter word.
 * @param _id Slot ID.
 */
#define LOG_FILTER_SLOT_GET(_filters, _id) \
	((*(_filters) >> LOG_FILTER_SLOT_SHIFT(_id)) & LOG_FILTER_SLOT_MASK)

/** @brief Store @p _filter in slot @p _id.
 *
 * Plain read-modify-write on the filter word (not atomic).
 *
 * @param _filters Pointer to the 32-bit filter word.
 * @param _id Slot ID.
 * @param _filter Level to store; masked to LOG_FILTER_SLOT_SIZE bits.
 */
#define LOG_FILTER_SLOT_SET(_filters, _id, _filter) \
	do { \
		*(_filters) &= ~(LOG_FILTER_SLOT_MASK << \
				LOG_FILTER_SLOT_SHIFT(_id)); \
		*(_filters) |= ((_filter) & LOG_FILTER_SLOT_MASK) << \
				LOG_FILTER_SLOT_SHIFT(_id); \
	} while (false)

/** @brief Index of the aggregated (max of all backends) level slot. */
#define LOG_FILTER_AGGR_SLOT_IDX 0

/** @brief Read the aggregated level slot. */
#define LOG_FILTER_AGGR_SLOT_GET(_filters) \
	LOG_FILTER_SLOT_GET(_filters, LOG_FILTER_AGGR_SLOT_IDX)
#define LOG_FILTER_FIRST_BACKEND_SLOT_IDX 1
/* Return aggregated (highest) level for all enabled backends, e.g. if there
* are 3 active backends, one backend is set to get INF logs from a module and
* two other backends are set for ERR, returned level is INF.
*/
#define Z_LOG_RUNTIME_FILTER(_filter) \
LOG_FILTER_SLOT_GET(&(_filter), LOG_FILTER_AGGR_SLOT_IDX)
/** @brief Log level value used to indicate log entry that should not be
* formatted (raw string).
*/
#define LOG_LEVEL_INTERNAL_RAW_STRING LOG_LEVEL_NONE
TYPE_SECTION_START_EXTERN(struct log_source_const_data, log_const);
TYPE_SECTION_END_EXTERN(struct log_source_const_data, log_const);
/** @brief Create message for logging printk-like string or a raw string.
*
* Part of printk string processing is appending of carriage return after any
* new line character found in the string. If it is not desirable then @p _is_raw
* can be set to 1 to indicate raw string. This information is stored in the source
* field which is not used for its typical purpose in this case.
*
* @param _is_raw Set to 1 to indicate raw string, set to 0 to indicate printk.
* @param ... Format string with arguments.
*/
#define Z_LOG_PRINTK(_is_raw, ...) do { \
if (!IS_ENABLED(CONFIG_LOG)) { \
break; \
} \
if (IS_ENABLED(CONFIG_LOG_MODE_MINIMAL)) { \
z_log_minimal_printk(__VA_ARGS__); \
break; \
} \
int _mode; \
if (0) {\
z_log_printf_arg_checker(__VA_ARGS__); \
} \
Z_LOG_MSG_CREATE(!IS_ENABLED(CONFIG_USERSPACE), _mode, \
Z_LOG_LOCAL_DOMAIN_ID, (const void *)(uintptr_t)_is_raw, \
LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0, __VA_ARGS__);\
} while (0)
/** @brief Get index of the log source based on the address of the constant data
* associated with the source.
*
* @param data Address of the constant data.
*
* @return Source ID.
*/
static inline uint32_t log_const_source_id(
const struct log_source_const_data *data)
{
return ((const uint8_t *)data - (uint8_t *)TYPE_SECTION_START(log_const))/
sizeof(struct log_source_const_data);
}
TYPE_SECTION_START_EXTERN(struct log_source_dynamic_data, log_dynamic);
TYPE_SECTION_END_EXTERN(struct log_source_dynamic_data, log_dynamic);
/** @brief Creates name of variable and section for runtime log data.
*
* @param _name Name.
*/
#define LOG_ITEM_DYNAMIC_DATA(_name) UTIL_CAT(log_dynamic_, _name)
#define LOG_INSTANCE_DYNAMIC_DATA(_module_name, _inst) \
LOG_ITEM_DYNAMIC_DATA(Z_LOG_INSTANCE_FULL_NAME(_module_name, _inst))
/** @brief Get index of the log source based on the address of the dynamic data
* associated with the source.
*
* @param data Address of the dynamic data.
*
* @return Source ID.
*/
static inline uint32_t log_dynamic_source_id(struct log_source_dynamic_data *data)
{
return ((uint8_t *)data - (uint8_t *)TYPE_SECTION_START(log_dynamic))/
sizeof(struct log_source_dynamic_data);
}
/** @brief Dummy function to trigger log messages arguments type checking.
 *
 * Never called at run time; it is only referenced inside `if (false)` blocks
 * so the compiler validates format strings against their arguments via the
 * __printf_like attribute.
 */
static inline __printf_like(1, 2)
void z_log_printf_arg_checker(const char *fmt, ...)
{
	ARG_UNUSED(fmt);
}
/**
 * @brief Write a generic log message.
 *
 * @note This function is intended to be used when porting other log systems.
 *
 * @param level Log level.
 * @param fmt String to format.
 * @param ap Pointer to arguments list.
 */
static inline void log_generic(uint8_t level, const char *fmt, va_list ap)
{
	/* Create the message at run time (format string is not known at
	 * compile time), in the local domain with no source descriptor.
	 */
	z_log_msg_runtime_vcreate(Z_LOG_LOCAL_DOMAIN_ID, NULL, level,
				  NULL, 0, 0, fmt, ap);
}
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_CORE_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_core.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,558 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LOGGING_LOG_MULTIDOMAIN_HELPER_H_
#define ZEPHYR_INCLUDE_LOGGING_LOG_MULTIDOMAIN_HELPER_H_
/**
* @brief Logger multidomain backend helpers
*
* This module aims to provide baseline for links and backends and simplify
* the implementation. It is not core part of logging in similar way as
* log_output module is just a helper for log message formatting. Links and
* backends can be implemented without this helper.
*
* @defgroup log_backend_multidomain Logger multidomain backend helpers
* @ingroup log_backend
* @{
*/
/**
* @name Multidomain message IDs
* @anchor LOG_MULTIDOMAIN_HELPER_MESSAGE_IDS
* @{
*/
/** @brief Logging message ID. */
#define Z_LOG_MULTIDOMAIN_ID_MSG 0
/** @brief Domain count request ID. */
#define Z_LOG_MULTIDOMAIN_ID_GET_DOMAIN_CNT 1
/** @brief Source count request ID. */
#define Z_LOG_MULTIDOMAIN_ID_GET_SOURCE_CNT 2
/** @brief Domain name request ID. */
#define Z_LOG_MULTIDOMAIN_ID_GET_DOMAIN_NAME 3
/** @brief Source name request ID. */
#define Z_LOG_MULTIDOMAIN_ID_GET_SOURCE_NAME 4
/** @brief Compile time and run-time levels request ID. */
#define Z_LOG_MULTIDOMAIN_ID_GET_LEVELS 5
/** @brief Setting run-time level ID. */
#define Z_LOG_MULTIDOMAIN_ID_SET_RUNTIME_LEVEL 6
/** @brief Get number of dropped message ID. */
#define Z_LOG_MULTIDOMAIN_ID_DROPPED 7
/** @brief Link-backend readiness indication ID. */
#define Z_LOG_MULTIDOMAIN_ID_READY 8
/**@} */
/**
* @name Multidomain status flags
* @anchor LOG_MULTIDOMAIN_STATUS
* @{
*/
/** @brief OK. */
#define Z_LOG_MULTIDOMAIN_STATUS_OK 0
/** @brief Error. */
#define Z_LOG_MULTIDOMAIN_STATUS_ERR 1
/**@} */
/** @brief Content of the logging message.
 *
 * @note Variable-length payloads use GNU zero-length arrays ([0]) rather than
 * C99 flexible array members because these structs are also members of
 * union log_multidomain_msg_data below, where flexible array members are not
 * permitted by standard C.
 */
struct log_multidomain_log_msg {
	uint8_t data[0];
} __packed;
/** @brief Content of the domain count message. */
struct log_multidomain_domain_cnt {
	uint16_t count;
} __packed;
/** @brief Content of the source count message. */
struct log_multidomain_source_cnt {
	uint8_t domain_id;
	uint16_t count;
} __packed;
/** @brief Content of the domain name message. */
struct log_multidomain_domain_name {
	uint8_t domain_id;
	char name[0]; /* Variable length; see note on log_multidomain_log_msg. */
} __packed;
/** @brief Content of the source name message. */
struct log_multidomain_source_name {
	uint8_t domain_id;
	uint16_t source_id;
	char name[0]; /* Variable length; see note on log_multidomain_log_msg. */
} __packed;
/** @brief Content of the message for getting logging levels. */
struct log_multidomain_levels {
	uint8_t domain_id;
	uint16_t source_id;
	uint8_t level;         /* Compile-time level. */
	uint8_t runtime_level; /* Run-time level. */
} __packed;
/** @brief Content of the message for setting logging level. */
struct log_multidomain_set_runtime_level {
	uint8_t domain_id;
	uint16_t source_id;
	uint8_t runtime_level;
} __packed;
/** @brief Content of the message for getting amount of dropped messages. */
struct log_multidomain_dropped {
	uint32_t dropped;
} __packed;
/** @brief Union with all message types. */
union log_multidomain_msg_data {
	struct log_multidomain_log_msg log_msg;
	struct log_multidomain_domain_cnt domain_cnt;
	struct log_multidomain_source_cnt source_cnt;
	struct log_multidomain_domain_name domain_name;
	struct log_multidomain_source_name source_name;
	struct log_multidomain_levels levels;
	struct log_multidomain_set_runtime_level set_rt_level;
	struct log_multidomain_dropped dropped;
};
/** @brief Message.
 *
 * Wire format exchanged between link and backend: a message ID (see
 * @ref LOG_MULTIDOMAIN_HELPER_MESSAGE_IDS), a status (see
 * @ref LOG_MULTIDOMAIN_STATUS) and the ID-specific payload. All members are
 * packed (no padding).
 */
struct log_multidomain_msg {
	uint8_t id;
	uint8_t status;
	union log_multidomain_msg_data data;
} __packed;
/** @brief Forward declaration. */
struct log_multidomain_link;
/** @brief Structure with link transport API. */
struct log_multidomain_link_transport_api {
int (*init)(struct log_multidomain_link *link);
int (*send)(struct log_multidomain_link *link, void *data, size_t len);
};
/** @brief Union for holding data returned by associated remote backend. */
union log_multidomain_link_dst {
uint16_t count;
struct {
char *dst;
size_t *len;
} name;
struct {
uint8_t level;
uint8_t runtime_level;
} levels;
struct {
uint8_t level;
} set_runtime_level;
};
/** @brief Remote link API. */
extern struct log_link_api log_multidomain_link_api;
/** @brief Remote link structure. */
struct log_multidomain_link {
const struct log_multidomain_link_transport_api *transport_api;
struct k_sem rdy_sem;
const struct log_link *link;
union log_multidomain_link_dst dst;
int status;
bool ready;
};
/** @brief Forward declaration. */
struct log_multidomain_backend;
/** @brief Backend transport API. */
struct log_multidomain_backend_transport_api {
int (*init)(struct log_multidomain_backend *backend);
int (*send)(struct log_multidomain_backend *backend, void *data, size_t len);
};
/** @brief Remote backend API. */
extern const struct log_backend_api log_multidomain_backend_api;
/** @brief Remote backend structure. */
struct log_multidomain_backend {
const struct log_multidomain_backend_transport_api *transport_api;
const struct log_backend *log_backend;
struct k_sem rdy_sem;
bool panic;
int status;
bool ready;
};
/** @brief Function to be called when data is received from remote.
*
* @param link Link instance.
* @param data Data.
* @param len Data length.
*/
void log_multidomain_link_on_recv_cb(struct log_multidomain_link *link,
const void *data, size_t len);
/** @brief Function called on error reported by transport layer.
*
* @param link Link instance.
* @param err Error code.
*/
void log_multidomain_link_on_error(struct log_multidomain_link *link, int err);
/** @brief Function called when connection with remote is established.
*
* @param link Link instance.
* @param err Error code.
*/
void log_multidomain_link_on_started(struct log_multidomain_link *link, int err);
/** @brief Function to be called when data is received from remote.
*
* @param backend Backend instance.
* @param data Data.
* @param len Data length.
*/
void log_multidomain_backend_on_recv_cb(struct log_multidomain_backend *backend,
const void *data, size_t len);
/** @brief Function called on error reported by transport layer.
*
* @param backend Backend instance.
* @param err Error code.
*/
void log_multidomain_backend_on_error(struct log_multidomain_backend *backend, int err);
/** @brief Function called when connection with remote is established.
*
* @param backend Backend instance.
* @param err Error code.
*/
void log_multidomain_backend_on_started(struct log_multidomain_backend *backend, int err);
/** @} */
#endif /* ZEPHYR_INCLUDE_LOGGING_LOG_MULTIDOMAIN_HELPER_H_ */
``` | /content/code_sandbox/include/zephyr/logging/log_multidomain_helper.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,558 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_TIMING_TYPES_H_
#define ZEPHYR_INCLUDE_TIMING_TYPES_H_

/* Required for uint64_t; previously this header relied on its includer to
 * pull in <stdint.h>, which made it non-self-contained.
 */
#include <stdint.h>

/** @brief Raw counter value used by the timing measurement API. */
typedef uint64_t timing_t;

#endif /* ZEPHYR_INCLUDE_TIMING_TYPES_H_ */
``` | /content/code_sandbox/include/zephyr/timing/types.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 46 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_RANDOM_RAND32_H_
#define ZEPHYR_INCLUDE_RANDOM_RAND32_H_
#include <zephyr/random/random.h>
#warning "<zephyr/random/rand32.h> is deprecated, include <zephyr/random/random.h> instead"
#endif /* ZEPHYR_INCLUDE_RANDOM_RAND32_H_ */
``` | /content/code_sandbox/include/zephyr/random/rand32.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 76 |
```objective-c
/*
*
*/
/**
* @file
* @brief Random number generator header file
*
* This header file declares prototypes for the kernel's random number
* generator APIs.
*
* Typically, a platform enables the appropriate source for the random
* number generation based on the hardware platform's capabilities or
* (for testing purposes only) enables the TEST_RANDOM_GENERATOR
* configuration option.
*/
#ifndef ZEPHYR_INCLUDE_RANDOM_RANDOM_H_
#define ZEPHYR_INCLUDE_RANDOM_RANDOM_H_
#include <zephyr/types.h>
#include <stddef.h>
#include <zephyr/kernel.h>
/**
* @brief Random Function APIs
* @defgroup random_api Random Function APIs
* @since 1.0
* @version 1.0.0
* @ingroup crypto
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Fill the destination buffer with random data values that should
* pass general randomness tests.
*
* @note The random values returned are not considered cryptographically
* secure random number values.
*
* @param [out] dst destination buffer to fill with random data.
* @param len size of the destination buffer.
*
*/
__syscall void sys_rand_get(void *dst, size_t len);
/**
* @brief Fill the destination buffer with cryptographically secure
* random data values.
*
* @note If the random values requested do not need to be cryptographically
* secure then use sys_rand_get() instead.
*
* @param [out] dst destination buffer to fill.
* @param len size of the destination buffer.
*
* @return 0 if success, -EIO if entropy reseed error
*
*/
__syscall int sys_csrand_get(void *dst, size_t len);
/**
 * @brief Return a 8-bit random value that should pass general
 * randomness tests.
 *
 * @note Not suitable for cryptographic use; see sys_csrand_get() for
 * cryptographically secure values.
 *
 * @return 8-bit random value.
 */
static inline uint8_t sys_rand8_get(void)
{
	uint8_t value;

	sys_rand_get(&value, sizeof(value));

	return value;
}
/**
 * @brief Return a 16-bit random value that should pass general
 * randomness tests.
 *
 * @note Not suitable for cryptographic use; see sys_csrand_get() for
 * cryptographically secure values.
 *
 * @return 16-bit random value.
 */
static inline uint16_t sys_rand16_get(void)
{
	uint16_t value;

	sys_rand_get(&value, sizeof(value));

	return value;
}
/**
 * @brief Return a 32-bit random value that should pass general
 * randomness tests.
 *
 * @note Not suitable for cryptographic use; see sys_csrand_get() for
 * cryptographically secure values.
 *
 * @return 32-bit random value.
 */
static inline uint32_t sys_rand32_get(void)
{
	uint32_t value;

	sys_rand_get(&value, sizeof(value));

	return value;
}
/**
 * @brief Return a 64-bit random value that should pass general
 * randomness tests.
 *
 * @note Not suitable for cryptographic use; see sys_csrand_get() for
 * cryptographically secure values.
 *
 * @return 64-bit random value.
 */
static inline uint64_t sys_rand64_get(void)
{
	uint64_t value;

	sys_rand_get(&value, sizeof(value));

	return value;
}
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#include <zephyr/syscalls/random.h>
#endif /* ZEPHYR_INCLUDE_RANDOM_RANDOM_H_ */
``` | /content/code_sandbox/include/zephyr/random/random.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 720 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_TIMING_TIMING_H_
#define ZEPHYR_INCLUDE_TIMING_TIMING_H_
#include <zephyr/arch/arch_interface.h>
#include <zephyr/timing/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Timing Measurement APIs
* @defgroup timing_api Timing Measurement APIs
* @ingroup os_services
*
* The timing measurement APIs can be used to obtain execution
* time of a section of code to aid in analysis and optimization.
*
* Please note that the timing functions may use a different timer
* than the default kernel timer, where the timer being used is
* specified by architecture, SoC or board configuration.
*/
/**
* @brief SoC specific Timing Measurement APIs
* @defgroup timing_api_soc SoC specific Timing Measurement APIs
* @ingroup timing_api
*
* Implements the necessary bits to support timing measurement
* using SoC specific timing measurement mechanism.
*
* @{
*/
/**
* @brief Initialize the timing subsystem on SoC.
*
* Perform the necessary steps to initialize the timing subsystem.
*
* @see timing_init()
*/
void soc_timing_init(void);
/**
* @brief Signal the start of the timing information gathering.
*
* Signal to the timing subsystem that timing information
* will be gathered from this point forward.
*
* @see timing_start()
*/
void soc_timing_start(void);
/**
* @brief Signal the end of the timing information gathering.
*
* Signal to the timing subsystem that timing information
* is no longer being gathered from this point forward.
*
* @see timing_stop()
*/
void soc_timing_stop(void);
/**
* @brief Return timing counter.
*
* @note Not all SoCs have timing counters with 64 bit precision. It
* is possible to see this value "go backwards" due to internal
* rollover. Timing code must be prepared to address the rollover
* (with SoC dependent code, e.g. by casting to a uint32_t before
* subtraction) or by using soc_timing_cycles_get() which is required
* to understand the distinction.
*
* @return Timing counter.
*
* @see timing_counter_get()
*/
timing_t soc_timing_counter_get(void);
/**
* @brief Get number of cycles between @p start and @p end.
*
* @note The raw numbers from counter need to be scaled to
* obtain actual number of cycles, or may roll over internally.
* This function computes a positive-definite interval between two
* returned cycle values.
*
* @param start Pointer to counter at start of a measured execution.
* @param end Pointer to counter at stop of a measured execution.
* @return Number of cycles between start and end.
*
* @see timing_cycles_get()
*/
uint64_t soc_timing_cycles_get(volatile timing_t *const start,
volatile timing_t *const end);
/**
* @brief Get frequency of counter used (in Hz).
*
* @return Frequency of counter used for timing in Hz.
*
* @see timing_freq_get()
*/
uint64_t soc_timing_freq_get(void);
/**
* @brief Convert number of @p cycles into nanoseconds.
*
* @param cycles Number of cycles
* @return Converted time value
*
* @see timing_cycles_to_ns()
*/
uint64_t soc_timing_cycles_to_ns(uint64_t cycles);
/**
* @brief Convert number of @p cycles into nanoseconds with averaging.
*
* @param cycles Number of cycles
* @param count Times of accumulated cycles to average over
* @return Converted time value
*
* @see timing_cycles_to_ns_avg()
*/
uint64_t soc_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);
/**
* @brief Get frequency of counter used (in MHz).
*
* @return Frequency of counter used for timing in MHz.
*
* @see timing_freq_get_mhz()
*/
uint32_t soc_timing_freq_get_mhz(void);
/**
* @}
*/
/**
* @brief Board specific Timing Measurement APIs
* @defgroup timing_api_board Board specific Timing Measurement APIs
* @ingroup timing_api
*
* Implements the necessary bits to support timing measurement
* using board specific timing measurement mechanism.
*
* @{
*/
/**
* @brief Initialize the timing subsystem.
*
* Perform the necessary steps to initialize the timing subsystem.
*
* @see timing_init()
*/
void board_timing_init(void);
/**
* @brief Signal the start of the timing information gathering.
*
* Signal to the timing subsystem that timing information
* will be gathered from this point forward.
*
* @see timing_start()
*/
void board_timing_start(void);
/**
* @brief Signal the end of the timing information gathering.
*
* Signal to the timing subsystem that timing information
* is no longer being gathered from this point forward.
*
* @see timing_stop()
*/
void board_timing_stop(void);
/**
* @brief Return timing counter.
*
* @note Not all timing counters have 64 bit precision. It is
* possible to see this value "go backwards" due to internal
* rollover. Timing code must be prepared to address the rollover
* (with board dependent code, e.g. by casting to a uint32_t before
* subtraction) or by using board_timing_cycles_get() which is required
* to understand the distinction.
*
* @return Timing counter.
*
* @see timing_counter_get()
*/
timing_t board_timing_counter_get(void);
/**
* @brief Get number of cycles between @p start and @p end.
*
* @note The raw numbers from counter need to be scaled to
* obtain actual number of cycles, or may roll over internally.
* This function computes a positive-definite interval between two
* returned cycle values.
*
* @param start Pointer to counter at start of a measured execution.
* @param end Pointer to counter at stop of a measured execution.
* @return Number of cycles between start and end.
*
* @see timing_cycles_get()
*/
uint64_t board_timing_cycles_get(volatile timing_t *const start,
volatile timing_t *const end);
/**
* @brief Get frequency of counter used (in Hz).
*
* @return Frequency of counter used for timing in Hz.
*
* @see timing_freq_get()
*/
uint64_t board_timing_freq_get(void);
/**
* @brief Convert number of @p cycles into nanoseconds.
*
* @param cycles Number of cycles
* @return Converted time value
*
* @see timing_cycles_to_ns()
*/
uint64_t board_timing_cycles_to_ns(uint64_t cycles);
/**
* @brief Convert number of @p cycles into nanoseconds with averaging.
*
* @param cycles Number of cycles
* @param count Times of accumulated cycles to average over
* @return Converted time value
*
* @see timing_cycles_to_ns_avg()
*/
uint64_t board_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);
/**
* @brief Get frequency of counter used (in MHz).
*
* @return Frequency of counter used for timing in MHz.
*
* @see timing_freq_get_mhz()
*/
uint32_t board_timing_freq_get_mhz(void);
/**
* @}
*/
/**
* @addtogroup timing_api
* @{
*/
#ifdef CONFIG_TIMING_FUNCTIONS
/**
* @brief Initialize the timing subsystem.
*
* Perform the necessary steps to initialize the timing subsystem.
*/
void timing_init(void);
/**
* @brief Signal the start of the timing information gathering.
*
* Signal to the timing subsystem that timing information
* will be gathered from this point forward.
*/
void timing_start(void);
/**
* @brief Signal the end of the timing information gathering.
*
* Signal to the timing subsystem that timing information
* is no longer being gathered from this point forward.
*/
void timing_stop(void);
/**
 * @brief Return timing counter.
 *
 * Dispatches to the board, SoC or architecture implementation,
 * in that order of preference, depending on which timing backend
 * is enabled in the build configuration.
 *
 * @note Per the backend documentation above, not all counters have
 * 64-bit precision; the value may appear to "go backwards" due to
 * internal rollover. Use timing_cycles_get() to compute intervals.
 *
 * @return Timing counter.
 */
static inline timing_t timing_counter_get(void)
{
#if defined(CONFIG_BOARD_HAS_TIMING_FUNCTIONS)
	return board_timing_counter_get();
#elif defined(CONFIG_SOC_HAS_TIMING_FUNCTIONS)
	return soc_timing_counter_get();
#else
	return arch_timing_counter_get();
#endif
}
/**
 * @brief Get number of cycles between @p start and @p end.
 *
 * Dispatches to the board, SoC or architecture timing backend,
 * whichever is enabled in the build (board takes precedence over
 * SoC, which takes precedence over the architecture default).
 *
 * For some architectures or SoCs, the raw numbers from counter
 * need to be scaled to obtain actual number of cycles.
 *
 * @param start Pointer to counter at start of a measured execution.
 * @param end Pointer to counter at stop of a measured execution.
 * @return Number of cycles between start and end.
 */
static inline uint64_t timing_cycles_get(volatile timing_t *const start,
					 volatile timing_t *const end)
{
#if defined(CONFIG_BOARD_HAS_TIMING_FUNCTIONS)
	return board_timing_cycles_get(start, end);
#elif defined(CONFIG_SOC_HAS_TIMING_FUNCTIONS)
	return soc_timing_cycles_get(start, end);
#else
	return arch_timing_cycles_get(start, end);
#endif
}
/**
 * @brief Get frequency of counter used (in Hz).
 *
 * Dispatches to the board, SoC or architecture timing backend,
 * whichever is enabled in the build.
 *
 * @return Frequency of counter used for timing in Hz.
 */
static inline uint64_t timing_freq_get(void)
{
#if defined(CONFIG_BOARD_HAS_TIMING_FUNCTIONS)
	return board_timing_freq_get();
#elif defined(CONFIG_SOC_HAS_TIMING_FUNCTIONS)
	return soc_timing_freq_get();
#else
	return arch_timing_freq_get();
#endif
}
/**
 * @brief Convert number of @p cycles into nanoseconds.
 *
 * Dispatches to the board, SoC or architecture timing backend,
 * whichever is enabled in the build.
 *
 * @param cycles Number of cycles
 * @return Converted time value
 */
static inline uint64_t timing_cycles_to_ns(uint64_t cycles)
{
#if defined(CONFIG_BOARD_HAS_TIMING_FUNCTIONS)
	return board_timing_cycles_to_ns(cycles);
#elif defined(CONFIG_SOC_HAS_TIMING_FUNCTIONS)
	return soc_timing_cycles_to_ns(cycles);
#else
	return arch_timing_cycles_to_ns(cycles);
#endif
}
/**
 * @brief Convert number of @p cycles into nanoseconds with averaging.
 *
 * Dispatches to the board, SoC or architecture timing backend,
 * whichever is enabled in the build.
 *
 * @param cycles Number of cycles
 * @param count Times of accumulated cycles to average over
 * @return Converted time value
 */
static inline uint64_t timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count)
{
#if defined(CONFIG_BOARD_HAS_TIMING_FUNCTIONS)
	return board_timing_cycles_to_ns_avg(cycles, count);
#elif defined(CONFIG_SOC_HAS_TIMING_FUNCTIONS)
	return soc_timing_cycles_to_ns_avg(cycles, count);
#else
	return arch_timing_cycles_to_ns_avg(cycles, count);
#endif
}
/**
 * @brief Get frequency of counter used (in MHz).
 *
 * Dispatches to the board, SoC or architecture timing backend,
 * whichever is enabled in the build.
 *
 * @return Frequency of counter used for timing in MHz.
 */
static inline uint32_t timing_freq_get_mhz(void)
{
#if defined(CONFIG_BOARD_HAS_TIMING_FUNCTIONS)
	return board_timing_freq_get_mhz();
#elif defined(CONFIG_SOC_HAS_TIMING_FUNCTIONS)
	return soc_timing_freq_get_mhz();
#else
	return arch_timing_freq_get_mhz();
#endif
}
#endif /* CONFIG_TIMING_FUNCTIONS */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_TIMING_TIMING_H_ */
``` | /content/code_sandbox/include/zephyr/timing/timing.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,249 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_MATH_ILOG2_H_
#define ZEPHYR_INCLUDE_MATH_ILOG2_H_
#include <stdint.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/sys/util.h>
/**
* @file
* @brief Provide ilog2() function
*/
/**
*
* @brief Calculate the floor of log2 for compile time constant
*
* This calculates the floor of log2 (integer log2) for 32-bit
* unsigned integer.
*
* @note This should only be used for compile time constant
* when value is known during preprocessing stage.
* DO NOT USE for runtime code due to the big tree of
* nested if-else blocks.
*
* @warning Will return 0 if input value is 0, which is
* invalid for log2.
*
* @param n Input value
 * @return Integer log2 of @p n
*/
#define ilog2_compile_time_const_u32(n) \
( \
((n) < 2) ? 0 : \
(((n) & BIT(31)) == BIT(31)) ? 31 : \
(((n) & BIT(30)) == BIT(30)) ? 30 : \
(((n) & BIT(29)) == BIT(29)) ? 29 : \
(((n) & BIT(28)) == BIT(28)) ? 28 : \
(((n) & BIT(27)) == BIT(27)) ? 27 : \
(((n) & BIT(26)) == BIT(26)) ? 26 : \
(((n) & BIT(25)) == BIT(25)) ? 25 : \
(((n) & BIT(24)) == BIT(24)) ? 24 : \
(((n) & BIT(23)) == BIT(23)) ? 23 : \
(((n) & BIT(22)) == BIT(22)) ? 22 : \
(((n) & BIT(21)) == BIT(21)) ? 21 : \
(((n) & BIT(20)) == BIT(20)) ? 20 : \
(((n) & BIT(19)) == BIT(19)) ? 19 : \
(((n) & BIT(18)) == BIT(18)) ? 18 : \
(((n) & BIT(17)) == BIT(17)) ? 17 : \
(((n) & BIT(16)) == BIT(16)) ? 16 : \
(((n) & BIT(15)) == BIT(15)) ? 15 : \
(((n) & BIT(14)) == BIT(14)) ? 14 : \
(((n) & BIT(13)) == BIT(13)) ? 13 : \
(((n) & BIT(12)) == BIT(12)) ? 12 : \
(((n) & BIT(11)) == BIT(11)) ? 11 : \
(((n) & BIT(10)) == BIT(10)) ? 10 : \
(((n) & BIT(9)) == BIT(9)) ? 9 : \
(((n) & BIT(8)) == BIT(8)) ? 8 : \
(((n) & BIT(7)) == BIT(7)) ? 7 : \
(((n) & BIT(6)) == BIT(6)) ? 6 : \
(((n) & BIT(5)) == BIT(5)) ? 5 : \
(((n) & BIT(4)) == BIT(4)) ? 4 : \
(((n) & BIT(3)) == BIT(3)) ? 3 : \
(((n) & BIT(2)) == BIT(2)) ? 2 : \
1 \
)
/**
*
* @brief Calculate integer log2
*
* This calculates the floor of log2 (integer of log2).
*
 * @warning log2 is invalid for an input value of 0. The
 * compile-time constant path returns 0 in that case, but the
 * runtime path evaluates find_msb_set(0) - 1 — NOTE(review):
 * confirm find_msb_set(0) yields 0, i.e. that a zero input does
 * not produce -1 here, before relying on a 0 result.
*
* @param n Input value
* @return Integer log2 of @p n
*/
/*
* This is in #define form as this needs to also work on
* compile time constants. Doing this as a static inline
* function will result in compiler complaining with
* "initializer element is not constant".
*/
#define ilog2(n) \
( \
__builtin_constant_p(n) ? \
ilog2_compile_time_const_u32(n) : \
find_msb_set(n) - 1 \
)
#endif /* ZEPHYR_INCLUDE_MATH_ILOG2_H_ */
``` | /content/code_sandbox/include/zephyr/math/ilog2.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,067 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_PTP_CLOCK_H_
#define ZEPHYR_INCLUDE_DRIVERS_PTP_CLOCK_H_
#include <zephyr/kernel.h>
#include <stdint.h>
#include <zephyr/device.h>
#include <zephyr/sys/util.h>
#include <zephyr/net/ptp_time.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Name of the PTP clock driver */
#if !defined(PTP_CLOCK_NAME)
#define PTP_CLOCK_NAME "PTP_CLOCK"
#endif
/** Driver API for PTP clock devices, dispatched via dev->api */
__subsystem struct ptp_clock_driver_api {
	/** Set the PTP clock time */
	int (*set)(const struct device *dev, struct net_ptp_time *tm);
	/** Get the current PTP clock time */
	int (*get)(const struct device *dev, struct net_ptp_time *tm);
	/** Adjust the clock time by an increment in nanoseconds */
	int (*adjust)(const struct device *dev, int increment);
	/** Adjust the clock time change rate by the given ratio */
	int (*rate_adjust)(const struct device *dev, double ratio);
};
/**
 * @brief Set the time of the PTP clock.
 *
 * @param dev PTP clock device
 * @param tm Time to set
 *
 * @return 0 if ok, <0 if error
 */
static inline int ptp_clock_set(const struct device *dev,
				struct net_ptp_time *tm)
{
	/* Dispatch straight to the driver's set() handler. */
	return ((const struct ptp_clock_driver_api *)dev->api)->set(dev, tm);
}
/**
* @brief Get the time of the PTP clock.
*
* @param dev PTP clock device
* @param tm Where to store the current time.
*
* @return 0 if ok, <0 if error
*/
__syscall int ptp_clock_get(const struct device *dev, struct net_ptp_time *tm);
/* Implementation handler for the ptp_clock_get() syscall. */
static inline int z_impl_ptp_clock_get(const struct device *dev,
				       struct net_ptp_time *tm)
{
	/* Dispatch straight to the driver's get() handler. */
	return ((const struct ptp_clock_driver_api *)dev->api)->get(dev, tm);
}
/**
 * @brief Adjust the PTP clock time.
 *
 * @param dev PTP clock device
 * @param increment Increment of the clock in nanoseconds
 *
 * @return 0 if ok, <0 if error
 */
static inline int ptp_clock_adjust(const struct device *dev, int increment)
{
	const struct ptp_clock_driver_api *api = dev->api;

	return api->adjust(dev, increment);
}
/**
 * @brief Adjust the PTP clock time change rate when compared to its neighbor.
 *
 * @param dev PTP clock device
 * @param rate Rate of the clock time change
 *
 * @return 0 if ok, <0 if error
 */
static inline int ptp_clock_rate_adjust(const struct device *dev, double rate)
{
	const struct ptp_clock_driver_api *api = dev->api;

	return api->rate_adjust(dev, rate);
}
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/ptp_clock.h>
#endif /* ZEPHYR_INCLUDE_DRIVERS_PTP_CLOCK_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/ptp_clock.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 656 |
```objective-c
/*
*
*/
/**
* @file
* @brief Charger APIs
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_CHARGER_H_
#define ZEPHYR_INCLUDE_DRIVERS_CHARGER_H_
/**
* @brief Charger Interface
* @defgroup charger_interface Charger Interface
* @ingroup io_interfaces
* @{
*/
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <zephyr/device.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
* @brief Runtime Dynamic Battery Parameters
*/
enum charger_property {
/** Indicates if external supply is present for the charger. */
/** Value should be of type enum charger_online */
CHARGER_PROP_ONLINE = 0,
/** Reports whether or not a battery is present. */
/** Value should be of type bool*/
CHARGER_PROP_PRESENT,
/** Represents the charging status of the charger. */
/** Value should be of type enum charger_status */
CHARGER_PROP_STATUS,
/** Represents the charging algo type of the charger. */
/** Value should be of type enum charger_charge_type */
CHARGER_PROP_CHARGE_TYPE,
/** Represents the health of the charger. */
/** Value should be of type enum charger_health */
CHARGER_PROP_HEALTH,
/** Configuration of current sink used for charging in A */
CHARGER_PROP_CONSTANT_CHARGE_CURRENT_UA,
/** Configuration of current sink used for conditioning in A */
CHARGER_PROP_PRECHARGE_CURRENT_UA,
/** Configuration of charge termination target in A */
CHARGER_PROP_CHARGE_TERM_CURRENT_UA,
/** Configuration of charge voltage regulation target in V */
CHARGER_PROP_CONSTANT_CHARGE_VOLTAGE_UV,
/**
* Configuration of the input current regulation target in A
*
* This value is a rising current threshold that is regulated by reducing the charge
* current output
*/
CHARGER_PROP_INPUT_REGULATION_CURRENT_UA,
/**
* Configuration of the input voltage regulation target in V
*
* This value is a falling voltage threshold that is regulated by reducing the charge
* current output
*/
CHARGER_PROP_INPUT_REGULATION_VOLTAGE_UV,
/**
* Configuration to issue a notification to the system based on the input current
* level and timing
*
* Value should be of type struct charger_current_notifier
*/
CHARGER_PROP_INPUT_CURRENT_NOTIFICATION,
/**
* Configuration to issue a notification to the system based on the battery discharge
* current level and timing
*
* Value should be of type struct charger_current_notifier
*/
CHARGER_PROP_DISCHARGE_CURRENT_NOTIFICATION,
/**
* Configuration of the falling system voltage threshold where a notification
* is issued to the system, measured in V
*/
CHARGER_PROP_SYSTEM_VOLTAGE_NOTIFICATION_UV,
/**
* Configuration to issue a notification to the system based on the charger status change
*
* Value should be of type charger_status_notifier_t
*/
CHARGER_PROP_STATUS_NOTIFICATION,
/**
* Configuration to issue a notification to the system based on the charger online change
*
* Value should be of type charger_online_notifier_t
*/
CHARGER_PROP_ONLINE_NOTIFICATION,
/** Reserved to demark end of common charger properties */
CHARGER_PROP_COMMON_COUNT,
/**
* Reserved to demark downstream custom properties - use this value as the actual value may
* change over future versions of this API
*/
CHARGER_PROP_CUSTOM_BEGIN = CHARGER_PROP_COMMON_COUNT + 1,
/** Reserved to demark end of valid enum properties */
CHARGER_PROP_MAX = UINT16_MAX,
};
/**
* @typedef charger_prop_t
* @brief A charger property's identifier
*
* See charger_property for a list of identifiers
*/
typedef uint16_t charger_prop_t;
/**
* @brief External supply states
*/
enum charger_online {
/** External supply not present */
CHARGER_ONLINE_OFFLINE = 0,
/** External supply is present and of fixed output */
CHARGER_ONLINE_FIXED,
/** External supply is present and of programmable output*/
CHARGER_ONLINE_PROGRAMMABLE,
};
/**
* @brief Charging states
*/
enum charger_status {
/** Charging device state is unknown */
CHARGER_STATUS_UNKNOWN = 0,
/** Charging device is charging a battery */
CHARGER_STATUS_CHARGING,
/** Charging device is not able to charge a battery */
CHARGER_STATUS_DISCHARGING,
/** Charging device is not charging a battery */
CHARGER_STATUS_NOT_CHARGING,
/** The battery is full and the charging device will not attempt charging */
CHARGER_STATUS_FULL,
};
/**
* @brief Charge algorithm types
*/
enum charger_charge_type {
/** Charge type is unknown */
CHARGER_CHARGE_TYPE_UNKNOWN = 0,
/** Charging is not occurring */
CHARGER_CHARGE_TYPE_NONE,
/**
* Charging is occurring at the slowest desired charge rate,
* typically for battery detection or preconditioning
*/
CHARGER_CHARGE_TYPE_TRICKLE,
/** Charging is occurring at the fastest desired charge rate */
CHARGER_CHARGE_TYPE_FAST,
/** Charging is occurring at a moderate charge rate */
CHARGER_CHARGE_TYPE_STANDARD,
/*
* Charging is being dynamically adjusted by the charger device
*/
CHARGER_CHARGE_TYPE_ADAPTIVE,
/*
* Charging is occurring at a reduced charge rate to preserve
* battery health
*/
CHARGER_CHARGE_TYPE_LONGLIFE,
/*
* The charger device is being bypassed and the power conversion
* is being handled externally, typically by a "smart" wall adaptor
*/
CHARGER_CHARGE_TYPE_BYPASS,
};
/**
* @brief Charger health conditions
*
* These conditions determine the ability to, or the rate of, charge
*/
enum charger_health {
/** Charger health condition is unknown */
CHARGER_HEALTH_UNKNOWN = 0,
/** Charger health condition is good */
CHARGER_HEALTH_GOOD,
/** The charger device is overheated */
CHARGER_HEALTH_OVERHEAT,
/** The battery voltage has exceeded its overvoltage threshold */
CHARGER_HEALTH_OVERVOLTAGE,
/**
* The battery or charger device is experiencing an unspecified
* failure.
*/
CHARGER_HEALTH_UNSPEC_FAILURE,
/** The battery temperature is below the "cold" threshold */
CHARGER_HEALTH_COLD,
/** The charger device's watchdog timer has expired */
CHARGER_HEALTH_WATCHDOG_TIMER_EXPIRE,
/** The charger device's safety timer has expired */
CHARGER_HEALTH_SAFETY_TIMER_EXPIRE,
/** The charger device requires calibration */
CHARGER_HEALTH_CALIBRATION_REQUIRED,
/** The battery temperature is in the "warm" range */
CHARGER_HEALTH_WARM,
/** The battery temperature is in the "cool" range */
CHARGER_HEALTH_COOL,
/** The battery temperature is below the "hot" threshold */
CHARGER_HEALTH_HOT,
/** The charger device does not detect a battery */
CHARGER_HEALTH_NO_BATTERY,
};
/**
* @brief Charger severity levels for system notifications
*/
enum charger_notification_severity {
/** Most severe level, typically triggered instantaneously */
CHARGER_SEVERITY_PEAK = 0,
/** More severe than the warning level, less severe than peak */
CHARGER_SEVERITY_CRITICAL,
/** Base severity level */
CHARGER_SEVERITY_WARNING,
};
/**
* @brief The input current thresholds for the charger to notify the system
*/
struct charger_current_notifier {
/** The severity of the notification where CHARGER_SEVERITY_PEAK is the most severe */
uint8_t severity;
/** The current threshold to be exceeded */
uint32_t current_ua;
/** The duration of excess current before notifying the system */
uint32_t duration_us;
};
/**
* @brief The charger status change callback to notify the system
*
* @param status Current charging state
*/
typedef void (*charger_status_notifier_t)(enum charger_status status);
/**
* @brief The charger online change callback to notify the system
*
* @param online Current external supply state
*/
typedef void (*charger_online_notifier_t)(enum charger_online online);
/**
* @brief container for a charger_property value
*
*/
union charger_propval {
/* Fields have the format: */
/* CHARGER_PROPERTY_FIELD */
/* type property_field; */
/** CHARGER_PROP_ONLINE */
enum charger_online online;
/** CHARGER_PROP_PRESENT */
bool present;
/** CHARGER_PROP_STATUS */
enum charger_status status;
/** CHARGER_PROP_CHARGE_TYPE */
enum charger_charge_type charge_type;
/** CHARGER_PROP_HEALTH */
enum charger_health health;
/** CHARGER_PROP_CONSTANT_CHARGE_CURRENT_UA */
uint32_t const_charge_current_ua;
/** CHARGER_PROP_PRECHARGE_CURRENT_UA */
uint32_t precharge_current_ua;
/** CHARGER_PROP_CHARGE_TERM_CURRENT_UA */
uint32_t charge_term_current_ua;
/** CHARGER_PROP_CONSTANT_CHARGE_VOLTAGE_UV */
uint32_t const_charge_voltage_uv;
/** CHARGER_PROP_INPUT_REGULATION_CURRENT_UA */
uint32_t input_current_regulation_current_ua;
/** CHARGER_PROP_INPUT_REGULATION_VOLTAGE_UV */
uint32_t input_voltage_regulation_voltage_uv;
/** CHARGER_PROP_INPUT_CURRENT_NOTIFICATION */
struct charger_current_notifier input_current_notification;
/** CHARGER_PROP_DISCHARGE_CURRENT_NOTIFICATION */
struct charger_current_notifier discharge_current_notification;
/** CHARGER_PROP_SYSTEM_VOLTAGE_NOTIFICATION_UV */
uint32_t system_voltage_notification;
/** CHARGER_PROP_STATUS_NOTIFICATION */
charger_status_notifier_t status_notification;
/** CHARGER_PROP_ONLINE_NOTIFICATION */
charger_online_notifier_t online_notification;
};
/**
* @typedef charger_get_property_t
* @brief Callback API for getting a charger property.
*
* See charger_get_property() for argument description
*/
typedef int (*charger_get_property_t)(const struct device *dev, const charger_prop_t prop,
union charger_propval *val);
/**
* @typedef charger_set_property_t
* @brief Callback API for setting a charger property.
*
* See charger_set_property() for argument description
*/
typedef int (*charger_set_property_t)(const struct device *dev, const charger_prop_t prop,
const union charger_propval *val);
/**
* @typedef charger_charge_enable_t
* @brief Callback API enabling or disabling a charge cycle.
*
* See charger_charge_enable() for argument description
*/
typedef int (*charger_charge_enable_t)(const struct device *dev, const bool enable);
/**
* @brief Charging device API
*
* Caching is entirely on the onus of the client
*/
__subsystem struct charger_driver_api {
charger_get_property_t get_property;
charger_set_property_t set_property;
charger_charge_enable_t charge_enable;
};
/**
* @brief Fetch a battery charger property
*
* @param dev Pointer to the battery charger device
* @param prop Charger property to get
* @param val Pointer to charger_propval union
*
* @retval 0 if successful
* @retval < 0 if getting property failed
*/
__syscall int charger_get_prop(const struct device *dev, const charger_prop_t prop,
union charger_propval *val);
/* Implementation handler for the charger_get_prop() syscall. */
static inline int z_impl_charger_get_prop(const struct device *dev, const charger_prop_t prop,
					  union charger_propval *val)
{
	/* Dispatch straight to the driver's get_property() handler. */
	return ((const struct charger_driver_api *)dev->api)->get_property(dev, prop, val);
}
/**
* @brief Set a battery charger property
*
* @param dev Pointer to the battery charger device
* @param prop Charger property to set
* @param val Pointer to charger_propval union
*
* @retval 0 if successful
* @retval < 0 if setting property failed
*/
__syscall int charger_set_prop(const struct device *dev, const charger_prop_t prop,
const union charger_propval *val);
/* Implementation handler for the charger_set_prop() syscall. */
static inline int z_impl_charger_set_prop(const struct device *dev, const charger_prop_t prop,
					  const union charger_propval *val)
{
	/* Dispatch straight to the driver's set_property() handler. */
	return ((const struct charger_driver_api *)dev->api)->set_property(dev, prop, val);
}
/**
* @brief Enable or disable a charge cycle
*
* @param dev Pointer to the battery charger device
* @param enable true enables a charge cycle, false disables a charge cycle
*
* @retval 0 if successful
* @retval -EIO if communication with the charger failed
* @retval -EINVAL if the conditions for initiating charging are invalid
*/
__syscall int charger_charge_enable(const struct device *dev, const bool enable);
/* Implementation handler for the charger_charge_enable() syscall. */
static inline int z_impl_charger_charge_enable(const struct device *dev, const bool enable)
{
	/* Dispatch straight to the driver's charge_enable() handler. */
	return ((const struct charger_driver_api *)dev->api)->charge_enable(dev, enable);
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* __cplusplus */
#include <zephyr/syscalls/charger.h>
#endif /* ZEPHYR_INCLUDE_DRIVERS_CHARGER_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/charger.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,767 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_UART_EMUL_H_
#define ZEPHYR_INCLUDE_DRIVERS_UART_EMUL_H_
#include <zephyr/device.h>
#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/slist.h>
#include <zephyr/types.h>
/**
* @file
*
* @brief Public APIs for the UART device emulation drivers.
*/
/**
* @brief UART Emulation Interface
* @defgroup uart_emul_interface UART Emulation Interface
* @ingroup io_emulators
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
struct uart_emul_device_api;
/**
* @brief Define the emulation callback function signature
*
* @param dev UART device instance
* @param size Number of available bytes in TX buffer
* @param target pointer to emulation context
*/
typedef void (*uart_emul_device_tx_data_ready_t)(const struct device *dev, size_t size,
const struct emul *target);
/** Node in a linked list of emulators for UART devices */
struct uart_emul {
	/** Linked-list node used to register this emulator on a controller */
	sys_snode_t node;
	/** Target emulator - REQUIRED for all emulated bus nodes of any type */
	const struct emul *target;
	/** API provided for this device */
	const struct uart_emul_device_api *api;
};
/** Definition of the emulator API */
struct uart_emul_device_api {
	/** Callback invoked when TX data is available; see
	 * uart_emul_device_tx_data_ready_t for argument semantics.
	 */
	uart_emul_device_tx_data_ready_t tx_data_ready;
};
/**
* Register an emulated device on the controller
*
* @param dev Device that will use the emulator
* @param emul UART emulator to use
* @return 0 indicating success
*/
int uart_emul_register(const struct device *dev, struct uart_emul *emul);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_DRIVERS_UART_EMUL_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/uart_emul.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 398 |
```objective-c
/**
* @file
*
* @brief Public APIs for Video.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_VIDEO_H_
#define ZEPHYR_INCLUDE_VIDEO_H_
/**
* @brief Video Interface
* @defgroup video_interface Video Interface
* @since 2.1
* @version 1.0.0
* @ingroup io_interfaces
* @{
*/
#include <zephyr/device.h>
#include <stddef.h>
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/drivers/video-controls.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @struct video_format
* @brief Video format structure
*
* Used to configure frame format.
*/
struct video_format {
/** FourCC pixel format value (\ref video_pixel_formats) */
uint32_t pixelformat;
/** frame width in pixels. */
uint32_t width;
/** frame height in pixels. */
uint32_t height;
/**
* @brief line stride.
*
* This is the number of bytes that needs to be added to the address in the
* first pixel of a row in order to go to the address of the first pixel of
* the next row (>=width).
*/
uint32_t pitch;
};
/**
* @struct video_format_cap
* @brief Video format capability
*
* Used to describe a video endpoint format capability.
*/
struct video_format_cap {
/** FourCC pixel format value (\ref video_pixel_formats). */
uint32_t pixelformat;
/** minimum supported frame width in pixels. */
uint32_t width_min;
/** maximum supported frame width in pixels. */
uint32_t width_max;
/** minimum supported frame height in pixels. */
uint32_t height_min;
/** maximum supported frame height in pixels. */
uint32_t height_max;
/** width step size in pixels. */
uint16_t width_step;
/** height step size in pixels. */
uint16_t height_step;
};
/**
* @struct video_caps
* @brief Video format capabilities
*
* Used to describe video endpoint capabilities.
*/
struct video_caps {
/** list of video format capabilities (zero terminated). */
const struct video_format_cap *format_caps;
/** minimal count of video buffers to enqueue before being able to start
* the stream.
*/
uint8_t min_vbuf_count;
};
/**
* @struct video_buffer
* @brief Video buffer structure
*
* Represent a video frame.
*/
struct video_buffer {
/** pointer to driver specific data. */
void *driver_data;
/** pointer to the start of the buffer. */
uint8_t *buffer;
/** size of the buffer in bytes. */
uint32_t size;
/** number of bytes occupied by the valid data in the buffer. */
uint32_t bytesused;
/** time reference in milliseconds at which the last data byte was
* actually received for input endpoints or to be consumed for output
* endpoints.
*/
uint32_t timestamp;
};
/**
 * @brief video_endpoint_id enum
 *
 * Identify the video device endpoint.
 */
enum video_endpoint_id {
	/** No endpoint */
	VIDEO_EP_NONE,
	/** Any endpoint */
	VIDEO_EP_ANY,
	/** Input endpoint */
	VIDEO_EP_IN,
	/** Output endpoint */
	VIDEO_EP_OUT,
};
/**
 * @brief video_signal_result enum
 *
 * Identify the result signalled for a video buffer.
 */
enum video_signal_result {
	/** Buffer completed */
	VIDEO_BUF_DONE,
	/** Buffer processing was aborted */
	VIDEO_BUF_ABORTED,
	/** An error occurred with the buffer */
	VIDEO_BUF_ERROR,
};
/**
* @typedef video_api_set_format_t
* @brief Set video format
*
* See video_set_format() for argument descriptions.
*/
typedef int (*video_api_set_format_t)(const struct device *dev,
enum video_endpoint_id ep,
struct video_format *fmt);
/**
* @typedef video_api_get_format_t
* @brief Get current video format
*
* See video_get_format() for argument descriptions.
*/
typedef int (*video_api_get_format_t)(const struct device *dev,
enum video_endpoint_id ep,
struct video_format *fmt);
/**
* @typedef video_api_enqueue_t
* @brief Enqueue a buffer in the drivers incoming queue.
*
* See video_enqueue() for argument descriptions.
*/
typedef int (*video_api_enqueue_t)(const struct device *dev,
enum video_endpoint_id ep,
struct video_buffer *buf);
/**
* @typedef video_api_dequeue_t
* @brief Dequeue a buffer from the drivers outgoing queue.
*
* See video_dequeue() for argument descriptions.
*/
typedef int (*video_api_dequeue_t)(const struct device *dev,
enum video_endpoint_id ep,
struct video_buffer **buf,
k_timeout_t timeout);
/**
* @typedef video_api_flush_t
* @brief Flush endpoint buffers, buffer are moved from incoming queue to
* outgoing queue.
*
* See video_flush() for argument descriptions.
*/
typedef int (*video_api_flush_t)(const struct device *dev,
enum video_endpoint_id ep,
bool cancel);
/**
* @typedef video_api_stream_start_t
* @brief Start the capture or output process.
*
* See video_stream_start() for argument descriptions.
*/
typedef int (*video_api_stream_start_t)(const struct device *dev);
/**
* @typedef video_api_stream_stop_t
* @brief Stop the capture or output process.
*
* See video_stream_stop() for argument descriptions.
*/
typedef int (*video_api_stream_stop_t)(const struct device *dev);
/**
* @typedef video_api_set_ctrl_t
* @brief Set a video control value.
*
* See video_set_ctrl() for argument descriptions.
*/
typedef int (*video_api_set_ctrl_t)(const struct device *dev,
unsigned int cid,
void *value);
/**
* @typedef video_api_get_ctrl_t
* @brief Get a video control value.
*
* See video_get_ctrl() for argument descriptions.
*/
typedef int (*video_api_get_ctrl_t)(const struct device *dev,
unsigned int cid,
void *value);
/**
* @typedef video_api_get_caps_t
* @brief Get capabilities of a video endpoint.
*
* See video_get_caps() for argument descriptions.
*/
typedef int (*video_api_get_caps_t)(const struct device *dev,
enum video_endpoint_id ep,
struct video_caps *caps);
/**
* @typedef video_api_set_signal_t
* @brief Register/Unregister poll signal for buffer events.
*
* See video_set_signal() for argument descriptions.
*/
typedef int (*video_api_set_signal_t)(const struct device *dev,
enum video_endpoint_id ep,
struct k_poll_signal *signal);
__subsystem struct video_driver_api {
/* mandatory callbacks */
video_api_set_format_t set_format;
video_api_get_format_t get_format;
video_api_stream_start_t stream_start;
video_api_stream_stop_t stream_stop;
video_api_get_caps_t get_caps;
/* optional callbacks */
video_api_enqueue_t enqueue;
video_api_dequeue_t dequeue;
video_api_flush_t flush;
video_api_set_ctrl_t set_ctrl;
video_api_set_ctrl_t get_ctrl;
video_api_set_signal_t set_signal;
};
/**
* @brief Set video format.
*
* Configure video device with a specific format.
*
* @param dev Pointer to the device structure for the driver instance.
* @param ep Endpoint ID.
* @param fmt Pointer to a video format struct.
*
* @retval 0 Is successful.
* @retval -EINVAL If parameters are invalid.
* @retval -ENOTSUP If format is not supported.
* @retval -EIO General input / output error.
*/
static inline int video_set_format(const struct device *dev,
enum video_endpoint_id ep,
struct video_format *fmt)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->set_format == NULL) {
return -ENOSYS;
}
return api->set_format(dev, ep, fmt);
}
/**
* @brief Get video format.
*
* Get video device current video format.
*
* @param dev Pointer to the device structure for the driver instance.
* @param ep Endpoint ID.
* @param fmt Pointer to video format struct.
*
* @retval pointer to video format
*/
static inline int video_get_format(const struct device *dev,
enum video_endpoint_id ep,
struct video_format *fmt)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->get_format == NULL) {
return -ENOSYS;
}
return api->get_format(dev, ep, fmt);
}
/**
* @brief Enqueue a video buffer.
*
* Enqueue an empty (capturing) or filled (output) video buffer in the drivers
* endpoint incoming queue.
*
* @param dev Pointer to the device structure for the driver instance.
* @param ep Endpoint ID.
* @param buf Pointer to the video buffer.
*
* @retval 0 Is successful.
* @retval -EINVAL If parameters are invalid.
* @retval -EIO General input / output error.
*/
static inline int video_enqueue(const struct device *dev,
enum video_endpoint_id ep,
struct video_buffer *buf)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->enqueue == NULL) {
return -ENOSYS;
}
return api->enqueue(dev, ep, buf);
}
/**
* @brief Dequeue a video buffer.
*
* Dequeue a filled (capturing) or displayed (output) buffer from the drivers
* endpoint outgoing queue.
*
* @param dev Pointer to the device structure for the driver instance.
* @param ep Endpoint ID.
* @param buf Pointer a video buffer pointer.
* @param timeout Timeout
*
* @retval 0 Is successful.
* @retval -EINVAL If parameters are invalid.
* @retval -EIO General input / output error.
*/
static inline int video_dequeue(const struct device *dev,
enum video_endpoint_id ep,
struct video_buffer **buf,
k_timeout_t timeout)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->dequeue == NULL) {
return -ENOSYS;
}
return api->dequeue(dev, ep, buf, timeout);
}
/**
* @brief Flush endpoint buffers.
*
* A call to flush finishes when all endpoint buffers have been moved from
* incoming queue to outgoing queue. Either because canceled or fully processed
* through the video function.
*
* @param dev Pointer to the device structure for the driver instance.
* @param ep Endpoint ID.
* @param cancel If true, cancel buffer processing instead of waiting for
* completion.
*
* @retval 0 Is successful, -ERRNO code otherwise.
*/
static inline int video_flush(const struct device *dev,
enum video_endpoint_id ep,
bool cancel)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->flush == NULL) {
return -ENOSYS;
}
return api->flush(dev, ep, cancel);
}
/**
* @brief Start the video device function.
*
* video_stream_start is called to enter streaming state (capture, output...).
* The driver may receive buffers with video_enqueue() before video_stream_start
* is called. If driver/device needs a minimum number of buffers before being
* able to start streaming, then driver set the min_vbuf_count to the related
* endpoint capabilities.
*
* @retval 0 Is successful.
* @retval -EIO General input / output error.
*/
static inline int video_stream_start(const struct device *dev)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->stream_start == NULL) {
return -ENOSYS;
}
return api->stream_start(dev);
}
/**
* @brief Stop the video device function.
*
* On video_stream_stop, driver must stop any transactions or wait until they
* finish.
*
* @retval 0 Is successful.
* @retval -EIO General input / output error.
*/
static inline int video_stream_stop(const struct device *dev)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
int ret;
if (api->stream_stop == NULL) {
return -ENOSYS;
}
ret = api->stream_stop(dev);
video_flush(dev, VIDEO_EP_ANY, true);
return ret;
}
/**
* @brief Get the capabilities of a video endpoint.
*
* @param dev Pointer to the device structure for the driver instance.
* @param ep Endpoint ID.
* @param caps Pointer to the video_caps struct to fill.
*
* @retval 0 Is successful, -ERRNO code otherwise.
*/
static inline int video_get_caps(const struct device *dev,
enum video_endpoint_id ep,
struct video_caps *caps)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->get_caps == NULL) {
return -ENOSYS;
}
return api->get_caps(dev, ep, caps);
}
/**
* @brief Set the value of a control.
*
* This set the value of a video control, value type depends on control ID, and
* must be interpreted accordingly.
*
* @param dev Pointer to the device structure for the driver instance.
* @param cid Control ID.
* @param value Pointer to the control value.
*
* @retval 0 Is successful.
* @retval -EINVAL If parameters are invalid.
* @retval -ENOTSUP If format is not supported.
* @retval -EIO General input / output error.
*/
static inline int video_set_ctrl(const struct device *dev, unsigned int cid,
void *value)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->set_ctrl == NULL) {
return -ENOSYS;
}
return api->set_ctrl(dev, cid, value);
}
/**
* @brief Get the current value of a control.
*
* This retrieve the value of a video control, value type depends on control ID,
* and must be interpreted accordingly.
*
* @param dev Pointer to the device structure for the driver instance.
* @param cid Control ID.
* @param value Pointer to the control value.
*
* @retval 0 Is successful.
* @retval -EINVAL If parameters are invalid.
* @retval -ENOTSUP If format is not supported.
* @retval -EIO General input / output error.
*/
static inline int video_get_ctrl(const struct device *dev, unsigned int cid,
void *value)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->get_ctrl == NULL) {
return -ENOSYS;
}
return api->get_ctrl(dev, cid, value);
}
/**
* @brief Register/Unregister k_poll signal for a video endpoint.
*
* Register a poll signal to the endpoint, which will be signaled on frame
* completion (done, aborted, error). Registering a NULL poll signal
* unregisters any previously registered signal.
*
* @param dev Pointer to the device structure for the driver instance.
* @param ep Endpoint ID.
* @param signal Pointer to k_poll_signal
*
* @retval 0 Is successful, -ERRNO code otherwise.
*/
static inline int video_set_signal(const struct device *dev,
enum video_endpoint_id ep,
struct k_poll_signal *signal)
{
const struct video_driver_api *api =
(const struct video_driver_api *)dev->api;
if (api->set_signal == NULL) {
return -ENOSYS;
}
return api->set_signal(dev, ep, signal);
}
/**
* @brief Allocate aligned video buffer.
*
* @param size Size of the video buffer (in bytes).
* @param align Alignment of the requested memory, must be a power of two.
*
* @retval pointer to allocated video buffer
*/
struct video_buffer *video_buffer_aligned_alloc(size_t size, size_t align);
/**
* @brief Allocate video buffer.
*
* @param size Size of the video buffer (in bytes).
*
* @retval pointer to allocated video buffer
*/
struct video_buffer *video_buffer_alloc(size_t size);
/**
* @brief Release a video buffer.
*
* @param buf Pointer to the video buffer to release.
*/
void video_buffer_release(struct video_buffer *buf);
/* fourcc - four-character-code */
#define video_fourcc(a, b, c, d)\
((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
/**
* @defgroup video_pixel_formats Video pixel formats
* @{
*/
/**
* @name Bayer formats
* @{
*/
/** BGGR8 pixel format */
#define VIDEO_PIX_FMT_BGGR8 video_fourcc('B', 'G', 'G', 'R') /* 8 BGBG.. GRGR.. */
/** GBRG8 pixel format */
#define VIDEO_PIX_FMT_GBRG8 video_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
/** GRBG8 pixel format */
#define VIDEO_PIX_FMT_GRBG8 video_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
/** RGGB8 pixel format */
#define VIDEO_PIX_FMT_RGGB8 video_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
/**
* @}
*/
/**
* @name RGB formats
* @{
*/
/** RGB565 pixel format */
#define VIDEO_PIX_FMT_RGB565 video_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
/** XRGB32 pixel format */
#define VIDEO_PIX_FMT_XRGB32 video_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
/**
* @}
*/
/**
* @name YUV formats
* @{
*/
/** YUYV pixel format */
#define VIDEO_PIX_FMT_YUYV video_fourcc('Y', 'U', 'Y', 'V') /* 16 Y0-Cb0 Y1-Cr0 */
/** XYUV32 pixel format */
#define VIDEO_PIX_FMT_XYUV32 video_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */
/**
*
* @}
*/
/**
* @name JPEG formats
* @{
*/
/** JPEG pixel format */
#define VIDEO_PIX_FMT_JPEG video_fourcc('J', 'P', 'E', 'G') /* 8 JPEG */
/**
* @}
*/
/**
* @}
*/
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_VIDEO_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/video.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,993 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public APIs for eSPI driver
*/
#ifndef ZEPHYR_INCLUDE_ESPI_SAF_H_
#define ZEPHYR_INCLUDE_ESPI_SAF_H_
#include <zephyr/sys/__assert.h>
#include <zephyr/types.h>
#include <zephyr/device.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief eSPI SAF Driver APIs
* @defgroup espi_interface ESPI Driver APIs
* @ingroup io_interfaces
* @{
*/
/**
* @code
*+your_sha256_hash------+
*| |
*| eSPI host +-------------+ |
*| +-----------+ | Power | +----------+ |
*| |Out of band| | management | | GPIO | |
*| ------------ |processor | | controller | | sources | |
*| +-----------+ +-------------+ +----------+ |
*| | | | |
*| ------------ | | | |
*| +--------+ +---------------+ |
*| | | |
*| -----+ +--------+ +----------+ +----v-----+ |
*| | | LPC | | Tunneled | | Tunneled | |
*| | | bridge | | SMBus | | GPIO | |
*| | +--------+ +----------+ +----------+ |
*| | | | | |
*| | ------+ | | |
*| | | | | |
*| +------v-----+ +---v-------v-------------v----+ |
*| | eSPI Flash | | eSPI protocol block | |
*| | access +--->+ | |
*| +------------+ +------------------------------+ |
*| | |
*| ----------- | |
*| v |
*| XXXXXXXXXXXXXXXXXXXXXXX |
*| XXXXXXXXXXXXXXXXXXXXX |
*| XXXXXXXXXXXXXXXXXXX |
*+your_sha256_hash------+
* |
* +-----------------+
* --------- | | | | | |
* | | | | | |
* --------- | + + + + | eSPI bus
* | CH0 CH1 CH2 CH3 | (logical channels)
* | + + + + |
* | | | | | |
* +-----------------+
* |
*+your_sha256_hash-------+
*| eSPI slave |
*| |
*| CH0 | CH1 | CH2 | CH3 |
*| eSPI endpoint | VWIRE | OOB | Flash |
*+your_sha256_hash-------+
* | |
* v |
* +---------+ |
* | Flash | Slave Attached Flash |
* +---------+ |
* |
* @endcode
*/
/**
* @cond INTERNAL_HIDDEN
*
*/
/** @endcond */
struct espi_saf_hw_cfg;
struct espi_saf_flash_cfg;
struct espi_saf_pr;
/**
* @brief eSPI SAF configuration parameters
*/
struct espi_saf_cfg {
uint8_t nflash_devices;
struct espi_saf_hw_cfg hwcfg;
struct espi_saf_flash_cfg *flash_cfgs;
};
/**
* @brief eSPI SAF transaction packet format
*/
struct espi_saf_packet {
uint32_t flash_addr;
uint8_t *buf;
uint32_t len;
};
/*
*defined in espi.h
* struct espi_callback
* typedef void (*espi_callback_handler_t)()
*/
/**
* @cond INTERNAL_HIDDEN
*
* eSPI driver API definition and system call entry points
*
* (Internal use only.)
*/
typedef int (*espi_saf_api_config)(const struct device *dev,
const struct espi_saf_cfg *cfg);
typedef int (*espi_saf_api_set_protection_regions)(
const struct device *dev,
const struct espi_saf_protection *pr);
typedef int (*espi_saf_api_activate)(const struct device *dev);
typedef bool (*espi_saf_api_get_channel_status)(const struct device *dev);
typedef int (*espi_saf_api_flash_read)(const struct device *dev,
struct espi_saf_packet *pckt);
typedef int (*espi_saf_api_flash_write)(const struct device *dev,
struct espi_saf_packet *pckt);
typedef int (*espi_saf_api_flash_erase)(const struct device *dev,
struct espi_saf_packet *pckt);
typedef int (*espi_saf_api_flash_unsuccess)(const struct device *dev,
struct espi_saf_packet *pckt);
/* Callbacks and traffic intercept */
typedef int (*espi_saf_api_manage_callback)(const struct device *dev,
struct espi_callback *callback,
bool set);
__subsystem struct espi_saf_driver_api {
espi_saf_api_config config;
espi_saf_api_set_protection_regions set_protection_regions;
espi_saf_api_activate activate;
espi_saf_api_get_channel_status get_channel_status;
espi_saf_api_flash_read flash_read;
espi_saf_api_flash_write flash_write;
espi_saf_api_flash_erase flash_erase;
espi_saf_api_flash_unsuccess flash_unsuccess;
espi_saf_api_manage_callback manage_callback;
};
/**
* @endcond
*/
/**
* @brief Configure operation of a eSPI controller.
*
* This routine provides a generic interface to override eSPI controller
* capabilities.
*
* If this eSPI controller is acting as slave, the values set here
* will be discovered as part through the GET_CONFIGURATION command
* issued by the eSPI master during initialization.
*
* If this eSPI controller is acting as master, the values set here
* will be used by eSPI master to determine minimum common capabilities with
* eSPI slave then send via SET_CONFIGURATION command.
*
* @code
* +--------+ +---------+ +------+ +---------+ +---------+
* | eSPI | | eSPI | | eSPI | | eSPI | | eSPI |
* | slave | | driver | | bus | | driver | | host |
* +--------+ +---------+ +------+ +---------+ +---------+
* | | | | |
* | espi_config | Set eSPI | Set eSPI | espi_config |
* +--------------+ ctrl regs | cap ctrl reg| +-----------+
* | +-------+ | +--------+ |
* | |<------+ | +------->| |
* | | | | |
* | | | | |
* | | | GET_CONFIGURATION | |
* | | +<------------------+ |
* | |<-----------| | |
* | | eSPI caps | | |
* | |----------->+ response | |
* | | |------------------>+ |
* | | | | |
* | | | SET_CONFIGURATION | |
* | | +<------------------+ |
* | | | accept | |
* | | +------------------>+ |
* + + + + +
* @endcode
*
* @param dev Pointer to the device structure for the driver instance.
* @param cfg the device runtime configuration for the eSPI controller.
*
* @retval 0 If successful.
* @retval -EIO General input / output error, failed to configure device.
* @retval -EINVAL invalid capabilities, failed to configure device.
* @retval -ENOTSUP capability not supported by eSPI slave.
*/
__syscall int espi_saf_config(const struct device *dev,
const struct espi_saf_cfg *cfg);
static inline int z_impl_espi_saf_config(const struct device *dev,
const struct espi_saf_cfg *cfg)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
return api->config(dev, cfg);
}
/**
* @brief Set one or more SAF protection regions
*
* This routine provides an interface to override the default flash
* protection regions of the SAF controller.
*
* @param dev Pointer to the device structure for the driver instance.
* @param pr Pointer to the SAF protection region structure.
*
* @retval 0 If successful.
* @retval -EIO General input / output error, failed to configure device.
* @retval -EINVAL invalid capabilities, failed to configure device.
* @retval -ENOTSUP capability not supported by eSPI slave.
*/
__syscall int espi_saf_set_protection_regions(
const struct device *dev,
const struct espi_saf_protection *pr);
static inline int z_impl_espi_saf_set_protection_regions(
const struct device *dev,
const struct espi_saf_protection *pr)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
return api->set_protection_regions(dev, pr);
}
/**
* @brief Activate SAF block
*
* This routine activates the SAF block and should only be
* called after SAF has been configured and the eSPI Master
* has enabled the Flash Channel.
*
* @param dev Pointer to the device structure for the driver instance.
*
* @retval 0 If successful
* @retval -EINVAL if failed to activate SAF.
*/
__syscall int espi_saf_activate(const struct device *dev);
static inline int z_impl_espi_saf_activate(const struct device *dev)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
return api->activate(dev);
}
/**
* @brief Query to see if SAF is ready
*
* This routine allows to check if SAF is ready before use.
*
* @param dev Pointer to the device structure for the driver instance.
*
* @retval true If eSPI SAF is ready.
* @retval false otherwise.
*/
__syscall bool espi_saf_get_channel_status(const struct device *dev);
static inline bool z_impl_espi_saf_get_channel_status(
const struct device *dev)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
return api->get_channel_status(dev);
}
/**
* @brief Sends a read request packet for slave attached flash.
*
* This routines provides an interface to send a request to read the flash
* component shared between the eSPI master and eSPI slaves.
*
* @param dev Pointer to the device structure for the driver instance.
* @param pckt Address of the representation of read flash transaction.
*
* @retval -ENOTSUP eSPI flash logical channel transactions not supported.
* @retval -EBUSY eSPI flash channel is not ready or disabled by master.
* @retval -EIO General input / output error, failed request to master.
*/
__syscall int espi_saf_flash_read(const struct device *dev,
struct espi_saf_packet *pckt);
static inline int z_impl_espi_saf_flash_read(const struct device *dev,
struct espi_saf_packet *pckt)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
if (!api->flash_read) {
return -ENOTSUP;
}
return api->flash_read(dev, pckt);
}
/**
* @brief Sends a write request packet for slave attached flash.
*
* This routines provides an interface to send a request to write to the flash
* components shared between the eSPI master and eSPI slaves.
*
* @param dev Pointer to the device structure for the driver instance.
* @param pckt Address of the representation of write flash transaction.
*
* @retval -ENOTSUP eSPI flash logical channel transactions not supported.
* @retval -EBUSY eSPI flash channel is not ready or disabled by master.
* @retval -EIO General input / output error, failed request to master.
*/
__syscall int espi_saf_flash_write(const struct device *dev,
struct espi_saf_packet *pckt);
static inline int z_impl_espi_saf_flash_write(const struct device *dev,
struct espi_saf_packet *pckt)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
if (!api->flash_write) {
return -ENOTSUP;
}
return api->flash_write(dev, pckt);
}
/**
* @brief Sends a write request packet for slave attached flash.
*
* This routines provides an interface to send a request to write to the flash
* components shared between the eSPI master and eSPI slaves.
*
* @param dev Pointer to the device structure for the driver instance.
* @param pckt Address of the representation of erase flash transaction.
*
* @retval -ENOTSUP eSPI flash logical channel transactions not supported.
* @retval -EBUSY eSPI flash channel is not ready or disabled by master.
* @retval -EIO General input / output error, failed request to master.
*/
__syscall int espi_saf_flash_erase(const struct device *dev,
struct espi_saf_packet *pckt);
static inline int z_impl_espi_saf_flash_erase(const struct device *dev,
struct espi_saf_packet *pckt)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
if (!api->flash_erase) {
return -ENOTSUP;
}
return api->flash_erase(dev, pckt);
}
/**
* @brief Response unsuccessful completion for slave attached flash.
*
* This routines provides an interface to response that transaction is
* invalid and return unsuccessful completion from target to controller.
*
* @param dev Pointer to the device structure for the driver instance.
* @param pckt Address of the representation of flash transaction.
*
* @retval -ENOTSUP eSPI flash logical channel transactions not supported.
* @retval -EBUSY eSPI flash channel is not ready or disabled by master.
* @retval -EIO General input / output error, failed request to master.
*/
__syscall int espi_saf_flash_unsuccess(const struct device *dev,
struct espi_saf_packet *pckt);
static inline int z_impl_espi_saf_flash_unsuccess(const struct device *dev,
struct espi_saf_packet *pckt)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
if (!api->flash_unsuccess) {
return -ENOTSUP;
}
return api->flash_unsuccess(dev, pckt);
}
/**
* Callback model
*
* @code
*+-------+ +-------------+ +------+ +---------+
*| App | | eSPI driver | | HW | |eSPI Host|
*+---+---+ +-------+-----+ +---+--+ +----+----+
* | | | |
* | espi_init_callback | | |
* +----------------------------> | | |
* | espi_add_callback | |
* +----------------------------->+ |
* | | | eSPI reset | eSPI host
* | | IRQ +<------------+ resets the
* | | <-----------+ | bus
* | | | |
* | | Processed | |
* | | within the | |
* | | driver | |
* | | | |
* | | | VW CH ready| eSPI host
* | | IRQ +<------------+ enables VW
* | | <-----------+ | channel
* | | | |
* | | Processed | |
* | | within the | |
* | | driver | |
* | | | |
* | | | Memory I/O | Peripheral
* | | <-------------+ event
* | +<------------+ |
* +<-----------------------------+ callback | |
* | Report peripheral event | | |
* | and data for the event | | |
* | | | |
* | | | SLP_S5 | eSPI host
* | | <-------------+ send VWire
* | +<------------+ |
* +<-----------------------------+ callback | |
* | App enables/configures | | |
* | discrete regulator | | |
* | | | |
* | espi_send_vwire_signal | | |
* +------------------------------>------------>|------------>|
* | | | |
* | | | HOST_RST | eSPI host
* | | <-------------+ send VWire
* | +<------------+ |
* +<-----------------------------+ callback | |
* | App reset host-related | | |
* | data structures | | |
* | | | |
* | | | C10 | eSPI host
* | | +<------------+ send VWire
* | <-------------+ |
* <------------------------------+ | |
* | App executes | | |
* + power mgmt policy | | |
* @endcode
*/
/**
* @brief Helper to initialize a struct espi_callback properly.
*
* @param callback A valid Application's callback structure pointer.
* @param handler A valid handler function pointer.
* @param evt_type indicates the eSPI event relevant for the handler.
* for VWIRE_RECEIVED event the data will indicate the new level asserted
*/
static inline void espi_saf_init_callback(struct espi_callback *callback,
espi_callback_handler_t handler,
enum espi_bus_event evt_type)
{
__ASSERT(callback, "Callback pointer should not be NULL");
__ASSERT(handler, "Callback handler pointer should not be NULL");
callback->handler = handler;
callback->evt_type = evt_type;
}
/**
* @brief Add an application callback.
* @param dev Pointer to the device structure for the driver instance.
* @param callback A valid Application's callback structure pointer.
* @return 0 if successful, negative errno code on failure.
*
* @note Callbacks may be added to the device from within a callback
* handler invocation, but whether they are invoked for the current
* eSPI event is not specified.
*
* Note: enables to add as many callback as needed on the same device.
*/
static inline int espi_saf_add_callback(const struct device *dev,
struct espi_callback *callback)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
if (!api->manage_callback) {
return -ENOTSUP;
}
return api->manage_callback(dev, callback, true);
}
/**
* @brief Remove an application callback.
* @param dev Pointer to the device structure for the driver instance.
* @param callback A valid application's callback structure pointer.
* @return 0 if successful, negative errno code on failure.
*
* @warning It is explicitly permitted, within a callback handler, to
* remove the registration for the callback that is running, i.e. @p
* callback. Attempts to remove other registrations on the same
* device may result in undefined behavior, including failure to
* invoke callbacks that remain registered and unintended invocation
* of removed callbacks.
*
* Note: enables to remove as many callbacks as added through
* espi_add_callback().
*/
static inline int espi_saf_remove_callback(const struct device *dev,
struct espi_callback *callback)
{
const struct espi_saf_driver_api *api =
(const struct espi_saf_driver_api *)dev->api;
if (!api->manage_callback) {
return -ENOTSUP;
}
return api->manage_callback(dev, callback, false);
}
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#include <zephyr/syscalls/espi_saf.h>
#endif /* ZEPHYR_INCLUDE_ESPI_SAF_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/espi_saf.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,601 |
```objective-c
/* clock_control.h - public clock controller driver API */
/*
*
*/
/**
* @file
* @brief Public Clock Control APIs
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_CLOCK_CONTROL_H_
#define ZEPHYR_INCLUDE_DRIVERS_CLOCK_CONTROL_H_
/**
* @brief Clock Control Interface
* @defgroup clock_control_interface Clock Control Interface
* @since 1.0
* @version 1.0.0
* @ingroup io_interfaces
* @{
*/
#include <errno.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr/device.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/slist.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Clock control API */
/* Used to select all subsystem of a clock controller */
#define CLOCK_CONTROL_SUBSYS_ALL NULL
/**
* @brief Current clock status.
*/
enum clock_control_status {
/** Clock start was requested and is in progress (asynchronous start). */
CLOCK_CONTROL_STATUS_STARTING,
/** Clock is stopped. */
CLOCK_CONTROL_STATUS_OFF,
/** Clock is running. */
CLOCK_CONTROL_STATUS_ON,
/** Status cannot be determined, e.g. the driver does not implement
 * the get_status callback (see clock_control_get_status()).
 */
CLOCK_CONTROL_STATUS_UNKNOWN
};
/**
* clock_control_subsys_t is a type to identify a clock controller sub-system.
* Such data pointed is opaque and relevant only to the clock controller
* driver instance being used.
*/
typedef void *clock_control_subsys_t;
/**
* clock_control_subsys_rate_t is a type to identify a clock
* controller sub-system rate. Such data pointed is opaque and
* relevant only to set the clock controller rate of the driver
* instance being used.
*/
typedef void *clock_control_subsys_rate_t;
/** @brief Callback called on clock started.
*
* @param dev Device structure whose driver controls the clock.
* @param subsys Opaque data representing the clock.
* @param user_data User data.
*/
typedef void (*clock_control_cb_t)(const struct device *dev,
clock_control_subsys_t subsys,
void *user_data);
/** Driver callback for turning a clock on or off; used for both the
 * "on" and "off" slots of the driver API (see clock_control_driver_api).
 */
typedef int (*clock_control)(const struct device *dev,
clock_control_subsys_t sys);
/** Driver callback returning the sub-system clock rate through @p rate. */
typedef int (*clock_control_get)(const struct device *dev,
clock_control_subsys_t sys,
uint32_t *rate);
/** Driver callback for a non-blocking clock start; @p cb is invoked with
 * @p user_data once the clock is running.
 */
typedef int (*clock_control_async_on_fn)(const struct device *dev,
clock_control_subsys_t sys,
clock_control_cb_t cb,
void *user_data);
/** Driver callback reporting the current status of a sub-system clock. */
typedef enum clock_control_status (*clock_control_get_status_fn)(
const struct device *dev,
clock_control_subsys_t sys);
/** Driver callback setting the sub-system clock rate; @p rate is an
 * opaque, driver-specific rate descriptor.
 */
typedef int (*clock_control_set)(const struct device *dev,
clock_control_subsys_t sys,
clock_control_subsys_rate_t rate);
/** Driver callback applying driver-specific configuration @p data to a
 * sub-system clock.
 */
typedef int (*clock_control_configure_fn)(const struct device *dev,
clock_control_subsys_t sys,
void *data);
/** Clock controller driver API table.
 *
 * "on" and "off" are invoked without a NULL check by clock_control_on()
 * and clock_control_off(), so drivers must provide them; the remaining
 * callbacks are optional and checked by their wrappers.
 */
__subsystem struct clock_control_driver_api {
/** Turn a sub-system clock on (mandatory). */
clock_control on;
/** Turn a sub-system clock off (mandatory). */
clock_control off;
/** Start a clock asynchronously (optional; wrapper returns -ENOSYS). */
clock_control_async_on_fn async_on;
/** Read a sub-system clock rate (optional; wrapper returns -ENOSYS). */
clock_control_get get_rate;
/** Report clock status (optional; wrapper returns
 * CLOCK_CONTROL_STATUS_UNKNOWN).
 */
clock_control_get_status_fn get_status;
/** Set a sub-system clock rate (optional). */
clock_control_set set_rate;
/** Apply driver-specific clock configuration (optional). */
clock_control_configure_fn configure;
};
/**
 * @brief Enable a clock controlled by the device
 *
 * On success the clock is enabled and ready when this call returns.
 * The routine may sleep, so it must only be invoked from thread
 * context; use clock_control_async_on() for a non-blocking request.
 *
 * @param dev Device structure whose driver controls the clock.
 * @param sys Opaque data representing the clock.
 * @return 0 on success, negative errno on failure.
 */
static inline int clock_control_on(const struct device *dev,
				   clock_control_subsys_t sys)
{
	const struct clock_control_driver_api *impl =
		(const struct clock_control_driver_api *)dev->api;

	/* "on" is a mandatory driver callback: call it unconditionally. */
	return impl->on(dev, sys);
}
/**
 * @brief Disable a clock controlled by the device
 *
 * Non-blocking and callable from any context. On success the clock is
 * disabled when this call returns.
 *
 * @param dev Device structure whose driver controls the clock
 * @param sys Opaque data representing the clock
 * @return 0 on success, negative errno on failure.
 */
static inline int clock_control_off(const struct device *dev,
				    clock_control_subsys_t sys)
{
	const struct clock_control_driver_api *impl =
		(const struct clock_control_driver_api *)dev->api;

	/* "off" is a mandatory driver callback: call it unconditionally. */
	return impl->off(dev, sys);
}
/**
 * @brief Request clock to start with notification when clock has been started.
 *
 * Non-blocking and callable from any context. The user callback @p cb
 * is invoked once the clock is running.
 *
 * @param dev Device.
 * @param sys A pointer to an opaque data representing the sub-system.
 * @param cb Callback.
 * @param user_data User context passed to the callback.
 *
 * @retval 0 if start is successfully initiated.
 * @retval -EALREADY if clock was already started and is starting or running.
 * @retval -ENOTSUP If the requested mode of operation is not supported.
 * @retval -ENOSYS if the interface is not implemented.
 * @retval other negative errno on vendor specific error.
 */
static inline int clock_control_async_on(const struct device *dev,
					 clock_control_subsys_t sys,
					 clock_control_cb_t cb,
					 void *user_data)
{
	const struct clock_control_driver_api *impl =
		(const struct clock_control_driver_api *)dev->api;

	/* async_on is optional; report -ENOSYS when not implemented. */
	return (impl->async_on == NULL) ? -ENOSYS
					: impl->async_on(dev, sys, cb, user_data);
}
/**
 * @brief Get clock status.
 *
 * @param dev Device.
 * @param sys A pointer to an opaque data representing the sub-system.
 *
 * @return Current clock status, or CLOCK_CONTROL_STATUS_UNKNOWN when the
 *         driver does not implement status reporting.
 */
static inline enum clock_control_status clock_control_get_status(const struct device *dev,
								 clock_control_subsys_t sys)
{
	const struct clock_control_driver_api *ops =
		(const struct clock_control_driver_api *)dev->api;

	/* Status reporting is an optional driver operation. */
	if (ops->get_status == NULL) {
		return CLOCK_CONTROL_STATUS_UNKNOWN;
	}

	return ops->get_status(dev, sys);
}
/**
 * @brief Obtain the clock rate of given sub-system
 *
 * @param dev Pointer to the device structure for the clock controller driver
 *        instance
 * @param sys A pointer to an opaque data representing the sub-system
 * @param[out] rate Subsystem clock rate
 *
 * @retval 0 on successful rate reading.
 * @retval -EAGAIN if rate cannot be read. Some drivers do not support returning the rate when the
 *         clock is off.
 * @retval -ENOTSUP if reading the clock rate is not supported for the given sub-system.
 * @retval -ENOSYS if the interface is not implemented.
 */
static inline int clock_control_get_rate(const struct device *dev,
					 clock_control_subsys_t sys,
					 uint32_t *rate)
{
	const struct clock_control_driver_api *ops =
		(const struct clock_control_driver_api *)dev->api;

	/* Rate reporting is an optional driver operation. */
	if (!ops->get_rate) {
		return -ENOSYS;
	}

	return ops->get_rate(dev, sys, rate);
}
/**
 * @brief Set the rate of the clock controlled by the device.
 *
 * When this call returns 0 the new clock rate has been applied. The
 * operation may sleep, so it must only be invoked from thread context.
 *
 * @param dev Device structure whose driver controls the clock.
 * @param sys Opaque data representing the clock.
 * @param rate Opaque data representing the clock rate to be used.
 *
 * @retval 0 on successful rate change.
 * @retval -EALREADY if clock was already in the given rate.
 * @retval -ENOTSUP If the requested mode of operation is not supported.
 * @retval -ENOSYS if the interface is not implemented.
 * @retval other negative errno on vendor specific error.
 */
static inline int clock_control_set_rate(const struct device *dev,
					 clock_control_subsys_t sys,
					 clock_control_subsys_rate_t rate)
{
	const struct clock_control_driver_api *ops =
		(const struct clock_control_driver_api *)dev->api;

	/* Rate setting is an optional driver operation. */
	if (!ops->set_rate) {
		return -ENOSYS;
	}

	return ops->set_rate(dev, sys, rate);
}
/**
 * @brief Configure a source clock
 *
 * Non-blocking; safe to call from any context. When this call returns 0
 * the selected clock has been configured as requested.
 *
 * The caller is responsible for keeping subsequent API calls consistent
 * with the configuration applied here, so that the clock_control driver
 * can act correctly (for instance using the right clock source in a later
 * clock_control_get_rate() call).
 *
 * @p data is implementation specific and may carry any supplementary
 * information the driver needs for the requested configuration.
 *
 * @param dev Device structure whose driver controls the clock
 * @param sys Opaque data representing the clock
 * @param data Opaque data providing additional input for clock configuration
 *
 * @retval 0 On success
 * @retval -ENOSYS If the device driver does not implement this call
 * @retval -errno Other negative errno on failure.
 */
static inline int clock_control_configure(const struct device *dev,
					  clock_control_subsys_t sys,
					  void *data)
{
	const struct clock_control_driver_api *ops =
		(const struct clock_control_driver_api *)dev->api;

	/* Source configuration is an optional driver operation. */
	if (!ops->configure) {
		return -ENOSYS;
	}

	return ops->configure(dev, sys, data);
}
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_DRIVERS_CLOCK_CONTROL_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/clock_control.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,009 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public APIs for MSPI driver
* @since 3.7
* @version 0.1.0
*/
#ifndef ZEPHYR_INCLUDE_MSPI_H_
#define ZEPHYR_INCLUDE_MSPI_H_
#include <errno.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/types.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/gpio.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief MSPI Driver APIs
* @defgroup mspi_interface MSPI Driver APIs
* @ingroup io_interfaces
* @{
*/
/**
 * @brief MSPI operational mode
 */
enum mspi_op_mode {
	MSPI_OP_MODE_CONTROLLER = 0, /**< Controller (host) side of the bus */
	MSPI_OP_MODE_PERIPHERAL = 1, /**< Peripheral (device) side of the bus */
};
/**
 * @brief MSPI duplex mode
 */
enum mspi_duplex {
	MSPI_HALF_DUPLEX = 0, /**< TX and RX share the data lines */
	MSPI_FULL_DUPLEX = 1, /**< TX and RX on separate data lines */
};
/**
 * @brief MSPI I/O mode capabilities
 * Postfix like 1_4_4 stands for the number of lines used for
 * command, address and data phases.
 * Mode with no postfix has the same number of lines for all phases.
 */
enum mspi_io_mode {
	MSPI_IO_MODE_SINGLE = 0,      /**< 1 line for all phases */
	MSPI_IO_MODE_DUAL = 1,        /**< 2 lines for all phases */
	MSPI_IO_MODE_DUAL_1_1_2 = 2,  /**< 1-line cmd/addr, 2-line data */
	MSPI_IO_MODE_DUAL_1_2_2 = 3,  /**< 1-line cmd, 2-line addr/data */
	MSPI_IO_MODE_QUAD = 4,        /**< 4 lines for all phases */
	MSPI_IO_MODE_QUAD_1_1_4 = 5,  /**< 1-line cmd/addr, 4-line data */
	MSPI_IO_MODE_QUAD_1_4_4 = 6,  /**< 1-line cmd, 4-line addr/data */
	MSPI_IO_MODE_OCTAL = 7,       /**< 8 lines for all phases */
	MSPI_IO_MODE_OCTAL_1_1_8 = 8, /**< 1-line cmd/addr, 8-line data */
	MSPI_IO_MODE_OCTAL_1_8_8 = 9, /**< 1-line cmd, 8-line addr/data */
	MSPI_IO_MODE_HEX = 10,        /**< 16 lines for all phases */
	MSPI_IO_MODE_HEX_8_8_16 = 11, /**< 8-line cmd/addr, 16-line data */
	MSPI_IO_MODE_HEX_8_16_16 = 12, /**< 8-line cmd, 16-line addr/data */
	MSPI_IO_MODE_MAX, /**< Number of I/O modes; not a valid mode */
};
/**
 * @brief MSPI data rate capabilities
 * SINGLE stands for single data rate for all phases.
 * DUAL stands for dual data rate for all phases.
 * S_S_D stands for single data rate for command and address phases but
 * dual data rate for data phase.
 * S_D_D stands for single data rate for command phase but dual data rate
 * for address and data phases.
 */
enum mspi_data_rate {
	MSPI_DATA_RATE_SINGLE = 0, /**< SDR for all phases */
	MSPI_DATA_RATE_S_S_D = 1,  /**< SDR cmd/addr, DDR data */
	MSPI_DATA_RATE_S_D_D = 2,  /**< SDR cmd, DDR addr/data */
	MSPI_DATA_RATE_DUAL = 3,   /**< DDR for all phases */
	MSPI_DATA_RATE_MAX, /**< Number of data rates; not a valid rate */
};
/**
 * @brief MSPI Polarity & Phase Modes
 */
enum mspi_cpp_mode {
	MSPI_CPP_MODE_0 = 0, /**< SPI mode 0; presumably CPOL=0/CPHA=0 — confirm with driver */
	MSPI_CPP_MODE_1 = 1, /**< SPI mode 1; presumably CPOL=0/CPHA=1 — confirm with driver */
	MSPI_CPP_MODE_2 = 2, /**< SPI mode 2; presumably CPOL=1/CPHA=0 — confirm with driver */
	MSPI_CPP_MODE_3 = 3, /**< SPI mode 3; presumably CPOL=1/CPHA=1 — confirm with driver */
};
/**
 * @brief MSPI Endian
 */
enum mspi_endian {
	MSPI_XFER_LITTLE_ENDIAN = 0, /**< Transfer data least-significant byte first */
	MSPI_XFER_BIG_ENDIAN = 1,    /**< Transfer data most-significant byte first */
};
/**
 * @brief MSPI chip enable polarity
 */
enum mspi_ce_polarity {
	MSPI_CE_ACTIVE_LOW = 0,  /**< CE asserted when driven low */
	MSPI_CE_ACTIVE_HIGH = 1, /**< CE asserted when driven high */
};
/**
 * @brief MSPI bus event.
 * This is a preliminary list of events, expected to be extended as
 * new use cases appear.
 */
enum mspi_bus_event {
	MSPI_BUS_RESET = 0,         /**< Bus reset occurred */
	MSPI_BUS_ERROR = 1,         /**< Bus error occurred */
	MSPI_BUS_XFER_COMPLETE = 2, /**< A transfer finished */
	MSPI_BUS_EVENT_MAX, /**< Number of events; not a valid event */
};
/**
 * @brief MSPI bus event callback mask
 * This is a preliminary list mirroring mspi_bus_event, expected to be
 * extended in step with it. Values are bit flags and may be OR-ed.
 */
enum mspi_bus_event_cb_mask {
	MSPI_BUS_NO_CB = 0,                  /**< Trigger no callbacks */
	MSPI_BUS_RESET_CB = BIT(0),          /**< Callback on bus reset */
	MSPI_BUS_ERROR_CB = BIT(1),          /**< Callback on bus error */
	MSPI_BUS_XFER_COMPLETE_CB = BIT(2),  /**< Callback on transfer completion */
};
/**
 * @brief MSPI transfer modes
 */
enum mspi_xfer_mode {
	MSPI_PIO, /**< Programmed I/O: CPU moves the data */
	MSPI_DMA, /**< DMA engine moves the data */
};
/**
 * @brief MSPI transfer directions
 */
enum mspi_xfer_direction {
	MSPI_RX, /**< Receive from the peripheral */
	MSPI_TX, /**< Transmit to the peripheral */
};
/**
 * @brief MSPI controller device specific configuration mask
 *
 * Bit mask passed as @c param_mask to mspi_dev_config(); each bit selects
 * the like-named field of struct mspi_dev_cfg for update.
 */
enum mspi_dev_cfg_mask {
	MSPI_DEVICE_CONFIG_NONE = 0,            /**< Update nothing */
	MSPI_DEVICE_CONFIG_CE_NUM = BIT(0),
	MSPI_DEVICE_CONFIG_FREQUENCY = BIT(1),
	MSPI_DEVICE_CONFIG_IO_MODE = BIT(2),
	MSPI_DEVICE_CONFIG_DATA_RATE = BIT(3),
	MSPI_DEVICE_CONFIG_CPP = BIT(4),
	MSPI_DEVICE_CONFIG_ENDIAN = BIT(5),
	MSPI_DEVICE_CONFIG_CE_POL = BIT(6),
	MSPI_DEVICE_CONFIG_DQS = BIT(7),
	MSPI_DEVICE_CONFIG_RX_DUMMY = BIT(8),
	MSPI_DEVICE_CONFIG_TX_DUMMY = BIT(9),
	MSPI_DEVICE_CONFIG_READ_CMD = BIT(10),
	MSPI_DEVICE_CONFIG_WRITE_CMD = BIT(11),
	MSPI_DEVICE_CONFIG_CMD_LEN = BIT(12),
	MSPI_DEVICE_CONFIG_ADDR_LEN = BIT(13),
	MSPI_DEVICE_CONFIG_MEM_BOUND = BIT(14),
	MSPI_DEVICE_CONFIG_BREAK_TIME = BIT(15),
	MSPI_DEVICE_CONFIG_ALL = BIT_MASK(16),  /**< Update every field */
};
/**
 * @brief MSPI XIP access permissions
 */
enum mspi_xip_permit {
	MSPI_XIP_READ_WRITE = 0, /**< XIP region is readable and writable */
	MSPI_XIP_READ_ONLY = 1,  /**< XIP region is read-only */
};
/**
 * @brief MSPI Configure API
 * @defgroup mspi_configure_api MSPI Configure API
 * @{
 */
/**
 * @brief Stub for timing parameter
 *
 * Placeholder; SoC/controller ports are expected to supply real values.
 */
enum mspi_timing_param {
	MSPI_TIMING_PARAM_DUMMY
};
/**
 * @brief Stub for struct timing_cfg
 *
 * Placeholder; SoC/controller ports are expected to supply real fields.
 */
struct mspi_timing_cfg {
};
/**
 * @brief MSPI device ID
 * The controller can identify its devices and determine whether the access is
 * allowed in a multiple device scheme.
 */
struct mspi_dev_id {
	/** @brief device gpio ce */
	struct gpio_dt_spec ce;
	/** @brief device index on DT */
	uint16_t dev_idx;
};
/**
 * @brief MSPI controller configuration
 *
 * Hardware-level settings of the controller itself, normally derived
 * from devicetree; see struct mspi_dt_spec.
 */
struct mspi_cfg {
	/** @brief mspi channel number */
	uint8_t channel_num;
	/** @brief Configure operation mode (controller or peripheral) */
	enum mspi_op_mode op_mode;
	/** @brief Configure duplex mode */
	enum mspi_duplex duplex;
	/** @brief DQS support flag */
	bool dqs_support;
	/** @brief Software managed multi peripheral enable */
	bool sw_multi_periph;
	/** @brief GPIO chip select lines (optional) */
	struct gpio_dt_spec *ce_group;
	/** @brief GPIO chip-select line numbers (optional);
	 * number of entries in ce_group
	 */
	uint32_t num_ce_gpios;
	/** @brief Peripheral number from 0 to host controller peripheral limit. */
	uint32_t num_periph;
	/** @brief Maximum supported frequency in MHz */
	uint32_t max_freq;
	/** @brief Whether to re-initialize controller */
	bool re_init;
};
/**
 * @brief MSPI DT information
 *
 * Bundles the bus device with its hardware configuration; passed to
 * mspi_config().
 */
struct mspi_dt_spec {
	/** @brief MSPI bus */
	const struct device *bus;
	/** @brief MSPI hardware specific configuration */
	struct mspi_cfg config;
};
/**
 * @brief MSPI controller device specific configuration
 *
 * Per-peripheral settings, normally derived from the device datasheet;
 * applied via mspi_dev_config() under control of enum mspi_dev_cfg_mask.
 */
struct mspi_dev_cfg {
	/** @brief Configure CE0 or CE1 or more */
	uint8_t ce_num;
	/** @brief Configure frequency
	 * NOTE(review): unit is not stated here (mspi_cfg.max_freq is in MHz);
	 * confirm the expected unit against the controller driver.
	 */
	uint32_t freq;
	/** @brief Configure I/O mode */
	enum mspi_io_mode io_mode;
	/** @brief Configure data rate */
	enum mspi_data_rate data_rate;
	/** @brief Configure clock polarity and phase */
	enum mspi_cpp_mode cpp;
	/** @brief Configure transfer endian */
	enum mspi_endian endian;
	/** @brief Configure chip enable polarity */
	enum mspi_ce_polarity ce_polarity;
	/** @brief Configure DQS mode */
	bool dqs_enable;
	/** @brief Configure number of clock cycles between
	 * addr and data in RX direction
	 */
	uint16_t rx_dummy;
	/** @brief Configure number of clock cycles between
	 * addr and data in TX direction
	 */
	uint16_t tx_dummy;
	/** @brief Configure read command */
	uint32_t read_cmd;
	/** @brief Configure write command */
	uint32_t write_cmd;
	/** @brief Configure command length */
	uint8_t cmd_length;
	/** @brief Configure address length */
	uint8_t addr_length;
	/** @brief Configure memory boundary */
	uint32_t mem_boundary;
	/** @brief Configure the time to break up a transfer into 2 */
	uint32_t time_to_break;
};
/**
 * @brief MSPI controller XIP configuration
 */
struct mspi_xip_cfg {
	/** @brief XIP enable */
	bool enable;
	/** @brief XIP region start address =
	 * hardware default + address offset
	 */
	uint32_t address_offset;
	/** @brief XIP region size */
	uint32_t size;
	/** @brief XIP access permission */
	enum mspi_xip_permit permission;
};
/**
 * @brief MSPI controller scramble configuration
 */
struct mspi_scramble_cfg {
	/** @brief scramble enable */
	bool enable;
	/** @brief scramble region start address =
	 * hardware default + address offset
	 */
	uint32_t address_offset;
	/** @brief scramble region size */
	uint32_t size;
};
/** @} */
/**
* @brief MSPI Transfer API
* @defgroup mspi_transfer_api MSPI Transfer API
* @{
*/
/**
 * @brief MSPI Chip Select control structure
 *
 * This can be used to control a CE line via a GPIO line, instead of
 * using the controller inner CE logic.
 *
 */
struct mspi_ce_control {
	/**
	 * @brief GPIO devicetree specification of CE GPIO.
	 * The device pointer can be set to NULL to fully inhibit CE control if
	 * necessary. The GPIO flags GPIO_ACTIVE_LOW/GPIO_ACTIVE_HIGH should be
	 * the same as in MSPI configuration.
	 */
	struct gpio_dt_spec gpio;
	/**
	 * @brief Delay to wait.
	 * In microseconds before starting the
	 * transmission and before releasing the CE line.
	 */
	uint32_t delay;
};
/**
 * @brief MSPI peripheral xfer packet format
 *
 * One unit of data movement within a struct mspi_xfer request.
 */
struct mspi_xfer_packet {
	/** @brief Direction (Transmit/Receive) */
	enum mspi_xfer_direction dir;
	/** @brief Bus event callback masks; selects which registered
	 * callbacks fire for this packet
	 */
	enum mspi_bus_event_cb_mask cb_mask;
	/** @brief Transfer command */
	uint32_t cmd;
	/** @brief Transfer Address */
	uint32_t address;
	/** @brief Number of bytes to transfer */
	uint32_t num_bytes;
	/** @brief Data Buffer; must hold at least num_bytes */
	uint8_t *data_buf;
};
/**
 * @brief MSPI peripheral xfer format
 * This includes transfer related settings that may
 * require configuring the hardware.
 */
struct mspi_xfer {
	/** @brief Async or sync transfer */
	bool async;
	/** @brief Transfer Mode (PIO or DMA) */
	enum mspi_xfer_mode xfer_mode;
	/** @brief Configure TX dummy cycles */
	uint16_t tx_dummy;
	/** @brief Configure RX dummy cycles */
	uint16_t rx_dummy;
	/** @brief Configure command length */
	uint8_t cmd_length;
	/** @brief Configure address length */
	uint8_t addr_length;
	/** @brief Hold CE active after xfer */
	bool hold_ce;
	/** @brief Software CE control */
	struct mspi_ce_control ce_sw_ctrl;
	/** @brief Priority 0 = Low (best effort)
	 *          1 = High (service immediately)
	 */
	uint8_t priority;
	/** @brief Transfer packets; array of num_packet entries */
	const struct mspi_xfer_packet *packets;
	/** @brief Number of transfer packets */
	uint32_t num_packet;
	/** @brief Transfer timeout value
	 * NOTE(review): unit is not stated here — confirm (likely ms)
	 * against the controller driver.
	 */
	uint32_t timeout;
};
/** @} */
/**
* @brief MSPI callback API
* @defgroup mspi_callback_api MSPI callback API
* @{
*/
/**
 * @brief MSPI event data
 *
 * Context handed to user callbacks describing what happened and where.
 */
struct mspi_event_data {
	/** @brief Pointer to the bus controller */
	const struct device *controller;
	/** @brief Pointer to the peripheral device ID */
	const struct mspi_dev_id *dev_id;
	/** @brief Pointer to a transfer packet */
	const struct mspi_xfer_packet *packet;
	/** @brief MSPI event status */
	uint32_t status;
	/** @brief Packet index within the originating mspi_xfer request */
	uint32_t packet_idx;
};
/**
 * @brief MSPI event
 */
struct mspi_event {
	/** Event type */
	enum mspi_bus_event evt_type;
	/** Data associated to the event */
	struct mspi_event_data evt_data;
};
/**
 * @brief MSPI callback context
 *
 * Registered via mspi_register_callback(); the driver fills mspi_evt
 * before invoking the user callback.
 */
struct mspi_callback_context {
	/** @brief MSPI event */
	struct mspi_event mspi_evt;
	/** @brief user defined context */
	void *ctx;
};
/**
 * @typedef mspi_callback_handler_t
 * @brief Define the application callback handler function signature.
 *
 * @param mspi_cb_ctx Pointer to the MSPI callback context
 *
 */
typedef void (*mspi_callback_handler_t)(struct mspi_callback_context *mspi_cb_ctx, ...);
/** @} */
/**
 * MSPI driver API definition and system call entry points
 *
 * Controller drivers fill in struct mspi_driver_api. Based on the checks
 * performed by the inline wrappers below: config, dev_config and
 * get_channel_status are called unconditionally (mandatory), while
 * transceive, register_callback, xip_config, scramble_config and
 * timing_config are NULL-checked (optional; wrappers return -ENOTSUP).
 */
/* Apply controller-level configuration; see mspi_config(). */
typedef int (*mspi_api_config)(const struct mspi_dt_spec *spec);
/* Apply per-device configuration; see mspi_dev_config(). */
typedef int (*mspi_api_dev_config)(const struct device *controller,
				   const struct mspi_dev_id *dev_id,
				   const enum mspi_dev_cfg_mask param_mask,
				   const struct mspi_dev_cfg *cfg);
/* Report readiness of a logical channel; see mspi_get_channel_status(). */
typedef int (*mspi_api_get_channel_status)(const struct device *controller, uint8_t ch);
/* Execute a transfer request; see mspi_transceive(). */
typedef int (*mspi_api_transceive)(const struct device *controller,
				   const struct mspi_dev_id *dev_id,
				   const struct mspi_xfer *req);
/* Register a user event callback; see mspi_register_callback(). */
typedef int (*mspi_api_register_callback)(const struct device *controller,
					  const struct mspi_dev_id *dev_id,
					  const enum mspi_bus_event evt_type,
					  mspi_callback_handler_t cb,
					  struct mspi_callback_context *ctx);
/* Configure execute-in-place; see mspi_xip_config(). */
typedef int (*mspi_api_xip_config)(const struct device *controller,
				   const struct mspi_dev_id *dev_id,
				   const struct mspi_xip_cfg *xip_cfg);
/* Configure scrambling; see mspi_scramble_config(). */
typedef int (*mspi_api_scramble_config)(const struct device *controller,
					const struct mspi_dev_id *dev_id,
					const struct mspi_scramble_cfg *scramble_cfg);
/* Configure controller timing; see mspi_timing_config(). */
typedef int (*mspi_api_timing_config)(const struct device *controller,
				      const struct mspi_dev_id *dev_id, const uint32_t param_mask,
				      void *timing_cfg);
__subsystem struct mspi_driver_api {
	mspi_api_config config;
	mspi_api_dev_config dev_config;
	mspi_api_get_channel_status get_channel_status;
	mspi_api_transceive transceive;
	mspi_api_register_callback register_callback;
	mspi_api_xip_config xip_config;
	mspi_api_scramble_config scramble_config;
	mspi_api_timing_config timing_config;
};
/**
* @addtogroup mspi_configure_api
* @{
*/
/**
* @brief Configure a MSPI controller.
*
* This routine provides a generic interface to override MSPI controller
* capabilities.
*
* In the controller driver, one may implement this API to initialize or
* re-initialize their controller hardware. Additional SoC platform specific
* settings that are not in struct mspi_cfg may be added to one's own
* binding(xxx,mspi-controller.yaml) so that one may derive the settings from
* DTS and configure it in this API. In general, these settings should not
* change during run-time. The bindings for @see mspi_cfg can be found in
* mspi-controller.yaml.
*
* @param spec Pointer to MSPI DT information.
*
* @retval 0 If successful.
* @retval -EIO General input / output error, failed to configure device.
* @retval -EINVAL invalid capabilities, failed to configure device.
* @retval -ENOTSUP capability not supported by MSPI peripheral.
*/
__syscall int mspi_config(const struct mspi_dt_spec *spec);
static inline int z_impl_mspi_config(const struct mspi_dt_spec *spec)
{
const struct mspi_driver_api *api = (const struct mspi_driver_api *)spec->bus->api;
return api->config(spec);
}
/**
 * @brief Configure a MSPI controller with device specific parameters.
 *
 * Generic entry point for applying per-peripheral settings, which should
 * be derived from device datasheets.
 *
 * With @see mspi_dev_id carrying the device index and CE GPIO from device
 * tree, the API supports multiple devices on the same controller instance.
 * Whether device switching is done in software, in hardware, or not at all
 * is up to the controller driver; software switching belongs in this call's
 * implementation. Drivers may also honour partial updates as selected by
 * @see mspi_dev_cfg_mask.
 * The settings within @see mspi_dev_cfg don't typically change once the
 * mode of operation is determined after device initialization.
 * The bindings for @see mspi_dev_cfg can be found in mspi-device.yaml.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param param_mask Macro definition of what to be configured in cfg.
 * @param cfg The device runtime configuration for the MSPI controller.
 *
 * @retval 0 If successful.
 * @retval -EIO General input / output error, failed to configure device.
 * @retval -EINVAL invalid capabilities, failed to configure device.
 * @retval -ENOTSUP capability not supported by MSPI peripheral.
 */
__syscall int mspi_dev_config(const struct device *controller,
			      const struct mspi_dev_id *dev_id,
			      const enum mspi_dev_cfg_mask param_mask,
			      const struct mspi_dev_cfg *cfg);
static inline int z_impl_mspi_dev_config(const struct device *controller,
					 const struct mspi_dev_id *dev_id,
					 const enum mspi_dev_cfg_mask param_mask,
					 const struct mspi_dev_cfg *cfg)
{
	const struct mspi_driver_api *api =
		(const struct mspi_driver_api *)controller->api;

	/* dev_config is a mandatory driver operation; no NULL check needed. */
	return api->dev_config(controller, dev_id, param_mask, cfg);
}
/**
 * @brief Query whether a channel is ready.
 *
 * Allows checking that a logical channel is ready before use.
 * Note that queries for channels not supported will always return false.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param ch the MSPI channel for which status is to be retrieved.
 *
 * @retval 0 If MSPI channel is ready.
 */
__syscall int mspi_get_channel_status(const struct device *controller, uint8_t ch);
static inline int z_impl_mspi_get_channel_status(const struct device *controller, uint8_t ch)
{
	const struct mspi_driver_api *api =
		(const struct mspi_driver_api *)controller->api;

	/* get_channel_status is a mandatory driver operation. */
	return api->get_channel_status(controller, ch);
}
/** @} */
/**
* @addtogroup mspi_transfer_api
* @{
*/
/**
* @brief Transfer request over MSPI.
*
* This routines provides a generic interface to transfer a request
* synchronously/asynchronously.
*
* The @see mspi_xfer allows for dynamically changing the transfer related
* settings once the mode of operation is determined and configured.
* The API supports bulk transfers with different starting addresses and sizes
* with @see mspi_xfer_packet. However, it is up to the controller
* implementation whether to support scatter IO and callback management.
* The controller can determine which user callback to trigger based on
* @see mspi_bus_event_cb_mask upon completion of each async/sync transfer
* if the callback had been registered. Or not to trigger any callback at all
* with MSPI_BUS_NO_CB even if the callbacks are already registered.
*
* @param controller Pointer to the device structure for the driver instance.
* @param dev_id Pointer to the device ID structure from a device.
* @param req Content of the request and request specific settings.
*
* @retval 0 If successful.
* @retval -ENOTSUP
* @retval -EIO General input / output error, failed to send over the bus.
*/
__syscall int mspi_transceive(const struct device *controller,
const struct mspi_dev_id *dev_id,
const struct mspi_xfer *req);
static inline int z_impl_mspi_transceive(const struct device *controller,
const struct mspi_dev_id *dev_id,
const struct mspi_xfer *req)
{
const struct mspi_driver_api *api = (const struct mspi_driver_api *)controller->api;
if (!api->transceive) {
return -ENOTSUP;
}
return api->transceive(controller, dev_id, req);
}
/** @} */
/**
* @addtogroup mspi_configure_api
* @{
*/
/**
* @brief Configure a MSPI XIP settings.
*
* This routine provides a generic interface to configure the XIP feature.
*
* @param controller Pointer to the device structure for the driver instance.
* @param dev_id Pointer to the device ID structure from a device.
* @param cfg The controller XIP configuration for MSPI.
*
* @retval 0 If successful.
* @retval -EIO General input / output error, failed to configure device.
* @retval -EINVAL invalid capabilities, failed to configure device.
* @retval -ENOTSUP capability not supported by MSPI peripheral.
*/
__syscall int mspi_xip_config(const struct device *controller,
const struct mspi_dev_id *dev_id,
const struct mspi_xip_cfg *cfg);
static inline int z_impl_mspi_xip_config(const struct device *controller,
const struct mspi_dev_id *dev_id,
const struct mspi_xip_cfg *cfg)
{
const struct mspi_driver_api *api = (const struct mspi_driver_api *)controller->api;
if (!api->xip_config) {
return -ENOTSUP;
}
return api->xip_config(controller, dev_id, cfg);
}
/**
* @brief Configure a MSPI scrambling settings.
*
* This routine provides a generic interface to configure the scrambling
* feature.
*
* @param controller Pointer to the device structure for the driver instance.
* @param dev_id Pointer to the device ID structure from a device.
* @param cfg The controller scramble configuration for MSPI.
*
* @retval 0 If successful.
* @retval -EIO General input / output error, failed to configure device.
* @retval -EINVAL invalid capabilities, failed to configure device.
* @retval -ENOTSUP capability not supported by MSPI peripheral.
*/
__syscall int mspi_scramble_config(const struct device *controller,
const struct mspi_dev_id *dev_id,
const struct mspi_scramble_cfg *cfg);
static inline int z_impl_mspi_scramble_config(const struct device *controller,
const struct mspi_dev_id *dev_id,
const struct mspi_scramble_cfg *cfg)
{
const struct mspi_driver_api *api = (const struct mspi_driver_api *)controller->api;
if (!api->scramble_config) {
return -ENOTSUP;
}
return api->scramble_config(controller, dev_id, cfg);
}
/**
* @brief Configure a MSPI timing settings.
*
* This routine provides a generic interface to configure MSPI controller
* timing if necessary.
*
* @param controller Pointer to the device structure for the driver instance.
* @param dev_id Pointer to the device ID structure from a device.
* @param param_mask The macro definition of what should be configured in cfg.
* @param cfg The controller timing configuration for MSPI.
*
* @retval 0 If successful.
* @retval -EIO General input / output error, failed to configure device.
* @retval -EINVAL invalid capabilities, failed to configure device.
* @retval -ENOTSUP capability not supported by MSPI peripheral.
*/
__syscall int mspi_timing_config(const struct device *controller,
const struct mspi_dev_id *dev_id,
const uint32_t param_mask, void *cfg);
static inline int z_impl_mspi_timing_config(const struct device *controller,
const struct mspi_dev_id *dev_id,
const uint32_t param_mask, void *cfg)
{
const struct mspi_driver_api *api = (const struct mspi_driver_api *)controller->api;
if (!api->timing_config) {
return -ENOTSUP;
}
return api->timing_config(controller, dev_id, param_mask, cfg);
}
/** @} */
/**
* @addtogroup mspi_callback_api
* @{
*/
/**
 * @brief Register the mspi callback functions.
 *
 * Generic entry point for registering MSPI event callbacks.
 * In general it should be called before mspi_transceive.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param evt_type The event type associated the callback.
 * @param cb Pointer to the user implemented callback function.
 * @param ctx Pointer to the callback context.
 *
 * @retval 0 If successful.
 * @retval -ENOTSUP
 */
static inline int mspi_register_callback(const struct device *controller,
					 const struct mspi_dev_id *dev_id,
					 const enum mspi_bus_event evt_type,
					 mspi_callback_handler_t cb,
					 struct mspi_callback_context *ctx)
{
	const struct mspi_driver_api *api =
		(const struct mspi_driver_api *)controller->api;

	/* Callback support is an optional driver operation. */
	if (api->register_callback == NULL) {
		return -ENOTSUP;
	}

	return api->register_callback(controller, dev_id, evt_type, cb, ctx);
}
/** @} */
#ifdef __cplusplus
}
#endif
#include <zephyr/drivers/mspi/devicetree.h>
/**
* @}
*/
#include <zephyr/syscalls/mspi.h>
#endif /* ZEPHYR_INCLUDE_MSPI_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/mspi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,788 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public APIs for GPIO drivers
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_GPIO_H_
#define ZEPHYR_INCLUDE_DRIVERS_GPIO_H_
#include <errno.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/slist.h>
#include <zephyr/types.h>
#include <stddef.h>
#include <zephyr/device.h>
#include <zephyr/dt-bindings/gpio/gpio.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief GPIO Driver APIs
* @defgroup gpio_interface GPIO Driver APIs
* @since 1.0
* @version 1.0.0
* @ingroup io_interfaces
* @{
*/
/**
* @name GPIO input/output configuration flags
* @{
*/
/** Enables pin as input. */
#define GPIO_INPUT              (1U << 16)
/** Enables pin as output, no change to the output state. */
#define GPIO_OUTPUT             (1U << 17)
/** Disables pin for both input and output. */
#define GPIO_DISCONNECTED	0
/** @cond INTERNAL_HIDDEN */
/* Initializes output to a low state. */
#define GPIO_OUTPUT_INIT_LOW    (1U << 18)
/* Initializes output to a high state. */
#define GPIO_OUTPUT_INIT_HIGH   (1U << 19)
/* Initializes output based on logic level */
#define GPIO_OUTPUT_INIT_LOGICAL (1U << 20)
/** @endcond */
/* Note: the _LOW/_HIGH variants below set the initial PHYSICAL level,
 * while the _INACTIVE/_ACTIVE variants set the initial LOGICAL level
 * (which presumably honors GPIO_ACTIVE_LOW — see the interrupt flag
 * documentation below for the logical-level convention).
 */
/** Configures GPIO pin as output and initializes it to a low state. */
#define GPIO_OUTPUT_LOW         (GPIO_OUTPUT | GPIO_OUTPUT_INIT_LOW)
/** Configures GPIO pin as output and initializes it to a high state. */
#define GPIO_OUTPUT_HIGH        (GPIO_OUTPUT | GPIO_OUTPUT_INIT_HIGH)
/** Configures GPIO pin as output and initializes it to a logic 0. */
#define GPIO_OUTPUT_INACTIVE    (GPIO_OUTPUT |			\
				 GPIO_OUTPUT_INIT_LOW |		\
				 GPIO_OUTPUT_INIT_LOGICAL)
/** Configures GPIO pin as output and initializes it to a logic 1. */
#define GPIO_OUTPUT_ACTIVE      (GPIO_OUTPUT |			\
				 GPIO_OUTPUT_INIT_HIGH |	\
				 GPIO_OUTPUT_INIT_LOGICAL)
/** @} */
/**
* @name GPIO interrupt configuration flags
* The `GPIO_INT_*` flags are used to specify how input GPIO pins will trigger
* interrupts. The interrupts can be sensitive to pin physical or logical level.
* Interrupts sensitive to pin logical level take into account GPIO_ACTIVE_LOW
* flag. If a pin was configured as Active Low, physical level low will be
* considered as logical level 1 (an active state), physical level high will
* be considered as logical level 0 (an inactive state).
* The GPIO controller should reset the interrupt status, such as clearing the
* pending bit, etc, when configuring the interrupt triggering properties.
* Applications should use the `GPIO_INT_MODE_ENABLE_ONLY` and
* `GPIO_INT_MODE_DISABLE_ONLY` flags to enable and disable interrupts on the
* pin without changing any GPIO settings.
* @{
*/
/** Disables GPIO pin interrupt. */
#define GPIO_INT_DISABLE               (1U << 21)
/** @cond INTERNAL_HIDDEN */
/* Enables GPIO pin interrupt. */
#define GPIO_INT_ENABLE                (1U << 22)
/* GPIO interrupt is sensitive to logical levels.
 *
 * This is a component flag that should be combined with other
 * `GPIO_INT_*` flags to produce a meaningful configuration.
 */
#define GPIO_INT_LEVELS_LOGICAL        (1U << 23)
/* GPIO interrupt is edge sensitive.
 *
 * Note: by default interrupts are level sensitive.
 *
 * This is a component flag that should be combined with other
 * `GPIO_INT_*` flags to produce a meaningful configuration.
 */
#define GPIO_INT_EDGE                  (1U << 24)
/* Trigger detection when input state is (or transitions to) physical low or
 * logical 0 level.
 *
 * This is a component flag that should be combined with other
 * `GPIO_INT_*` flags to produce a meaningful configuration.
 */
#define GPIO_INT_LOW_0                 (1U << 25)
/* Trigger detection on input state is (or transitions to) physical high or
 * logical 1 level.
 *
 * This is a component flag that should be combined with other
 * `GPIO_INT_*` flags to produce a meaningful configuration.
 */
#define GPIO_INT_HIGH_1                (1U << 26)
#ifdef CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT
/* Disable/Enable interrupt functionality without changing other interrupt
 * related register, such as clearing the pending register.
 *
 * This is a component flag that should be combined with `GPIO_INT_ENABLE` or
 * `GPIO_INT_DISABLE` flags to produce a meaningful configuration.
 */
#define GPIO_INT_ENABLE_DISABLE_ONLY   (1u << 27)
#endif /* CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT */
/* Mask of all trigger-configuration bits; note it deliberately does not
 * include GPIO_INT_ENABLE_DISABLE_ONLY (bit 27) — presumably so that bit
 * can be tested independently; confirm against gpio_pin_interrupt_configure.
 */
#define GPIO_INT_MASK                  (GPIO_INT_DISABLE | \
					GPIO_INT_ENABLE | \
					GPIO_INT_LEVELS_LOGICAL | \
					GPIO_INT_EDGE | \
					GPIO_INT_LOW_0 | \
					GPIO_INT_HIGH_1)
/** @endcond */
/** Configures GPIO interrupt to be triggered on pin rising edge and enables it.
 */
#define GPIO_INT_EDGE_RISING           (GPIO_INT_ENABLE | \
					GPIO_INT_EDGE | \
					GPIO_INT_HIGH_1)
/** Configures GPIO interrupt to be triggered on pin falling edge and enables
 * it.
 */
#define GPIO_INT_EDGE_FALLING          (GPIO_INT_ENABLE | \
					GPIO_INT_EDGE | \
					GPIO_INT_LOW_0)
/** Configures GPIO interrupt to be triggered on pin rising or falling edge and
 * enables it.
 */
#define GPIO_INT_EDGE_BOTH             (GPIO_INT_ENABLE | \
					GPIO_INT_EDGE | \
					GPIO_INT_LOW_0 | \
					GPIO_INT_HIGH_1)
/** Configures GPIO interrupt to be triggered on pin physical level low and
 * enables it.
 */
#define GPIO_INT_LEVEL_LOW             (GPIO_INT_ENABLE | \
					GPIO_INT_LOW_0)
/** Configures GPIO interrupt to be triggered on pin physical level high and
 * enables it.
 */
#define GPIO_INT_LEVEL_HIGH            (GPIO_INT_ENABLE | \
					GPIO_INT_HIGH_1)
/** Configures GPIO interrupt to be triggered on pin state change to logical
 * level 0 and enables it.
 */
#define GPIO_INT_EDGE_TO_INACTIVE      (GPIO_INT_ENABLE | \
					GPIO_INT_LEVELS_LOGICAL | \
					GPIO_INT_EDGE | \
					GPIO_INT_LOW_0)
/** Configures GPIO interrupt to be triggered on pin state change to logical
 * level 1 and enables it.
 */
#define GPIO_INT_EDGE_TO_ACTIVE        (GPIO_INT_ENABLE | \
					GPIO_INT_LEVELS_LOGICAL | \
					GPIO_INT_EDGE | \
					GPIO_INT_HIGH_1)
/** Configures GPIO interrupt to be triggered on pin logical level 0 and enables
 * it.
 */
#define GPIO_INT_LEVEL_INACTIVE        (GPIO_INT_ENABLE | \
					GPIO_INT_LEVELS_LOGICAL | \
					GPIO_INT_LOW_0)
/** Configures GPIO interrupt to be triggered on pin logical level 1 and enables
 * it.
 */
#define GPIO_INT_LEVEL_ACTIVE          (GPIO_INT_ENABLE | \
					GPIO_INT_LEVELS_LOGICAL | \
					GPIO_INT_HIGH_1)
/** @} */
/** @cond INTERNAL_HIDDEN */
/* Mask of the direction bits (input and/or output). */
#define GPIO_DIR_MASK		(GPIO_INPUT | GPIO_OUTPUT)
/** @endcond */
/**
* @brief Identifies a set of pins associated with a port.
*
* The pin with index n is present in the set if and only if the bit
* identified by (1U << n) is set.
*/
typedef uint32_t gpio_port_pins_t;
/**
* @brief Provides values for a set of pins associated with a port.
*
* The value for a pin with index n is high (physical mode) or active
* (logical mode) if and only if the bit identified by (1U << n) is set.
* Otherwise the value for the pin is low (physical mode) or inactive
* (logical mode).
*
* Values of this type are often paired with a `gpio_port_pins_t` value
* that specifies which encoded pin values are valid for the operation.
*/
typedef uint32_t gpio_port_value_t;
/**
* @brief Provides a type to hold a GPIO pin index.
*
* This reduced-size type is sufficient to record a pin number,
* e.g. from a devicetree GPIOS property.
*/
typedef uint8_t gpio_pin_t;
/**
* @brief Provides a type to hold GPIO devicetree flags.
*
* All GPIO flags that can be expressed in devicetree fit in the low 16
* bits of the full flags field, so use a reduced-size type to record
* that part of a GPIOS property.
*
* The lower 8 bits are used for standard flags. The upper 8 bits are reserved
* for SoC specific flags.
*/
typedef uint16_t gpio_dt_flags_t;
/**
* @brief Provides a type to hold GPIO configuration flags.
*
* This type is sufficient to hold all flags used to control GPIO
* configuration, whether pin or interrupt.
*/
typedef uint32_t gpio_flags_t;
/**
* @brief Container for GPIO pin information specified in devicetree
*
* This type contains a pointer to a GPIO device, pin number for a pin
* controlled by that device, and the subset of pin configuration
* flags which may be given in devicetree.
*
* @see GPIO_DT_SPEC_GET_BY_IDX
* @see GPIO_DT_SPEC_GET_BY_IDX_OR
* @see GPIO_DT_SPEC_GET
* @see GPIO_DT_SPEC_GET_OR
*/
struct gpio_dt_spec {
	/** GPIO device controlling the pin.
	 *
	 * Must be checked for readiness (e.g. gpio_is_ready_dt()) before use.
	 */
	const struct device *port;
	/** The pin's number on the device */
	gpio_pin_t pin;
	/** The pin's configuration flags as specified in devicetree */
	gpio_dt_flags_t dt_flags;
};
/**
* @brief Static initializer for a @p gpio_dt_spec
*
* This returns a static initializer for a @p gpio_dt_spec structure given a
* devicetree node identifier, a property specifying a GPIO and an index.
*
* Example devicetree fragment:
*
* n: node {
* foo-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>,
* <&gpio1 2 GPIO_ACTIVE_LOW>;
* }
*
* Example usage:
*
* const struct gpio_dt_spec spec = GPIO_DT_SPEC_GET_BY_IDX(DT_NODELABEL(n),
* foo_gpios, 1);
* // Initializes 'spec' to:
* // {
* // .port = DEVICE_DT_GET(DT_NODELABEL(gpio1)),
* // .pin = 2,
* // .dt_flags = GPIO_ACTIVE_LOW
* // }
*
 * The 'port' field must still be checked for readiness, e.g. using
* device_is_ready(). It is an error to use this macro unless the node
* exists, has the given property, and that property specifies a GPIO
* controller, pin number, and flags as shown above.
*
* @param node_id devicetree node identifier
* @param prop lowercase-and-underscores property name
* @param idx logical index into "prop"
* @return static initializer for a struct gpio_dt_spec for the property
*/
#define GPIO_DT_SPEC_GET_BY_IDX(node_id, prop, idx) \
{ \
.port = DEVICE_DT_GET(DT_GPIO_CTLR_BY_IDX(node_id, prop, idx)),\
.pin = DT_GPIO_PIN_BY_IDX(node_id, prop, idx), \
.dt_flags = DT_GPIO_FLAGS_BY_IDX(node_id, prop, idx), \
}
/**
* @brief Like GPIO_DT_SPEC_GET_BY_IDX(), with a fallback to a default value
*
* If the devicetree node identifier 'node_id' refers to a node with a
* property 'prop', this expands to
* <tt>GPIO_DT_SPEC_GET_BY_IDX(node_id, prop, idx)</tt>. The @p
* default_value parameter is not expanded in this case.
*
* Otherwise, this expands to @p default_value.
*
* @param node_id devicetree node identifier
* @param prop lowercase-and-underscores property name
* @param idx logical index into "prop"
* @param default_value fallback value to expand to
* @return static initializer for a struct gpio_dt_spec for the property,
* or default_value if the node or property do not exist
*/
#define GPIO_DT_SPEC_GET_BY_IDX_OR(node_id, prop, idx, default_value) \
COND_CODE_1(DT_NODE_HAS_PROP(node_id, prop), \
(GPIO_DT_SPEC_GET_BY_IDX(node_id, prop, idx)), \
(default_value))
/**
* @brief Equivalent to GPIO_DT_SPEC_GET_BY_IDX(node_id, prop, 0).
*
* @param node_id devicetree node identifier
* @param prop lowercase-and-underscores property name
* @return static initializer for a struct gpio_dt_spec for the property
* @see GPIO_DT_SPEC_GET_BY_IDX()
*/
#define GPIO_DT_SPEC_GET(node_id, prop) \
GPIO_DT_SPEC_GET_BY_IDX(node_id, prop, 0)
/**
* @brief Equivalent to
* GPIO_DT_SPEC_GET_BY_IDX_OR(node_id, prop, 0, default_value).
*
* @param node_id devicetree node identifier
* @param prop lowercase-and-underscores property name
* @param default_value fallback value to expand to
* @return static initializer for a struct gpio_dt_spec for the property
* @see GPIO_DT_SPEC_GET_BY_IDX_OR()
*/
#define GPIO_DT_SPEC_GET_OR(node_id, prop, default_value) \
GPIO_DT_SPEC_GET_BY_IDX_OR(node_id, prop, 0, default_value)
/**
* @brief Static initializer for a @p gpio_dt_spec from a DT_DRV_COMPAT
* instance's GPIO property at an index.
*
* @param inst DT_DRV_COMPAT instance number
* @param prop lowercase-and-underscores property name
* @param idx logical index into "prop"
* @return static initializer for a struct gpio_dt_spec for the property
* @see GPIO_DT_SPEC_GET_BY_IDX()
*/
#define GPIO_DT_SPEC_INST_GET_BY_IDX(inst, prop, idx) \
GPIO_DT_SPEC_GET_BY_IDX(DT_DRV_INST(inst), prop, idx)
/**
* @brief Static initializer for a @p gpio_dt_spec from a DT_DRV_COMPAT
* instance's GPIO property at an index, with fallback
*
* @param inst DT_DRV_COMPAT instance number
* @param prop lowercase-and-underscores property name
* @param idx logical index into "prop"
* @param default_value fallback value to expand to
* @return static initializer for a struct gpio_dt_spec for the property
* @see GPIO_DT_SPEC_GET_BY_IDX()
*/
#define GPIO_DT_SPEC_INST_GET_BY_IDX_OR(inst, prop, idx, default_value) \
COND_CODE_1(DT_PROP_HAS_IDX(DT_DRV_INST(inst), prop, idx), \
(GPIO_DT_SPEC_GET_BY_IDX(DT_DRV_INST(inst), prop, idx)), \
(default_value))
/**
* @brief Equivalent to GPIO_DT_SPEC_INST_GET_BY_IDX(inst, prop, 0).
*
* @param inst DT_DRV_COMPAT instance number
* @param prop lowercase-and-underscores property name
* @return static initializer for a struct gpio_dt_spec for the property
* @see GPIO_DT_SPEC_INST_GET_BY_IDX()
*/
#define GPIO_DT_SPEC_INST_GET(inst, prop) \
GPIO_DT_SPEC_INST_GET_BY_IDX(inst, prop, 0)
/**
* @brief Equivalent to
* GPIO_DT_SPEC_INST_GET_BY_IDX_OR(inst, prop, 0, default_value).
*
* @param inst DT_DRV_COMPAT instance number
* @param prop lowercase-and-underscores property name
* @param default_value fallback value to expand to
* @return static initializer for a struct gpio_dt_spec for the property
* @see GPIO_DT_SPEC_INST_GET_BY_IDX()
*/
#define GPIO_DT_SPEC_INST_GET_OR(inst, prop, default_value) \
GPIO_DT_SPEC_INST_GET_BY_IDX_OR(inst, prop, 0, default_value)
/**
 * @cond INTERNAL_HIDDEN
 */
/**
* Auxiliary conditional macro that generates a bitmask for the range
* from @p "prop" array defined by the (off_idx, sz_idx) pair,
* or 0 if the range does not exist.
*
* @param node_id devicetree node identifier
* @param prop lowercase-and-underscores array property name
* @param off_idx logical index of bitmask offset value into "prop" array
* @param sz_idx logical index of bitmask size value into "prop" array
*/
#define Z_GPIO_GEN_BITMASK_COND(node_id, prop, off_idx, sz_idx) \
COND_CODE_1(DT_PROP_HAS_IDX(node_id, prop, off_idx), \
(COND_CODE_0(DT_PROP_BY_IDX(node_id, prop, sz_idx), \
(0), \
(GENMASK64(DT_PROP_BY_IDX(node_id, prop, off_idx) + \
DT_PROP_BY_IDX(node_id, prop, sz_idx) - 1, \
DT_PROP_BY_IDX(node_id, prop, off_idx)))) \
), (0))
/**
* A helper conditional macro returning generated bitmask for one element
* from @p "gpio-reserved-ranges"
*
* @param odd_it the value of an odd sequential iterator
* @param node_id devicetree node identifier
*/
#define Z_GPIO_GEN_RESERVED_RANGES_COND(odd_it, node_id) \
COND_CODE_1(DT_PROP_HAS_IDX(node_id, gpio_reserved_ranges, odd_it), \
(Z_GPIO_GEN_BITMASK_COND(node_id, \
gpio_reserved_ranges, \
GET_ARG_N(odd_it, Z_SPARSE_LIST_EVEN_NUMBERS), \
odd_it)), \
(0))
/**
* @endcond
*/
/**
* @brief Makes a bitmask of reserved GPIOs from DT @p "gpio-reserved-ranges"
* property and @p "ngpios" argument
*
* This macro returns the value as a bitmask of the @p "gpio-reserved-ranges"
* property. This property defines the disabled (or 'reserved') GPIOs in the
* range @p 0...ngpios-1 and is specified as an array of value's pairs that
* define the start offset and size of the reserved ranges.
*
* For example, setting "gpio-reserved-ranges = <3 2>, <10 1>;"
* means that GPIO offsets 3, 4 and 10 cannot be used even if @p ngpios = <18>.
*
* The implementation constraint is inherited from common DT limitations:
* a maximum of 64 pairs can be used (with result limited to bitsize
* of gpio_port_pins_t type).
*
* NB: Due to the nature of C macros, some incorrect tuple definitions
* (for example, overlapping or out of range) will produce undefined results.
*
* Also be aware that if @p ngpios is less than 32 (bit size of DT int type),
* then all unused MSBs outside the range defined by @p ngpios will be
* marked as reserved too.
*
* Example devicetree fragment:
*
* @code{.dts}
* a {
* compatible = "some,gpio-controller";
* ngpios = <32>;
* gpio-reserved-ranges = <0 4>, <5 3>, <9 5>, <11 2>, <15 2>,
* <18 2>, <21 1>, <23 1>, <25 4>, <30 2>;
* };
*
* b {
* compatible = "some,gpio-controller";
* ngpios = <18>;
* gpio-reserved-ranges = <3 2>, <10 1>;
* };
*
* @endcode
*
* Example usage:
*
* @code{.c}
* struct some_config {
* uint32_t ngpios;
* uint32_t gpios_reserved;
* };
*
* static const struct some_config dev_cfg_a = {
* .ngpios = DT_PROP_OR(DT_LABEL(a), ngpios, 0),
* .gpios_reserved = GPIO_DT_RESERVED_RANGES_NGPIOS(DT_LABEL(a),
* DT_PROP(DT_LABEL(a), ngpios)),
* };
*
* static const struct some_config dev_cfg_b = {
* .ngpios = DT_PROP_OR(DT_LABEL(b), ngpios, 0),
* .gpios_reserved = GPIO_DT_RESERVED_RANGES_NGPIOS(DT_LABEL(b),
* DT_PROP(DT_LABEL(b), ngpios)),
* };
 * @endcode
*
* This expands to:
*
* @code{.c}
* struct some_config {
* uint32_t ngpios;
* uint32_t gpios_reserved;
* };
*
* static const struct some_config dev_cfg_a = {
* .ngpios = 32,
 *     .gpios_reserved = 0xdeadbeef,
 *                       // 0b1101 1110 1010 1101 1011 1110 1110 1111
 * };
*
* static const struct some_config dev_cfg_b = {
* .ngpios = 18,
* .gpios_reserved = 0xfffc0418,
* // 0b1111 1111 1111 1100 0000 0100 0001 1000
* // unused MSBs were marked as reserved too
* };
* @endcode
*
* @param node_id GPIO controller node identifier.
* @param ngpios number of GPIOs.
* @return the bitmask of reserved gpios
*/
#define GPIO_DT_RESERVED_RANGES_NGPIOS(node_id, ngpios) \
((gpio_port_pins_t) \
COND_CODE_1(DT_NODE_HAS_PROP(node_id, gpio_reserved_ranges), \
(GENMASK64(BITS_PER_LONG_LONG - 1, ngpios) \
| FOR_EACH_FIXED_ARG(Z_GPIO_GEN_RESERVED_RANGES_COND, \
(|), \
node_id, \
LIST_DROP_EMPTY(Z_SPARSE_LIST_ODD_NUMBERS))), \
(0)))
/**
* @brief Makes a bitmask of reserved GPIOs from the @p "gpio-reserved-ranges"
* and @p "ngpios" DT properties values
*
* @param node_id GPIO controller node identifier.
* @return the bitmask of reserved gpios
*/
#define GPIO_DT_RESERVED_RANGES(node_id) \
GPIO_DT_RESERVED_RANGES_NGPIOS(node_id, DT_PROP(node_id, ngpios))
/**
* @brief Makes a bitmask of reserved GPIOs from a DT_DRV_COMPAT instance's
* @p "gpio-reserved-ranges" property and @p "ngpios" argument
*
* @param inst DT_DRV_COMPAT instance number
 * @param ngpios number of GPIOs
 * @return the bitmask of reserved gpios
* @see GPIO_DT_RESERVED_RANGES()
*/
#define GPIO_DT_INST_RESERVED_RANGES_NGPIOS(inst, ngpios) \
GPIO_DT_RESERVED_RANGES_NGPIOS(DT_DRV_INST(inst), ngpios)
/**
* @brief Make a bitmask of reserved GPIOs from a DT_DRV_COMPAT instance's GPIO
* @p "gpio-reserved-ranges" and @p "ngpios" properties
*
* @param inst DT_DRV_COMPAT instance number
* @return the bitmask of reserved gpios
* @see GPIO_DT_RESERVED_RANGES()
*/
#define GPIO_DT_INST_RESERVED_RANGES(inst) \
GPIO_DT_RESERVED_RANGES(DT_DRV_INST(inst))
/**
* @brief Makes a bitmask of allowed GPIOs from DT @p "gpio-reserved-ranges"
* property and @p "ngpios" argument
*
* This macro is paired with GPIO_DT_RESERVED_RANGES_NGPIOS(), however unlike
* the latter, it returns a bitmask of ALLOWED gpios.
*
* Example devicetree fragment:
*
* @code{.dts}
* a {
* compatible = "some,gpio-controller";
* ngpios = <32>;
* gpio-reserved-ranges = <0 8>, <9 5>, <15 16>;
* };
*
* @endcode
*
* Example usage:
*
* @code{.c}
* struct some_config {
* uint32_t port_pin_mask;
* };
*
* static const struct some_config dev_cfg = {
* .port_pin_mask = GPIO_DT_PORT_PIN_MASK_NGPIOS_EXC(
* DT_LABEL(a), 32),
* };
* @endcode
*
* This expands to:
*
* @code{.c}
* struct some_config {
* uint32_t port_pin_mask;
* };
*
* static const struct some_config dev_cfg = {
* .port_pin_mask = 0x80004100,
 *                      // 0b1000 0000 0000 0000 0100 0001 0000 0000
* };
* @endcode
*
* @param node_id GPIO controller node identifier.
* @param ngpios number of GPIOs
* @return the bitmask of allowed gpios
*/
#define GPIO_DT_PORT_PIN_MASK_NGPIOS_EXC(node_id, ngpios) \
((gpio_port_pins_t) \
COND_CODE_0(ngpios, \
(0), \
(COND_CODE_1(DT_NODE_HAS_PROP(node_id, gpio_reserved_ranges), \
((GENMASK64(ngpios - 1, 0) & \
~GPIO_DT_RESERVED_RANGES_NGPIOS(node_id, ngpios))), \
(GENMASK64(ngpios - 1, 0))) \
) \
))
/**
* @brief Makes a bitmask of allowed GPIOs from a DT_DRV_COMPAT instance's
* @p "gpio-reserved-ranges" property and @p "ngpios" argument
*
* @param inst DT_DRV_COMPAT instance number
* @param ngpios number of GPIOs
* @return the bitmask of allowed gpios
 * @see GPIO_DT_PORT_PIN_MASK_NGPIOS_EXC()
*/
#define GPIO_DT_INST_PORT_PIN_MASK_NGPIOS_EXC(inst, ngpios) \
GPIO_DT_PORT_PIN_MASK_NGPIOS_EXC(DT_DRV_INST(inst), ngpios)
/**
* @brief Maximum number of pins that are supported by `gpio_port_pins_t`.
*/
#define GPIO_MAX_PINS_PER_PORT (sizeof(gpio_port_pins_t) * __CHAR_BIT__)
/**
* This structure is common to all GPIO drivers and is expected to be
* the first element in the object pointed to by the config field
* in the device structure.
*/
struct gpio_driver_config {
	/** Mask identifying pins supported by the controller.
	 *
	 * Bit n is set if and only if pin n is usable on this port.
	 *
	 * Initialization of this mask is the responsibility of device
	 * instance generation in the driver.
	 */
	gpio_port_pins_t port_pin_mask;
};
/**
* This structure is common to all GPIO drivers and is expected to be the first
* element in the driver's struct driver_data declaration.
*/
struct gpio_driver_data {
	/** Mask identifying pins that are configured as active low.
	 *
	 * Updated by gpio_pin_configure() when GPIO_ACTIVE_LOW is (not)
	 * given, and consulted by the logical-level accessors to convert
	 * between physical and logical pin values.
	 *
	 * Management of this mask is the responsibility of the
	 * wrapper functions in this header.
	 */
	gpio_port_pins_t invert;
};
struct gpio_callback;
/**
* @typedef gpio_callback_handler_t
* @brief Define the application callback handler function signature
*
* @param port Device struct for the GPIO device.
* @param cb Original struct gpio_callback owning this handler
* @param pins Mask of pins that triggers the callback handler
*
* Note: cb pointer can be used to retrieve private data through
* CONTAINER_OF() if original struct gpio_callback is stored in
* another private structure.
*/
typedef void (*gpio_callback_handler_t)(const struct device *port,
struct gpio_callback *cb,
gpio_port_pins_t pins);
/**
* @brief GPIO callback structure
*
* Used to register a callback in the driver instance callback list.
* As many callbacks as needed can be added as long as each of them
* are unique pointers of struct gpio_callback.
* Beware such structure should not be allocated on stack.
*
* Note: To help setting it, see gpio_init_callback() below
*/
struct gpio_callback {
	/** List node for the driver's internal callback list.
	 *
	 * This is meant to be used in the driver and the user should not
	 * mess with it (see drivers/gpio/gpio_utils.h)
	 */
	sys_snode_t node;
	/** Actual callback function being called when relevant. */
	gpio_callback_handler_t handler;
	/** A mask of pins the callback is interested in, if 0 the callback
	 * will never be called. Such pin_mask can be modified whenever
	 * necessary by the owner, and thus will affect the handler being
	 * called or not. The selected pins must be configured to trigger
	 * an interrupt.
	 */
	gpio_port_pins_t pin_mask;
};
/**
* @cond INTERNAL_HIDDEN
*
* For internal use only, skip these in public documentation.
*/
/* Used by driver api function pin_interrupt_configure, these are defined
* in terms of the public flags so we can just mask and pass them
* through to the driver api
*/
enum gpio_int_mode {
	/* Interrupt detection disabled for the pin. */
	GPIO_INT_MODE_DISABLED = GPIO_INT_DISABLE,
	/* Interrupt triggered while the pin is at the selected level. */
	GPIO_INT_MODE_LEVEL = GPIO_INT_ENABLE,
	/* Interrupt triggered on a transition (edge) of the pin. */
	GPIO_INT_MODE_EDGE = GPIO_INT_ENABLE | GPIO_INT_EDGE,
#ifdef CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT
	/* Disable-only request (see GPIO_INT_ENABLE_DISABLE_ONLY). */
	GPIO_INT_MODE_DISABLE_ONLY = GPIO_INT_DISABLE | GPIO_INT_ENABLE_DISABLE_ONLY,
	/* Enable-only request (see GPIO_INT_ENABLE_DISABLE_ONLY). */
	GPIO_INT_MODE_ENABLE_ONLY = GPIO_INT_ENABLE | GPIO_INT_ENABLE_DISABLE_ONLY,
#endif /* CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT */
};
enum gpio_int_trig {
	/* Trigger detection when input state is (or transitions to)
	 * physical low. (Edge Falling or Active Low)
	 */
	GPIO_INT_TRIG_LOW = GPIO_INT_LOW_0,
	/* Trigger detection when input state is (or transitions to)
	 * physical high. (Edge Rising or Active High)
	 */
	GPIO_INT_TRIG_HIGH = GPIO_INT_HIGH_1,
	/* Trigger detection on pin rising or falling edge. */
	GPIO_INT_TRIG_BOTH = GPIO_INT_LOW_0 | GPIO_INT_HIGH_1,
	/* Trigger a system wakeup. */
	GPIO_INT_TRIG_WAKE = GPIO_INT_WAKEUP,
};
/* Driver API table. Fixed the pin_interrupt_configure prototype, which
 * omitted its last two parameter names (every other member names all of
 * its parameters); the function-pointer type is unchanged.
 */
__subsystem struct gpio_driver_api {
	/** Configure a single pin (called unconditionally by the wrappers). */
	int (*pin_configure)(const struct device *port, gpio_pin_t pin,
			     gpio_flags_t flags);
#ifdef CONFIG_GPIO_GET_CONFIG
	/** Read back a single pin's configuration; optional (may be NULL). */
	int (*pin_get_config)(const struct device *port, gpio_pin_t pin,
			      gpio_flags_t *flags);
#endif
	/** Get physical level of all input pins in the port. */
	int (*port_get_raw)(const struct device *port,
			    gpio_port_value_t *value);
	/** Set physical level of the pins selected by @a mask. */
	int (*port_set_masked_raw)(const struct device *port,
				   gpio_port_pins_t mask,
				   gpio_port_value_t value);
	/** Drive the selected pins to high physical level. */
	int (*port_set_bits_raw)(const struct device *port,
				 gpio_port_pins_t pins);
	/** Drive the selected pins to low physical level. */
	int (*port_clear_bits_raw)(const struct device *port,
				   gpio_port_pins_t pins);
	/** Toggle the level of the selected pins. */
	int (*port_toggle_bits)(const struct device *port,
				gpio_port_pins_t pins);
	/** Configure pin interrupt detection; optional (may be NULL). */
	int (*pin_interrupt_configure)(const struct device *port,
				       gpio_pin_t pin,
				       enum gpio_int_mode mode,
				       enum gpio_int_trig trig);
	/** Add or remove a callback from the driver's callback list. */
	int (*manage_callback)(const struct device *port,
			       struct gpio_callback *cb,
			       bool set);
	/** Return pending interrupt status for the device. */
	uint32_t (*get_pending_int)(const struct device *dev);
#ifdef CONFIG_GPIO_GET_DIRECTION
	/** Report I/O direction of the pins in @a map; optional (may be NULL). */
	int (*port_get_direction)(const struct device *port, gpio_port_pins_t map,
				  gpio_port_pins_t *inputs, gpio_port_pins_t *outputs);
#endif /* CONFIG_GPIO_GET_DIRECTION */
};
/**
* @endcond
*/
/**
* @brief Validate that GPIO port is ready.
*
* @param spec GPIO specification from devicetree
*
* @retval true if the GPIO spec is ready for use.
* @retval false if the GPIO spec is not ready for use.
*/
static inline bool gpio_is_ready_dt(const struct gpio_dt_spec *spec)
{
	/* A devicetree GPIO spec is usable once its controller is ready. */
	const struct device *ctrl = spec->port;

	return device_is_ready(ctrl);
}
/**
* @brief Configure pin interrupt.
*
* @note This function can also be used to configure interrupts on pins
* not controlled directly by the GPIO module. That is, pins which are
* routed to other modules such as I2C, SPI, UART.
*
* @param port Pointer to device structure for the driver instance.
* @param pin Pin number.
* @param flags Interrupt configuration flags as defined by GPIO_INT_*.
*
* @retval 0 If successful.
* @retval -ENOSYS If the operation is not implemented by the driver.
* @retval -ENOTSUP If any of the configuration options is not supported
* (unless otherwise directed by flag documentation).
* @retval -EINVAL Invalid argument.
* @retval -EBUSY Interrupt line required to configure pin interrupt is
* already in use.
* @retval -EIO I/O error when accessing an external GPIO chip.
* @retval -EWOULDBLOCK if operation would block.
*/
__syscall int gpio_pin_interrupt_configure(const struct device *port,
gpio_pin_t pin,
gpio_flags_t flags);
static inline int z_impl_gpio_pin_interrupt_configure(const struct device *port,
						      gpio_pin_t pin,
						      gpio_flags_t flags)
{
	const struct gpio_driver_api *api =
		(const struct gpio_driver_api *)port->api;
	/* cfg is only read inside __ASSERT(); unused in assert-less builds. */
	__unused const struct gpio_driver_config *const cfg =
		(const struct gpio_driver_config *)port->config;
	const struct gpio_driver_data *const data =
		(const struct gpio_driver_data *)port->data;
	enum gpio_int_trig trig;
	enum gpio_int_mode mode;
	/* Interrupt support is an optional driver capability. */
	if (api->pin_interrupt_configure == NULL) {
		return -ENOSYS;
	}
	/* Exactly one of GPIO_INT_DISABLE / GPIO_INT_ENABLE must be set. */
	__ASSERT((flags & (GPIO_INT_DISABLE | GPIO_INT_ENABLE))
		 != (GPIO_INT_DISABLE | GPIO_INT_ENABLE),
		 "Cannot both enable and disable interrupts");
	__ASSERT((flags & (GPIO_INT_DISABLE | GPIO_INT_ENABLE)) != 0U,
		 "Must either enable or disable interrupts");
	/* A level interrupt cannot be active on both levels at once. */
	__ASSERT(((flags & GPIO_INT_ENABLE) == 0) ||
		 ((flags & GPIO_INT_EDGE) != 0) ||
		 ((flags & (GPIO_INT_LOW_0 | GPIO_INT_HIGH_1)) !=
		  (GPIO_INT_LOW_0 | GPIO_INT_HIGH_1)),
		 "Only one of GPIO_INT_LOW_0, GPIO_INT_HIGH_1 can be "
		 "enabled for a level interrupt.");
	/* Enabling requires a trigger condition, except for the
	 * enable/disable-only shortcut when that feature is compiled in.
	 */
	__ASSERT(((flags & GPIO_INT_ENABLE) == 0) ||
#ifdef CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT
		 ((flags & (GPIO_INT_LOW_0 | GPIO_INT_HIGH_1)) != 0) ||
		 (flags & GPIO_INT_ENABLE_DISABLE_ONLY) != 0,
#else
		 ((flags & (GPIO_INT_LOW_0 | GPIO_INT_HIGH_1)) != 0),
#endif /* CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT */
		 "At least one of GPIO_INT_LOW_0, GPIO_INT_HIGH_1 has to be "
		 "enabled.");
	__ASSERT((cfg->port_pin_mask & (gpio_port_pins_t)BIT(pin)) != 0U,
		 "Unsupported pin");
	/* Logical-level trigger on an active-low pin: swap the LOW_0/HIGH_1
	 * bits so the driver receives the corresponding physical levels.
	 */
	if (((flags & GPIO_INT_LEVELS_LOGICAL) != 0) &&
	    ((data->invert & (gpio_port_pins_t)BIT(pin)) != 0)) {
		/* Invert signal bits */
		flags ^= (GPIO_INT_LOW_0 | GPIO_INT_HIGH_1);
	}
	/* Split the public flags into the trigger and mode subsets the
	 * driver API expects.
	 */
	trig = (enum gpio_int_trig)(flags & (GPIO_INT_LOW_0 | GPIO_INT_HIGH_1 | GPIO_INT_WAKEUP));
#ifdef CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT
	mode = (enum gpio_int_mode)(flags & (GPIO_INT_EDGE | GPIO_INT_DISABLE | GPIO_INT_ENABLE |
					     GPIO_INT_ENABLE_DISABLE_ONLY));
#else
	mode = (enum gpio_int_mode)(flags & (GPIO_INT_EDGE | GPIO_INT_DISABLE | GPIO_INT_ENABLE));
#endif /* CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT */
	return api->pin_interrupt_configure(port, pin, mode, trig);
}
/**
* @brief Configure pin interrupts from a @p gpio_dt_spec.
*
* This is equivalent to:
*
* gpio_pin_interrupt_configure(spec->port, spec->pin, flags);
*
* The <tt>spec->dt_flags</tt> value is not used.
*
* @param spec GPIO specification from devicetree
* @param flags interrupt configuration flags
* @return a value from gpio_pin_interrupt_configure()
*/
static inline int gpio_pin_interrupt_configure_dt(const struct gpio_dt_spec *spec,
						  gpio_flags_t flags)
{
	/* Forward to the plain API; spec->dt_flags is deliberately unused. */
	const struct device *port = spec->port;

	return gpio_pin_interrupt_configure(port, spec->pin, flags);
}
/**
* @brief Configure a single pin.
*
* @param port Pointer to device structure for the driver instance.
* @param pin Pin number to configure.
* @param flags Flags for pin configuration: 'GPIO input/output configuration
* flags', 'GPIO pin drive flags', 'GPIO pin bias flags'.
*
* @retval 0 If successful.
* @retval -ENOTSUP if any of the configuration options is not supported
* (unless otherwise directed by flag documentation).
* @retval -EINVAL Invalid argument.
* @retval -EIO I/O error when accessing an external GPIO chip.
* @retval -EWOULDBLOCK if operation would block.
*/
__syscall int gpio_pin_configure(const struct device *port,
gpio_pin_t pin,
gpio_flags_t flags);
static inline int z_impl_gpio_pin_configure(const struct device *port,
					    gpio_pin_t pin,
					    gpio_flags_t flags)
{
	const struct gpio_driver_api *api =
		(const struct gpio_driver_api *)port->api;
	/* cfg is only read inside __ASSERT(); unused in assert-less builds. */
	__unused const struct gpio_driver_config *const cfg =
		(const struct gpio_driver_config *)port->config;
	struct gpio_driver_data *data =
		(struct gpio_driver_data *)port->data;
	/* Interrupt flags belong to gpio_pin_interrupt_configure(). */
	__ASSERT((flags & GPIO_INT_MASK) == 0,
		 "Interrupt flags are not supported");
	__ASSERT((flags & (GPIO_PULL_UP | GPIO_PULL_DOWN)) !=
		 (GPIO_PULL_UP | GPIO_PULL_DOWN),
		 "Pull Up and Pull Down should not be enabled simultaneously");
	__ASSERT(!((flags & GPIO_INPUT) && !(flags & GPIO_OUTPUT) && (flags & GPIO_SINGLE_ENDED)),
		 "Input cannot be enabled for 'Open Drain', 'Open Source' modes without Output");
	/* GPIO_LINE_OPEN_DRAIN is only meaningful with GPIO_SINGLE_ENDED. */
	__ASSERT_NO_MSG((flags & GPIO_SINGLE_ENDED) != 0 ||
			(flags & GPIO_LINE_OPEN_DRAIN) == 0);
	__ASSERT((flags & (GPIO_OUTPUT_INIT_LOW | GPIO_OUTPUT_INIT_HIGH)) == 0
		 || (flags & GPIO_OUTPUT) != 0,
		 "Output needs to be enabled to be initialized low or high");
	__ASSERT((flags & (GPIO_OUTPUT_INIT_LOW | GPIO_OUTPUT_INIT_HIGH))
		 != (GPIO_OUTPUT_INIT_LOW | GPIO_OUTPUT_INIT_HIGH),
		 "Output cannot be initialized low and high");
	/* Logical initial level on an active-low pin corresponds to the
	 * opposite physical level: toggle the INIT_LOW/INIT_HIGH pair.
	 */
	if (((flags & GPIO_OUTPUT_INIT_LOGICAL) != 0)
	    && ((flags & (GPIO_OUTPUT_INIT_LOW | GPIO_OUTPUT_INIT_HIGH)) != 0)
	    && ((flags & GPIO_ACTIVE_LOW) != 0)) {
		flags ^= GPIO_OUTPUT_INIT_LOW | GPIO_OUTPUT_INIT_HIGH;
	}
	/* Drivers are only passed physical levels; strip the logical marker. */
	flags &= ~GPIO_OUTPUT_INIT_LOGICAL;
	__ASSERT((cfg->port_pin_mask & (gpio_port_pins_t)BIT(pin)) != 0U,
		 "Unsupported pin");
	/* Record the pin's active-low setting for the logical-level API. */
	if ((flags & GPIO_ACTIVE_LOW) != 0) {
		data->invert |= (gpio_port_pins_t)BIT(pin);
	} else {
		data->invert &= ~(gpio_port_pins_t)BIT(pin);
	}
	return api->pin_configure(port, pin, flags);
}
/**
* @brief Configure a single pin from a @p gpio_dt_spec and some extra flags.
*
* This is equivalent to:
*
* gpio_pin_configure(spec->port, spec->pin, spec->dt_flags | extra_flags);
*
* @param spec GPIO specification from devicetree
* @param extra_flags additional flags
* @return a value from gpio_pin_configure()
*/
static inline int gpio_pin_configure_dt(const struct gpio_dt_spec *spec,
					gpio_flags_t extra_flags)
{
	/* Combine the devicetree flags with the caller-supplied extras. */
	gpio_flags_t flags = (gpio_flags_t)spec->dt_flags | extra_flags;

	return gpio_pin_configure(spec->port, spec->pin, flags);
}
/**
* @brief Get direction of select pins in a port.
*
* Retrieve direction of each pin specified in @p map.
*
* If @p inputs or @p outputs is NULL, then this function does not get the
* respective input or output direction information.
*
* @param port Pointer to the device structure for the driver instance.
* @param map Bitmap of pin directions to query.
* @param inputs Pointer to a variable where input directions will be stored.
* @param outputs Pointer to a variable where output directions will be stored.
*
* @retval 0 If successful.
* @retval -ENOSYS if the underlying driver does not support this call.
* @retval -EIO I/O error when accessing an external GPIO chip.
* @retval -EWOULDBLOCK if operation would block.
*/
__syscall int gpio_port_get_direction(const struct device *port, gpio_port_pins_t map,
gpio_port_pins_t *inputs, gpio_port_pins_t *outputs);
#ifdef CONFIG_GPIO_GET_DIRECTION
static inline int z_impl_gpio_port_get_direction(const struct device *port, gpio_port_pins_t map,
						 gpio_port_pins_t *inputs,
						 gpio_port_pins_t *outputs)
{
	const struct gpio_driver_api *api = (const struct gpio_driver_api *)port->api;

	/* Direction readback is an optional driver capability. */
	return (api->port_get_direction == NULL)
		       ? -ENOSYS
		       : api->port_get_direction(port, map, inputs, outputs);
}
#endif /* CONFIG_GPIO_GET_DIRECTION */
/**
* @brief Check if @p pin is configured for input
*
* @param port Pointer to device structure for the driver instance.
* @param pin Pin number to query the direction of
*
* @retval 1 if @p pin is configured as @ref GPIO_INPUT.
* @retval 0 if @p pin is not configured as @ref GPIO_INPUT.
* @retval -ENOSYS if the underlying driver does not support this call.
* @retval -EIO I/O error when accessing an external GPIO chip.
* @retval -EWOULDBLOCK if operation would block.
*/
static inline int gpio_pin_is_input(const struct device *port, gpio_pin_t pin)
{
	gpio_port_pins_t input_map;
	int err;
	__unused const struct gpio_driver_config *cfg =
		(const struct gpio_driver_config *)port->config;

	__ASSERT((cfg->port_pin_mask & (gpio_port_pins_t)BIT(pin)) != 0U, "Unsupported pin");

	/* Query only this pin's direction; outputs are not of interest. */
	err = gpio_port_get_direction(port, BIT(pin), &input_map, NULL);
	if (err < 0) {
		return err;
	}

	return ((input_map & (gpio_port_pins_t)BIT(pin)) != 0) ? 1 : 0;
}
/**
* @brief Check if a single pin from @p gpio_dt_spec is configured for input
*
* This is equivalent to:
*
* gpio_pin_is_input(spec->port, spec->pin);
*
* @param spec GPIO specification from devicetree.
*
* @return A value from gpio_pin_is_input().
*/
static inline int gpio_pin_is_input_dt(const struct gpio_dt_spec *spec)
{
	/* Thin forwarding wrapper over gpio_pin_is_input(). */
	const struct device *dev = spec->port;

	return gpio_pin_is_input(dev, spec->pin);
}
/**
* @brief Check if @p pin is configured for output
*
* @param port Pointer to device structure for the driver instance.
* @param pin Pin number to query the direction of
*
* @retval 1 if @p pin is configured as @ref GPIO_OUTPUT.
* @retval 0 if @p pin is not configured as @ref GPIO_OUTPUT.
* @retval -ENOSYS if the underlying driver does not support this call.
* @retval -EIO I/O error when accessing an external GPIO chip.
* @retval -EWOULDBLOCK if operation would block.
*/
static inline int gpio_pin_is_output(const struct device *port, gpio_pin_t pin)
{
	gpio_port_pins_t output_map;
	int err;
	__unused const struct gpio_driver_config *cfg =
		(const struct gpio_driver_config *)port->config;

	__ASSERT((cfg->port_pin_mask & (gpio_port_pins_t)BIT(pin)) != 0U, "Unsupported pin");

	/* Query only this pin's direction; inputs are not of interest. */
	err = gpio_port_get_direction(port, BIT(pin), NULL, &output_map);
	if (err < 0) {
		return err;
	}

	return ((output_map & (gpio_port_pins_t)BIT(pin)) != 0) ? 1 : 0;
}
/**
* @brief Check if a single pin from @p gpio_dt_spec is configured for output
*
* This is equivalent to:
*
* gpio_pin_is_output(spec->port, spec->pin);
*
* @param spec GPIO specification from devicetree.
*
* @return A value from gpio_pin_is_output().
*/
static inline int gpio_pin_is_output_dt(const struct gpio_dt_spec *spec)
{
	/* Thin forwarding wrapper over gpio_pin_is_output(). */
	const struct device *dev = spec->port;

	return gpio_pin_is_output(dev, spec->pin);
}
/**
* @brief Get a configuration of a single pin.
*
* @param port Pointer to device structure for the driver instance.
* @param pin Pin number which configuration is get.
* @param flags Pointer to variable in which the current configuration will
* be stored if function is successful.
*
* @retval 0 If successful.
* @retval -ENOSYS if getting current pin configuration is not implemented
* by the driver.
* @retval -EINVAL Invalid argument.
* @retval -EIO I/O error when accessing an external GPIO chip.
* @retval -EWOULDBLOCK if operation would block.
*/
__syscall int gpio_pin_get_config(const struct device *port, gpio_pin_t pin,
gpio_flags_t *flags);
#ifdef CONFIG_GPIO_GET_CONFIG
static inline int z_impl_gpio_pin_get_config(const struct device *port,
					     gpio_pin_t pin,
					     gpio_flags_t *flags)
{
	const struct gpio_driver_api *api =
		(const struct gpio_driver_api *)port->api;

	/* Configuration readback is an optional driver capability.
	 * Braces added to the single-statement if for consistency with the
	 * other API wrappers in this header (coding-style fix; no behavior
	 * change).
	 */
	if (api->pin_get_config == NULL) {
		return -ENOSYS;
	}

	return api->pin_get_config(port, pin, flags);
}
#endif
/**
* @brief Get a configuration of a single pin from a @p gpio_dt_spec.
*
* This is equivalent to:
*
* gpio_pin_get_config(spec->port, spec->pin, flags);
*
* @param spec GPIO specification from devicetree
* @param flags Pointer to variable in which the current configuration will
* be stored if function is successful.
 * @return a value from gpio_pin_get_config()
*/
static inline int gpio_pin_get_config_dt(const struct gpio_dt_spec *spec,
					 gpio_flags_t *flags)
{
	/* Thin forwarding wrapper over gpio_pin_get_config(). */
	const struct device *dev = spec->port;

	return gpio_pin_get_config(dev, spec->pin, flags);
}
/**
* @brief Get physical level of all input pins in a port.
*
* A low physical level on the pin will be interpreted as value 0. A high
* physical level will be interpreted as value 1. This function ignores
* GPIO_ACTIVE_LOW flag.
*
* Value of a pin with index n will be represented by bit n in the returned
* port value.
*
* @param port Pointer to the device structure for the driver instance.
* @param value Pointer to a variable where pin values will be stored.
*
* @retval 0 If successful.
* @retval -EIO I/O error when accessing an external GPIO chip.
* @retval -EWOULDBLOCK if operation would block.
*/
__syscall int gpio_port_get_raw(const struct device *port,
gpio_port_value_t *value);
static inline int z_impl_gpio_port_get_raw(const struct device *port,
					   gpio_port_value_t *value)
{
	/* port_get_raw is a mandatory driver API; call it directly. */
	return ((const struct gpio_driver_api *)port->api)
		->port_get_raw(port, value);
}
/**
 * @brief Get logical level of all input pins in a port.
 *
 * Reads logical levels, honoring GPIO_ACTIVE_LOW: for an Active High pin a
 * low physical level reads as logical 0, while for an Active Low pin a low
 * physical level reads as logical 1.
 *
 * Bit n of the stored value corresponds to the pin with index n.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param value Pointer to a variable where pin values will be stored.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_port_get(const struct device *port,
				gpio_port_value_t *value)
{
	const struct gpio_driver_data *const data =
		(const struct gpio_driver_data *)port->data;
	int rc = gpio_port_get_raw(port, value);

	if (rc == 0) {
		/* Flip the bits of pins configured as active-low. */
		*value ^= data->invert;
	}

	return rc;
}
/**
 * @brief Set physical level of output pins in a port.
 *
 * Bit value 0 drives the corresponding pin to a low physical level, bit
 * value 1 to a high physical level. The GPIO_ACTIVE_LOW flag is ignored.
 *
 * Bit n of @p mask and @p value corresponds to the pin with index n.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param mask Mask indicating which pins will be modified.
 * @param value Value assigned to the output pins.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
__syscall int gpio_port_set_masked_raw(const struct device *port,
				       gpio_port_pins_t mask,
				       gpio_port_value_t value);

static inline int z_impl_gpio_port_set_masked_raw(const struct device *port,
						  gpio_port_pins_t mask,
						  gpio_port_value_t value)
{
	/* Dispatch straight to the driver's port_set_masked_raw operation. */
	return ((const struct gpio_driver_api *)port->api)->port_set_masked_raw(port, mask,
										value);
}
/**
 * @brief Set logical level of output pins in a port.
 *
 * Writes logical levels, honoring GPIO_ACTIVE_LOW: value 0 puts a pin in
 * the logical 0 / inactive state, value 1 in the logical 1 / active state.
 * An Active High pin (the default) is driven to a low physical level when
 * inactive; an Active Low pin is driven to a high physical level when
 * inactive.
 *
 * Bit n of @p mask and @p value corresponds to the pin with index n.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param mask Mask indicating which pins will be modified.
 * @param value Value assigned to the output pins.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_port_set_masked(const struct device *port,
				       gpio_port_pins_t mask,
				       gpio_port_value_t value)
{
	const struct gpio_driver_data *const data =
		(const struct gpio_driver_data *)port->data;

	/* Map logical levels to physical levels for active-low pins. */
	return gpio_port_set_masked_raw(port, mask, value ^ data->invert);
}
/**
 * @brief Set physical level of selected output pins to high.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pins Value indicating which pins will be modified.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
__syscall int gpio_port_set_bits_raw(const struct device *port,
				     gpio_port_pins_t pins);

static inline int z_impl_gpio_port_set_bits_raw(const struct device *port,
						gpio_port_pins_t pins)
{
	/* Dispatch straight to the driver's port_set_bits_raw operation. */
	return ((const struct gpio_driver_api *)port->api)->port_set_bits_raw(port, pins);
}
/**
 * @brief Set logical level of selected output pins to active.
 *
 * Honors GPIO_ACTIVE_LOW: an active-low pin is driven to a low physical
 * level when set active.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pins Value indicating which pins will be modified.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_port_set_bits(const struct device *port,
				     gpio_port_pins_t pins)
{
	/* Selected pins form both the mask and the value. */
	return gpio_port_set_masked(port, pins, pins);
}
/**
 * @brief Set physical level of selected output pins to low.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pins Value indicating which pins will be modified.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
__syscall int gpio_port_clear_bits_raw(const struct device *port,
				       gpio_port_pins_t pins);

static inline int z_impl_gpio_port_clear_bits_raw(const struct device *port,
						  gpio_port_pins_t pins)
{
	/* Dispatch straight to the driver's port_clear_bits_raw operation. */
	return ((const struct gpio_driver_api *)port->api)->port_clear_bits_raw(port, pins);
}
/**
 * @brief Set logical level of selected output pins to inactive.
 *
 * Honors GPIO_ACTIVE_LOW: an active-low pin is driven to a high physical
 * level when set inactive.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pins Value indicating which pins will be modified.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_port_clear_bits(const struct device *port,
				       gpio_port_pins_t pins)
{
	/* Mask selects the pins; a zero value deactivates all of them. */
	return gpio_port_set_masked(port, pins, 0);
}
/**
 * @brief Toggle level of selected output pins.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pins Value indicating which pins will be modified.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
__syscall int gpio_port_toggle_bits(const struct device *port,
				    gpio_port_pins_t pins);

static inline int z_impl_gpio_port_toggle_bits(const struct device *port,
					       gpio_port_pins_t pins)
{
	/* Dispatch straight to the driver's port_toggle_bits operation. */
	return ((const struct gpio_driver_api *)port->api)->port_toggle_bits(port, pins);
}
/**
 * @brief Set physical level of selected output pins.
 *
 * Drives @p set_pins high and @p clear_pins low in a single masked write.
 * The two sets must not overlap.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param set_pins Value indicating which pins will be set to high.
 * @param clear_pins Value indicating which pins will be set to low.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_port_set_clr_bits_raw(const struct device *port,
					     gpio_port_pins_t set_pins,
					     gpio_port_pins_t clear_pins)
{
	gpio_port_pins_t mask = set_pins | clear_pins;

	__ASSERT((set_pins & clear_pins) == 0, "Set and Clear pins overlap");

	return gpio_port_set_masked_raw(port, mask, set_pins);
}
/**
 * @brief Set logical level of selected output pins.
 *
 * Sets @p set_pins to the active state and @p clear_pins to the inactive
 * state in a single masked write, honoring GPIO_ACTIVE_LOW. The two sets
 * must not overlap.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param set_pins Value indicating which pins will be set to active.
 * @param clear_pins Value indicating which pins will be set to inactive.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_port_set_clr_bits(const struct device *port,
					 gpio_port_pins_t set_pins,
					 gpio_port_pins_t clear_pins)
{
	gpio_port_pins_t mask = set_pins | clear_pins;

	__ASSERT((set_pins & clear_pins) == 0, "Set and Clear pins overlap");

	return gpio_port_set_masked(port, mask, set_pins);
}
/**
 * @brief Get physical level of an input pin.
 *
 * A low physical level on the pin reads as 0, a high physical level as 1.
 * The GPIO_ACTIVE_LOW flag is ignored.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pin Pin number.
 *
 * @retval 1 If pin physical level is high.
 * @retval 0 If pin physical level is low.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_pin_get_raw(const struct device *port, gpio_pin_t pin)
{
	__unused const struct gpio_driver_config *const cfg =
		(const struct gpio_driver_config *)port->config;
	gpio_port_value_t port_val;
	int rc;

	__ASSERT((cfg->port_pin_mask & (gpio_port_pins_t)BIT(pin)) != 0U,
		 "Unsupported pin");

	rc = gpio_port_get_raw(port, &port_val);
	if (rc != 0) {
		return rc;
	}

	/* Extract the single pin's level from the port value. */
	return (port_val & (gpio_port_pins_t)BIT(pin)) ? 1 : 0;
}
/**
 * @brief Get logical level of an input pin.
 *
 * Reads the pin's logical level, honoring GPIO_ACTIVE_LOW: for an Active
 * High pin a low physical level reads as logical 0, for an Active Low pin
 * a low physical level reads as logical 1.
 *
 * Note: for an Active High pin (the default) this behaves identically to
 * gpio_pin_get_raw().
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pin Pin number.
 *
 * @retval 1 If pin logical value is 1 / active.
 * @retval 0 If pin logical value is 0 / inactive.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_pin_get(const struct device *port, gpio_pin_t pin)
{
	__unused const struct gpio_driver_config *const cfg =
		(const struct gpio_driver_config *)port->config;
	gpio_port_value_t port_val;
	int rc;

	__ASSERT((cfg->port_pin_mask & (gpio_port_pins_t)BIT(pin)) != 0U,
		 "Unsupported pin");

	rc = gpio_port_get(port, &port_val);
	if (rc != 0) {
		return rc;
	}

	/* Extract the single pin's logical level from the port value. */
	return (port_val & (gpio_port_pins_t)BIT(pin)) ? 1 : 0;
}
/**
 * @brief Get logical level of an input pin from a @p gpio_dt_spec.
 *
 * Shorthand for:
 *
 *     gpio_pin_get(spec->port, spec->pin);
 *
 * @param spec GPIO specification from devicetree
 * @return a value from gpio_pin_get()
 */
static inline int gpio_pin_get_dt(const struct gpio_dt_spec *spec)
{
	return gpio_pin_get(spec->port, spec->pin);
}
/**
 * @brief Set physical level of an output pin.
 *
 * Value 0 drives the pin to a low physical level; any non-zero value
 * drives it high. The GPIO_ACTIVE_LOW flag is ignored.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pin Pin number.
 * @param value Value assigned to the pin.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_pin_set_raw(const struct device *port, gpio_pin_t pin,
				   int value)
{
	__unused const struct gpio_driver_config *const cfg =
		(const struct gpio_driver_config *)port->config;
	const gpio_port_pins_t pin_bit = (gpio_port_pins_t)BIT(pin);

	__ASSERT((cfg->port_pin_mask & pin_bit) != 0U, "Unsupported pin");

	/* Non-zero drives the pin high, zero drives it low. */
	return (value != 0) ? gpio_port_set_bits_raw(port, pin_bit)
			    : gpio_port_clear_bits_raw(port, pin_bit);
}
/**
 * @brief Set logical level of an output pin.
 *
 * Writes a logical level honoring GPIO_ACTIVE_LOW: value 0 selects the
 * logical 0 / inactive state, any non-zero value the logical 1 / active
 * state. An Active High pin (the default) is driven to a low physical
 * level when inactive; an Active Low pin is driven to a high physical
 * level when inactive.
 *
 * Note: for an Active High pin this behaves identically to
 * gpio_pin_set_raw().
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pin Pin number.
 * @param value Value assigned to the pin.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_pin_set(const struct device *port, gpio_pin_t pin,
			       int value)
{
	__unused const struct gpio_driver_config *const cfg =
		(const struct gpio_driver_config *)port->config;
	const struct gpio_driver_data *const data =
		(const struct gpio_driver_data *)port->data;
	/* Normalize to 0/1, then flip for active-low pins. */
	int raw_value = (value != 0) ? 1 : 0;

	__ASSERT((cfg->port_pin_mask & (gpio_port_pins_t)BIT(pin)) != 0U,
		 "Unsupported pin");

	if ((data->invert & (gpio_port_pins_t)BIT(pin)) != 0U) {
		raw_value = !raw_value;
	}

	return gpio_pin_set_raw(port, pin, raw_value);
}
/**
 * @brief Set logical level of an output pin from a @p gpio_dt_spec.
 *
 * This is equivalent to:
 *
 *     gpio_pin_set(spec->port, spec->pin, value);
 *
 * @param spec GPIO specification from devicetree
 * @param value Value assigned to the pin.
 * @return a value from gpio_pin_set()
 */
static inline int gpio_pin_set_dt(const struct gpio_dt_spec *spec, int value)
{
	return gpio_pin_set(spec->port, spec->pin, value);
}
/**
 * @brief Toggle pin level.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param pin Pin number.
 *
 * @retval 0 If successful.
 * @retval -EIO I/O error when accessing an external GPIO chip.
 * @retval -EWOULDBLOCK if operation would block.
 */
static inline int gpio_pin_toggle(const struct device *port, gpio_pin_t pin)
{
	__unused const struct gpio_driver_config *const cfg =
		(const struct gpio_driver_config *)port->config;
	const gpio_port_pins_t pin_bit = (gpio_port_pins_t)BIT(pin);

	__ASSERT((cfg->port_pin_mask & (gpio_port_pins_t)BIT(pin)) != 0U,
		 "Unsupported pin");

	return gpio_port_toggle_bits(port, pin_bit);
}
/**
 * @brief Toggle pin level from a @p gpio_dt_spec.
 *
 * Shorthand for:
 *
 *     gpio_pin_toggle(spec->port, spec->pin);
 *
 * @param spec GPIO specification from devicetree
 * @return a value from gpio_pin_toggle()
 */
static inline int gpio_pin_toggle_dt(const struct gpio_dt_spec *spec)
{
	return gpio_pin_toggle(spec->port, spec->pin);
}
/**
 * @brief Helper to initialize a struct gpio_callback properly
 *
 * Populates the handler and pin mask of an application callback structure
 * before it is registered with gpio_add_callback().
 *
 * @param callback A valid Application's callback structure pointer.
 * @param handler A valid handler function pointer.
 * @param pin_mask A bit mask of relevant pins for the handler
 */
static inline void gpio_init_callback(struct gpio_callback *callback,
				      gpio_callback_handler_t handler,
				      gpio_port_pins_t pin_mask)
{
	__ASSERT(callback, "Callback pointer should not be NULL");
	__ASSERT(handler, "Callback handler pointer should not be NULL");

	callback->pin_mask = pin_mask;
	callback->handler = handler;
}
/**
 * @brief Add an application callback.
 *
 * Any number of callbacks may be registered on the same port.
 *
 * @note Callbacks may be added to the device from within a callback
 * handler invocation, but whether they are invoked for the current
 * GPIO event is not specified.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param callback A valid Application's callback structure pointer.
 * @retval 0 If successful
 * @retval -ENOSYS If driver does not implement the operation
 * @retval -errno Other negative errno code on failure.
 */
static inline int gpio_add_callback(const struct device *port,
				    struct gpio_callback *callback)
{
	const struct gpio_driver_api *api =
		(const struct gpio_driver_api *)port->api;

	/* manage_callback is optional; true selects registration. */
	return (api->manage_callback == NULL)
		? -ENOSYS
		: api->manage_callback(port, callback, true);
}
/**
 * @brief Add an application callback.
 *
 * Shorthand for:
 *
 *     gpio_add_callback(spec->port, callback);
 *
 * @param spec GPIO specification from devicetree.
 * @param callback A valid application's callback structure pointer.
 * @return a value from gpio_add_callback().
 */
static inline int gpio_add_callback_dt(const struct gpio_dt_spec *spec,
				       struct gpio_callback *callback)
{
	return gpio_add_callback(spec->port, callback);
}
/**
 * @brief Remove an application callback.
 *
 * As many callbacks as were added through gpio_add_callback() may be
 * removed.
 *
 * @warning It is explicitly permitted, within a callback handler, to
 * remove the registration for the callback that is running, i.e. @p
 * callback. Attempts to remove other registrations on the same
 * device may result in undefined behavior, including failure to
 * invoke callbacks that remain registered and unintended invocation
 * of removed callbacks.
 *
 * @param port Pointer to the device structure for the driver instance.
 * @param callback A valid application's callback structure pointer.
 * @retval 0 If successful
 * @retval -ENOSYS If driver does not implement the operation
 * @retval -errno Other negative errno code on failure.
 */
static inline int gpio_remove_callback(const struct device *port,
				       struct gpio_callback *callback)
{
	const struct gpio_driver_api *api =
		(const struct gpio_driver_api *)port->api;

	/* manage_callback is optional; false selects deregistration. */
	return (api->manage_callback == NULL)
		? -ENOSYS
		: api->manage_callback(port, callback, false);
}
/**
 * @brief Remove an application callback.
 *
 * Shorthand for:
 *
 *     gpio_remove_callback(spec->port, callback);
 *
 * @param spec GPIO specification from devicetree.
 * @param callback A valid application's callback structure pointer.
 * @return a value from gpio_remove_callback().
 */
static inline int gpio_remove_callback_dt(const struct gpio_dt_spec *spec,
					  struct gpio_callback *callback)
{
	return gpio_remove_callback(spec->port, callback);
}
/**
 * @brief Function to get pending interrupts
 *
 * Returns the interrupt status register of the GPIO device. This is
 * especially useful when waking up from low power states to identify
 * the wake-up source.
 *
 * @param dev Pointer to the device structure for the driver instance.
 *
 * @retval status != 0 if at least one gpio interrupt is pending.
 * @retval 0 if no gpio interrupt is pending.
 * @retval -ENOSYS If driver does not implement the operation
 */
__syscall int gpio_get_pending_int(const struct device *dev);

static inline int z_impl_gpio_get_pending_int(const struct device *dev)
{
	const struct gpio_driver_api *api =
		(const struct gpio_driver_api *)dev->api;

	/* get_pending_int is an optional driver operation. */
	return (api->get_pending_int == NULL) ? -ENOSYS
					      : api->get_pending_int(dev);
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/gpio.h>
#endif /* ZEPHYR_INCLUDE_DRIVERS_GPIO_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/gpio.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 14,608 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public APIs for MIPI-DBI drivers
*
* MIPI-DBI defines the following 3 interfaces:
* Type A: Motorola 6800 type parallel bus
* Type B: Intel 8080 type parallel bus
* Type C: SPI Type (1 bit bus) with 3 options:
* 1. 9 write clocks per byte, final bit is command/data selection bit
* 2. Same as above, but 16 write clocks per byte
* 3. 8 write clocks per byte. Command/data selected via GPIO pin
* The current driver interface only supports type C modes 1 and 3
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_MIPI_DBI_H_
#define ZEPHYR_INCLUDE_DRIVERS_MIPI_DBI_H_
/**
* @brief MIPI-DBI driver APIs
* @defgroup mipi_dbi_interface MIPI-DBI driver APIs
* @since 3.6
* @version 0.1.0
* @ingroup io_interfaces
* @{
*/
#include <zephyr/device.h>
#include <zephyr/drivers/display.h>
#include <zephyr/display/mipi_display.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/dt-bindings/mipi_dbi/mipi_dbi.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Initialize a MIPI DBI SPI configuration struct from devicetree
 *
 * This helper allows drivers to initialize a MIPI DBI SPI configuration
 * structure using devicetree. The chip select GPIO is looked up in the
 * parent SPI controller's `cs-gpios` property, indexed by this node's
 * register address.
 * @param node_id Devicetree node identifier for the MIPI DBI device whose
 *                struct spi_config to create an initializer for
 * @param operation_ the desired operation field in the struct spi_config
 * @param delay_ the desired delay field in the struct spi_config's
 *               spi_cs_control, if there is one
 */
#define MIPI_DBI_SPI_CONFIG_DT(node_id, operation_, delay_)		\
	{								\
		.frequency = DT_PROP(node_id, mipi_max_frequency),	\
		.operation = (operation_) |				\
			DT_PROP_OR(node_id, duplex, 0) |		\
			COND_CODE_1(DT_PROP(node_id, mipi_cpol), SPI_MODE_CPOL, (0)) |	\
			COND_CODE_1(DT_PROP(node_id, mipi_cpha), SPI_MODE_CPHA, (0)) |	\
			COND_CODE_1(DT_PROP(node_id, mipi_hold_cs), SPI_HOLD_ON_CS, (0)), \
		.slave = DT_REG_ADDR(node_id),				\
		.cs = {							\
			.gpio = GPIO_DT_SPEC_GET_BY_IDX_OR(DT_PHANDLE(DT_PARENT(node_id), \
							   spi_dev), cs_gpios, \
							   DT_REG_ADDR(node_id), \
							   {}),		\
			.delay = (delay_),				\
		},							\
	}

/**
 * @brief Initialize a MIPI DBI SPI configuration from devicetree instance
 *
 * This helper initializes a MIPI DBI SPI configuration from a devicetree
 * instance. It is equivalent to
 * MIPI_DBI_SPI_CONFIG_DT(DT_DRV_INST(inst), operation_, delay_)
 * @param inst Instance number to initialize configuration from
 * @param operation_ the desired operation field in the struct spi_config
 * @param delay_ the desired delay field in the struct spi_config's
 *               spi_cs_control, if there is one
 */
#define MIPI_DBI_SPI_CONFIG_DT_INST(inst, operation_, delay_)		\
	MIPI_DBI_SPI_CONFIG_DT(DT_DRV_INST(inst), operation_, delay_)

/**
 * @brief Initialize a MIPI DBI configuration from devicetree
 *
 * This helper allows drivers to initialize a MIPI DBI configuration
 * structure from devicetree. It sets the MIPI DBI mode, as well
 * as configuration fields in the SPI configuration structure
 * @param node_id Devicetree node identifier for the MIPI DBI device to
 *                initialize
 * @param operation_ the desired operation field in the struct spi_config
 * @param delay_ the desired delay field in the struct spi_config's
 *               spi_cs_control, if there is one
 */
#define MIPI_DBI_CONFIG_DT(node_id, operation_, delay_)			\
	{								\
		.mode = DT_PROP(node_id, mipi_mode),			\
		.config = MIPI_DBI_SPI_CONFIG_DT(node_id, operation_, delay_), \
	}

/**
 * @brief Initialize a MIPI DBI configuration from device instance
 *
 * Equivalent to MIPI_DBI_CONFIG_DT(DT_DRV_INST(inst), operation_, delay_)
 * @param inst Instance of the device to initialize a MIPI DBI configuration for
 * @param operation_ the desired operation field in the struct spi_config
 * @param delay_ the desired delay field in the struct spi_config's
 *               spi_cs_control, if there is one
 */
#define MIPI_DBI_CONFIG_DT_INST(inst, operation_, delay_)		\
	MIPI_DBI_CONFIG_DT(DT_DRV_INST(inst), operation_, delay_)
/**
 * @brief MIPI DBI controller configuration
 *
 * Configuration for MIPI DBI controller write. Passed to every controller
 * operation that accesses the attached display.
 */
struct mipi_dbi_config {
	/** MIPI DBI mode (SPI 3 wire or 4 wire) */
	uint8_t mode;
	/** SPI configuration */
	struct spi_config config;
};

/** MIPI-DBI host driver API (each member implements the like-named mipi_dbi_*() call) */
__subsystem struct mipi_dbi_driver_api {
	/** See mipi_dbi_command_write() */
	int (*command_write)(const struct device *dev,
			     const struct mipi_dbi_config *config, uint8_t cmd,
			     const uint8_t *data, size_t len);
	/** See mipi_dbi_command_read() */
	int (*command_read)(const struct device *dev,
			    const struct mipi_dbi_config *config, uint8_t *cmds,
			    size_t num_cmds, uint8_t *response, size_t len);
	/** See mipi_dbi_write_display() */
	int (*write_display)(const struct device *dev,
			     const struct mipi_dbi_config *config,
			     const uint8_t *framebuf,
			     struct display_buffer_descriptor *desc,
			     enum display_pixel_format pixfmt);
	/** See mipi_dbi_reset() */
	int (*reset)(const struct device *dev, k_timeout_t delay);
	/** See mipi_dbi_release() */
	int (*release)(const struct device *dev,
		       const struct mipi_dbi_config *config);
};
/**
 * @brief Write a command to the display controller
 *
 * Sends a command, optionally followed by a data buffer, to the display.
 * Passing a NULL data buffer with length 0 sends the command alone. Note
 * that if the SPI configuration passed to this function locks the SPI bus,
 * it is the caller's responsibility to release it with mipi_dbi_release().
 *
 * @param dev mipi dbi controller
 * @param config MIPI DBI configuration
 * @param cmd command to write to display controller
 * @param data optional data buffer to write after command
 * @param len size of data buffer in bytes. Set to 0 to skip sending data.
 * @retval 0 command write succeeded
 * @retval -EIO I/O error
 * @retval -ETIMEDOUT transfer timed out
 * @retval -EBUSY controller is busy
 * @retval -ENOSYS not implemented
 */
static inline int mipi_dbi_command_write(const struct device *dev,
					 const struct mipi_dbi_config *config,
					 uint8_t cmd, const uint8_t *data,
					 size_t len)
{
	const struct mipi_dbi_driver_api *api =
		(const struct mipi_dbi_driver_api *)dev->api;

	/* command_write is an optional driver operation. */
	return (api->command_write == NULL)
		? -ENOSYS
		: api->command_write(dev, config, cmd, data, len);
}
/**
 * @brief Read a command response from the display controller
 *
 * Issues the given one-byte commands and reads back the controller's
 * response into @p response.
 *
 * @param dev mipi dbi controller
 * @param config MIPI DBI configuration
 * @param cmds array of one byte commands to send to display controller
 * @param num_cmd number of commands to write to display controller
 * @param response response buffer, filled with display controller response
 * @param len size of response buffer in bytes.
 * @retval 0 command read succeeded
 * @retval -EIO I/O error
 * @retval -ETIMEDOUT transfer timed out
 * @retval -EBUSY controller is busy
 * @retval -ENOSYS not implemented
 */
static inline int mipi_dbi_command_read(const struct device *dev,
					const struct mipi_dbi_config *config,
					uint8_t *cmds, size_t num_cmd,
					uint8_t *response, size_t len)
{
	const struct mipi_dbi_driver_api *api =
		(const struct mipi_dbi_driver_api *)dev->api;

	/* command_read is an optional driver operation. */
	return (api->command_read == NULL)
		? -ENOSYS
		: api->command_read(dev, config, cmds, num_cmd, response, len);
}
/**
 * @brief Write a display buffer to the display controller.
 *
 * Writes a display buffer to the controller. If the controller requires
 * a "Write memory" command before writing display data, this should be
 * sent with @ref mipi_dbi_command_write
 *
 * @param dev mipi dbi controller
 * @param config MIPI DBI configuration
 * @param framebuf framebuffer to write to display
 * @param desc descriptor of framebuffer to write. Note that the pitch must
 *        be equal to width. "buf_size" field determines how many bytes will
 *        be written.
 * @param pixfmt pixel format of framebuffer data
 * @retval 0 buffer write succeeded.
 * @retval -EIO I/O error
 * @retval -ETIMEDOUT transfer timed out
 * @retval -EBUSY controller is busy
 * @retval -ENOSYS not implemented
 */
static inline int mipi_dbi_write_display(const struct device *dev,
					 const struct mipi_dbi_config *config,
					 const uint8_t *framebuf,
					 struct display_buffer_descriptor *desc,
					 enum display_pixel_format pixfmt)
{
	const struct mipi_dbi_driver_api *api =
		(const struct mipi_dbi_driver_api *)dev->api;

	/* write_display is an optional driver operation. */
	return (api->write_display == NULL)
		? -ENOSYS
		: api->write_display(dev, config, framebuf, desc, pixfmt);
}
/**
 * @brief Resets attached display controller
 *
 * Asserts the reset signal to the attached display controller for the
 * requested duration.
 *
 * @param dev mipi dbi controller
 * @param delay_ms duration to set reset signal for, in milliseconds
 * @retval 0 reset succeeded
 * @retval -EIO I/O error
 * @retval -ENOSYS not implemented
 * @retval -ENOTSUP not supported
 */
static inline int mipi_dbi_reset(const struct device *dev, uint32_t delay_ms)
{
	const struct mipi_dbi_driver_api *api =
		(const struct mipi_dbi_driver_api *)dev->api;

	/* reset is an optional driver operation. */
	return (api->reset == NULL) ? -ENOSYS
				    : api->reset(dev, K_MSEC(delay_ms));
}
/**
 * @brief Releases a locked MIPI DBI device.
 *
 * Releases a lock on a MIPI DBI device and/or the device's CS line if and
 * only if the given config parameter was the last one to be used in any
 * of the above functions, and if it has the SPI_LOCK_ON bit set and/or
 * the SPI_HOLD_ON_CS bit set into its operation bits field.
 * This lock functions exactly like the SPI lock, and can be used if the caller
 * needs to keep CS asserted for multiple transactions, or the MIPI DBI device
 * locked.
 * @param dev mipi dbi controller
 * @param config MIPI DBI configuration
 * @retval 0 release succeeded
 * @retval -EIO I/O error
 * @retval -ENOSYS not implemented
 * @retval -ENOTSUP not supported
 */
static inline int mipi_dbi_release(const struct device *dev,
				   const struct mipi_dbi_config *config)
{
	const struct mipi_dbi_driver_api *api =
		(const struct mipi_dbi_driver_api *)dev->api;

	/* release is an optional driver operation. */
	if (api->release == NULL) {
		return -ENOSYS;
	}
	return api->release(dev, config);
}
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_DRIVERS_MIPI_DBI_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/mipi_dbi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,705 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_WATCHDOG_H_
#define ZEPHYR_INCLUDE_DRIVERS_WATCHDOG_H_
/**
* @brief Watchdog Interface
* @defgroup watchdog_interface Watchdog Interface
* @since 1.0
* @version 1.0.0
* @ingroup io_interfaces
* @{
*/
#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/device.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @name Watchdog options
 * @anchor WDT_OPT
 * @{
 */

/** @brief Pause watchdog timer when CPU is in sleep state. */
#define WDT_OPT_PAUSE_IN_SLEEP	BIT(0)

/** @brief Pause watchdog timer when CPU is halted by the debugger. */
#define WDT_OPT_PAUSE_HALTED_BY_DBG	BIT(1)

/** @} */

/**
 * @name Watchdog behavior flags
 * @anchor WDT_FLAGS
 * @{
 */

/** @cond INTERNAL_HIDDEN */
/** @brief Watchdog reset flag bit field mask shift. */
#define WDT_FLAG_RESET_SHIFT	(0)

/** @brief Watchdog reset flag bit field mask (2-bit field). */
#define WDT_FLAG_RESET_MASK	(0x3 << WDT_FLAG_RESET_SHIFT)
/** @endcond */

/** Reset: none (timeout expiry triggers no reset) */
#define WDT_FLAG_RESET_NONE	(0 << WDT_FLAG_RESET_SHIFT)

/** Reset: CPU core */
#define WDT_FLAG_RESET_CPU_CORE	(1 << WDT_FLAG_RESET_SHIFT)

/** Reset: SoC */
#define WDT_FLAG_RESET_SOC	(2 << WDT_FLAG_RESET_SHIFT)

/** @} */

/**
 * @brief Watchdog timeout window.
 *
 * Each installed timeout needs feeding within the specified time window,
 * otherwise the watchdog will trigger. If the watchdog instance does not
 * support window timeouts then min value must be equal to 0.
 *
 * @note If specified values can not be precisely set they are always rounded
 * up.
 */
struct wdt_window {
	/** Lower limit of watchdog feed timeout in milliseconds. */
	uint32_t min;
	/** Upper limit of watchdog feed timeout in milliseconds. */
	uint32_t max;
};

/**
 * @brief Watchdog callback, invoked when a timeout expires.
 *
 * @param dev Watchdog device instance.
 * @param channel_id Channel identifier, as returned by wdt_install_timeout().
 */
typedef void (*wdt_callback_t)(const struct device *dev, int channel_id);

/** @brief Watchdog timeout configuration. */
struct wdt_timeout_cfg {
	/** Timing parameters of watchdog timeout. */
	struct wdt_window window;
	/** Timeout callback (can be `NULL`). */
	wdt_callback_t callback;
#if defined(CONFIG_WDT_MULTISTAGE) || defined(__DOXYGEN__)
	/**
	 * Pointer to the next timeout configuration.
	 *
	 * This field is only available if @kconfig{CONFIG_WDT_MULTISTAGE} is
	 * enabled (watchdogs with staged timeouts functionality). Value must be
	 * `NULL` for single stage timeout.
	 */
	struct wdt_timeout_cfg *next;
#endif
	/** Flags (see @ref WDT_FLAGS). */
	uint8_t flags;
};
/** @cond INTERNAL_HIDDEN */

/**
 * @brief Callback API for setting up watchdog instance.
 * @see wdt_setup().
 */
typedef int (*wdt_api_setup)(const struct device *dev, uint8_t options);

/**
 * @brief Callback API for disabling watchdog instance.
 * @see wdt_disable().
 */
typedef int (*wdt_api_disable)(const struct device *dev);

/**
 * @brief Callback API for installing new timeout.
 * @see wdt_install_timeout().
 */
typedef int (*wdt_api_install_timeout)(const struct device *dev,
				       const struct wdt_timeout_cfg *cfg);

/**
 * @brief Callback API for feeding specified watchdog timeout.
 * @see wdt_feed().
 */
typedef int (*wdt_api_feed)(const struct device *dev, int channel_id);

/** Watchdog driver API: each member implements the like-named wdt_*() call. */
__subsystem struct wdt_driver_api {
	wdt_api_setup setup;
	wdt_api_disable disable;
	wdt_api_install_timeout install_timeout;
	wdt_api_feed feed;
};

/**
 * @endcond
 */
/**
 * @brief Set up watchdog instance.
 *
 * Configures global watchdog settings affecting all timeouts. Call it
 * after installing timeouts. After a successful return, all installed
 * timeouts are valid and must be serviced periodically by calling
 * wdt_feed().
 *
 * @param dev Watchdog device instance.
 * @param options Configuration options (see @ref WDT_OPT).
 *
 * @retval 0 If successful.
 * @retval -ENOTSUP If any of the set options is not supported.
 * @retval -EBUSY If watchdog instance has been already setup.
 * @retval -errno In case of any other failure.
 */
__syscall int wdt_setup(const struct device *dev, uint8_t options);

static inline int z_impl_wdt_setup(const struct device *dev, uint8_t options)
{
	/* Dispatch straight to the driver's setup operation. */
	return ((const struct wdt_driver_api *)dev->api)->setup(dev, options);
}
/**
* @brief Disable watchdog instance.
*
* This function disables the watchdog instance and automatically uninstalls all
* timeouts. To set up a new watchdog, install timeouts and call wdt_setup()
* again. Not all watchdogs can be restarted after they are disabled.
*
* @param dev Watchdog device instance.
*
* @retval 0 If successful.
* @retval -EFAULT If watchdog instance is not enabled.
* @retval -EPERM If watchdog can not be disabled directly by application code.
* @retval -errno In case of any other failure.
*/
__syscall int wdt_disable(const struct device *dev);

/* Syscall implementation: dispatch straight to the driver's disable entry. */
static inline int z_impl_wdt_disable(const struct device *dev)
{
	return ((const struct wdt_driver_api *)dev->api)->disable(dev);
}
/**
* @brief Install a new timeout.
*
* @note This function must be used before wdt_setup(). Changes applied here
* have no effects until wdt_setup() is called.
*
* @param dev Watchdog device instance.
* @param[in] cfg Timeout configuration.
*
* @retval channel_id If successful, a non-negative value indicating the index
* of the channel to which the timeout was assigned. This value is supposed to
* be used as the parameter in calls to wdt_feed().
* @retval -EBUSY If timeout can not be installed while watchdog has already
* been setup.
* @retval -ENOMEM If no more timeouts can be installed.
* @retval -ENOTSUP If any of the set flags is not supported.
* @retval -EINVAL If any of the window timeout value is out of possible range.
* This value is also returned if watchdog supports only one timeout value for
* all timeouts and the supplied timeout window differs from windows for alarms
* installed so far.
* @retval -errno In case of any other failure.
*/
static inline int wdt_install_timeout(const struct device *dev,
				      const struct wdt_timeout_cfg *cfg)
{
	/* Forward directly to the driver's install_timeout entry. */
	return ((const struct wdt_driver_api *)dev->api)->install_timeout(dev,
									  cfg);
}
/**
* @brief Feed specified watchdog timeout.
*
* @param dev Watchdog device instance.
* @param channel_id Channel index.
*
* @retval 0 If successful.
* @retval -EAGAIN If completing the feed operation would stall the caller, for
* example due to an in-progress watchdog operation such as a previous
* wdt_feed() call.
* @retval -EINVAL If there is no installed timeout for supplied channel.
* @retval -errno In case of any other failure.
*/
__syscall int wdt_feed(const struct device *dev, int channel_id);

/* Syscall implementation: dispatch straight to the driver's feed entry. */
static inline int z_impl_wdt_feed(const struct device *dev, int channel_id)
{
	return ((const struct wdt_driver_api *)dev->api)->feed(dev, channel_id);
}
#ifdef __cplusplus
}
#endif
/** @} */
#include <zephyr/syscalls/watchdog.h>
#endif /* ZEPHYR_INCLUDE_DRIVERS_WATCHDOG_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/watchdog.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,717 |
```objective-c
/**
* @file
*
* @brief Public APIs for the I2C emulation drivers.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_I2C_I2C_EMUL_H_
#define ZEPHYR_INCLUDE_DRIVERS_I2C_I2C_EMUL_H_
#include <zephyr/device.h>
#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/sys/slist.h>
#include <zephyr/types.h>
/**
* @brief I2C Emulation Interface
* @defgroup i2c_emul_interface I2C Emulation Interface
* @ingroup io_emulators
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
struct i2c_msg;
struct i2c_emul_api;
/** Node in a linked list of emulators for I2C devices */
struct i2c_emul {
	/** Linked-list node (see i2c_emul_register()). */
	sys_snode_t node;
	/** Target emulator - REQUIRED for all emulated bus nodes of any type */
	const struct emul *target;
	/** API provided for this device */
	const struct i2c_emul_api *api;
	/**
	 * A mock API that if not NULL will take precedence over the actual API. If set, a return
	 * value of -ENOSYS will revert back to the default api.
	 */
	struct i2c_emul_api *mock_api;
	/** I2C address of the emulated device */
	uint16_t addr;
};
/**
* Passes I2C messages to the emulator. The emulator updates the data with what
* was read back.
*
* @param target The device Emulator instance.
* @param msgs Array of messages to transfer. For 'read' messages, this function
* updates the 'buf' member with the data that was read.
* @param num_msgs Number of messages to transfer.
* @param addr Address of the I2C target device.
*
* @retval 0 If successful.
* @retval -EIO General input / output error.
*/
typedef int (*i2c_emul_transfer_t)(const struct emul *target, struct i2c_msg *msgs, int num_msgs,
				   int addr);

/**
 * Register an emulated device on the controller
 *
 * @param dev Device that will use the emulator
 * @param emul I2C emulator to use
 * @return 0 indicating success (always)
 */
int i2c_emul_register(const struct device *dev, struct i2c_emul *emul);

/** Definition of the emulator API */
struct i2c_emul_api {
	/** Message transfer handler; see @ref i2c_emul_transfer_t. */
	i2c_emul_transfer_t transfer;
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_DRIVERS_I2C_I2C_EMUL_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/i2c_emul.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 587 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for SPI drivers and applications
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_SPI_H_
#define ZEPHYR_INCLUDE_DRIVERS_SPI_H_
/**
* @brief SPI Interface
* @defgroup spi_interface SPI Interface
* @since 1.0
* @version 1.0.0
* @ingroup io_interfaces
* @{
*/
#include <zephyr/types.h>
#include <stddef.h>
#include <zephyr/device.h>
#include <zephyr/dt-bindings/spi/spi.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/stats/stats.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @name SPI operational mode
* @{
*/
#define SPI_OP_MODE_MASTER 0U /**< Master mode. */
#define SPI_OP_MODE_SLAVE BIT(0) /**< Slave mode. */
/** @cond INTERNAL_HIDDEN */
#define SPI_OP_MODE_MASK 0x1U
/** @endcond */
/** Get SPI operational mode. */
#define SPI_OP_MODE_GET(_operation_) ((_operation_) & SPI_OP_MODE_MASK)
/** @} */
/**
* @name SPI Polarity & Phase Modes
* @{
*/
/**
* Clock Polarity: if set, clock idle state will be 1
* and active state will be 0. If untouched, the inverse will be true
* which is the default.
*/
#define SPI_MODE_CPOL BIT(1)
/**
* Clock Phase: this dictates when is the data captured, and depends
* clock's polarity. When SPI_MODE_CPOL is set and this bit as well,
* capture will occur on low to high transition and high to low if
* this bit is not set (default). This is fully reversed if CPOL is
* not set.
*/
#define SPI_MODE_CPHA BIT(2)
/**
* Whatever data is transmitted is looped-back to the receiving buffer of
* the controller. This is fully controller dependent as some may not
* support this, and can be used for testing purposes only.
*/
#define SPI_MODE_LOOP BIT(3)
/** @cond INTERNAL_HIDDEN */
#define SPI_MODE_MASK (0xEU)
/** @endcond */
/** Get SPI polarity and phase mode bits. */
#define SPI_MODE_GET(_mode_) \
((_mode_) & SPI_MODE_MASK)
/** @} */
/**
* @name SPI Transfer modes (host controller dependent)
* @{
*/
#define SPI_TRANSFER_MSB (0U) /**< Most significant bit first. */
#define SPI_TRANSFER_LSB BIT(4) /**< Least significant bit first. */
/** @} */
/**
* @name SPI word size
* @{
*/
/** @cond INTERNAL_HIDDEN */
#define SPI_WORD_SIZE_SHIFT (5U)
#define SPI_WORD_SIZE_MASK (0x3FU << SPI_WORD_SIZE_SHIFT)
/** @endcond */
/** Get SPI word size. */
#define SPI_WORD_SIZE_GET(_operation_) \
(((_operation_) & SPI_WORD_SIZE_MASK) >> SPI_WORD_SIZE_SHIFT)
/** Set SPI word size. */
#define SPI_WORD_SET(_word_size_) \
((_word_size_) << SPI_WORD_SIZE_SHIFT)
/** @} */
/**
* @name Specific SPI devices control bits
* @{
*/
/** Requests - if possible - to keep CS asserted after the transaction */
#define SPI_HOLD_ON_CS BIT(12)
/** Keep the device locked after the transaction for the current config.
* Use this with extreme caution (see spi_release() below) as it will
* prevent other callers to access the SPI device until spi_release() is
* properly called.
*/
#define SPI_LOCK_ON BIT(13)
/** Active high logic on CS. Usually, and by default, CS logic is active
* low. However, some devices may require the reverse logic: active high.
* This bit will request the controller to use that logic. Note that not
* all controllers are able to handle that natively. In this case deferring
* the CS control to a gpio line through struct spi_cs_control would be
* the solution.
*/
#define SPI_CS_ACTIVE_HIGH BIT(14)
/** @} */
/**
* @name SPI MISO lines
* @{
*
* Some controllers support dual, quad or octal MISO lines connected to slaves.
* Default is single, which is the case most of the time.
* Without @kconfig{CONFIG_SPI_EXTENDED_MODES} being enabled, single is the
* only supported one.
*/
#define SPI_LINES_SINGLE (0U << 16) /**< Single line */
#define SPI_LINES_DUAL (1U << 16) /**< Dual lines */
#define SPI_LINES_QUAD (2U << 16) /**< Quad lines */
#define SPI_LINES_OCTAL (3U << 16) /**< Octal lines */
#define SPI_LINES_MASK (0x3U << 16) /**< Mask for MISO lines in spi_operation_t */
/** @} */
/**
* @brief SPI Chip Select control structure
*
* This can be used to control a CS line via a GPIO line, instead of
* using the controller inner CS logic.
*
*/
struct spi_cs_control {
/**
* GPIO devicetree specification of CS GPIO.
* The device pointer can be set to NULL to fully inhibit CS control if
* necessary. The GPIO flags GPIO_ACTIVE_LOW/GPIO_ACTIVE_HIGH should be
* equivalent to SPI_CS_ACTIVE_HIGH/SPI_CS_ACTIVE_LOW options in struct
* spi_config.
*/
struct gpio_dt_spec gpio;
/**
* Delay in microseconds to wait before starting the
* transmission and before releasing the CS line.
*/
uint32_t delay;
};
/**
* @brief Get a <tt>struct gpio_dt_spec</tt> for a SPI device's chip select pin
*
* Example devicetree fragment:
*
* @code{.devicetree}
* gpio1: gpio@abcd0001 { ... };
*
* gpio2: gpio@abcd0002 { ... };
*
* spi@abcd0003 {
* compatible = "vnd,spi";
* cs-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>,
* <&gpio2 20 GPIO_ACTIVE_LOW>;
*
* a: spi-dev-a@0 {
* reg = <0>;
* };
*
* b: spi-dev-b@1 {
* reg = <1>;
* };
* };
* @endcode
*
* Example usage:
*
* @code{.c}
* SPI_CS_GPIOS_DT_SPEC_GET(DT_NODELABEL(a)) \
* // { DEVICE_DT_GET(DT_NODELABEL(gpio1)), 10, GPIO_ACTIVE_LOW }
* SPI_CS_GPIOS_DT_SPEC_GET(DT_NODELABEL(b)) \
* // { DEVICE_DT_GET(DT_NODELABEL(gpio2)), 20, GPIO_ACTIVE_LOW }
* @endcode
*
* @param spi_dev a SPI device node identifier
* @return #gpio_dt_spec struct corresponding with spi_dev's chip select
*/
#define SPI_CS_GPIOS_DT_SPEC_GET(spi_dev) \
GPIO_DT_SPEC_GET_BY_IDX_OR(DT_BUS(spi_dev), cs_gpios, \
DT_REG_ADDR(spi_dev), {})
/**
* @brief Get a <tt>struct gpio_dt_spec</tt> for a SPI device's chip select pin
*
* This is equivalent to
* <tt>SPI_CS_GPIOS_DT_SPEC_GET(DT_DRV_INST(inst))</tt>.
*
* @param inst Devicetree instance number
* @return #gpio_dt_spec struct corresponding with spi_dev's chip select
*/
#define SPI_CS_GPIOS_DT_SPEC_INST_GET(inst) \
SPI_CS_GPIOS_DT_SPEC_GET(DT_DRV_INST(inst))
/**
* @brief Initialize and get a pointer to a @p spi_cs_control from a
* devicetree node identifier
*
* This helper is useful for initializing a device on a SPI bus. It
* initializes a struct spi_cs_control and returns a pointer to it.
* Here, @p node_id is a node identifier for a SPI device, not a SPI
* controller.
*
* Example devicetree fragment:
*
* @code{.devicetree}
* spi@abcd0001 {
* cs-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
* spidev: spi-device@0 { ... };
* };
* @endcode
*
* Example usage:
*
* @code{.c}
* struct spi_cs_control ctrl =
* SPI_CS_CONTROL_INIT(DT_NODELABEL(spidev), 2);
* @endcode
*
* This example is equivalent to:
*
* @code{.c}
* struct spi_cs_control ctrl = {
* .gpio = SPI_CS_GPIOS_DT_SPEC_GET(DT_NODELABEL(spidev)),
* .delay = 2,
* };
* @endcode
*
* @param node_id Devicetree node identifier for a device on a SPI bus
* @param delay_ The @p delay field to set in the @p spi_cs_control
* @return a pointer to the @p spi_cs_control structure
*/
#define SPI_CS_CONTROL_INIT(node_id, delay_) \
{ \
.gpio = SPI_CS_GPIOS_DT_SPEC_GET(node_id), \
.delay = (delay_), \
}
/**
* @brief Get a pointer to a @p spi_cs_control from a devicetree node
*
* This is equivalent to
* <tt>SPI_CS_CONTROL_INIT(DT_DRV_INST(inst), delay)</tt>.
*
* Therefore, @p DT_DRV_COMPAT must already be defined before using
* this macro.
*
* @param inst Devicetree node instance number
* @param delay_ The @p delay field to set in the @p spi_cs_control
* @return a pointer to the @p spi_cs_control structure
*/
#define SPI_CS_CONTROL_INIT_INST(inst, delay_) \
SPI_CS_CONTROL_INIT(DT_DRV_INST(inst), delay_)
/**
* @typedef spi_operation_t
* Opaque type to hold the SPI operation flags.
*/
#if defined(CONFIG_SPI_EXTENDED_MODES)
typedef uint32_t spi_operation_t;
#else
typedef uint16_t spi_operation_t;
#endif
/**
* @brief SPI controller configuration structure
*/
struct spi_config {
/** @brief Bus frequency in Hertz. */
uint32_t frequency;
/**
* @brief Operation flags.
*
* It is a bit field with the following parts:
*
* - 0: Master or slave.
* - 1..3: Polarity, phase and loop mode.
* - 4: LSB or MSB first.
* - 5..10: Size of a data frame in bits.
* - 11: Full/half duplex.
* - 12: Hold on the CS line if possible.
* - 13: Keep resource locked for the caller.
* - 14: Active high CS logic.
* - 15: Motorola or TI frame format (optional).
*
* If @kconfig{CONFIG_SPI_EXTENDED_MODES} is enabled:
*
* - 16..17: MISO lines (Single/Dual/Quad/Octal).
* - 18..31: Reserved for future use.
*/
spi_operation_t operation;
/** @brief Slave number from 0 to host controller slave limit. */
uint16_t slave;
/**
* @brief GPIO chip-select line (optional, must be initialized to zero
* if not used).
*/
struct spi_cs_control cs;
};
/**
* @brief Structure initializer for spi_config from devicetree
*
* This helper macro expands to a static initializer for a <tt>struct
* spi_config</tt> by reading the relevant @p frequency, @p slave, and
* @p cs data from the devicetree.
*
* @param node_id Devicetree node identifier for the SPI device whose
* struct spi_config to create an initializer for
* @param operation_ the desired @p operation field in the struct spi_config
* @param delay_ the desired @p delay field in the struct spi_config's
* spi_cs_control, if there is one
*/
#define SPI_CONFIG_DT(node_id, operation_, delay_) \
{ \
.frequency = DT_PROP(node_id, spi_max_frequency), \
.operation = (operation_) | \
DT_PROP(node_id, duplex) | \
DT_PROP(node_id, frame_format) | \
COND_CODE_1(DT_PROP(node_id, spi_cpol), SPI_MODE_CPOL, (0)) | \
COND_CODE_1(DT_PROP(node_id, spi_cpha), SPI_MODE_CPHA, (0)) | \
COND_CODE_1(DT_PROP(node_id, spi_hold_cs), SPI_HOLD_ON_CS, (0)), \
.slave = DT_REG_ADDR(node_id), \
.cs = SPI_CS_CONTROL_INIT(node_id, delay_), \
}
/**
* @brief Structure initializer for spi_config from devicetree instance
*
* This is equivalent to
* <tt>SPI_CONFIG_DT(DT_DRV_INST(inst), operation_, delay_)</tt>.
*
* @param inst Devicetree instance number
* @param operation_ the desired @p operation field in the struct spi_config
* @param delay_ the desired @p delay field in the struct spi_config's
* spi_cs_control, if there is one
*/
#define SPI_CONFIG_DT_INST(inst, operation_, delay_) \
SPI_CONFIG_DT(DT_DRV_INST(inst), operation_, delay_)
/**
* @brief Complete SPI DT information
*/
struct spi_dt_spec {
/** SPI bus */
const struct device *bus;
/** Slave specific configuration */
struct spi_config config;
};
/**
* @brief Structure initializer for spi_dt_spec from devicetree
*
* This helper macro expands to a static initializer for a <tt>struct
* spi_dt_spec</tt> by reading the relevant bus, frequency, slave, and cs
* data from the devicetree.
*
* Important: multiple fields are automatically constructed by this macro
* which must be checked before use. @ref spi_is_ready_dt performs the required
* @ref device_is_ready checks.
*
* @param node_id Devicetree node identifier for the SPI device whose
* struct spi_dt_spec to create an initializer for
* @param operation_ the desired @p operation field in the struct spi_config
* @param delay_ the desired @p delay field in the struct spi_config's
* spi_cs_control, if there is one
*/
#define SPI_DT_SPEC_GET(node_id, operation_, delay_) \
{ \
.bus = DEVICE_DT_GET(DT_BUS(node_id)), \
.config = SPI_CONFIG_DT(node_id, operation_, delay_) \
}
/**
* @brief Structure initializer for spi_dt_spec from devicetree instance
*
* This is equivalent to
* <tt>SPI_DT_SPEC_GET(DT_DRV_INST(inst), operation_, delay_)</tt>.
*
* @param inst Devicetree instance number
* @param operation_ the desired @p operation field in the struct spi_config
* @param delay_ the desired @p delay field in the struct spi_config's
* spi_cs_control, if there is one
*/
#define SPI_DT_SPEC_INST_GET(inst, operation_, delay_) \
SPI_DT_SPEC_GET(DT_DRV_INST(inst), operation_, delay_)
/**
* @brief Value that will never compare true with any valid overrun character
*/
#define SPI_MOSI_OVERRUN_UNKNOWN 0x100
/**
* @brief The value sent on MOSI when all TX bytes are sent, but RX continues
*
* For drivers where the MOSI line state when receiving is important, this value
* can be queried at compile-time to determine whether allocating a constant
* array is necessary.
*
* @param node_id Devicetree node identifier for the SPI device to query
*
* @retval SPI_MOSI_OVERRUN_UNKNOWN if controller does not export the value
* @retval byte default MOSI value otherwise
*/
#define SPI_MOSI_OVERRUN_DT(node_id) \
DT_PROP_OR(node_id, overrun_character, SPI_MOSI_OVERRUN_UNKNOWN)
/**
* @brief The value sent on MOSI when all TX bytes are sent, but RX continues
*
* This is equivalent to
* <tt>SPI_MOSI_OVERRUN_DT(DT_DRV_INST(inst))</tt>.
*
* @param inst Devicetree instance number
*
* @retval SPI_MOSI_OVERRUN_UNKNOWN if controller does not export the value
* @retval byte default MOSI value otherwise
*/
#define SPI_MOSI_OVERRUN_DT_INST(inst) \
DT_INST_PROP_OR(inst, overrun_character, SPI_MOSI_OVERRUN_UNKNOWN)
/**
* @brief SPI buffer structure
*/
struct spi_buf {
/** Valid pointer to a data buffer, or NULL otherwise */
void *buf;
/** Length of the buffer @a buf.
* If @a buf is NULL, length which as to be sent as dummy bytes (as TX
* buffer) or the length of bytes that should be skipped (as RX buffer).
*/
size_t len;
};
/**
* @brief SPI buffer array structure
*/
struct spi_buf_set {
/** Pointer to an array of spi_buf, or NULL */
const struct spi_buf *buffers;
/** Length of the array pointed by @a buffers */
size_t count;
};
#if defined(CONFIG_SPI_STATS)
STATS_SECT_START(spi)
STATS_SECT_ENTRY32(rx_bytes)
STATS_SECT_ENTRY32(tx_bytes)
STATS_SECT_ENTRY32(transfer_error)
STATS_SECT_END;
STATS_NAME_START(spi)
STATS_NAME(spi, rx_bytes)
STATS_NAME(spi, tx_bytes)
STATS_NAME(spi, transfer_error)
STATS_NAME_END(spi);
/**
* @brief SPI specific device state which allows for SPI device class specific additions
*/
struct spi_device_state {
struct device_state devstate;
struct stats_spi stats;
};
/**
* @brief Get pointer to SPI statistics structure
*/
#define Z_SPI_GET_STATS(dev_) \
CONTAINER_OF(dev_->state, struct spi_device_state, devstate)->stats
/**
* @brief Increment the rx bytes for a SPI device
*
* @param dev_ Pointer to the device structure for the driver instance.
*/
#define SPI_STATS_RX_BYTES_INCN(dev_, n) \
STATS_INCN(Z_SPI_GET_STATS(dev_), rx_bytes, n)
/**
* @brief Increment the tx bytes for a SPI device
*
* @param dev_ Pointer to the device structure for the driver instance.
*/
#define SPI_STATS_TX_BYTES_INCN(dev_, n) \
STATS_INCN(Z_SPI_GET_STATS(dev_), tx_bytes, n)
/**
* @brief Increment the transfer error counter for a SPI device
*
* The transfer error count is incremented when there occurred a transfer error
*
* @param dev_ Pointer to the device structure for the driver instance.
*/
#define SPI_STATS_TRANSFER_ERROR_INC(dev_) \
STATS_INC(Z_SPI_GET_STATS(dev_), transfer_error)
/**
* @brief Define a statically allocated and section assigned SPI device state
*/
#define Z_SPI_DEVICE_STATE_DEFINE(dev_id) \
static struct spi_device_state Z_DEVICE_STATE_NAME(dev_id) \
__attribute__((__section__(".z_devstate")));
/**
* @brief Define an SPI device init wrapper function
*
* This does device instance specific initialization of common data (such as stats)
* and calls the given init_fn
*/
#define Z_SPI_INIT_FN(dev_id, init_fn) \
static inline int UTIL_CAT(dev_id, _init)(const struct device *dev) \
{ \
struct spi_device_state *state = \
CONTAINER_OF(dev->state, struct spi_device_state, devstate); \
stats_init(&state->stats.s_hdr, STATS_SIZE_32, 3, \
STATS_NAME_INIT_PARMS(spi)); \
stats_register(dev->name, &(state->stats.s_hdr)); \
return init_fn(dev); \
}
/**
* @brief Like DEVICE_DT_DEFINE() with SPI specifics.
*
* @details Defines a device which implements the SPI API. May
* generate a custom device_state container struct and init_fn
* wrapper when needed depending on SPI @kconfig{CONFIG_SPI_STATS}.
*
* @param node_id The devicetree node identifier.
* @param init_fn Name of the init function of the driver.
* @param pm_device PM device resources reference (NULL if device does not use PM).
* @param data_ptr Pointer to the device's private data.
* @param cfg_ptr The address to the structure containing the configuration
* information for this instance of the driver.
* @param level The initialization level. See SYS_INIT() for details.
* @param prio Priority within the selected initialization level. See SYS_INIT()
* for details.
* @param api_ptr Provides an initial pointer to the API function struct used by
* the driver. Can be NULL.
*/
#define SPI_DEVICE_DT_DEFINE(node_id, init_fn, pm_device, \
data_ptr, cfg_ptr, level, prio, \
api_ptr, ...) \
Z_SPI_DEVICE_STATE_DEFINE(Z_DEVICE_DT_DEV_ID(node_id)); \
Z_SPI_INIT_FN(Z_DEVICE_DT_DEV_ID(node_id), init_fn) \
Z_DEVICE_DEFINE(node_id, Z_DEVICE_DT_DEV_ID(node_id), \
DEVICE_DT_NAME(node_id), \
&UTIL_CAT(Z_DEVICE_DT_DEV_ID(node_id), _init), \
pm_device, \
data_ptr, cfg_ptr, level, prio, \
api_ptr, \
&(Z_DEVICE_STATE_NAME(Z_DEVICE_DT_DEV_ID(node_id)).devstate), \
__VA_ARGS__)
/**
 * @brief Update SPI statistics for a completed transceive operation.
 *
 * Increments the error counter when @p error is non-zero and accounts the
 * bytes of every buffer in the TX and RX sets.
 *
 * @param dev SPI device the transfer ran on.
 * @param error Transfer result (0 on success, -errno on failure).
 * @param tx_bufs TX buffer set, or NULL if none.
 * @param rx_bufs RX buffer set, or NULL if none.
 */
static inline void spi_transceive_stats(const struct device *dev, int error,
					const struct spi_buf_set *tx_bufs,
					const struct spi_buf_set *rx_bufs)
{
	if (error) {
		SPI_STATS_TRANSFER_ERROR_INC(dev);
	}

	if (tx_bufs) {
		uint32_t tx_bytes = 0;

		/* Fix: previously only the first buffer of the set was
		 * counted, under-reporting multi-buffer transfers.
		 */
		for (size_t i = 0; i < tx_bufs->count; i++) {
			tx_bytes += tx_bufs->buffers[i].len;
		}
		SPI_STATS_TX_BYTES_INCN(dev, tx_bytes);
	}

	if (rx_bufs) {
		uint32_t rx_bytes = 0;

		for (size_t i = 0; i < rx_bufs->count; i++) {
			rx_bytes += rx_bufs->buffers[i].len;
		}
		SPI_STATS_RX_BYTES_INCN(dev, rx_bytes);
	}
}
#else /*CONFIG_SPI_STATS*/

#define SPI_DEVICE_DT_DEFINE(node_id, init_fn, pm, \
			     data, config, level, prio, \
			     api, ...) \
	Z_DEVICE_STATE_DEFINE(Z_DEVICE_DT_DEV_ID(node_id)); \
	Z_DEVICE_DEFINE(node_id, Z_DEVICE_DT_DEV_ID(node_id), \
			DEVICE_DT_NAME(node_id), init_fn, pm, data, config, \
			level, prio, api, \
			&Z_DEVICE_STATE_NAME(Z_DEVICE_DT_DEV_ID(node_id)), \
			__VA_ARGS__)

/* Stats disabled: the counters compile away to nothing. Both the plain
 * *_INC spellings and the *_INCN spellings used by the stats-enabled
 * branch are stubbed so callers can use either form unconditionally.
 */
#define SPI_STATS_RX_BYTES_INC(dev_)
#define SPI_STATS_TX_BYTES_INC(dev_)
#define SPI_STATS_RX_BYTES_INCN(dev_, n)
#define SPI_STATS_TX_BYTES_INCN(dev_, n)
#define SPI_STATS_TRANSFER_ERROR_INC(dev_)

#define spi_transceive_stats(dev, error, tx_bufs, rx_bufs)

#endif /*CONFIG_SPI_STATS*/
/**
 * @typedef spi_api_io
 * @brief Callback API for I/O
 * See spi_transceive() for argument descriptions
 */
typedef int (*spi_api_io)(const struct device *dev,
			  const struct spi_config *config,
			  const struct spi_buf_set *tx_bufs,
			  const struct spi_buf_set *rx_bufs);

/**
 * @brief SPI callback for asynchronous transfer requests
 *
 * @param dev SPI device which is notifying of transfer completion or error
 * @param result Result code of the transfer request. 0 is success, -errno for failure.
 * @param data Transfer requester supplied data which is passed along to the callback.
 */
typedef void (*spi_callback_t)(const struct device *dev, int result, void *data);

/**
 * @typedef spi_api_io_async
 * @brief Callback API for asynchronous I/O
 * See spi_transceive_signal() for argument descriptions
 */
typedef int (*spi_api_io_async)(const struct device *dev,
				const struct spi_config *config,
				const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs,
				spi_callback_t cb,
				void *userdata);

#if defined(CONFIG_SPI_RTIO) || defined(DOXYGEN)
/**
 * @typedef spi_api_iodev_submit
 * @brief Callback API for submitting work to a SPI device with RTIO
 */
typedef void (*spi_api_iodev_submit)(const struct device *dev,
				     struct rtio_iodev_sqe *iodev_sqe);
#endif /* CONFIG_SPI_RTIO */

/**
 * @typedef spi_api_release
 * @brief Callback API for unlocking SPI device.
 * See spi_release() for argument descriptions
 */
typedef int (*spi_api_release)(const struct device *dev,
			       const struct spi_config *config);

/**
 * @brief SPI driver API
 * This is the mandatory API any SPI driver needs to expose.
 */
__subsystem struct spi_driver_api {
	/** Synchronous transfer entry; see spi_transceive(). */
	spi_api_io transceive;
#ifdef CONFIG_SPI_ASYNC
	/** Asynchronous transfer entry, only with @kconfig{CONFIG_SPI_ASYNC}. */
	spi_api_io_async transceive_async;
#endif /* CONFIG_SPI_ASYNC */
#ifdef CONFIG_SPI_RTIO
	/** RTIO submission entry, only with @kconfig{CONFIG_SPI_RTIO}. */
	spi_api_iodev_submit iodev_submit;
#endif /* CONFIG_SPI_RTIO */
	/** Lock release entry; see spi_release(). */
	spi_api_release release;
};
/**
* @brief Check if SPI CS is controlled using a GPIO.
*
* @param config SPI configuration.
* @return true If CS is controlled using a GPIO.
* @return false If CS is controlled by hardware or any other means.
*/
static inline bool spi_cs_is_gpio(const struct spi_config *config)
{
return config->cs.gpio.port != NULL;
}
/**
* @brief Check if SPI CS in @ref spi_dt_spec is controlled using a GPIO.
*
* @param spec SPI specification from devicetree.
* @return true If CS is controlled using a GPIO.
* @return false If CS is controlled by hardware or any other means.
*/
static inline bool spi_cs_is_gpio_dt(const struct spi_dt_spec *spec)
{
	/* Delegate to the plain spi_config variant. */
	const struct spi_config *cfg = &spec->config;

	return spi_cs_is_gpio(cfg);
}
/**
* @brief Validate that SPI bus (and CS gpio if defined) is ready.
*
* @param spec SPI specification from devicetree
*
* @retval true if the SPI bus is ready for use.
* @retval false if the SPI bus (or the CS gpio defined) is not ready for use.
*/
static inline bool spi_is_ready_dt(const struct spi_dt_spec *spec)
{
	/* The bus itself must be ready ... */
	if (!device_is_ready(spec->bus)) {
		return false;
	}

	/* ... and so must the CS GPIO port, whenever CS is GPIO-driven. */
	return !spi_cs_is_gpio_dt(spec) ||
	       gpio_is_ready_dt(&spec->config.cs.gpio);
}
/**
* @brief Read/write the specified amount of data from the SPI driver.
*
* @note This function is synchronous.
*
* @param dev Pointer to the device structure for the driver instance
* @param config Pointer to a valid spi_config structure instance.
* Pointer-comparison may be used to detect changes from
* previous operations.
* @param tx_bufs Buffer array where data to be sent originates from,
* or NULL if none.
* @param rx_bufs Buffer array where data to be read will be written to,
* or NULL if none.
*
* @retval frames Positive number of frames received in slave mode.
* @retval 0 If successful in master mode.
* @retval -errno Negative errno code on failure.
*/
__syscall int spi_transceive(const struct device *dev,
			     const struct spi_config *config,
			     const struct spi_buf_set *tx_bufs,
			     const struct spi_buf_set *rx_bufs);

/* Syscall implementation: run the driver transfer, then account stats. */
static inline int z_impl_spi_transceive(const struct device *dev,
					const struct spi_config *config,
					const struct spi_buf_set *tx_bufs,
					const struct spi_buf_set *rx_bufs)
{
	int ret = ((const struct spi_driver_api *)dev->api)
			->transceive(dev, config, tx_bufs, rx_bufs);

	spi_transceive_stats(dev, ret, tx_bufs, rx_bufs);

	return ret;
}
/**
* @brief Read/write data from an SPI bus specified in @p spi_dt_spec.
*
* This is equivalent to:
*
* spi_transceive(spec->bus, &spec->config, tx_bufs, rx_bufs);
*
* @param spec SPI specification from devicetree
* @param tx_bufs Buffer array where data to be sent originates from,
* or NULL if none.
* @param rx_bufs Buffer array where data to be read will be written to,
* or NULL if none.
*
* @return a value from spi_transceive().
*/
static inline int spi_transceive_dt(const struct spi_dt_spec *spec,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	/* Thin devicetree-spec wrapper around spi_transceive(). */
	const struct device *bus = spec->bus;
	const struct spi_config *cfg = &spec->config;

	return spi_transceive(bus, cfg, tx_bufs, rx_bufs);
}
/**
* @brief Read the specified amount of data from the SPI driver.
*
* @note This function is synchronous.
*
* @note This function is a helper function calling spi_transceive.
*
* @param dev Pointer to the device structure for the driver instance
* @param config Pointer to a valid spi_config structure instance.
* Pointer-comparison may be used to detect changes from
* previous operations.
* @param rx_bufs Buffer array where data to be read will be written to.
*
* @retval frames Positive number of frames received in slave mode.
* @retval 0 If successful.
* @retval -errno Negative errno code on failure.
*/
static inline int spi_read(const struct device *dev,
			   const struct spi_config *config,
			   const struct spi_buf_set *rx_bufs)
{
	/* Receive-only transfer: no TX buffer set is supplied. */
	const struct spi_buf_set *no_tx = NULL;

	return spi_transceive(dev, config, no_tx, rx_bufs);
}
/**
* @brief Read data from a SPI bus specified in @p spi_dt_spec.
*
* This is equivalent to:
*
* spi_read(spec->bus, &spec->config, rx_bufs);
*
* @param spec SPI specification from devicetree
* @param rx_bufs Buffer array where data to be read will be written to.
*
* @return a value from spi_read().
*/
static inline int spi_read_dt(const struct spi_dt_spec *spec,
			      const struct spi_buf_set *rx_bufs)
{
	/* Thin devicetree-spec wrapper around spi_read(). */
	const struct spi_config *cfg = &spec->config;

	return spi_read(spec->bus, cfg, rx_bufs);
}
/**
* @brief Write the specified amount of data from the SPI driver.
*
* @note This function is synchronous.
*
* @note This function is a helper function calling spi_transceive.
*
* @param dev Pointer to the device structure for the driver instance
* @param config Pointer to a valid spi_config structure instance.
* Pointer-comparison may be used to detect changes from
* previous operations.
* @param tx_bufs Buffer array where data to be sent originates from.
*
* @retval 0 If successful.
* @retval -errno Negative errno code on failure.
*/
static inline int spi_write(const struct device *dev,
			    const struct spi_config *config,
			    const struct spi_buf_set *tx_bufs)
{
	/* Transmit-only transfer: no RX buffer set is supplied. */
	const struct spi_buf_set *no_rx = NULL;

	return spi_transceive(dev, config, tx_bufs, no_rx);
}
/**
* @brief Write data to a SPI bus specified in @p spi_dt_spec.
*
* This is equivalent to:
*
* spi_write(spec->bus, &spec->config, tx_bufs);
*
* @param spec SPI specification from devicetree
* @param tx_bufs Buffer array where data to be sent originates from.
*
* @return a value from spi_write().
*/
static inline int spi_write_dt(const struct spi_dt_spec *spec,
			       const struct spi_buf_set *tx_bufs)
{
	/* Thin devicetree-spec wrapper around spi_write(). */
	const struct spi_config *cfg = &spec->config;

	return spi_write(spec->bus, cfg, tx_bufs);
}
#if defined(CONFIG_SPI_ASYNC) || defined(__DOXYGEN__)
/**
* @brief Read/write the specified amount of data from the SPI driver.
*
* @note This function is asynchronous.
*
* @note This function is available only if @kconfig{CONFIG_SPI_ASYNC}
* is selected.
*
* @param dev Pointer to the device structure for the driver instance
* @param config Pointer to a valid spi_config structure instance.
* Pointer-comparison may be used to detect changes from
* previous operations.
* @param tx_bufs Buffer array where data to be sent originates from,
* or NULL if none.
* @param rx_bufs Buffer array where data to be read will be written to,
* or NULL if none.
* @param callback Function pointer to completion callback.
* (Note: if NULL this function will not
* notify the end of the transaction, and whether it went
* successfully or not).
* @param userdata Userdata passed to callback
*
* @retval frames Positive number of frames received in slave mode.
* @retval 0 If successful in master mode.
* @retval -errno Negative errno code on failure.
*/
static inline int spi_transceive_cb(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs,
				    spi_callback_t callback,
				    void *userdata)
{
	/* Hand the request straight to the driver's async entry point. */
	return ((const struct spi_driver_api *)dev->api)
		->transceive_async(dev, config, tx_bufs, rx_bufs, callback,
				   userdata);
}
#if defined(CONFIG_POLL) || defined(__DOXYGEN__)
/** @cond INTERNAL_HIDDEN */
void z_spi_transfer_signal_cb(const struct device *dev, int result, void *userdata);
/** @endcond */
/**
 * @brief Read/write the specified amount of data from the SPI driver.
 *
 * @note This function is asynchronous.
 *
 * @note This function is available only if @kconfig{CONFIG_SPI_ASYNC}
 *       and @kconfig{CONFIG_POLL} are selected.
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param config Pointer to a valid spi_config structure instance.
 *        Pointer-comparison may be used to detect changes from
 *        previous operations.
 * @param tx_bufs Buffer array where data to be sent originates from,
 *        or NULL if none.
 * @param rx_bufs Buffer array where data to be read will be written to,
 *        or NULL if none.
 * @param sig A pointer to a valid and ready to be signaled
 *        struct k_poll_signal. (Note: if NULL this function will not
 *        notify the end of the transaction, and whether it went
 *        successfully or not).
 *
 * @retval frames Positive number of frames received in slave mode.
 * @retval 0 If successful in master mode.
 * @retval -errno Negative errno code on failure.
 */
static inline int spi_transceive_signal(const struct device *dev,
					const struct spi_config *config,
					const struct spi_buf_set *tx_bufs,
					const struct spi_buf_set *rx_bufs,
					struct k_poll_signal *sig)
{
	const struct spi_driver_api *api = (const struct spi_driver_api *)dev->api;
	spi_callback_t cb = NULL;

	/* Only hook up the signal-raising callback when a signal was given. */
	if (sig != NULL) {
		cb = z_spi_transfer_signal_cb;
	}

	return api->transceive_async(dev, config, tx_bufs, rx_bufs, cb, sig);
}
/**
 * @brief Read the specified amount of data from the SPI driver.
 *
 * @note This function is asynchronous.
 *
 * @note This function is a helper function calling spi_transceive_signal.
 *
 * @note This function is available only if @kconfig{CONFIG_SPI_ASYNC}
 *       and @kconfig{CONFIG_POLL} are selected.
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param config Pointer to a valid spi_config structure instance.
 *        Pointer-comparison may be used to detect changes from
 *        previous operations.
 * @param rx_bufs Buffer array where data to be read will be written to.
 * @param sig A pointer to a valid and ready to be signaled
 *        struct k_poll_signal. (Note: if NULL this function will not
 *        notify the end of the transaction, and whether it went
 *        successfully or not).
 *
 * @retval frames Positive number of frames received in slave mode.
 * @retval 0 If successful
 * @retval -errno Negative errno code on failure.
 */
static inline int spi_read_signal(const struct device *dev,
				  const struct spi_config *config,
				  const struct spi_buf_set *rx_bufs,
				  struct k_poll_signal *sig)
{
	/* Read-only transfer: no transmit buffer set. */
	const struct spi_buf_set *tx_bufs = NULL;

	return spi_transceive_signal(dev, config, tx_bufs, rx_bufs, sig);
}
/**
 * @brief Write the specified amount of data from the SPI driver.
 *
 * @note This function is asynchronous.
 *
 * @note This function is a helper function calling spi_transceive_signal.
 *
 * @note This function is available only if @kconfig{CONFIG_SPI_ASYNC}
 *       and @kconfig{CONFIG_POLL} are selected.
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param config Pointer to a valid spi_config structure instance.
 *        Pointer-comparison may be used to detect changes from
 *        previous operations.
 * @param tx_bufs Buffer array where data to be sent originates from.
 * @param sig A pointer to a valid and ready to be signaled
 *        struct k_poll_signal. (Note: if NULL this function will not
 *        notify the end of the transaction, and whether it went
 *        successfully or not).
 *
 * @retval 0 If successful.
 * @retval -errno Negative errno code on failure.
 */
static inline int spi_write_signal(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   struct k_poll_signal *sig)
{
	/* Write-only transfer: no receive buffer set. */
	const struct spi_buf_set *rx_bufs = NULL;

	return spi_transceive_signal(dev, config, tx_bufs, rx_bufs, sig);
}
#endif /* CONFIG_POLL */
#endif /* CONFIG_SPI_ASYNC */
#if defined(CONFIG_SPI_RTIO) || defined(__DOXYGEN__)
/**
 * @brief Submit a SPI device with a request
 *
 * @param iodev_sqe Prepared submissions queue entry connected to an iodev
 *                  defined by SPI_IODEV_DEFINE.
 *                  Must live as long as the request is in flight.
 */
static inline void spi_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
{
	/* The iodev's data field carries the spi_dt_spec for the target bus. */
	const struct spi_dt_spec *dt_spec = iodev_sqe->sqe.iodev->data;
	const struct spi_driver_api *api =
		(const struct spi_driver_api *)dt_spec->bus->api;

	api->iodev_submit(dt_spec->bus, iodev_sqe);
}
extern const struct rtio_iodev_api spi_iodev_api;
/**
 * @brief Define an iodev for a given dt node on the bus
 *
 * These do not need to be shared globally but doing so
 * will save a small amount of memory.
 *
 * NOTE(review): the backing spec symbol _spi_dt_spec_##name has external
 * linkage, so @p name must be unique across the image — confirm whether
 * this is intentional (it appears required for the globally-shared case).
 *
 * @param name Symbolic name to use for defining the iodev
 * @param node_id Devicetree node identifier
 * @param operation_ SPI operational mode
 * @param delay_ Chip select delay in microseconds
 */
#define SPI_DT_IODEV_DEFINE(name, node_id, operation_, delay_)		\
	const struct spi_dt_spec _spi_dt_spec_##name =			\
		SPI_DT_SPEC_GET(node_id, operation_, delay_);		\
	RTIO_IODEV_DEFINE(name, &spi_iodev_api, (void *)&_spi_dt_spec_##name)
/**
 * @brief Validate that SPI bus (and CS gpio if defined) is ready.
 *
 * @param spi_iodev SPI iodev defined with SPI_DT_IODEV_DEFINE
 *
 * @retval true if the SPI bus is ready for use.
 * @retval false if the SPI bus (or the CS gpio defined) is not ready for use.
 */
static inline bool spi_is_ready_iodev(const struct rtio_iodev *spi_iodev)
{
	/* The iodev's data field holds the spi_dt_spec to check. */
	struct spi_dt_spec *dt_spec = spi_iodev->data;

	return spi_is_ready_dt(dt_spec);
}
/**
* @brief Copy the tx_bufs and rx_bufs into a set of RTIO requests
*
* @param[in] r rtio context
* @param[in] iodev iodev to transceive with
* @param[in] tx_bufs transmit buffer set
* @param[in] rx_bufs receive buffer set
* @param[out] last_sqe last sqe submitted, NULL if not enough memory
*
* @retval Number of submission queue entries
* @retval -ENOMEM out of memory
*/
static inline int spi_rtio_copy(struct rtio *r,
struct rtio_iodev *iodev,
const struct spi_buf_set *tx_bufs,
const struct spi_buf_set *rx_bufs,
struct rtio_sqe **last_sqe)
{
int ret = 0;
size_t tx_count = tx_bufs ? tx_bufs->count : 0;
size_t rx_count = rx_bufs ? rx_bufs->count : 0;
uint32_t tx = 0, tx_len = 0;
uint32_t rx = 0, rx_len = 0;
uint8_t *tx_buf, *rx_buf;
struct rtio_sqe *sqe = NULL;
if (tx < tx_count) {
tx_buf = tx_bufs->buffers[tx].buf;
tx_len = tx_bufs->buffers[tx].len;
} else {
tx_buf = NULL;
tx_len = rx_bufs->buffers[rx].len;
}
if (rx < rx_count) {
rx_buf = rx_bufs->buffers[rx].buf;
rx_len = rx_bufs->buffers[rx].len;
} else {
rx_buf = NULL;
rx_len = tx_bufs->buffers[tx].len;
}
while ((tx < tx_count || rx < rx_count) && (tx_len > 0 || rx_len > 0)) {
sqe = rtio_sqe_acquire(r);
if (sqe == NULL) {
ret = -ENOMEM;
rtio_sqe_drop_all(r);
goto out;
}
ret++;
/* If tx/rx len are same, we can do a simple transceive */
if (tx_len == rx_len) {
if (tx_buf == NULL) {
rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM,
rx_buf, rx_len, NULL);
} else if (rx_buf == NULL) {
rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM,
tx_buf, tx_len, NULL);
} else {
rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
tx_buf, rx_buf, rx_len, NULL);
}
tx++;
rx++;
if (rx < rx_count) {
rx_buf = rx_bufs->buffers[rx].buf;
rx_len = rx_bufs->buffers[rx].len;
} else {
rx_buf = NULL;
rx_len = 0;
}
if (tx < tx_count) {
tx_buf = tx_bufs->buffers[tx].buf;
tx_len = tx_bufs->buffers[tx].len;
} else {
tx_buf = NULL;
tx_len = 0;
}
} else if (tx_len == 0) {
rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM,
(uint8_t *)rx_buf,
(uint32_t)rx_len,
NULL);
rx++;
if (rx < rx_count) {
rx_buf = rx_bufs->buffers[rx].buf;
rx_len = rx_bufs->buffers[rx].len;
} else {
rx_buf = NULL;
rx_len = 0;
}
} else if (rx_len == 0) {
rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM,
(uint8_t *)tx_buf,
(uint32_t)tx_len,
NULL);
tx++;
if (tx < tx_count) {
tx_buf = rx_bufs->buffers[rx].buf;
tx_len = rx_bufs->buffers[rx].len;
} else {
tx_buf = NULL;
tx_len = 0;
}
} else if (tx_len > rx_len) {
rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
(uint8_t *)tx_buf,
(uint8_t *)rx_buf,
(uint32_t)rx_len,
NULL);
tx_len -= rx_len;
tx_buf += rx_len;
rx++;
if (rx < rx_count) {
rx_buf = rx_bufs->buffers[rx].buf;
rx_len = rx_bufs->buffers[rx].len;
} else {
rx_buf = NULL;
rx_len = tx_len;
}
} else if (rx_len > tx_len) {
rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
(uint8_t *)tx_buf,
(uint8_t *)rx_buf,
(uint32_t)tx_len,
NULL);
rx_len -= tx_len;
rx_buf += tx_len;
tx++;
if (tx < tx_count) {
tx_buf = tx_bufs->buffers[tx].buf;
tx_len = tx_bufs->buffers[tx].len;
} else {
tx_buf = NULL;
tx_len = rx_len;
}
} else {
__ASSERT_NO_MSG("Invalid spi_rtio_copy state");
}
sqe->flags = RTIO_SQE_TRANSACTION;
}
if (sqe != NULL) {
sqe->flags = 0;
*last_sqe = sqe;
}
out:
return ret;
}
#endif /* CONFIG_SPI_RTIO */
/**
* @brief Release the SPI device locked on and/or the CS by the current config
*
* Note: This synchronous function is used to release either the lock on the
* SPI device and/or the CS line that was kept if, and if only,
* given config parameter was the last one to be used (in any of the
* above functions) and if it has the SPI_LOCK_ON bit set and/or the
* SPI_HOLD_ON_CS bit set into its operation bits field.
* This can be used if the caller needs to keep its hand on the SPI
* device for consecutive transactions and/or if it needs the device to
* stay selected. Usually both bits will be used along each other, so the
* the device is locked and stays on until another operation is necessary
* or until it gets released with the present function.
*
* @param dev Pointer to the device structure for the driver instance
* @param config Pointer to a valid spi_config structure instance.
*
* @retval 0 If successful.
* @retval -errno Negative errno code on failure.
*/
__syscall int spi_release(const struct device *dev,
const struct spi_config *config);
static inline int z_impl_spi_release(const struct device *dev,
				     const struct spi_config *config)
{
	/* Dispatch straight to the driver's release entry point. */
	const struct spi_driver_api *api = (const struct spi_driver_api *)dev->api;

	return api->release(dev, config);
}
/**
 * @brief Release the SPI device specified in @p spi_dt_spec.
 *
 * Convenience wrapper, equivalent to:
 *
 *     spi_release(spec->bus, &spec->config);
 *
 * @param spec SPI specification from devicetree
 *
 * @return a value from spi_release().
 */
static inline int spi_release_dt(const struct spi_dt_spec *spec)
{
	/* Forward to the plain API using the bus/config baked into the spec. */
	const struct device *bus = spec->bus;

	return spi_release(bus, &spec->config);
}
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#include <zephyr/syscalls/spi.h>
#endif /* ZEPHYR_INCLUDE_DRIVERS_SPI_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/spi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,445 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_SPI_SPI_EMUL_H_
#define ZEPHYR_INCLUDE_DRIVERS_SPI_SPI_EMUL_H_
#include <zephyr/device.h>
#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/sys/slist.h>
#include <zephyr/types.h>
/**
* @file
*
* @brief Public APIs for the SPI emulation drivers.
*/
/**
* @brief SPI Emulation Interface
* @defgroup spi_emul_interface SPI Emulation Interface
* @ingroup io_emulators
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
struct spi_msg;
struct spi_emul_api;
/** Node in a linked list of emulators for SPI devices */
/** Node in a linked list of emulators for SPI devices */
struct spi_emul {
	/** Linked-list node; used by the controller to track registered emulators */
	sys_snode_t node;
	/** Target emulator - REQUIRED for all bus emulators */
	const struct emul *target;
	/** API provided for this device */
	const struct spi_emul_api *api;
	/**
	 * A mock API that if not NULL will take precedence over the actual API. If set, a return
	 * value of -ENOSYS will revert back to the default api.
	 */
	struct spi_emul_api *mock_api;
	/** SPI chip-select of the emulated device */
	uint16_t chipsel;
};
/**
* Passes SPI messages to the emulator. The emulator updates the data with what
* was read back.
*
* @param target The device Emulator instance
* @param config Pointer to a valid spi_config structure instance.
* Pointer-comparison may be used to detect changes from
* previous operations.
* @param tx_bufs Buffer array where data to be sent originates from,
* or NULL if none.
* @param rx_bufs Buffer array where data to be read will be written to,
* or NULL if none.
*
* @retval 0 If successful.
* @retval -EIO General input / output error.
*/
typedef int (*spi_emul_io_t)(const struct emul *target, const struct spi_config *config,
const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs);
/**
* Register an emulated device on the controller
*
* @param dev Device that will use the emulator
* @param emul SPI emulator to use
* @return 0 indicating success (always)
*/
int spi_emul_register(const struct device *dev, struct spi_emul *emul);
/** Definition of the emulator API */
struct spi_emul_api {
	/** Performs an emulated SPI transfer (see spi_emul_io_t) */
	spi_emul_io_t io;
};
/**
* Back door to allow an emulator to retrieve the host configuration.
*
* @param dev SPI device associated with the emulator
* @return Bit-packed 32-bit value containing the device's runtime configuration
*/
uint32_t spi_emul_get_config(const struct device *dev);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_DRIVERS_SPI_SPI_EMUL_H_ */
``` | /content/code_sandbox/include/zephyr/drivers/spi_emul.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 627 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.