text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```objective-c
/*
* Organisation (CSIRO) ABN 41 687 119 230.
*
*
* Generate memory regions from devicetree nodes.
*/
#ifndef ZEPHYR_INCLUDE_LINKER_DEVICETREE_REGIONS_H_
#define ZEPHYR_INCLUDE_LINKER_DEVICETREE_REGIONS_H_
#include <zephyr/devicetree.h>
#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
/**
 * @brief Get the linker memory-region name in a token form
 *
 * This attempts to use the zephyr,memory-region property (with
 * non-alphanumeric characters replaced with underscores) returning a token.
 *
 * Example devicetree fragment:
 *
 * @code{.dts}
 * / {
 * soc {
 * sram1: memory@2000000 {
 * zephyr,memory-region = "MY_NAME";
 * };
 * sram2: memory@2001000 {
 * zephyr,memory-region = "MY@OTHER@NAME";
 * };
 * };
 * };
 * @endcode
 *
 * Example usage:
 *
 * @code{.c}
 * LINKER_DT_NODE_REGION_NAME_TOKEN(DT_NODELABEL(sram1)) // MY_NAME
 * LINKER_DT_NODE_REGION_NAME_TOKEN(DT_NODELABEL(sram2)) // MY_OTHER_NAME
 * @endcode
 *
 * @param node_id node identifier
 * @return the name of the memory region the node will generate
 */
#define LINKER_DT_NODE_REGION_NAME_TOKEN(node_id) \
DT_STRING_TOKEN(node_id, zephyr_memory_region)
/**
 * @brief Get the linker memory-region name
 *
 * This attempts to use the zephyr,memory-region property (with
 * non-alphanumeric characters replaced with underscores).
 *
 * Example devicetree fragment:
 *
 * @code{.dts}
 * / {
 * soc {
 * sram1: memory@2000000 {
 * zephyr,memory-region = "MY_NAME";
 * };
 * sram2: memory@2001000 {
 * zephyr,memory-region = "MY@OTHER@NAME";
 * };
 * };
 * };
 * @endcode
 *
 * Example usage:
 *
 * @code{.c}
 * LINKER_DT_NODE_REGION_NAME(DT_NODELABEL(sram1)) // "MY_NAME"
 * LINKER_DT_NODE_REGION_NAME(DT_NODELABEL(sram2)) // "MY_OTHER_NAME"
 * @endcode
 *
 * @param node_id node identifier
 * @return the name of the memory region the node will generate
 */
#define LINKER_DT_NODE_REGION_NAME(node_id) \
STRINGIFY(LINKER_DT_NODE_REGION_NAME_TOKEN(node_id))
/** @cond INTERNAL_HIDDEN */
#define _DT_COMPATIBLE zephyr_memory_region
#define _DT_SECTION_PREFIX(node_id) UTIL_CAT(__, LINKER_DT_NODE_REGION_NAME_TOKEN(node_id))
#define _DT_SECTION_START(node_id) UTIL_CAT(_DT_SECTION_PREFIX(node_id), _start)
#define _DT_SECTION_END(node_id) UTIL_CAT(_DT_SECTION_PREFIX(node_id), _end)
#define _DT_SECTION_SIZE(node_id) UTIL_CAT(_DT_SECTION_PREFIX(node_id), _size)
#define _DT_SECTION_LOAD(node_id) UTIL_CAT(_DT_SECTION_PREFIX(node_id), _load_start)
/**
 * @brief Declare a memory region
 *
 * Example devicetree fragment:
 *
 * @code{.dts}
 * test_sram: sram@20010000 {
 * compatible = "zephyr,memory-region", "mmio-sram";
 * reg = < 0x20010000 0x1000 >;
 * zephyr,memory-region = "FOOBAR";
 * };
 * @endcode
 *
 * will result in:
 *
 * @code{.unparsed}
 * FOOBAR : ORIGIN = DT_REG_ADDR(node), LENGTH = DT_REG_SIZE(node)
 * @endcode
 *
 * Note: this macro emits no MEMORY attribute list (see the note on
 * LINKER_DT_REGIONS); only the region name, origin and length.
 *
 * @param node_id devicetree node identifier
 */
#define _REGION_DECLARE(node_id) \
LINKER_DT_NODE_REGION_NAME_TOKEN(node_id) : \
ORIGIN = DT_REG_ADDR(node_id), \
LENGTH = DT_REG_SIZE(node_id)
/**
 * @brief Declare a memory section from the device tree nodes with
 * compatible 'zephyr,memory-region'
 *
 * Example devicetree fragment:
 *
 * @code{.dts}
 * test_sram: sram@20010000 {
 * compatible = "zephyr,memory-region", "mmio-sram";
 * reg = < 0x20010000 0x1000 >;
 * zephyr,memory-region = "FOOBAR";
 * };
 * @endcode
 *
 * will result in:
 *
 * @code{.unparsed}
 * FOOBAR (NOLOAD) :
 * {
 * __FOOBAR_start = .;
 * KEEP(*(FOOBAR))
 * KEEP(*(FOOBAR.*))
 * __FOOBAR_end = .;
 * } > FOOBAR
 * __FOOBAR_size = __FOOBAR_end - __FOOBAR_start;
 * __FOOBAR_load_start = LOADADDR(FOOBAR);
 * @endcode
 *
 * Note: the output section is marked (NOLOAD), so its contents occupy
 * address space but are not loaded as part of the program image.
 *
 * @param node_id devicetree node identifier
 */
#define _SECTION_DECLARE(node_id) \
LINKER_DT_NODE_REGION_NAME_TOKEN(node_id) (NOLOAD) : \
{ \
_DT_SECTION_START(node_id) = .; \
KEEP(*(LINKER_DT_NODE_REGION_NAME_TOKEN(node_id))) \
KEEP(*(LINKER_DT_NODE_REGION_NAME_TOKEN(node_id).*)) \
_DT_SECTION_END(node_id) = .; \
} > LINKER_DT_NODE_REGION_NAME_TOKEN(node_id) \
_DT_SECTION_SIZE(node_id) = _DT_SECTION_END(node_id) - _DT_SECTION_START(node_id); \
_DT_SECTION_LOAD(node_id) = LOADADDR(LINKER_DT_NODE_REGION_NAME_TOKEN(node_id));
/** @endcond */
/**
 * @brief Generate linker memory regions from the device tree nodes with
 * compatible 'zephyr,memory-region'
 *
 * Note: for now we do not deal with MEMORY attributes since those are
 * optional, not actually used by Zephyr and they will likely conflict with the
 * MPU configuration.
 *
 * @see _REGION_DECLARE for the per-node expansion.
 */
#define LINKER_DT_REGIONS() \
DT_FOREACH_STATUS_OKAY(_DT_COMPATIBLE, _REGION_DECLARE)
/**
 * @brief Generate linker memory sections from the device tree nodes with
 * compatible 'zephyr,memory-region'
 *
 * @see _SECTION_DECLARE for the per-node expansion.
 */
#define LINKER_DT_SECTIONS() \
DT_FOREACH_STATUS_OKAY(_DT_COMPATIBLE, _SECTION_DECLARE)
#endif /* ZEPHYR_INCLUDE_LINKER_DEVICETREE_REGIONS_H_ */
``` | /content/code_sandbox/include/zephyr/linker/devicetree_regions.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,483 |
```linker script
/*
 *
 */
#ifdef CONFIG_USERSPACE
/* During LINKER_KOBJECT_PREBUILT and LINKER_ZEPHYR_PREBUILT,
 * space needs to be reserved for the rodata that will be
 * produced by gperf during the final stages of linking.
 * The alignment and size are produced by
 * scripts/build/gen_kobject_placeholders.py. These are here
 * so the addresses to kobjects would remain the same
 * during the final stages of linking (LINKER_ZEPHYR_FINAL).
 */
#if defined(LINKER_ZEPHYR_PREBUILT)
#include <zephyr/linker-kobject-prebuilt-rodata.h>
#ifdef KOBJECT_RODATA_ALIGN
/* Reserve a KOBJECT_RODATA_SZ-byte placeholder so the final link
 * lands the gperf output at the same addresses.
 */
. = ALIGN(KOBJECT_RODATA_ALIGN);
_kobject_rodata_area_start = .;
. = . + KOBJECT_RODATA_SZ;
_kobject_rodata_area_end = .;
#endif
#endif /* LINKER_ZEPHYR_PREBUILT */
#if defined(LINKER_ZEPHYR_FINAL)
#include <zephyr/linker-kobject-prebuilt-rodata.h>
#ifdef KOBJECT_RODATA_ALIGN
. = ALIGN(KOBJECT_RODATA_ALIGN);
_kobject_rodata_area_start = .;
#endif
/* The gperf-generated kobject rodata is collected here */
*(".kobject_data.rodata*")
#ifdef KOBJECT_RODATA_ALIGN
_kobject_rodata_area_end = .;
_kobject_rodata_area_used = _kobject_rodata_area_end - _kobject_rodata_area_start;
ASSERT(_kobject_rodata_area_used <= KOBJECT_RODATA_SZ,
"scripts/build/gen_kobject_placeholders.py did not reserve enough space \
for kobject rodata."
);
/* Padding is needed to preserve kobject addresses
 * if we have reserved more space than needed.
 */
. = MAX(., _kobject_rodata_area_start + KOBJECT_RODATA_SZ);
#endif /* KOBJECT_RODATA_ALIGN */
#endif /* LINKER_ZEPHYR_FINAL */
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/include/zephyr/linker/kobject-rom.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 417 |
```objective-c
/*
*/
/**
* @file
* @brief USB-C Device APIs
*
* This file contains the USB-C Device APIs.
*/
#ifndef ZEPHYR_INCLUDE_USBC_H_
#define ZEPHYR_INCLUDE_USBC_H_
#include <zephyr/types.h>
#include <zephyr/device.h>
#include <zephyr/drivers/usb_c/usbc_tcpc.h>
#include <zephyr/drivers/usb_c/usbc_vbus.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief USB-C Device APIs
* @defgroup _usbc_device_api USB-C Device API
* @since 3.3
* @version 0.1.0
* @{
*/
/**
* @brief This Request Data Object (RDO) value can be returned from the
* policy_cb_get_rdo if 5V@100mA with the following
* options are sufficient for the Sink to operate.
*
* The RDO is configured as follows:
* Maximum operating current 100mA
* Operating current 100mA
* Unchunked Extended Messages Not Supported
* No USB Suspend
* Not USB Communications Capable
* No capability mismatch
* Don't giveback
* Object position 1 (5V PDO)
*/
#define FIXED_5V_100MA_RDO 0x1100280a
/**
 * @brief Device Policy Manager requests
 */
enum usbc_policy_request_t {
/** No request */
REQUEST_NOP,
/** Request Type-C layer to transition to Disabled State */
REQUEST_TC_DISABLED,
/** Request Type-C layer to transition to Error Recovery State */
REQUEST_TC_ERROR_RECOVERY,
/** End of Type-C requests */
REQUEST_TC_END,
/** Request Policy Engine layer to perform a Data Role Swap */
REQUEST_PE_DR_SWAP,
/** Request Policy Engine layer to send a hard reset */
REQUEST_PE_HARD_RESET_SEND,
/** Request Policy Engine layer to send a soft reset */
REQUEST_PE_SOFT_RESET_SEND,
/**
 * Request Policy Engine layer to get Source Capabilities from
 * port partner
 */
REQUEST_PE_GET_SRC_CAPS,
/**
 * Request Policy Engine to get Sink Capabilities from
 * port partner
 *
 * NOTE(review): name lacks the PE_ prefix used by the sibling Policy
 * Engine requests; kept as-is for API compatibility.
 */
REQUEST_GET_SNK_CAPS,
/**
 * Request Policy Engine to request the port partner to source
 * minimum power
 */
REQUEST_PE_GOTO_MIN,
};
/**
 * @brief Device Policy Manager notifications
 */
enum usbc_policy_notify_t {
/** Power Delivery Accept message was received */
MSG_ACCEPT_RECEIVED,
/** Power Delivery Reject message was received */
MSG_REJECTED_RECEIVED,
/** Power Delivery discarded the message being transmitted */
MSG_DISCARDED,
/** Power Delivery Not Supported message was received */
MSG_NOT_SUPPORTED_RECEIVED,
/** Data Role has been set to Upstream Facing Port (UFP) */
DATA_ROLE_IS_UFP,
/** Data Role has been set to Downstream Facing Port (DFP) */
DATA_ROLE_IS_DFP,
/** A PD Explicit Contract is in place */
PD_CONNECTED,
/** No PD Explicit Contract is in place */
NOT_PD_CONNECTED,
/** Transition the Power Supply */
TRANSITION_PS,
/** Port partner is not responsive */
PORT_PARTNER_NOT_RESPONSIVE,
/** Protocol Error occurred */
PROTOCOL_ERROR,
/** Transition the Sink to default */
SNK_TRANSITION_TO_DEFAULT,
/** Hard Reset Received */
HARD_RESET_RECEIVED,
/** Sink SubPower state at 0V */
POWER_CHANGE_0A0,
/** Sink SubPower state at 5V / 500mA */
POWER_CHANGE_DEF,
/** Sink SubPower state at 5V / 1.5A */
POWER_CHANGE_1A5,
/** Sink SubPower state at 5V / 3A */
POWER_CHANGE_3A0,
/** Sender Response Timeout */
SENDER_RESPONSE_TIMEOUT,
/** Source Capabilities Received */
SOURCE_CAPABILITIES_RECEIVED,
};
/**
 * @brief Device Policy Manager checks
 */
enum usbc_policy_check_t {
/** Check if Power Role Swap is allowed */
CHECK_POWER_ROLE_SWAP,
/** Check if Data Role Swap to DFP is allowed */
CHECK_DATA_ROLE_SWAP_TO_DFP,
/** Check if Data Role Swap to UFP is allowed */
CHECK_DATA_ROLE_SWAP_TO_UFP,
/** Check if Sink is at default level */
CHECK_SNK_AT_DEFAULT_LEVEL,
/** Check if this device should control VCONN */
CHECK_VCONN_CONTROL,
/** Check if Source Power Supply is at default level */
CHECK_SRC_PS_AT_DEFAULT_LEVEL,
};
/**
* @brief Device Policy Manager Wait message notifications
*/
enum usbc_policy_wait_t {
/** The port partner is unable to meet the sink request at this time */
WAIT_SINK_REQUEST,
/** The port partner is unable to do a Power Role Swap at this time */
WAIT_POWER_ROLE_SWAP,
/** The port partner is unable to do a Data Role Swap at this time */
WAIT_DATA_ROLE_SWAP,
/** The port partner is unable to do a VCONN Swap at this time */
WAIT_VCONN_SWAP,
};
/**
* @brief Device Policy Manager's response to a Sink Request
*/
enum usbc_snk_req_reply_t {
/** The sink port partner's request can be met */
SNK_REQUEST_VALID,
/** The sink port partner's request can not be met */
SNK_REQUEST_REJECT,
/** The sink port partner's request can be met at a later time */
SNK_REQUEST_WAIT,
};
/** \addtogroup sink_callbacks
 * @{
 */
/**
 * @brief Callback type used to get the Sink Capabilities
 *
 * @param dev USB-C Connector Instance
 * @param pdos pointer where pdos are stored
 * @param num_pdos pointer where number of pdos is stored
 * @return 0 on success
 */
typedef int (*policy_cb_get_snk_cap_t)(const struct device *dev, uint32_t **pdos, int *num_pdos);
/**
 * @brief Callback type used to report the received Port Partner's
 * Source Capabilities
 *
 * @param dev USB-C Connector Instance
 * @param pdos pointer to the partner's source pdos
 * @param num_pdos number of source pdos
 */
typedef void (*policy_cb_set_src_cap_t)(const struct device *dev, const uint32_t *pdos,
const int num_pdos);
/**
 * @brief Callback type used to check a policy
 *
 * @param dev USB-C Connector Instance
 * @param policy_check policy to check
 * @return true if policy is currently allowed by the device policy manager
 */
typedef bool (*policy_cb_check_t)(const struct device *dev,
const enum usbc_policy_check_t policy_check);
/**
 * @brief Callback type used to notify Device Policy Manager of WAIT
 * message reception
 *
 * @param dev USB-C Connector Instance
 * @param wait_notify wait notification
 * @return true if the PE should wait and resend the message
 */
typedef bool (*policy_cb_wait_notify_t)(const struct device *dev,
const enum usbc_policy_wait_t wait_notify);
/**
 * @brief Callback type used to notify Device Policy Manager of a
 * policy change
 *
 * @param dev USB-C Connector Instance
 * @param policy_notify policy notification
 */
typedef void (*policy_cb_notify_t)(const struct device *dev,
const enum usbc_policy_notify_t policy_notify);
/**
 * @brief Callback type used to get the Request Data Object (RDO)
 *
 * @param dev USB-C Connector Instance
 * @return RDO
 */
typedef uint32_t (*policy_cb_get_rdo_t)(const struct device *dev);
/**
 * @brief Callback type used to check if the sink power supply is at
 * the default level
 *
 * @param dev USB-C Connector Instance
 * @return true if power supply is at default level
 */
typedef bool (*policy_cb_is_snk_at_default_t)(const struct device *dev);
/** @}*/
/** \addtogroup source_callbacks
 * @{
 */
/**
 * @brief Callback type used to get the Source Capabilities
 * from the Device Policy Manager
 *
 * @param dev USB-C Connector Instance
 * @param pdos pointer to source capability pdos
 * @param num_pdos pointer to number of source capability pdos
 * @return 0 on success
 */
typedef int (*policy_cb_get_src_caps_t)(const struct device *dev, const uint32_t **pdos,
uint32_t *num_pdos);
/**
 * @brief Callback type used to check if Sink request is valid
 *
 * @param dev USB-C Connector Instance
 * @param request_msg request message to check
 * @return sink request reply
 */
typedef enum usbc_snk_req_reply_t (*policy_cb_check_sink_request_t)(const struct device *dev,
const uint32_t request_msg);
/**
 * @brief Callback type used to check if Source Power Supply is ready
 *
 * @param dev USB-C Connector Instance
 * @return true if power supply is ready, else false
 */
typedef bool (*policy_cb_is_ps_ready_t)(const struct device *dev);
/**
 * @brief Callback type used to check if present Contract is still valid
 *
 * @param dev USB-C Connector Instance
 * @param present_contract present contract
 * @return true if present contract is still valid
 */
typedef bool (*policy_cb_present_contract_is_valid_t)(const struct device *dev,
const uint32_t present_contract);
/**
 * @brief Callback type used to request that a different set of Source Caps
 * be sent to the Sink
 *
 * @param dev USB-C Connector Instance
 * @return true if a different set of Source Caps is available
 */
typedef bool (*policy_cb_change_src_caps_t)(const struct device *dev);
/**
 * @brief Callback type used to report the Capabilities received from
 * a Sink Port Partner
 *
 * @param dev USB-C Connector Instance
 * @param pdos pointer to sink cap pdos
 * @param num_pdos number of sink cap pdos
 */
typedef void (*policy_cb_set_port_partner_snk_cap_t)(const struct device *dev, const uint32_t *pdos,
const int num_pdos);
/**
 * @brief Callback type used to get the Rp value that should be placed on
 * the CC lines
 *
 * @param dev USB-C Connector Instance
 * @param rp pointer where the Rp value to advertise is stored
 * @return 0 on success
 */
typedef int (*policy_cb_get_src_rp_t)(const struct device *dev, enum tc_rp_value *rp);
/**
 * @brief Callback type used to enable VBUS
 *
 * @param dev USB-C Connector Instance
 * @param en true if VBUS should be enabled, else false to disable it
 * @return 0 on success
 */
typedef int (*policy_cb_src_en_t)(const struct device *dev, bool en);
/** @}*/
/**
* @brief Start the USB-C Subsystem
*
* @param dev Runtime device structure
*
* @retval 0 on success
*/
int usbc_start(const struct device *dev);
/**
* @brief Suspend the USB-C Subsystem
*
* @param dev Runtime device structure
*
* @retval 0 on success
*/
int usbc_suspend(const struct device *dev);
/**
* @brief Make a request of the USB-C Subsystem
*
* @param dev Runtime device structure
* @param req request
*
* @retval 0 on success
*/
int usbc_request(const struct device *dev, const enum usbc_policy_request_t req);
/**
* @internal
* @brief Bypass the next USB-C stack sleep and execute one more iteration of the state machines.
* Used internally to decrease the response time.
*
* @param dev Runtime device structure
*/
void usbc_bypass_next_sleep(const struct device *dev);
/**
* @brief Set pointer to Device Policy Manager (DPM) data
*
* @param dev Runtime device structure
* @param dpm_data pointer to dpm data
*/
void usbc_set_dpm_data(const struct device *dev, void *dpm_data);
/**
* @brief Get pointer to Device Policy Manager (DPM) data
*
* @param dev Runtime device structure
*
* @retval pointer to dpm data that was set with usbc_set_dpm_data
* @retval NULL if dpm data was not set
*/
void *usbc_get_dpm_data(const struct device *dev);
/**
* @brief Set the callback used to set VCONN control
*
* @param dev Runtime device structure
* @param cb VCONN control callback
*/
void usbc_set_vconn_control_cb(const struct device *dev, const tcpc_vconn_control_cb_t cb);
/**
* @brief Set the callback used to discharge VCONN
*
* @param dev Runtime device structure
* @param cb VCONN discharge callback
*/
void usbc_set_vconn_discharge_cb(const struct device *dev, const tcpc_vconn_discharge_cb_t cb);
/**
* @brief Set the callback used to check a policy
*
* @param dev Runtime device structure
* @param cb callback
*/
void usbc_set_policy_cb_check(const struct device *dev, const policy_cb_check_t cb);
/**
* @brief Set the callback used to notify Device Policy Manager of a
* policy change
*
* @param dev Runtime device structure
* @param cb callback
*/
void usbc_set_policy_cb_notify(const struct device *dev, const policy_cb_notify_t cb);
/**
* @brief Set the callback used to notify Device Policy Manager of WAIT
* message reception
*
* @param dev Runtime device structure
* @param cb callback
*/
void usbc_set_policy_cb_wait_notify(const struct device *dev, const policy_cb_wait_notify_t cb);
/**
* @brief Set the callback used to get the Sink Capabilities
*
* @param dev Runtime device structure
* @param cb callback
*/
void usbc_set_policy_cb_get_snk_cap(const struct device *dev, const policy_cb_get_snk_cap_t cb);
/**
* @brief Set the callback used to store the received Port Partner's
* Source Capabilities
*
* @param dev Runtime device structure
* @param cb callback
*/
void usbc_set_policy_cb_set_src_cap(const struct device *dev, const policy_cb_set_src_cap_t cb);
/**
* @brief Set the callback used to get the Request Data Object (RDO)
*
* @param dev Runtime device structure
* @param cb callback
*/
void usbc_set_policy_cb_get_rdo(const struct device *dev, const policy_cb_get_rdo_t cb);
/**
* @brief Set the callback used to check if the sink power supply is at
* the default level
*
* @param dev Runtime device structure
* @param cb callback
*/
void usbc_set_policy_cb_is_snk_at_default(const struct device *dev,
const policy_cb_is_snk_at_default_t cb);
/**
* @brief Set the callback used to get the Rp value that should be placed on
* the CC lines
*
* @param dev USB-C Connector Instance
* @param cb callback
*/
void usbc_set_policy_cb_get_src_rp(const struct device *dev, const policy_cb_get_src_rp_t cb);
/**
* @brief Set the callback used to enable VBUS
*
* @param dev USB-C Connector Instance
* @param cb callback
*/
void usbc_set_policy_cb_src_en(const struct device *dev, const policy_cb_src_en_t cb);
/**
* @brief Set the callback used to get the Source Capabilities
* from the Device Policy Manager
*
* @param dev USB-C Connector Instance
* @param cb callback
*/
void usbc_set_policy_cb_get_src_caps(const struct device *dev, const policy_cb_get_src_caps_t cb);
/**
* @brief Set the callback used to check if Sink request is valid
*
* @param dev USB-C Connector Instance
* @param cb callback
*/
void usbc_set_policy_cb_check_sink_request(const struct device *dev,
const policy_cb_check_sink_request_t cb);
/**
* @brief Set the callback used to check if Source Power Supply is ready
*
* @param dev USB-C Connector Instance
* @param cb callback
*/
void usbc_set_policy_cb_is_ps_ready(const struct device *dev,
const policy_cb_is_ps_ready_t cb);
/**
* @brief Set the callback to check if present Contract is still valid
*
* @param dev USB-C Connector Instance
* @param cb callback
*/
void usbc_set_policy_cb_present_contract_is_valid(const struct device *dev,
const policy_cb_present_contract_is_valid_t cb);
/**
* @brief Set the callback used to request that a different set of Source Caps
* be sent to the Sink
*
* @param dev USB-C Connector Instance
* @param cb callback
*/
void usbc_set_policy_cb_change_src_caps(const struct device *dev,
const policy_cb_change_src_caps_t cb);
/**
* @brief Set the callback used to store the Capabilities received from a Sink Port Partner
*
* @param dev USB-C Connector Instance
* @param cb callback
*/
void usbc_set_policy_cb_set_port_partner_snk_cap(const struct device *dev,
const policy_cb_set_port_partner_snk_cap_t cb);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_USBC_H_ */
``` | /content/code_sandbox/include/zephyr/usb_c/usbc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,682 |
```objective-c
/*
*
*/
/*
* DESCRIPTION
* Platform independent set of macros for creating a memory segment for
* aggregating data that shall be kept in the elf file but not in the binary.
*/
#ifndef ZEPHYR_INCLUDE_LINKER_LINKER_DEVNULL_H_
/* Define the guard macro: without it the #ifndef above never becomes
 * false and the include guard is inoperative.
 */
#define ZEPHYR_INCLUDE_LINKER_LINKER_DEVNULL_H_
#if defined(CONFIG_LINKER_DEVNULL_MEMORY)
#if defined(CONFIG_XIP)
#if (!defined(ROM_ADDR) && !defined(ROM_BASE)) || !defined(ROM_SIZE)
#error "ROM_SIZE, ROM_ADDR or ROM_BASE not defined"
#endif
#endif /* CONFIG_XIP */
#if (!defined(RAM_ADDR) && !defined(RAM_BASE)) || !defined(RAM_SIZE)
#error "RAM_SIZE, RAM_ADDR or RAM_BASE not defined"
#endif
/* Normalize: accept either the *_ADDR or the *_BASE spelling. */
#if defined(CONFIG_XIP) && !defined(ROM_ADDR)
#define ROM_ADDR ROM_BASE
#endif
#if !defined(RAM_ADDR)
#define RAM_ADDR RAM_BASE
#endif
#define ROM_END_ADDR (ROM_ADDR + ROM_SIZE)
#define DEVNULL_SIZE CONFIG_LINKER_DEVNULL_MEMORY_SIZE
#define ROM_DEVNULL_END_ADDR (ROM_END_ADDR + DEVNULL_SIZE)
#define MAX_ADDR UINT32_MAX
/* Determine where to put the devnull region. It should be adjacent to the ROM
 * region. If ROM starts after RAM or the distance between ROM and RAM is big
 * enough to fit the devnull region then devnull region is placed just after
 * the ROM region. If it cannot be done then the devnull region is placed before
 * the ROM region. It is possible that the devnull region cannot be placed
 * adjacent to the ROM (e.g. ROM starts at 0 and RAM follows ROM). In that
 * case compilation fails and the devnull region is not supported in that
 * configuration.
 */
#if !defined(CONFIG_XIP)
#if RAM_ADDR >= DEVNULL_SIZE
#define DEVNULL_ADDR (RAM_ADDR - DEVNULL_SIZE)
#else
#define DEVNULL_ADDR (RAM_ADDR + RAM_SIZE)
#endif
#else /* CONFIG_XIP */
#if ((ROM_ADDR > RAM_ADDR) && ((MAX_ADDR - ROM_END_ADDR) >= DEVNULL_SIZE)) || \
((ROM_END_ADDR + DEVNULL_SIZE) <= RAM_ADDR)
#define DEVNULL_ADDR ROM_END_ADDR
#elif ROM_ADDR > DEVNULL_SIZE
#define DEVNULL_ADDR (ROM_ADDR - DEVNULL_SIZE)
#else
#error "Cannot place devnull segment adjacent to ROM region."
#endif
#endif /* CONFIG_XIP */
#define DEVNULL_REGION DEVNULL_ROM
#endif /* CONFIG_LINKER_DEVNULL_MEMORY */
#endif /* ZEPHYR_INCLUDE_LINKER_LINKER_DEVNULL_H_ */
``` | /content/code_sandbox/include/zephyr/linker/linker-devnull.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 527 |
```linker script
/* Empty file */
``` | /content/code_sandbox/include/zephyr/linker/app_smem_aligned.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7 |
```linker script
/* following sections are obtained via 'ld --verbose' */
/* Stabs debugging sections. */
SECTION_PROLOGUE(.stab, 0,) { *(.stab) }
SECTION_PROLOGUE(.stabstr, 0,) { *(.stabstr) }
SECTION_PROLOGUE(.stab.excl, 0,) { *(.stab.excl) }
SECTION_PROLOGUE(.stab.exclstr, 0,) { *(.stab.exclstr) }
SECTION_PROLOGUE(.stab.index, 0,) { *(.stab.index) }
SECTION_PROLOGUE(.stab.indexstr, 0,) { *(.stab.indexstr) }
SECTION_PROLOGUE(.gnu.build.attributes, 0,) { *(.gnu.build.attributes .gnu.build.attributes.*) }
SECTION_PROLOGUE(.comment, 0,) { *(.comment) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
SECTION_PROLOGUE(.debug, 0,) { *(.debug) }
SECTION_PROLOGUE(.line, 0,) { *(.line) }
/* GNU DWARF 1 extensions */
SECTION_PROLOGUE(.debug_srcinfo, 0,) { *(.debug_srcinfo) }
SECTION_PROLOGUE(.debug_sfnames, 0,) { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
SECTION_PROLOGUE(.debug_aranges, 0,) { *(.debug_aranges) }
SECTION_PROLOGUE(.debug_pubnames, 0,) { *(.debug_pubnames) }
/* DWARF 2 */
SECTION_PROLOGUE(.debug_info, 0,) { *(.debug_info .gnu.linkonce.wi.*) }
SECTION_PROLOGUE(.debug_abbrev, 0,) { *(.debug_abbrev) }
SECTION_PROLOGUE(.debug_line, 0,) { *(.debug_line .debug_line.* .debug_line_end ) }
SECTION_PROLOGUE(.debug_frame, 0,) { *(.debug_frame) }
SECTION_PROLOGUE(.debug_str, 0,) { *(.debug_str) }
SECTION_PROLOGUE(.debug_loc, 0,) { *(.debug_loc) }
SECTION_PROLOGUE(.debug_macinfo, 0,) { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
SECTION_PROLOGUE(.debug_weaknames, 0,) { *(.debug_weaknames) }
SECTION_PROLOGUE(.debug_funcnames, 0,) { *(.debug_funcnames) }
SECTION_PROLOGUE(.debug_typenames, 0,) { *(.debug_typenames) }
SECTION_PROLOGUE(.debug_varnames, 0,) { *(.debug_varnames) }
/* DWARF 3 */
SECTION_PROLOGUE(.debug_pubtypes, 0,) { *(.debug_pubtypes) }
SECTION_PROLOGUE(.debug_ranges, 0,) { *(.debug_ranges) }
/* DWARF 5 */
SECTION_PROLOGUE(.debug_addr, 0,) { *(.debug_addr) }
SECTION_PROLOGUE(.debug_line_str, 0,) { *(.debug_line_str) }
SECTION_PROLOGUE(.debug_loclists, 0,) { *(.debug_loclists) }
SECTION_PROLOGUE(.debug_macro, 0,) { *(.debug_macro) }
SECTION_PROLOGUE(.debug_names, 0,) { *(.debug_names) }
SECTION_PROLOGUE(.debug_rnglists, 0,) { *(.debug_rnglists) }
SECTION_PROLOGUE(.debug_str_offsets, 0,) { *(.debug_str_offsets) }
SECTION_PROLOGUE(.debug_sup, 0,) { *(.debug_sup) }
``` | /content/code_sandbox/include/zephyr/linker/debug-sections.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 879 |
```objective-c
/*
*
*/
/*
* DESCRIPTION
* Platform independent, commonly used macros and defines related to linker
* script.
*
* This file may be included by:
* - Linker script files: for linker section declarations
* - C files: for external declaration of address or size of linker section
* - Assembly files: for external declaration of address or size of linker
* section
*/
#ifndef ZEPHYR_INCLUDE_LINKER_LINKER_DEFS_H_
#define ZEPHYR_INCLUDE_LINKER_LINKER_DEFS_H_
#include <zephyr/toolchain.h>
#include <zephyr/toolchain/common.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sys/util.h>
#include <zephyr/offsets.h>
/* We need to dummy out DT_NODE_HAS_STATUS when building the unittests.
* Including devicetree.h would require generating dummy header files
* to match what gen_defines creates, so it's easier to just dummy out
* DT_NODE_HAS_STATUS.
*/
#ifdef ZTEST_UNITTEST
#define DT_NODE_HAS_STATUS(node, status) 0
#else
#include <zephyr/devicetree.h>
#endif
#ifdef _LINKER
/*
 * generate a symbol to mark the start of the objects array for
 * the specified object and level, then link all of those objects
 * (sorted by priority). Ensure the objects aren't discarded if there is
 * no direct reference to them.
 *
 * The '?' and '??' glob patterns match one- and two-character priority
 * suffixes respectively, so SORT orders entries by priority digits.
 */
#define CREATE_OBJ_LEVEL(object, level) \
__##object##_##level##_start = .; \
KEEP(*(SORT(.z_##object##_##level?_*))); \
KEEP(*(SORT(.z_##object##_##level??_*)));
/*
* link in shell initialization objects for all modules that use shell and
* their shell commands are automatically initialized by the kernel.
*/
#elif defined(_ASMLANGUAGE)
/* Assembly FILES: declaration defined by the linker script */
GDATA(__bss_start)
GDATA(__bss_num_words)
#ifdef CONFIG_XIP
GDATA(__data_region_load_start)
GDATA(__data_region_start)
GDATA(__data_region_num_words)
#endif
#else /* ! _ASMLANGUAGE */
#include <zephyr/types.h>
/*
* Memory owned by the kernel, to be used as shared memory between
* application threads.
*
* The following are extern symbols from the linker. This enables
* the dynamic k_mem_domain and k_mem_partition creation and alignment
* to the section produced in the linker.
* The policy for this memory will be to initially configure all of it as
* kernel / supervisor thread accessible.
*/
extern char _app_smem_start[];
extern char _app_smem_end[];
extern char _app_smem_size[];
extern char _app_smem_rom_start[];
extern char _app_smem_num_words[];
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
extern char _app_smem_pinned_start[];
extern char _app_smem_pinned_end[];
extern char _app_smem_pinned_size[];
extern char _app_smem_pinned_num_words[];
#endif
/* Memory owned by the kernel. Start and end will be aligned for memory
* management/protection hardware for the target architecture.
*
* Consists of all kernel-side globals, all kernel objects, all thread stacks,
* and all currently unused RAM.
*
* Except for the stack of the currently executing thread, none of this memory
* is normally accessible to user threads unless specifically granted at
* runtime.
*/
extern char __kernel_ram_start[];
extern char __kernel_ram_end[];
extern char __kernel_ram_size[];
/* Used by z_bss_zero or arch-specific implementation */
extern char __bss_start[];
extern char __bss_end[];
/* Used by z_data_copy() or arch-specific implementation */
#ifdef CONFIG_XIP
extern char __data_region_load_start[];
extern char __data_region_start[];
extern char __data_region_end[];
#endif /* CONFIG_XIP */
#ifdef CONFIG_MMU
/* Virtual addresses of page-aligned kernel image mapped into RAM at boot */
extern char z_mapped_start[];
extern char z_mapped_end[];
#endif /* CONFIG_MMU */
/* Includes text and rodata */
extern char __rom_region_start[];
extern char __rom_region_end[];
extern char __rom_region_size[];
/* Includes all ROMable data, i.e. the size of the output image file. */
extern char _flash_used[];
/* datas, bss, noinit */
extern char _image_ram_start[];
extern char _image_ram_end[];
extern char _image_ram_size[];
extern char __text_region_start[];
extern char __text_region_end[];
extern char __text_region_size[];
extern char __rodata_region_start[];
extern char __rodata_region_end[];
extern char __rodata_region_size[];
extern char _vector_start[];
extern char _vector_end[];
#ifdef CONFIG_SW_VECTOR_RELAY
extern char __vector_relay_table[];
#endif
#ifdef CONFIG_COVERAGE_GCOV
extern char __gcov_bss_start[];
extern char __gcov_bss_end[];
extern char __gcov_bss_size[];
#endif /* CONFIG_COVERAGE_GCOV */
/* end address of image, used by newlib for the heap */
extern char _end[];
#if (DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay))
extern char __ccm_data_rom_start[];
extern char __ccm_start[];
extern char __ccm_data_start[];
extern char __ccm_data_end[];
extern char __ccm_bss_start[];
extern char __ccm_bss_end[];
extern char __ccm_noinit_start[];
extern char __ccm_noinit_end[];
extern char __ccm_end[];
#endif
#if (DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_itcm), okay))
extern char __itcm_start[];
extern char __itcm_end[];
extern char __itcm_size[];
extern char __itcm_load_start[];
#endif
#if (DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay))
extern char __dtcm_data_start[];
extern char __dtcm_data_end[];
extern char __dtcm_bss_start[];
extern char __dtcm_bss_end[];
extern char __dtcm_noinit_start[];
extern char __dtcm_noinit_end[];
extern char __dtcm_data_load_start[];
extern char __dtcm_start[];
extern char __dtcm_end[];
#endif
#if (DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ocm), okay))
extern char __ocm_data_start[];
extern char __ocm_data_end[];
extern char __ocm_bss_start[];
extern char __ocm_bss_end[];
extern char __ocm_start[];
extern char __ocm_end[];
extern char __ocm_size[];
#endif
/* Used by the Security Attribution Unit to configure the
* Non-Secure Callable region.
*/
#ifdef CONFIG_ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS
extern char __sg_start[];
extern char __sg_end[];
extern char __sg_size[];
#endif /* CONFIG_ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS */
/*
* Non-cached kernel memory region, currently only available on ARM Cortex-M7
* with a MPU. Start and end will be aligned for memory management/protection
* hardware for the target architecture.
*
* All the functions with '__nocache' keyword will be placed into this
* section.
*/
#ifdef CONFIG_NOCACHE_MEMORY
extern char _nocache_ram_start[];
extern char _nocache_ram_end[];
extern char _nocache_ram_size[];
#endif /* CONFIG_NOCACHE_MEMORY */
/* Memory owned by the kernel. Start and end will be aligned for memory
* management/protection hardware for the target architecture.
*
* All the functions with '__ramfunc' keyword will be placed into this
* section, stored in RAM instead of FLASH.
*/
#ifdef CONFIG_ARCH_HAS_RAMFUNC_SUPPORT
extern char __ramfunc_start[];
extern char __ramfunc_end[];
extern char __ramfunc_size[];
extern char __ramfunc_load_start[];
#endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */
/* Memory owned by the kernel. Memory region for thread privilege stack buffers,
* currently only applicable on ARM Cortex-M architecture when building with
* support for User Mode.
*
* All thread privilege stack buffers will be placed into this section.
*/
#ifdef CONFIG_USERSPACE
extern char z_priv_stacks_ram_start[];
extern char z_priv_stacks_ram_end[];
extern char z_user_stacks_start[];
extern char z_user_stacks_end[];
extern char z_kobject_data_begin[];
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
extern char __tdata_start[];
extern char __tdata_end[];
extern char __tdata_size[];
extern char __tdata_align[];
extern char __tbss_start[];
extern char __tbss_end[];
extern char __tbss_size[];
extern char __tbss_align[];
extern char __tls_start[];
extern char __tls_end[];
extern char __tls_size[];
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
/* lnkr_boot_start[] and lnkr_boot_end[]
* must encapsulate all the boot sections.
*/
extern char lnkr_boot_start[];
extern char lnkr_boot_end[];
extern char lnkr_boot_text_start[];
extern char lnkr_boot_text_end[];
extern char lnkr_boot_text_size[];
extern char lnkr_boot_data_start[];
extern char lnkr_boot_data_end[];
extern char lnkr_boot_data_size[];
extern char lnkr_boot_rodata_start[];
extern char lnkr_boot_rodata_end[];
extern char lnkr_boot_rodata_size[];
extern char lnkr_boot_bss_start[];
extern char lnkr_boot_bss_end[];
extern char lnkr_boot_bss_size[];
extern char lnkr_boot_noinit_start[];
extern char lnkr_boot_noinit_end[];
extern char lnkr_boot_noinit_size[];
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
/* lnkr_pinned_start[] and lnkr_pinned_end[] must encapsulate
* all the pinned sections as these are used by
* the MMU code to mark the physical page frames with
* K_MEM_PAGE_FRAME_PINNED.
*/
extern char lnkr_pinned_start[];
extern char lnkr_pinned_end[];
extern char lnkr_pinned_text_start[];
extern char lnkr_pinned_text_end[];
extern char lnkr_pinned_text_size[];
extern char lnkr_pinned_data_start[];
extern char lnkr_pinned_data_end[];
extern char lnkr_pinned_data_size[];
extern char lnkr_pinned_rodata_start[];
extern char lnkr_pinned_rodata_end[];
extern char lnkr_pinned_rodata_size[];
extern char lnkr_pinned_bss_start[];
extern char lnkr_pinned_bss_end[];
extern char lnkr_pinned_bss_size[];
extern char lnkr_pinned_noinit_start[];
extern char lnkr_pinned_noinit_end[];
extern char lnkr_pinned_noinit_size[];
/**
 * @brief Test whether an address lies inside the pinned image region.
 *
 * @param addr Address to test.
 *
 * @return True if @p addr falls within [lnkr_pinned_start, lnkr_pinned_end),
 *         false otherwise.
 */
__pinned_func
static inline bool lnkr_is_pinned(uint8_t *addr)
{
	uint8_t *pin_lo = (uint8_t *)lnkr_pinned_start;
	uint8_t *pin_hi = (uint8_t *)lnkr_pinned_end;

	return (addr >= pin_lo) && (addr < pin_hi);
}
/**
 * @brief Test whether a memory region lies entirely in the pinned region.
 *
 * @param addr Start address of the region to test.
 * @param sz   Size of the region in bytes.
 *
 * @return True if [addr, addr + sz) is fully contained within
 *         [lnkr_pinned_start, lnkr_pinned_end), false otherwise.
 */
__pinned_func
static inline bool lnkr_is_region_pinned(uint8_t *addr, size_t sz)
{
	/* Use <= for the upper bound: addr + sz is one past the region's
	 * last byte, so a region ending exactly at lnkr_pinned_end is
	 * still fully pinned. (A strict < would wrongly reject that
	 * boundary case.)
	 */
	if ((addr >= (uint8_t *)lnkr_pinned_start) &&
	    ((addr + sz) <= (uint8_t *)lnkr_pinned_end)) {
		return true;
	} else {
		return false;
	}
}
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
#endif /* ! _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_LINKER_LINKER_DEFS_H_ */
``` | /content/code_sandbox/include/zephyr/linker/linker-defs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,435 |
```linker script
#ifdef CONFIG_THREAD_LOCAL_STORAGE
SECTION_DATA_PROLOGUE(tdata,,)
{
*(.tdata .tdata.* .gnu.linkonce.td.*);
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_DATA_PROLOGUE(tbss,,)
{
*(.tbss .tbss.* .gnu.linkonce.tb.* .tcommon);
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
/*
 * These need to be outside of the tdata/tbss
 * sections, or else they would be considered
 * thread-local variables and the code would use
 * the wrong values.
 */
#ifdef CONFIG_XIP
/* The "master copy" of tdata should be only in flash on XIP systems */
PROVIDE(__tdata_start = LOADADDR(tdata));
#else
PROVIDE(__tdata_start = ADDR(tdata));
#endif
/* Round the tdata size up to the section's alignment.
 * NOTE(review): presumably so per-thread copies of the TLS template
 * keep every member correctly aligned — confirm against the TLS
 * runtime setup code.
 */
PROVIDE(__tdata_align = ALIGNOF(tdata));
PROVIDE(__tdata_size = (SIZEOF(tdata) + __tdata_align - 1) & ~(__tdata_align - 1));
PROVIDE(__tdata_end = __tdata_start + __tdata_size);
/* Same alignment rounding for tbss (zero-initialized TLS data). */
PROVIDE(__tbss_align = ALIGNOF(tbss));
PROVIDE(__tbss_start = ADDR(tbss));
PROVIDE(__tbss_size = (SIZEOF(tbss) + __tbss_align - 1) & ~(__tbss_align - 1));
PROVIDE(__tbss_end = __tbss_start + __tbss_size);
/* The overall TLS area spans from the start of tdata to the end of tbss. */
PROVIDE(__tls_start = __tdata_start);
PROVIDE(__tls_end = __tbss_end);
PROVIDE(__tls_size = __tbss_end - __tdata_start);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
``` | /content/code_sandbox/include/zephyr/linker/thread-local-storage.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 401 |
```linker script
/*
* Added after the very last allocation that might land in RAM to define the various
* end-of-used-memory symbols
*/
SECTION_PROLOGUE(.last_ram_section, (NOLOAD),)
{
/* Optional arch-supplied alignment applied before the end-of-RAM
 * symbols are pinned; defined elsewhere when needed.
 */
#ifdef LAST_RAM_ALIGN
LAST_RAM_ALIGN
#endif
/* End of all RAM consumed by the image, and the image's RAM footprint. */
_image_ram_end = .;
_image_ram_size = _image_ram_end - _image_ram_start;
_end = .; /* end of image */
/* End of the boot-time kernel mapping (pairs with z_mapped_start). */
z_mapped_end = .;
} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
``` | /content/code_sandbox/include/zephyr/linker/ram-end.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 110 |
```linker script
/*
 * Common "noinit" section: non-initialized RAM objects that are
 * deliberately not cleared during the boot process.
 */
SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),)
{
/*
 * This section is used for non-initialized objects that
 * will not be cleared during the boot process.
 */
*(.noinit)
*(".noinit.*")
#ifdef CONFIG_USERSPACE
/* Bounds of all .user_stacks* input sections (user-mode stack buffers). */
z_user_stacks_start = .;
*(.user_stacks*)
z_user_stacks_end = .;
#endif /* CONFIG_USERSPACE */
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() CMake function.
 */
#include <snippets-noinit.ld>
} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#include "kobject-priv-stacks.ld"
``` | /content/code_sandbox/include/zephyr/linker/common-noinit.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 155 |
```linker script
#include <zephyr/linker/common-rom/common-rom-kernel-devices.ld>
#include <zephyr/linker/common-rom/common-rom-ztest.ld>
#include <zephyr/linker/common-rom/common-rom-init.ld>
#include <zephyr/linker/common-rom/common-rom-net.ld>
#include <zephyr/linker/common-rom/common-rom-bt.ld>
#include <zephyr/linker/common-rom/common-rom-logging.ld>
#include <zephyr/linker/common-rom/common-rom-debug.ld>
#include <zephyr/linker/common-rom/common-rom-interrupt-controllers.ld>
#include <zephyr/linker/common-rom/common-rom-misc.ld>
``` | /content/code_sandbox/include/zephyr/linker/common-rom.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 156 |
```linker script
/*
* Special section used by LLEXT if CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID
* is enabled. Declare this section to prevent it from being considered orphan.
*
* This section is used to temporarily save the exported symbols' names in the
* Zephyr ELF for post-processing, but it is not included in the final binary.
*
* NOTE: This section MUST start at address 0, as the post-processing scripts
* assume that the address of any data in this section (i.e., symbol names) is
* strictly equivalent to the offset inside the section.
*/
#ifdef CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID
SECTION_PROLOGUE(llext_exports_strtab, 0 (COPY), )
{
KEEP(*(llext_exports_strtab))
}
#endif
``` | /content/code_sandbox/include/zephyr/linker/llext-sections.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 182 |
```linker script
/*
 * Device dependency handle arrays, emitted in two link passes
 * (see LINKER_DEVICE_DEPS_PASS1 below).
 */
/* Bounds of the device dependency handle arrays. */
__device_deps_start = .;
/* Each link pass emits a different set of input sections; keep
 * whichever set this pass produced, sorted by section name.
 */
#ifdef LINKER_DEVICE_DEPS_PASS1
KEEP(*(SORT(.__device_deps_pass1*)));
#else
KEEP(*(SORT(.__device_deps_pass2*)));
#endif /* LINKER_DEVICE_DEPS_PASS1 */
__device_deps_end = .;
``` | /content/code_sandbox/include/zephyr/linker/device-deps.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 61 |
```objective-c
/*
*
*/
/**
 * @file
 * @brief MetaWare (MWDT) toolchain linker defs
 *
 * This header file defines the necessary macros used by the linker script for
 * use with the MetaWare linker.
 */
#ifndef ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_MWDT_H_
#define ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_MWDT_H_
/*
* mwdt linker doesn't have the following directives
*/
#define ASSERT(x, y)
#define SUBALIGN(x) ALIGN(x)
/*
* The GROUP_START() and GROUP_END() macros are used to define a group
* of sections located in one memory area, such as RAM, ROM, etc.
* The <where> parameter is the name of the memory area.
*/
#define GROUP_START(where)
#define GROUP_END(where)
/*
* The GROUP_LINK_IN() macro is located at the end of the section
* description and tells the linker that this section is located in
* the memory area specified by <where> argument.
*/
#define GROUP_LINK_IN(where) > where
/**
* The GROUP_ROM_LINK_IN() macro is located at the end of the section
* description and tells the linker that this a read-only section
* that is physically placed at the 'lregion` argument.
*
*/
#define GROUP_ROM_LINK_IN(vregion, lregion) > lregion
/*
* As GROUP_LINK_IN(), but takes a second argument indicating the
* memory region (e.g. "ROM") for the load address. Used for
* initialized data sections that on XIP platforms must be copied at
* startup.
*
* And, because output directives in GNU ld are "sticky", this must
* also be used on the first section *after* such an initialized data
* section, specifying the same memory region (e.g. "RAM") for both
* vregion and lregion.
*/
#ifdef CONFIG_XIP
#define GROUP_DATA_LINK_IN(vregion, lregion) > vregion AT > lregion
#else
#define GROUP_DATA_LINK_IN(vregion, lregion) > vregion
#endif
/**
* Route memory for read-write sections that are NOT loaded; typically this
* is only used for 'BSS' and 'noinit'.
*/
#ifdef CONFIG_XIP
#define GROUP_NOLOAD_LINK_IN(vregion, lregion) > vregion AT > vregion
#else
#define GROUP_NOLOAD_LINK_IN(vregion, lregion) > vregion
#endif
/*
* The SECTION_PROLOGUE() macro is used to define the beginning of a section.
* The <name> parameter is the name of the section, and the <option> parameter
* is to include any special options such as (NOLOAD). Page alignment has its
* own parameter since it needs abstraction across the different toolchains.
* If not required, the <options> and <align> parameters should be left blank.
*/
#define SECTION_PROLOGUE(name, options, align) name options : align
/*
 * As for SECTION_PROLOGUE(), except that this one must (!) be used
 * for data sections which on XIP platforms will have differing
 * virtual and load addresses (i.e. they'll be copied into RAM at
 * program startup). Such a section must (!) also use
 * GROUP_DATA_LINK_IN() to specify the correct output load address.
 */
#ifdef CONFIG_XIP
#define SECTION_DATA_PROLOGUE(name, options, align) \
name options ALIGN(8) : align
#else
#define SECTION_DATA_PROLOGUE(name, options, align) name options : align
#endif
#define SORT_BY_NAME(x) SORT(x)
#endif /* ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_MWDT_H_ */
``` | /content/code_sandbox/include/zephyr/linker/linker-tool-mwdt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 765 |
```linker script
#if LINKER_ZEPHYR_FINAL && CONFIG_ISR_TABLES_LOCAL_DECLARATION
/* NOTE(review): in the final link pass with locally declared ISR
 * tables, the vector and IRQ-vector-table input sections are dropped
 * here — presumably superseded by the generated tables; confirm
 * against the ISR table generation step.
 */
/DISCARD/ :
{
KEEP(*(.vectors))
KEEP(*(_IRQ_VECTOR_TABLE_SECTION_SYMS))
}
#endif
``` | /content/code_sandbox/include/zephyr/linker/isr-local-drop-unused.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 46 |
```linker script
/*
*
*/
/* Empty file */
``` | /content/code_sandbox/include/zephyr/linker/app_smem_pinned_unaligned.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 8 |
```linker script
/*
*
*/
/* This creates a special section which is not included by the final binary,
* instead it is consumed by the gen_isr_tables.py script.
*
* What we create here is a data structure:
*
* struct {
* uint32_t num_vectors; <- typically CONFIG_NUM_IRQS
* struct _isr_list isrs[]; <- Usually of smaller size than num_vectors
* }
*
* Which indicates the memory address of the number of isrs that were
* defined, the total number of IRQ lines in the system, followed by
* an appropriate number of instances of struct _isr_list. See
* include/sw_isr_table.h
*
* You will need to declare a bogus memory region for IDT_LIST. It doesn't
* matter where this region goes as it is stripped from the final ELF image.
* The address doesn't even have to be valid on the target. However, it
* shouldn't overlap any other regions. On most arches the following should be
* fine:
*
* MEMORY {
* .. other regions ..
* IDT_LIST : ORIGIN = 0xfffff7ff, LENGTH = 2K
* }
*/
#ifndef LINKER_ZEPHYR_FINAL
/* Intermediate link passes: keep the interrupt metadata in the bogus
 * IDT_LIST region so gen_isr_tables.py can consume it (see the
 * comment above).
 */
SECTION_PROLOGUE(.intList,,)
{
KEEP(*(.irq_info*))
KEEP(*(.intList*))
} GROUP_ROM_LINK_IN(IDT_LIST, IDT_LIST)
#else
/* Final pass: the metadata has served its purpose; strip it from the
 * final ELF image entirely.
 */
/DISCARD/ :
{
KEEP(*(.irq_info*))
KEEP(*(.intList*))
}
#endif
``` | /content/code_sandbox/include/zephyr/linker/intlist.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 321 |
```linker script
#include <zephyr/linker/iterable_sections.h>
#if defined(CONFIG_NETWORKING)
#ifndef NETWORK_RAM_SECTIONS
#define NETWORK_RAM_SECTIONS \
ITERABLE_SECTION_RAM(net_if, Z_LINK_ITERABLE_SUBALIGN) \
ITERABLE_SECTION_RAM(net_if_dev, Z_LINK_ITERABLE_SUBALIGN) \
ITERABLE_SECTION_RAM(net_l2, Z_LINK_ITERABLE_SUBALIGN) \
ITERABLE_SECTION_RAM(eth_bridge, Z_LINK_ITERABLE_SUBALIGN)
#endif
#endif /* NETWORKING */
#ifdef CONFIG_ARM_SCMI
ITERABLE_SECTION_RAM(scmi_protocol, Z_LINK_ITERABLE_SUBALIGN)
#endif /* CONFIG_ARM_SCMI */
#if defined(CONFIG_GEN_SW_ISR_TABLE) && defined(CONFIG_DYNAMIC_INTERRUPTS)
SECTION_DATA_PROLOGUE(sw_isr_table,,)
{
/*
* Some arch requires an entry to be aligned to arch
* specific boundary for using double word load
* instruction. See include/sw_isr_table.h.
*/
. = ALIGN(CONFIG_ARCH_SW_ISR_TABLE_ALIGN);
*(_SW_ISR_TABLE_SECTION_SYMS)
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#if defined(CONFIG_SHARED_INTERRUPTS)
SECTION_DATA_PROLOGUE(shared_sw_isr_table,,)
{
/* TODO: does this section require alignment? */
KEEP(*(_SHARED_SW_ISR_TABLE_SECTION_SYMS))
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif
#endif
SECTION_DATA_PROLOGUE(device_states,,)
{
/* Device states used by the device objects. */
__device_states_start = .;
KEEP(*(".z_devstate"));
KEEP(*(".z_devstate.*"));
__device_states_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#ifdef CONFIG_PM_DEVICE
ITERABLE_SECTION_RAM(pm_device_slots, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_DEVICE_DEPS_DYNAMIC)
SECTION_DATA_PROLOGUE(device_deps,,)
{
#include "device-deps.ld"
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_DEVICE_DEPS_DYNAMIC */
ITERABLE_SECTION_RAM_GC_ALLOWED(log_mpsc_pbuf, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM(log_msg_ptr, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM(log_dynamic, Z_LINK_ITERABLE_SUBALIGN)
#ifdef CONFIG_USERSPACE
/* All kernel objects within are assumed to be either completely
* initialized at build time, or initialized automatically at runtime
* via iteration before the POST_KERNEL phase.
*
* These two symbols only used by gen_kobject_list.py
*/
_static_kernel_objects_begin = .;
#endif /* CONFIG_USERSPACE */
ITERABLE_SECTION_RAM_GC_ALLOWED(k_timer, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_mem_slab, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_heap, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_mutex, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_stack, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_msgq, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_mbox, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_pipe, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_sem, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_event, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_queue, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_fifo, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_lifo, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_condvar, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM_GC_ALLOWED(sys_mem_blocks_ptr, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM(net_buf_pool, Z_LINK_ITERABLE_SUBALIGN)
#if defined(CONFIG_NETWORKING)
NETWORK_RAM_SECTIONS
#endif /* NETWORKING */
#if defined(CONFIG_PCIE)
ITERABLE_SECTION_RAM(pcie_dev, Z_LINK_ITERABLE_SUBALIGN)
#endif /* PCIE */
#if defined(CONFIG_USB_DEVICE_STACK)
SECTION_DATA_PROLOGUE(usb_descriptor,,SUBALIGN(1))
{
__usb_descriptor_start = .;
*(".usb.descriptor")
KEEP(*(SORT_BY_NAME(".usb.descriptor*")))
__usb_descriptor_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
ITERABLE_SECTION_RAM(usb_cfg_data, Z_LINK_ITERABLE_SUBALIGN)
#endif /* CONFIG_USB_DEVICE_STACK */
#if defined(CONFIG_USB_DEVICE_BOS)
SECTION_DATA_PROLOGUE(usb_bos_desc,,SUBALIGN(1))
{
__usb_bos_desc_start = .;
*(".usb.bos_desc")
KEEP(*(SORT_BY_NAME(".usb.bos_desc*")))
__usb_bos_desc_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_USB_DEVICE_BOS */
#if defined(CONFIG_RTIO)
ITERABLE_SECTION_RAM(rtio, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM(rtio_iodev, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM(rtio_sqe_pool, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_RAM(rtio_cqe_pool, Z_LINK_ITERABLE_SUBALIGN)
#endif /* CONFIG_RTIO */
#if defined(CONFIG_SENSING)
ITERABLE_SECTION_RAM(sensing_sensor, Z_LINK_ITERABLE_SUBALIGN)
#endif /* CONFIG_SENSING */
#if defined(CONFIG_ZBUS)
ITERABLE_SECTION_RAM(zbus_channel_observation_mask, 1)
#endif /* CONFIG_ZBUS */
#if defined(CONFIG_DEVICE_MUTABLE)
ITERABLE_SECTION_RAM(device_mutable, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_BT_ZEPHYR_NUS)
ITERABLE_SECTION_RAM(bt_nus_inst, Z_LINK_ITERABLE_SUBALIGN)
#endif
#ifdef CONFIG_USERSPACE
_static_kernel_objects_end = .;
#endif
``` | /content/code_sandbox/include/zephyr/linker/common-ram.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,303 |
```objective-c
/*
*
*/
/**
* @file
* @brief Definitions of various linker Sections.
*
* Linker Section declarations used by linker script, C files and Assembly
* files.
*/
#ifndef ZEPHYR_INCLUDE_LINKER_SECTIONS_H_
#define ZEPHYR_INCLUDE_LINKER_SECTIONS_H_
#define _TEXT_SECTION_NAME text
#define _RODATA_SECTION_NAME rodata
#define _CTOR_SECTION_NAME ctors
/* Linker issue with XIP where the name "data" cannot be used */
#define _DATA_SECTION_NAME datas
#define _BSS_SECTION_NAME bss
#define _NOINIT_SECTION_NAME noinit
#define _APP_SMEM_SECTION_NAME app_smem
#define _APP_DATA_SECTION_NAME app_datas
#define _APP_BSS_SECTION_NAME app_bss
#define _APP_NOINIT_SECTION_NAME app_noinit
#define _APP_SMEM_PINNED_SECTION_NAME app_smem_pinned
#define _UNDEFINED_SECTION_NAME undefined
/* Interrupts */
#define _IRQ_VECTOR_TABLE_SECTION_NAME .gnu.linkonce.irq_vector_table
#define _IRQ_VECTOR_TABLE_SECTION_SYMS .gnu.linkonce.irq_vector_table*
#define _SW_ISR_TABLE_SECTION_NAME .gnu.linkonce.sw_isr_table
#define _SW_ISR_TABLE_SECTION_SYMS .gnu.linkonce.sw_isr_table*
#ifdef CONFIG_SHARED_INTERRUPTS
#define _SHARED_SW_ISR_TABLE_SECTION_NAME .gnu.linkonce.shared_sw_isr_table
#define _SHARED_SW_ISR_TABLE_SECTION_SYMS .gnu.linkonce.shared_sw_isr_table*
#endif /* CONFIG_SHARED_INTERRUPTS */
/* Architecture-specific sections */
#if defined(CONFIG_ARM)
#define _KINETIS_FLASH_CONFIG_SECTION_NAME kinetis_flash_config
#define _TI_CCFG_SECTION_NAME .ti_ccfg
#define _CCM_DATA_SECTION_NAME .ccm_data
#define _CCM_BSS_SECTION_NAME .ccm_bss
#define _CCM_NOINIT_SECTION_NAME .ccm_noinit
#define _ITCM_SECTION_NAME .itcm
#define _DTCM_DATA_SECTION_NAME .dtcm_data
#define _DTCM_BSS_SECTION_NAME .dtcm_bss
#define _DTCM_NOINIT_SECTION_NAME .dtcm_noinit
#define _OCM_DATA_SECTION_NAME .ocm_data
#define _OCM_BSS_SECTION_NAME .ocm_bss
#endif
#define _IMX_BOOT_CONF_SECTION_NAME .boot_hdr.conf
#define _IMX_BOOT_DATA_SECTION_NAME .boot_hdr.data
#define _IMX_BOOT_IVT_SECTION_NAME .boot_hdr.ivt
#define _IMX_BOOT_DCD_SECTION_NAME .boot_hdr.dcd_data
#define _IMX_BOOT_CONTAINER_SECTION_NAME .boot_hdr.container
#define _STM32_SDRAM1_SECTION_NAME .stm32_sdram1
#define _STM32_SDRAM2_SECTION_NAME .stm32_sdram2
#define _STM32_BACKUP_SRAM_SECTION_NAME .stm32_backup_sram
#ifdef CONFIG_NOCACHE_MEMORY
#define _NOCACHE_SECTION_NAME nocache
#endif
/* Symbol table section */
#if defined(CONFIG_SYMTAB)
#define _SYMTAB_INFO_SECTION_NAME .gnu.linkonce.symtab.info
#define _SYMTAB_ENTRY_SECTION_NAME .gnu.linkonce.symtab.entry
#define _SYMTAB_SECTION_SYMS .gnu.linkonce.symtab*
#endif /* CONFIG_SYMTAB */
#if defined(CONFIG_LINKER_USE_BOOT_SECTION)
#define BOOT_TEXT_SECTION_NAME boot_text
#define BOOT_BSS_SECTION_NAME boot_bss
#define BOOT_RODATA_SECTION_NAME boot_rodata
#define BOOT_DATA_SECTION_NAME boot_data
#define BOOT_NOINIT_SECTION_NAME boot_noinit
#endif
#if defined(CONFIG_LINKER_USE_PINNED_SECTION)
#define PINNED_TEXT_SECTION_NAME pinned_text
#define PINNED_BSS_SECTION_NAME pinned_bss
#define PINNED_RODATA_SECTION_NAME pinned_rodata
#define PINNED_DATA_SECTION_NAME pinned_data
#define PINNED_NOINIT_SECTION_NAME pinned_noinit
#endif
/* Short section references for use in ASM files */
#if defined(_ASMLANGUAGE)
/* Various text section names */
#define TEXT text
/* Various data type section names */
#define BSS bss
#define RODATA rodata
#define DATA data
#define NOINIT noinit
#if defined(CONFIG_LINKER_USE_BOOT_SECTION)
#define BOOT_TEXT BOOT_TEXT_SECTION_NAME
#define BOOT_BSS BOOT_BSS_SECTION_NAME
#define BOOT_RODATA BOOT_RODATA_SECTION_NAME
#define BOOT_DATA BOOT_DATA_SECTION_NAME
#define BOOT_NOINIT BOOT_NOINIT_SECTION_NAME
#else
#define BOOT_TEXT TEXT
#define BOOT_BSS BSS
#define BOOT_RODATA RODATA
#define BOOT_DATA DATA
#define BOOT_NOINIT NOINIT
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
#if defined(CONFIG_LINKER_USE_PINNED_SECTION)
#define PINNED_TEXT PINNED_TEXT_SECTION_NAME
#define PINNED_BSS PINNED_BSS_SECTION_NAME
#define PINNED_RODATA PINNED_RODATA_SECTION_NAME
#define PINNED_DATA PINNED_DATA_SECTION_NAME
#define PINNED_NOINIT PINNED_NOINIT_SECTION_NAME
#else
#define PINNED_TEXT TEXT
#define PINNED_BSS BSS
#define PINNED_RODATA RODATA
#define PINNED_DATA DATA
#define PINNED_NOINIT NOINIT
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
#endif /* _ASMLANGUAGE */
#include <zephyr/linker/section_tags.h>
#endif /* ZEPHYR_INCLUDE_LINKER_SECTIONS_H_ */
``` | /content/code_sandbox/include/zephyr/linker/sections.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,191 |
```linker script
#ifdef CONFIG_USERSPACE
/* We need to reserve room for the gperf generated hash functions.
* Fortunately, unlike the data tables, the size of the code is
* reasonably predictable.
*
* The linker will error out complaining that the location pointer
* is moving backwards if the reserved room isn't large enough.
*/
_kobject_text_area_start = .;
*(".kobject_data.literal*")
*(".kobject_data.text*")
_kobject_text_area_end = .;
_kobject_text_area_used = _kobject_text_area_end - _kobject_text_area_start;
#ifndef LINKER_ZEPHYR_FINAL
#ifdef CONFIG_DYNAMIC_OBJECTS
PROVIDE(z_object_gperf_find = .);
PROVIDE(z_object_gperf_wordlist_foreach = .);
#else
PROVIDE(k_object_find = .);
PROVIDE(k_object_wordlist_foreach = .);
#endif
#endif
/* In a valid build the MAX function will always evaluate to the
second argument below, but to give the user a good error message
when the area overflows we need to temporarily corrupt the
location counter, and then detect the overflow with an assertion
later on. */
. = MAX(., _kobject_text_area_start + CONFIG_KOBJECT_TEXT_AREA);
ASSERT(
CONFIG_KOBJECT_TEXT_AREA >= _kobject_text_area_used,
"Reserved space for kobject text area is too small. \
Please change CONFIG_KOBJECT_TEXT_AREA to a larger number."
);
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/include/zephyr/linker/kobject-text.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 331 |
```objective-c
/* Macros for tagging symbols and putting them in the correct sections. */
/*
*
*/
#ifndef ZEPHYR_INCLUDE_LINKER_SECTION_TAGS_H_
#define ZEPHYR_INCLUDE_LINKER_SECTION_TAGS_H_
#include <zephyr/toolchain.h>
#if !defined(_ASMLANGUAGE)
#include <zephyr/linker/sections.h>
#define __noinit __in_section_unique(_NOINIT_SECTION_NAME)
#define __noinit_named(name) __in_section_unique_named(_NOINIT_SECTION_NAME, name)
#define __irq_vector_table Z_GENERIC_SECTION(_IRQ_VECTOR_TABLE_SECTION_NAME)
#define __sw_isr_table Z_GENERIC_SECTION(_SW_ISR_TABLE_SECTION_NAME)
#ifdef CONFIG_SHARED_INTERRUPTS
#define __shared_sw_isr_table Z_GENERIC_SECTION(_SHARED_SW_ISR_TABLE_SECTION_NAME)
#endif /* CONFIG_SHARED_INTERRUPTS */
/* Attribute macros to place code and data into IMR memory */
#define __imr __in_section_unique(imr)
#define __imrdata __in_section_unique(imrdata)
#if defined(CONFIG_ARM)
#define __kinetis_flash_config_section __in_section_unique(_KINETIS_FLASH_CONFIG_SECTION_NAME)
#define __ti_ccfg_section Z_GENERIC_SECTION(_TI_CCFG_SECTION_NAME)
#define __ccm_data_section Z_GENERIC_SECTION(_CCM_DATA_SECTION_NAME)
#define __ccm_bss_section Z_GENERIC_SECTION(_CCM_BSS_SECTION_NAME)
#define __ccm_noinit_section Z_GENERIC_SECTION(_CCM_NOINIT_SECTION_NAME)
#define __itcm_section Z_GENERIC_SECTION(_ITCM_SECTION_NAME)
#define __dtcm_data_section Z_GENERIC_SECTION(_DTCM_DATA_SECTION_NAME)
#define __dtcm_bss_section Z_GENERIC_SECTION(_DTCM_BSS_SECTION_NAME)
#define __dtcm_noinit_section Z_GENERIC_SECTION(_DTCM_NOINIT_SECTION_NAME)
#define __ocm_data_section Z_GENERIC_SECTION(_OCM_DATA_SECTION_NAME)
#define __ocm_bss_section Z_GENERIC_SECTION(_OCM_BSS_SECTION_NAME)
#define __imx_boot_conf_section Z_GENERIC_SECTION(_IMX_BOOT_CONF_SECTION_NAME)
#define __imx_boot_data_section Z_GENERIC_SECTION(_IMX_BOOT_DATA_SECTION_NAME)
#define __imx_boot_ivt_section Z_GENERIC_SECTION(_IMX_BOOT_IVT_SECTION_NAME)
#define __imx_boot_dcd_section Z_GENERIC_SECTION(_IMX_BOOT_DCD_SECTION_NAME)
#define __imx_boot_container_section Z_GENERIC_SECTION(_IMX_BOOT_CONTAINER_SECTION_NAME)
#define __stm32_sdram1_section Z_GENERIC_SECTION(_STM32_SDRAM1_SECTION_NAME)
#define __stm32_sdram2_section Z_GENERIC_SECTION(_STM32_SDRAM2_SECTION_NAME)
#define __stm32_backup_sram_section Z_GENERIC_SECTION(_STM32_BACKUP_SRAM_SECTION_NAME)
#endif /* CONFIG_ARM */
#if defined(CONFIG_NOCACHE_MEMORY)
#define __nocache __in_section_unique(_NOCACHE_SECTION_NAME)
#define __nocache_noinit __nocache
#else
#define __nocache
#define __nocache_noinit __noinit
#endif /* CONFIG_NOCACHE_MEMORY */
#if defined(CONFIG_KERNEL_COHERENCE)
#define __incoherent __in_section_unique(cached)
#if defined(CONFIG_USERSPACE)
#define __stackmem Z_GENERIC_SECTION(.user_stacks)
#else
#define __stackmem __incoherent
#endif /* CONFIG_USERSPACE */
#define __kstackmem __incoherent
#else
#define __incoherent
#define __stackmem Z_GENERIC_SECTION(.user_stacks)
#define __kstackmem __noinit
#endif /* CONFIG_KERNEL_COHERENCE */
#if defined(CONFIG_LINKER_USE_BOOT_SECTION)
#define __boot_func Z_GENERIC_DOT_SECTION(BOOT_TEXT_SECTION_NAME)
#define __boot_data Z_GENERIC_DOT_SECTION(BOOT_DATA_SECTION_NAME)
#define __boot_rodata Z_GENERIC_DOT_SECTION(BOOT_RODATA_SECTION_NAME)
#define __boot_bss Z_GENERIC_DOT_SECTION(BOOT_BSS_SECTION_NAME)
#define __boot_noinit Z_GENERIC_DOT_SECTION(BOOT_NOINIT_SECTION_NAME)
#else
#define __boot_func
#define __boot_data
#define __boot_rodata
#define __boot_bss
#define __boot_noinit __noinit
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
#if defined(CONFIG_LINKER_USE_PINNED_SECTION)
#define __pinned_func Z_GENERIC_DOT_SECTION(PINNED_TEXT_SECTION_NAME)
#define __pinned_data Z_GENERIC_DOT_SECTION(PINNED_DATA_SECTION_NAME)
#define __pinned_rodata Z_GENERIC_DOT_SECTION(PINNED_RODATA_SECTION_NAME)
#define __pinned_bss Z_GENERIC_DOT_SECTION(PINNED_BSS_SECTION_NAME)
#define __pinned_noinit Z_GENERIC_DOT_SECTION(PINNED_NOINIT_SECTION_NAME)
#else
#define __pinned_func
#define __pinned_data
#define __pinned_rodata
#define __pinned_bss
#define __pinned_noinit __noinit
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
#if defined(CONFIG_LINKER_USE_PINNED_SECTION)
#define __isr __pinned_func
#else
#define __isr
#endif
/* Symbol table section */
#if defined(CONFIG_SYMTAB)
#define __symtab_info Z_GENERIC_SECTION(_SYMTAB_INFO_SECTION_NAME)
#define __symtab_entry Z_GENERIC_SECTION(_SYMTAB_ENTRY_SECTION_NAME)
#endif /* CONFIG_SYMTAB */
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_LINKER_SECTION_TAGS_H_ */
``` | /content/code_sandbox/include/zephyr/linker/section_tags.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,122 |
```linker script
/*
*
*/
#if defined (CONFIG_CPP)
SECTION_PROLOGUE(.gcc_except_table,,ONLY_IF_RO)
{
*(.gcc_except_table .gcc_except_table.*)
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#if defined (CONFIG_CPP_EXCEPTIONS)
SECTION_PROLOGUE(.eh_frame_hdr,,)
{
*(.eh_frame_hdr)
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_PROLOGUE(.eh_frame,,)
{
KEEP (*(SORT_NONE(EXCLUDE_FILE (*crtend.o) .eh_frame)))
KEEP (*(SORT_NONE(.eh_frame)))
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_CPP_EXCEPTIONS */
#endif /* CONFIG_CPP */
``` | /content/code_sandbox/include/zephyr/linker/cplusplus-rom.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 162 |
```linker script
/*
*
*/
/* Empty file */
``` | /content/code_sandbox/include/zephyr/linker/app_smem_pinned_aligned.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 9 |
```linker script
/*
 * Selects the aligned or unaligned application shared-memory fragment at
 * preprocessing time via a macro-valued #include.
 *
 * This hackish way of including files is due to CMake issues:
 * path_to_url
 * path_to_url
 *
 * When using the "Unix Makefiles" generator, CMake simply
 * greps for "#include" to generate dependency list.
 * So if doing it normally, both files are being included
 * in the dependency list. This creates weird dependency
 * issue:
 *
 * 1. Using A.ld to create a linker script A.cmd.
 * 2. Using A.cmd to generate A_prebuilt.elf.
 * 3. Using A_prebuilt.elf to create B.ld.
 * 4. Creating B.cmd with B.ld.
 * 5. Creating B_prebuilt.elf using B.cmd.
 *
 * Since the dependency list of A.cmd contains both
 * A.ld and B.ld, when make is invoked again, B.ld
 * is newer than A.cmd so everything from this point on
 * gets rebuilt. In order to break this cycle, this
 * hack needs to be used since CMake does not parse
 * macros, and thus these will not appear in
 * the dependency list. The dependencies should then be
 * put in CMakeLists.txt instead.
 *
 * Note: Ninja generator does not suffer from this issue.
 */
#ifdef LINKER_APP_SMEM_UNALIGNED
#define APP_SMEM_LD <app_smem_unaligned.ld>
#else
#define APP_SMEM_LD <app_smem_aligned.ld>
#endif
#include APP_SMEM_LD
#undef APP_SMEM_LD
``` | /content/code_sandbox/include/zephyr/linker/app_smem.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 325 |
```linker script
/*
 * Reserves RAM for the gperf-generated privileged (kernel-mode) stacks used
 * when CONFIG_USERSPACE and CONFIG_GEN_PRIV_STACKS are enabled.
 */
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_GEN_PRIV_STACKS
SECTION_DATA_PROLOGUE(priv_stacks_noinit,(NOLOAD),)
{
z_priv_stacks_ram_start = .;
/* During LINKER_KOBJECT_PREBUILT and LINKER_ZEPHYR_PREBUILT,
 * space needs to be reserved for the rodata that will be
 * produced by gperf during the final stages of linking.
 * The alignment and size are produced by
 * scripts/build/gen_kobject_placeholders.py. These are here
 * so the addresses to kobjects would remain the same
 * during the final stages of linking (LINKER_ZEPHYR_FINAL).
 */
#if defined(LINKER_ZEPHYR_PREBUILT)
#include <zephyr/linker-kobject-prebuilt-priv-stacks.h>
#ifdef KOBJECT_PRIV_STACKS_ALIGN
/* Placeholder reservation only; no input sections at this stage. */
. = ALIGN(KOBJECT_PRIV_STACKS_ALIGN);
. = . + KOBJECT_PRIV_STACKS_SZ;
#endif
#endif /* LINKER_ZEPHYR_PREBUILT */
#if defined(LINKER_ZEPHYR_FINAL)
#include <zephyr/linker-kobject-prebuilt-priv-stacks.h>
#ifdef KOBJECT_PRIV_STACKS_ALIGN
. = ALIGN(KOBJECT_PRIV_STACKS_ALIGN);
#endif
*(".priv_stacks.noinit")
#endif /* LINKER_ZEPHYR_FINAL */
z_priv_stacks_ram_end = .;
#if defined(LINKER_ZEPHYR_FINAL)
#ifdef KOBJECT_PRIV_STACKS_ALIGN
/* Fail the link if the placeholder reservation was too small. */
z_priv_stacks_ram_used = z_priv_stacks_ram_end - z_priv_stacks_ram_start;
ASSERT(z_priv_stacks_ram_used <= KOBJECT_PRIV_STACKS_SZ,
"scripts/build/gen_kobject_placeholders.py did not reserve enough space \
for privileged stacks."
);
/* Padding is needed to preserve kobject addresses
 * if we have reserved more space than needed.
 */
. = MAX(., z_priv_stacks_ram_start + KOBJECT_PRIV_STACKS_SZ);
#endif /* KOBJECT_PRIV_STACKS_ALIGN */
#endif /* LINKER_ZEPHYR_FINAL */
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_GEN_PRIV_STACKS */
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/include/zephyr/linker/kobject-priv-stacks.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 471 |
```linker script
/*
* .rel.* are for relocation.
* These are being produced by compiler/linker.
* Specify these here so they are not considered orphan sections.
*/
SECTION_PROLOGUE(.rel.plt,,)
{
*(.rel.plt)
PROVIDE_HIDDEN (__rel_iplt_start = .);
*(.rel.iplt)
PROVIDE_HIDDEN (__rel_iplt_end = .);
}
SECTION_PROLOGUE(.rela.plt,,)
{
*(.rela.plt)
PROVIDE_HIDDEN (__rela_iplt_start = .);
*(.rela.iplt)
PROVIDE_HIDDEN (__rela_iplt_end = .);
}
SECTION_PROLOGUE(.rel.dyn,,)
{
*(.rel.*)
}
SECTION_PROLOGUE(.rela.dyn,,)
{
*(.rela.*)
}
``` | /content/code_sandbox/include/zephyr/linker/rel-sections.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 198 |
```objective-c
/*
 * Linker-script macros that lay out "iterable sections": contiguous arrays
 * of same-type structures registered via STRUCT_SECTION_ITERABLE().
 */
#ifndef INCLUDE_ZEPHYR_LINKER_ITERABLE_SECTIONS_H_
#define INCLUDE_ZEPHYR_LINKER_ITERABLE_SECTIONS_H_
/**
 * @addtogroup iterable_section_apis
 * @{
 */
/* Emit _<type>_list_start/_end bounds around all matching input sections,
 * sorted by name and KEEP'd so the linker cannot garbage-collect them.
 */
#define Z_LINK_ITERABLE(struct_type) \
_CONCAT(_##struct_type, _list_start) = .; \
KEEP(*(SORT_BY_NAME(._##struct_type.static.*))); \
_CONCAT(_##struct_type, _list_end) = .
/* As above, but sorted numerically: the first SORT pattern matches one-digit
 * suffixes (*_?_*) and the second two-digit ones (*_??_*), so "_2" sorts
 * before "_10".
 */
#define Z_LINK_ITERABLE_NUMERIC(struct_type) \
_CONCAT(_##struct_type, _list_start) = .; \
KEEP(*(SORT(._##struct_type.static.*_?_*))); \
KEEP(*(SORT(._##struct_type.static.*_??_*))); \
_CONCAT(_##struct_type, _list_end) = .
/* Z_LINK_ITERABLE preceded by an ALIGN(align) of the location counter. */
#define Z_LINK_ITERABLE_ALIGNED(struct_type, align) \
. = ALIGN(align); \
Z_LINK_ITERABLE(struct_type);
/* Variant without KEEP(): unreferenced entries may be garbage-collected. */
#define Z_LINK_ITERABLE_GC_ALLOWED(struct_type) \
_CONCAT(_##struct_type, _list_start) = .; \
*(SORT_BY_NAME(._##struct_type.static.*)); \
_CONCAT(_##struct_type, _list_end) = .
/* Default subalignment applied to iterable-section entries. */
#define Z_LINK_ITERABLE_SUBALIGN CONFIG_LINKER_ITERABLE_SUBALIGN
/**
 * @brief Define a read-only iterable section output.
 *
 * @details
 * Define an output section which will set up an iterable area
 * of equally-sized data structures. For use with STRUCT_SECTION_ITERABLE().
 * Input sections will be sorted by name, per ld's SORT_BY_NAME.
 *
 * This macro should be used for read-only data.
 *
 * Note that this keeps the symbols in the image even though
 * they are not being directly referenced. Use this when symbols
 * are indirectly referenced by iterating through the section.
 *
 * @param struct_type element type; also names the output section
 *                    (<struct_type>_area)
 * @param subalign    SUBALIGN() value applied to the input sections
 */
#define ITERABLE_SECTION_ROM(struct_type, subalign) \
SECTION_PROLOGUE(struct_type##_area,,SUBALIGN(subalign)) \
{ \
Z_LINK_ITERABLE(struct_type); \
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
/**
 * @brief Define a read-only iterable section output, sorted numerically.
 *
 * This version of ITERABLE_SECTION_ROM() sorts the entries numerically, that
 * is, `SECNAME_10` will come after `SECNAME_2`. `_` separator is required, and
 * up to 2 numeric digits are handled (0-99).
 *
 * @param struct_type element type; also names the output section
 * @param subalign    SUBALIGN() value applied to the input sections
 *
 * @see ITERABLE_SECTION_ROM()
 */
#define ITERABLE_SECTION_ROM_NUMERIC(struct_type, subalign) \
SECTION_PROLOGUE(struct_type##_area, EMPTY, SUBALIGN(subalign)) \
{ \
Z_LINK_ITERABLE_NUMERIC(struct_type); \
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
/**
 * @brief Define a garbage collectable read-only iterable section output.
 *
 * @details
 * Define an output section which will set up an iterable area
 * of equally-sized data structures. For use with STRUCT_SECTION_ITERABLE().
 * Input sections will be sorted by name, per ld's SORT_BY_NAME.
 *
 * This macro should be used for read-only data.
 *
 * Note that the symbols within the section can be garbage collected.
 *
 * @param struct_type element type; also names the output section
 * @param subalign    SUBALIGN() value applied to the input sections
 */
#define ITERABLE_SECTION_ROM_GC_ALLOWED(struct_type, subalign) \
SECTION_PROLOGUE(struct_type##_area,,SUBALIGN(subalign)) \
{ \
Z_LINK_ITERABLE_GC_ALLOWED(struct_type); \
} GROUP_LINK_IN(ROMABLE_REGION)
/**
 * @brief Define a read-write iterable section output.
 *
 * @details
 * Define an output section which will set up an iterable area
 * of equally-sized data structures. For use with STRUCT_SECTION_ITERABLE().
 * Input sections will be sorted by name, per ld's SORT_BY_NAME.
 *
 * This macro should be used for read-write data that is modified at runtime.
 *
 * Note that this keeps the symbols in the image even though
 * they are not being directly referenced. Use this when symbols
 * are indirectly referenced by iterating through the section.
 *
 * @param struct_type element type; also names the output section
 * @param subalign    SUBALIGN() value applied to the input sections
 */
#define ITERABLE_SECTION_RAM(struct_type, subalign) \
SECTION_DATA_PROLOGUE(struct_type##_area,,SUBALIGN(subalign)) \
{ \
Z_LINK_ITERABLE(struct_type); \
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
/**
 * @brief Define a read-write iterable section output, sorted numerically.
 *
 * This version of ITERABLE_SECTION_RAM() sorts the entries numerically, that
 * is, `SECNAME_10` will come after `SECNAME_2`. `_` separator is required, and
 * up to 2 numeric digits are handled (0-99).
 *
 * @param struct_type element type; also names the output section
 * @param subalign    SUBALIGN() value applied to the input sections
 *
 * @see ITERABLE_SECTION_RAM()
 */
/* Use SECTION_DATA_PROLOGUE (not SECTION_PROLOGUE): this is a read-write data
 * section paired with GROUP_DATA_LINK_IN, so on XIP targets it needs the
 * implicit ALIGN_WITH_INPUT to keep LMA in step with VMA, exactly like
 * ITERABLE_SECTION_RAM().
 */
#define ITERABLE_SECTION_RAM_NUMERIC(struct_type, subalign) \
SECTION_DATA_PROLOGUE(struct_type##_area, EMPTY, SUBALIGN(subalign)) \
{ \
Z_LINK_ITERABLE_NUMERIC(struct_type); \
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
/**
 * @brief Define a garbage collectable read-write iterable section output.
 *
 * @details
 * Define an output section which will set up an iterable area
 * of equally-sized data structures. For use with STRUCT_SECTION_ITERABLE().
 * Input sections will be sorted by name, per ld's SORT_BY_NAME.
 *
 * This macro should be used for read-write data that is modified at runtime.
 *
 * Note that the symbols within the section can be garbage collected.
 *
 * @param struct_type element type; also names the output section
 * @param subalign    SUBALIGN() value applied to the input sections
 */
#define ITERABLE_SECTION_RAM_GC_ALLOWED(struct_type, subalign) \
SECTION_DATA_PROLOGUE(struct_type##_area,,SUBALIGN(subalign)) \
{ \
Z_LINK_ITERABLE_GC_ALLOWED(struct_type); \
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
/**
 * @}
 */ /* end of struct_section_apis */
#endif /* INCLUDE_ZEPHYR_LINKER_ITERABLE_SECTIONS_H_ */
``` | /content/code_sandbox/include/zephyr/linker/iterable_sections.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,212 |
```linker script
/* Empty file */
``` | /content/code_sandbox/include/zephyr/linker/app_smem_unaligned.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```linker script
/*
 * Reserves space for the gperf-generated kernel-object metadata when
 * CONFIG_USERSPACE is enabled, keeping kobject addresses stable across
 * the prebuilt and final link stages.
 */
#ifdef CONFIG_USERSPACE
z_kobject_data_begin = .;
SECTION_DATA_PROLOGUE(kobject_data,,)
{
#if !defined(LINKER_ZEPHYR_PREBUILT) && \
!defined(LINKER_ZEPHYR_FINAL)
#ifdef CONFIG_DYNAMIC_OBJECTS
/* Early stage: reserve the thread-index bitmap by hand. */
PROVIDE(_thread_idx_map = .);
. = . + CONFIG_MAX_THREAD_BYTES;
#endif
#endif /* !LINKER_ZEPHYR_PREBUILT && !LINKER_ZEPHYR_FINAL */
/* During LINKER_KOBJECT_PREBUILT and LINKER_ZEPHYR_PREBUILT,
 * space needs to be reserved for the rodata that will be
 * produced by gperf during the final stages of linking.
 * The alignment and size are produced by
 * scripts/build/gen_kobject_placeholders.py. These are here
 * so the addresses to kobjects would remain the same
 * during the final stages of linking (LINKER_ZEPHYR_FINAL).
 */
#if defined(LINKER_ZEPHYR_PREBUILT)
#include <zephyr/linker-kobject-prebuilt-data.h>
#ifdef CONFIG_DYNAMIC_OBJECTS
/* This is produced by gperf. Put a place holder here
 * to avoid compilation error.
 */
PROVIDE(_thread_idx_map = .);
#endif
#ifdef KOBJECT_DATA_ALIGN
. = ALIGN(KOBJECT_DATA_ALIGN);
. = . + KOBJECT_DATA_SZ;
#endif
#endif /* LINKER_ZEPHYR_PREBUILT */
#if defined(LINKER_ZEPHYR_FINAL)
#include <zephyr/linker-kobject-prebuilt-data.h>
#ifdef KOBJECT_DATA_ALIGN
. = ALIGN(KOBJECT_DATA_ALIGN);
_kobject_data_area_start = .;
#endif
*(".kobject_data.data*")
*(".kobject_data.sdata*")
#ifdef KOBJECT_DATA_ALIGN
/* Fail the link if the placeholder reservation was too small. */
_kobject_data_area_end = .;
_kobject_data_area_used = _kobject_data_area_end - _kobject_data_area_start;
ASSERT(_kobject_data_area_used <= KOBJECT_DATA_SZ,
"scripts/build/gen_kobject_placeholders.py did not reserve enough space \
for kobject data."
);
/* Padding is needed to preserve kobject addresses
 * if we have reserved more space than needed.
 */
. = MAX(., _kobject_data_area_start + KOBJECT_DATA_SZ);
#endif /* KOBJECT_DATA_ALIGN */
#endif /* LINKER_ZEPHYR_FINAL */
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/include/zephyr/linker/kobject-data.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 545 |
```objective-c
/*
 * Dispatch header: pulls in the linker-tool macro set matching the active
 * toolchain's linker-command macro.
 */
/**
 * @file
 * @brief Toolchain-agnostic linker defs
 *
 * This header file is used to automatically select the proper set of macro
 * definitions (based on the toolchain) for the linker script.
 */
#ifndef ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_H_
#define ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_H_
/* Only meaningful while preprocessing a linker script (_LINKER defined). */
#if defined(_LINKER)
#if defined(__GCC_LINKER_CMD__)
#include <zephyr/linker/linker-tool-gcc.h>
#elif defined(__MWDT_LINKER_CMD__)
#include <zephyr/linker/linker-tool-mwdt.h>
#elif defined(__LLD_LINKER_CMD__)
#include <zephyr/linker/linker-tool-lld.h>
#else
#error "Unknown toolchain"
#endif
#endif
#endif /* ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_H_ */
``` | /content/code_sandbox/include/zephyr/linker/linker-tool.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 178 |
```linker script
/*
 * Selects the aligned or unaligned pinned app shared-memory fragment at
 * preprocessing time via a macro-valued #include.
 */
/*
 * This hackish way of including files is due to CMake issues:
 * path_to_url
 * path_to_url
 *
 * When using the "Unix Makefiles" generator, CMake simply
 * greps for "#include" to generate dependency list.
 * So if doing it normally, both files are being included
 * in the dependency list. This creates weird dependency
 * issue:
 *
 * 1. Using A.ld to create a linker script A.cmd.
 * 2. Using A.cmd to generate A_prebuilt.elf.
 * 3. Using A_prebuilt.elf to create B.ld.
 * 4. Creating B.cmd with B.ld.
 * 5. Creating B_prebuilt.elf using B.cmd.
 *
 * Since the dependency list of A.cmd contains both
 * A.ld and B.ld, when make is invoked again, B.ld
 * is newer than A.cmd so everything from this point on
 * gets rebuilt. In order to break this cycle, this
 * hack needs to be used since CMake does not parse
 * macros, and thus these will not appear in
 * the dependency list. The dependencies should then be
 * put in CMakeLists.txt instead.
 *
 * Note: Ninja generator does not suffer from this issue.
 */
#ifdef LINKER_APP_SMEM_UNALIGNED
#define APP_SMEM_LD <app_smem_pinned_unaligned.ld>
#else
#define APP_SMEM_LD <app_smem_pinned_aligned.ld>
#endif
#include APP_SMEM_LD
#undef APP_SMEM_LD
``` | /content/code_sandbox/include/zephyr/linker/app_smem_pinned.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 331 |
```linker script
/*
 * Read-write C++ runtime sections, routed as loadable data (copied to RAM
 * at startup on XIP systems).
 */
#if defined (CONFIG_CPP)
/* Writable portion of the exception LSDA, if any (ONLY_IF_RW). */
SECTION_DATA_PROLOGUE(.gcc_except_table,,ONLY_IF_RW)
{
*(.gcc_except_table .gcc_except_table.*)
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#if defined (CONFIG_CPP_EXCEPTIONS)
/* GCC transactional-memory clone table; crtend.o's contribution is kept
 * last, mirroring the .eh_frame ordering in cplusplus-rom.ld.
 */
SECTION_PROLOGUE(.tm_clone_table,,)
{
KEEP (*(SORT_NONE(EXCLUDE_FILE (*crtend.o) .tm_clone_table)))
KEEP (*(SORT_NONE(.tm_clone_table)))
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_CPP_EXCEPTIONS */
#endif /* CONFIG_CPP */
``` | /content/code_sandbox/include/zephyr/linker/cplusplus-ram.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 131 |
```linker script
/* Contents of the IRQ vector table section; skipped entirely when the final
 * link stage is combined with CONFIG_ISR_TABLES_LOCAL_DECLARATION.
 */
#if !(LINKER_ZEPHYR_FINAL && CONFIG_ISR_TABLES_LOCAL_DECLARATION)
. = ALIGN(CONFIG_ARCH_IRQ_VECTOR_TABLE_ALIGN);
KEEP(*(_IRQ_VECTOR_TABLE_SECTION_SYMS))
#endif
/*
 * Some ARM platforms require this symbol to be placed after the IRQ vector
 * table (like STM32F0). The symbol defined here is overriding the one in
 * arch/arm/core/vector_table.ld when the IRQ vector table is enabled.
 */
_vector_end = .;
```objective-c
/*
 * Overrides of the GCC linker-tool macros required when linking with LLD.
 */
/**
 * @file
 * @brief LLVM LLD linker defs
 *
 * This header file defines the necessary macros used by the linker script for
 * use with the LLD linker.
 */
#ifndef ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_LLD_H_
#define ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_LLD_H_
/* Inherit everything from the GCC definitions, then override below. */
#include <zephyr/linker/linker-tool-gcc.h>
/**
 * @def SECTION_PROLOGUE
 *
 * The SECTION_PROLOGUE() macro is used to define the beginning of a section.
 *
 * When --omagic (-N) option is provided to LLD then only the first output
 * section of given region has aligned LMA (by default, without --omagic, LLD
 * aligns LMA and VMA of every section to the same value) and the difference
 * between VMA addresses (0 if this is the first section) is added.
 * The difference between LMA and VMA is constant for every section, so this
 * emulates ALIGN_WITH_INPUT option present in GNU LD (required by XIP systems).
 *
 * The --omagic flag is defined in cmake/linker/lld/target_baremetal.cmake
 *
 * @param name Name of the output section
 * @param options Section options, such as (NOLOAD), or left blank
 * @param align Alignment directives, such as SUBALIGN(). May be blank.
 */
#undef SECTION_PROLOGUE
#define SECTION_PROLOGUE(name, options, align) \
name options : align
/**
 * @def SECTION_DATA_PROLOGUE
 *
 * Same as for SECTION_PROLOGUE(), except that this one must be used
 * for data sections which on XIP platforms will have differing
 * virtual and load addresses (i.e. they'll be copied into RAM at
 * program startup). Such a section must also use
 * GROUP_DATA_LINK_IN to specify the correct output load address.
 *
 * This is equivalent to SECTION_PROLOGUE() when linking using LLD.
 *
 * @param name Name of the output section
 * @param options Section options, or left blank
 * @param align Alignment directives, such as SUBALIGN(). May be blank.
 */
#undef SECTION_DATA_PROLOGUE
#define SECTION_DATA_PROLOGUE(name, options, align) \
SECTION_PROLOGUE(name, options, align)
#endif /* ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_LLD_H_ */
``` | /content/code_sandbox/include/zephyr/linker/linker-tool-lld.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 507 |
```objective-c
/*
 * Per-architecture OUTPUT_FORMAT/OUTPUT_ARCH selection for the GNU linker.
 */
/**
 * @file
 * @brief GCC toolchain linker defs
 *
 * This header file defines the necessary macros used by the linker script for
 * use with the GCC linker.
 */
#ifndef ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_GCC_H_
#define ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_GCC_H_
#include <zephyr/kernel/mm.h>
#if defined(CONFIG_ARM)
#if defined(CONFIG_BIG_ENDIAN)
#define OUTPUT_FORMAT_ "elf32-bigarm"
#else
#define OUTPUT_FORMAT_ "elf32-littlearm"
#endif
OUTPUT_FORMAT(OUTPUT_FORMAT_)
#elif defined(CONFIG_ARM64)
OUTPUT_FORMAT("elf64-littleaarch64")
#elif defined(CONFIG_ARC)
#if defined(CONFIG_ISA_ARCV3) && defined(CONFIG_64BIT)
OUTPUT_FORMAT("elf64-littlearc64")
#elif defined(CONFIG_ISA_ARCV3) && !defined(CONFIG_64BIT)
/* NOTE(review): 32-bit ARCv3 selects the "elf32-littlearc64" BFD target
 * name — looks intentional, but confirm against the ARC toolchain.
 */
OUTPUT_FORMAT("elf32-littlearc64")
#else
OUTPUT_FORMAT("elf32-littlearc", "elf32-bigarc", "elf32-littlearc")
#endif
#elif defined(CONFIG_X86)
#if defined(CONFIG_X86_64)
OUTPUT_FORMAT("elf64-x86-64")
OUTPUT_ARCH("i386:x86-64")
#else
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH("i386")
#endif
#elif defined(CONFIG_NIOS2)
OUTPUT_FORMAT("elf32-littlenios2", "elf32-bignios2", "elf32-littlenios2")
#elif defined(CONFIG_RISCV)
OUTPUT_ARCH("riscv")
#ifdef CONFIG_64BIT
OUTPUT_FORMAT("elf64-littleriscv")
#else
OUTPUT_FORMAT("elf32-littleriscv")
#endif
#elif defined(CONFIG_XTENSA)
/* Not needed */
#elif defined(CONFIG_MIPS)
OUTPUT_ARCH("mips")
#elif defined(CONFIG_ARCH_POSIX)
/* Not needed */
#elif defined(CONFIG_SPARC)
OUTPUT_FORMAT("elf32-sparc")
#else
#error Arch not supported.
#endif
/*
 * The GROUP_START() and GROUP_END() macros are used to define a group
 * of sections located in one memory area, such as RAM, ROM, etc.
 * The <where> parameter is the name of the memory area.
 */
/* For GNU ld these expand to nothing; the grouping is purely notational. */
#define GROUP_START(where)
#define GROUP_END(where)
/**
 * @def GROUP_LINK_IN
 *
 * Route memory to a specified memory area
 *
 * The GROUP_LINK_IN() macro is located at the end of the section
 * description and tells the linker that this section is located in
 * the memory area specified by 'where' argument.
 *
 * This macro is intentionally undefined for CONFIG_MMU systems when
 * CONFIG_KERNEL_VM_BASE is not the same as CONFIG_SRAM_BASE_ADDRESS,
 * as both the LMA and VMA destinations must be known for all sections
 * as this corresponds to physical vs. virtual location.
 *
 * @param where Destination memory area
 */
#if defined(CONFIG_ARCH_POSIX)
#define GROUP_LINK_IN(where)
#elif !defined(K_MEM_IS_VM_KERNEL)
#define GROUP_LINK_IN(where) > where
#endif
/**
 * @def GROUP_ROM_LINK_IN
 *
 * Route memory for a read-only section
 *
 * The GROUP_ROM_LINK_IN() macro is located at the end of the section
 * description and tells the linker that this is a read-only section
 * that is physically placed at the 'lregion` argument.
 *
 * If CONFIG_XIP is active, the 'lregion' area is flash memory.
 *
 * If CONFIG_MMU is active, the vregion argument will be used to
 * determine where this is located in the virtual memory map, otherwise
 * it is ignored.
 *
 * @param vregion Output VMA (only used if CONFIG_MMU where LMA != VMA)
 * @param lregion Output LMA
 */
#if defined(CONFIG_ARCH_POSIX)
#define GROUP_ROM_LINK_IN(vregion, lregion)
#elif defined(K_MEM_IS_VM_KERNEL)
#define GROUP_ROM_LINK_IN(vregion, lregion) > vregion AT > lregion
#else
#define GROUP_ROM_LINK_IN(vregion, lregion) > lregion
#endif
/**
 * @def GROUP_DATA_LINK_IN
 *
 * Route memory for read-write sections that are loaded.
 *
 * Used for initialized data sections that on XIP platforms must be copied at
 * startup.
 *
 * @param vregion Output VMA
 * @param lregion Output LMA (only used if CONFIG_MMU if VMA != LMA,
 * or CONFIG_XIP)
 */
#if defined(CONFIG_ARCH_POSIX)
#define GROUP_DATA_LINK_IN(vregion, lregion)
#elif defined(CONFIG_XIP) || defined(K_MEM_IS_VM_KERNEL)
#define GROUP_DATA_LINK_IN(vregion, lregion) > vregion AT > lregion
#else
#define GROUP_DATA_LINK_IN(vregion, lregion) > vregion
#endif
/**
 * @def GROUP_NOLOAD_LINK_IN
 *
 * Route memory for read-write sections that are NOT loaded; typically this
 * is only used for 'BSS' and 'noinit'.
 *
 * @param vregion Output VMA
 * @param lregion Output LMA (only used if CONFIG_MMU if VMA != LMA,
 * corresponds to physical location)
 */
#if defined(CONFIG_ARCH_POSIX)
#define GROUP_NOLOAD_LINK_IN(vregion, lregion)
#elif defined(K_MEM_IS_VM_KERNEL)
#define GROUP_NOLOAD_LINK_IN(vregion, lregion) > vregion AT > lregion
#elif defined(CONFIG_XIP)
#define GROUP_NOLOAD_LINK_IN(vregion, lregion) > vregion AT > vregion
#else
#define GROUP_NOLOAD_LINK_IN(vregion, lregion) > vregion
#endif
/**
 * @def SECTION_PROLOGUE
 *
 * The SECTION_PROLOGUE() macro is used to define the beginning of a section.
 *
 * On MMU systems where VMA != LMA there is an implicit ALIGN_WITH_INPUT
 * specified.
 *
 * Note: overridden by linker-tool-lld.h when linking with LLD.
 *
 * @param name Name of the output section
 * @param options Section options, such as (NOLOAD), or left blank
 * @param align Alignment directives, such as SUBALIGN(). ALIGN() itself is
 * not allowed. May be blank.
 */
#ifdef K_MEM_IS_VM_KERNEL
/* If we have a virtual memory map we need ALIGN_WITH_INPUT in all sections */
#define SECTION_PROLOGUE(name, options, align) \
name options : ALIGN_WITH_INPUT align
#else
#define SECTION_PROLOGUE(name, options, align) \
name options : align
#endif
/**
 * @def SECTION_DATA_PROLOGUE
 *
 * Same as for SECTION_PROLOGUE(), except that this one must be used
 * for data sections which on XIP platforms will have differing
 * virtual and load addresses (i.e. they'll be copied into RAM at
 * program startup). Such a section must also use
 * GROUP_DATA_LINK_IN to specify the correct output load address.
 *
 * This is equivalent to SECTION_PROLOGUE() on non-XIP systems.
 * On XIP systems there is an implicit ALIGN_WITH_INPUT specified.
 *
 * @param name Name of the output section
 * @param options Section options, or left blank
 * @param align Alignment directives, such as SUBALIGN(). ALIGN() itself is
 * not allowed. May be blank.
 */
#if defined(CONFIG_XIP)
/* The align argument must be forwarded here too: dropping it would silently
 * discard SUBALIGN() requests from every data section on XIP builds.
 */
#define SECTION_DATA_PROLOGUE(name, options, align) \
name options : ALIGN_WITH_INPUT align
#else
#define SECTION_DATA_PROLOGUE(name, options, align) \
SECTION_PROLOGUE(name, options, align)
#endif
/* Input-section selector for legacy COMMON symbols. */
#define COMMON_SYMBOLS *(COMMON)
#endif /* ZEPHYR_INCLUDE_LINKER_LINKER_TOOL_GCC_H_ */
``` | /content/code_sandbox/include/zephyr/linker/linker-tool-gcc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,647 |
```objective-c
/*
 * Runtime helpers for querying linker-defined memory regions.
 */
#ifndef ZEPHYR_INCLUDE_LINKER_UTILS_H_
#define ZEPHYR_INCLUDE_LINKER_UTILS_H_
#include <stdbool.h>
/**
 * @brief Check if address is in read only section.
 *
 * Note that this may return false if the address lies outside
 * the compiler's default read only sections (e.g. .rodata
 * section), depending on the linker script used. This also
 * applies to constants with explicit section attributes.
 *
 * @param addr Address.
 *
 * @return True if address identified within read only section.
 */
static inline bool linker_is_in_rodata(const void *addr)
{
#if defined(CONFIG_LINKER_USE_PINNED_SECTION)
/* Check the pinned rodata bounds first when pinned sections exist. */
extern const char lnkr_pinned_rodata_start[];
extern const char lnkr_pinned_rodata_end[];
if (((const char *)addr >= (const char *)lnkr_pinned_rodata_start) &&
((const char *)addr < (const char *)lnkr_pinned_rodata_end)) {
return true;
}
#endif
#if defined(CONFIG_ARM) || defined(CONFIG_ARC) || defined(CONFIG_X86) || \
defined(CONFIG_ARM64) || defined(CONFIG_NIOS2) || \
defined(CONFIG_RISCV) || defined(CONFIG_SPARC) || \
defined(CONFIG_MIPS) || defined(CONFIG_XTENSA)
/* Linker-provided bounds of the architecture's read-only data region. */
extern char __rodata_region_start[];
extern char __rodata_region_end[];
#define RO_START __rodata_region_start
#define RO_END __rodata_region_end
#else
/* Unknown arch: empty [0, 0) region, so the range test below is false. */
#define RO_START 0
#define RO_END 0
#endif
return (((const char *)addr >= (const char *)RO_START) &&
((const char *)addr < (const char *)RO_END));
#undef RO_START
#undef RO_END
}
#endif /* ZEPHYR_INCLUDE_LINKER_UTILS_H_ */
``` | /content/code_sandbox/include/zephyr/linker/utils.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 384 |
```linker script
/*
 * Iterable-section lists consumed by the ztest framework (suites, unit
 * tests, rules, expected results), emitted as a single read-only section.
 */
SECTION_PROLOGUE(ztest,,)
{
Z_LINK_ITERABLE(ztest_expected_result_entry);
Z_LINK_ITERABLE(ztest_suite_node);
Z_LINK_ITERABLE(ztest_unit_test);
Z_LINK_ITERABLE(ztest_test_rule);
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-ztest.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```linker script
/*
 * Read-only iterable table of interrupt-controller entries, with a fixed
 * 4-byte subalignment.
 */
#include <zephyr/linker/iterable_sections.h>
ITERABLE_SECTION_ROM(intc_table, 4)
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-interrupt-controllers.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 29 |
```linker script
/* Bluetooth subsystem iterable sections (callbacks, fixed L2CAP channels,
 * static GATT services, Mesh hooks), each gated on its Kconfig option.
 */
#include <zephyr/linker/iterable_sections.h>
ITERABLE_SECTION_ROM(bt_l2cap_fixed_chan, Z_LINK_ITERABLE_SUBALIGN)
#if defined(CONFIG_BT_CLASSIC)
ITERABLE_SECTION_ROM(bt_l2cap_br_fixed_chan, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_BT_CONN)
ITERABLE_SECTION_ROM(bt_conn_cb, Z_LINK_ITERABLE_SUBALIGN)
#endif
ITERABLE_SECTION_ROM(bt_gatt_service_static, Z_LINK_ITERABLE_SUBALIGN)
#if defined(CONFIG_BT_MESH)
ITERABLE_SECTION_ROM(bt_mesh_subnet_cb, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(bt_mesh_app_key_cb, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(bt_mesh_hb_cb, Z_LINK_ITERABLE_SUBALIGN)
#if defined(CONFIG_BT_TESTING)
ITERABLE_SECTION_ROM(bt_mesh_beacon_cb, Z_LINK_ITERABLE_SUBALIGN)
#endif
#endif
#if defined(CONFIG_BT_MESH_FRIEND)
ITERABLE_SECTION_ROM(bt_mesh_friend_cb, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_BT_MESH_LOW_POWER)
ITERABLE_SECTION_ROM(bt_mesh_lpn_cb, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_BT_IAS)
ITERABLE_SECTION_ROM(bt_ias_cb, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_BT_MESH_GATT_PROXY)
ITERABLE_SECTION_ROM(bt_mesh_proxy_cb, Z_LINK_ITERABLE_SUBALIGN)
#endif
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-bt.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 302 |
```linker script
/* Debug and tracing support sections. */
#include <zephyr/linker/iterable_sections.h>
ITERABLE_SECTION_ROM(tracing_backend, Z_LINK_ITERABLE_SUBALIGN)
/* Thread-awareness metadata consumed by debuggers.
 * NOTE(review): declared with SECTION_DATA_PROLOGUE but routed through
 * GROUP_ROM_LINK_IN — appears intentional for a read-only payload; confirm.
 */
SECTION_DATA_PROLOGUE(zephyr_dbg_info,,)
{
KEEP(*(".dbg_thread_info"));
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#ifdef CONFIG_SYMTAB
/* Symbol-table entries, emitted only when CONFIG_SYMTAB is enabled. */
SECTION_PROLOGUE(symtab,,)
{
KEEP(*(_SYMTAB_SECTION_SYMS))
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_SYMTAB */
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-debug.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 121 |
```linker script
/* Networking subsystem iterable sections, each gated on its Kconfig option. */
#include <zephyr/linker/iterable_sections.h>
#if defined(CONFIG_NET_SOCKETS)
ITERABLE_SECTION_ROM(net_socket_register, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_NET_L2_PPP)
ITERABLE_SECTION_ROM(ppp_protocol_handler, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_DNS_SD)
ITERABLE_SECTION_ROM(dns_sd_rec, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_HTTP_SERVER)
ITERABLE_SECTION_ROM(http_service_desc, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_COAP_SERVER)
ITERABLE_SECTION_ROM(coap_service, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_NET_MGMT_EVENT)
ITERABLE_SECTION_ROM(net_mgmt_event_static_handler, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_NET_SOCKETS_SERVICE)
ITERABLE_SECTION_ROM(net_socket_service_desc, Z_LINK_ITERABLE_SUBALIGN)
#endif
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-net.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 198 |
```linker script
/* Logging subsystem sections. With CONFIG_LOG_FMT_SECTION_STRIP and a
 * DEVNULL_REGION present, the format strings are marked (COPY) and routed
 * to the devnull region instead of the normal ROM path.
 */
#include <zephyr/linker/iterable_sections.h>
#if defined(CONFIG_LOG_FMT_SECTION_STRIP) && defined(DEVNULL_REGION)
SECTION_PROLOGUE(log_strings,(COPY),SUBALIGN(Z_LINK_ITERABLE_SUBALIGN))
{
Z_LINK_ITERABLE(log_strings);
} GROUP_ROM_LINK_IN(DEVNULL_REGION, DEVNULL_REGION)
#else
ITERABLE_SECTION_ROM(log_strings, Z_LINK_ITERABLE_SUBALIGN)
#endif
ITERABLE_SECTION_ROM(log_const, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(log_backend, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(log_link, Z_LINK_ITERABLE_SUBALIGN)
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-logging.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 138 |
```linker script
/* Kernel and device-model ROM sections: init-level tables, the device list,
 * SW ISR tables, and userspace/object metadata.
 */
#include <zephyr/linker/iterable_sections.h>
SECTION_PROLOGUE(initlevel,,)
{
/*
 * link in initialization objects for all objects that are
 * automatically initialized by the kernel; the objects are
 * sorted in the order they will be initialized (i.e. ordered
 * by level, sorted by priority within a level)
 */
__init_start = .;
CREATE_OBJ_LEVEL(init, EARLY)
CREATE_OBJ_LEVEL(init, PRE_KERNEL_1)
CREATE_OBJ_LEVEL(init, PRE_KERNEL_2)
CREATE_OBJ_LEVEL(init, POST_KERNEL)
CREATE_OBJ_LEVEL(init, APPLICATION)
CREATE_OBJ_LEVEL(init, SMP)
__init_end = .;
/* Entries registered for deferred (on-demand) initialization. */
__deferred_init_list_start = .;
KEEP(*(.z_deferred_init*))
__deferred_init_list_end = .;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
/* Numerically-sorted device list (init ordinal encoded in section name). */
ITERABLE_SECTION_ROM_NUMERIC(device, Z_LINK_ITERABLE_SUBALIGN)
#if defined(CONFIG_GEN_SW_ISR_TABLE) && defined(CONFIG_SHARED_INTERRUPTS)
/* since z_shared_isr() is not referenced anywhere when
 * zephyr_pre0.elf is built, the linker will end up dropping it.
 * Later on, during the second linking stage (when zephyr.elf is
 * built), the symbol will be added to the text section since it's
 * now being referenced (thanks to isr_tables.c). This is very
 * problematic because adding the z_shared_isr symbol between
 * the linking stages will end up shifting the addresses of the
 * functions, which, in turn, will end up messing the ISR table
 * (as the entries from _sw_isr_table will end up pointing to
 * old addresses of the registered ISRs). To prevent this from
 * happening, instruct the linker to avoid dropping z_shared_isr
 * if it's not being referenced anywhere.
 */
SECTION_PROLOGUE(.text.z_shared_isr,,)
{
KEEP(*(.text.z_shared_isr*))
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif
#if defined(CONFIG_GEN_SW_ISR_TABLE) && !defined(CONFIG_DYNAMIC_INTERRUPTS)
SECTION_PROLOGUE(sw_isr_table,,)
{
/*
 * Some arch requires an entry to be aligned to arch
 * specific boundary for using double word load
 * instruction. See include/sw_isr_table.h.
 */
. = ALIGN(CONFIG_ARCH_SW_ISR_TABLE_ALIGN);
*(_SW_ISR_TABLE_SECTION_SYMS)
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#if defined(CONFIG_SHARED_INTERRUPTS)
SECTION_PROLOGUE(shared_sw_isr_table,,)
{
/* TODO: does this section require alignment? */
KEEP(*(_SHARED_SW_ISR_TABLE_SECTION_SYMS))
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif
#endif
/* verify we don't have rogue .z_init_<something> initlevel sections */
SECTION_PROLOGUE(initlevel_error,,)
{
KEEP(*(SORT(.z_init_[_A-Z0-9]*)))
}
ASSERT(SIZEOF(initlevel_error) == 0, "Undefined initialization levels used.")
#ifdef CONFIG_USERSPACE
/* Build-time assignment of permissions to kernel objects to
 * threads declared with K_THREAD_DEFINE()
 */
ITERABLE_SECTION_ROM(k_object_assignment, Z_LINK_ITERABLE_SUBALIGN)
#endif
/* Descriptors of application shared-memory partitions. */
SECTION_DATA_PROLOGUE(app_shmem_regions,,)
{
__app_shmem_regions_start = .;
KEEP(*(SORT(.app_regions.*)));
__app_shmem_regions_end = .;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
ITERABLE_SECTION_ROM(k_p4wq_initparam, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(_static_thread_data, Z_LINK_ITERABLE_SUBALIGN)
#if defined(CONFIG_PCIE)
ITERABLE_SECTION_ROM(irq_alloc, Z_LINK_ITERABLE_SUBALIGN)
#endif /* CONFIG_PCIE */
#if !defined(CONFIG_DEVICE_DEPS_DYNAMIC)
/* Generated device-dependency handle arrays (static layout). */
SECTION_DATA_PROLOGUE(device_deps,,)
{
#include <zephyr/linker/device-deps.ld>
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* !CONFIG_DEVICE_DEPS_DYNAMIC */
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-kernel-devices.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 937 |
```linker script
#ifdef CONFIG_STATIC_INIT_GNU
	SECTION_PROLOGUE(_CTOR_SECTION_NAME,,)
	{
		/*
		 * The compiler fills the constructor pointers table below,
		 * hence symbol __CTOR_LIST__ must be aligned on word
		 * boundary. To align with the C++ standard, the first element
		 * of the array contains the number of actual constructors. The
		 * last element is NULL.
		 *
		 * The __CTOR_LIST__ and __CTOR_END__ symbols are always defined
		 * to result in an empty list. This is necessary to fix an issue
		 * where the glibc process initialization code on native_posix
		 * platforms calls constructors before Zephyr loads (issue #39347).
		 *
		 * Zephyr's start-up code uses the __ZEPHYR_CTOR_LIST__ and
		 * __ZEPHYR_CTOR_END__ symbols, so these need to be correctly set.
		 */
#ifdef CONFIG_64BIT
		. = ALIGN(8);
		__ZEPHYR_CTOR_LIST__ = .;
		/* First slot: number of constructors = pointers in the span,
		 * minus this count slot and the trailing NULL slot.
		 */
		QUAD((__ZEPHYR_CTOR_END__ - __ZEPHYR_CTOR_LIST__) / 8 - 2)
		KEEP(*(SORT_BY_NAME(".ctors*")))
		__CTOR_LIST__ = .;
		QUAD(0)
		__ZEPHYR_CTOR_END__ = .;
		QUAD(0)
		__CTOR_END__ = .;
#else
		. = ALIGN(4);
		__ZEPHYR_CTOR_LIST__ = .;
		LONG((__ZEPHYR_CTOR_END__ - __ZEPHYR_CTOR_LIST__) / 4 - 2)
		KEEP(*(SORT_BY_NAME(".ctors*")))
		__CTOR_LIST__ = .;
		LONG(0)
		__ZEPHYR_CTOR_END__ = .;
		LONG(0)
		__CTOR_END__ = .;
#endif
	} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
	SECTION_PROLOGUE(init_array,,)
	{
		/*
		 * Similar to the shenanigans required for the __CTOR_LIST__ and
		 * __CTOR_END__ symbols we define __init_array_start and __init_array_end
		 * to the same address to define an empty list. This prevents the glibc
		 * startup code from calling any global constructors before Zephyr loads.
		 *
		 * Zephyr's start-up code uses the __zephyr_init_array_start and
		 * __zephyr_init_array_end symbols, so these need to be set correctly.
		 */
		. = ALIGN(4);
		__init_array_start = .;
		__init_array_end = .;
		__zephyr_init_array_start = .;
		KEEP(*(SORT_BY_NAME(".init_array*")))
		__zephyr_init_array_end = .;
	} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#elif defined(CONFIG_TOOLCHAIN_SUPPORTS_STATIC_INIT_GNU) && !defined(CONFIG_NATIVE_APPLICATION)
	/*
	 * If the code to invoke constructors is not enabled,
	 * make sure there aren't any in the application
	 */
	SECTION_PROLOGUE(init_array,,)
	{
		KEEP(*(SORT_BY_NAME(".ctors*")))
		KEEP(*(SORT_BY_NAME(".init_array*")))
	} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
	ASSERT (SIZEOF(init_array) == 0,
		"GNU-style constructors required but STATIC_INIT_GNU not enabled")
#endif
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-init.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 778 |
```objective-c
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_TRACING_TRACING_FORMAT_H
#define ZEPHYR_INCLUDE_TRACING_TRACING_FORMAT_H
#include <zephyr/toolchain/common.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Tracing format APIs
 * @defgroup subsys_tracing_format_apis Tracing format APIs
 * @ingroup subsys_tracing
 * @{
 */
/** @brief A structure to represent tracing data format. */
typedef struct tracing_data {
	uint8_t *data;   /* Pointer to the raw bytes to trace (not copied here). */
	uint32_t length; /* Number of bytes at @a data. */
} __packed tracing_data_t; /* __packed: serialized as-is onto the trace stream. */
/**
 * @brief Macro to trace a message in string format.
 *
 * @param fmt The format string.
 * @param ... The format arguments.
 */
#define TRACING_STRING(fmt, ...) \
	do { \
		tracing_format_string(fmt, ##__VA_ARGS__); \
	} while (false)
/**
 * @brief Macro to format data to tracing data format.
 *
 * Wraps an lvalue in a struct tracing_data compound literal pointing at
 * its storage; @p x must therefore be addressable.
 *
 * @param x Data field.
 */
#define TRACING_FORMAT_DATA(x) \
	((struct tracing_data){.data = (uint8_t *)&(x), .length = sizeof((x))})
/**
 * @brief Macro to trace a message in tracing data format.
 *
 * All the parameters should be struct tracing_data.
 */
#define TRACING_DATA(...) \
	do { \
		struct tracing_data arg[] = {__VA_ARGS__}; \
		\
		/* Element count of arg[] gives the number of descriptors. */ \
		tracing_format_data(arg, sizeof(arg) / \
				    sizeof(struct tracing_data)); \
	} while (false)
/**
 * @brief Tracing a message in string format.
 *
 * @param str String to format.
 * @param ... Variable length arguments.
 */
void tracing_format_string(const char *str, ...);
/**
 * @brief Tracing a message in raw data format.
 *
 * @param data Raw data to be traced.
 * @param length Raw data length.
 */
void tracing_format_raw_data(uint8_t *data, uint32_t length);
/**
 * @brief Tracing a message in tracing data format.
 *
 * @param tracing_data_array Tracing_data format data array to be traced.
 * @param count Tracing_data array data count.
 */
void tracing_format_data(tracing_data_t *tracing_data_array, uint32_t count);
/** @} */ /* end of subsys_tracing_format_apis */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_TRACING_TRACING_FORMAT_H */
``` | /content/code_sandbox/include/zephyr/tracing/tracing_format.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 514 |
```linker script
#include <zephyr/linker/iterable_sections.h>
/*
 * Miscellaneous subsystem iterable sections placed in ROM. Each entry is
 * emitted only when the owning subsystem is enabled in Kconfig.
 */
#if defined(CONFIG_EC_HOST_CMD)
ITERABLE_SECTION_ROM(ec_host_cmd_handler, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_SETTINGS)
ITERABLE_SECTION_ROM(settings_handler_static, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_SENSING)
ITERABLE_SECTION_ROM(sensing_sensor_info, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_SENSOR_INFO)
ITERABLE_SECTION_ROM(sensor_info, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_SENSOR_ASYNC_API)
ITERABLE_SECTION_ROM(sensor_decoder_api, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_MCUMGR)
ITERABLE_SECTION_ROM(mcumgr_handler, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_INPUT)
ITERABLE_SECTION_ROM(input_callback, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_EMUL)
ITERABLE_SECTION_ROM(emul, Z_LINK_ITERABLE_SUBALIGN)
#endif /* CONFIG_EMUL */
#if defined(CONFIG_ZBUS)
ITERABLE_SECTION_ROM(zbus_channel, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(zbus_observer, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(zbus_channel_observation, Z_LINK_ITERABLE_SUBALIGN)
#endif /* CONFIG_ZBUS */
#ifdef CONFIG_LLEXT
ITERABLE_SECTION_ROM(llext_const_symbol, Z_LINK_ITERABLE_SUBALIGN)
#endif /* CONFIG_LLEXT */
/* Symbols explicitly kept for external consumers; KEEP prevents the
 * garbage collector from discarding otherwise-unreferenced entries.
 */
SECTION_DATA_PROLOGUE(symbol_to_keep,,)
{
	__symbol_to_keep_start = .;
	KEEP(*(SORT(.symbol_to_keep*)));
	__symbol_to_keep_end = .;
} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
ITERABLE_SECTION_ROM(shell, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(shell_root_cmds, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(shell_subcmds, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(shell_dynamic_subcmds, Z_LINK_ITERABLE_SUBALIGN)
ITERABLE_SECTION_ROM(cfb_font, Z_LINK_ITERABLE_SUBALIGN)
#if defined(CONFIG_GNSS)
ITERABLE_SECTION_ROM(gnss_data_callback, Z_LINK_ITERABLE_SUBALIGN)
#endif
#if defined(CONFIG_GNSS_SATELLITES)
ITERABLE_SECTION_ROM(gnss_satellites_callback, Z_LINK_ITERABLE_SUBALIGN)
#endif
``` | /content/code_sandbox/include/zephyr/linker/common-rom/common-rom-misc.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 500 |
```objective-c
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_TRACING_SYSCALL_H_
#define ZEPHYR_INCLUDE_TRACING_SYSCALL_H_
/* Backend selection: SystemView and the test backend provide their own
 * syscall hooks; every other configuration gets the no-op defaults below.
 */
#if defined CONFIG_SEGGER_SYSTEMVIEW
#include "tracing_sysview_syscall.h"
#elif defined CONFIG_TRACING_TEST
#include "tracing_test_syscall.h"
#else
/**
 * @brief Syscall Tracing APIs
 * @defgroup subsys_tracing_apis_syscall Syscall Tracing APIs
 * @ingroup subsys_tracing_apis
 * @{
 */
/**
 * @brief Trace syscall entry
 * @param id Syscall ID (as defined in the generated syscall_list.h)
 * @param name Syscall name as a token (ex: k_thread_create)
 * @param ... Other parameters passed to the syscall
 */
#define sys_port_trace_syscall_enter(id, name, ...)
/**
 * @brief Trace syscall exit
 * @param id Syscall ID (as defined in the generated syscall_list.h)
 * @param name Syscall name as a token (ex: k_thread_create)
 * @param ... Other parameters passed to the syscall, if the syscall has a
 * return, the return value is the last parameter in the list
 */
#define sys_port_trace_syscall_exit(id, name, ...)
/** @} */ /* end of subsys_tracing_syscall_apis */
#endif
#endif /* ZEPHYR_INCLUDE_TRACING_SYSCALL_H_ */
``` | /content/code_sandbox/include/zephyr/tracing/tracing_syscall.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 290 |
```objective-c
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_TRACING_TRACING_MACROS_H_
#define ZEPHYR_INCLUDE_TRACING_TRACING_MACROS_H_
#include <zephyr/sys/util_macro.h>
#if !defined(CONFIG_TRACING) && !defined(__DOXYGEN__)
/* Tracing disabled: every hook collapses to an empty statement so call
 * sites compile unchanged with zero runtime cost.
 */
#define SYS_PORT_TRACING_FUNC(type, func, ...) do { } while (false)
#define SYS_PORT_TRACING_FUNC_ENTER(type, func, ...) do { } while (false)
#define SYS_PORT_TRACING_FUNC_BLOCKING(type, func, ...) do { } while (false)
#define SYS_PORT_TRACING_FUNC_EXIT(type, func, ...) do { } while (false)
#define SYS_PORT_TRACING_OBJ_INIT(obj_type, obj, ...) do { } while (false)
#define SYS_PORT_TRACING_OBJ_FUNC(obj_type, func, obj, ...) do { } while (false)
#define SYS_PORT_TRACING_OBJ_FUNC_ENTER(obj_type, func, obj, ...) do { } while (false)
#define SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(obj_type, func, obj, ...) do { } while (false)
#define SYS_PORT_TRACING_OBJ_FUNC_EXIT(obj_type, func, obj, ...) do { } while (false)
#define SYS_PORT_TRACING_TRACKING_FIELD(type)
#else
/**
 * @brief Tracing utility macros
 * @defgroup subsys_tracing_macros Tracing utility macros
 * @ingroup subsys_tracing
 * @{
 */
/** @cond INTERNAL_HIDDEN */
/*
 * Helper macros used by the extended tracing system
 *
 * Each helper pastes tokens to form the name of the per-event hook macro,
 * e.g. _SYS_PORT_TRACING_FUNC(k_sem, give) -> sys_port_trace_k_sem_give.
 */
#define _SYS_PORT_TRACING_TYPE_MASK(type) \
	sys_port_trace_type_mask_ ## type
#define _SYS_PORT_TRACING_FUNC(name, func) \
	sys_port_trace_ ## name ## _ ## func
#define _SYS_PORT_TRACING_FUNC_ENTER(name, func) \
	sys_port_trace_ ## name ## _ ## func ## _enter
#define _SYS_PORT_TRACING_FUNC_BLOCKING(name, func) \
	sys_port_trace_ ## name ## _ ## func ## _blocking
#define _SYS_PORT_TRACING_FUNC_EXIT(name, func) \
	sys_port_trace_ ## name ## _ ## func ## _exit
#define _SYS_PORT_TRACING_OBJ_INIT(name) \
	sys_port_trace_ ## name ## _init
#define _SYS_PORT_TRACING_OBJ_FUNC(name, func) \
	sys_port_trace_ ## name ## _ ## func
#define _SYS_PORT_TRACING_OBJ_FUNC_ENTER(name, func) \
	sys_port_trace_ ## name ## _ ## func ## _enter
#define _SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(name, func) \
	sys_port_trace_ ## name ## _ ## func ## _blocking
#define _SYS_PORT_TRACING_OBJ_FUNC_EXIT(name, func) \
	sys_port_trace_ ## name ## _ ## func ## _exit
/*
 * Helper macros for the object tracking system
 */
#define _SYS_PORT_TRACKING_OBJ_INIT(name) \
	sys_port_track_ ## name ## _init
#define _SYS_PORT_TRACKING_OBJ_FUNC(name, func) \
	sys_port_track_ ## name ## _ ## func
/*
 * Object trace macros part of the system for checking if certain
 * objects should be traced or not depending on the tracing configuration.
 *
 * Pattern: when tracing is enabled for an object type, the mask macro
 * forwards its argument (the trace call); otherwise it expands to nothing
 * and the call is compiled out.
 */
#if defined(CONFIG_TRACING_THREAD)
	#define sys_port_trace_type_mask_k_thread(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_thread(trace_call)
	#define sys_port_trace_k_thread_is_disabled 1
#endif
#if defined(CONFIG_TRACING_WORK)
	#define sys_port_trace_type_mask_k_work(trace_call) trace_call
	#define sys_port_trace_type_mask_k_work_queue(trace_call) trace_call
	#define sys_port_trace_type_mask_k_work_delayable(trace_call) trace_call
	#define sys_port_trace_type_mask_k_work_poll(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_work(trace_call)
	#define sys_port_trace_type_mask_k_work_queue(trace_call)
	#define sys_port_trace_type_mask_k_work_delayable(trace_call)
	#define sys_port_trace_type_mask_k_work_poll(trace_call)
#endif
#if defined(CONFIG_TRACING_SEMAPHORE)
	#define sys_port_trace_type_mask_k_sem(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_sem(trace_call)
#endif
#if defined(CONFIG_TRACING_MUTEX)
	#define sys_port_trace_type_mask_k_mutex(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_mutex(trace_call)
#endif
#if defined(CONFIG_TRACING_CONDVAR)
	#define sys_port_trace_type_mask_k_condvar(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_condvar(trace_call)
#endif
#if defined(CONFIG_TRACING_QUEUE)
	#define sys_port_trace_type_mask_k_queue(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_queue(trace_call)
#endif
#if defined(CONFIG_TRACING_FIFO)
	#define sys_port_trace_type_mask_k_fifo(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_fifo(trace_call)
#endif
#if defined(CONFIG_TRACING_LIFO)
	#define sys_port_trace_type_mask_k_lifo(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_lifo(trace_call)
#endif
#if defined(CONFIG_TRACING_STACK)
	#define sys_port_trace_type_mask_k_stack(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_stack(trace_call)
#endif
#if defined(CONFIG_TRACING_MESSAGE_QUEUE)
	#define sys_port_trace_type_mask_k_msgq(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_msgq(trace_call)
#endif
#if defined(CONFIG_TRACING_MAILBOX)
	#define sys_port_trace_type_mask_k_mbox(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_mbox(trace_call)
#endif
#if defined(CONFIG_TRACING_PIPE)
	#define sys_port_trace_type_mask_k_pipe(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_pipe(trace_call)
#endif
#if defined(CONFIG_TRACING_HEAP)
	#define sys_port_trace_type_mask_k_heap(trace_call) trace_call
	#define sys_port_trace_type_mask_k_heap_sys(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_heap(trace_call)
	#define sys_port_trace_type_mask_k_heap_sys(trace_call)
#endif
#if defined(CONFIG_TRACING_MEMORY_SLAB)
	#define sys_port_trace_type_mask_k_mem_slab(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_mem_slab(trace_call)
#endif
#if defined(CONFIG_TRACING_TIMER)
	#define sys_port_trace_type_mask_k_timer(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_timer(trace_call)
#endif
#if defined(CONFIG_TRACING_EVENT)
	#define sys_port_trace_type_mask_k_event(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_k_event(trace_call)
#endif
/* API-level (not object-type) disables, consumed by
 * _SYS_PORT_TRACE_IS_DISABLED below.
 */
#ifndef CONFIG_TRACING_POLLING
	#define sys_port_trace_k_poll_api_is_disabled 1
	#define sys_port_trace_k_work_poll_is_disabled 1
#endif
#ifndef CONFIG_TRACING_PM
	#define sys_port_trace_pm_is_disabled 1
#endif
#if defined(CONFIG_TRACING_NET_SOCKETS)
	#define sys_port_trace_type_mask_socket(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_socket(trace_call)
#endif
#if defined(CONFIG_TRACING_NET_CORE)
	#define sys_port_trace_type_mask_net(trace_call) trace_call
#else
	#define sys_port_trace_type_mask_net(trace_call)
#endif
/*
 * We cannot positively enumerate all traced APIs, as applications may trace
 * arbitrary custom APIs we know nothing about. Therefore we demand that tracing
 * of an API must be actively disabled.
 *
 * This contrasts with object tracing/tracking as all traceable objects are well
 * known, see the SYS_PORT_TRACING_TYPE_MASK approach below.
 */
#define _SYS_PORT_TRACE_IS_DISABLED(type) sys_port_trace_##type##_is_disabled
#define _SYS_PORT_TRACE_WRAP(func, ...) do { func(__VA_ARGS__); } while (false)
/* COND_CODE_1 (from <zephyr/sys/util_macro.h>): emit nothing when the
 * *_is_disabled symbol expands to 1, otherwise emit the wrapped call.
 */
#define _SYS_PORT_TRACE_IF_NOT_DISABLED(type, func, ...)                                 \
	COND_CODE_1(_SYS_PORT_TRACE_IS_DISABLED(type), (),                               \
		    (_SYS_PORT_TRACE_WRAP(func, __VA_ARGS__)))
/** @endcond */
/**
 * @brief Checks if an object type should be traced or not.
 *
 * @param type Tracing event type/object
 * @param trace_call Tracing call
 */
#define SYS_PORT_TRACING_TYPE_MASK(type, trace_call) \
	_SYS_PORT_TRACING_TYPE_MASK(type)(trace_call)
/**
 * @brief Tracing macro for function calls which are not directly
 * associated with a specific type of object.
 *
 * @param type Type of tracing event or object type
 * @param func Name of the function responsible for the call. This does not need to exactly
 * match the name of the function but should rather match what the user called in case of
 * system calls etc. That is, we can often omit the z_vrfy/z_impl part of the name.
 * @param ... Additional parameters relevant to the tracing call
 */
#define SYS_PORT_TRACING_FUNC(type, func, ...) \
	_SYS_PORT_TRACE_IF_NOT_DISABLED(type, _SYS_PORT_TRACING_FUNC(type, func), __VA_ARGS__)
/**
 * @brief Tracing macro for the entry into a function that might or might not return
 * a value.
 *
 * @param type Type of tracing event or object type
 * @param func Name of the function responsible for the call. This does not need to exactly
 * match the name of the function but should rather match what the user called in case of
 * system calls etc. That is, we can often omit the z_vrfy/z_impl part of the name.
 * @param ... Additional parameters relevant to the tracing call
 */
#define SYS_PORT_TRACING_FUNC_ENTER(type, func, ...) \
	_SYS_PORT_TRACE_IF_NOT_DISABLED(type, _SYS_PORT_TRACING_FUNC_ENTER(type, func), __VA_ARGS__)
/**
 * @brief Tracing macro for when a function blocks during its execution.
 *
 * @param type Type of tracing event or object type
 * @param func Name of the function responsible for the call. This does not need to exactly
 * match the name of the function but should rather match what the user called in case of
 * system calls etc. That is, we can often omit the z_vrfy/z_impl part of the name.
 * @param ... Additional parameters relevant to the tracing call
 */
#define SYS_PORT_TRACING_FUNC_BLOCKING(type, func, ...) \
	_SYS_PORT_TRACE_IF_NOT_DISABLED(type, _SYS_PORT_TRACING_FUNC_BLOCKING(type, func), \
		__VA_ARGS__)
/**
 * @brief Tracing macro for when a function ends its execution. Potential return values
 * can be given as additional arguments.
 *
 * @param type Type of tracing event or object type
 * @param func Name of the function responsible for the call. This does not need to exactly
 * match the name of the function but should rather match what the user called in case of
 * system calls etc. That is, we can often omit the z_vrfy/z_impl part of the name.
 * @param ... Additional parameters relevant to the tracing call
 */
#define SYS_PORT_TRACING_FUNC_EXIT(type, func, ...) \
	_SYS_PORT_TRACE_IF_NOT_DISABLED(type, _SYS_PORT_TRACING_FUNC_EXIT(type, func), __VA_ARGS__)
/**
 * @brief Tracing macro for the initialization of an object.
 *
 * Emits both the trace hook and the tracking hook for the object type.
 *
 * @param obj_type The type of object associated with the call (k_thread, k_sem, k_mutex etc.)
 * @param obj Object
 */
#define SYS_PORT_TRACING_OBJ_INIT(obj_type, obj, ...) \
	do { \
		SYS_PORT_TRACING_TYPE_MASK(obj_type, \
			_SYS_PORT_TRACING_OBJ_INIT(obj_type)(obj, ##__VA_ARGS__)); \
		SYS_PORT_TRACING_TYPE_MASK(obj_type, \
			_SYS_PORT_TRACKING_OBJ_INIT(obj_type)(obj, ##__VA_ARGS__)); \
	} while (false)
/**
 * @brief Tracing macro for simple object function calls often without returns or branching.
 *
 * @param obj_type The type of object associated with the call (k_thread, k_sem, k_mutex etc.)
 * @param func Name of the function responsible for the call. This does not need to exactly
 * match the name of the function but should rather match what the user called in case of
 * system calls etc. That is, we can often omit the z_vrfy/z_impl part of the name.
 * @param obj Object
 * @param ... Additional parameters relevant to the tracing call
 */
#define SYS_PORT_TRACING_OBJ_FUNC(obj_type, func, obj, ...) \
	do { \
		SYS_PORT_TRACING_TYPE_MASK(obj_type, \
			_SYS_PORT_TRACING_OBJ_FUNC(obj_type, func)(obj, ##__VA_ARGS__)); \
		SYS_PORT_TRACING_TYPE_MASK(obj_type, \
			_SYS_PORT_TRACKING_OBJ_FUNC(obj_type, func)(obj, ##__VA_ARGS__)); \
	} while (false)
/**
 * @brief Tracing macro for the entry into a function that might or might not return
 * a value.
 *
 * @param obj_type The type of object associated with the call (k_thread, k_sem, k_mutex etc.)
 * @param func Name of the function responsible for the call. This does not need to exactly
 * match the name of the function but should rather match what the user called in case of
 * system calls etc. That is, we can often omit the z_vrfy/z_impl part of the name.
 * @param obj Object
 * @param ... Additional parameters relevant to the tracing call
 */
#define SYS_PORT_TRACING_OBJ_FUNC_ENTER(obj_type, func, obj, ...) \
	do { \
		SYS_PORT_TRACING_TYPE_MASK(obj_type, \
			_SYS_PORT_TRACING_OBJ_FUNC_ENTER(obj_type, func)(obj, ##__VA_ARGS__)); \
	} while (false)
/**
 * @brief Tracing macro for when a function blocks during its execution.
 *
 * @param obj_type The type of object associated with the call (k_thread, k_sem, k_mutex etc.)
 * @param func Name of the function responsible for the call. This does not need to exactly
 * match the name of the function but should rather match what the user called in case of
 * system calls etc. That is, we can often omit the z_vrfy/z_impl part of the name.
 * @param obj Object
 * @param timeout Timeout
 * @param ... Additional parameters relevant to the tracing call
 */
#define SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(obj_type, func, obj, timeout, ...) \
	do { \
		SYS_PORT_TRACING_TYPE_MASK(obj_type, \
			_SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(obj_type, func) \
			(obj, timeout, ##__VA_ARGS__)); \
	} while (false)
/**
 * @brief Tracing macro for when a function ends its execution. Potential return values
 * can be given as additional arguments.
 *
 * @param obj_type The type of object associated with the call (k_thread, k_sem, k_mutex etc.)
 * @param func Name of the function responsible for the call. This does not need to exactly
 * match the name of the function but should rather match what the user called in case of
 * system calls etc. That is, we can often omit the z_vrfy/z_impl part of the name.
 * @param obj Object
 * @param ... Additional parameters relevant to the tracing call
 */
#define SYS_PORT_TRACING_OBJ_FUNC_EXIT(obj_type, func, obj, ...) \
	do { \
		SYS_PORT_TRACING_TYPE_MASK(obj_type, \
			_SYS_PORT_TRACING_OBJ_FUNC_EXIT(obj_type, func)(obj, ##__VA_ARGS__)); \
	} while (false)
/**
 * @brief Field added to kernel objects so they are tracked.
 *
 * @param type Type of object being tracked (k_thread, k_sem, etc.)
 */
#define SYS_PORT_TRACING_TRACKING_FIELD(type) \
	SYS_PORT_TRACING_TYPE_MASK(type, struct type *_obj_track_next;)
/** @} */ /* end of subsys_tracing_macros */
#endif /* CONFIG_TRACING */
#endif /* ZEPHYR_INCLUDE_TRACING_TRACING_MACROS_H_ */
``` | /content/code_sandbox/include/zephyr/tracing/tracing_macros.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,391 |
```objective-c
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_TRACING_TRACKING_H_
#define ZEPHYR_INCLUDE_TRACING_TRACKING_H_
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#if defined(CONFIG_TRACING_OBJECT_TRACKING) || defined(__DOXYGEN__)
/**
 * @brief Object tracking
 *
 * Object tracking provides lists to kernel objects, so their
 * existence and current status can be tracked.
 *
 * The following global variables are the heads of available lists:
 * - _track_list_k_timer
 * - _track_list_k_mem_slab
 * - _track_list_k_sem
 * - _track_list_k_mutex
 * - _track_list_k_stack
 * - _track_list_k_msgq
 * - _track_list_k_mbox
 * - _track_list_k_pipe
 * - _track_list_k_queue
 * - _track_list_k_event
 *
 * @defgroup subsys_tracing_object_tracking Object tracking
 * @ingroup subsys_tracing
 * @{
 */
/* Per-type singly linked list heads; nodes are chained through the
 * _obj_track_next field injected by SYS_PORT_TRACING_TRACKING_FIELD().
 */
extern struct k_timer *_track_list_k_timer;
extern struct k_mem_slab *_track_list_k_mem_slab;
extern struct k_sem *_track_list_k_sem;
extern struct k_mutex *_track_list_k_mutex;
extern struct k_stack *_track_list_k_stack;
extern struct k_msgq *_track_list_k_msgq;
extern struct k_mbox *_track_list_k_mbox;
extern struct k_pipe *_track_list_k_pipe;
extern struct k_queue *_track_list_k_queue;
extern struct k_event *_track_list_k_event;
/**
 * @brief Gets node's next element in an object tracking list.
 *
 * @param list Node to get next element from.
 */
#define SYS_PORT_TRACK_NEXT(list)((list)->_obj_track_next)
/** @cond INTERNAL_HIDDEN */
/* Object tracking hooks: each sys_port_track_* macro expands either to
 * nothing (the event does not affect a tracking list) or to the matching
 * sys_track_* list-insertion helper declared below. Extra parameters such
 * as ret/rc are accepted for call-site uniformity and ignored here.
 *
 * The expansions carry no trailing semicolon -- the call site supplies
 * it -- so the hooks remain safe inside unbraced if/else statements.
 */
#define sys_port_track_k_thread_start(thread)
#define sys_port_track_k_thread_create(new_thread)
#define sys_port_track_k_thread_sched_ready(thread)
#define sys_port_track_k_thread_wakeup(thread)
#define sys_port_track_k_thread_sched_priority_set(thread, prio)
#define sys_port_track_k_work_delayable_init(dwork)
#define sys_port_track_k_work_queue_init(queue)
#define sys_port_track_k_work_init(work)
#define sys_port_track_k_mutex_init(mutex, ret) \
	sys_track_k_mutex_init(mutex)
#define sys_port_track_k_timer_stop(timer)
#define sys_port_track_k_timer_start(timer, duration, period)
#define sys_port_track_k_timer_init(timer) \
	sys_track_k_timer_init(timer)
#define sys_port_track_k_queue_peek_tail(queue, ret)
#define sys_port_track_k_queue_peek_head(queue, ret)
#define sys_port_track_k_queue_cancel_wait(queue)
#define sys_port_track_k_queue_init(queue) \
	sys_track_k_queue_init(queue)
#define sys_port_track_k_pipe_init(pipe) \
	sys_track_k_pipe_init(pipe)
#define sys_port_track_k_condvar_init(condvar, ret)
#define sys_port_track_k_stack_init(stack) \
	sys_track_k_stack_init(stack)
#define sys_port_track_k_thread_name_set(thread, ret)
#define sys_port_track_k_sem_reset(sem)
#define sys_port_track_k_sem_init(sem, ret) \
	sys_track_k_sem_init(sem)
#define sys_port_track_k_msgq_purge(msgq)
#define sys_port_track_k_msgq_peek(msgq, ret)
#define sys_port_track_k_msgq_init(msgq) \
	sys_track_k_msgq_init(msgq)
#define sys_port_track_k_mbox_init(mbox) \
	sys_track_k_mbox_init(mbox)
#define sys_port_track_k_mem_slab_init(slab, rc) \
	sys_track_k_mem_slab_init(slab)
#define sys_port_track_k_heap_free(h)
#define sys_port_track_k_heap_init(h)
/* Fix: these two hooks previously expanded with a stray trailing ';',
 * unlike every sibling hook above, producing a double statement at call
 * sites and breaking use in unbraced if/else. Dropped for consistency.
 */
#define sys_port_track_k_event_init(event) \
	sys_track_k_event_init(event)
#define sys_port_track_socket_init(sock, family, type, proto) \
	sys_track_socket_init(sock, family, type, proto)
/* List-insertion helpers implemented by the tracking backend; each links
 * the given object into its per-type tracking list.
 */
void sys_track_k_timer_init(struct k_timer *timer);
void sys_track_k_mem_slab_init(struct k_mem_slab *slab);
void sys_track_k_sem_init(struct k_sem *sem);
void sys_track_k_mutex_init(struct k_mutex *mutex);
void sys_track_k_stack_init(struct k_stack *stack);
void sys_track_k_msgq_init(struct k_msgq *msgq);
void sys_track_k_mbox_init(struct k_mbox *mbox);
void sys_track_k_pipe_init(struct k_pipe *pipe);
void sys_track_k_queue_init(struct k_queue *queue);
void sys_track_k_event_init(struct k_event *event);
void sys_track_socket_init(int sock, int family, int type, int proto);
/** @endcond */
/** @} */ /* end of subsys_tracing_object_tracking */
#else
/* Object tracking disabled: all hooks expand to nothing. Keep this list
 * in sync with the enabled branch above.
 */
#define sys_port_track_k_thread_start(thread)
#define sys_port_track_k_thread_create(new_thread)
#define sys_port_track_k_thread_sched_ready(thread)
#define sys_port_track_k_thread_wakeup(thread)
#define sys_port_track_k_thread_sched_priority_set(thread, prio)
#define sys_port_track_k_work_delayable_init(dwork)
#define sys_port_track_k_work_queue_init(queue)
#define sys_port_track_k_work_init(work)
#define sys_port_track_k_mutex_init(mutex, ret)
#define sys_port_track_k_timer_stop(timer)
#define sys_port_track_k_timer_start(timer, duration, period)
#define sys_port_track_k_timer_init(timer)
#define sys_port_track_k_queue_peek_tail(queue, ret)
#define sys_port_track_k_queue_peek_head(queue, ret)
#define sys_port_track_k_queue_cancel_wait(queue)
#define sys_port_track_k_queue_init(queue)
#define sys_port_track_k_pipe_init(pipe)
#define sys_port_track_k_condvar_init(condvar, ret)
#define sys_port_track_k_stack_init(stack)
#define sys_port_track_k_thread_name_set(thread, ret)
#define sys_port_track_k_sem_reset(sem)
#define sys_port_track_k_sem_init(sem, ret)
#define sys_port_track_k_msgq_purge(msgq)
#define sys_port_track_k_msgq_peek(msgq, ret)
#define sys_port_track_k_msgq_init(msgq)
#define sys_port_track_k_mbox_init(mbox)
#define sys_port_track_k_mem_slab_init(slab, rc)
#define sys_port_track_k_heap_free(h)
#define sys_port_track_k_heap_init(h)
#define sys_port_track_k_event_init(event)
#define sys_port_track_socket_init(sock, family, type, proto)
#endif
#endif /* ZEPHYR_INCLUDE_TRACING_TRACKING_H_ */
``` | /content/code_sandbox/include/zephyr/tracing/tracking.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,325 |
```objective-c
/** @file
 * @brief Network interface promiscuous mode support
 *
 * An API for applications to start listening network traffic.
 * This requires support from network device driver and from application.
 */
/*
 *
 */
#ifndef ZEPHYR_INCLUDE_NET_PROMISCUOUS_H_
#define ZEPHYR_INCLUDE_NET_PROMISCUOUS_H_
/**
 * @brief Promiscuous mode support.
 * @defgroup promiscuous Promiscuous mode
 * @since 1.13
 * @version 0.8.0
 * @ingroup networking
 * @{
 */
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Start to wait received network packets.
 *
 * @param timeout How long to wait before returning.
 *
 * @return Received net_pkt, NULL if not received any packet.
 */
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
struct net_pkt *net_promisc_mode_wait_data(k_timeout_t timeout);
#else
/* Stub when promiscuous mode is compiled out: never yields a packet. */
static inline struct net_pkt *net_promisc_mode_wait_data(k_timeout_t timeout)
{
	ARG_UNUSED(timeout);

	return NULL;
}
#endif /* CONFIG_NET_PROMISCUOUS_MODE */
/**
 * @brief Enable promiscuous mode for a given network interface.
 *
 * @param iface Network interface
 *
 * @return 0 if ok, <0 if error
 */
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
int net_promisc_mode_on(struct net_if *iface);
#else
/* Stub when promiscuous mode is compiled out: feature unsupported. */
static inline int net_promisc_mode_on(struct net_if *iface)
{
	ARG_UNUSED(iface);

	return -ENOTSUP;
}
#endif /* CONFIG_NET_PROMISCUOUS_MODE */
/**
 * @brief Disable promiscuous mode for a given network interface.
 *
 * @param iface Network interface
 *
 * @return 0 if ok, <0 if error
 */
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
int net_promisc_mode_off(struct net_if *iface);
#else
/* Stub when promiscuous mode is compiled out: feature unsupported. */
static inline int net_promisc_mode_off(struct net_if *iface)
{
	ARG_UNUSED(iface);

	return -ENOTSUP;
}
#endif /* CONFIG_NET_PROMISCUOUS_MODE */
#ifdef __cplusplus
}
#endif
/**
 * @}
 */
#endif /* ZEPHYR_INCLUDE_NET_PROMISCUOUS_H_ */
``` | /content/code_sandbox/include/zephyr/net/promiscuous.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 478 |
```objective-c
/*
 *
 */
/**
 * @file
 * @brief API for controlling generic network association routines on network devices that
 * support it.
 */
#ifndef ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_H_
#define ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_H_
#include <zephyr/device.h>
#include <zephyr/net/net_if.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/net/net_mgmt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Connection Manager Connectivity API
 * @defgroup conn_mgr_connectivity Connection Manager Connectivity API
 * @since 3.4
 * @version 0.1.0
 * @ingroup networking
 * @{
 */
/** @cond INTERNAL_HIDDEN */
/* Connectivity Events */
/* Event identifiers are composed from the L2 management layer, a layer
 * code reserved for conn_mgr (0x207), and a per-event command number.
 */
#define _NET_MGMT_CONN_LAYER	NET_MGMT_LAYER(NET_MGMT_LAYER_L2)
#define _NET_MGMT_CONN_CODE	NET_MGMT_LAYER_CODE(0x207)
#define _NET_MGMT_CONN_BASE	(_NET_MGMT_CONN_LAYER | _NET_MGMT_CONN_CODE | \
				 NET_MGMT_EVENT_BIT)
#define _NET_MGMT_CONN_IF_EVENT	(NET_MGMT_IFACE_BIT | _NET_MGMT_CONN_BASE)
enum net_event_conn_cmd {
	NET_EVENT_CONN_CMD_IF_TIMEOUT = 1,
	NET_EVENT_CONN_CMD_IF_FATAL_ERROR,
};
/** @endcond */
/**
 * @brief net_mgmt event raised when a connection attempt times out
 */
#define NET_EVENT_CONN_IF_TIMEOUT \
	(_NET_MGMT_CONN_IF_EVENT | NET_EVENT_CONN_CMD_IF_TIMEOUT)
/**
 * @brief net_mgmt event raised when a non-recoverable connectivity error occurs on an iface
 */
#define NET_EVENT_CONN_IF_FATAL_ERROR \
	(_NET_MGMT_CONN_IF_EVENT | NET_EVENT_CONN_CMD_IF_FATAL_ERROR)
/**
 * @brief Per-iface connectivity flags
 */
enum conn_mgr_if_flag {
	/**
	 * Persistent
	 *
	 * When set, indicates that the connectivity implementation bound to this iface should
	 * attempt to persist connectivity by automatically reconnecting after connection loss.
	 */
	CONN_MGR_IF_PERSISTENT,
	/**
	 * No auto-connect
	 *
	 * When set, conn_mgr will not automatically attempt to connect this iface when it reaches
	 * admin-up.
	 */
	CONN_MGR_IF_NO_AUTO_CONNECT,
	/**
	 * No auto-down
	 *
	 * When set, conn_mgr will not automatically take the iface admin-down when it stops
	 * trying to connect, even if CONFIG_NET_CONNECTION_MANAGER_AUTO_IF_DOWN is enabled.
	 */
	CONN_MGR_IF_NO_AUTO_DOWN,
	/** @cond INTERNAL_HIDDEN */
	/* Total number of flags - must be at the end of the enum */
	CONN_MGR_NUM_IF_FLAGS,
	/** @endcond */
};
/** Value to use with @ref conn_mgr_if_set_timeout and @ref conn_mgr_conn_binding.timeout to
 * indicate no timeout
 */
#define CONN_MGR_IF_NO_TIMEOUT 0
/**
 * @brief Connect interface
 *
 * If the provided iface has been bound to a connectivity implementation, initiate
 * network connect/association.
 *
 * Automatically takes the iface admin-up (by calling @ref net_if_up) if it isn't already.
 *
 * Non-Blocking.
 *
 * @param iface Pointer to network interface
 * @retval 0 on success.
 * @retval -ESHUTDOWN if the iface is not admin-up.
 * @retval -ENOTSUP if the iface does not have a connectivity implementation.
 * @retval implementation-specific status code otherwise.
 */
int conn_mgr_if_connect(struct net_if *iface);
/**
 * @brief Disconnect interface
 *
 * If the provided iface has been bound to a connectivity implementation, disconnect/disassociate
 * it from the network, and cancel any pending attempts to connect/associate.
 *
 * Does nothing if the iface is currently admin-down.
 *
 * @param iface Pointer to network interface
 *
 * @retval 0 on success.
 * @retval -ENOTSUP if the iface does not have a connectivity implementation.
 * @retval implementation-specific status code otherwise.
 */
int conn_mgr_if_disconnect(struct net_if *iface);
/**
 * @brief Check whether the provided network interface supports connectivity / has been bound
 *	  to a connectivity implementation.
 *
 * @param iface Pointer to the iface to check.
 * @retval true if connectivity is supported (a connectivity implementation has been bound).
 * @retval false otherwise.
 */
bool conn_mgr_if_is_bound(struct net_if *iface);
/**
* @brief Set implementation-specific connectivity options.
*
 * If the provided iface has been bound to a connectivity implementation that supports it,
 * sets implementation-specific connectivity options related to the iface.
*
* @param iface Pointer to the network interface.
* @param optname Integer value representing the option to set.
* The meaning of values is up to the conn_mgr_conn_api implementation.
* Some settings may affect multiple ifaces.
* @param optval Pointer to the value to be assigned to the option.
* @param optlen Length (in bytes) of the value to be assigned to the option.
* @retval 0 if successful.
* @retval -ENOTSUP if conn_mgr_if_set_opt not implemented by the iface.
* @retval -ENOBUFS if optlen is too long.
* @retval -EINVAL if NULL optval pointer provided.
* @retval -ENOPROTOOPT if the optname is not recognized.
* @retval implementation-specific error code otherwise.
*/
int conn_mgr_if_set_opt(struct net_if *iface, int optname, const void *optval, size_t optlen);
/**
* @brief Get implementation-specific connectivity options.
*
* If the provided iface has been bound to a connectivity implementation that supports it,
* retrieves implementation-specific connectivity options related to the iface.
*
* @param iface Pointer to the network interface.
 * @param optname Integer value representing the option to retrieve.
* The meaning of values is up to the conn_mgr_conn_api implementation.
* Some settings may be shared by multiple ifaces.
* @param optval Pointer to where the retrieved value should be stored.
* @param optlen Pointer to length (in bytes) of the destination buffer available for storing the
* retrieved value. If the available space is less than what is needed, -ENOBUFS
* is returned. If the available space is invalid, -EINVAL is returned.
*
* optlen will always be set to the total number of bytes written, regardless of
* whether an error is returned, even if zero bytes were written.
*
* @retval 0 if successful.
* @retval -ENOTSUP if conn_mgr_if_get_opt is not implemented by the iface.
* @retval -ENOBUFS if retrieval buffer is too small.
* @retval -EINVAL if invalid retrieval buffer length is provided, or if NULL optval or
* optlen pointer provided.
* @retval -ENOPROTOOPT if the optname is not recognized.
* @retval implementation-specific error code otherwise.
*/
int conn_mgr_if_get_opt(struct net_if *iface, int optname, void *optval, size_t *optlen);
/**
 * @brief Check the value of a connectivity flag
*
* If the provided iface is bound to a connectivity implementation, retrieves the value of the
* specified connectivity flag associated with that iface.
*
* @param iface - Pointer to the network interface to check.
* @param flag - The flag to check.
* @return True if the flag is set, otherwise False.
* Also returns False if the provided iface is not bound to a connectivity implementation,
* or the requested flag doesn't exist.
*/
bool conn_mgr_if_get_flag(struct net_if *iface, enum conn_mgr_if_flag flag);
/**
 * @brief Set the value of a connectivity flag
*
* If the provided iface is bound to a connectivity implementation, sets the value of the
* specified connectivity flag associated with that iface.
*
* @param iface - Pointer to the network interface to modify.
* @param flag - The flag to set.
* @param value - Whether the flag should be enabled or disabled.
* @retval 0 on success.
* @retval -EINVAL if the flag does not exist.
* @retval -ENOTSUP if the provided iface is not bound to a connectivity implementation.
*/
int conn_mgr_if_set_flag(struct net_if *iface, enum conn_mgr_if_flag flag, bool value);
/**
* @brief Get the connectivity timeout for an iface
*
* If the provided iface is bound to a connectivity implementation, retrieves the timeout setting
* in seconds for it.
*
* @param iface - Pointer to the iface to check.
* @return int - The connectivity timeout value (in seconds) if it could be retrieved, otherwise
* CONN_MGR_IF_NO_TIMEOUT.
*/
int conn_mgr_if_get_timeout(struct net_if *iface);
/**
* @brief Set the connectivity timeout for an iface.
*
* If the provided iface is bound to a connectivity implementation, sets the timeout setting in
* seconds for it.
*
* @param iface - Pointer to the network interface to modify.
* @param timeout - The timeout value to set (in seconds).
* Pass @ref CONN_MGR_IF_NO_TIMEOUT to disable the timeout.
* @retval 0 on success.
* @retval -ENOTSUP if the provided iface is not bound to a connectivity implementation.
*/
int conn_mgr_if_set_timeout(struct net_if *iface, int timeout);
/**
* @}
*/
/**
* @brief Connection Manager Bulk API
* @defgroup conn_mgr_connectivity_bulk Connection Manager Connectivity Bulk API
* @since 3.4
* @version 0.1.0
* @ingroup conn_mgr_connectivity
* @{
*/
/**
* @brief Convenience function that takes all available ifaces into the admin-up state.
*
* Essentially a wrapper for @ref net_if_up.
*
* @param skip_ignored - If true, only affect ifaces that aren't ignored by conn_mgr.
* Otherwise, affect all ifaces.
* @return 0 if all net_if_up calls returned 0, otherwise the first nonzero value
* returned by a net_if_up call.
*/
int conn_mgr_all_if_up(bool skip_ignored);
/**
* @brief Convenience function that takes all available ifaces into the admin-down state.
*
* Essentially a wrapper for @ref net_if_down.
*
* @param skip_ignored - If true, only affect ifaces that aren't ignored by conn_mgr.
* Otherwise, affect all ifaces.
* @return 0 if all net_if_down calls returned 0, otherwise the first nonzero value
* returned by a net_if_down call.
*/
int conn_mgr_all_if_down(bool skip_ignored);
/**
* @brief Convenience function that takes all available ifaces into the admin-up state, and
* connects those that support connectivity.
*
* Essentially a wrapper for @ref net_if_up and @ref conn_mgr_if_connect.
*
* @param skip_ignored - If true, only affect ifaces that aren't ignored by conn_mgr.
* Otherwise, affect all ifaces.
* @return 0 if all net_if_up and conn_mgr_if_connect calls returned 0, otherwise the first nonzero
* value returned by either net_if_up or conn_mgr_if_connect.
*/
int conn_mgr_all_if_connect(bool skip_ignored);
/**
* @brief Convenience function that disconnects all available ifaces that support connectivity
* without putting them into admin-down state (unless auto-down is enabled for the iface).
*
 * Essentially a wrapper for @ref conn_mgr_if_disconnect.
 *
 * @param skip_ignored - If true, only affect ifaces that aren't ignored by conn_mgr.
 *                       Otherwise, affect all ifaces.
 * @return 0 if all conn_mgr_if_disconnect calls returned 0, otherwise the first nonzero
 *         value returned by a conn_mgr_if_disconnect call.
*/
int conn_mgr_all_if_disconnect(bool skip_ignored);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_H_ */
``` | /content/code_sandbox/include/zephyr/net/conn_mgr_connectivity.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,521 |
```objective-c
/*
*
*/
/**
* @file
* @brief IEEE 802.15.4 native L2 stack public header
*
* @note All references to the standard in this file cite IEEE 802.15.4-2020.
*/
#ifndef ZEPHYR_INCLUDE_NET_IEEE802154_H_
#define ZEPHYR_INCLUDE_NET_IEEE802154_H_
#include <limits.h>
#include <zephyr/net/net_l2.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/crypto/cipher.h>
#include <zephyr/net/ieee802154_radio.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup ieee802154 IEEE 802.15.4 and Thread APIs
* @since 1.0
* @version 0.8.0
* @ingroup connectivity
*
* @brief IEEE 802.15.4 native and OpenThread L2, configuration, management and
* driver APIs
*
* @details The IEEE 802.15.4 and Thread subsystems comprise the OpenThread L2
* subsystem, the native IEEE 802.15.4 L2 subsystem ("Soft" MAC), a mostly
* vendor and protocol agnostic driver API shared between the OpenThread and
* native L2 stacks ("Hard" MAC and PHY) as well as several APIs to configure
* the subsystem (shell, net management, Kconfig, devicetree, etc.).
*
* The **OpenThread subsystem API** integrates the external <a
* href="path_to_url">OpenThread</a> stack into Zephyr. It builds upon
* Zephyr's native IEEE 802.15.4 driver API.
*
* The **native IEEE 802.15.4 subsystem APIs** are exposed at different levels
* and address several audiences:
* - shell (end users, application developers):
* - a set of IEEE 802.15.4 shell commands (see `shell> ieee802154 help`)
* - application API (application developers):
* - IPv6, DGRAM and RAW sockets for actual peer-to-peer, multicast and
* broadcast data exchange between nodes including connection specific
* configuration (sample coming soon, see
* path_to_url for now
* which inspired our API and therefore has a similar socket API),
* - Kconfig and devicetree configuration options (net config library
* extension, subsystem-wide MAC and PHY Kconfig/DT options, driver/vendor
* specific Kconfig/DT options, watch out for options prefixed with
* IEEE802154/ieee802154),
* - Network Management: runtime configuration of the IEEE 802.15.4
* protocols stack at the MAC (L2) and PHY (L1) levels
* (see @ref ieee802154_mgmt),
* - L2 integration (subsystem contributors):
* - see @ref ieee802154_l2
* - implementation of Zephyr's internal L2-level socket and network context
* abstractions (context/socket operations, see @ref net_l2),
* - protocol-specific extension to the interface structure (see @ref net_if)
* - protocol-specific extensions to the network packet structure
* (see @ref net_pkt),
*
* - OpenThread and native IEEE 802.15.4 share a common **driver API** (driver
* maintainers/contributors):
* - see @ref ieee802154_driver
* - a basic, mostly PHY-level driver API to be implemented by all drivers,
* - several "hard MAC" (hardware/firmware offloading) extension points for
* performance critical or timing sensitive aspects of the protocol
*/
/**
* @defgroup ieee802154_l2 IEEE 802.15.4 L2
* @since 1.0
* @version 0.8.0
* @ingroup ieee802154
*
* @brief IEEE 802.15.4 L2 APIs
*
* @details This API provides integration with Zephyr's sockets and network
* contexts. **Application and driver developers should never interface directly
* with this API.** It is of interest to subsystem maintainers only.
*
* The API implements and extends the following structures:
* - implements Zephyr's internal L2-level socket and network context
* abstractions (context/socket operations, see @ref net_l2),
* - protocol-specific extension to the interface structure (see @ref net_if)
* - protocol-specific extensions to the network packet structure
* (see @ref net_pkt),
*
* @note All section, table and figure references are to the IEEE 802.15.4-2020
* standard.
*
* @{
*/
/**
* @brief Represents the PHY constant aMaxPhyPacketSize, see section 11.3.
*
* @note Currently only 127 byte sized packets are supported although some PHYs
* (e.g. SUN, MSK, LECIM, ...) support larger packet sizes. Needs to be changed
* once those PHYs should be fully supported.
*/
#define IEEE802154_MAX_PHY_PACKET_SIZE 127
/**
* @brief Represents the frame check sequence length, see section 7.2.1.1.
*
* @note Currently only a 2 byte FCS is supported although some PHYs (e.g. SUN,
* TVWS, ...) optionally support a 4 byte FCS. Needs to be changed once those
* PHYs should be fully supported.
*/
#define IEEE802154_FCS_LENGTH 2
/**
* @brief IEEE 802.15.4 "hardware" MTU (not to be confused with L3/IP MTU), i.e.
* the actual payload available to the next higher layer.
*
* @details This is equivalent to the IEEE 802.15.4 MAC frame length minus
* checksum bytes which is again equivalent to the PHY payload aka PSDU length
* minus checksum bytes. This definition exists for compatibility with the same
* concept in Linux and Zephyr's L3. It is not a concept from the IEEE 802.15.4
* standard.
*
* @note Currently only the original frame size from the 2006 standard version
* and earlier is supported. The 2015+ standard introduced PHYs with larger PHY
* payload. These are not (yet) supported in Zephyr.
*/
#define IEEE802154_MTU (IEEE802154_MAX_PHY_PACKET_SIZE - IEEE802154_FCS_LENGTH)
/* TODO: Support flexible MTU and FCS lengths for IEEE 802.15.4-2015ff */
/** IEEE 802.15.4 short address length. */
#define IEEE802154_SHORT_ADDR_LENGTH 2
/** IEEE 802.15.4 extended address length. */
#define IEEE802154_EXT_ADDR_LENGTH 8
/** IEEE 802.15.4 maximum address length. */
#define IEEE802154_MAX_ADDR_LENGTH IEEE802154_EXT_ADDR_LENGTH
/**
* A special channel value that symbolizes "all" channels or "any" channel -
* depending on context.
*/
#define IEEE802154_NO_CHANNEL USHRT_MAX
/**
* Represents the IEEE 802.15.4 broadcast short address, see sections 6.1 and
* 8.4.3, table 8-94, macShortAddress.
*/
#define IEEE802154_BROADCAST_ADDRESS 0xffff
/**
* Represents a special IEEE 802.15.4 short address that indicates that a device
* has been associated with a coordinator but did not receive a short address,
* see sections 6.4.1 and 8.4.3, table 8-94, macShortAddress.
*/
#define IEEE802154_NO_SHORT_ADDRESS_ASSIGNED 0xfffe
/** Represents the IEEE 802.15.4 broadcast PAN ID, see section 6.1. */
#define IEEE802154_BROADCAST_PAN_ID 0xffff
/**
* Represents a special value of the macShortAddress MAC PIB attribute, while the
* device is not associated, see section 8.4.3, table 8-94.
*/
#define IEEE802154_SHORT_ADDRESS_NOT_ASSOCIATED IEEE802154_BROADCAST_ADDRESS
/**
* Represents a special value of the macPanId MAC PIB attribute, while the
* device is not associated, see section 8.4.3, table 8-94.
*/
#define IEEE802154_PAN_ID_NOT_ASSOCIATED IEEE802154_BROADCAST_PAN_ID
/** Interface-level security attributes, see section 9.5. */
/** Interface-level security attributes, see section 9.5. */
struct ieee802154_security_ctx {
	/**
	 * Interface-level outgoing frame counter, section 9.5, table 9-8,
	 * secFrameCounter.
	 *
	 * Only used when the driver does not implement key-specific frame
	 * counters.
	 */
	uint32_t frame_counter;
	/** @cond INTERNAL_HIDDEN */
	/* Cipher contexts presumably used for outgoing (enc) and incoming
	 * (dec) frame security transformations — TODO confirm against the
	 * native L2 security implementation.
	 */
	struct cipher_ctx enc;
	struct cipher_ctx dec;
	/** INTERNAL_HIDDEN @endcond */
	/**
	 * @brief Interface-level frame encryption security key material
	 *
	 * @details Currently native L2 only supports a single secKeySource, see
	 * section 9.5, table 9-9, in combination with secKeyMode zero (implicit
	 * key mode), see section 9.4.2.3, table 9-7.
	 *
	 * @warning This is no longer in accordance with the 2015+ versions of
	 * the standard and needs to be extended in the future for full security
	 * procedure compliance.
	 */
	uint8_t key[16];
	/** Length in bytes of the interface-level security key material. */
	uint8_t key_len;
	/**
	 * @brief Frame security level, possible values are defined in section
	 * 9.4.2.2, table 9-6.
	 *
	 * @warning Currently native L2 allows to configure one common security
	 * level for all frame types, commands and information elements. This is
	 * no longer in accordance with the 2015+ versions of the standard and
	 * needs to be extended in the future for full security procedure
	 * compliance.
	 */
	uint8_t level : 3;
	/**
	 * @brief Frame security key mode
	 *
	 * @details Currently only implicit key mode is partially supported, see
	 * section 9.4.2.3, table 9-7, secKeyMode.
	 *
	 * @warning This is no longer in accordance with the 2015+ versions of
	 * the standard and needs to be extended in the future for full security
	 * procedure compliance.
	 */
	uint8_t key_mode : 2;
	/** @cond INTERNAL_HIDDEN */
	/* Padding to complete the byte; keeps layout explicit. */
	uint8_t _unused : 3;
	/** INTERNAL_HIDDEN @endcond */
};
/** @brief IEEE 802.15.4 device role */
enum ieee802154_device_role {
IEEE802154_DEVICE_ROLE_ENDDEVICE, /**< End device */
IEEE802154_DEVICE_ROLE_COORDINATOR, /**< Coordinator */
IEEE802154_DEVICE_ROLE_PAN_COORDINATOR, /**< PAN coordinator */
};
/** IEEE 802.15.4 L2 context. */
struct ieee802154_context {
/**
* @brief PAN ID
*
* @details The identifier of the PAN on which the device is operating.
* If this value is 0xffff, the device is not associated. See section
* 8.4.3.1, table 8-94, macPanId.
*
* in CPU byte order
*/
uint16_t pan_id;
/**
* @brief Channel Number
*
* @details The RF channel to use for all transmissions and receptions,
* see section 11.3, table 11-2, phyCurrentChannel. The allowable range
* of values is PHY dependent as defined in section 10.1.3.
*
* in CPU byte order
*/
uint16_t channel;
/**
* @brief Short Address (in CPU byte order)
*
* @details Range:
	 * * 0x0000 - 0xfffd: associated, short address was assigned
* * 0xfffe: associated but no short address assigned
* * 0xffff: not associated (default),
*
	 * See section 6.4.1, table 6-4 (Usage of the short address) and
* section 8.4.3.1, table 8-94, macShortAddress.
*/
uint16_t short_addr;
/**
* @brief Extended Address (in little endian)
*
* @details The extended address is device specific, usually permanently
* stored on the device and immutable.
*
* See section 8.4.3.1, table 8-94, macExtendedAddress.
*/
uint8_t ext_addr[IEEE802154_MAX_ADDR_LENGTH];
/** Link layer address (in big endian) */
struct net_linkaddr_storage linkaddr;
#ifdef CONFIG_NET_L2_IEEE802154_SECURITY
/** Security context */
struct ieee802154_security_ctx sec_ctx;
#endif
#ifdef CONFIG_NET_L2_IEEE802154_MGMT
/** Pointer to scanning parameters and results, guarded by scan_ctx_lock */
struct ieee802154_req_params *scan_ctx;
/**
* Used to maintain integrity of data for all fields in this struct
* unless otherwise documented on field level.
*/
struct k_sem scan_ctx_lock;
/**
* @brief Coordinator extended address
*
* @details see section 8.4.3.1, table 8-94, macCoordExtendedAddress,
* the address of the coordinator through which the device is
* associated.
*
* A value of zero indicates that a coordinator extended address is
* unknown (default).
*
* in little endian
*/
uint8_t coord_ext_addr[IEEE802154_MAX_ADDR_LENGTH];
/**
* @brief Coordinator short address
*
* @details see section 8.4.3.1, table 8-94, macCoordShortAddress, the
* short address assigned to the coordinator through which the device is
* associated.
*
* A value of 0xfffe indicates that the coordinator is only using its
* extended address. A value of 0xffff indicates that this value is
* unknown.
*
* in CPU byte order
*/
uint16_t coord_short_addr;
#endif
/** Transmission power in dBm. */
int16_t tx_power;
/** L2 flags */
enum net_l2_flags flags;
/**
* @brief Data sequence number
*
* @details The sequence number added to the transmitted Data frame or
* MAC command, see section 8.4.3.1, table 8-94, macDsn.
*/
uint8_t sequence;
/**
* @brief Device Role
*
* @details See section 6.1: A device may be operating as end device
* (0), coordinator (1), or PAN coordinator (2). If no device role is
* explicitly configured then the device will be treated as an end
* device.
*
* A value of 3 is undefined.
*
* Can be read/set via @ref ieee802154_device_role.
*/
uint8_t device_role : 2;
/** @cond INTERNAL_HIDDEN */
uint8_t _unused : 5;
/** INTERNAL_HIDDEN @endcond */
/**
* ACK requested flag, guarded by ack_lock
*/
uint8_t ack_requested: 1;
/** ACK expected sequence number, guarded by ack_lock */
uint8_t ack_seq;
/** ACK lock, guards ack_* fields */
struct k_sem ack_lock;
/**
* @brief Context lock
*
* @details This lock guards all mutable context attributes unless
* otherwise mentioned on attribute level.
*/
struct k_sem ctx_lock;
};
/** @cond INTERNAL_HIDDEN */
/* L2 context type to be used with NET_L2_GET_CTX_TYPE */
#define IEEE802154_L2_CTX_TYPE struct ieee802154_context
/** INTERNAL_HIDDEN @endcond */
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_IEEE802154_H_ */
``` | /content/code_sandbox/include/zephyr/net/ieee802154.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,489 |
```objective-c
/** @file
* @brief Loopback control interface
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_LOOPBACK_H_
#define ZEPHYR_INCLUDE_NET_LOOPBACK_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_NET_LOOPBACK_SIMULATE_PACKET_DROP
/**
* @brief Set the packet drop rate
*
* @param[in] ratio Value between 0 = no packet loss and 1 = all packets dropped
*
* @return 0 on success, otherwise a negative integer.
*/
int loopback_set_packet_drop_ratio(float ratio);
/**
* @brief Get the number of dropped packets
*
* @return number of packets dropped by the loopback interface
*/
int loopback_get_num_dropped_packets(void);
#endif
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_LOOPBACK_H_ */
``` | /content/code_sandbox/include/zephyr/net/loopback.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 176 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_TRACING_TRACING_H_
#define ZEPHYR_INCLUDE_TRACING_TRACING_H_
#include <zephyr/kernel.h>
#include "tracking.h"
#if defined CONFIG_SEGGER_SYSTEMVIEW
#include "tracing_sysview.h"
#elif defined CONFIG_TRACING_CTF
#include "tracing_ctf.h"
#elif defined CONFIG_TRACING_TEST
#include "tracing_test.h"
#elif defined CONFIG_TRACING_USER
#include "tracing_user.h"
#else
/**
* @brief Tracing
*
* The tracing subsystem provides hooks that permits you to collect data from
* your application and allows tools running on a host to visualize the
* inner-working of the kernel and various other subsystems.
*
* @defgroup subsys_tracing Tracing
* @ingroup os_services
* @{
*/
/**
* @brief Tracing APIs
* @defgroup subsys_tracing_apis Tracing APIs
* @{
*/
/**
* @brief Thread Tracing APIs
* @defgroup subsys_tracing_apis_thread Thread Tracing APIs
* @{
*/
/**
* @brief Called when entering a k_thread_foreach call
*/
#define sys_port_trace_k_thread_foreach_enter()
/**
* @brief Called when exiting a k_thread_foreach call
*/
#define sys_port_trace_k_thread_foreach_exit()
/**
* @brief Called when entering a k_thread_foreach_unlocked
*/
#define sys_port_trace_k_thread_foreach_unlocked_enter()
/**
* @brief Called when exiting a k_thread_foreach_unlocked
*/
#define sys_port_trace_k_thread_foreach_unlocked_exit()
/**
* @brief Trace creating a Thread
* @param new_thread Thread object
*/
#define sys_port_trace_k_thread_create(new_thread)
/**
* @brief Trace Thread entering user mode
*/
#define sys_port_trace_k_thread_user_mode_enter()
/**
* @brief Called when entering a k_thread_join
* @param thread Thread object
* @param timeout Timeout period
*/
#define sys_port_trace_k_thread_join_enter(thread, timeout)
/**
* @brief Called when k_thread_join blocks
* @param thread Thread object
* @param timeout Timeout period
*/
#define sys_port_trace_k_thread_join_blocking(thread, timeout)
/**
* @brief Called when exiting k_thread_join
* @param thread Thread object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_thread_join_exit(thread, timeout, ret)
/**
* @brief Called when entering k_thread_sleep
* @param timeout Timeout period
*/
#define sys_port_trace_k_thread_sleep_enter(timeout)
/**
* @brief Called when exiting k_thread_sleep
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_thread_sleep_exit(timeout, ret)
/**
* @brief Called when entering k_thread_msleep
* @param ms Duration in milliseconds
*/
#define sys_port_trace_k_thread_msleep_enter(ms)
/**
* @brief Called when exiting k_thread_msleep
* @param ms Duration in milliseconds
* @param ret Return value
*/
#define sys_port_trace_k_thread_msleep_exit(ms, ret)
/**
* @brief Called when entering k_thread_usleep
* @param us Duration in microseconds
*/
#define sys_port_trace_k_thread_usleep_enter(us)
/**
* @brief Called when exiting k_thread_usleep
* @param us Duration in microseconds
* @param ret Return value
*/
#define sys_port_trace_k_thread_usleep_exit(us, ret)
/**
* @brief Called when entering k_thread_busy_wait
* @param usec_to_wait Duration in microseconds
*/
#define sys_port_trace_k_thread_busy_wait_enter(usec_to_wait)
/**
* @brief Called when exiting k_thread_busy_wait
* @param usec_to_wait Duration in microseconds
*/
#define sys_port_trace_k_thread_busy_wait_exit(usec_to_wait)
/**
* @brief Called when a thread yields
*/
#define sys_port_trace_k_thread_yield()
/**
* @brief Called when a thread wakes up
* @param thread Thread object
*/
#define sys_port_trace_k_thread_wakeup(thread)
/**
* @brief Called when a thread is started
* @param thread Thread object
*/
#define sys_port_trace_k_thread_start(thread)
/**
* @brief Called when a thread is being aborted
* @param thread Thread object
*/
#define sys_port_trace_k_thread_abort(thread)
/**
* @brief Called when a thread enters the k_thread_abort routine
* @param thread Thread object
*/
#define sys_port_trace_k_thread_abort_enter(thread)
/**
* @brief Called when a thread exits the k_thread_abort routine
* @param thread Thread object
*/
#define sys_port_trace_k_thread_abort_exit(thread)
/**
* @brief Called when setting priority of a thread
* @param thread Thread object
*/
#define sys_port_trace_k_thread_priority_set(thread)
/**
* @brief Called when a thread enters the k_thread_suspend
* function.
* @param thread Thread object
*/
#define sys_port_trace_k_thread_suspend_enter(thread)
/**
* @brief Called when a thread exits the k_thread_suspend
* function.
* @param thread Thread object
*/
#define sys_port_trace_k_thread_suspend_exit(thread)
/**
* @brief Called when a thread enters the resume from suspension
* function.
* @param thread Thread object
*/
#define sys_port_trace_k_thread_resume_enter(thread)
/**
* @brief Called when a thread exits the resumed from suspension
* function.
* @param thread Thread object
*/
#define sys_port_trace_k_thread_resume_exit(thread)
/**
* @brief Called when the thread scheduler is locked
*/
#define sys_port_trace_k_thread_sched_lock()
/**
* @brief Called when the thread scheduler is unlocked
*/
#define sys_port_trace_k_thread_sched_unlock()
/**
* @brief Called when a thread name is set
* @param thread Thread object
* @param ret Return value
*/
#define sys_port_trace_k_thread_name_set(thread, ret)
/**
* @brief Called before a thread has been selected to run
*/
#define sys_port_trace_k_thread_switched_out()
/**
* @brief Called after a thread has been selected to run
*/
#define sys_port_trace_k_thread_switched_in()
/**
* @brief Called when a thread is ready to run
* @param thread Thread object
*/
#define sys_port_trace_k_thread_ready(thread)
/**
* @brief Called when a thread is pending
* @param thread Thread object
*/
#define sys_port_trace_k_thread_pend(thread)
/**
* @brief Provide information about specific thread
* @param thread Thread object
*/
#define sys_port_trace_k_thread_info(thread)
/**
* @brief Trace implicit thread wakeup invocation by the scheduler
* @param thread Thread object
*/
#define sys_port_trace_k_thread_sched_wakeup(thread)
/**
* @brief Trace implicit thread abort invocation by the scheduler
* @param thread Thread object
*/
#define sys_port_trace_k_thread_sched_abort(thread)
/**
* @brief Trace implicit thread set priority invocation by the scheduler
* @param thread Thread object
* @param prio Thread priority
*/
#define sys_port_trace_k_thread_sched_priority_set(thread, prio)
/**
* @brief Trace implicit thread ready invocation by the scheduler
* @param thread Thread object
*/
#define sys_port_trace_k_thread_sched_ready(thread)
/**
* @brief Trace implicit thread pend invocation by the scheduler
* @param thread Thread object
*/
#define sys_port_trace_k_thread_sched_pend(thread)
/**
* @brief Trace implicit thread resume invocation by the scheduler
* @param thread Thread object
*/
#define sys_port_trace_k_thread_sched_resume(thread)
/**
* @brief Trace implicit thread suspend invocation by the scheduler
* @param thread Thread object
*/
#define sys_port_trace_k_thread_sched_suspend(thread)
/** @} */ /* end of subsys_tracing_apis_thread */
/**
* @brief Work Tracing APIs
* @defgroup subsys_tracing_apis_work Work Tracing APIs
* @{
*/
/**
* @brief Trace initialisation of a Work structure
* @param work Work structure
*/
#define sys_port_trace_k_work_init(work)
/**
* @brief Trace submit work to work queue call entry
* @param queue Work queue structure
* @param work Work structure
*/
#define sys_port_trace_k_work_submit_to_queue_enter(queue, work)
/**
* @brief Trace submit work to work queue call exit
* @param queue Work queue structure
* @param work Work structure
* @param ret Return value
*/
#define sys_port_trace_k_work_submit_to_queue_exit(queue, work, ret)
/**
* @brief Trace submit work to system work queue call entry
* @param work Work structure
*/
#define sys_port_trace_k_work_submit_enter(work)
/**
* @brief Trace submit work to system work queue call exit
* @param work Work structure
* @param ret Return value
*/
#define sys_port_trace_k_work_submit_exit(work, ret)
/**
* @brief Trace flush work call entry
* @param work Work structure
*/
#define sys_port_trace_k_work_flush_enter(work)
/**
* @brief Trace flush work call blocking
* @param work Work structure
* @param timeout Timeout period
*/
#define sys_port_trace_k_work_flush_blocking(work, timeout)
/**
* @brief Trace flush work call exit
* @param work Work structure
* @param ret Return value
*/
#define sys_port_trace_k_work_flush_exit(work, ret)
/**
* @brief Trace cancel work call entry
* @param work Work structure
*/
#define sys_port_trace_k_work_cancel_enter(work)
/**
* @brief Trace cancel work call exit
* @param work Work structure
* @param ret Return value
*/
#define sys_port_trace_k_work_cancel_exit(work, ret)
/**
* @brief Trace cancel sync work call entry
* @param work Work structure
* @param sync Sync object
*/
#define sys_port_trace_k_work_cancel_sync_enter(work, sync)
/**
* @brief Trace cancel sync work call blocking
* @param work Work structure
* @param sync Sync object
*/
#define sys_port_trace_k_work_cancel_sync_blocking(work, sync)
/**
* @brief Trace cancel sync work call exit
* @param work Work structure
* @param sync Sync object
* @param ret Return value
*/
#define sys_port_trace_k_work_cancel_sync_exit(work, sync, ret)
/** @} */ /* end of subsys_tracing_apis_work */
/**
* @brief Work Queue Tracing APIs
* @defgroup subsys_tracing_apis_work_q Work Queue Tracing APIs
* @{
*/
/**
* @brief Trace initialisation of a Work Queue structure
* @param queue Work Queue structure
*/
#define sys_port_trace_k_work_queue_init(queue)
/**
* @brief Trace start of a Work Queue call entry
* @param queue Work Queue structure
*/
#define sys_port_trace_k_work_queue_start_enter(queue)
/**
* @brief Trace start of a Work Queue call exit
* @param queue Work Queue structure
*/
#define sys_port_trace_k_work_queue_start_exit(queue)
/**
* @brief Trace Work Queue drain call entry
* @param queue Work Queue structure
*/
#define sys_port_trace_k_work_queue_drain_enter(queue)
/**
* @brief Trace Work Queue drain call exit
* @param queue Work Queue structure
* @param ret Return value
*/
#define sys_port_trace_k_work_queue_drain_exit(queue, ret)
/**
* @brief Trace Work Queue unplug call entry
* @param queue Work Queue structure
*/
#define sys_port_trace_k_work_queue_unplug_enter(queue)
/**
* @brief Trace Work Queue unplug call exit
* @param queue Work Queue structure
* @param ret Return value
*/
#define sys_port_trace_k_work_queue_unplug_exit(queue, ret)
/** @} */ /* end of subsys_tracing_apis_work_q */
/**
* @brief Work Delayable Tracing APIs
* @defgroup subsys_tracing_apis_work_delayable Work Delayable Tracing APIs
* @{
*/
/**
* @brief Trace initialisation of a Delayable Work structure
* @param dwork Delayable Work structure
*/
#define sys_port_trace_k_work_delayable_init(dwork)
/**
* @brief Trace schedule delayable work for queue enter
* @param queue Work Queue structure
* @param dwork Delayable Work structure
* @param delay Delay period
*/
#define sys_port_trace_k_work_schedule_for_queue_enter(queue, dwork, delay)
/**
* @brief Trace schedule delayable work for queue exit
* @param queue Work Queue structure
* @param dwork Delayable Work structure
* @param delay Delay period
* @param ret Return value
*/
#define sys_port_trace_k_work_schedule_for_queue_exit(queue, dwork, delay, ret)
/**
* @brief Trace schedule delayable work for system work queue enter
* @param dwork Delayable Work structure
* @param delay Delay period
*/
#define sys_port_trace_k_work_schedule_enter(dwork, delay)
/**
* @brief Trace schedule delayable work for system work queue exit
* @param dwork Delayable Work structure
* @param delay Delay period
* @param ret Return value
*/
#define sys_port_trace_k_work_schedule_exit(dwork, delay, ret)
/**
* @brief Trace reschedule delayable work for queue enter
* @param queue Work Queue structure
* @param dwork Delayable Work structure
* @param delay Delay period
*/
#define sys_port_trace_k_work_reschedule_for_queue_enter(queue, dwork, delay)
/**
* @brief Trace reschedule delayable work for queue exit
* @param queue Work Queue structure
* @param dwork Delayable Work structure
* @param delay Delay period
* @param ret Return value
*/
#define sys_port_trace_k_work_reschedule_for_queue_exit(queue, dwork, delay, ret)
/**
* @brief Trace reschedule delayable work for system queue enter
* @param dwork Delayable Work structure
* @param delay Delay period
*/
#define sys_port_trace_k_work_reschedule_enter(dwork, delay)
/**
* @brief Trace reschedule delayable work for system queue exit
* @param dwork Delayable Work structure
* @param delay Delay period
* @param ret Return value
*/
#define sys_port_trace_k_work_reschedule_exit(dwork, delay, ret)
/**
* @brief Trace delayable work flush enter
* @param dwork Delayable Work structure
* @param sync Sync object
*/
#define sys_port_trace_k_work_flush_delayable_enter(dwork, sync)
/**
* @brief Trace delayable work flush exit
* @param dwork Delayable Work structure
* @param sync Sync object
* @param ret Return value
*/
#define sys_port_trace_k_work_flush_delayable_exit(dwork, sync, ret)
/**
* @brief Trace delayable work cancel enter
* @param dwork Delayable Work structure
*/
#define sys_port_trace_k_work_cancel_delayable_enter(dwork)
/**
 * @brief Trace delayable work cancel exit
 * @param dwork Delayable Work structure
 * @param ret Return value
 */
#define sys_port_trace_k_work_cancel_delayable_exit(dwork, ret)
/**
* @brief Trace delayable work cancel sync enter
* @param dwork Delayable Work structure
* @param sync Sync object
*/
#define sys_port_trace_k_work_cancel_delayable_sync_enter(dwork, sync)
/**
 * @brief Trace delayable work cancel sync exit
 * @param dwork Delayable Work structure
 * @param sync Sync object
 * @param ret Return value
 */
#define sys_port_trace_k_work_cancel_delayable_sync_exit(dwork, sync, ret)
/** @} */ /* end of subsys_tracing_apis_work_delayable */
/**
* @brief Work Poll Tracing APIs
* @defgroup subsys_tracing_apis_work_poll Work Poll Tracing APIs
* @{
*/
/**
* @brief Trace initialisation of a Work Poll structure enter
* @param work Work structure
*/
#define sys_port_trace_k_work_poll_init_enter(work)
/**
* @brief Trace initialisation of a Work Poll structure exit
* @param work Work structure
*/
#define sys_port_trace_k_work_poll_init_exit(work)
/**
* @brief Trace work poll submit to queue enter
* @param work_q Work queue
* @param work Work structure
* @param timeout Timeout period
*/
#define sys_port_trace_k_work_poll_submit_to_queue_enter(work_q, work, timeout)
/**
* @brief Trace work poll submit to queue blocking
* @param work_q Work queue
* @param work Work structure
* @param timeout Timeout period
*/
#define sys_port_trace_k_work_poll_submit_to_queue_blocking(work_q, work, timeout)
/**
* @brief Trace work poll submit to queue exit
* @param work_q Work queue
* @param work Work structure
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_work_poll_submit_to_queue_exit(work_q, work, timeout, ret)
/**
* @brief Trace work poll submit to system queue enter
* @param work Work structure
* @param timeout Timeout period
*/
#define sys_port_trace_k_work_poll_submit_enter(work, timeout)
/**
* @brief Trace work poll submit to system queue exit
* @param work Work structure
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_work_poll_submit_exit(work, timeout, ret)
/**
* @brief Trace work poll cancel enter
* @param work Work structure
*/
#define sys_port_trace_k_work_poll_cancel_enter(work)
/**
* @brief Trace work poll cancel exit
* @param work Work structure
* @param ret Return value
*/
#define sys_port_trace_k_work_poll_cancel_exit(work, ret)
/** @} */ /* end of subsys_tracing_apis_work_poll */
/**
* @brief Poll Tracing APIs
* @defgroup subsys_tracing_apis_poll Poll Tracing APIs
* @{
*/
/**
* @brief Trace initialisation of a Poll Event
* @param event Poll Event
*/
#define sys_port_trace_k_poll_api_event_init(event)
/**
* @brief Trace Polling call start
* @param events Poll Events
*/
#define sys_port_trace_k_poll_api_poll_enter(events)
/**
* @brief Trace Polling call outcome
* @param events Poll Events
* @param ret Return value
*/
#define sys_port_trace_k_poll_api_poll_exit(events, ret)
/**
* @brief Trace initialisation of a Poll Signal
* @param signal Poll Signal
*/
#define sys_port_trace_k_poll_api_signal_init(signal)
/**
* @brief Trace resetting of Poll Signal
* @param signal Poll Signal
*/
#define sys_port_trace_k_poll_api_signal_reset(signal)
/**
* @brief Trace checking of Poll Signal
* @param signal Poll Signal
*/
#define sys_port_trace_k_poll_api_signal_check(signal)
/**
* @brief Trace raising of Poll Signal
* @param signal Poll Signal
* @param ret Return value
*/
#define sys_port_trace_k_poll_api_signal_raise(signal, ret)
/** @} */ /* end of subsys_tracing_apis_poll */
/**
* @brief Semaphore Tracing APIs
* @defgroup subsys_tracing_apis_sem Semaphore Tracing APIs
* @{
*/
/**
* @brief Trace initialisation of a Semaphore
* @param sem Semaphore object
* @param ret Return value
*/
#define sys_port_trace_k_sem_init(sem, ret)
/**
* @brief Trace giving a Semaphore entry
* @param sem Semaphore object
*/
#define sys_port_trace_k_sem_give_enter(sem)
/**
* @brief Trace giving a Semaphore exit
* @param sem Semaphore object
*/
#define sys_port_trace_k_sem_give_exit(sem)
/**
* @brief Trace taking a Semaphore attempt start
* @param sem Semaphore object
* @param timeout Timeout period
*/
#define sys_port_trace_k_sem_take_enter(sem, timeout)
/**
* @brief Trace taking a Semaphore attempt blocking
* @param sem Semaphore object
* @param timeout Timeout period
*/
#define sys_port_trace_k_sem_take_blocking(sem, timeout)
/**
* @brief Trace taking a Semaphore attempt outcome
* @param sem Semaphore object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_sem_take_exit(sem, timeout, ret)
/**
* @brief Trace resetting a Semaphore
* @param sem Semaphore object
*/
#define sys_port_trace_k_sem_reset(sem)
/** @} */ /* end of subsys_tracing_apis_sem */
/**
* @brief Mutex Tracing APIs
* @defgroup subsys_tracing_apis_mutex Mutex Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Mutex
* @param mutex Mutex object
* @param ret Return value
*/
#define sys_port_trace_k_mutex_init(mutex, ret)
/**
* @brief Trace Mutex lock attempt start
* @param mutex Mutex object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mutex_lock_enter(mutex, timeout)
/**
* @brief Trace Mutex lock attempt blocking
* @param mutex Mutex object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mutex_lock_blocking(mutex, timeout)
/**
* @brief Trace Mutex lock attempt outcome
* @param mutex Mutex object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_mutex_lock_exit(mutex, timeout, ret)
/**
* @brief Trace Mutex unlock entry
* @param mutex Mutex object
*/
#define sys_port_trace_k_mutex_unlock_enter(mutex)
/**
 * @brief Trace Mutex unlock exit
 * @param mutex Mutex object
 * @param ret Return value
 */
#define sys_port_trace_k_mutex_unlock_exit(mutex, ret)
/** @} */ /* end of subsys_tracing_apis_mutex */
/**
* @brief Conditional Variable Tracing APIs
* @defgroup subsys_tracing_apis_condvar Conditional Variable Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Conditional Variable
* @param condvar Conditional Variable object
* @param ret Return value
*/
#define sys_port_trace_k_condvar_init(condvar, ret)
/**
* @brief Trace Conditional Variable signaling start
* @param condvar Conditional Variable object
*/
#define sys_port_trace_k_condvar_signal_enter(condvar)
/**
* @brief Trace Conditional Variable signaling blocking
* @param condvar Conditional Variable object
* @param timeout Timeout period
*/
#define sys_port_trace_k_condvar_signal_blocking(condvar, timeout)
/**
* @brief Trace Conditional Variable signaling outcome
* @param condvar Conditional Variable object
* @param ret Return value
*/
#define sys_port_trace_k_condvar_signal_exit(condvar, ret)
/**
* @brief Trace Conditional Variable broadcast enter
* @param condvar Conditional Variable object
*/
#define sys_port_trace_k_condvar_broadcast_enter(condvar)
/**
* @brief Trace Conditional Variable broadcast exit
* @param condvar Conditional Variable object
* @param ret Return value
*/
#define sys_port_trace_k_condvar_broadcast_exit(condvar, ret)
/**
* @brief Trace Conditional Variable wait enter
* @param condvar Conditional Variable object
*/
#define sys_port_trace_k_condvar_wait_enter(condvar)
/**
* @brief Trace Conditional Variable wait exit
* @param condvar Conditional Variable object
* @param ret Return value
*/
#define sys_port_trace_k_condvar_wait_exit(condvar, ret)
/** @} */ /* end of subsys_tracing_apis_condvar */
/**
* @brief Queue Tracing APIs
* @defgroup subsys_tracing_apis_queue Queue Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Queue
* @param queue Queue object
*/
#define sys_port_trace_k_queue_init(queue)
/**
* @brief Trace Queue cancel wait
* @param queue Queue object
*/
#define sys_port_trace_k_queue_cancel_wait(queue)
/**
* @brief Trace Queue insert attempt entry
* @param queue Queue object
* @param alloc Allocation flag
*/
#define sys_port_trace_k_queue_queue_insert_enter(queue, alloc)
/**
* @brief Trace Queue insert attempt blocking
* @param queue Queue object
* @param alloc Allocation flag
* @param timeout Timeout period
*/
#define sys_port_trace_k_queue_queue_insert_blocking(queue, alloc, timeout)
/**
* @brief Trace Queue insert attempt outcome
* @param queue Queue object
* @param alloc Allocation flag
* @param ret Return value
*/
#define sys_port_trace_k_queue_queue_insert_exit(queue, alloc, ret)
/**
* @brief Trace Queue append enter
* @param queue Queue object
*/
#define sys_port_trace_k_queue_append_enter(queue)
/**
* @brief Trace Queue append exit
* @param queue Queue object
*/
#define sys_port_trace_k_queue_append_exit(queue)
/**
* @brief Trace Queue alloc append enter
* @param queue Queue object
*/
#define sys_port_trace_k_queue_alloc_append_enter(queue)
/**
* @brief Trace Queue alloc append exit
* @param queue Queue object
* @param ret Return value
*/
#define sys_port_trace_k_queue_alloc_append_exit(queue, ret)
/**
* @brief Trace Queue prepend enter
* @param queue Queue object
*/
#define sys_port_trace_k_queue_prepend_enter(queue)
/**
* @brief Trace Queue prepend exit
* @param queue Queue object
*/
#define sys_port_trace_k_queue_prepend_exit(queue)
/**
* @brief Trace Queue alloc prepend enter
* @param queue Queue object
*/
#define sys_port_trace_k_queue_alloc_prepend_enter(queue)
/**
* @brief Trace Queue alloc prepend exit
* @param queue Queue object
* @param ret Return value
*/
#define sys_port_trace_k_queue_alloc_prepend_exit(queue, ret)
/**
* @brief Trace Queue insert attempt entry
* @param queue Queue object
*/
#define sys_port_trace_k_queue_insert_enter(queue)
/**
* @brief Trace Queue insert attempt blocking
* @param queue Queue object
* @param timeout Timeout period
*/
#define sys_port_trace_k_queue_insert_blocking(queue, timeout)
/**
* @brief Trace Queue insert attempt exit
* @param queue Queue object
*/
#define sys_port_trace_k_queue_insert_exit(queue)
/**
* @brief Trace Queue append list enter
* @param queue Queue object
*/
#define sys_port_trace_k_queue_append_list_enter(queue)
/**
* @brief Trace Queue append list exit
* @param queue Queue object
* @param ret Return value
*/
#define sys_port_trace_k_queue_append_list_exit(queue, ret)
/**
* @brief Trace Queue merge slist enter
* @param queue Queue object
*/
#define sys_port_trace_k_queue_merge_slist_enter(queue)
/**
* @brief Trace Queue merge slist exit
* @param queue Queue object
* @param ret Return value
*/
#define sys_port_trace_k_queue_merge_slist_exit(queue, ret)
/**
* @brief Trace Queue get attempt enter
* @param queue Queue object
* @param timeout Timeout period
*/
#define sys_port_trace_k_queue_get_enter(queue, timeout)
/**
 * @brief Trace Queue get attempt blocking
* @param queue Queue object
* @param timeout Timeout period
*/
#define sys_port_trace_k_queue_get_blocking(queue, timeout)
/**
* @brief Trace Queue get attempt outcome
* @param queue Queue object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_queue_get_exit(queue, timeout, ret)
/**
* @brief Trace Queue remove enter
* @param queue Queue object
*/
#define sys_port_trace_k_queue_remove_enter(queue)
/**
* @brief Trace Queue remove exit
* @param queue Queue object
* @param ret Return value
*/
#define sys_port_trace_k_queue_remove_exit(queue, ret)
/**
* @brief Trace Queue unique append enter
* @param queue Queue object
*/
#define sys_port_trace_k_queue_unique_append_enter(queue)
/**
 * @brief Trace Queue unique append exit
 * @param queue Queue object
 * @param ret Return value
 */
#define sys_port_trace_k_queue_unique_append_exit(queue, ret)
/**
* @brief Trace Queue peek head
* @param queue Queue object
* @param ret Return value
*/
#define sys_port_trace_k_queue_peek_head(queue, ret)
/**
* @brief Trace Queue peek tail
* @param queue Queue object
* @param ret Return value
*/
#define sys_port_trace_k_queue_peek_tail(queue, ret)
/** @} */ /* end of subsys_tracing_apis_queue */
/**
* @brief FIFO Tracing APIs
* @defgroup subsys_tracing_apis_fifo FIFO Tracing APIs
* @{
*/
/**
* @brief Trace initialization of FIFO Queue entry
* @param fifo FIFO object
*/
#define sys_port_trace_k_fifo_init_enter(fifo)
/**
* @brief Trace initialization of FIFO Queue exit
* @param fifo FIFO object
*/
#define sys_port_trace_k_fifo_init_exit(fifo)
/**
* @brief Trace FIFO Queue cancel wait entry
* @param fifo FIFO object
*/
#define sys_port_trace_k_fifo_cancel_wait_enter(fifo)
/**
* @brief Trace FIFO Queue cancel wait exit
* @param fifo FIFO object
*/
#define sys_port_trace_k_fifo_cancel_wait_exit(fifo)
/**
* @brief Trace FIFO Queue put entry
* @param fifo FIFO object
* @param data Data item
*/
#define sys_port_trace_k_fifo_put_enter(fifo, data)
/**
* @brief Trace FIFO Queue put exit
* @param fifo FIFO object
* @param data Data item
*/
#define sys_port_trace_k_fifo_put_exit(fifo, data)
/**
* @brief Trace FIFO Queue alloc put entry
* @param fifo FIFO object
* @param data Data item
*/
#define sys_port_trace_k_fifo_alloc_put_enter(fifo, data)
/**
* @brief Trace FIFO Queue alloc put exit
* @param fifo FIFO object
* @param data Data item
* @param ret Return value
*/
#define sys_port_trace_k_fifo_alloc_put_exit(fifo, data, ret)
/**
* @brief Trace FIFO Queue put list entry
* @param fifo FIFO object
* @param head First ll-node
* @param tail Last ll-node
*/
#define sys_port_trace_k_fifo_put_list_enter(fifo, head, tail)
/**
* @brief Trace FIFO Queue put list exit
* @param fifo FIFO object
* @param head First ll-node
* @param tail Last ll-node
*/
#define sys_port_trace_k_fifo_put_list_exit(fifo, head, tail)
/**
* @brief Trace FIFO Queue put slist entry
* @param fifo FIFO object
* @param list Syslist object
*/
#define sys_port_trace_k_fifo_alloc_put_slist_enter(fifo, list)
/**
* @brief Trace FIFO Queue put slist exit
* @param fifo FIFO object
* @param list Syslist object
*/
#define sys_port_trace_k_fifo_alloc_put_slist_exit(fifo, list)
/**
* @brief Trace FIFO Queue get entry
* @param fifo FIFO object
* @param timeout Timeout period
*/
#define sys_port_trace_k_fifo_get_enter(fifo, timeout)
/**
* @brief Trace FIFO Queue get exit
* @param fifo FIFO object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_fifo_get_exit(fifo, timeout, ret)
/**
* @brief Trace FIFO Queue peek head entry
* @param fifo FIFO object
*/
#define sys_port_trace_k_fifo_peek_head_enter(fifo)
/**
* @brief Trace FIFO Queue peek head exit
* @param fifo FIFO object
* @param ret Return value
*/
#define sys_port_trace_k_fifo_peek_head_exit(fifo, ret)
/**
* @brief Trace FIFO Queue peek tail entry
* @param fifo FIFO object
*/
#define sys_port_trace_k_fifo_peek_tail_enter(fifo)
/**
* @brief Trace FIFO Queue peek tail exit
* @param fifo FIFO object
* @param ret Return value
*/
#define sys_port_trace_k_fifo_peek_tail_exit(fifo, ret)
/** @} */ /* end of subsys_tracing_apis_fifo */
/**
* @brief LIFO Tracing APIs
* @defgroup subsys_tracing_apis_lifo LIFO Tracing APIs
* @{
*/
/**
* @brief Trace initialization of LIFO Queue entry
* @param lifo LIFO object
*/
#define sys_port_trace_k_lifo_init_enter(lifo)
/**
* @brief Trace initialization of LIFO Queue exit
* @param lifo LIFO object
*/
#define sys_port_trace_k_lifo_init_exit(lifo)
/**
* @brief Trace LIFO Queue put entry
* @param lifo LIFO object
* @param data Data item
*/
#define sys_port_trace_k_lifo_put_enter(lifo, data)
/**
* @brief Trace LIFO Queue put exit
* @param lifo LIFO object
* @param data Data item
*/
#define sys_port_trace_k_lifo_put_exit(lifo, data)
/**
* @brief Trace LIFO Queue alloc put entry
* @param lifo LIFO object
* @param data Data item
*/
#define sys_port_trace_k_lifo_alloc_put_enter(lifo, data)
/**
* @brief Trace LIFO Queue alloc put exit
* @param lifo LIFO object
* @param data Data item
* @param ret Return value
*/
#define sys_port_trace_k_lifo_alloc_put_exit(lifo, data, ret)
/**
* @brief Trace LIFO Queue get entry
* @param lifo LIFO object
* @param timeout Timeout period
*/
#define sys_port_trace_k_lifo_get_enter(lifo, timeout)
/**
* @brief Trace LIFO Queue get exit
* @param lifo LIFO object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_lifo_get_exit(lifo, timeout, ret)
/** @} */ /* end of subsys_tracing_apis_lifo */
/**
* @brief Stack Tracing APIs
* @defgroup subsys_tracing_apis_stack Stack Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Stack
* @param stack Stack object
*/
#define sys_port_trace_k_stack_init(stack)
/**
* @brief Trace Stack alloc init attempt entry
* @param stack Stack object
*/
#define sys_port_trace_k_stack_alloc_init_enter(stack)
/**
* @brief Trace Stack alloc init outcome
* @param stack Stack object
* @param ret Return value
*/
#define sys_port_trace_k_stack_alloc_init_exit(stack, ret)
/**
* @brief Trace Stack cleanup attempt entry
* @param stack Stack object
*/
#define sys_port_trace_k_stack_cleanup_enter(stack)
/**
* @brief Trace Stack cleanup outcome
* @param stack Stack object
* @param ret Return value
*/
#define sys_port_trace_k_stack_cleanup_exit(stack, ret)
/**
* @brief Trace Stack push attempt entry
* @param stack Stack object
*/
#define sys_port_trace_k_stack_push_enter(stack)
/**
* @brief Trace Stack push attempt outcome
* @param stack Stack object
* @param ret Return value
*/
#define sys_port_trace_k_stack_push_exit(stack, ret)
/**
* @brief Trace Stack pop attempt entry
* @param stack Stack object
* @param timeout Timeout period
*/
#define sys_port_trace_k_stack_pop_enter(stack, timeout)
/**
* @brief Trace Stack pop attempt blocking
* @param stack Stack object
* @param timeout Timeout period
*/
#define sys_port_trace_k_stack_pop_blocking(stack, timeout)
/**
* @brief Trace Stack pop attempt outcome
* @param stack Stack object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_stack_pop_exit(stack, timeout, ret)
/** @} */ /* end of subsys_tracing_apis_stack */
/**
* @brief Message Queue Tracing APIs
* @defgroup subsys_tracing_apis_msgq Message Queue Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Message Queue
* @param msgq Message Queue object
*/
#define sys_port_trace_k_msgq_init(msgq)
/**
* @brief Trace Message Queue alloc init attempt entry
* @param msgq Message Queue object
*/
#define sys_port_trace_k_msgq_alloc_init_enter(msgq)
/**
* @brief Trace Message Queue alloc init attempt outcome
* @param msgq Message Queue object
* @param ret Return value
*/
#define sys_port_trace_k_msgq_alloc_init_exit(msgq, ret)
/**
* @brief Trace Message Queue cleanup attempt entry
* @param msgq Message Queue object
*/
#define sys_port_trace_k_msgq_cleanup_enter(msgq)
/**
* @brief Trace Message Queue cleanup attempt outcome
* @param msgq Message Queue object
* @param ret Return value
*/
#define sys_port_trace_k_msgq_cleanup_exit(msgq, ret)
/**
* @brief Trace Message Queue put attempt entry
* @param msgq Message Queue object
* @param timeout Timeout period
*/
#define sys_port_trace_k_msgq_put_enter(msgq, timeout)
/**
* @brief Trace Message Queue put attempt blocking
* @param msgq Message Queue object
* @param timeout Timeout period
*/
#define sys_port_trace_k_msgq_put_blocking(msgq, timeout)
/**
* @brief Trace Message Queue put attempt outcome
* @param msgq Message Queue object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_msgq_put_exit(msgq, timeout, ret)
/**
* @brief Trace Message Queue get attempt entry
* @param msgq Message Queue object
* @param timeout Timeout period
*/
#define sys_port_trace_k_msgq_get_enter(msgq, timeout)
/**
 * @brief Trace Message Queue get attempt blocking
* @param msgq Message Queue object
* @param timeout Timeout period
*/
#define sys_port_trace_k_msgq_get_blocking(msgq, timeout)
/**
* @brief Trace Message Queue get attempt outcome
* @param msgq Message Queue object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_msgq_get_exit(msgq, timeout, ret)
/**
* @brief Trace Message Queue peek
* @param msgq Message Queue object
* @param ret Return value
*/
#define sys_port_trace_k_msgq_peek(msgq, ret)
/**
* @brief Trace Message Queue purge
* @param msgq Message Queue object
*/
#define sys_port_trace_k_msgq_purge(msgq)
/** @} */ /* end of subsys_tracing_apis_msgq */
/**
* @brief Mailbox Tracing APIs
* @defgroup subsys_tracing_apis_mbox Mailbox Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Mailbox
* @param mbox Mailbox object
*/
#define sys_port_trace_k_mbox_init(mbox)
/**
* @brief Trace Mailbox message put attempt entry
* @param mbox Mailbox object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mbox_message_put_enter(mbox, timeout)
/**
* @brief Trace Mailbox message put attempt blocking
* @param mbox Mailbox object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mbox_message_put_blocking(mbox, timeout)
/**
* @brief Trace Mailbox message put attempt outcome
* @param mbox Mailbox object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_mbox_message_put_exit(mbox, timeout, ret)
/**
* @brief Trace Mailbox put attempt entry
* @param mbox Mailbox object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mbox_put_enter(mbox, timeout)
/**
 * @brief Trace Mailbox put attempt outcome
* @param mbox Mailbox object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_mbox_put_exit(mbox, timeout, ret)
/**
* @brief Trace Mailbox async put entry
* @param mbox Mailbox object
* @param sem Semaphore object
*/
#define sys_port_trace_k_mbox_async_put_enter(mbox, sem)
/**
* @brief Trace Mailbox async put exit
* @param mbox Mailbox object
* @param sem Semaphore object
*/
#define sys_port_trace_k_mbox_async_put_exit(mbox, sem)
/**
* @brief Trace Mailbox get attempt entry
 * @param mbox Mailbox object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mbox_get_enter(mbox, timeout)
/**
* @brief Trace Mailbox get attempt blocking
 * @param mbox Mailbox object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mbox_get_blocking(mbox, timeout)
/**
* @brief Trace Mailbox get attempt outcome
 * @param mbox Mailbox object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_mbox_get_exit(mbox, timeout, ret)
/**
* @brief Trace Mailbox data get
 * @param rx_msg Receive Message object
*/
#define sys_port_trace_k_mbox_data_get(rx_msg)
/** @} */ /* end of subsys_tracing_apis_mbox */
/**
* @brief Pipe Tracing APIs
* @defgroup subsys_tracing_apis_pipe Pipe Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Pipe
* @param pipe Pipe object
*/
#define sys_port_trace_k_pipe_init(pipe)
/**
* @brief Trace Pipe cleanup entry
* @param pipe Pipe object
*/
#define sys_port_trace_k_pipe_cleanup_enter(pipe)
/**
* @brief Trace Pipe cleanup exit
* @param pipe Pipe object
* @param ret Return value
*/
#define sys_port_trace_k_pipe_cleanup_exit(pipe, ret)
/**
* @brief Trace Pipe alloc init entry
* @param pipe Pipe object
*/
#define sys_port_trace_k_pipe_alloc_init_enter(pipe)
/**
* @brief Trace Pipe alloc init exit
* @param pipe Pipe object
* @param ret Return value
*/
#define sys_port_trace_k_pipe_alloc_init_exit(pipe, ret)
/**
* @brief Trace Pipe flush entry
* @param pipe Pipe object
*/
#define sys_port_trace_k_pipe_flush_enter(pipe)
/**
* @brief Trace Pipe flush exit
* @param pipe Pipe object
*/
#define sys_port_trace_k_pipe_flush_exit(pipe)
/**
* @brief Trace Pipe buffer flush entry
* @param pipe Pipe object
*/
#define sys_port_trace_k_pipe_buffer_flush_enter(pipe)
/**
* @brief Trace Pipe buffer flush exit
* @param pipe Pipe object
*/
#define sys_port_trace_k_pipe_buffer_flush_exit(pipe)
/**
* @brief Trace Pipe put attempt entry
* @param pipe Pipe object
* @param timeout Timeout period
*/
#define sys_port_trace_k_pipe_put_enter(pipe, timeout)
/**
* @brief Trace Pipe put attempt blocking
* @param pipe Pipe object
* @param timeout Timeout period
*/
#define sys_port_trace_k_pipe_put_blocking(pipe, timeout)
/**
* @brief Trace Pipe put attempt outcome
* @param pipe Pipe object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_pipe_put_exit(pipe, timeout, ret)
/**
* @brief Trace Pipe get attempt entry
* @param pipe Pipe object
* @param timeout Timeout period
*/
#define sys_port_trace_k_pipe_get_enter(pipe, timeout)
/**
* @brief Trace Pipe get attempt blocking
* @param pipe Pipe object
* @param timeout Timeout period
*/
#define sys_port_trace_k_pipe_get_blocking(pipe, timeout)
/**
* @brief Trace Pipe get attempt outcome
* @param pipe Pipe object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_pipe_get_exit(pipe, timeout, ret)
/** @} */ /* end of subsys_tracing_apis_pipe */
/**
* @brief Heap Tracing APIs
* @defgroup subsys_tracing_apis_heap Heap Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Heap
* @param h Heap object
*/
#define sys_port_trace_k_heap_init(h)
/**
* @brief Trace Heap aligned alloc attempt entry
* @param h Heap object
* @param timeout Timeout period
*/
#define sys_port_trace_k_heap_aligned_alloc_enter(h, timeout)
/**
 * @brief Trace Heap aligned alloc attempt blocking
* @param h Heap object
* @param timeout Timeout period
*/
#define sys_port_trace_k_heap_aligned_alloc_blocking(h, timeout)
/**
 * @brief Trace Heap aligned alloc attempt outcome
* @param h Heap object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_heap_aligned_alloc_exit(h, timeout, ret)
/**
* @brief Trace Heap alloc enter
* @param h Heap object
* @param timeout Timeout period
*/
#define sys_port_trace_k_heap_alloc_enter(h, timeout)
/**
* @brief Trace Heap alloc exit
* @param h Heap object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_heap_alloc_exit(h, timeout, ret)
/**
* @brief Trace Heap free
* @param h Heap object
*/
#define sys_port_trace_k_heap_free(h)
/**
* @brief Trace Heap realloc enter
* @param h Heap object
* @param ptr Pointer to reallocate
* @param bytes Bytes to reallocate
* @param timeout Timeout period
*/
#define sys_port_trace_k_heap_realloc_enter(h, ptr, bytes, timeout)
/**
* @brief Trace Heap realloc exit
* @param h Heap object
* @param ptr Pointer to reallocate
* @param bytes Bytes to reallocate
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_heap_realloc_exit(h, ptr, bytes, timeout, ret)
/**
* @brief Trace System Heap aligned alloc enter
* @param heap Heap object
*/
#define sys_port_trace_k_heap_sys_k_aligned_alloc_enter(heap)
/**
* @brief Trace System Heap aligned alloc exit
* @param heap Heap object
* @param ret Return value
*/
#define sys_port_trace_k_heap_sys_k_aligned_alloc_exit(heap, ret)
/**
 * @brief Trace System Heap malloc enter
* @param heap Heap object
*/
#define sys_port_trace_k_heap_sys_k_malloc_enter(heap)
/**
 * @brief Trace System Heap malloc exit
* @param heap Heap object
* @param ret Return value
*/
#define sys_port_trace_k_heap_sys_k_malloc_exit(heap, ret)
/**
* @brief Trace System Heap free entry
* @param heap Heap object
* @param heap_ref Heap reference
*/
#define sys_port_trace_k_heap_sys_k_free_enter(heap, heap_ref)
/**
* @brief Trace System Heap free exit
* @param heap Heap object
* @param heap_ref Heap reference
*/
#define sys_port_trace_k_heap_sys_k_free_exit(heap, heap_ref)
/**
* @brief Trace System heap calloc enter
 * @param heap Heap object
*/
#define sys_port_trace_k_heap_sys_k_calloc_enter(heap)
/**
* @brief Trace System heap calloc exit
* @param heap Heap object
* @param ret Return value
*/
#define sys_port_trace_k_heap_sys_k_calloc_exit(heap, ret)
/**
* @brief Trace System heap realloc enter
 * @param heap Heap object
 * @param ptr Memory pointer
*/
#define sys_port_trace_k_heap_sys_k_realloc_enter(heap, ptr)
/**
* @brief Trace System heap realloc exit
* @param heap Heap object
* @param ptr Memory pointer
* @param ret Return value
*/
#define sys_port_trace_k_heap_sys_k_realloc_exit(heap, ptr, ret)
/** @} */ /* end of subsys_tracing_apis_heap */
/**
* @brief Memory Slab Tracing APIs
* @defgroup subsys_tracing_apis_mslab Memory Slab Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Memory Slab
* @param slab Memory Slab object
* @param rc Return value
*/
#define sys_port_trace_k_mem_slab_init(slab, rc)
/**
* @brief Trace Memory Slab alloc attempt entry
* @param slab Memory Slab object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mem_slab_alloc_enter(slab, timeout)
/**
* @brief Trace Memory Slab alloc attempt blocking
* @param slab Memory Slab object
* @param timeout Timeout period
*/
#define sys_port_trace_k_mem_slab_alloc_blocking(slab, timeout)
/**
* @brief Trace Memory Slab alloc attempt outcome
* @param slab Memory Slab object
* @param timeout Timeout period
* @param ret Return value
*/
#define sys_port_trace_k_mem_slab_alloc_exit(slab, timeout, ret)
/**
* @brief Trace Memory Slab free entry
* @param slab Memory Slab object
*/
#define sys_port_trace_k_mem_slab_free_enter(slab)
/**
* @brief Trace Memory Slab free exit
* @param slab Memory Slab object
*/
#define sys_port_trace_k_mem_slab_free_exit(slab)
/** @} */ /* end of subsys_tracing_apis_mslab */
/**
* @brief Timer Tracing APIs
* @defgroup subsys_tracing_apis_timer Timer Tracing APIs
* @{
*/
/**
* @brief Trace initialization of Timer
* @param timer Timer object
*/
#define sys_port_trace_k_timer_init(timer)
/**
* @brief Trace Timer start
* @param timer Timer object
* @param duration Timer duration
* @param period Timer period
*/
#define sys_port_trace_k_timer_start(timer, duration, period)
/**
* @brief Trace Timer stop
* @param timer Timer object
*/
#define sys_port_trace_k_timer_stop(timer)
/**
* @brief Trace Timer status sync entry
* @param timer Timer object
*/
#define sys_port_trace_k_timer_status_sync_enter(timer)
/**
* @brief Trace Timer Status sync blocking
* @param timer Timer object
* @param timeout Timeout period
*/
#define sys_port_trace_k_timer_status_sync_blocking(timer, timeout)
/**
 * @brief Trace Timer status sync outcome
* @param timer Timer object
* @param result Return value
*/
#define sys_port_trace_k_timer_status_sync_exit(timer, result)
/** @} */ /* end of subsys_tracing_apis_timer */
/**
* @brief Event Tracing APIs
* @defgroup subsys_tracing_apis_event Event Tracing APIs
* @{
*/
/**
* @brief Trace initialisation of an Event
* @param event Event object
*/
#define sys_port_trace_k_event_init(event)
/**
* @brief Trace posting of an Event call entry
* @param event Event object
* @param events Set of posted events
* @param events_mask Mask to apply against posted events
*/
#define sys_port_trace_k_event_post_enter(event, events, events_mask)
/**
* @brief Trace posting of an Event call exit
* @param event Event object
* @param events Set of posted events
* @param events_mask Mask to apply against posted events
*/
#define sys_port_trace_k_event_post_exit(event, events, events_mask)
/**
* @brief Trace waiting of an Event call entry
* @param event Event object
* @param events Set of events for which to wait
* @param options Event wait options
* @param timeout Timeout period
*/
#define sys_port_trace_k_event_wait_enter(event, events, options, timeout)
/**
 * @brief Trace waiting of an Event call blocking
* @param event Event object
* @param events Set of events for which to wait
* @param options Event wait options
* @param timeout Timeout period
*/
#define sys_port_trace_k_event_wait_blocking(event, events, options, timeout)
/**
* @brief Trace waiting of an Event call exit
* @param event Event object
* @param events Set of events for which to wait
* @param ret Set of received events
*/
#define sys_port_trace_k_event_wait_exit(event, events, ret)
/** @} */ /* end of subsys_tracing_apis_event */
/**
* @brief System PM Tracing APIs
* @defgroup subsys_tracing_apis_pm_system System PM Tracing APIs
* @{
*/
/**
* @brief Trace system suspend call entry.
* @param ticks Ticks.
*/
#define sys_port_trace_pm_system_suspend_enter(ticks)
/**
* @brief Trace system suspend call exit.
* @param ticks Ticks.
* @param state PM state.
*/
#define sys_port_trace_pm_system_suspend_exit(ticks, state)
/** @} */ /* end of subsys_tracing_apis_pm_system */
/**
* @brief PM Device Runtime Tracing APIs
* @defgroup subsys_tracing_apis_pm_device_runtime PM Device Runtime Tracing APIs
* @{
*/
/**
* @brief Trace getting a device call entry.
* @param dev Device instance.
*/
#define sys_port_trace_pm_device_runtime_get_enter(dev)
/**
* @brief Trace getting a device call exit.
* @param dev Device instance.
* @param ret Return value.
*/
#define sys_port_trace_pm_device_runtime_get_exit(dev, ret)
/**
* @brief Trace putting a device call entry.
* @param dev Device instance.
*/
#define sys_port_trace_pm_device_runtime_put_enter(dev)
/**
* @brief Trace putting a device call exit.
* @param dev Device instance.
* @param ret Return value.
*/
#define sys_port_trace_pm_device_runtime_put_exit(dev, ret)
/**
* @brief Trace putting a device (asynchronously) call entry.
* @param dev Device instance.
* @param delay Time to delay the operation
*/
#define sys_port_trace_pm_device_runtime_put_async_enter(dev, delay)
/**
* @brief Trace putting a device (asynchronously) call exit.
* @param dev Device instance.
* @param delay Time to delay the operation.
* @param ret Return value.
*/
#define sys_port_trace_pm_device_runtime_put_async_exit(dev, delay, ret)
/**
* @brief Trace enabling device runtime PM call entry.
* @param dev Device instance.
*/
#define sys_port_trace_pm_device_runtime_enable_enter(dev)
/**
* @brief Trace enabling device runtime PM call exit.
* @param dev Device instance.
* @param ret Return value.
*/
#define sys_port_trace_pm_device_runtime_enable_exit(dev, ret)
/**
* @brief Trace disabling device runtime PM call entry.
* @param dev Device instance.
*/
#define sys_port_trace_pm_device_runtime_disable_enter(dev)
/**
* @brief Trace disabling device runtime PM call exit.
* @param dev Device instance.
* @param ret Return value.
*/
#define sys_port_trace_pm_device_runtime_disable_exit(dev, ret)
/** @} */ /* end of subsys_tracing_apis_pm_device_runtime */
/**
* @brief Network Core Tracing APIs
* @defgroup subsys_tracing_apis_net Network Core Tracing APIs
* @{
*/
/**
* @brief Trace network data receive
* @param iface Network interface
* @param pkt Received network packet
*/
#define sys_port_trace_net_recv_data_enter(iface, pkt)
/**
* @brief Trace network data receive attempt
* @param iface Network interface
* @param pkt Received network packet
* @param ret Return value
*/
#define sys_port_trace_net_recv_data_exit(iface, pkt, ret)
/**
* @brief Trace network data send
* @param pkt Network packet to send
*/
#define sys_port_trace_net_send_data_enter(pkt)
/**
* @brief Trace network data send attempt
 * @param pkt Network packet that was sent
* @param ret Return value
*/
#define sys_port_trace_net_send_data_exit(pkt, ret)
/**
* @brief Trace network data receive time
* @param pkt Received network packet
* @param end_time When the RX processing stopped for this pkt (in ticks)
*/
#define sys_port_trace_net_rx_time(pkt, end_time)
/**
* @brief Trace network data sent time
* @param pkt Sent network packet
* @param end_time When the TX processing stopped for this pkt (in ticks)
*/
#define sys_port_trace_net_tx_time(pkt, end_time)
/** @} */ /* end of subsys_tracing_apis_net */
/**
* @brief Network Socket Tracing APIs
* @defgroup subsys_tracing_apis_socket Network Socket Tracing APIs
* @{
*/
/**
* @brief Trace init of network sockets
* @param socket Network socket is returned
* @param family Socket address family
* @param type Socket type
* @param proto Socket protocol
*/
#define sys_port_trace_socket_init(socket, family, type, proto)
/**
* @brief Trace close of network sockets
* @param socket Socket object
*/
#define sys_port_trace_socket_close_enter(socket)
/**
* @brief Trace network socket close attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_close_exit(socket, ret)
/**
* @brief Trace shutdown of network sockets
* @param socket Socket object
* @param how Socket shutdown type
*/
#define sys_port_trace_socket_shutdown_enter(socket, how)
/**
* @brief Trace network socket shutdown attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_shutdown_exit(socket, ret)
/**
* @brief Trace bind of network sockets
* @param socket Socket object
* @param addr Network address to bind
* @param addrlen Address length
*/
#define sys_port_trace_socket_bind_enter(socket, addr, addrlen)
/**
* @brief Trace network socket bind attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_bind_exit(socket, ret)
/**
* @brief Trace connect of network sockets
* @param socket Socket object
 * @param addr Network address to connect to
* @param addrlen Address length
*/
#define sys_port_trace_socket_connect_enter(socket, addr, addrlen)
/**
* @brief Trace network socket connect attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_connect_exit(socket, ret)
/**
* @brief Trace listen of network sockets
* @param socket Socket object
* @param backlog Socket backlog length
*/
#define sys_port_trace_socket_listen_enter(socket, backlog)
/**
* @brief Trace network socket listen attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_listen_exit(socket, ret)
/**
* @brief Trace accept of network sockets
* @param socket Socket object
*/
#define sys_port_trace_socket_accept_enter(socket)
/**
* @brief Trace network socket accept attempt
* @param socket Socket object
* @param addr Peer network address
* @param addrlen Network address length
* @param ret Return value
*/
#define sys_port_trace_socket_accept_exit(socket, addr, addrlen, ret)
/**
* @brief Trace sendto of network sockets
* @param socket Socket object
* @param len Length of the data to send
* @param flags Flags for this send operation
* @param dest_addr Destination network address
* @param addrlen Network address length
*/
#define sys_port_trace_socket_sendto_enter(socket, len, flags, dest_addr, addrlen)
/**
* @brief Trace network socket sendto attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_sendto_exit(socket, ret)
/**
* @brief Trace sendmsg of network sockets
* @param socket Socket object
* @param msg Data to send
* @param flags Flags for this send operation
*/
#define sys_port_trace_socket_sendmsg_enter(socket, msg, flags)
/**
* @brief Trace network socket sendmsg attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_sendmsg_exit(socket, ret)
/**
* @brief Trace recvfrom of network sockets
* @param socket Socket object
* @param max_len Maximum length of the data we can receive
* @param flags Flags for this receive operation
* @param addr Remote network address
* @param addrlen Network address length
*/
#define sys_port_trace_socket_recvfrom_enter(socket, max_len, flags, addr, addrlen)
/**
* @brief Trace network socket recvfrom attempt
* @param socket Socket object
 * @param src_addr Peer network address that sent the data
* @param addrlen Length of the network address
* @param ret Return value
*/
#define sys_port_trace_socket_recvfrom_exit(socket, src_addr, addrlen, ret)
/**
* @brief Trace recvmsg of network sockets
* @param socket Socket object
* @param msg Message buffer to receive
* @param flags Flags for this receive operation
*/
#define sys_port_trace_socket_recvmsg_enter(socket, msg, flags)
/**
* @brief Trace network socket recvmsg attempt
* @param socket Socket object
* @param msg Message buffer received
* @param ret Return value
*/
#define sys_port_trace_socket_recvmsg_exit(socket, msg, ret)
/**
* @brief Trace fcntl of network sockets
* @param socket Socket object
* @param cmd Command to set for this socket
* @param flags Flags for this receive operation
*/
#define sys_port_trace_socket_fcntl_enter(socket, cmd, flags)
/**
* @brief Trace network socket fcntl attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_fcntl_exit(socket, ret)
/**
* @brief Trace ioctl of network sockets
* @param socket Socket object
* @param req Request to set for this socket
*/
#define sys_port_trace_socket_ioctl_enter(socket, req)
/**
* @brief Trace network socket ioctl attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_ioctl_exit(socket, ret)
/**
* @brief Trace polling of network sockets
* @param fds Set of socket object
* @param nfds Number of socket objects in the set
* @param timeout Timeout for the poll operation
*/
#define sys_port_trace_socket_poll_enter(fds, nfds, timeout)
/**
* @brief Trace network socket poll attempt
* @param fds Set of socket object
* @param nfds Number of socket objects in the set
* @param ret Return value
*/
#define sys_port_trace_socket_poll_exit(fds, nfds, ret)
/**
* @brief Trace getsockopt of network sockets
* @param socket Socket object
* @param level Option level
* @param optname Option name
*/
#define sys_port_trace_socket_getsockopt_enter(socket, level, optname)
/**
* @brief Trace network socket getsockopt attempt
* @param socket Socket object
* @param level Option level
* @param optname Option name
* @param optval Option value
* @param optlen Option value length
* @param ret Return value
*/
#define sys_port_trace_socket_getsockopt_exit(socket, level, optname, optval, optlen, ret)
/**
* @brief Trace setsockopt of network sockets
* @param socket Socket object
* @param level Option level
* @param optname Option name
* @param optval Option value
* @param optlen Option value length
*/
#define sys_port_trace_socket_setsockopt_enter(socket, level, optname, optval, optlen)
/**
* @brief Trace network socket setsockopt attempt
* @param socket Socket object
* @param ret Return value
*/
#define sys_port_trace_socket_setsockopt_exit(socket, ret)
/**
* @brief Trace getpeername of network sockets
* @param socket Socket object
*/
#define sys_port_trace_socket_getpeername_enter(socket)
/**
* @brief Trace network socket getpeername attempt
* @param socket Socket object
* @param addr Peer socket network address
* @param addrlen Length of the network address
* @param ret Return value
*/
#define sys_port_trace_socket_getpeername_exit(socket, addr, addrlen, ret)
/**
* @brief Trace getsockname of network sockets
* @param socket Socket object
*/
#define sys_port_trace_socket_getsockname_enter(socket)
/**
* @brief Trace network socket getsockname attempt
* @param socket Socket object
* @param addr Local socket network address
* @param addrlen Length of the network address
* @param ret Return value
*/
#define sys_port_trace_socket_getsockname_exit(socket, addr, addrlen, ret)
/**
* @brief Trace socketpair enter call
* @param family Network address family
* @param type Socket type
* @param proto Socket protocol
* @param sv Socketpair buffer
*/
#define sys_port_trace_socket_socketpair_enter(family, type, proto, sv)
/**
* @brief Trace network socketpair open attempt
* @param socket_A Socketpair first socket object
* @param socket_B Socketpair second socket object
* @param ret Return value
*/
#define sys_port_trace_socket_socketpair_exit(socket_A, socket_B, ret)
/** @} */ /* end of subsys_tracing_apis_socket */
#if defined(CONFIG_PERCEPIO_TRACERECORDER)
#include "tracing_tracerecorder.h"
#else
/* Default tracing hook declarations, used when no Percepio TraceRecorder
 * backend is selected (see the CONFIG_PERCEPIO_TRACERECORDER conditional
 * above); the enabled tracing backend provides the implementations.
 */
/**
 * @brief Called when entering an ISR
 */
void sys_trace_isr_enter(void);
/**
 * @brief Called when exiting an ISR
 */
void sys_trace_isr_exit(void);
/**
 * @brief Called when exiting an ISR and switching to scheduler
 */
void sys_trace_isr_exit_to_scheduler(void);
/**
 * @brief Called when the cpu enters the idle state
 */
void sys_trace_idle(void);
#endif /* CONFIG_PERCEPIO_TRACERECORDER */
/**
 * @brief Called when entering an init function
 * @param entry Init entry being executed
 * @param level Init level the entry belongs to
 */
#define sys_trace_sys_init_enter(entry, level)
/**
 * @brief Called when exiting an init function
 * @param entry Init entry that was executed
 * @param level Init level the entry belongs to
 * @param result Return value of the init function
 */
#define sys_trace_sys_init_exit(entry, level, result)
/** @} */ /* end of subsys_tracing_apis */
/** @} */ /* end of subsys_tracing */
#endif
#endif /* ZEPHYR_INCLUDE_TRACING_TRACING_H_ */
``` | /content/code_sandbox/include/zephyr/tracing/tracing.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 13,621 |
```objective-c
/** @file
* @brief Routines for network subsystem initialization.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_CONFIG_H_
#define ZEPHYR_INCLUDE_NET_NET_CONFIG_H_
#include <zephyr/types.h>
#include <zephyr/device.h>
#include <zephyr/net/net_if.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network configuration library
* @defgroup net_config Network Configuration Library
* @since 1.8
* @version 0.8.0
* @ingroup networking
* @{
*/
/* Flags that tell what kind of functionality is needed by the client. */
/**
* @brief Application needs routers to be set so that connectivity to remote
* network is possible. For IPv6 networks, this means that the device should
* receive IPv6 router advertisement message before continuing.
*/
#define NET_CONFIG_NEED_ROUTER 0x00000001
/**
* @brief Application needs IPv6 subsystem configured and initialized.
* Typically this means that the device has IPv6 address set.
*/
#define NET_CONFIG_NEED_IPV6 0x00000002
/**
* @brief Application needs IPv4 subsystem configured and initialized.
* Typically this means that the device has IPv4 address set.
*/
#define NET_CONFIG_NEED_IPV4 0x00000004
/**
* @brief Initialize this network application.
*
* @details This will call net_config_init_by_iface() with NULL network
* interface.
*
* @param app_info String describing this application.
* @param flags Flags related to services needed by the client.
 * @param timeout How long to wait for the network setup before continuing
* the startup.
*
* @return 0 if ok, <0 if error.
*/
int net_config_init(const char *app_info, uint32_t flags, int32_t timeout);
/**
* @brief Initialize this network application using a specific network
* interface.
*
* @details If network interface is set to NULL, then the default one
* is used in the configuration.
*
* @param iface Initialize networking using this network interface.
* @param app_info String describing this application.
* @param flags Flags related to services needed by the client.
 * @param timeout How long to wait for the network setup before continuing
* the startup.
*
* @return 0 if ok, <0 if error.
*/
int net_config_init_by_iface(struct net_if *iface, const char *app_info,
uint32_t flags, int32_t timeout);
/**
* @brief Initialize this network application.
*
* @details If CONFIG_NET_CONFIG_AUTO_INIT is set, then this function is called
* automatically when the device boots. If that is not desired, unset
* the config option and call the function manually when the
* application starts.
*
* @param dev Network device to use. The function will figure out what
* network interface to use based on the device. If the device is NULL,
* then default network interface is used by the function.
* @param app_info String describing this application.
*
* @return 0 if ok, <0 if error.
*/
int net_config_init_app(const struct device *dev, const char *app_info);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_NET_CONFIG_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_config.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 695 |
```objective-c
/*
*
*/
/**
* @file
* @brief CoAP Events code public header
*/
#ifndef ZEPHYR_INCLUDE_NET_COAP_MGMT_H_
#define ZEPHYR_INCLUDE_NET_COAP_MGMT_H_
#include <zephyr/net/net_mgmt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief CoAP Manager Events
* @defgroup coap_mgmt CoAP Manager Events
* @since 3.6
* @version 0.1.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
/* CoAP events */
#define _NET_COAP_LAYER NET_MGMT_LAYER_L4
#define _NET_COAP_CODE 0x1c0
#define _NET_COAP_IF_BASE (NET_MGMT_EVENT_BIT | \
NET_MGMT_LAYER(_NET_COAP_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_COAP_CODE))
struct coap_service;
struct coap_resource;
struct coap_observer;
enum net_event_coap_cmd {
/* Service events */
NET_EVENT_COAP_CMD_SERVICE_STARTED = 1,
NET_EVENT_COAP_CMD_SERVICE_STOPPED,
/* Observer events */
NET_EVENT_COAP_CMD_OBSERVER_ADDED,
NET_EVENT_COAP_CMD_OBSERVER_REMOVED,
};
/** @endcond */
/**
* @brief coap_mgmt event raised when a service has started
*/
#define NET_EVENT_COAP_SERVICE_STARTED \
(_NET_COAP_IF_BASE | NET_EVENT_COAP_CMD_SERVICE_STARTED)
/**
* @brief coap_mgmt event raised when a service has stopped
*/
#define NET_EVENT_COAP_SERVICE_STOPPED \
(_NET_COAP_IF_BASE | NET_EVENT_COAP_CMD_SERVICE_STOPPED)
/**
* @brief coap_mgmt event raised when an observer has been added to a resource
*/
#define NET_EVENT_COAP_OBSERVER_ADDED \
(_NET_COAP_IF_BASE | NET_EVENT_COAP_CMD_OBSERVER_ADDED)
/**
* @brief coap_mgmt event raised when an observer has been removed from a resource
*/
#define NET_EVENT_COAP_OBSERVER_REMOVED \
(_NET_COAP_IF_BASE | NET_EVENT_COAP_CMD_OBSERVER_REMOVED)
/**
* @brief CoAP Service event structure.
*/
struct net_event_coap_service {
/** The CoAP service for which the event is emitted */
const struct coap_service *service;
};
/**
* @brief CoAP Observer event structure.
*/
struct net_event_coap_observer {
/** The CoAP resource for which the event is emitted */
struct coap_resource *resource;
/** The observer that is added/removed */
struct coap_observer *observer;
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_COAP_MGMT_H_ */
``` | /content/code_sandbox/include/zephyr/net/coap_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 588 |
```objective-c
/*
*
*/
/** @file
* @brief DHCPv6 client
*/
#ifndef ZEPHYR_INCLUDE_NET_DHCPV6_H_
#define ZEPHYR_INCLUDE_NET_DHCPV6_H_
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief DHCPv6
* @defgroup dhcpv6 DHCPv6
* @since 3.5
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
/** Current state of DHCPv6 client address/prefix negotiation. */
enum net_dhcpv6_state {
NET_DHCPV6_DISABLED,
NET_DHCPV6_INIT,
NET_DHCPV6_SOLICITING,
NET_DHCPV6_REQUESTING,
NET_DHCPV6_CONFIRMING,
NET_DHCPV6_RENEWING,
NET_DHCPV6_REBINDING,
NET_DHCPV6_INFO_REQUESTING,
NET_DHCPV6_BOUND,
} __packed;
#define DHCPV6_TID_SIZE 3
#ifndef CONFIG_NET_DHCPV6_DUID_MAX_LEN
#define CONFIG_NET_DHCPV6_DUID_MAX_LEN 22
#endif
struct net_dhcpv6_duid_raw {
uint16_t type;
uint8_t buf[CONFIG_NET_DHCPV6_DUID_MAX_LEN];
} __packed;
struct net_dhcpv6_duid_storage {
struct net_dhcpv6_duid_raw duid;
uint8_t length;
};
struct net_if;
/** @endcond */
/** @brief DHCPv6 client configuration parameters. */
struct net_dhcpv6_params {
bool request_addr : 1; /**< Request IPv6 address. */
bool request_prefix : 1; /**< Request IPv6 prefix. */
};
/**
* @brief Start DHCPv6 client on an iface
*
* @details Start DHCPv6 client on a given interface. DHCPv6 client will start
* negotiation for IPv6 address and/or prefix, depending on the configuration.
* Once the negotiation is complete, IPv6 address/prefix details will be added
* to the interface.
*
* @param iface A valid pointer to a network interface
* @param params DHCPv6 client configuration parameters.
*/
void net_dhcpv6_start(struct net_if *iface, struct net_dhcpv6_params *params);
/**
* @brief Stop DHCPv6 client on an iface
*
* @details Stop DHCPv6 client on a given interface. DHCPv6 client
* will remove all configuration obtained from a DHCP server from the
* interface and stop any further negotiation with the server.
*
* @param iface A valid pointer to a network interface
*/
void net_dhcpv6_stop(struct net_if *iface);
/**
* @brief Restart DHCPv6 client on an iface
*
* @details Restart DHCPv6 client on a given interface. DHCPv6 client
* will restart the state machine without any of the initial delays.
*
* @param iface A valid pointer to a network interface
*/
void net_dhcpv6_restart(struct net_if *iface);
/** @cond INTERNAL_HIDDEN */
/**
* @brief DHCPv6 state name
*
* @internal
*/
const char *net_dhcpv6_state_name(enum net_dhcpv6_state state);
/** @endcond */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_DHCPV6_H_ */
``` | /content/code_sandbox/include/zephyr/net/dhcpv6.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 737 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public functions for the Precision Time Protocol time specification.
*
* References are to version 2019 of IEEE 1588, ("PTP")
* and version 2020 of IEEE 802.1AS ("gPTP").
*/
#ifndef ZEPHYR_INCLUDE_NET_PTP_TIME_H_
#define ZEPHYR_INCLUDE_NET_PTP_TIME_H_
/**
* @brief Precision Time Protocol time specification
* @defgroup ptp_time PTP time
* @since 1.13
* @version 0.8.0
* @ingroup networking
* @{
*/
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_time.h>
#include <zephyr/toolchain.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief (Generalized) Precision Time Protocol Timestamp format.
*
* @details This structure represents a timestamp according to the Precision
* Time Protocol standard ("PTP", IEEE 1588, section 5.3.3), the Generalized
* Precision Time Protocol standard ("gPTP", IEEE 802.1AS, section 6.4.3.4), or
* any other well-defined context in which precision structured timestamps are
* required on network messages in Zephyr.
*
* Seconds are encoded as a 48 bits unsigned integer. Nanoseconds are encoded
* as a 32 bits unsigned integer.
*
* In the context of (g)PTP, @em timestamps designate the time, relative to a
* local clock ("LocalClock") at which the message timestamp point passes a
* reference plane marking the boundary between the PTP Instance and the network
 * medium (IEEE 1588, section 7.3.4.2; IEEE 802.1AS, section 8.4.3).
*
* The exact definitions of the <em>message timestamp point</em> and
* <em>reference plane</em> depends on the network medium and use case.
*
* For (g)PTP the media-specific message timestamp points and reference planes
* are defined in the standard. In non-PTP contexts specific to Zephyr,
* timestamps are measured relative to the same local clock but with a
* context-specific message timestamp point and reference plane, defined below
* per use case.
*
* A @em "LocalClock" is a freerunning clock, embedded into a well-defined
* entity (e.g. a PTP Instance) and provides a common time to that entity
 * relative to an arbitrary epoch (IEEE 1588, section 3.1.26, IEEE 802.1AS,
* section 3.16).
*
* In Zephyr, the local clock is usually any instance of a kernel system clock
* driver, counter driver, RTC API driver or low-level counter/timer peripheral
* (e.g. an ethernet peripheral with hardware timestamp support or a radio
* timer) with sufficient precision for the context in which it is used.
*
* See IEEE 802.1AS, Annex B for specific performance requirements regarding
* conformance of local clocks in the gPTP context. See IEEE 1588, Annex A,
* section A5.4 for general performance requirements regarding PTP local clocks.
* See IEEE 802.15.4-2020, section 15.7 for requirements in the context of
* ranging applications and ibid., section 6.7.6 for the relation between guard
* times and clock accuracy which again influence the precision required for
* subprotocols like CSL, TSCH, RIT, etc.
*
* Applications that use timestamps across different subsystems or media must
* ensure that they understand the definition of the respective reference planes
* and interpret timestamps accordingly. Applications must further ensure that
* timestamps are either all referenced to the same local clock or convert
* between clocks based on sufficiently precise conversion algorithms.
*
* Timestamps may be measured on ingress (RX timestamps) or egress (TX
* timestamps) of network messages. Timestamps can also be used to schedule a
* network message to a well-defined point in time in the future at which it is
* to be sent over the medium (timed TX). A future timestamp and a duration,
* both referenced to the local clock, may be given to specify a time window at
* which a network device should expect incoming messages (RX window).
*
* In Zephyr this timestamp structure is currently used in the following
* contexts:
* * gPTP for Full Duplex Point-to-Point IEEE 802.3 links (IEEE 802.1AS,
* section 11): the reference plane and message timestamp points are as
* defined in the standard.
* * IEEE 802.15.4 timed TX and RX: Timestamps designate the point in time at
* which the end of the last symbol of the start-of-frame delimiter (SFD) (or
* equivalently, the start of the first symbol of the PHY header) is at the
* local antenna. The standard also refers to this as the "RMARKER" (IEEE
* 802.15.4-2020, section 6.9.1) or "symbol boundary" (ibid., section 6.5.2),
* depending on the context. In the context of beacon timestamps, the
* difference between the timestamp measurement plane and the reference plane
* is defined by the MAC PIB attribute "macSyncSymbolOffset", ibid., section
* 8.4.3.1, table 8-94.
*
* If further use cases are added to Zephyr using this timestamp structure,
* their clock performance requirements, message timestamp points and reference
* plane definition SHALL be added to the above list.
*/
struct net_ptp_time {
	/** Seconds encoded on 48 bits. */
	union {
		/** @cond INTERNAL_HIDDEN */
		/* Endian-aware view of the 48 significant bits of the
		 * overlaid 64-bit `second` value; `unused` covers its top
		 * 16 bits.
		 */
		struct {
#ifdef CONFIG_LITTLE_ENDIAN
			uint32_t low;    /* Lower 32 bits of the second value */
			uint16_t high;   /* Upper 16 bits of the second value */
			uint16_t unused;
#else
			uint16_t unused;
			uint16_t high;   /* Upper 16 bits of the second value */
			uint32_t low;    /* Lower 32 bits of the second value */
#endif
		} _sec;
		/** @endcond */
		/** Second value. */
		uint64_t second;
	};
	/** Nanoseconds. */
	uint32_t nanosecond;
};
#ifdef __cplusplus
}
#endif
/**
 * @brief Generalized Precision Time Protocol Extended Timestamp format.
 *
 * @details This structure represents an extended timestamp according to the
 * Generalized Precision Time Protocol standard (IEEE 802.1AS), see section
 * 6.4.3.5.
 *
 * Seconds are encoded as 48 bits unsigned integer. Fractional nanoseconds are
 * encoded as 48 bits, their unit is 2^(-16) ns.
 *
 * A precise definition of PTP timestamps and their uses in Zephyr is given in
 * the description of @ref net_ptp_time.
 */
struct net_ptp_extended_time {
	/** Seconds encoded on 48 bits. */
	union {
		/** @cond INTERNAL_HIDDEN */
		/* Endian-aware view of the 48 significant bits of the
		 * overlaid 64-bit `second` value; `unused` covers its top
		 * 16 bits.
		 */
		struct {
#ifdef CONFIG_LITTLE_ENDIAN
			uint32_t low;
			uint16_t high;
			uint16_t unused;
#else
			uint16_t unused;
			uint16_t high;
			uint32_t low;
#endif
		} _sec;
		/** @endcond */
		/** Second value. */
		uint64_t second;
	};
	/** Fractional nanoseconds on 48 bits. */
	union {
		/** @cond INTERNAL_HIDDEN */
		/* Endian-aware view of the 48 significant bits of the
		 * overlaid 64-bit fractional-nanosecond value (unit:
		 * 2^(-16) ns); `unused` covers its top 16 bits.
		 */
		struct {
#ifdef CONFIG_LITTLE_ENDIAN
			uint32_t low;
			uint16_t high;
			uint16_t unused;
#else
			uint16_t unused;
			uint16_t high;
			uint32_t low;
#endif
		} _fns;
		/** @endcond */
		/** Fractional nanoseconds value. */
		uint64_t fract_nsecond;
	};
} __packed;
/**
* @brief Convert a PTP timestamp to a nanosecond precision timestamp, both
* related to the local network reference clock.
*
* @note Only timestamps representing up to ~290 years can be converted to
* nanosecond timestamps. Larger timestamps will return the maximum
* representable nanosecond precision timestamp.
*
* @param ts the PTP timestamp
*
* @return the corresponding nanosecond precision timestamp
*/
static inline net_time_t net_ptp_time_to_ns(struct net_ptp_time *ts)
{
if (!ts) {
return 0;
}
if (ts->second >= NET_TIME_SEC_MAX) {
return NET_TIME_MAX;
}
return ((int64_t)ts->second * NSEC_PER_SEC) + ts->nanosecond;
}
/**
 * @brief Convert a nanosecond precision timestamp to a PTP timestamp, both
 * related to the local network reference clock.
 *
 * @param nsec a nanosecond precision timestamp
 *
 * @return the corresponding PTP timestamp
 */
static inline struct net_ptp_time ns_to_net_ptp_time(net_time_t nsec)
{
	/* Negative times cannot be represented as PTP timestamps. */
	__ASSERT_NO_MSG(nsec >= 0);

	return (struct net_ptp_time){
		.second = nsec / NSEC_PER_SEC,
		.nanosecond = nsec % NSEC_PER_SEC,
	};
}
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_PTP_TIME_H_ */
``` | /content/code_sandbox/include/zephyr/net/ptp_time.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,984 |
```objective-c
/*
*
*/
/**
* @file
* @brief Representation of nanosecond resolution elapsed time and timestamps in
* the network stack.
*
* Inspired by
* path_to_url and
* path_to_url
*
* @defgroup net_time Network time representation.
* @since 3.5
* @version 0.1.0
* @ingroup networking
* @{
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_TIME_H_
#define ZEPHYR_INCLUDE_NET_NET_TIME_H_
/* Include required for NSEC_PER_* constants. */
#include <zephyr/sys_clock.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Any occurrence of net_time_t specifies a concept of nanosecond
* resolution scalar time span, future (positive) or past (negative) relative
* time or absolute timestamp referred to some local network uptime reference
* clock that does not wrap during uptime and is - in a certain, well-defined
* sense - common to all local network interfaces, sometimes even to remote
* interfaces on the same network.
*
* This type is EXPERIMENTAL. Usage is currently restricted to representation of
* time within the network subsystem.
*
* @details Timed network protocols (PTP, TDMA, ...) usually require several
* local or remote interfaces to share a common notion of elapsed time within
* well-defined tolerances. Network uptime therefore differs from time
* represented by a single hardware counter peripheral in that it will need to
* be represented in several distinct hardware peripherals with different
* frequencies, accuracy and precision. To co-operate, these hardware counters
* will have to be "syntonized" or "disciplined" (i.e. frequency and phase
* locked) with respect to a common local or remote network reference time
* signal. Be aware that while syntonized clocks share the same frequency and
* phase, they do not usually share the same epoch (zero-point).
*
* This also explains why network time, if represented as a cycle value of some
* specific hardware counter, will never be "precise" but only can be "good
* enough" with respect to the tolerances (resolution, drift, jitter) required
* by a given network protocol. All counter peripherals involved in a timed
* network protocol must comply with these tolerances.
*
* Please use specific cycle/tick counter values rather than net_time_t whenever
* possible especially when referring to the kernel system clock or values of
* any single counter peripheral.
*
* net_time_t cannot represent general clocks referred to an arbitrary epoch as
* it only covers roughly +/- ~290 years. It also cannot be used to represent
* time according to a more complex timescale (e.g. including leap seconds, time
* adjustments, complex calendars or time zones). In these cases you may use
* @ref timespec (C11, POSIX.1-2001), @ref timeval (POSIX.1-2001) or broken down
* time as in @ref tm (C90). The advantage of net_time_t over these structured
* time representations is lower memory footprint, faster and simpler scalar
* arithmetic and easier conversion from/to low-level hardware counter values.
* Also net_time_t can be used in the network stack as well as in applications
* while POSIX concepts cannot. Converting net_time_t from/to structured time
* representations is possible in a limited way but - except for @ref timespec -
* requires concepts that must be implemented by higher-level APIs. Utility
* functions converting from/to @ref timespec will be provided as part of the
* net_time_t API as and when needed.
*
* If you want to represent more coarse grained scalar time in network
* applications, use @ref time_t (C99, POSIX.1-2001) which is specified to
* represent seconds or @ref suseconds_t (POSIX.1-2001) for microsecond
* resolution. Kernel @ref k_ticks_t and cycles (both specific to Zephyr) have
* an unspecified resolution but are useful to represent kernel timer values and
* implement high resolution spinning.
*
* If you need even finer grained time resolution, you may want to look at
* (g)PTP concepts, see @ref net_ptp_extended_time.
*
* The reason why we don't use int64_t directly to represent scalar nanosecond
* resolution times in the network stack is that it has been shown in the past
* that fields using generic type will often not be used correctly (e.g. with
* the wrong resolution or to represent underspecified concepts of time with
* unclear syntonization semantics).
*
* Any API that exposes or consumes net_time_t values SHALL ensure that it
* maintains the specified contract including all protocol specific tolerances
* and therefore clients can rely on common semantics of this type. This makes
* times coming from different hardware peripherals and even from different
* network nodes comparable within well-defined limits and therefore net_time_t
* is the ideal intermediate building block for timed network protocols.
*/
typedef int64_t net_time_t;
/** The largest positive time value that can be represented by net_time_t */
#define NET_TIME_MAX INT64_MAX
/** The smallest negative time value that can be represented by net_time_t */
#define NET_TIME_MIN INT64_MIN
/** The largest positive number of seconds that can be safely represented by net_time_t */
#define NET_TIME_SEC_MAX (NET_TIME_MAX / NSEC_PER_SEC)
/** The smallest negative number of seconds that can be safely represented by net_time_t */
#define NET_TIME_SEC_MIN (NET_TIME_MIN / NSEC_PER_SEC)
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_NET_TIME_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_time.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,204 |
```objective-c
/**
* @file
*
* @brief Public APIs for Ethernet PHY drivers.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_PHY_H_
#define ZEPHYR_INCLUDE_DRIVERS_PHY_H_
/**
* @brief Ethernet PHY Interface
* @defgroup ethernet_phy Ethernet PHY Interface
* @since 2.7
* @version 0.8.0
* @ingroup networking
* @{
*/
#include <errno.h>

#include <zephyr/types.h>
#include <zephyr/device.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @brief Ethernet link speeds. */
enum phy_link_speed {
	/** 10Base-T Half-Duplex */
	LINK_HALF_10BASE_T = BIT(0),
	/** 10Base-T Full-Duplex */
	LINK_FULL_10BASE_T = BIT(1),
	/** 100Base-T Half-Duplex */
	LINK_HALF_100BASE_T = BIT(2),
	/** 100Base-T Full-Duplex */
	LINK_FULL_100BASE_T = BIT(3),
	/** 1000Base-T Half-Duplex */
	LINK_HALF_1000BASE_T = BIT(4),
	/** 1000Base-T Full-Duplex */
	LINK_FULL_1000BASE_T = BIT(5),
};
/**
 * @brief Check if phy link is full duplex.
 *
 * @param x Link capabilities
 *
 * @return True if link is full duplex, false if not.
 */
#define PHY_LINK_IS_FULL_DUPLEX(x) \
	((x) & (LINK_FULL_10BASE_T | LINK_FULL_100BASE_T | LINK_FULL_1000BASE_T))
/**
 * @brief Check if phy link speed is 1 Gbit/sec.
 *
 * @param x Link capabilities
 *
 * @return True if link is 1 Gbit/sec, false if not.
 */
#define PHY_LINK_IS_SPEED_1000M(x) ((x) & (LINK_HALF_1000BASE_T | LINK_FULL_1000BASE_T))
/**
 * @brief Check if phy link speed is 100 Mbit/sec.
 *
 * @param x Link capabilities
 *
 * @return True if link is 100 Mbit/sec, false if not.
 */
#define PHY_LINK_IS_SPEED_100M(x) ((x) & (LINK_HALF_100BASE_T | LINK_FULL_100BASE_T))
/** @brief Link state */
struct phy_link_state {
/** Link speed */
enum phy_link_speed speed;
/** When true the link is active and connected */
bool is_up;
};
/**
* @typedef phy_callback_t
* @brief Define the callback function signature for
* `phy_link_callback_set()` function.
*
* @param dev PHY device structure
* @param state Pointer to link_state structure.
* @param user_data Pointer to data specified by user
*/
typedef void (*phy_callback_t)(const struct device *dev,
struct phy_link_state *state,
void *user_data);
/**
* @cond INTERNAL_HIDDEN
*
* These are for internal use only, so skip these in
* public documentation.
*/
__subsystem struct ethphy_driver_api {
/** Get link state */
int (*get_link)(const struct device *dev,
struct phy_link_state *state);
/** Configure link */
int (*cfg_link)(const struct device *dev,
enum phy_link_speed adv_speeds);
/** Set callback to be invoked when link state changes. */
int (*link_cb_set)(const struct device *dev, phy_callback_t cb,
void *user_data);
/** Read PHY register */
int (*read)(const struct device *dev, uint16_t reg_addr,
uint32_t *data);
/** Write PHY register */
int (*write)(const struct device *dev, uint16_t reg_addr,
uint32_t data);
};
/**
* @endcond
*/
/**
* @brief Configure PHY link
*
* This route configures the advertised link speeds.
*
* @param[in] dev PHY device structure
* @param speeds OR'd link speeds to be advertised by the PHY
*
* @retval 0 If successful.
* @retval -EIO If communication with PHY failed.
* @retval -ENOTSUP If not supported.
*/
static inline int phy_configure_link(const struct device *dev,
enum phy_link_speed speeds)
{
const struct ethphy_driver_api *api =
(const struct ethphy_driver_api *)dev->api;
return api->cfg_link(dev, speeds);
}
/**
 * @brief Get PHY link state
 *
 * Returns the current state of the PHY link. This can be used to determine
 * when a link is up and the negotiated link speed.
 *
 * @param[in] dev PHY device structure
 * @param state Pointer to receive PHY state
 *
 * @retval 0 If successful.
 * @retval -EIO If communication with PHY failed.
 */
static inline int phy_get_link_state(const struct device *dev,
				     struct phy_link_state *state)
{
	return ((const struct ethphy_driver_api *)dev->api)->get_link(dev, state);
}
/**
 * @brief Set link state change callback
 *
 * Sets a callback that is invoked when link state changes. This is the
 * preferred method for ethernet drivers to be notified of the PHY link
 * state change.
 *
 * @param[in] dev PHY device structure
 * @param callback Callback handler
 * @param user_data Pointer to data specified by user.
 *
 * @retval 0 If successful.
 * @retval -ENOTSUP If not supported.
 */
static inline int phy_link_callback_set(const struct device *dev,
					phy_callback_t callback,
					void *user_data)
{
	const struct ethphy_driver_api *api =
		(const struct ethphy_driver_api *)dev->api;

	/* The callback operation is optional; return the documented
	 * -ENOTSUP rather than crashing on a NULL function pointer.
	 */
	if (api->link_cb_set == NULL) {
		return -ENOTSUP;
	}

	return api->link_cb_set(dev, callback, user_data);
}
/**
 * @brief Read PHY registers
 *
 * This routine provides a generic interface to read from a PHY register.
 *
 * @param[in] dev PHY device structure
 * @param[in] reg_addr Register address
 * @param value Pointer to receive read value
 *
 * @retval 0 If successful.
 * @retval -EIO If communication with PHY failed.
 */
static inline int phy_read(const struct device *dev, uint16_t reg_addr,
			   uint32_t *value)
{
	return ((const struct ethphy_driver_api *)dev->api)->read(dev, reg_addr, value);
}
/**
 * @brief Write PHY register
 *
 * This routine provides a generic interface to write to a PHY register.
 *
 * @param[in] dev PHY device structure
 * @param[in] reg_addr Register address
 * @param[in] value Value to write
 *
 * @retval 0 If successful.
 * @retval -EIO If communication with PHY failed.
 */
static inline int phy_write(const struct device *dev, uint16_t reg_addr,
			    uint32_t value)
{
	return ((const struct ethphy_driver_api *)dev->api)->write(dev, reg_addr, value);
}
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_DRIVERS_PHY_H_ */
``` | /content/code_sandbox/include/zephyr/net/phy.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,542 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKET_POLL_H_
#define ZEPHYR_INCLUDE_NET_SOCKET_POLL_H_
/* Setting for pollfd to avoid circular inclusion */
/**
* @brief BSD Sockets compatible API
* @defgroup bsd_sockets BSD Sockets compatible API
* @ingroup networking
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Definition of the monitored socket/file descriptor.
 *
 * An array of these descriptors is passed as an argument to poll().
 * The layout mirrors the POSIX struct pollfd (fd/events/revents), so it
 * can back a portable poll() implementation without conversion.
 */
struct zsock_pollfd {
	int fd; /**< Socket descriptor to monitor */
	short events; /**< Requested events (input to poll()) */
	short revents; /**< Returned events (filled in by poll()) */
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_SOCKET_POLL_H_ */
``` | /content/code_sandbox/include/zephyr/net/socket_poll.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 163 |
```objective-c
/*
*
*/
/**
* @file
* @brief API for defining conn_mgr connectivity implementations (allowing ifaces to be used with
* conn_mgr_connectivity).
*/
#ifndef ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_IMPL_H_
#define ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_IMPL_H_
#include <zephyr/device.h>
#include <zephyr/net/net_if.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/conn_mgr_connectivity.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Connection Manager Connectivity Implementation API
* @defgroup conn_mgr_connectivity_impl Connection Manager Connectivity Implementation API
* @since 3.4
* @version 0.1.0
* @ingroup conn_mgr_connectivity
* @{
*/
/* Forward declaration */
struct conn_mgr_conn_binding;
/**
* @brief Connectivity Manager Connectivity API structure
*
* Used to provide generic access to network association parameters and procedures
*/
struct conn_mgr_conn_api {
	/**
	 * @brief When called, the connectivity implementation should start attempting to
	 * establish connectivity (association with a network) for the bound iface pointed
	 * to by if_conn->iface.
	 *
	 * Must be non-blocking.
	 *
	 * Called by @ref conn_mgr_if_connect.
	 */
	int (*connect)(struct conn_mgr_conn_binding *const binding);
	/**
	 * @brief When called, the connectivity implementation should disconnect (disassociate), or
	 * stop any in-progress attempts to associate to a network, the bound iface pointed to by
	 * if_conn->iface.
	 *
	 * Must be non-blocking.
	 *
	 * Called by @ref conn_mgr_if_disconnect.
	 */
	int (*disconnect)(struct conn_mgr_conn_binding *const binding);
	/**
	 * @brief Called once for each iface that has been bound to a connectivity implementation
	 * using this API.
	 *
	 * Connectivity implementations should use this callback to perform any required
	 * per-bound-iface initialization.
	 */
	void (*init)(struct conn_mgr_conn_binding *const binding);
	/**
	 * @brief Implementation callback for conn_mgr_if_set_opt.
	 *
	 * Used to set implementation-specific connectivity settings.
	 *
	 * Calls to conn_mgr_if_set_opt on an iface will result in calls to this callback with
	 * the conn_mgr_conn_binding struct bound to that iface.
	 *
	 * It is up to the connectivity implementation to interpret optname. Options can be
	 * specific to the bound iface (pointed to by if_conn->iface), or can apply to the whole
	 * connectivity implementation.
	 *
	 * See the description of conn_mgr_if_set_opt for more details.
	 * set_opt implementations should conform to that description.
	 *
	 * Implementations may choose to gracefully handle invalid buffer lengths with partial
	 * reads, rather than raise errors, if deemed appropriate.
	 */
	int (*set_opt)(struct conn_mgr_conn_binding *const binding,
		       int optname, const void *optval, size_t optlen);
	/**
	 * @brief Implementation callback for conn_mgr_if_get_opt.
	 *
	 * Used to retrieve implementation-specific connectivity settings.
	 *
	 * Calls to conn_mgr_if_get_opt on an iface will result in calls to this callback with
	 * the conn_mgr_conn_binding struct bound to that iface.
	 *
	 * It is up to the connectivity implementation to interpret optname. Options can be
	 * specific to the bound iface (pointed to by if_conn->iface), or can apply to the whole
	 * connectivity implementation.
	 *
	 * See the description of conn_mgr_if_get_opt for more details.
	 * get_opt implementations should conform to that description.
	 *
	 * Implementations may choose to gracefully handle invalid buffer lengths with partial
	 * writes, rather than raise errors, if deemed appropriate.
	 */
	int (*get_opt)(struct conn_mgr_conn_binding *const binding,
		       int optname, void *optval, size_t *optlen);
};
/** @cond INTERNAL_HIDDEN */
#define CONN_MGR_CONN_IMPL_GET_NAME(conn_id) __conn_mgr_conn_##conn_id
#define CONN_MGR_CONN_IMPL_GET_CTX_TYPE(conn_id) conn_id##_CTX_TYPE
/** @endcond */
/**
* @brief Connectivity Implementation struct
*
* Declares a conn_mgr connectivity layer implementation with the provided API
*/
struct conn_mgr_conn_impl {
/** The connectivity API used by the implementation */
struct conn_mgr_conn_api *api;
};
/**
* @brief Define a conn_mgr connectivity implementation that can be bound to network devices.
*
* @param conn_id The name of the new connectivity implementation
* @param conn_api A pointer to a conn_mgr_conn_api struct
*/
#define CONN_MGR_CONN_DEFINE(conn_id, conn_api) \
const struct conn_mgr_conn_impl CONN_MGR_CONN_IMPL_GET_NAME(conn_id) = { \
.api = conn_api, \
};
/**
* @brief Helper macro to make a conn_mgr connectivity implementation publicly available.
*/
#define CONN_MGR_CONN_DECLARE_PUBLIC(conn_id) \
extern const struct conn_mgr_conn_impl CONN_MGR_CONN_IMPL_GET_NAME(conn_id)
/** @cond INTERNAL_HIDDEN */
#define CONN_MGR_CONN_BINDING_GET_NAME(dev_id, sfx) __conn_mgr_bndg_##dev_id##_##sfx
#define CONN_MGR_CONN_BINDING_GET_DATA(dev_id, sfx) __conn_mgr_bndg_data_##dev_id##_##sfx
#define CONN_MGR_CONN_BINDING_GET_MUTEX(dev_id, sfx) __conn_mgr_bndg_mutex_##dev_id##_##sfx
/** @endcond */
/**
* @brief Connectivity Manager network interface binding structure
*
* Binds a conn_mgr connectivity implementation to an iface / network device.
* Stores per-iface state for the connectivity implementation.
*/
struct conn_mgr_conn_binding {
/** The network interface the connectivity implementation is bound to */
struct net_if *iface;
/** The connectivity implementation the network device is bound to */
const struct conn_mgr_conn_impl *impl;
/** Pointer to private, per-iface connectivity context */
void *ctx;
/**
* @name Generic connectivity state
* @{
*/
/**
* Connectivity flags
*
* Public boolean state and configuration values supported by all bindings.
* See conn_mgr_if_flag for options.
*/
uint32_t flags;
/**
* Timeout (seconds)
*
* Indicates to the connectivity implementation how long it should attempt to
* establish connectivity for during a connection attempt before giving up.
*
* The connectivity implementation should give up on establishing connectivity after this
* timeout, even if persistence is enabled.
*
* Set to @ref CONN_MGR_IF_NO_TIMEOUT to indicate that no timeout should be used.
*/
int timeout;
/** @} */
/** @cond INTERNAL_HIDDEN */
/* Internal-use mutex for protecting access to the binding and API functions. */
struct k_mutex *mutex;
/** @endcond */
};
/**
* @brief Associate a connectivity implementation with an existing network device instance
*
* @param dev_id Network device id.
* @param inst Network device instance.
* @param conn_id Name of the connectivity implementation to associate.
*/
#define CONN_MGR_BIND_CONN_INST(dev_id, inst, conn_id) \
K_MUTEX_DEFINE(CONN_MGR_CONN_BINDING_GET_MUTEX(dev_id, inst)); \
static CONN_MGR_CONN_IMPL_GET_CTX_TYPE(conn_id) \
CONN_MGR_CONN_BINDING_GET_DATA(dev_id, inst); \
static STRUCT_SECTION_ITERABLE(conn_mgr_conn_binding, \
CONN_MGR_CONN_BINDING_GET_NAME(dev_id, inst)) = { \
.iface = NET_IF_GET(dev_id, inst), \
.impl = &(CONN_MGR_CONN_IMPL_GET_NAME(conn_id)), \
.ctx = &(CONN_MGR_CONN_BINDING_GET_DATA(dev_id, inst)), \
.mutex = &(CONN_MGR_CONN_BINDING_GET_MUTEX(dev_id, inst)) \
};
/**
* @brief Associate a connectivity implementation with an existing network device
*
* @param dev_id Network device id.
* @param conn_id Name of the connectivity implementation to associate.
*/
#define CONN_MGR_BIND_CONN(dev_id, conn_id) \
CONN_MGR_BIND_CONN_INST(dev_id, 0, conn_id)
/**
 * @brief Retrieves the conn_mgr binding struct for a provided iface if it exists.
 *
 * Bindings for connectivity implementations with missing API structs are ignored.
 *
 * For use only by connectivity implementations.
 *
 * @param iface - bound network interface to obtain the binding struct for.
 * @return struct conn_mgr_conn_binding* Pointer to the retrieved binding struct if it exists,
 *	   NULL otherwise.
 */
static inline struct conn_mgr_conn_binding *conn_mgr_if_get_binding(struct net_if *iface)
{
	STRUCT_SECTION_FOREACH(conn_mgr_conn_binding, binding) {
		if (binding->iface != iface) {
			continue;
		}

		/* Matched the iface; treat bindings without an API struct as absent. */
		return (binding->impl->api != NULL) ? binding : NULL;
	}

	return NULL;
}
/**
 * @brief Lock the passed-in binding, making it safe to access.
 *
 * Call this whenever accessing binding data, unless inside a conn_mgr_conn_api
 * callback, where it is called automatically by conn_mgr.
 *
 * Reentrant.
 *
 * For use only by connectivity implementations.
 *
 * @param binding - Binding to lock
 */
static inline void conn_mgr_binding_lock(struct conn_mgr_conn_binding *binding)
{
	struct k_mutex *lock = binding->mutex;

	/* Waits forever, so the return value carries no information. */
	(void)k_mutex_lock(lock, K_FOREVER);
}
/**
 * @brief Unlocks the passed-in binding.
 *
 * Call this after any call to @ref conn_mgr_binding_lock once done accessing
 * binding data.
 *
 * Reentrant.
 *
 * For use only by connectivity implementations.
 *
 * @param binding - Binding to unlock
 */
static inline void conn_mgr_binding_unlock(struct conn_mgr_conn_binding *binding)
{
	struct k_mutex *lock = binding->mutex;

	(void)k_mutex_unlock(lock);
}
/**
 * @brief Set the value of the specified connectivity flag for the provided binding
 *
 * Can be used from any thread or callback without calling @ref conn_mgr_binding_lock.
 *
 * For use only by connectivity implementations
 *
 * @param binding The binding to modify
 * @param flag The flag to set
 * @param value New value for the specified flag
 */
static inline void conn_mgr_binding_set_flag(struct conn_mgr_conn_binding *binding,
					     enum conn_mgr_if_flag flag, bool value)
{
	conn_mgr_binding_lock(binding);

	if (value) {
		binding->flags |= BIT(flag);
	} else {
		binding->flags &= ~BIT(flag);
	}

	conn_mgr_binding_unlock(binding);
}
/**
 * @brief Check the value of the specified connectivity flag for the provided binding
 *
 * Can be used from any thread or callback without calling @ref conn_mgr_binding_lock.
 *
 * For use only by connectivity implementations
 *
 * @param binding The binding to check
 * @param flag The flag to check
 * @return bool The value of the specified flag
 */
static inline bool conn_mgr_binding_get_flag(struct conn_mgr_conn_binding *binding,
					     enum conn_mgr_if_flag flag)
{
	bool flag_set;

	conn_mgr_binding_lock(binding);
	flag_set = (binding->flags & BIT(flag)) != 0U;
	conn_mgr_binding_unlock(binding);

	return flag_set;
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_CONN_MGR_CONNECTIVITY_IMPL_H_ */
``` | /content/code_sandbox/include/zephyr/net/conn_mgr_connectivity_impl.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,424 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for network L2 interface
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_L2_H_
#define ZEPHYR_INCLUDE_NET_NET_L2_H_
#include <zephyr/device.h>
#include <zephyr/net/buf.h>
#include <zephyr/net/capture.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network Layer 2 abstraction layer
* @defgroup net_l2 Network L2 Abstraction Layer
* @since 1.5
* @version 1.0.0
* @ingroup networking
* @{
*/
struct net_if;
/** L2 flags */
enum net_l2_flags {
/** IP multicast supported */
NET_L2_MULTICAST = BIT(0),
/** Do not join solicited node multicast group */
NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE = BIT(1),
/** Is promiscuous mode supported */
NET_L2_PROMISC_MODE = BIT(2),
/** Is this L2 point-to-point with tunneling so no need to have
* IP address etc to network interface.
*/
NET_L2_POINT_TO_POINT = BIT(3),
} __packed;
/**
* @brief Network L2 structure
*
* Used to provide an interface to lower network stack.
*/
struct net_l2 {
/**
* This function is used by net core to get iface's L2 layer parsing
* what's relevant to itself.
*/
enum net_verdict (*recv)(struct net_if *iface, struct net_pkt *pkt);
/**
* This function is used by net core to push a packet to lower layer
* (interface's L2), which in turn might work on the packet relevantly.
* (adding proper header etc...)
* Returns a negative error code, or the number of bytes sent otherwise.
*/
int (*send)(struct net_if *iface, struct net_pkt *pkt);
/**
* This function is used to enable/disable traffic over a network
* interface. The function returns <0 if error and >=0 if no error.
*/
int (*enable)(struct net_if *iface, bool state);
/**
* Return L2 flags for the network interface.
*/
enum net_l2_flags (*get_flags)(struct net_if *iface);
};
/** @cond INTERNAL_HIDDEN */
#define NET_L2_GET_NAME(_name) _net_l2_##_name
#define NET_L2_DECLARE_PUBLIC(_name) \
extern const struct net_l2 NET_L2_GET_NAME(_name)
#define NET_L2_GET_CTX_TYPE(_name) _name##_CTX_TYPE
#define VIRTUAL_L2 VIRTUAL
NET_L2_DECLARE_PUBLIC(VIRTUAL_L2);
#define DUMMY_L2 DUMMY
#define DUMMY_L2_CTX_TYPE void*
NET_L2_DECLARE_PUBLIC(DUMMY_L2);
#define OFFLOADED_NETDEV_L2 OFFLOADED_NETDEV
NET_L2_DECLARE_PUBLIC(OFFLOADED_NETDEV_L2);
#define ETHERNET_L2 ETHERNET
NET_L2_DECLARE_PUBLIC(ETHERNET_L2);
#define PPP_L2 PPP
NET_L2_DECLARE_PUBLIC(PPP_L2);
#define IEEE802154_L2 IEEE802154
NET_L2_DECLARE_PUBLIC(IEEE802154_L2);
#define OPENTHREAD_L2 OPENTHREAD
NET_L2_DECLARE_PUBLIC(OPENTHREAD_L2);
#define CANBUS_RAW_L2 CANBUS_RAW
#define CANBUS_RAW_L2_CTX_TYPE void*
NET_L2_DECLARE_PUBLIC(CANBUS_RAW_L2);
#ifdef CONFIG_NET_L2_CUSTOM_IEEE802154
#ifndef CUSTOM_IEEE802154_L2
#define CUSTOM_IEEE802154_L2 CUSTOM_IEEE802154
#endif
#define CUSTOM_IEEE802154_L2_CTX_TYPE void*
NET_L2_DECLARE_PUBLIC(CUSTOM_IEEE802154_L2);
#endif /* CONFIG_NET_L2_CUSTOM_IEEE802154 */
#define NET_L2_INIT(_name, _recv_fn, _send_fn, _enable_fn, _get_flags_fn) \
const STRUCT_SECTION_ITERABLE(net_l2, \
NET_L2_GET_NAME(_name)) = { \
.recv = (_recv_fn), \
.send = (_send_fn), \
.enable = (_enable_fn), \
.get_flags = (_get_flags_fn), \
}
#define NET_L2_GET_DATA(name, sfx) _net_l2_data_##name##sfx
#define NET_L2_DATA_INIT(name, sfx, ctx_type) \
static ctx_type NET_L2_GET_DATA(name, sfx) __used;
/* Driver-level transmit hook used by L2 implementations to hand a packet
 * to the underlying network device.
 */
typedef int (*net_l2_send_t)(const struct device *dev, struct net_pkt *pkt);
/* Helper for L2 send paths: passes the outgoing packet to the capture
 * subsystem before forwarding it to the driver's send function. Returns
 * whatever send_fn returns (see struct net_l2::send for the convention).
 * NOTE: capture must happen before the send, since send_fn may consume
 * the packet.
 */
static inline int net_l2_send(net_l2_send_t send_fn,
			      const struct device *dev,
			      struct net_if *iface,
			      struct net_pkt *pkt)
{
	net_capture_pkt(iface, pkt);
	return send_fn(dev, pkt);
}
/** @endcond */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_NET_L2_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_l2.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,113 |
```objective-c
/*
*
*/
/** @file mqtt_sn.h
*
* @brief MQTT-SN Client Implementation
*
* @details
* MQTT-SN Client's Application interface is defined in this header.
* Targets protocol version 1.2.
*
* @defgroup mqtt_sn_socket MQTT-SN Client library
* @since 3.3
* @version 0.1.0
* @ingroup networking
* @{
*/
#ifndef ZEPHYR_INCLUDE_NET_MQTT_SN_H_
#define ZEPHYR_INCLUDE_NET_MQTT_SN_H_
#include <stddef.h>
#include <zephyr/net/buf.h>
#include <zephyr/types.h>
#include <sys/types.h>
#ifdef CONFIG_MQTT_SN_TRANSPORT_UDP
#include <zephyr/net/net_ip.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* Quality of Service. QoS 0-2 work the same as basic MQTT, QoS -1 is an MQTT-SN addition.
* QOS -1 is not supported yet.
*/
enum mqtt_sn_qos {
MQTT_SN_QOS_0, /**< QOS 0 */
MQTT_SN_QOS_1, /**< QOS 1 */
MQTT_SN_QOS_2, /**< QOS 2 */
MQTT_SN_QOS_M1 /**< QOS -1 */
};
/**
* MQTT-SN topic types.
*/
enum mqtt_sn_topic_type {
/**
* Normal topic.
* It allows usage of any valid UTF-8 string as a topic name.
*/
MQTT_SN_TOPIC_TYPE_NORMAL,
/**
* Pre-defined topic.
* It allows usage of a two-byte identifier representing a topic name for
* which the corresponding topic name is known in advance by both the client
* and the gateway/server.
*/
MQTT_SN_TOPIC_TYPE_PREDEF,
/**
* Short topic.
* It allows usage of a two-byte string as a topic name.
*/
MQTT_SN_TOPIC_TYPE_SHORT
};
/**
* MQTT-SN return codes.
*/
enum mqtt_sn_return_code {
MQTT_SN_CODE_ACCEPTED = 0x00, /**< Accepted */
MQTT_SN_CODE_REJECTED_CONGESTION = 0x01, /**< Rejected: congestion */
MQTT_SN_CODE_REJECTED_TOPIC_ID = 0x02, /**< Rejected: Invalid Topic ID */
MQTT_SN_CODE_REJECTED_NOTSUP = 0x03, /**< Rejected: Not Supported */
};
/** @brief Abstracts memory buffers. */
struct mqtt_sn_data {
	const uint8_t *data; /**< Pointer to data. */
	uint16_t size; /**< Size of data, in bytes. */
};
/**
 * @brief Initialize memory buffer from C literal string.
 *
 * The stored size excludes the terminating NUL of the literal.
 *
 * Use it as follows:
 *
 * struct mqtt_sn_data topic = MQTT_SN_DATA_STRING_LITERAL("/zephyr");
 *
 * @param[in] literal Literal string from which to generate mqtt_sn_data object.
 */
#define MQTT_SN_DATA_STRING_LITERAL(literal) ((struct mqtt_sn_data){literal, sizeof(literal) - 1})
/**
 * @brief Initialize memory buffer from single bytes.
 *
 * Use it as follows:
 *
 * struct mqtt_sn_data data = MQTT_SN_DATA_BYTES(0x13, 0x37);
 *
 * @param[in] ... Comma-separated byte values placed in a compound-literal
 *		  array whose size becomes the buffer size.
 */
#define MQTT_SN_DATA_BYTES(...) \
	((struct mqtt_sn_data) { (uint8_t[]){ __VA_ARGS__ }, sizeof((uint8_t[]){ __VA_ARGS__ })})
/**
* Event types that can be emitted by the library.
*/
enum mqtt_sn_evt_type {
MQTT_SN_EVT_CONNECTED, /**< Connected to a gateway */
MQTT_SN_EVT_DISCONNECTED, /**< Disconnected */
MQTT_SN_EVT_ASLEEP, /**< Entered ASLEEP state */
MQTT_SN_EVT_AWAKE, /**< Entered AWAKE state */
MQTT_SN_EVT_PUBLISH, /**< Received a PUBLISH message */
MQTT_SN_EVT_PINGRESP /**< Received a PINGRESP */
};
/**
* Event metadata.
*/
union mqtt_sn_evt_param {
/** Structure holding publish event details */
struct {
/** The payload data associated with the event */
struct mqtt_sn_data data;
/** The type of topic for the event */
enum mqtt_sn_topic_type topic_type;
/** The identifier for the topic of the event */
uint16_t topic_id;
} publish;
};
/**
* MQTT-SN event structure to be handled by the event callback.
*/
struct mqtt_sn_evt {
/** Event type */
enum mqtt_sn_evt_type type;
/** Event parameters */
union mqtt_sn_evt_param param;
};
struct mqtt_sn_client;
/**
* @brief Asynchronous event notification callback registered by the
* application.
*
* @param[in] client Identifies the client for which the event is notified.
* @param[in] evt Event description along with result and associated
* parameters (if any).
*/
typedef void (*mqtt_sn_evt_cb_t)(struct mqtt_sn_client *client, const struct mqtt_sn_evt *evt);
/**
* @brief Structure to describe an MQTT-SN transport.
*
* MQTT-SN does not require transports to be reliable or to hold a connection.
* Transports just need to be frame-based, so you can use UDP, ZigBee, or even
* a simple UART, given some kind of framing protocol is used.
*/
struct mqtt_sn_transport {
/**
* @brief Will be called once on client init to initialize the transport.
*
* Use this to open sockets or similar. May be NULL.
*/
int (*init)(struct mqtt_sn_transport *transport);
/**
* @brief Will be called on client deinit
*
* Use this to close sockets or similar. May be NULL.
*/
void (*deinit)(struct mqtt_sn_transport *transport);
/**
* Will be called by the library when it wants to send a message.
*/
int (*msg_send)(struct mqtt_sn_client *client, void *buf, size_t sz);
/**
* @brief Will be called by the library when it wants to receive a message.
*
* Implementations should follow recv conventions.
*/
ssize_t (*recv)(struct mqtt_sn_client *client, void *buffer, size_t length);
/**
* @brief Check if incoming data is available.
*
* If poll() returns a positive number, recv must not block.
*
* May be NULL, but recv should not block then either.
*
* @return Positive number if data is available, or zero if there is none.
* Negative values signal errors.
*/
int (*poll)(struct mqtt_sn_client *client);
};
#ifdef CONFIG_MQTT_SN_TRANSPORT_UDP
/**
* Transport struct for UDP based transport.
*/
struct mqtt_sn_transport_udp {
	/** Parent struct */
	struct mqtt_sn_transport tp;
	/** Socket FD */
	int sock;
	/** Address of the gateway */
	struct sockaddr gwaddr;
	/** Length of the valid address data stored in gwaddr */
	socklen_t gwaddrlen;
};
#define UDP_TRANSPORT(transport) CONTAINER_OF(transport, struct mqtt_sn_transport_udp, tp)
/**
* @brief Initialize the UDP transport.
*
* @param[in] udp The transport to be initialized
* @param[in] gwaddr Pre-initialized gateway address
* @param[in] addrlen Size of the gwaddr structure.
*/
int mqtt_sn_transport_udp_init(struct mqtt_sn_transport_udp *udp, struct sockaddr *gwaddr,
socklen_t addrlen);
#endif
/**
 * Structure describing an MQTT-SN client.
 */
struct mqtt_sn_client {
	/** 1-23 character unique client ID */
	struct mqtt_sn_data client_id;
	/** Topic for Will message.
	 * Must be initialized before connecting with will=true
	 */
	struct mqtt_sn_data will_topic;
	/** Will message.
	 * Must be initialized before connecting with will=true
	 */
	struct mqtt_sn_data will_msg;
	/** Quality of Service for the Will message */
	enum mqtt_sn_qos will_qos;
	/** Flag indicating if the will message should be retained by the broker */
	bool will_retain;
	/** Underlying transport to be used by the client */
	struct mqtt_sn_transport *transport;
	/** Buffer for outgoing data */
	struct net_buf_simple tx;
	/** Buffer for incoming data */
	struct net_buf_simple rx;
	/** Event callback invoked by the library to notify the application */
	mqtt_sn_evt_cb_t evt_cb;
	/** Message ID for the next message to be sent */
	uint16_t next_msg_id;
	/** List of pending publish messages */
	sys_slist_t publish;
	/** List of registered topics */
	sys_slist_t topic;
	/** Current state of the MQTT-SN client (internal state machine value) */
	int state;
	/** Timestamp of the last ping request
	 * NOTE(review): presumably milliseconds from k_uptime_get() - confirm
	 */
	int64_t last_ping;
	/** Number of retries for failed ping attempts */
	uint8_t ping_retries;
	/** Delayable work structure for processing MQTT-SN events */
	struct k_work_delayable process_work;
};
/**
* @brief Initialize a client.
*
* @param client The MQTT-SN client to initialize.
* @param client_id The ID to be used by the client.
* @param transport The transport to be used by the client.
* @param evt_cb The event callback function for the client.
* @param tx Pointer to the transmit buffer.
* @param txsz Size of the transmit buffer.
* @param rx Pointer to the receive buffer.
* @param rxsz Size of the receive buffer.
*
* @return 0 or a negative error code (errno.h) indicating reason of failure.
*/
int mqtt_sn_client_init(struct mqtt_sn_client *client, const struct mqtt_sn_data *client_id,
struct mqtt_sn_transport *transport, mqtt_sn_evt_cb_t evt_cb, void *tx,
size_t txsz, void *rx, size_t rxsz);
/**
* @brief Deinitialize the client.
*
* This removes all topics and publishes, and also de-inits the transport.
*
* @param client The MQTT-SN client to deinitialize.
*/
void mqtt_sn_client_deinit(struct mqtt_sn_client *client);
/**
* @brief Connect the client.
*
* @param client The MQTT-SN client to connect.
* @param will Flag indicating if a Will message should be sent.
* @param clean_session Flag indicating if a clean session should be started.
*
* @return 0 or a negative error code (errno.h) indicating reason of failure.
*/
int mqtt_sn_connect(struct mqtt_sn_client *client, bool will, bool clean_session);
/**
* @brief Disconnect the client.
*
* @param client The MQTT-SN client to disconnect.
*
* @return 0 or a negative error code (errno.h) indicating reason of failure.
*/
int mqtt_sn_disconnect(struct mqtt_sn_client *client);
/**
* @brief Set the client into sleep state.
*
* @param client The MQTT-SN client to be put to sleep.
* @param duration Sleep duration (in seconds).
*
* @return 0 on success, negative errno code on failure.
*/
int mqtt_sn_sleep(struct mqtt_sn_client *client, uint16_t duration);
/**
* @brief Subscribe to a given topic.
*
* @param client The MQTT-SN client that should subscribe.
* @param qos The desired quality of service for the subscription.
* @param topic_name The name of the topic to subscribe to.
*
* @return 0 or a negative error code (errno.h) indicating reason of failure.
*/
int mqtt_sn_subscribe(struct mqtt_sn_client *client, enum mqtt_sn_qos qos,
struct mqtt_sn_data *topic_name);
/**
* @brief Unsubscribe from a topic.
*
* @param client The MQTT-SN client that should unsubscribe.
* @param qos The quality of service used when subscribing.
* @param topic_name The name of the topic to unsubscribe from.
*
* @return 0 or a negative error code (errno.h) indicating reason of failure.
*/
int mqtt_sn_unsubscribe(struct mqtt_sn_client *client, enum mqtt_sn_qos qos,
struct mqtt_sn_data *topic_name);
/**
* @brief Publish a value.
*
* If the topic is not yet registered with the gateway, the library takes care of it.
*
* @param client The MQTT-SN client that should publish.
* @param qos The desired quality of service for the publish.
* @param topic_name The name of the topic to publish to.
* @param retain Flag indicating if the message should be retained by the broker.
* @param data The data to be published.
*
* @return 0 or a negative error code (errno.h) indicating reason of failure.
*/
int mqtt_sn_publish(struct mqtt_sn_client *client, enum mqtt_sn_qos qos,
struct mqtt_sn_data *topic_name, bool retain, struct mqtt_sn_data *data);
/**
* @brief Check the transport for new incoming data.
*
* Call this function periodically, or if you have good reason to believe there is any data.
* If the client's transport struct contains a poll-function, this function is non-blocking.
*
* @param client The MQTT-SN client to check for incoming data.
*
* @return 0 or a negative error code (errno.h) indicating reason of failure.
*/
int mqtt_sn_input(struct mqtt_sn_client *client);
/**
* @brief Get topic name by topic ID.
*
* @param[in] client The MQTT-SN client that uses this topic.
* @param[in] id Topic identifier.
* @param[out] topic_name Will be assigned to topic name.
*
* @return 0 on success, -ENOENT if topic ID doesn't exist,
* or -EINVAL on invalid arguments.
*/
int mqtt_sn_get_topic_name(struct mqtt_sn_client *client, uint16_t id,
struct mqtt_sn_data *topic_name);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_MQTT_SN_H_ */
/**@} */
``` | /content/code_sandbox/include/zephyr/net/mqtt_sn.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,906 |
```objective-c
/*
*
*/
/**
* @file
 * @brief Socket types definitions
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKET_TYPES_H_
#define ZEPHYR_INCLUDE_NET_SOCKET_TYPES_H_
/**
* @brief BSD Sockets compatible API
* @defgroup bsd_sockets BSD Sockets compatible API
* @ingroup networking
* @{
*/
#include <zephyr/types.h>
/** @cond INTERNAL_HIDDEN */
#ifdef CONFIG_NEWLIB_LIBC
#include <newlib.h>
#ifdef __NEWLIB__
#include <sys/_timeval.h>
#else /* __NEWLIB__ */
#include <sys/types.h>
/* workaround for older Newlib 2.x, as it lacks sys/_timeval.h */
struct timeval {
	/** Seconds part of the time value */
	time_t tv_sec;
	/** Microseconds part of the time value */
	suseconds_t tv_usec;
};
#endif /* __NEWLIB__ */
#else /* CONFIG_NEWLIB_LIBC */
#if defined(CONFIG_ARCH_POSIX) && defined(CONFIG_EXTERNAL_LIBC)
#include <bits/types/struct_timeval.h>
#else
#include <sys/_timeval.h>
#endif
#endif /* CONFIG_NEWLIB_LIBC */
#ifdef __cplusplus
extern "C" {
#endif
#define zsock_timeval timeval
#ifdef __cplusplus
}
#endif
/** @endcond */
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_SOCKET_TYPES_H_ */
``` | /content/code_sandbox/include/zephyr/net/socket_types.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 266 |
```objective-c
/*
*
*/
/**
* @file
* @brief IEEE 802.11 protocol and general Wi-Fi definitions.
*/
/**
* @brief Wi-Fi Management API.
* @defgroup wifi_mgmt Wi-Fi Management
* @since 1.12
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifndef ZEPHYR_INCLUDE_NET_WIFI_H_
#define ZEPHYR_INCLUDE_NET_WIFI_H_
#include <zephyr/sys/util.h> /* for ARRAY_SIZE */
/** Length of the country code string */
#define WIFI_COUNTRY_CODE_LEN 2
/** @cond INTERNAL_HIDDEN */
#define WIFI_LISTEN_INTERVAL_MIN 0
#define WIFI_LISTEN_INTERVAL_MAX 65535
/** @endcond */
#ifdef __cplusplus
extern "C" {
#endif
/** @brief IEEE 802.11 security types. */
enum wifi_security_type {
/** No security. */
WIFI_SECURITY_TYPE_NONE = 0,
/** WPA2-PSK security. */
WIFI_SECURITY_TYPE_PSK,
/** WPA2-PSK-SHA256 security. */
WIFI_SECURITY_TYPE_PSK_SHA256,
/** WPA3-SAE security. */
WIFI_SECURITY_TYPE_SAE,
/** WPA3-SAE security with hunting-and-pecking loop. */
WIFI_SECURITY_TYPE_SAE_HNP = WIFI_SECURITY_TYPE_SAE,
/** WPA3-SAE security with hash-to-element. */
WIFI_SECURITY_TYPE_SAE_H2E,
/** WPA3-SAE security with both hunting-and-pecking loop and hash-to-element enabled. */
WIFI_SECURITY_TYPE_SAE_AUTO,
/** GB 15629.11-2003 WAPI security. */
WIFI_SECURITY_TYPE_WAPI,
/** EAP security - Enterprise. */
WIFI_SECURITY_TYPE_EAP,
/** EAP TLS security - Enterprise. */
WIFI_SECURITY_TYPE_EAP_TLS = WIFI_SECURITY_TYPE_EAP,
/** WEP security. */
WIFI_SECURITY_TYPE_WEP,
/** WPA-PSK security. */
WIFI_SECURITY_TYPE_WPA_PSK,
/** WPA/WPA2/WPA3 PSK security. */
WIFI_SECURITY_TYPE_WPA_AUTO_PERSONAL,
/** @cond INTERNAL_HIDDEN */
__WIFI_SECURITY_TYPE_AFTER_LAST,
WIFI_SECURITY_TYPE_MAX = __WIFI_SECURITY_TYPE_AFTER_LAST - 1,
WIFI_SECURITY_TYPE_UNKNOWN
/** @endcond */
};
/** Helper function to get user-friendly security type name. */
const char *wifi_security_txt(enum wifi_security_type security);
/** @brief IEEE 802.11w - Management frame protection. */
enum wifi_mfp_options {
/** MFP disabled. */
WIFI_MFP_DISABLE = 0,
/** MFP optional. */
WIFI_MFP_OPTIONAL,
/** MFP required. */
WIFI_MFP_REQUIRED,
/** @cond INTERNAL_HIDDEN */
__WIFI_MFP_AFTER_LAST,
WIFI_MFP_MAX = __WIFI_MFP_AFTER_LAST - 1,
WIFI_MFP_UNKNOWN
/** @endcond */
};
/** Helper function to get user-friendly MFP name.*/
const char *wifi_mfp_txt(enum wifi_mfp_options mfp);
/**
* @brief IEEE 802.11 operational frequency bands (not exhaustive).
*/
enum wifi_frequency_bands {
/** 2.4 GHz band. */
WIFI_FREQ_BAND_2_4_GHZ = 0,
/** 5 GHz band. */
WIFI_FREQ_BAND_5_GHZ,
/** 6 GHz band (Wi-Fi 6E, also extends to 7GHz). */
WIFI_FREQ_BAND_6_GHZ,
/** Number of frequency bands available. */
__WIFI_FREQ_BAND_AFTER_LAST,
/** Highest frequency band available. */
WIFI_FREQ_BAND_MAX = __WIFI_FREQ_BAND_AFTER_LAST - 1,
/** Invalid frequency band */
WIFI_FREQ_BAND_UNKNOWN
};
/** Helper function to get user-friendly frequency band name. */
const char *wifi_band_txt(enum wifi_frequency_bands band);
/** Max SSID length */
#define WIFI_SSID_MAX_LEN 32
/** Minimum PSK length */
#define WIFI_PSK_MIN_LEN 8
/** Maximum PSK length */
#define WIFI_PSK_MAX_LEN 64
/** Max SAE password length */
#define WIFI_SAE_PSWD_MAX_LEN 128
/** MAC address length */
#define WIFI_MAC_ADDR_LEN 6
/** Max enterprise identity length */
#define WIFI_ENT_IDENTITY_MAX_LEN 64
/** Max enterprise password length */
#define WIFI_ENT_PSWD_MAX_LEN 128
/** Minimum channel number */
#define WIFI_CHANNEL_MIN 1
/** Maximum channel number */
#define WIFI_CHANNEL_MAX 233
/** Any channel number */
#define WIFI_CHANNEL_ANY 255
/** @brief Wi-Fi interface states.
*
* Based on path_to_url#a4aeb27c1e4abd046df3064ea9756f0bc
*/
enum wifi_iface_state {
/** Interface is disconnected. */
WIFI_STATE_DISCONNECTED = 0,
/** Interface is disabled (administratively). */
WIFI_STATE_INTERFACE_DISABLED,
/** No enabled networks in the configuration. */
WIFI_STATE_INACTIVE,
/** Interface is scanning for networks. */
WIFI_STATE_SCANNING,
/** Authentication with a network is in progress. */
WIFI_STATE_AUTHENTICATING,
/** Association with a network is in progress. */
WIFI_STATE_ASSOCIATING,
/** Association with a network completed. */
WIFI_STATE_ASSOCIATED,
/** 4-way handshake with a network is in progress. */
WIFI_STATE_4WAY_HANDSHAKE,
/** Group Key exchange with a network is in progress. */
WIFI_STATE_GROUP_HANDSHAKE,
/** All authentication completed, ready to pass data. */
WIFI_STATE_COMPLETED,
/** @cond INTERNAL_HIDDEN */
__WIFI_STATE_AFTER_LAST,
WIFI_STATE_MAX = __WIFI_STATE_AFTER_LAST - 1,
WIFI_STATE_UNKNOWN
/** @endcond */
};
/* We rely on the strict order of the enum values, so, let's check it */
BUILD_ASSERT(WIFI_STATE_DISCONNECTED < WIFI_STATE_INTERFACE_DISABLED &&
WIFI_STATE_INTERFACE_DISABLED < WIFI_STATE_INACTIVE &&
WIFI_STATE_INACTIVE < WIFI_STATE_SCANNING &&
WIFI_STATE_SCANNING < WIFI_STATE_AUTHENTICATING &&
WIFI_STATE_AUTHENTICATING < WIFI_STATE_ASSOCIATING &&
WIFI_STATE_ASSOCIATING < WIFI_STATE_ASSOCIATED &&
WIFI_STATE_ASSOCIATED < WIFI_STATE_4WAY_HANDSHAKE &&
WIFI_STATE_4WAY_HANDSHAKE < WIFI_STATE_GROUP_HANDSHAKE &&
WIFI_STATE_GROUP_HANDSHAKE < WIFI_STATE_COMPLETED);
/** Helper function to get user-friendly interface state name. */
const char *wifi_state_txt(enum wifi_iface_state state);
/** @brief Wi-Fi interface modes.
*
* Based on path_to_url#a4aeb27c1e4abd046df3064ea9756f0bc
*/
enum wifi_iface_mode {
/** Infrastructure station mode. */
WIFI_MODE_INFRA = 0,
/** IBSS (ad-hoc) station mode. */
WIFI_MODE_IBSS = 1,
/** AP mode. */
WIFI_MODE_AP = 2,
/** P2P group owner mode. */
WIFI_MODE_P2P_GO = 3,
/** P2P group formation mode. */
WIFI_MODE_P2P_GROUP_FORMATION = 4,
/** 802.11s Mesh mode. */
WIFI_MODE_MESH = 5,
/** @cond INTERNAL_HIDDEN */
__WIFI_MODE_AFTER_LAST,
WIFI_MODE_MAX = __WIFI_MODE_AFTER_LAST - 1,
WIFI_MODE_UNKNOWN
/** @endcond */
};
/** Helper function to get user-friendly interface mode name. */
const char *wifi_mode_txt(enum wifi_iface_mode mode);
/** @brief Wi-Fi link operating modes
*
* As per path_to_url#Versions_and_generations.
*/
enum wifi_link_mode {
/** 802.11 (legacy). */
WIFI_0 = 0,
/** 802.11b. */
WIFI_1,
/** 802.11a. */
WIFI_2,
/** 802.11g. */
WIFI_3,
/** 802.11n. */
WIFI_4,
/** 802.11ac. */
WIFI_5,
/** 802.11ax. */
WIFI_6,
/** 802.11ax 6GHz. */
WIFI_6E,
/** 802.11be. */
WIFI_7,
/** @cond INTERNAL_HIDDEN */
__WIFI_LINK_MODE_AFTER_LAST,
WIFI_LINK_MODE_MAX = __WIFI_LINK_MODE_AFTER_LAST - 1,
WIFI_LINK_MODE_UNKNOWN
/** @endcond */
};
/** Helper function to get user-friendly link mode name. */
const char *wifi_link_mode_txt(enum wifi_link_mode link_mode);
/** @brief Wi-Fi scanning types. */
enum wifi_scan_type {
/** Active scanning (default). */
WIFI_SCAN_TYPE_ACTIVE = 0,
/** Passive scanning. */
WIFI_SCAN_TYPE_PASSIVE,
};
/** @brief Wi-Fi power save states. */
enum wifi_ps {
/** Power save disabled. */
WIFI_PS_DISABLED = 0,
/** Power save enabled. */
WIFI_PS_ENABLED,
};
/** Helper function to get user-friendly ps name. */
const char *wifi_ps_txt(enum wifi_ps ps_name);
/** @brief Wi-Fi power save modes. */
enum wifi_ps_mode {
/** Legacy power save mode. */
WIFI_PS_MODE_LEGACY = 0,
/* This has to be configured before connecting to the AP,
* as support for ADDTS action frames is not available.
*/
/** WMM power save mode. */
WIFI_PS_MODE_WMM,
};
/** Helper function to get user-friendly ps mode name. */
const char *wifi_ps_mode_txt(enum wifi_ps_mode ps_mode);
/** Network interface index min value */
#define WIFI_INTERFACE_INDEX_MIN 1
/** Network interface index max value */
#define WIFI_INTERFACE_INDEX_MAX 255
/** @brief Wifi operational mode */
enum wifi_operational_modes {
/** STA mode setting enable */
WIFI_STA_MODE = BIT(0),
/** Monitor mode setting enable */
WIFI_MONITOR_MODE = BIT(1),
/** TX injection mode setting enable */
WIFI_TX_INJECTION_MODE = BIT(2),
/** Promiscuous mode setting enable */
WIFI_PROMISCUOUS_MODE = BIT(3),
/** AP mode setting enable */
WIFI_AP_MODE = BIT(4),
/** Softap mode setting enable */
WIFI_SOFTAP_MODE = BIT(5),
};
/** @brief Mode filter settings */
enum wifi_filter {
/** Support management, data and control packet sniffing */
WIFI_PACKET_FILTER_ALL = BIT(0),
/** Support only sniffing of management packets */
WIFI_PACKET_FILTER_MGMT = BIT(1),
/** Support only sniffing of data packets */
WIFI_PACKET_FILTER_DATA = BIT(2),
/** Support only sniffing of control packets */
WIFI_PACKET_FILTER_CTRL = BIT(3),
};
/** @brief Wi-Fi Target Wake Time (TWT) operations. */
enum wifi_twt_operation {
/** TWT setup operation */
WIFI_TWT_SETUP = 0,
/** TWT teardown operation */
WIFI_TWT_TEARDOWN,
};
/** Helper function to get user-friendly twt operation name. */
const char *wifi_twt_operation_txt(enum wifi_twt_operation twt_operation);
/** @brief Wi-Fi Target Wake Time (TWT) negotiation types. */
enum wifi_twt_negotiation_type {
/** TWT individual negotiation */
WIFI_TWT_INDIVIDUAL = 0,
/** TWT broadcast negotiation */
WIFI_TWT_BROADCAST,
/** TWT wake TBTT negotiation */
WIFI_TWT_WAKE_TBTT
};
/** Helper function to get user-friendly twt negotiation type name. */
const char *wifi_twt_negotiation_type_txt(enum wifi_twt_negotiation_type twt_negotiation);
/** @brief Wi-Fi Target Wake Time (TWT) setup commands. */
enum wifi_twt_setup_cmd {
/** TWT setup request */
WIFI_TWT_SETUP_CMD_REQUEST = 0,
/** TWT setup suggest (parameters can be changed by AP) */
WIFI_TWT_SETUP_CMD_SUGGEST,
/** TWT setup demand (parameters can not be changed by AP) */
WIFI_TWT_SETUP_CMD_DEMAND,
/** TWT setup grouping (grouping of TWT flows) */
WIFI_TWT_SETUP_CMD_GROUPING,
/** TWT setup accept (parameters accepted by AP) */
WIFI_TWT_SETUP_CMD_ACCEPT,
/** TWT setup alternate (alternate parameters suggested by AP) */
WIFI_TWT_SETUP_CMD_ALTERNATE,
/** TWT setup dictate (parameters dictated by AP) */
WIFI_TWT_SETUP_CMD_DICTATE,
/** TWT setup reject (parameters rejected by AP) */
WIFI_TWT_SETUP_CMD_REJECT,
};
/** Helper function to get user-friendly twt setup cmd name. */
const char *wifi_twt_setup_cmd_txt(enum wifi_twt_setup_cmd twt_setup);
/** @brief Wi-Fi Target Wake Time (TWT) negotiation status. */
enum wifi_twt_setup_resp_status {
/** TWT response received for TWT request */
WIFI_TWT_RESP_RECEIVED = 0,
/** TWT response not received for TWT request */
WIFI_TWT_RESP_NOT_RECEIVED,
};
/** @brief Target Wake Time (TWT) error codes. */
enum wifi_twt_fail_reason {
/** Unspecified error */
WIFI_TWT_FAIL_UNSPECIFIED,
/** Command execution failed */
WIFI_TWT_FAIL_CMD_EXEC_FAIL,
/** Operation not supported */
WIFI_TWT_FAIL_OPERATION_NOT_SUPPORTED,
/** Unable to get interface status */
WIFI_TWT_FAIL_UNABLE_TO_GET_IFACE_STATUS,
/** Device not connected to AP */
WIFI_TWT_FAIL_DEVICE_NOT_CONNECTED,
/** Peer not HE (802.11ax/Wi-Fi 6) capable */
WIFI_TWT_FAIL_PEER_NOT_HE_CAPAB,
/** Peer not TWT capable */
WIFI_TWT_FAIL_PEER_NOT_TWT_CAPAB,
/** A TWT flow is already in progress */
WIFI_TWT_FAIL_OPERATION_IN_PROGRESS,
/** Invalid negotiated flow id */
WIFI_TWT_FAIL_INVALID_FLOW_ID,
/** IP address not assigned or configured */
WIFI_TWT_FAIL_IP_NOT_ASSIGNED,
/** Flow already exists */
WIFI_TWT_FAIL_FLOW_ALREADY_EXISTS,
};
/** @brief Wi-Fi Target Wake Time (TWT) teardown status. */
enum wifi_twt_teardown_status {
	/** TWT teardown success */
	WIFI_TWT_TEARDOWN_SUCCESS = 0,
	/** TWT teardown failure */
	WIFI_TWT_TEARDOWN_FAILED,
};
/** @cond INTERNAL_HIDDEN */
static const char * const wifi_twt_err_code_tbl[] = {
[WIFI_TWT_FAIL_UNSPECIFIED] = "Unspecified",
[WIFI_TWT_FAIL_CMD_EXEC_FAIL] = "Command Execution failed",
[WIFI_TWT_FAIL_OPERATION_NOT_SUPPORTED] =
"Operation not supported",
[WIFI_TWT_FAIL_UNABLE_TO_GET_IFACE_STATUS] =
"Unable to get iface status",
[WIFI_TWT_FAIL_DEVICE_NOT_CONNECTED] =
"Device not connected",
[WIFI_TWT_FAIL_PEER_NOT_HE_CAPAB] = "Peer not HE capable",
[WIFI_TWT_FAIL_PEER_NOT_TWT_CAPAB] = "Peer not TWT capable",
[WIFI_TWT_FAIL_OPERATION_IN_PROGRESS] =
"Operation already in progress",
[WIFI_TWT_FAIL_INVALID_FLOW_ID] =
"Invalid negotiated flow id",
[WIFI_TWT_FAIL_IP_NOT_ASSIGNED] =
"IP address not assigned",
[WIFI_TWT_FAIL_FLOW_ALREADY_EXISTS] =
"Flow already exists",
};
/** @endcond */
/** Helper function to get user-friendly TWT error code name. */
static inline const char *wifi_twt_get_err_code_str(int16_t err_no)
{
if ((err_no) < (int16_t)ARRAY_SIZE(wifi_twt_err_code_tbl)) {
return wifi_twt_err_code_tbl[err_no];
}
return "<unknown>";
}
/** @brief Wi-Fi power save parameters. */
enum wifi_ps_param_type {
/** Power save state. */
WIFI_PS_PARAM_STATE,
/** Power save listen interval. */
WIFI_PS_PARAM_LISTEN_INTERVAL,
/** Power save wakeup mode. */
WIFI_PS_PARAM_WAKEUP_MODE,
/** Power save mode. */
WIFI_PS_PARAM_MODE,
/** Power save timeout. */
WIFI_PS_PARAM_TIMEOUT,
};
/** @brief Wi-Fi power save modes. */
enum wifi_ps_wakeup_mode {
/** DTIM based wakeup. */
WIFI_PS_WAKEUP_MODE_DTIM = 0,
/** Listen interval based wakeup. */
WIFI_PS_WAKEUP_MODE_LISTEN_INTERVAL,
};
/** Helper function to get user-friendly ps wakeup mode name. */
const char *wifi_ps_wakeup_mode_txt(enum wifi_ps_wakeup_mode ps_wakeup_mode);
/** @brief Wi-Fi power save error codes. */
enum wifi_config_ps_param_fail_reason {
/** Unspecified error */
WIFI_PS_PARAM_FAIL_UNSPECIFIED,
/** Command execution failed */
WIFI_PS_PARAM_FAIL_CMD_EXEC_FAIL,
/** Parameter not supported */
WIFI_PS_PARAM_FAIL_OPERATION_NOT_SUPPORTED,
/** Unable to get interface status */
WIFI_PS_PARAM_FAIL_UNABLE_TO_GET_IFACE_STATUS,
/** Device not connected to AP */
WIFI_PS_PARAM_FAIL_DEVICE_NOT_CONNECTED,
/** Device already connected to AP */
WIFI_PS_PARAM_FAIL_DEVICE_CONNECTED,
/** Listen interval out of range */
WIFI_PS_PARAM_LISTEN_INTERVAL_RANGE_INVALID,
};
/** @cond INTERNAL_HIDDEN */
static const char * const wifi_ps_param_config_err_code_tbl[] = {
[WIFI_PS_PARAM_FAIL_UNSPECIFIED] = "Unspecified",
[WIFI_PS_PARAM_FAIL_CMD_EXEC_FAIL] = "Command Execution failed",
[WIFI_PS_PARAM_FAIL_OPERATION_NOT_SUPPORTED] =
"Operation not supported",
[WIFI_PS_PARAM_FAIL_UNABLE_TO_GET_IFACE_STATUS] =
"Unable to get iface status",
[WIFI_PS_PARAM_FAIL_DEVICE_NOT_CONNECTED] =
"Cannot set parameters while device not connected",
[WIFI_PS_PARAM_FAIL_DEVICE_CONNECTED] =
"Cannot set parameters while device connected",
[WIFI_PS_PARAM_LISTEN_INTERVAL_RANGE_INVALID] =
"Parameter out of range",
};
/** @endcond */
#ifdef CONFIG_WIFI_NM_WPA_SUPPLICANT_WNM
/** IEEE 802.11v BTM (BSS transition management) Query reasons.
* Refer to IEEE Std 802.11v-2011 - Table 7-43x-Transition and Transition Query reasons table.
*/
enum wifi_btm_query_reason {
/** Unspecified. */
WIFI_BTM_QUERY_REASON_UNSPECIFIED = 0,
/** Low RSSI. */
WIFI_BTM_QUERY_REASON_LOW_RSSI = 16,
/** Leaving ESS. */
WIFI_BTM_QUERY_REASON_LEAVING_ESS = 20,
};
#endif
/** Helper function to get user-friendly power save error code name. */
static inline const char *wifi_ps_get_config_err_code_str(int16_t err_no)
{
if ((err_no) < (int16_t)ARRAY_SIZE(wifi_ps_param_config_err_code_tbl)) {
return wifi_ps_param_config_err_code_tbl[err_no];
}
return "<unknown>";
}
/** @brief Wi-Fi AP mode configuration parameter */
enum wifi_ap_config_param {
/** Used for AP mode configuration parameter ap_max_inactivity */
WIFI_AP_CONFIG_PARAM_MAX_INACTIVITY = BIT(0),
/** Used for AP mode configuration parameter max_num_sta */
WIFI_AP_CONFIG_PARAM_MAX_NUM_STA = BIT(1),
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_WIFI_H_ */
``` | /content/code_sandbox/include/zephyr/net/wifi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,186 |
```objective-c
/*
* Lukasz Majewski <lukma@denx.de>
*/
/** @file
* @brief DSA definitions and handlers
*/
#ifndef ZEPHYR_INCLUDE_NET_DSA_H_
#define ZEPHYR_INCLUDE_NET_DSA_H_
#include <zephyr/device.h>
#include <zephyr/net/net_if.h>
/**
* @brief DSA definitions and helpers
* @defgroup DSA Distributed Switch Architecture definitions and helpers
* @since 2.5
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
#define NET_DSA_PORT_MAX_COUNT 8
#define DSA_STATUS_PERIOD_MS K_MSEC(1000)
/*
* Size of the DSA TAG:
* - KSZ8794 - 1 byte
*/
#if defined(CONFIG_DSA_KSZ8794) && defined(CONFIG_DSA_KSZ_TAIL_TAGGING)
#define DSA_TAG_SIZE 1
#else
#define DSA_TAG_SIZE 0
#endif
/** @endcond */
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief DSA generic transmit function
*
* This is a generic function for passing packets from slave DSA interface to
* master.
*
* @param dev Device
* @param pkt Network packet
*
* Returns:
* - 0 if ok (packet sent via master iface), < 0 if error
*/
int dsa_tx(const struct device *dev, struct net_pkt *pkt);
/**
* @brief DSA (MGMT) Receive packet callback
*
* Callback gets called upon receiving packet. It is responsible for
* freeing packet or indicating to the stack that it needs to free packet
* by returning correct net_verdict.
*
* Returns:
* - NET_DROP, if packet was invalid, rejected or we want the stack to free it.
* In this case the core stack will free the packet.
* - NET_OK, if the packet was accepted, in this case the ownership of the
* net_pkt goes to callback and core network stack will forget it.
*/
typedef enum net_verdict (*dsa_net_recv_cb_t)(struct net_if *iface,
struct net_pkt *pkt);
/**
* @brief Register DSA Rx callback functions
*
* @param iface Network interface
* @param cb Receive callback function
*
* @return 0 if ok, < 0 if error
*/
int dsa_register_recv_callback(struct net_if *iface, dsa_net_recv_cb_t cb);
/**
* @brief Set DSA interface to packet
*
* @param iface Network interface (master)
* @param pkt Network packet
*
* @return Return the slave network interface
*/
struct net_if *dsa_net_recv(struct net_if *iface, struct net_pkt **pkt);
/**
* @brief Pointer to master interface send function
*/
typedef int (*dsa_send_t)(const struct device *dev, struct net_pkt *pkt);
/**
* @brief DSA helper function to register transmit function for master
*
* @param iface Network interface (master)
* @param fn Pointer to master interface send method
*
* Returns:
* - 0 if ok, < 0 if error
*/
int dsa_register_master_tx(struct net_if *iface, dsa_send_t fn);
/**
* @brief DSA helper function to check if port is master
*
* @param iface Network interface (master)
*
* Returns:
* - true if ok, false otherwise
*/
bool dsa_is_port_master(struct net_if *iface);
/**
* @cond INTERNAL_HIDDEN
*
* These are for internal use only, so skip these in
* public documentation.
*/
/** DSA context data */
struct dsa_context {
/** Pointers to all DSA slave network interfaces */
struct net_if *iface_slave[NET_DSA_PORT_MAX_COUNT];
/** Pointer to DSA master network interface */
struct net_if *iface_master;
/** DSA specific API callbacks - filled in the switch IC driver */
struct dsa_api *dapi;
/** DSA related work (e.g. monitor if network interface is up) */
struct k_work_delayable dsa_work;
/** Number of slave ports in the DSA switch */
uint8_t num_slave_ports;
/** Status of each port */
bool link_up[NET_DSA_PORT_MAX_COUNT];
/** Instance specific data */
void *prv_data;
};
/**
 * @brief Structure to provide DSA switch api callbacks - it is an augmented
 * struct ethernet_api.
 */
struct dsa_api {
	/** Function to get proper LAN{123} interface */
	struct net_if *(*dsa_get_iface)(struct net_if *iface,
					struct net_pkt *pkt);
	/*
	 * Callbacks required for DSA switch initialization and configuration.
	 *
	 * Each switch instance (e.g. two KSZ8794 ICs) would have its own struct
	 * dsa_context.
	 */
	/** Read value from DSA register */
	int (*switch_read)(const struct device *dev, uint16_t reg_addr,
			   uint8_t *value);
	/** Write value to DSA register */
	int (*switch_write)(const struct device *dev, uint16_t reg_addr,
			    uint8_t value);
	/** Program (set) mac table entry in the DSA switch */
	int (*switch_set_mac_table_entry)(const struct device *dev,
					  const uint8_t *mac,
					  uint8_t fw_port,
					  uint16_t tbl_entry_idx,
					  uint16_t flags);
	/** Read mac table entry from the DSA switch */
	int (*switch_get_mac_table_entry)(const struct device *dev,
					  uint8_t *buf,
					  uint16_t tbl_entry_idx);
	/*
	 * DSA helper callbacks
	 */
	/** Transform a packet before transmission on the master interface.
	 * NOTE(review): presumably adds the switch-specific tag to the
	 * packet - confirm against the switch driver implementations.
	 */
	struct net_pkt *(*dsa_xmit_pkt)(struct net_if *iface,
					struct net_pkt *pkt);
};
/**
* @endcond
*/
/**
* @brief Get network interface of a slave port
*
* @param iface Master port
* @param[in] slave_num Slave port number
*
* @return network interface of the slave if successful
* @return NULL if slave port does not exist
*/
struct net_if *dsa_get_slave_port(struct net_if *iface, int slave_num);
/**
* @brief Read from DSA switch register
*
* @param iface The interface
* @param[in] reg_addr The register address
* @param value The value
*
* @return 0 if successful, negative if error
*/
int dsa_switch_read(struct net_if *iface, uint16_t reg_addr, uint8_t *value);
/**
* @brief Write to DSA switch
*
* @param iface The interface
* @param[in] reg_addr The register address
* @param[in] value The value
*
 * @return 0 if successful, negative if error
*/
int dsa_switch_write(struct net_if *iface, uint16_t reg_addr, uint8_t value);
/**
* @brief Write static MAC table entry
*
* @param iface Master DSA interface
* @param[in] mac MAC address
* @param[in] fw_port The firmware port
* @param[in] tbl_entry_idx Table entry index
* @param[in] flags Flags
*
* @return 0 if successful, negative if error
*/
int dsa_switch_set_mac_table_entry(struct net_if *iface,
const uint8_t *mac,
uint8_t fw_port,
uint16_t tbl_entry_idx,
uint16_t flags);
/**
* @brief Read static MAC table entry
*
* @param iface Master DSA interface
* @param buf Buffer to receive MAC address
* @param[in] tbl_entry_idx Table entry index
*
* @return 0 if successful, negative if error
*/
int dsa_switch_get_mac_table_entry(struct net_if *iface,
uint8_t *buf,
uint16_t tbl_entry_idx);
/**
 * @brief Structure to provide mac address for each LAN interface
 */
struct dsa_slave_config {
	/** MAC address for each of the LAN{1,2,3,...} ports */
	uint8_t mac_addr[6];
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_DSA_H_ */
``` | /content/code_sandbox/include/zephyr/net/dsa.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,767 |
```objective-c
/*
*
*/
/** @file
* @brief Definitions for IEEE 802.3, Section 2 MII compatible PHY transceivers
*/
#ifndef ZEPHYR_INCLUDE_NET_MII_H_
#define ZEPHYR_INCLUDE_NET_MII_H_
/**
* @brief Ethernet MII (media independent interface) functions
* @defgroup ethernet_mii Ethernet MII Support Functions
* @since 1.7
* @version 0.8.0
* @ingroup ethernet
* @{
*/
/* MII management registers */
/** Basic Mode Control Register */
#define MII_BMCR 0x0
/** Basic Mode Status Register */
#define MII_BMSR 0x1
/** PHY ID 1 Register */
#define MII_PHYID1R 0x2
/** PHY ID 2 Register */
#define MII_PHYID2R 0x3
/** Auto-Negotiation Advertisement Register */
#define MII_ANAR 0x4
/** Auto-Negotiation Link Partner Ability Reg */
#define MII_ANLPAR 0x5
/** Auto-Negotiation Expansion Register */
#define MII_ANER 0x6
/** Auto-Negotiation Next Page Transmit Register */
#define MII_ANNPTR 0x7
/** Auto-Negotiation Link Partner Received Next Page Reg */
#define MII_ANLPRNPR 0x8
/** 1000BASE-T Control Register */
#define MII_1KTCR 0x9
/** 1000BASE-T Status Register */
#define MII_1KSTSR 0xa
/** MMD Access Control Register */
#define MII_MMD_ACR 0xd
/** MMD Access Address Data Register */
#define MII_MMD_AADR 0xe
/** Extended Status Register */
#define MII_ESTAT 0xf
/* Basic Mode Control Register (BMCR) bit definitions */
/** PHY reset */
#define MII_BMCR_RESET (1 << 15)
/** enable loopback mode */
#define MII_BMCR_LOOPBACK (1 << 14)
/** 10=1000Mbps 01=100Mbps; 00=10Mbps */
#define MII_BMCR_SPEED_LSB (1 << 13)
/** Auto-Negotiation enable */
#define MII_BMCR_AUTONEG_ENABLE (1 << 12)
/** power down mode */
#define MII_BMCR_POWER_DOWN (1 << 11)
/** isolate electrically PHY from MII */
#define MII_BMCR_ISOLATE (1 << 10)
/** restart auto-negotiation */
#define MII_BMCR_AUTONEG_RESTART (1 << 9)
/** full duplex mode */
#define MII_BMCR_DUPLEX_MODE (1 << 8)
/** 10=1000Mbps 01=100Mbps; 00=10Mbps */
#define MII_BMCR_SPEED_MSB (1 << 6)
/** Link Speed Field */
#define MII_BMCR_SPEED_MASK (1 << 6 | 1 << 13)
/** select speed 10 Mb/s */
#define MII_BMCR_SPEED_10 (0 << 6 | 0 << 13)
/** select speed 100 Mb/s */
#define MII_BMCR_SPEED_100 (0 << 6 | 1 << 13)
/** select speed 1000 Mb/s */
#define MII_BMCR_SPEED_1000 (1 << 6 | 0 << 13)
/* Basic Mode Status Register (BMSR) bit definitions */
/** 100BASE-T4 capable */
#define MII_BMSR_100BASE_T4 (1 << 15)
/** 100BASE-X full duplex capable */
#define MII_BMSR_100BASE_X_FULL (1 << 14)
/** 100BASE-X half duplex capable */
#define MII_BMSR_100BASE_X_HALF (1 << 13)
/** 10 Mb/s full duplex capable */
#define MII_BMSR_10_FULL (1 << 12)
/** 10 Mb/s half duplex capable */
#define MII_BMSR_10_HALF (1 << 11)
/** 100BASE-T2 full duplex capable */
#define MII_BMSR_100BASE_T2_FULL (1 << 10)
/** 100BASE-T2 half duplex capable */
#define MII_BMSR_100BASE_T2_HALF (1 << 9)
/** extend status information in reg 15 */
#define MII_BMSR_EXTEND_STATUS (1 << 8)
/** PHY accepts management frames with preamble suppressed */
#define MII_BMSR_MF_PREAMB_SUPPR (1 << 6)
/** Auto-negotiation process completed */
#define MII_BMSR_AUTONEG_COMPLETE (1 << 5)
/** remote fault detected */
#define MII_BMSR_REMOTE_FAULT (1 << 4)
/** PHY is able to perform Auto-Negotiation */
#define MII_BMSR_AUTONEG_ABILITY (1 << 3)
/** link is up */
#define MII_BMSR_LINK_STATUS (1 << 2)
/** jabber condition detected */
#define MII_BMSR_JABBER_DETECT (1 << 1)
/** extended register capabilities */
#define MII_BMSR_EXTEND_CAPAB (1 << 0)
/* Auto-negotiation Advertisement Register (ANAR) bit definitions */
/* Auto-negotiation Link Partner Ability Register (ANLPAR) bit definitions */
/** next page */
#define MII_ADVERTISE_NEXT_PAGE (1 << 15)
/** link partner acknowledge response */
#define MII_ADVERTISE_LPACK (1 << 14)
/** remote fault */
#define MII_ADVERTISE_REMOTE_FAULT (1 << 13)
/** try for asymmetric pause */
#define MII_ADVERTISE_ASYM_PAUSE (1 << 11)
/** try for pause */
#define MII_ADVERTISE_PAUSE (1 << 10)
/** try for 100BASE-T4 support */
#define MII_ADVERTISE_100BASE_T4 (1 << 9)
/** try for 100BASE-X full duplex support */
#define MII_ADVERTISE_100_FULL (1 << 8)
/** try for 100BASE-X half duplex support */
#define MII_ADVERTISE_100_HALF (1 << 7)
/** try for 10 Mb/s full duplex support */
#define MII_ADVERTISE_10_FULL (1 << 6)
/** try for 10 Mb/s half duplex support */
#define MII_ADVERTISE_10_HALF (1 << 5)
/** Selector Field Mask */
#define MII_ADVERTISE_SEL_MASK (0x1F << 0)
/** Selector Field */
#define MII_ADVERTISE_SEL_IEEE_802_3 0x01
/* 1000BASE-T Control Register bit definitions */
/** try for 1000BASE-T full duplex support */
#define MII_ADVERTISE_1000_FULL (1 << 9)
/** try for 1000BASE-T half duplex support */
#define MII_ADVERTISE_1000_HALF (1 << 8)
/** Advertise all speeds */
#define MII_ADVERTISE_ALL (MII_ADVERTISE_10_HALF | MII_ADVERTISE_10_FULL |\
MII_ADVERTISE_100_HALF | MII_ADVERTISE_100_FULL |\
MII_ADVERTISE_SEL_IEEE_802_3)
/* Extended Status Register bit definitions */
/** 1000BASE-X full-duplex capable */
#define MII_ESTAT_1000BASE_X_FULL (1 << 15)
/** 1000BASE-X half-duplex capable */
#define MII_ESTAT_1000BASE_X_HALF (1 << 14)
/** 1000BASE-T full-duplex capable */
#define MII_ESTAT_1000BASE_T_FULL (1 << 13)
/** 1000BASE-T half-duplex capable */
#define MII_ESTAT_1000BASE_T_HALF (1 << 12)
/* MMD Access Control Register (MII_MMD_ACR) Register bit definitions */
/** DEVAD Mask */
#define MII_MMD_ACR_DEVAD_MASK (0x1F << 0)
/* Function field values for bits 15:14 (IEEE 802.3 Clause 45.2):
 * 00 = address, 01 = data (no post increment),
 * 10 = data (post increment on reads and writes),
 * 11 = data (post increment on writes only).
 * Fix: the previous 0x10/0x11 were the binary bit patterns written as hex;
 * shifted left by 14 they overflowed the 16-bit register (0x40000/0x44000).
 */
/** Function = address */
#define MII_MMD_ACR_ADDR (0x00 << 14)
/** Function = data, no post increment */
#define MII_MMD_ACR_DATA_NO_POS_INC (0x01 << 14)
/** Function = data, post increment on reads and writes */
#define MII_MMD_ACR_DATA_RW_POS_INC (0x02 << 14)
/** Function = data, post increment on writes only */
#define MII_MMD_ACR_DATA_W_POS_INC (0x03 << 14)
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_MII_H_ */
``` | /content/code_sandbox/include/zephyr/net/mii.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,837 |
```objective-c
/*
*
*/
/**
* @file
* @brief Virtual Interface Management interface public header
*/
#ifndef ZEPHYR_INCLUDE_NET_VIRTUAL_MGMT_H_
#define ZEPHYR_INCLUDE_NET_VIRTUAL_MGMT_H_
#include <zephyr/net/virtual.h>
#include <zephyr/net/net_mgmt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Virtual interface library
* @defgroup virtual_mgmt Virtual Interface Library
* @since 2.6
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
#define _NET_VIRTUAL_INTERFACE_LAYER NET_MGMT_LAYER_L2
#define _NET_VIRTUAL_INTERFACE_CODE 0x209
#define _NET_VIRTUAL_INTERFACE_BASE \
(NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_VIRTUAL_INTERFACE_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_VIRTUAL_INTERFACE_CODE))
#define _NET_VIRTUAL_INTERFACE_EVENT \
(_NET_VIRTUAL_INTERFACE_BASE | NET_MGMT_EVENT_BIT)
/** Parameter block for virtual interface management requests */
struct virtual_interface_req_params {
/** Address family (AF_INET or AF_INET6); selects which union member is valid */
sa_family_t family;
union {
/** Peer IPv4 address (valid when family == AF_INET) */
struct in_addr peer4addr;
/** Peer IPv6 address (valid when family == AF_INET6) */
struct in6_addr peer6addr;
/** Interface MTU, used by the MTU set/get requests */
int mtu;
/** Supported link types, used by the link type set/get requests */
struct virtual_interface_link_types link_types;
};
};
/** Virtual interface management request commands */
enum net_request_virtual_interface_cmd {
/** Set the peer (remote tunnel endpoint) address */
NET_REQUEST_VIRTUAL_INTERFACE_CMD_SET_PEER_ADDR = 1,
/** Get the peer (remote tunnel endpoint) address */
NET_REQUEST_VIRTUAL_INTERFACE_CMD_GET_PEER_ADDR,
/** Set the interface MTU */
NET_REQUEST_VIRTUAL_INTERFACE_CMD_SET_MTU,
/** Get the interface MTU */
NET_REQUEST_VIRTUAL_INTERFACE_CMD_GET_MTU,
/** Set the interface link types */
NET_REQUEST_VIRTUAL_INTERFACE_CMD_SET_LINK_TYPE,
/** Get the interface link types */
NET_REQUEST_VIRTUAL_INTERFACE_CMD_GET_LINK_TYPE,
};
#define NET_REQUEST_VIRTUAL_INTERFACE_SET_PEER_ADDRESS \
(_NET_VIRTUAL_INTERFACE_BASE | \
NET_REQUEST_VIRTUAL_INTERFACE_CMD_SET_PEER_ADDR)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_VIRTUAL_INTERFACE_SET_PEER_ADDRESS);
#define NET_REQUEST_VIRTUAL_INTERFACE_SET_MTU \
(_NET_VIRTUAL_INTERFACE_BASE | \
NET_REQUEST_VIRTUAL_INTERFACE_CMD_SET_MTU)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_VIRTUAL_INTERFACE_SET_MTU);
#define NET_REQUEST_VIRTUAL_INTERFACE_SET_LINK_TYPE \
(_NET_VIRTUAL_INTERFACE_BASE | \
NET_REQUEST_VIRTUAL_INTERFACE_CMD_SET_LINK_TYPE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_VIRTUAL_INTERFACE_SET_LINK_TYPE);
#define NET_REQUEST_VIRTUAL_INTERFACE_GET_PEER_ADDRESS \
(_NET_VIRTUAL_INTERFACE_BASE | \
NET_REQUEST_VIRTUAL_INTERFACE_CMD_GET_PEER_ADDR)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_VIRTUAL_INTERFACE_GET_PEER_ADDRESS);
#define NET_REQUEST_VIRTUAL_INTERFACE_GET_MTU \
(_NET_VIRTUAL_INTERFACE_BASE | \
NET_REQUEST_VIRTUAL_INTERFACE_CMD_GET_MTU)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_VIRTUAL_INTERFACE_GET_MTU);
#define NET_REQUEST_VIRTUAL_INTERFACE_GET_LINK_TYPE \
(_NET_VIRTUAL_INTERFACE_BASE | \
NET_REQUEST_VIRTUAL_INTERFACE_CMD_GET_LINK_TYPE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_VIRTUAL_INTERFACE_GET_LINK_TYPE);
struct net_if;
/** @endcond */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_VIRTUAL_MGMT_H_ */
``` | /content/code_sandbox/include/zephyr/net/virtual_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 647 |
```objective-c
/** @file
* @brief Network packet capture definitions
*
* Definitions for capturing network packets.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_CAPTURE_H_
#define ZEPHYR_INCLUDE_NET_CAPTURE_H_
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network packet capture support functions
* @defgroup net_capture Network packet capture
* @since 2.6
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
struct net_if;
struct net_pkt;
struct device;
struct net_capture_interface_api {
/** Cleanup the setup. This will also disable capturing. After this
* call, the setup function can be called again.
*/
int (*cleanup)(const struct device *dev);
/** Enable / start capturing data */
int (*enable)(const struct device *dev, struct net_if *iface);
/** Disable / stop capturing data */
int (*disable)(const struct device *dev);
/** Is capturing enabled (returns true) or disabled (returns false).
*/
bool (*is_enabled)(const struct device *dev);
/** Send captured data */
int (*send)(const struct device *dev, struct net_if *iface, struct net_pkt *pkt);
};
/** @endcond */
/**
* @brief Setup network packet capturing support.
*
* @param remote_addr The value tells the tunnel remote/outer endpoint
* IP address. The IP address can be either IPv4 or IPv6 address.
* This address is used to select the network interface where the tunnel
* is created.
* @param my_local_addr The local/inner IP address of the tunnel. Can contain
* also port number which is used as UDP source port.
* @param peer_addr The peer/inner IP address of the tunnel. Can contain
* also port number which is used as UDP destination port.
* @param dev Network capture device. This is returned to the caller.
*
* @return 0 if ok, <0 if network packet capture setup failed
*/
int net_capture_setup(const char *remote_addr, const char *my_local_addr, const char *peer_addr,
const struct device **dev);
/**
* @brief Cleanup network packet capturing support.
*
* @details This should be called after the capturing is done and resources
* can be released.
*
* @param dev Network capture device. User must allocate using the
* net_capture_setup() function.
*
* @return 0 if ok, <0 if network packet capture cleanup failed
*/
static inline int net_capture_cleanup(const struct device *dev)
{
#if defined(CONFIG_NET_CAPTURE)
	/* Dispatch through the capture driver's API vtable. */
	return ((const struct net_capture_interface_api *)dev->api)->cleanup(dev);
#else
	ARG_UNUSED(dev);

	return -ENOTSUP;
#endif
}
/**
* @brief Enable network packet capturing support.
*
* @details This creates tunnel network interface where all the
* captured packets are pushed. The captured network packets are
* placed in UDP packets that are sent to tunnel peer.
*
* @param dev Network capture device
* @param iface Network interface we are starting to capture packets.
*
* @return 0 if ok, <0 if network packet capture enable failed
*/
static inline int net_capture_enable(const struct device *dev, struct net_if *iface)
{
#if defined(CONFIG_NET_CAPTURE)
	/* Dispatch through the capture driver's API vtable. */
	return ((const struct net_capture_interface_api *)dev->api)->enable(dev, iface);
#else
	ARG_UNUSED(dev);
	ARG_UNUSED(iface);

	return -ENOTSUP;
#endif
}
/**
* @brief Is network packet capture enabled or disabled.
*
* @param dev Network capture device. If set to NULL, then the
* default capture device is used.
*
* @return True if enabled, False if network capture is disabled.
*/
static inline bool net_capture_is_enabled(const struct device *dev)
{
#if defined(CONFIG_NET_CAPTURE)
const struct net_capture_interface_api *api;
if (dev == NULL) {
/* Caller did not name a device: fall back to the default one. */
/* TODO: Go through all capture devices instead of one */
dev = device_get_binding("NET_CAPTURE0");
if (dev == NULL) {
/* No default capture device exists, so nothing is capturing. */
return false;
}
}
api = (const struct net_capture_interface_api *)dev->api;
return api->is_enabled(dev);
#else
ARG_UNUSED(dev);
return false;
#endif
}
/**
* @brief Disable network packet capturing support.
*
* @param dev Network capture device
*
* @return 0 if ok, <0 if network packet capture disable failed
*/
static inline int net_capture_disable(const struct device *dev)
{
#if defined(CONFIG_NET_CAPTURE)
	/* Dispatch through the capture driver's API vtable. */
	return ((const struct net_capture_interface_api *)dev->api)->disable(dev);
#else
	ARG_UNUSED(dev);

	return -ENOTSUP;
#endif
}
/** @cond INTERNAL_HIDDEN */
/**
* @brief Send captured packet.
*
* @param dev Network capture device
* @param iface Network interface the packet is being sent
* @param pkt The network packet that is sent
*
* @return 0 if ok, <0 if network packet capture send failed
*/
static inline int net_capture_send(const struct device *dev, struct net_if *iface,
struct net_pkt *pkt)
{
#if defined(CONFIG_NET_CAPTURE)
/* Hand the captured packet to the driver for delivery to the peer. */
const struct net_capture_interface_api *api =
(const struct net_capture_interface_api *)dev->api;
return api->send(dev, iface, pkt);
#else
ARG_UNUSED(dev);
ARG_UNUSED(iface);
ARG_UNUSED(pkt);
return -ENOTSUP;
#endif
}
/**
* @brief Check if the network packet needs to be captured or not.
* This is called for every network packet being sent.
*
* @param iface Network interface the packet is being sent
* @param pkt The network packet that is sent
*/
#if defined(CONFIG_NET_CAPTURE)
void net_capture_pkt(struct net_if *iface, struct net_pkt *pkt);
#else
static inline void net_capture_pkt(struct net_if *iface, struct net_pkt *pkt)
{
ARG_UNUSED(iface);
ARG_UNUSED(pkt);
}
#endif
/** @cond INTERNAL_HIDDEN */
/**
* @brief Special variant for net_capture_pkt() which returns the status
* of the send message.
*
* @param iface Network interface the packet is being sent
* @param pkt The network packet that is sent
*
* @return 0 if captured packet was handled ok, <0 if the capture failed
*/
#if defined(CONFIG_NET_CAPTURE)
int net_capture_pkt_with_status(struct net_if *iface, struct net_pkt *pkt);
#else
static inline int net_capture_pkt_with_status(struct net_if *iface, struct net_pkt *pkt)
{
ARG_UNUSED(iface);
ARG_UNUSED(pkt);
return -ENOTSUP;
}
#endif
/** @endcond */
/** The type and direction of the captured data. */
enum net_capture_packet_type {
NET_CAPTURE_HOST, /**< Packet was sent to us by somebody else */
NET_CAPTURE_BROADCAST, /**< Packet was broadcast by somebody else */
NET_CAPTURE_MULTICAST, /**< Packet was multicast, but not broadcast, by somebody else */
NET_CAPTURE_OTHERHOST, /**< Packet was sent by somebody else to somebody else */
NET_CAPTURE_OUTGOING, /**< Packet was sent by us */
};
#define NET_CAPTURE_LL_ADDRLEN 8 /**< Maximum length of a link-layer address */
/** The context information for cooked mode capture */
struct net_capture_cooked {
/** Link-layer address type */
uint16_t hatype;
/** Link-layer address length (<= NET_CAPTURE_LL_ADDRLEN) */
uint16_t halen;
/** Link-layer address */
uint8_t addr[NET_CAPTURE_LL_ADDRLEN];
};
/**
* @brief Initialize cooked mode capture context.
*
* @param ctx Cooked context struct allocated by user.
* @param hatype Link-layer address type
* @param halen Link-layer address length (maximum is 8 bytes)
* @param addr Link-layer address
*
* @return 0 if ok, <0 if context initialization failed
*/
#if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
int net_capture_cooked_setup(struct net_capture_cooked *ctx,
uint16_t hatype,
uint16_t halen,
uint8_t *addr);
#else
static inline int net_capture_cooked_setup(struct net_capture_cooked *ctx,
uint16_t hatype,
uint16_t halen,
uint8_t *addr)
{
ARG_UNUSED(ctx);
ARG_UNUSED(hatype);
ARG_UNUSED(halen);
ARG_UNUSED(addr);
return -ENOTSUP;
}
#endif
/**
* @brief Capture arbitrary data from source that does not have an interface.
* This can be used if you do not have a network interface that
* you want to capture from. For example low level modem device
* below PPP containing HDLC frames, CANBUS data or Bluetooth packets etc.
* The data given to this function should only contain full link
* layer packets so that packet boundary is not lost.
*
* @param ctx Cooked mode capture context.
* @param data Data to capture.
* @param len Length of the data.
* @param type The direction and type of the packet (did we sent it etc).
* @param ptype Protocol type id. These are the ETH_P_* types set in ethernet.h
*/
#if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
void net_capture_data(struct net_capture_cooked *ctx,
const uint8_t *data, size_t len,
enum net_capture_packet_type type,
uint16_t ptype);
#else
static inline void net_capture_data(struct net_capture_cooked *ctx,
const uint8_t *data, size_t len,
enum net_capture_packet_type type,
uint16_t ptype)
{
ARG_UNUSED(ctx);
ARG_UNUSED(data);
ARG_UNUSED(len);
ARG_UNUSED(type);
ARG_UNUSED(ptype);
}
#endif
struct net_capture_info {
const struct device *capture_dev;
struct net_if *capture_iface;
struct net_if *tunnel_iface;
struct sockaddr *peer;
struct sockaddr *local;
bool is_enabled;
};
/**
* @typedef net_capture_cb_t
* @brief Callback used while iterating over capture devices
*
* @param info Information about capture device
* @param user_data A valid pointer to user data or NULL
*/
typedef void (*net_capture_cb_t)(struct net_capture_info *info, void *user_data);
/**
* @brief Go through all the capture devices in order to get
* information about them. This is mainly useful in
* net-shell to print data about currently active
* captures.
*
* @param cb Callback to call for each capture device
* @param user_data User supplied data
*/
#if defined(CONFIG_NET_CAPTURE)
void net_capture_foreach(net_capture_cb_t cb, void *user_data);
#else
static inline void net_capture_foreach(net_capture_cb_t cb, void *user_data)
{
ARG_UNUSED(cb);
ARG_UNUSED(user_data);
}
#endif
/** @endcond */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_CAPTURE_H_ */
``` | /content/code_sandbox/include/zephyr/net/capture.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,364 |
```objective-c
/*
*
*/
/** @file
*
* @brief Utility functions to be used by the Wi-Fi subsystem.
*/
#ifndef ZEPHYR_INCLUDE_NET_WIFI_UTILS_H_
#define ZEPHYR_INCLUDE_NET_WIFI_UTILS_H_
#ifdef __cplusplus
extern "C" {
#endif
/**
* @addtogroup wifi_mgmt
* @{
*/
/**
* @name Wi-Fi utility functions.
*
* Utility functions for the Wi-Fi subsystem.
* @{
*/
/** Maximum length of the band specification string */
#define WIFI_UTILS_MAX_BAND_STR_LEN 3
/** Maximum length of the channel specification string */
#define WIFI_UTILS_MAX_CHAN_STR_LEN 4
/**
* @brief Convert a band specification string to a bitmap representing the bands.
*
* @details The function will parse a string which specifies Wi-Fi frequency band
* values as a comma separated string and convert it to a bitmap. The string can
* use the following characters to represent the bands:
*
* - 2: 2.4 GHz
* - 5: 5 GHz
* - 6: 6 GHz
*
* For the bitmap generated refer to ::wifi_frequency_bands
* for bit position of each band.
*
* E.g. a string "2,5,6" will be converted to a bitmap value of 0x7
*
 * @param scan_bands_str String specifying the bands to scan, as a comma
 * separated list in the format described above.
* @param band_map Pointer to the bitmap variable to be updated.
*
* @retval 0 on success.
* @retval -errno value in case of failure.
*/
int wifi_utils_parse_scan_bands(char *scan_bands_str, uint8_t *band_map);
/**
* @brief Append a string containing an SSID to an array of SSID strings.
*
* @param scan_ssids_str string to be appended in the list of scanned SSIDs.
* @param ssids Pointer to an array where the SSIDs pointers are to be stored.
* @param num_ssids Maximum number of SSIDs that can be stored.
*
* @retval 0 on success.
* @retval -errno value in case of failure.
*/
int wifi_utils_parse_scan_ssids(char *scan_ssids_str,
const char *ssids[],
uint8_t num_ssids);
/**
* @brief Convert a string containing a specification of scan channels to an array.
*
* @details The function will parse a string which specifies channels to be scanned
* as a string and convert it to an array.
*
* The channel string has to be formatted using the colon (:), comma(,), hyphen (-) and
* underscore (_) delimiters as follows:
* - A colon identifies the value preceding it as a band. A band value
* (2: 2.4 GHz, 5: 5 GHz 6: 6 GHz) has to precede the channels in that band (e.g. 2: etc)
* - Hyphens (-) are used to identify channel ranges (e.g. 2-7, 32-48 etc)
* - Commas are used to separate channel values within a band. Channels can be specified
* as individual values (2,6,48 etc) or channel ranges using hyphens (1-14, 32-48 etc)
* - Underscores (_) are used to specify multiple band-channel sets (e.g. 2:1,2_5:36,40 etc)
* - No spaces should be used anywhere, i.e. before/after commas,
* before/after hyphens etc.
*
* An example channel specification specifying channels in the 2.4 GHz and 5 GHz bands is
* as below:
* 2:1,5,7,9-11_5:36-48,100,163-167
*
* @param scan_chan_str List of channels expressed in the format described above.
* @param chan Pointer to an array where the parsed channels are to be stored.
* @param max_channels Maximum number of channels to store
*
* @retval 0 on success.
* @retval -errno value in case of failure.
*/
int wifi_utils_parse_scan_chan(char *scan_chan_str,
struct wifi_band_channel *chan,
uint8_t max_channels);
/**
* @brief Validate a channel against a band.
*
* @param band Band to validate the channel against.
* @param chan Channel to validate.
*
* @retval true if the channel is valid for the band.
* @retval false if the channel is not valid for the band.
*/
bool wifi_utils_validate_chan(uint8_t band,
uint16_t chan);
/**
* @brief Validate a channel against the 2.4 GHz band.
*
* @param chan Channel to validate.
*
* @retval true if the channel is valid for the band.
* @retval false if the channel is not valid for the band.
*/
bool wifi_utils_validate_chan_2g(uint16_t chan);
/**
* @brief Validate a channel against the 5 GHz band.
*
* @param chan Channel to validate.
*
* @retval true if the channel is valid for the band.
* @retval false if the channel is not valid for the band.
*/
bool wifi_utils_validate_chan_5g(uint16_t chan);
/**
* @brief Validate a channel against the 6 GHz band.
*
* @param chan Channel to validate.
*
* @retval true if the channel is valid for the band.
* @retval false if the channel is not valid for the band.
*/
bool wifi_utils_validate_chan_6g(uint16_t chan);
/**
* @}
*/
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_WIFI_UTILS_H_ */
``` | /content/code_sandbox/include/zephyr/net/wifi_utils.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,185 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for network interface
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_IF_H_
#define ZEPHYR_INCLUDE_NET_NET_IF_H_
/**
* @brief Network Interface abstraction layer
* @defgroup net_if Network Interface abstraction layer
* @since 1.5
* @version 1.0.0
* @ingroup networking
* @{
*/
#include <zephyr/device.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/hostname.h>
#include <zephyr/net/net_linkaddr.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_l2.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_timeout.h>
#if defined(CONFIG_NET_DHCPV4) && defined(CONFIG_NET_NATIVE_IPV4)
#include <zephyr/net/dhcpv4.h>
#endif
#if defined(CONFIG_NET_DHCPV6) && defined(CONFIG_NET_NATIVE_IPV6)
#include <zephyr/net/dhcpv6.h>
#endif
#if defined(CONFIG_NET_IPV4_AUTO) && defined(CONFIG_NET_NATIVE_IPV4)
#include <zephyr/net/ipv4_autoconf.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network Interface unicast IP addresses
*
* Stores the unicast IP addresses assigned to this network interface.
*/
struct net_if_addr {
/** IP address */
struct net_addr address;
/** Reference counter. This is used to prevent address removal if there
* are sockets that have bound the local endpoint to this address.
*/
atomic_t atomic_ref;
#if defined(CONFIG_NET_NATIVE_IPV6)
struct net_timeout lifetime;
#endif
/** How the IP address was set */
enum net_addr_type addr_type;
/** What is the current state of the address */
enum net_addr_state addr_state;
#if defined(CONFIG_NET_NATIVE_IPV6)
#if defined(CONFIG_NET_IPV6_PE)
/** Address creation time. This is used to determine if the maximum
* lifetime for this address is reached or not. The value is in seconds.
*/
uint32_t addr_create_time;
/** Preferred lifetime for the address in seconds.
*/
uint32_t addr_preferred_lifetime;
/** Address timeout value. This is only used if DAD needs to be redone
* for this address because of earlier DAD failure. This value is in
* seconds.
*/
int32_t addr_timeout;
#endif
#endif /* CONFIG_NET_NATIVE_IPV6 */
union {
#if defined(CONFIG_NET_IPV6_DAD)
struct {
/** Duplicate address detection (DAD) timer */
sys_snode_t dad_node;
uint32_t dad_start;
/** How many times we have done DAD */
uint8_t dad_count;
};
#endif /* CONFIG_NET_IPV6_DAD */
#if defined(CONFIG_NET_IPV4_ACD)
struct {
/** Address conflict detection (ACD) timer. */
sys_snode_t acd_node;
k_timepoint_t acd_timeout;
/** ACD probe/announcement counter. */
uint8_t acd_count;
/** ACD status. */
uint8_t acd_state;
};
#endif /* CONFIG_NET_IPV4_ACD */
};
#if defined(CONFIG_NET_IPV6_DAD) || defined(CONFIG_NET_IPV4_ACD)
/** What interface the conflict detection is running */
uint8_t ifindex;
#endif
/** Is the IP address valid forever */
uint8_t is_infinite : 1;
/** Is this IP address used or not */
uint8_t is_used : 1;
/** Is this IP address usage limited to the subnet (mesh) or not */
uint8_t is_mesh_local : 1;
/** Is this IP address temporary and generated for example by
* IPv6 privacy extension (RFC 8981)
*/
uint8_t is_temporary : 1;
uint8_t _unused : 4;
};
/**
* @brief Network Interface multicast IP addresses
*
* Stores the multicast IP addresses assigned to this network interface.
*/
struct net_if_mcast_addr {
/** IP address */
struct net_addr address;
#if defined(CONFIG_NET_IPV4_IGMPV3)
/** Sources to filter on */
struct net_addr sources[CONFIG_NET_IF_MCAST_IPV4_SOURCE_COUNT];
/** Number of sources to be used by the filter */
uint16_t sources_len;
/** Filter mode (used in IGMPV3) */
uint8_t record_type;
#endif
/** Is this multicast IP address used or not */
uint8_t is_used : 1;
/** Did we join to this group */
uint8_t is_joined : 1;
uint8_t _unused : 6;
};
/**
* @brief Network Interface IPv6 prefixes
*
* Stores the IPV6 prefixes assigned to this network interface.
*/
struct net_if_ipv6_prefix {
/** Prefix lifetime */
struct net_timeout lifetime;
/** IPv6 prefix */
struct in6_addr prefix;
/** Backpointer to network interface where this prefix is used */
struct net_if *iface;
/** Prefix length */
uint8_t len;
/** Is the IP prefix valid forever */
uint8_t is_infinite : 1;
/** Is this prefix used or not */
uint8_t is_used : 1;
uint8_t _unused : 6;
};
/**
* @brief Information about routers in the system.
*
* Stores the router information.
*/
struct net_if_router {
/** Slist lifetime timer node */
sys_snode_t node;
/** IP address */
struct net_addr address;
/** Network interface the router is connected to */
struct net_if *iface;
/** Router life timer start */
uint32_t life_start;
/** Router lifetime */
uint16_t lifetime;
/** Is this router used or not */
uint8_t is_used : 1;
/** Is default router */
uint8_t is_default : 1;
/** Is the router valid forever */
uint8_t is_infinite : 1;
uint8_t _unused : 5;
};
/** Network interface flags. */
enum net_if_flag {
/** Interface is admin up. */
NET_IF_UP,
/** Interface is pointopoint */
NET_IF_POINTOPOINT,
/** Interface is in promiscuous mode */
NET_IF_PROMISC,
/** Do not start the interface immediately after initialization.
* This requires that either the device driver or some other entity
* will need to manually take the interface up when needed.
* For example for Ethernet this will happen when the driver calls
* the net_eth_carrier_on() function.
*/
NET_IF_NO_AUTO_START,
/** Power management specific: interface is being suspended */
NET_IF_SUSPENDED,
/** Flag defines if received multicasts of other interface are
* forwarded on this interface. This activates multicast
* routing / forwarding for this interface.
*/
NET_IF_FORWARD_MULTICASTS,
/** Interface supports IPv4 */
NET_IF_IPV4,
/** Interface supports IPv6 */
NET_IF_IPV6,
/** Interface up and running (ready to receive and transmit). */
NET_IF_RUNNING,
/** Driver signals L1 is up. */
NET_IF_LOWER_UP,
/** Driver signals dormant. */
NET_IF_DORMANT,
/** IPv6 Neighbor Discovery disabled. */
NET_IF_IPV6_NO_ND,
/** IPv6 Multicast Listener Discovery disabled. */
NET_IF_IPV6_NO_MLD,
/** Mutex locking on TX data path disabled on the interface. */
NET_IF_NO_TX_LOCK,
/** @cond INTERNAL_HIDDEN */
/* Total number of flags - must be at the end of the enum */
NET_IF_NUM_FLAGS
/** @endcond */
};
/** @brief Network interface operational status (RFC 2863, ifOperStatus). */
enum net_if_oper_state {
NET_IF_OPER_UNKNOWN, /**< Initial (unknown) value */
NET_IF_OPER_NOTPRESENT, /**< Hardware missing */
NET_IF_OPER_DOWN, /**< Interface is down */
NET_IF_OPER_LOWERLAYERDOWN, /**< Lower layer interface is down */
NET_IF_OPER_TESTING, /**< Interface is in testing mode */
NET_IF_OPER_DORMANT, /**< Waiting external action */
NET_IF_OPER_UP, /**< Interface is up */
} __packed;
#if defined(CONFIG_NET_OFFLOAD)
struct net_offload;
#endif /* CONFIG_NET_OFFLOAD */
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_NET_NATIVE_IPV6)
#define NET_IF_MAX_IPV6_ADDR CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT
#define NET_IF_MAX_IPV6_MADDR CONFIG_NET_IF_MCAST_IPV6_ADDR_COUNT
#define NET_IF_MAX_IPV6_PREFIX CONFIG_NET_IF_IPV6_PREFIX_COUNT
#else
#define NET_IF_MAX_IPV6_ADDR 0
#define NET_IF_MAX_IPV6_MADDR 0
#define NET_IF_MAX_IPV6_PREFIX 0
#endif
/** @endcond */
/** IPv6 configuration */
struct net_if_ipv6 {
/** Unicast IP addresses */
struct net_if_addr unicast[NET_IF_MAX_IPV6_ADDR];
/** Multicast IP addresses */
struct net_if_mcast_addr mcast[NET_IF_MAX_IPV6_MADDR];
/** Prefixes */
struct net_if_ipv6_prefix prefix[NET_IF_MAX_IPV6_PREFIX];
/** Default reachable time (RFC 4861, page 52) */
uint32_t base_reachable_time;
/** Reachable time (RFC 4861, page 20) */
uint32_t reachable_time;
/** Retransmit timer (RFC 4861, page 52) */
uint32_t retrans_timer;
#if defined(CONFIG_NET_IPV6_PE)
/** Privacy extension DESYNC_FACTOR value from RFC 8981 ch 3.4.
 * "DESYNC_FACTOR is a random value within the range 0 - MAX_DESYNC_FACTOR.
 * It is computed every time a temporary address is created."
 */
uint32_t desync_factor;
#endif /* CONFIG_NET_IPV6_PE */
#if defined(CONFIG_NET_IPV6_ND) && defined(CONFIG_NET_NATIVE_IPV6)
/** Router solicitation timer node */
sys_snode_t rs_node;
/** Router solicitation start time */
uint32_t rs_start;
/** RS count */
uint8_t rs_count;
#endif
/** IPv6 hop limit */
uint8_t hop_limit;
/** IPv6 multicast hop limit */
uint8_t mcast_hop_limit;
};
#if defined(CONFIG_NET_DHCPV6) && defined(CONFIG_NET_NATIVE_IPV6)
/** DHCPv6 configuration */
struct net_if_dhcpv6 {
/** Used for timer list. */
sys_snode_t node;
/** Generated Client ID. */
struct net_dhcpv6_duid_storage clientid;
/** Server ID of the selected server. */
struct net_dhcpv6_duid_storage serverid;
/** DHCPv6 client state. */
enum net_dhcpv6_state state;
/** DHCPv6 client configuration parameters. */
struct net_dhcpv6_params params;
/** Timeout for the next event, absolute time, milliseconds. */
uint64_t timeout;
/** Time of the current exchange start, absolute time, milliseconds */
uint64_t exchange_start;
/** Renewal time, absolute time, milliseconds. */
uint64_t t1;
/** Rebinding time, absolute time, milliseconds. */
uint64_t t2;
/** The time when the last lease expires (terminates rebinding,
* DHCPv6 RFC8415, ch. 18.2.5). Absolute time, milliseconds.
*/
uint64_t expire;
/** Generated IAID for IA_NA. */
uint32_t addr_iaid;
/** Generated IAID for IA_PD. */
uint32_t prefix_iaid;
/** Retransmit timeout for the current message, milliseconds. */
uint32_t retransmit_timeout;
/** Current best server preference received. */
int16_t server_preference;
/** Retransmission counter. */
uint8_t retransmissions;
/** Transaction ID for current exchange. */
uint8_t tid[DHCPV6_TID_SIZE];
/** Prefix length. */
uint8_t prefix_len;
/** Assigned IPv6 prefix. */
struct in6_addr prefix;
/** Assigned IPv6 address. */
struct in6_addr addr;
};
#endif /* defined(CONFIG_NET_DHCPV6) && defined(CONFIG_NET_NATIVE_IPV6) */
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_NET_NATIVE_IPV4)
#define NET_IF_MAX_IPV4_ADDR CONFIG_NET_IF_UNICAST_IPV4_ADDR_COUNT
#define NET_IF_MAX_IPV4_MADDR CONFIG_NET_IF_MCAST_IPV4_ADDR_COUNT
#else
#define NET_IF_MAX_IPV4_ADDR 0
#define NET_IF_MAX_IPV4_MADDR 0
#endif
/** @endcond */
/**
* @brief Network Interface unicast IPv4 address and netmask
*
* Stores the unicast IPv4 address and related netmask.
*/
struct net_if_addr_ipv4 {
/** IPv4 address */
struct net_if_addr ipv4;
/** Netmask */
struct in_addr netmask;
};
/** IPv4 configuration */
struct net_if_ipv4 {
/** Unicast IP addresses */
struct net_if_addr_ipv4 unicast[NET_IF_MAX_IPV4_ADDR];
/** Multicast IP addresses */
struct net_if_mcast_addr mcast[NET_IF_MAX_IPV4_MADDR];
/** Gateway */
struct in_addr gw;
/** IPv4 time-to-live */
uint8_t ttl;
/** IPv4 time-to-live for multicast packets */
uint8_t mcast_ttl;
#if defined(CONFIG_NET_IPV4_ACD)
/** IPv4 conflict count. */
uint8_t conflict_cnt;
#endif
};
#if defined(CONFIG_NET_DHCPV4) && defined(CONFIG_NET_NATIVE_IPV4)
/** DHCPv4 client state for a network interface */
struct net_if_dhcpv4 {
/** Used for timer lists */
sys_snode_t node;
/** Timer start */
int64_t timer_start;
/** Time for INIT, DISCOVER, REQUESTING, RENEWAL */
uint32_t request_time;
/** Transaction ID (xid) of the current DHCPv4 exchange */
uint32_t xid;
/** IP address Lease time */
uint32_t lease_time;
/** IP address Renewal time */
uint32_t renewal_time;
/** IP address Rebinding time */
uint32_t rebinding_time;
/** Server ID */
struct in_addr server_id;
/** Requested IP addr */
struct in_addr requested_ip;
/** Received netmask from the server */
struct in_addr netmask;
/**
 * DHCPv4 client state in the process of network
 * address allocation.
 */
enum net_dhcpv4_state state;
/** Number of attempts made for REQUEST and RENEWAL messages */
uint8_t attempts;
/** The address of the server the request is sent to */
struct in_addr request_server_addr;
/** The source address of a received DHCP message */
struct in_addr response_src_addr;
#ifdef CONFIG_NET_DHCPV4_OPTION_NTP_SERVER
/** NTP server address */
struct in_addr ntp_addr;
#endif
};
#endif /* CONFIG_NET_DHCPV4 */
#if defined(CONFIG_NET_IPV4_AUTO) && defined(CONFIG_NET_NATIVE_IPV4)
struct net_if_ipv4_autoconf {
/** Backpointer to correct network interface */
struct net_if *iface;
/** Requested IP addr */
struct in_addr requested_ip;
/** IPV4 Autoconf state in the process of network address allocation.
*/
enum net_ipv4_autoconf_state state;
};
#endif /* CONFIG_NET_IPV4_AUTO */
/** @cond INTERNAL_HIDDEN */
/* We always need to have at least one IP config */
#define NET_IF_MAX_CONFIGS 1
/** @endcond */
/**
* @brief Network interface IP address configuration.
*/
struct net_if_ip {
#if defined(CONFIG_NET_NATIVE_IPV6)
struct net_if_ipv6 *ipv6;
#endif /* CONFIG_NET_IPV6 */
#if defined(CONFIG_NET_NATIVE_IPV4)
struct net_if_ipv4 *ipv4;
#endif /* CONFIG_NET_IPV4 */
};
/**
 * @brief IP and other configuration related data for network interface.
 */
struct net_if_config {
#if defined(CONFIG_NET_IP)
	/** IP address configuration setting */
	struct net_if_ip ip;
#endif
#if defined(CONFIG_NET_DHCPV4) && defined(CONFIG_NET_NATIVE_IPV4)
	/** DHCPv4 client state for this interface */
	struct net_if_dhcpv4 dhcpv4;
#endif /* CONFIG_NET_DHCPV4 */
#if defined(CONFIG_NET_DHCPV6) && defined(CONFIG_NET_NATIVE_IPV6)
	/** DHCPv6 client state for this interface */
	struct net_if_dhcpv6 dhcpv6;
#endif /* CONFIG_NET_DHCPV6 */
#if defined(CONFIG_NET_IPV4_AUTO) && defined(CONFIG_NET_NATIVE_IPV4)
	/** IPv4 autoconfiguration state for this interface */
	struct net_if_ipv4_autoconf ipv4auto;
#endif /* CONFIG_NET_IPV4_AUTO */
#if defined(CONFIG_NET_L2_VIRTUAL)
	/**
	 * This list keeps track of the virtual network interfaces
	 * that are attached to this network interface.
	 */
	sys_slist_t virtual_interfaces;
#endif /* CONFIG_NET_L2_VIRTUAL */
#if defined(CONFIG_NET_INTERFACE_NAME)
	/**
	 * Network interface can have a name and it is possible
	 * to search a network interface using this name.
	 */
	char name[CONFIG_NET_INTERFACE_NAME_LEN + 1];
#endif
};
/**
 * @brief Network traffic class.
 *
 * Traffic classes are used when sending or receiving data that is classified
 * with different priorities. So some traffic can be marked as high priority
 * and it will be sent or received first. Each network packet that is
 * transmitted or received goes through a fifo to a thread that will transmit
 * it.
 */
struct net_traffic_class {
	/** Fifo for handling this Tx or Rx packet */
	struct k_fifo fifo;
	/** Traffic class handler thread */
	struct k_thread handler;
	/** Stack for this handler */
	/* NOTE(review): presumably one instance exists per configured
	 * TX/RX queue -- confirm against the traffic class implementation.
	 */
	k_thread_stack_t *stack;
};
/**
 * @typedef net_socket_create_t
 * @brief A function prototype to create an offloaded socket. The prototype is
 * compatible with socket() function, i.e. the arguments are
 * (family, type, proto).
 */
typedef int (*net_socket_create_t)(int, int, int);
/**
 * @brief Network Interface Device structure
 *
 * Used to handle a network interface on top of a device driver instance.
 * There can be many net_if_dev instances for the same device.
 *
 * Such interface is mainly to be used by the link layer, but is also tied
 * to a network context: it then makes the relation with a network context
 * and the network device.
 *
 * Because of the strong relationship between a device driver and such
 * network interface, each net_if_dev should be instantiated by one of the
 * network device init macros found in net_if.h.
 */
struct net_if_dev {
	/** The actually device driver instance the net_if is related to */
	const struct device *dev;
	/** Interface's L2 layer */
	const struct net_l2 * const l2;
	/** Interface's private L2 data pointer */
	void *l2_data;
	/** For internal use */
	ATOMIC_DEFINE(flags, NET_IF_NUM_FLAGS);
	/** The hardware link address */
	struct net_linkaddr link_addr;
#if defined(CONFIG_NET_OFFLOAD)
	/** TCP/IP Offload functions.
	 * If non-NULL, then the TCP/IP stack is located
	 * in the communication chip that is accessed via this
	 * network interface.
	 */
	struct net_offload *offload;
#endif /* CONFIG_NET_OFFLOAD */
	/** The hardware MTU */
	uint16_t mtu;
#if defined(CONFIG_NET_SOCKETS_OFFLOAD)
	/** A function pointer to create an offloaded socket.
	 * If non-NULL, the interface is considered offloaded at socket level.
	 */
	net_socket_create_t socket_offload;
#endif /* CONFIG_NET_SOCKETS_OFFLOAD */
	/** RFC 2863 operational status */
	enum net_if_oper_state oper_state;
};
/**
 * @brief Network Interface structure
 *
 * Used to handle a network interface on top of a net_if_dev instance.
 * There can be many net_if instances for the same net_if_dev instance.
 *
 */
struct net_if {
	/** The net_if_dev instance the net_if is related to */
	struct net_if_dev *if_dev;
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	/** Network statistics related to this network interface */
	struct net_stats stats;
#endif /* CONFIG_NET_STATISTICS_PER_INTERFACE */
	/** Network interface instance configuration */
	struct net_if_config config;
#if defined(CONFIG_NET_POWER_MANAGEMENT)
	/** Keep track of packets pending in traffic queues. This is
	 * needed to avoid putting network device driver to sleep if
	 * there are packets waiting to be sent.
	 */
	int tx_pending;
#endif
	/** Mutex protecting this network interface instance */
	struct k_mutex lock;
	/** Mutex used when sending data */
	struct k_mutex tx_lock;
	/* Network interface specific one-bit flags follow. */
	/** Enable IPv6 privacy extension (RFC 8981), this is enabled
	 * by default if PE support is enabled in configuration.
	 */
	uint8_t pe_enabled : 1;
	/** If PE is enabled, then this tells whether public addresses
	 * are preferred over temporary ones for this interface.
	 */
	uint8_t pe_prefer_public : 1;
	/** Unused bit flags (ignore) */
	uint8_t _unused : 6;
};
/** @cond INTERNAL_HIDDEN */
static inline void net_if_lock(struct net_if *iface)
{
NET_ASSERT(iface);
(void)k_mutex_lock(&iface->lock, K_FOREVER);
}
static inline void net_if_unlock(struct net_if *iface)
{
NET_ASSERT(iface);
k_mutex_unlock(&iface->lock);
}
static inline bool net_if_flag_is_set(struct net_if *iface,
enum net_if_flag value);
static inline void net_if_tx_lock(struct net_if *iface)
{
NET_ASSERT(iface);
if (net_if_flag_is_set(iface, NET_IF_NO_TX_LOCK)) {
return;
}
(void)k_mutex_lock(&iface->tx_lock, K_FOREVER);
}
static inline void net_if_tx_unlock(struct net_if *iface)
{
NET_ASSERT(iface);
if (net_if_flag_is_set(iface, NET_IF_NO_TX_LOCK)) {
return;
}
k_mutex_unlock(&iface->tx_lock);
}
/** @endcond */
/**
* @brief Set a value in network interface flags
*
* @param iface Pointer to network interface
* @param value Flag value
*/
static inline void net_if_flag_set(struct net_if *iface,
enum net_if_flag value)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
atomic_set_bit(iface->if_dev->flags, value);
}
/**
* @brief Test and set a value in network interface flags
*
* @param iface Pointer to network interface
* @param value Flag value
*
* @return true if the bit was set, false if it wasn't.
*/
static inline bool net_if_flag_test_and_set(struct net_if *iface,
enum net_if_flag value)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
return atomic_test_and_set_bit(iface->if_dev->flags, value);
}
/**
* @brief Clear a value in network interface flags
*
* @param iface Pointer to network interface
* @param value Flag value
*/
static inline void net_if_flag_clear(struct net_if *iface,
enum net_if_flag value)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
atomic_clear_bit(iface->if_dev->flags, value);
}
/**
* @brief Test and clear a value in network interface flags
*
* @param iface Pointer to network interface
* @param value Flag value
*
* @return true if the bit was set, false if it wasn't.
*/
static inline bool net_if_flag_test_and_clear(struct net_if *iface,
enum net_if_flag value)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
return atomic_test_and_clear_bit(iface->if_dev->flags, value);
}
/**
 * @brief Check if a value in network interface flags is set
 *
 * @param iface Pointer to network interface, may be NULL
 * @param value Flag value
 *
 * @return True if the value is set, false otherwise (including when
 *         @p iface is NULL)
 */
static inline bool net_if_flag_is_set(struct net_if *iface,
				      enum net_if_flag value)
{
	/* Handle NULL before any assert or dereference: the original code
	 * asserted on iface first, which defeated the NULL-tolerant return
	 * path whenever asserts were enabled.
	 */
	if (iface == NULL) {
		return false;
	}

	NET_ASSERT(iface->if_dev);

	return atomic_test_bit(iface->if_dev->flags, value);
}
/**
* @brief Set an operational state on an interface
*
* @param iface Pointer to network interface
* @param oper_state Operational state to set
*
* @return The new operational state of an interface
*/
static inline enum net_if_oper_state net_if_oper_state_set(
struct net_if *iface, enum net_if_oper_state oper_state)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
if (oper_state >= NET_IF_OPER_UNKNOWN && oper_state <= NET_IF_OPER_UP) {
iface->if_dev->oper_state = oper_state;
}
return iface->if_dev->oper_state;
}
/**
* @brief Get an operational state of an interface
*
* @param iface Pointer to network interface
*
* @return Operational state of an interface
*/
static inline enum net_if_oper_state net_if_oper_state(struct net_if *iface)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
return iface->if_dev->oper_state;
}
/**
* @brief Send a packet through a net iface
*
* @param iface Pointer to a network interface structure
* @param pkt Pointer to a net packet to send
*
 * @return verdict about the packet
*/
enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt);
/**
 * @brief Get a pointer to the interface L2
 *
 * @param iface a valid pointer to a network interface structure
 *
 * @return a pointer to the iface L2, or NULL if @p iface or its
 *         net_if_dev is NULL
 */
static inline const struct net_l2 *net_if_l2(struct net_if *iface)
{
	if (iface == NULL || iface->if_dev == NULL) {
		return NULL;
	}

	return iface->if_dev->l2;
}
/**
* @brief Input a packet through a net iface
*
* @param iface Pointer to a network interface structure
* @param pkt Pointer to a net packet to input
*
* @return verdict about the packet
*/
enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt);
/**
* @brief Get a pointer to the interface L2 private data
*
* @param iface a valid pointer to a network interface structure
*
* @return a pointer to the iface L2 data
*/
static inline void *net_if_l2_data(struct net_if *iface)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
return iface->if_dev->l2_data;
}
/**
* @brief Get an network interface's device
*
* @param iface Pointer to a network interface structure
*
* @return a pointer to the device driver instance
*/
static inline const struct device *net_if_get_device(struct net_if *iface)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
return iface->if_dev->dev;
}
/**
* @brief Queue a packet to the net interface TX queue
*
* @param iface Pointer to a network interface structure
* @param pkt Pointer to a net packet to queue
*/
void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt);
/**
 * @brief Return the IP offload status
 *
 * @param iface Network interface
 *
 * @return True if IP offloading is active, false otherwise.
 */
static inline bool net_if_is_ip_offloaded(struct net_if *iface)
{
#if defined(CONFIG_NET_OFFLOAD)
	if (iface == NULL || iface->if_dev == NULL) {
		return false;
	}

	return iface->if_dev->offload != NULL;
#else
	ARG_UNUSED(iface);

	return false;
#endif
}
/**
* @brief Return offload status of a given network interface.
*
* @param iface Network interface
*
* @return True if IP or socket offloading is active, false otherwise.
*/
bool net_if_is_offloaded(struct net_if *iface);
/**
 * @brief Return the IP offload plugin
 *
 * @param iface Network interface
 *
 * @return NULL if there is no offload plugin defined, valid pointer otherwise
 */
static inline struct net_offload *net_if_offload(struct net_if *iface)
{
#if defined(CONFIG_NET_OFFLOAD)
	NET_ASSERT(iface != NULL);

	struct net_if_dev *if_dev = iface->if_dev;

	NET_ASSERT(if_dev != NULL);

	return if_dev->offload;
#else
	ARG_UNUSED(iface);

	return NULL;
#endif
}
/**
 * @brief Return the socket offload status
 *
 * @param iface Network interface
 *
 * @return True if socket offloading is active, false otherwise.
 */
static inline bool net_if_is_socket_offloaded(struct net_if *iface)
{
#if defined(CONFIG_NET_SOCKETS_OFFLOAD)
	NET_ASSERT(iface != NULL);

	struct net_if_dev *if_dev = iface->if_dev;

	NET_ASSERT(if_dev != NULL);

	return if_dev->socket_offload != NULL;
#else
	ARG_UNUSED(iface);

	return false;
#endif
}
/**
 * @brief Set the function to create an offloaded socket
 *
 * @param iface Network interface
 * @param socket_offload A function to create an offloaded socket
 */
static inline void net_if_socket_offload_set(
	struct net_if *iface, net_socket_create_t socket_offload)
{
#if defined(CONFIG_NET_SOCKETS_OFFLOAD)
	NET_ASSERT(iface != NULL);

	struct net_if_dev *if_dev = iface->if_dev;

	NET_ASSERT(if_dev != NULL);

	if_dev->socket_offload = socket_offload;
#else
	ARG_UNUSED(iface);
	ARG_UNUSED(socket_offload);
#endif
}
/**
 * @brief Return the function to create an offloaded socket
 *
 * @param iface Network interface
 *
 * @return NULL if the interface is not socket offloaded, valid pointer otherwise
 */
static inline net_socket_create_t net_if_socket_offload(struct net_if *iface)
{
#if defined(CONFIG_NET_SOCKETS_OFFLOAD)
	NET_ASSERT(iface != NULL);

	struct net_if_dev *if_dev = iface->if_dev;

	NET_ASSERT(if_dev != NULL);

	return if_dev->socket_offload;
#else
	ARG_UNUSED(iface);

	return NULL;
#endif
}
/**
* @brief Get an network interface's link address
*
* @param iface Pointer to a network interface structure
*
* @return a pointer to the network link address
*/
static inline struct net_linkaddr *net_if_get_link_addr(struct net_if *iface)
{
NET_ASSERT(iface);
NET_ASSERT(iface->if_dev);
return &iface->if_dev->link_addr;
}
/**
* @brief Return network configuration for this network interface
*
* @param iface Pointer to a network interface structure
*
* @return Pointer to configuration
*/
static inline struct net_if_config *net_if_get_config(struct net_if *iface)
{
NET_ASSERT(iface);
return &iface->config;
}
/**
* @brief Start duplicate address detection procedure.
*
* @param iface Pointer to a network interface structure
*/
#if defined(CONFIG_NET_IPV6_DAD) && defined(CONFIG_NET_NATIVE_IPV6)
void net_if_start_dad(struct net_if *iface);
#else
static inline void net_if_start_dad(struct net_if *iface)
{
	/* DAD support disabled in this configuration; nothing to do. */
	ARG_UNUSED(iface);
}
#endif
/**
* @brief Start neighbor discovery and send router solicitation message.
*
* @param iface Pointer to a network interface structure
*/
void net_if_start_rs(struct net_if *iface);
/**
* @brief Stop neighbor discovery.
*
* @param iface Pointer to a network interface structure
*/
#if defined(CONFIG_NET_IPV6_ND) && defined(CONFIG_NET_NATIVE_IPV6)
void net_if_stop_rs(struct net_if *iface);
#else
static inline void net_if_stop_rs(struct net_if *iface)
{
	/* IPv6 ND support disabled in this configuration; nothing to do. */
	ARG_UNUSED(iface);
}
#endif /* CONFIG_NET_IPV6_ND */
/**
* @brief Provide a reachability hint for IPv6 Neighbor Discovery.
*
* This function is intended for upper-layer protocols to inform the IPv6
* Neighbor Discovery process about an active link to a specific neighbor.
* By signaling a recent "forward progress" event, such as the reception of
* an ACK, this function can help reduce unnecessary ND traffic as per the
* guidelines in RFC 4861 (section 7.3).
*
* @param iface A pointer to the network interface.
* @param ipv6_addr Pointer to the IPv6 address of the neighbor node.
*/
#if defined(CONFIG_NET_IPV6_ND) && defined(CONFIG_NET_NATIVE_IPV6)
void net_if_nbr_reachability_hint(struct net_if *iface, const struct in6_addr *ipv6_addr);
#else
static inline void net_if_nbr_reachability_hint(struct net_if *iface,
						const struct in6_addr *ipv6_addr)
{
	/* IPv6 ND support disabled in this configuration; hint is discarded. */
	ARG_UNUSED(iface);
	ARG_UNUSED(ipv6_addr);
}
#endif
/** @cond INTERNAL_HIDDEN */
/* Set the link-layer address without taking the interface lock.
 * Fails with -EPERM when the interface is already running, since the
 * address cannot change underneath active traffic.
 */
static inline int net_if_set_link_addr_unlocked(struct net_if *iface,
						uint8_t *addr, uint8_t len,
						enum net_link_type type)
{
	struct net_linkaddr *ll_addr;

	if (net_if_flag_is_set(iface, NET_IF_RUNNING)) {
		return -EPERM;
	}

	ll_addr = net_if_get_link_addr(iface);
	ll_addr->addr = addr;
	ll_addr->len = len;
	ll_addr->type = type;

	net_hostname_set_postfix(addr, len);

	return 0;
}
int net_if_set_link_addr_locked(struct net_if *iface,
uint8_t *addr, uint8_t len,
enum net_link_type type);
#if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
extern int net_if_addr_unref_debug(struct net_if *iface,
sa_family_t family,
const void *addr,
const char *caller, int line);
#define net_if_addr_unref(iface, family, addr) \
net_if_addr_unref_debug(iface, family, addr, __func__, __LINE__)
extern struct net_if_addr *net_if_addr_ref_debug(struct net_if *iface,
sa_family_t family,
const void *addr,
const char *caller,
int line);
#define net_if_addr_ref(iface, family, addr) \
net_if_addr_ref_debug(iface, family, addr, __func__, __LINE__)
#else
extern int net_if_addr_unref(struct net_if *iface,
sa_family_t family,
const void *addr);
extern struct net_if_addr *net_if_addr_ref(struct net_if *iface,
sa_family_t family,
const void *addr);
#endif /* CONFIG_NET_IF_LOG_LEVEL */
/** @endcond */
/**
* @brief Set a network interface's link address
*
* @param iface Pointer to a network interface structure
* @param addr A pointer to a uint8_t buffer representing the address.
* The buffer must remain valid throughout interface lifetime.
* @param len length of the address buffer
* @param type network bearer type of this link address
*
* @return 0 on success
*/
static inline int net_if_set_link_addr(struct net_if *iface,
				       uint8_t *addr, uint8_t len,
				       enum net_link_type type)
{
#if defined(CONFIG_NET_RAW_MODE)
	/* In raw mode there is no interface locking; write directly. */
	return net_if_set_link_addr_unlocked(iface, addr, len, type);
#else
	/* Normal mode serializes the update against concurrent users. */
	return net_if_set_link_addr_locked(iface, addr, len, type);
#endif
}
/**
* @brief Get an network interface's MTU
*
* @param iface Pointer to a network interface structure
*
* @return the MTU
*/
static inline uint16_t net_if_get_mtu(struct net_if *iface)
{
if (iface == NULL) {
return 0U;
}
NET_ASSERT(iface->if_dev);
return iface->if_dev->mtu;
}
/**
 * @brief Set a network interface's MTU
 *
 * @param iface Pointer to a network interface structure; NULL is ignored
 * @param mtu New MTU, note that we store only 16 bit mtu value.
 */
static inline void net_if_set_mtu(struct net_if *iface,
				  uint16_t mtu)
{
	if (iface == NULL) {
		return;
	}

	struct net_if_dev *if_dev = iface->if_dev;

	NET_ASSERT(if_dev != NULL);

	if_dev->mtu = mtu;
}
/**
 * @brief Set the infinite status of the network interface address
 *
 * @param ifaddr IP address for network interface
 * @param is_infinite Infinite status
 */
static inline void net_if_addr_set_lf(struct net_if_addr *ifaddr,
				      bool is_infinite)
{
	NET_ASSERT(ifaddr != NULL);

	ifaddr->is_infinite = is_infinite;
}
/**
* @brief Get an interface according to link layer address.
*
* @param ll_addr Link layer address.
*
* @return Network interface or NULL if not found.
*/
struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr);
/**
 * @brief Find an interface from its related device
*
* @param dev A valid struct device pointer to relate with an interface
*
* @return a valid struct net_if pointer on success, NULL otherwise
*/
struct net_if *net_if_lookup_by_dev(const struct device *dev);
/**
* @brief Get network interface IP config
*
* @param iface Interface to use.
*
* @return NULL if not found or pointer to correct config settings.
*/
static inline struct net_if_config *net_if_config_get(struct net_if *iface)
{
NET_ASSERT(iface);
return &iface->config;
}
/**
* @brief Remove a router from the system
*
* @param router Pointer to existing router
*/
void net_if_router_rm(struct net_if_router *router);
/**
* @brief Set the default network interface.
*
* @param iface New default interface, or NULL to revert to the one set by Kconfig.
*/
void net_if_set_default(struct net_if *iface);
/**
* @brief Get the default network interface.
*
* @return Default interface or NULL if no interfaces are configured.
*/
struct net_if *net_if_get_default(void);
/**
* @brief Get the first network interface according to its type.
*
* @param l2 Layer 2 type of the network interface.
*
* @return First network interface of a given type or NULL if no such
 * interface was found.
*/
struct net_if *net_if_get_first_by_type(const struct net_l2 *l2);
/**
* @brief Get the first network interface which is up.
*
* @return First network interface which is up or NULL if all
* interfaces are down.
*/
struct net_if *net_if_get_first_up(void);
#if defined(CONFIG_NET_L2_IEEE802154)
/**
 * @brief Get the first IEEE 802.15.4 network interface.
 *
 * @return First IEEE 802.15.4 network interface or NULL if no such
 * interface was found.
 */
static inline struct net_if *net_if_get_ieee802154(void)
{
	const struct net_l2 *l2 = &NET_L2_GET_NAME(IEEE802154);

	return net_if_get_first_by_type(l2);
}
#endif /* CONFIG_NET_L2_IEEE802154 */
/**
* @brief Allocate network interface IPv6 config.
*
* @details This function will allocate new IPv6 config.
*
* @param iface Interface to use.
* @param ipv6 Pointer to allocated IPv6 struct is returned to caller.
*
* @return 0 if ok, <0 if error
*/
int net_if_config_ipv6_get(struct net_if *iface,
struct net_if_ipv6 **ipv6);
/**
* @brief Release network interface IPv6 config.
*
* @param iface Interface to use.
*
* @return 0 if ok, <0 if error
*/
int net_if_config_ipv6_put(struct net_if *iface);
/**
* @brief Check if this IPv6 address belongs to one of the interfaces.
*
* @param addr IPv6 address
* @param iface Pointer to interface is returned
*
* @return Pointer to interface address, NULL if not found.
*/
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
struct net_if **iface);
/**
* @brief Check if this IPv6 address belongs to this specific interfaces.
*
* @param iface Network interface
* @param addr IPv6 address
*
* @return Pointer to interface address, NULL if not found.
*/
struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
struct in6_addr *addr);
/**
* @brief Check if this IPv6 address belongs to one of the interface indices.
*
* @param addr IPv6 address
*
* @return >0 if address was found in given network interface index,
* all other values mean address was not found
*/
__syscall int net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr);
/**
* @brief Add a IPv6 address to an interface
*
* @param iface Network interface
* @param addr IPv6 address
* @param addr_type IPv6 address type
* @param vlifetime Validity time for this address
*
* @return Pointer to interface address, NULL if cannot be added
*/
struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
struct in6_addr *addr,
enum net_addr_type addr_type,
uint32_t vlifetime);
/**
* @brief Add a IPv6 address to an interface by index
*
* @param index Network interface index
* @param addr IPv6 address
* @param addr_type IPv6 address type
* @param vlifetime Validity time for this address
*
* @return True if ok, false if address could not be added
*/
__syscall bool net_if_ipv6_addr_add_by_index(int index,
struct in6_addr *addr,
enum net_addr_type addr_type,
uint32_t vlifetime);
/**
* @brief Update validity lifetime time of an IPv6 address.
*
* @param ifaddr Network IPv6 address
* @param vlifetime Validity time for this address
*/
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
uint32_t vlifetime);
/**
* @brief Remove an IPv6 address from an interface
*
* @param iface Network interface
* @param addr IPv6 address
*
* @return True if successfully removed, false otherwise
*/
bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr);
/**
* @brief Remove an IPv6 address from an interface by index
*
* @param index Network interface index
* @param addr IPv6 address
*
* @return True if successfully removed, false otherwise
*/
__syscall bool net_if_ipv6_addr_rm_by_index(int index,
const struct in6_addr *addr);
/**
* @typedef net_if_ip_addr_cb_t
* @brief Callback used while iterating over network interface IP addresses
*
* @param iface Pointer to the network interface the address belongs to
* @param addr Pointer to current IP address
* @param user_data A valid pointer to user data or NULL
*/
typedef void (*net_if_ip_addr_cb_t)(struct net_if *iface,
struct net_if_addr *addr,
void *user_data);
/**
* @brief Go through all IPv6 addresses on a network interface and call callback
* for each used address.
*
* @param iface Pointer to the network interface
* @param cb User-supplied callback function to call
* @param user_data User specified data
*/
void net_if_ipv6_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
void *user_data);
/**
* @brief Add a IPv6 multicast address to an interface
*
* @param iface Network interface
* @param addr IPv6 multicast address
*
* @return Pointer to interface multicast address, NULL if cannot be added
*/
struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
const struct in6_addr *addr);
/**
* @brief Remove an IPv6 multicast address from an interface
*
* @param iface Network interface
* @param addr IPv6 multicast address
*
* @return True if successfully removed, false otherwise
*/
bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr);
/**
* @typedef net_if_ip_maddr_cb_t
* @brief Callback used while iterating over network interface multicast IP addresses
*
* @param iface Pointer to the network interface the address belongs to
* @param maddr Pointer to current multicast IP address
* @param user_data A valid pointer to user data or NULL
*/
typedef void (*net_if_ip_maddr_cb_t)(struct net_if *iface,
struct net_if_mcast_addr *maddr,
void *user_data);
/**
* @brief Go through all IPv6 multicast addresses on a network interface and call
* callback for each used address.
*
* @param iface Pointer to the network interface
* @param cb User-supplied callback function to call
* @param user_data User specified data
*/
void net_if_ipv6_maddr_foreach(struct net_if *iface, net_if_ip_maddr_cb_t cb,
void *user_data);
/**
* @brief Check if this IPv6 multicast address belongs to a specific interface
* or one of the interfaces.
*
* @param addr IPv6 address
* @param iface If *iface is null, then pointer to interface is returned,
* otherwise the *iface value needs to be matched.
*
* @return Pointer to interface multicast address, NULL if not found.
*/
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
struct net_if **iface);
/**
* @typedef net_if_mcast_callback_t
* @brief Define a callback that is called whenever a IPv6 or IPv4 multicast
* address group is joined or left.
* @param iface A pointer to a struct net_if to which the multicast address is
* attached.
* @param addr IP multicast address.
* @param is_joined True if the multicast group is joined, false if group is left.
*/
typedef void (*net_if_mcast_callback_t)(struct net_if *iface,
const struct net_addr *addr,
bool is_joined);
/**
* @brief Multicast monitor handler struct.
*
* Stores the multicast callback information. Caller must make sure that
* the variable pointed by this is valid during the lifetime of
* registration. Typically this means that the variable cannot be
* allocated from stack.
*/
struct net_if_mcast_monitor {
/** Node information for the slist. */
sys_snode_t node;
/** Network interface */
struct net_if *iface;
/** Multicast callback */
net_if_mcast_callback_t cb;
};
/**
* @brief Register a multicast monitor
*
* @param mon Monitor handle. This is a pointer to a monitor storage structure
* which should be allocated by caller, but does not need to be initialized.
* @param iface Network interface or NULL for all interfaces
* @param cb Monitor callback
*/
void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
struct net_if *iface,
net_if_mcast_callback_t cb);
/**
* @brief Unregister a multicast monitor
*
* @param mon Monitor handle
*/
void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon);
/**
* @brief Call registered multicast monitors
*
* @param iface Network interface
* @param addr Multicast address
* @param is_joined Is this multicast address group joined (true) or not (false)
*/
void net_if_mcast_monitor(struct net_if *iface, const struct net_addr *addr,
bool is_joined);
/**
* @brief Mark a given multicast address to be joined.
*
* @param iface Network interface the address belongs to
* @param addr IPv6 multicast address
*/
void net_if_ipv6_maddr_join(struct net_if *iface,
struct net_if_mcast_addr *addr);
/**
* @brief Check if given multicast address is joined or not.
*
* @param addr IPv6 multicast address
*
* @return True if address is joined, False otherwise.
*/
static inline bool net_if_ipv6_maddr_is_joined(struct net_if_mcast_addr *addr)
{
NET_ASSERT(addr);
return addr->is_joined;
}
/**
* @brief Mark a given multicast address to be left.
*
* @param iface Network interface the address belongs to
* @param addr IPv6 multicast address
*/
void net_if_ipv6_maddr_leave(struct net_if *iface,
struct net_if_mcast_addr *addr);
/**
* @brief Return prefix that corresponds to this IPv6 address.
*
* @param iface Network interface
* @param addr IPv6 address
*
* @return Pointer to prefix, NULL if not found.
*/
struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
const struct in6_addr *addr);
/**
* @brief Check if this IPv6 prefix belongs to this interface
*
* @param iface Network interface
* @param addr IPv6 address
* @param len Prefix length
*
* @return Pointer to prefix, NULL if not found.
*/
struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
struct in6_addr *addr,
uint8_t len);
/**
* @brief Add a IPv6 prefix to an network interface.
*
* @param iface Network interface
* @param prefix IPv6 address
* @param len Prefix length
* @param lifetime Prefix lifetime in seconds
*
* @return Pointer to prefix, NULL if the prefix was not added.
*/
struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
struct in6_addr *prefix,
uint8_t len,
uint32_t lifetime);
/**
* @brief Remove an IPv6 prefix from an interface
*
* @param iface Network interface
* @param addr IPv6 prefix address
* @param len Prefix length
*
* @return True if successfully removed, false otherwise
*/
bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
uint8_t len);
/**
 * @brief Set the infinite status of the prefix
 *
 * @param prefix IPv6 address
 * @param is_infinite Infinite status
 */
static inline void net_if_ipv6_prefix_set_lf(struct net_if_ipv6_prefix *prefix,
					     bool is_infinite)
{
	/* Assert before dereference, consistent with net_if_addr_set_lf(). */
	NET_ASSERT(prefix);

	prefix->is_infinite = is_infinite;
}
/**
* @brief Set the prefix lifetime timer.
*
* @param prefix IPv6 address
* @param lifetime Prefix lifetime in seconds
*/
void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
uint32_t lifetime);
/**
* @brief Unset the prefix lifetime timer.
*
* @param prefix IPv6 address
*/
void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix);
/**
* @brief Check if this IPv6 address is part of the subnet of our
* network interface.
*
* @param iface Network interface. This is returned to the caller.
* The iface can be NULL in which case we check all the interfaces.
* @param addr IPv6 address
*
* @return True if address is part of our subnet, false otherwise
*/
bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr);
/**
 * @brief Get the IPv6 address of the given router
 * @param router a network router
 *
 * @return pointer to the IPv6 address, or NULL if none
 */
#if defined(CONFIG_NET_NATIVE_IPV6)
static inline struct in6_addr *net_if_router_ipv6(struct net_if_router *router)
{
	NET_ASSERT(router != NULL);

	return &router->address.in6_addr;
}
#else
static inline struct in6_addr *net_if_router_ipv6(struct net_if_router *router)
{
	/* Native IPv6 disabled: hand back a stable all-zero placeholder. */
	static struct in6_addr addr;

	ARG_UNUSED(router);

	return &addr;
}
#endif
/**
* @brief Check if IPv6 address is one of the routers configured
* in the system.
*
* @param iface Network interface
* @param addr IPv6 address
*
* @return Pointer to router information, NULL if cannot be found
*/
struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
struct in6_addr *addr);
/**
* @brief Find default router for this IPv6 address.
*
* @param iface Network interface. This can be NULL in which case we
* go through all the network interfaces to find a suitable router.
* @param addr IPv6 address
*
* @return Pointer to router information, NULL if cannot be found
*/
struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
struct in6_addr *addr);
/**
* @brief Update validity lifetime time of a router.
*
* @param router Network IPv6 address
* @param lifetime Lifetime of this router.
*/
void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
uint16_t lifetime);
/**
* @brief Add IPv6 router to the system.
*
* @param iface Network interface
* @param addr IPv6 address
* @param router_lifetime Lifetime of the router
*
* @return Pointer to router information, NULL if could not be added
*/
struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
struct in6_addr *addr,
uint16_t router_lifetime);
/**
* @brief Remove IPv6 router from the system.
*
* @param router Router information.
*
* @return True if successfully removed, false otherwise
*/
bool net_if_ipv6_router_rm(struct net_if_router *router);
/**
* @brief Get IPv6 hop limit specified for a given interface. This is the
* default value but can be overridden by the user.
*
* @param iface Network interface
*
* @return Hop limit
*/
uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface);
/**
* @brief Set the default IPv6 hop limit of a given interface.
*
* @param iface Network interface
* @param hop_limit New hop limit
*/
void net_if_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit);
/** @cond INTERNAL_HIDDEN */
/* The old hop limit setter function is deprecated because the naming
* of it was incorrect. The API name was missing "_if_" so this function
* should not be used.
*/
__deprecated
static inline void net_ipv6_set_hop_limit(struct net_if *iface,
					  uint8_t hop_limit)
{
	/* Thin wrapper kept only for backward compatibility. */
	net_if_ipv6_set_hop_limit(iface, hop_limit);
}
/** @endcond */
/**
* @brief Get IPv6 multicast hop limit specified for a given interface. This is the
* default value but can be overridden by the user.
*
* @param iface Network interface
*
* @return Hop limit
*/
uint8_t net_if_ipv6_get_mcast_hop_limit(struct net_if *iface);
/**
* @brief Set the default IPv6 multicast hop limit of a given interface.
*
* @param iface Network interface
* @param hop_limit New hop limit
*/
void net_if_ipv6_set_mcast_hop_limit(struct net_if *iface, uint8_t hop_limit);
/**
* @brief Set IPv6 reachable time for a given interface
*
* @param iface Network interface
* @param reachable_time New reachable time
*/
static inline void net_if_ipv6_set_base_reachable_time(struct net_if *iface,
						       uint32_t reachable_time)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	struct net_if_ipv6 *ipv6;

	NET_ASSERT(iface);

	/* Nothing to store when no IPv6 config is attached. */
	ipv6 = iface->config.ip.ipv6;
	if (ipv6 == NULL) {
		return;
	}

	ipv6->base_reachable_time = reachable_time;
#else
	ARG_UNUSED(iface);
	ARG_UNUSED(reachable_time);
#endif
}
/**
* @brief Get IPv6 reachable timeout specified for a given interface
*
* @param iface Network interface
*
* @return Reachable timeout
*/
static inline uint32_t net_if_ipv6_get_reachable_time(struct net_if *iface)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	struct net_if_ipv6 *ipv6;

	NET_ASSERT(iface);

	/* Report 0 when no IPv6 config is attached to the interface. */
	ipv6 = iface->config.ip.ipv6;

	return (ipv6 == NULL) ? 0 : ipv6->reachable_time;
#else
	ARG_UNUSED(iface);
	return 0;
#endif
}
/**
* @brief Calculate next reachable time value for IPv6 reachable time
*
* @param ipv6 IPv6 address configuration
*
* @return Reachable time
*/
uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6);
/**
* @brief Set IPv6 reachable time for a given interface. This requires
* that base reachable time is set for the interface.
*
* @param ipv6 IPv6 address configuration
*/
static inline void net_if_ipv6_set_reachable_time(struct net_if_ipv6 *ipv6)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	/* Refresh the reachable time only for a valid config. */
	if (ipv6 != NULL) {
		ipv6->reachable_time = net_if_ipv6_calc_reachable_time(ipv6);
	}
#else
	ARG_UNUSED(ipv6);
#endif
}
/**
* @brief Set IPv6 retransmit timer for a given interface
*
* @param iface Network interface
* @param retrans_timer New retransmit timer
*/
static inline void net_if_ipv6_set_retrans_timer(struct net_if *iface,
						 uint32_t retrans_timer)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	struct net_if_ipv6 *ipv6;

	NET_ASSERT(iface);

	/* Nothing to store when no IPv6 config is attached. */
	ipv6 = iface->config.ip.ipv6;
	if (ipv6 == NULL) {
		return;
	}

	ipv6->retrans_timer = retrans_timer;
#else
	ARG_UNUSED(iface);
	ARG_UNUSED(retrans_timer);
#endif
}
/**
* @brief Get IPv6 retransmit timer specified for a given interface
*
* @param iface Network interface
*
* @return Retransmit timer
*/
static inline uint32_t net_if_ipv6_get_retrans_timer(struct net_if *iface)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	struct net_if_ipv6 *ipv6;

	NET_ASSERT(iface);

	/* Report 0 when no IPv6 config is attached to the interface. */
	ipv6 = iface->config.ip.ipv6;

	return (ipv6 == NULL) ? 0 : ipv6->retrans_timer;
#else
	ARG_UNUSED(iface);
	return 0;
#endif
}
/**
* @brief Get a IPv6 source address that should be used when sending
* network data to destination.
*
* @param iface Interface that was used when packet was received.
* If the interface is not known, then NULL can be given.
* @param dst IPv6 destination address
*
* @return Pointer to IPv6 address to use, NULL if no IPv6 address
* could be found.
*/
#if defined(CONFIG_NET_NATIVE_IPV6)
const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *iface,
const struct in6_addr *dst);
#else
/* Stub used when native IPv6 is disabled: no source address available. */
static inline const struct in6_addr *net_if_ipv6_select_src_addr(
	struct net_if *iface, const struct in6_addr *dst)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(dst);
	return NULL;
}
#endif
/**
* @brief Get a IPv6 source address that should be used when sending
* network data to destination. Use a hint set to the socket to select
* the proper address.
*
* @param iface Interface that was used when packet was received.
* If the interface is not known, then NULL can be given.
* @param dst IPv6 destination address
* @param flags Hint from the related socket. See RFC 5014 for value details.
*
* @return Pointer to IPv6 address to use, NULL if no IPv6 address
* could be found.
*/
#if defined(CONFIG_NET_NATIVE_IPV6)
const struct in6_addr *net_if_ipv6_select_src_addr_hint(struct net_if *iface,
const struct in6_addr *dst,
int flags);
#else
/* Stub used when native IPv6 is disabled: no source address available. */
static inline const struct in6_addr *net_if_ipv6_select_src_addr_hint(
	struct net_if *iface, const struct in6_addr *dst, int flags)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(dst);
	ARG_UNUSED(flags);
	return NULL;
}
#endif
/**
* @brief Get a network interface that should be used when sending
* IPv6 network data to destination.
*
* @param dst IPv6 destination address
*
* @return Pointer to network interface to use, NULL if no suitable interface
* could be found.
*/
#if defined(CONFIG_NET_NATIVE_IPV6)
struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst);
#else
/* Stub used when native IPv6 is disabled: no interface can be selected. */
static inline struct net_if *net_if_ipv6_select_src_iface(
	const struct in6_addr *dst)
{
	ARG_UNUSED(dst);
	return NULL;
}
#endif
/**
* @brief Get a IPv6 link local address in a given state.
*
* @param iface Interface to use. Must be a valid pointer to an interface.
* @param addr_state IPv6 address state (preferred, tentative, deprecated)
*
* @return Pointer to link local IPv6 address, NULL if no proper IPv6 address
* could be found.
*/
struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
enum net_addr_state addr_state);
/**
* @brief Return link local IPv6 address from the first interface that has
 * a link local address matching the given state.
*
* @param state IPv6 address state (ANY, TENTATIVE, PREFERRED, DEPRECATED)
* @param iface Pointer to interface is returned
*
* @return Pointer to IPv6 address, NULL if not found.
*/
struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
struct net_if **iface);
/**
* @brief Stop IPv6 Duplicate Address Detection (DAD) procedure if
* we find out that our IPv6 address is already in use.
*
* @param iface Interface where the DAD was running.
* @param addr IPv6 address that failed DAD
*/
void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr);
/**
* @brief Return global IPv6 address from the first interface that has
* a global IPv6 address matching the given state.
*
* @param state IPv6 address state (ANY, TENTATIVE, PREFERRED, DEPRECATED)
* @param iface Caller can give an interface to check. If iface is set to NULL,
* then all the interfaces are checked. Pointer to interface where the IPv6
* address is defined is returned to the caller.
*
* @return Pointer to IPv6 address, NULL if not found.
*/
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
struct net_if **iface);
/**
* @brief Allocate network interface IPv4 config.
*
* @details This function will allocate new IPv4 config.
*
* @param iface Interface to use.
* @param ipv4 Pointer to allocated IPv4 struct is returned to caller.
*
* @return 0 if ok, <0 if error
*/
int net_if_config_ipv4_get(struct net_if *iface,
struct net_if_ipv4 **ipv4);
/**
* @brief Release network interface IPv4 config.
*
* @param iface Interface to use.
*
* @return 0 if ok, <0 if error
*/
int net_if_config_ipv4_put(struct net_if *iface);
/**
* @brief Get IPv4 time-to-live value specified for a given interface
*
* @param iface Network interface
*
* @return Time-to-live
*/
uint8_t net_if_ipv4_get_ttl(struct net_if *iface);
/**
* @brief Set IPv4 time-to-live value specified to a given interface
*
* @param iface Network interface
* @param ttl Time-to-live value
*/
void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl);
/**
* @brief Get IPv4 multicast time-to-live value specified for a given interface
*
* @param iface Network interface
*
* @return Time-to-live
*/
uint8_t net_if_ipv4_get_mcast_ttl(struct net_if *iface);
/**
* @brief Set IPv4 multicast time-to-live value specified to a given interface
*
* @param iface Network interface
* @param ttl Time-to-live value
*/
void net_if_ipv4_set_mcast_ttl(struct net_if *iface, uint8_t ttl);
/**
* @brief Check if this IPv4 address belongs to one of the interfaces.
*
* @param addr IPv4 address
* @param iface Interface is returned
*
* @return Pointer to interface address, NULL if not found.
*/
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
struct net_if **iface);
/**
* @brief Add a IPv4 address to an interface
*
* @param iface Network interface
* @param addr IPv4 address
* @param addr_type IPv4 address type
* @param vlifetime Validity time for this address
*
* @return Pointer to interface address, NULL if cannot be added
*/
struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
struct in_addr *addr,
enum net_addr_type addr_type,
uint32_t vlifetime);
/**
* @brief Remove a IPv4 address from an interface
*
* @param iface Network interface
* @param addr IPv4 address
*
* @return True if successfully removed, false otherwise
*/
bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr);
/**
* @brief Check if this IPv4 address belongs to one of the interface indices.
*
* @param addr IPv4 address
*
* @return >0 if address was found in given network interface index,
* all other values mean address was not found
*/
__syscall int net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr);
/**
* @brief Add a IPv4 address to an interface by network interface index
*
* @param index Network interface index
* @param addr IPv4 address
* @param addr_type IPv4 address type
* @param vlifetime Validity time for this address
*
* @return True if ok, false if the address could not be added
*/
__syscall bool net_if_ipv4_addr_add_by_index(int index,
struct in_addr *addr,
enum net_addr_type addr_type,
uint32_t vlifetime);
/**
* @brief Remove a IPv4 address from an interface by interface index
*
* @param index Network interface index
* @param addr IPv4 address
*
* @return True if successfully removed, false otherwise
*/
__syscall bool net_if_ipv4_addr_rm_by_index(int index,
const struct in_addr *addr);
/**
* @brief Go through all IPv4 addresses on a network interface and call callback
* for each used address.
*
* @param iface Pointer to the network interface
* @param cb User-supplied callback function to call
* @param user_data User specified data
*/
void net_if_ipv4_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
void *user_data);
/**
* @brief Add a IPv4 multicast address to an interface
*
* @param iface Network interface
* @param addr IPv4 multicast address
*
* @return Pointer to interface multicast address, NULL if cannot be added
*/
struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
const struct in_addr *addr);
/**
* @brief Remove an IPv4 multicast address from an interface
*
* @param iface Network interface
* @param addr IPv4 multicast address
*
* @return True if successfully removed, false otherwise
*/
bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr);
/**
* @brief Go through all IPv4 multicast addresses on a network interface and call
* callback for each used address.
*
* @param iface Pointer to the network interface
* @param cb User-supplied callback function to call
* @param user_data User specified data
*/
void net_if_ipv4_maddr_foreach(struct net_if *iface, net_if_ip_maddr_cb_t cb,
void *user_data);
/**
* @brief Check if this IPv4 multicast address belongs to a specific interface
* or one of the interfaces.
*
* @param addr IPv4 address
* @param iface If *iface is null, then pointer to interface is returned,
* otherwise the *iface value needs to be matched.
*
* @return Pointer to interface multicast address, NULL if not found.
*/
struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
struct net_if **iface);
/**
* @brief Mark a given multicast address to be joined.
*
* @param iface Network interface the address belongs to
* @param addr IPv4 multicast address
*/
void net_if_ipv4_maddr_join(struct net_if *iface,
struct net_if_mcast_addr *addr);
/**
* @brief Check if given multicast address is joined or not.
*
* @param addr IPv4 multicast address
*
* @return True if address is joined, False otherwise.
*/
static inline bool net_if_ipv4_maddr_is_joined(struct net_if_mcast_addr *addr)
{
	NET_ASSERT(addr);
	/* Simply report the join flag maintained by the maddr join/leave calls. */
	return addr->is_joined;
}
/**
* @brief Mark a given multicast address to be left.
*
* @param iface Network interface the address belongs to
* @param addr IPv4 multicast address
*/
void net_if_ipv4_maddr_leave(struct net_if *iface,
struct net_if_mcast_addr *addr);
/**
* @brief Get the IPv4 address of the given router
* @param router a network router
*
* @return pointer to the IPv4 address, or NULL if none
*/
#if defined(CONFIG_NET_NATIVE_IPV4)
static inline struct in_addr *net_if_router_ipv4(struct net_if_router *router)
{
	NET_ASSERT(router);
	/* Return the IPv4 member of the router's address union. */
	return &router->address.in_addr;
}
#else
static inline struct in_addr *net_if_router_ipv4(struct net_if_router *router)
{
	/* Shared zero-filled placeholder; callers must not modify it. */
	static struct in_addr addr;
	ARG_UNUSED(router);
	return &addr;
}
#endif
/**
* @brief Check if IPv4 address is one of the routers configured
* in the system.
*
* @param iface Network interface
* @param addr IPv4 address
*
* @return Pointer to router information, NULL if cannot be found
*/
struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
struct in_addr *addr);
/**
* @brief Find default router for this IPv4 address.
*
* @param iface Network interface. This can be NULL in which case we
* go through all the network interfaces to find a suitable router.
* @param addr IPv4 address
*
* @return Pointer to router information, NULL if cannot be found
*/
struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
struct in_addr *addr);
/**
* @brief Add IPv4 router to the system.
*
* @param iface Network interface
* @param addr IPv4 address
* @param is_default Is this router the default one
* @param router_lifetime Lifetime of the router
*
* @return Pointer to router information, NULL if could not be added
*/
struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
struct in_addr *addr,
bool is_default,
uint16_t router_lifetime);
/**
* @brief Remove IPv4 router from the system.
*
* @param router Router information.
*
* @return True if successfully removed, false otherwise
*/
bool net_if_ipv4_router_rm(struct net_if_router *router);
/**
* @brief Check if the given IPv4 address belongs to local subnet.
*
* @param iface Interface to use. Must be a valid pointer to an interface.
* @param addr IPv4 address
*
* @return True if address is part of local subnet, false otherwise.
*/
bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
const struct in_addr *addr);
/**
* @brief Check if the given IPv4 address is a broadcast address.
*
* @param iface Interface to use. Must be a valid pointer to an interface.
* @param addr IPv4 address, this should be in network byte order
*
* @return True if address is a broadcast address, false otherwise.
*/
bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
const struct in_addr *addr);
/**
* @brief Get a network interface that should be used when sending
* IPv4 network data to destination.
*
* @param dst IPv4 destination address
*
* @return Pointer to network interface to use, NULL if no suitable interface
* could be found.
*/
#if defined(CONFIG_NET_NATIVE_IPV4)
struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst);
#else
/* Stub used when native IPv4 is disabled: no interface can be selected. */
static inline struct net_if *net_if_ipv4_select_src_iface(
	const struct in_addr *dst)
{
	ARG_UNUSED(dst);
	return NULL;
}
#endif
/**
* @brief Get a IPv4 source address that should be used when sending
* network data to destination.
*
* @param iface Interface to use when sending the packet.
* If the interface is not known, then NULL can be given.
* @param dst IPv4 destination address
*
* @return Pointer to IPv4 address to use, NULL if no IPv4 address
* could be found.
*/
#if defined(CONFIG_NET_NATIVE_IPV4)
const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *iface,
const struct in_addr *dst);
#else
/* Stub used when native IPv4 is disabled: no source address available. */
static inline const struct in_addr *net_if_ipv4_select_src_addr(
	struct net_if *iface, const struct in_addr *dst)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(dst);
	return NULL;
}
#endif
/**
* @brief Get a IPv4 link local address in a given state.
*
* @param iface Interface to use. Must be a valid pointer to an interface.
* @param addr_state IPv4 address state (preferred, tentative, deprecated)
*
* @return Pointer to link local IPv4 address, NULL if no proper IPv4 address
* could be found.
*/
struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
enum net_addr_state addr_state);
/**
* @brief Get a IPv4 global address in a given state.
*
* @param iface Interface to use. Must be a valid pointer to an interface.
* @param addr_state IPv4 address state (preferred, tentative, deprecated)
*
 * @return Pointer to global IPv4 address, NULL if no proper IPv4 address
* could be found.
*/
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
enum net_addr_state addr_state);
/**
* @brief Get IPv4 netmask related to an address of an interface.
*
* @param iface Interface to use.
* @param addr IPv4 address to check.
*
 * @return The netmask set on the interface related to the given address,
* unspecified address if not found.
*/
struct in_addr net_if_ipv4_get_netmask_by_addr(struct net_if *iface,
const struct in_addr *addr);
/**
* @brief Get IPv4 netmask of an interface.
*
* @deprecated Use net_if_ipv4_get_netmask_by_addr() instead.
*
* @param iface Interface to use.
*
* @return The netmask set on the interface, unspecified address if not found.
*/
__deprecated struct in_addr net_if_ipv4_get_netmask(struct net_if *iface);
/**
* @brief Set IPv4 netmask for an interface.
*
* @deprecated Use net_if_ipv4_set_netmask_by_addr() instead.
*
* @param iface Interface to use.
* @param netmask IPv4 netmask
*/
__deprecated void net_if_ipv4_set_netmask(struct net_if *iface,
const struct in_addr *netmask);
/**
* @brief Set IPv4 netmask for an interface index.
*
* @deprecated Use net_if_ipv4_set_netmask_by_addr() instead.
*
* @param index Network interface index
* @param netmask IPv4 netmask
*
* @return True if netmask was added, false otherwise.
*/
__deprecated __syscall bool net_if_ipv4_set_netmask_by_index(int index,
const struct in_addr *netmask);
/**
* @brief Set IPv4 netmask for an interface index for a given address.
*
* @param index Network interface index
* @param addr IPv4 address related to this netmask
* @param netmask IPv4 netmask
*
* @return True if netmask was added, false otherwise.
*/
__syscall bool net_if_ipv4_set_netmask_by_addr_by_index(int index,
const struct in_addr *addr,
const struct in_addr *netmask);
/**
* @brief Set IPv4 netmask for an interface index for a given address.
*
* @param iface Network interface
* @param addr IPv4 address related to this netmask
* @param netmask IPv4 netmask
*
* @return True if netmask was added, false otherwise.
*/
bool net_if_ipv4_set_netmask_by_addr(struct net_if *iface,
const struct in_addr *addr,
const struct in_addr *netmask);
/**
* @brief Get IPv4 gateway of an interface.
*
* @param iface Interface to use.
*
* @return The gateway set on the interface, unspecified address if not found.
*/
struct in_addr net_if_ipv4_get_gw(struct net_if *iface);
/**
* @brief Set IPv4 gateway for an interface.
*
* @param iface Interface to use.
* @param gw IPv4 address of an gateway
*/
void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw);
/**
* @brief Set IPv4 gateway for an interface index.
*
* @param index Network interface index
* @param gw IPv4 address of an gateway
*
* @return True if gateway was added, false otherwise.
*/
__syscall bool net_if_ipv4_set_gw_by_index(int index, const struct in_addr *gw);
/**
* @brief Get a network interface that should be used when sending
* IPv6 or IPv4 network data to destination.
*
* @param dst IPv6 or IPv4 destination address
*
* @return Pointer to network interface to use. Note that the function
* will return the default network interface if the best network interface
* is not found.
*/
struct net_if *net_if_select_src_iface(const struct sockaddr *dst);
/**
* @typedef net_if_link_callback_t
* @brief Define callback that is called after a network packet
* has been sent.
* @param iface A pointer to a struct net_if to which the net_pkt was sent to.
* @param dst Link layer address of the destination where the network packet was sent.
* @param status Send status, 0 is ok, < 0 error.
*/
typedef void (*net_if_link_callback_t)(struct net_if *iface,
struct net_linkaddr *dst,
int status);
/**
* @brief Link callback handler struct.
*
* Stores the link callback information. Caller must make sure that
* the variable pointed by this is valid during the lifetime of
* registration. Typically this means that the variable cannot be
* allocated from stack.
*/
struct net_if_link_cb {
	/** Node information for the slist (used internally for registration). */
	sys_snode_t node;
	/** Link callback invoked after a network packet has been sent. */
	net_if_link_callback_t cb;
};
/**
* @brief Register a link callback.
*
* @param link Caller specified handler for the callback.
* @param cb Callback to register.
*/
void net_if_register_link_cb(struct net_if_link_cb *link,
net_if_link_callback_t cb);
/**
* @brief Unregister a link callback.
*
* @param link Caller specified handler for the callback.
*/
void net_if_unregister_link_cb(struct net_if_link_cb *link);
/**
* @brief Call a link callback function.
*
* @param iface Network interface.
* @param lladdr Destination link layer address
* @param status 0 is ok, < 0 error
*/
void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
int status);
/** @cond INTERNAL_HIDDEN */
/* used to ensure encoding of checksum support in net_if.h and
* ethernet.h is the same
*/
#define NET_IF_CHECKSUM_NONE_BIT 0
#define NET_IF_CHECKSUM_IPV4_HEADER_BIT BIT(0)
#define NET_IF_CHECKSUM_IPV4_ICMP_BIT BIT(1)
/* Space for future protocols and restrictions for IPV4 */
#define NET_IF_CHECKSUM_IPV6_HEADER_BIT BIT(10)
#define NET_IF_CHECKSUM_IPV6_ICMP_BIT BIT(11)
/* Space for future protocols and restrictions for IPV6 */
#define NET_IF_CHECKSUM_TCP_BIT BIT(21)
#define NET_IF_CHECKSUM_UDP_BIT BIT(22)
/** @endcond */
/**
* @brief Type of checksum for which support in the interface will be queried.
*/
enum net_if_checksum_type {
	/** Interface supports IP version 4 header checksum calculation */
	NET_IF_CHECKSUM_IPV4_HEADER = NET_IF_CHECKSUM_IPV4_HEADER_BIT,
	/** Interface supports checksum calculation for TCP payload in IPv4 */
	NET_IF_CHECKSUM_IPV4_TCP = NET_IF_CHECKSUM_IPV4_HEADER_BIT |
				   NET_IF_CHECKSUM_TCP_BIT,
	/** Interface supports checksum calculation for UDP payload in IPv4 */
	NET_IF_CHECKSUM_IPV4_UDP = NET_IF_CHECKSUM_IPV4_HEADER_BIT |
				   NET_IF_CHECKSUM_UDP_BIT,
	/** Interface supports checksum calculation for ICMP4 payload in IPv4 */
	NET_IF_CHECKSUM_IPV4_ICMP = NET_IF_CHECKSUM_IPV4_ICMP_BIT,
	/** Interface supports IP version 6 header checksum calculation */
	NET_IF_CHECKSUM_IPV6_HEADER = NET_IF_CHECKSUM_IPV6_HEADER_BIT,
	/** Interface supports checksum calculation for TCP payload in IPv6 */
	NET_IF_CHECKSUM_IPV6_TCP = NET_IF_CHECKSUM_IPV6_HEADER_BIT |
				   NET_IF_CHECKSUM_TCP_BIT,
	/** Interface supports checksum calculation for UDP payload in IPv6 */
	NET_IF_CHECKSUM_IPV6_UDP = NET_IF_CHECKSUM_IPV6_HEADER_BIT |
				   NET_IF_CHECKSUM_UDP_BIT,
	/** Interface supports checksum calculation for ICMP6 payload in IPv6 */
	NET_IF_CHECKSUM_IPV6_ICMP = NET_IF_CHECKSUM_IPV6_ICMP_BIT
	/* Values combine the L3 (header) and L4 (TCP/UDP) capability bits so
	 * a single query can express "IPv4 header + TCP payload" etc.
	 */
};
/**
* @brief Check if received network packet checksum calculation can be avoided
* or not. For example many ethernet devices support network packet offloading
* in which case the IP stack does not need to calculate the checksum.
*
* @param iface Network interface
* @param chksum_type L3 and/or L4 protocol for which to compute checksum
*
* @return True if checksum needs to be calculated, false otherwise.
*/
bool net_if_need_calc_rx_checksum(struct net_if *iface,
enum net_if_checksum_type chksum_type);
/**
* @brief Check if network packet checksum calculation can be avoided or not
* when sending the packet. For example many ethernet devices support network
* packet offloading in which case the IP stack does not need to calculate the
* checksum.
*
* @param iface Network interface
* @param chksum_type L3 and/or L4 protocol for which to compute checksum
*
* @return True if checksum needs to be calculated, false otherwise.
*/
bool net_if_need_calc_tx_checksum(struct net_if *iface,
enum net_if_checksum_type chksum_type);
/**
* @brief Get interface according to index
*
* @details This is a syscall only to provide access to the object for purposes
* of assigning permissions.
*
* @param index Interface index
*
* @return Pointer to interface or NULL if not found.
*/
__syscall struct net_if *net_if_get_by_index(int index);
/**
* @brief Get interface index according to pointer
*
* @param iface Pointer to network interface
*
* @return Interface index
*/
int net_if_get_by_iface(struct net_if *iface);
/**
* @typedef net_if_cb_t
* @brief Callback used while iterating over network interfaces
*
* @param iface Pointer to current network interface
* @param user_data A valid pointer to user data or NULL
*/
typedef void (*net_if_cb_t)(struct net_if *iface, void *user_data);
/**
* @brief Go through all the network interfaces and call callback
* for each interface.
*
* @param cb User-supplied callback function to call
* @param user_data User specified data
*/
void net_if_foreach(net_if_cb_t cb, void *user_data);
/**
* @brief Bring interface up
*
* @param iface Pointer to network interface
*
* @return 0 on success
*/
int net_if_up(struct net_if *iface);
/**
* @brief Check if interface is up and running.
*
* @param iface Pointer to network interface
*
* @return True if interface is up, False if it is down.
*/
static inline bool net_if_is_up(struct net_if *iface)
{
NET_ASSERT(iface);
return net_if_flag_is_set(iface, NET_IF_UP) &&
net_if_flag_is_set(iface, NET_IF_RUNNING);
}
/**
* @brief Bring interface down
*
* @param iface Pointer to network interface
*
* @return 0 on success
*/
int net_if_down(struct net_if *iface);
/**
* @brief Check if interface was brought up by the administrator.
*
* @param iface Pointer to network interface
*
* @return True if interface is admin up, false otherwise.
*/
static inline bool net_if_is_admin_up(struct net_if *iface)
{
	NET_ASSERT(iface);
	/* Only the administrative flag matters here, not NET_IF_RUNNING. */
	return net_if_flag_is_set(iface, NET_IF_UP);
}
/**
* @brief Underlying network device has detected the carrier (cable connected).
*
* @details The function should be used by the respective network device driver
* or L2 implementation to update its state on a network interface.
*
* @param iface Pointer to network interface
*/
void net_if_carrier_on(struct net_if *iface);
/**
* @brief Underlying network device has lost the carrier (cable disconnected).
*
* @details The function should be used by the respective network device driver
* or L2 implementation to update its state on a network interface.
*
* @param iface Pointer to network interface
*/
void net_if_carrier_off(struct net_if *iface);
/**
* @brief Check if carrier is present on network device.
*
* @param iface Pointer to network interface
*
* @return True if carrier is present, false otherwise.
*/
static inline bool net_if_is_carrier_ok(struct net_if *iface)
{
	NET_ASSERT(iface);
	/* Carrier presence is tracked via the NET_IF_LOWER_UP flag. */
	return net_if_flag_is_set(iface, NET_IF_LOWER_UP);
}
/**
* @brief Mark interface as dormant. Dormant state indicates that the interface
* is not ready to pass packets yet, but is waiting for some event
* (for example Wi-Fi network association).
*
* @details The function should be used by the respective network device driver
* or L2 implementation to update its state on a network interface.
*
* @param iface Pointer to network interface
*/
void net_if_dormant_on(struct net_if *iface);
/**
* @brief Mark interface as not dormant.
*
* @details The function should be used by the respective network device driver
* or L2 implementation to update its state on a network interface.
*
* @param iface Pointer to network interface
*/
void net_if_dormant_off(struct net_if *iface);
/**
* @brief Check if the interface is dormant.
*
* @param iface Pointer to network interface
*
* @return True if interface is dormant, false otherwise.
*/
static inline bool net_if_is_dormant(struct net_if *iface)
{
	NET_ASSERT(iface);
	/* Dormancy is tracked via the NET_IF_DORMANT flag. */
	return net_if_flag_is_set(iface, NET_IF_DORMANT);
}
#if defined(CONFIG_NET_PKT_TIMESTAMP) && defined(CONFIG_NET_NATIVE)
/**
* @typedef net_if_timestamp_callback_t
* @brief Define callback that is called after a network packet
* has been timestamped.
 * @param pkt Pointer to the struct net_pkt which has been timestamped
 * after being sent.
*/
typedef void (*net_if_timestamp_callback_t)(struct net_pkt *pkt);
/**
* @brief Timestamp callback handler struct.
*
* Stores the timestamp callback information. Caller must make sure that
* the variable pointed by this is valid during the lifetime of
* registration. Typically this means that the variable cannot be
* allocated from stack.
*/
struct net_if_timestamp_cb {
	/** Node information for the slist (used internally for registration). */
	sys_snode_t node;
	/** Packet for which the callback is needed.
	 * A NULL value means all packets.
	 */
	struct net_pkt *pkt;
	/** Net interface for which the callback is needed.
	 * A NULL value means all interfaces.
	 */
	struct net_if *iface;
	/** Timestamp callback invoked once the packet has been timestamped. */
	net_if_timestamp_callback_t cb;
};
/**
* @brief Register a timestamp callback.
*
* @param handle Caller specified handler for the callback.
* @param pkt Net packet for which the callback is registered. NULL for all
* packets.
* @param iface Net interface for which the callback is. NULL for all
* interfaces.
* @param cb Callback to register.
*/
void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
struct net_pkt *pkt,
struct net_if *iface,
net_if_timestamp_callback_t cb);
/**
* @brief Unregister a timestamp callback.
*
* @param handle Caller specified handler for the callback.
*/
void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle);
/**
* @brief Call a timestamp callback function.
*
* @param pkt Network buffer.
*/
void net_if_call_timestamp_cb(struct net_pkt *pkt);
/**
 * @brief Add timestamped TX buffer to be handled
 *
 * @param pkt Timestamped buffer
 */
void net_if_add_tx_timestamp(struct net_pkt *pkt);
#endif /* CONFIG_NET_PKT_TIMESTAMP && CONFIG_NET_NATIVE */
/**
* @brief Set network interface into promiscuous mode
*
* @details Note that not all network technologies will support this.
*
* @param iface Pointer to network interface
*
* @return 0 on success, <0 if error
*/
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
int net_if_set_promisc(struct net_if *iface);
#else
/* Stub used when promiscuous mode support is disabled. */
static inline int net_if_set_promisc(struct net_if *iface)
{
	ARG_UNUSED(iface);
	return -ENOTSUP;
}
#endif
/**
* @brief Set network interface into normal mode
*
* @param iface Pointer to network interface
*/
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
void net_if_unset_promisc(struct net_if *iface);
#else
/* Stub used when promiscuous mode support is disabled: nothing to undo. */
static inline void net_if_unset_promisc(struct net_if *iface)
{
	ARG_UNUSED(iface);
}
#endif
/**
* @brief Check if promiscuous mode is set or not.
*
* @param iface Pointer to network interface
*
* @return True if interface is in promisc mode,
* False if interface is not in promiscuous mode.
*/
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
bool net_if_is_promisc(struct net_if *iface);
#else
/* Stub used when promiscuous mode support is disabled: never promiscuous. */
static inline bool net_if_is_promisc(struct net_if *iface)
{
	ARG_UNUSED(iface);
	return false;
}
#endif
/**
* @brief Check if there are any pending TX network data for a given network
* interface.
*
* @param iface Pointer to network interface
*
* @return True if there are pending TX network packets for this network
* interface, False otherwise.
*/
static inline bool net_if_are_pending_tx_packets(struct net_if *iface)
{
#if defined(CONFIG_NET_POWER_MANAGEMENT)
	/* Normalize the pending indicator into a strict boolean. */
	return iface->tx_pending != 0;
#else
	ARG_UNUSED(iface);
	return false;
#endif
}
#ifdef CONFIG_NET_POWER_MANAGEMENT
/**
* @brief Suspend a network interface from a power management perspective
*
* @param iface Pointer to network interface
*
* @return 0 on success, or -EALREADY/-EBUSY as possible errors.
*/
int net_if_suspend(struct net_if *iface);
/**
* @brief Resume a network interface from a power management perspective
*
* @param iface Pointer to network interface
*
* @return 0 on success, or -EALREADY as a possible error.
*/
int net_if_resume(struct net_if *iface);
/**
* @brief Check if the network interface is suspended or not.
*
* @param iface Pointer to network interface
*
* @return True if interface is suspended, False otherwise.
*/
bool net_if_is_suspended(struct net_if *iface);
#endif /* CONFIG_NET_POWER_MANAGEMENT */
/**
* @brief Check if the network interface supports Wi-Fi.
*
* @param iface Pointer to network interface
*
* @return True if interface supports Wi-Fi, False otherwise.
*/
bool net_if_is_wifi(struct net_if *iface);
/**
* @brief Get first Wi-Fi network interface.
*
* @return Pointer to network interface, NULL if not found.
*/
struct net_if *net_if_get_first_wifi(void);
/**
* @brief Get Wi-Fi network station interface.
*
* @return Pointer to network interface, NULL if not found.
*/
struct net_if *net_if_get_wifi_sta(void);
/**
* @brief Get first Wi-Fi network Soft-AP interface.
*
* @return Pointer to network interface, NULL if not found.
*/
struct net_if *net_if_get_wifi_sap(void);
/**
 * @brief Get network interface name.
 *
 * @details If interface name support is not enabled, empty string is returned.
 *
 * @param iface Pointer to network interface
 * @param buf User supplied buffer
 * @param len Length of the user supplied buffer
 *
 * @return Length of the interface name copied to buf,
 * -EINVAL if invalid parameters,
 * -ERANGE if name cannot be copied to the user supplied buffer,
 * -ENOTSUP if interface name support is disabled.
 */
int net_if_get_name(struct net_if *iface, char *buf, int len);
/**
* @brief Set network interface name.
*
* @details Normally this function is not needed to call as the system
* will automatically assign a name to the network interface.
*
* @param iface Pointer to network interface
* @param buf User supplied name
*
* @return 0 name is set correctly
* -ENOTSUP interface name support is disabled
* -EINVAL if invalid parameters are given,
* -ENAMETOOLONG if name is too long
*/
int net_if_set_name(struct net_if *iface, const char *buf);
/**
* @brief Get interface index according to its name
*
* @param name Name of the network interface
*
* @return Interface index
*/
int net_if_get_by_name(const char *name);
/** @cond INTERNAL_HIDDEN */
struct net_if_api {
void (*init)(struct net_if *iface);
};
#define NET_IF_DHCPV4_INIT \
IF_ENABLED(UTIL_AND(IS_ENABLED(CONFIG_NET_DHCPV4), \
IS_ENABLED(CONFIG_NET_NATIVE_IPV4)), \
(.dhcpv4.state = NET_DHCPV4_DISABLED,))
#define NET_IF_DHCPV6_INIT \
IF_ENABLED(UTIL_AND(IS_ENABLED(CONFIG_NET_DHCPV6), \
IS_ENABLED(CONFIG_NET_NATIVE_IPV6)), \
(.dhcpv6.state = NET_DHCPV6_DISABLED,))
#define NET_IF_CONFIG_INIT \
.config = { \
IF_ENABLED(CONFIG_NET_IP, (.ip = {},)) \
NET_IF_DHCPV4_INIT \
NET_IF_DHCPV6_INIT \
}
#define NET_IF_GET_NAME(dev_id, sfx) __net_if_##dev_id##_##sfx
#define NET_IF_DEV_GET_NAME(dev_id, sfx) __net_if_dev_##dev_id##_##sfx
#define NET_IF_GET(dev_id, sfx) \
((struct net_if *)&NET_IF_GET_NAME(dev_id, sfx))
#define NET_IF_INIT(dev_id, sfx, _l2, _mtu, _num_configs) \
static STRUCT_SECTION_ITERABLE(net_if_dev, \
NET_IF_DEV_GET_NAME(dev_id, sfx)) = { \
.dev = &(DEVICE_NAME_GET(dev_id)), \
.l2 = &(NET_L2_GET_NAME(_l2)), \
.l2_data = &(NET_L2_GET_DATA(dev_id, sfx)), \
.mtu = _mtu, \
.flags = {BIT(NET_IF_LOWER_UP)}, \
}; \
static Z_DECL_ALIGN(struct net_if) \
NET_IF_GET_NAME(dev_id, sfx)[_num_configs] \
__used __in_section(_net_if, static, \
dev_id) = { \
[0 ... (_num_configs - 1)] = { \
.if_dev = &(NET_IF_DEV_GET_NAME(dev_id, sfx)), \
NET_IF_CONFIG_INIT \
} \
}
#define NET_IF_OFFLOAD_INIT(dev_id, sfx, _mtu) \
static STRUCT_SECTION_ITERABLE(net_if_dev, \
NET_IF_DEV_GET_NAME(dev_id, sfx)) = { \
.dev = &(DEVICE_NAME_GET(dev_id)), \
.mtu = _mtu, \
.l2 = &(NET_L2_GET_NAME(OFFLOADED_NETDEV)), \
.flags = {BIT(NET_IF_LOWER_UP)}, \
}; \
static Z_DECL_ALIGN(struct net_if) \
NET_IF_GET_NAME(dev_id, sfx)[NET_IF_MAX_CONFIGS] \
__used __in_section(_net_if, static, \
dev_id) = { \
[0 ... (NET_IF_MAX_CONFIGS - 1)] = { \
.if_dev = &(NET_IF_DEV_GET_NAME(dev_id, sfx)), \
NET_IF_CONFIG_INIT \
} \
}
/** @endcond */
/* Network device initialization macros */
#define Z_NET_DEVICE_INIT_INSTANCE(node_id, dev_id, name, instance, \
init_fn, pm, data, config, prio, \
api, l2, l2_ctx_type, mtu) \
Z_DEVICE_STATE_DEFINE(dev_id); \
Z_DEVICE_DEFINE(node_id, dev_id, name, init_fn, pm, data, \
config, POST_KERNEL, prio, api, \
&Z_DEVICE_STATE_NAME(dev_id)); \
NET_L2_DATA_INIT(dev_id, instance, l2_ctx_type); \
NET_IF_INIT(dev_id, instance, l2, mtu, NET_IF_MAX_CONFIGS)
#define Z_NET_DEVICE_INIT(node_id, dev_id, name, init_fn, pm, data, \
config, prio, api, l2, l2_ctx_type, mtu) \
Z_NET_DEVICE_INIT_INSTANCE(node_id, dev_id, name, 0, init_fn, \
pm, data, config, prio, api, l2, \
l2_ctx_type, mtu)
/**
* @brief Create a network interface and bind it to network device.
*
* @param dev_id Network device id.
* @param name The name this instance of the driver exposes to
* the system.
* @param init_fn Address to the init function of the driver.
* @param pm Reference to struct pm_device associated with the device.
* (optional).
* @param data Pointer to the device's private data.
* @param config The address to the structure containing the
* configuration information for this instance of the driver.
* @param prio The initialization level at which configuration occurs.
* @param api Provides an initial pointer to the API function struct
* used by the driver. Can be NULL.
* @param l2 Network L2 layer for this network interface.
* @param l2_ctx_type Type of L2 context data.
* @param mtu Maximum transfer unit in bytes for this network interface.
*/
#define NET_DEVICE_INIT(dev_id, name, init_fn, pm, data, config, prio, \
api, l2, l2_ctx_type, mtu) \
Z_NET_DEVICE_INIT(DT_INVALID_NODE, dev_id, name, init_fn, pm, \
data, config, prio, api, l2, l2_ctx_type, mtu)
/**
* @brief Like NET_DEVICE_INIT but taking metadata from a devicetree node.
* Create a network interface and bind it to network device.
*
* @param node_id The devicetree node identifier.
* @param init_fn Address to the init function of the driver.
* @param pm Reference to struct pm_device associated with the device.
* (optional).
* @param data Pointer to the device's private data.
* @param config The address to the structure containing the
* configuration information for this instance of the driver.
* @param prio The initialization level at which configuration occurs.
* @param api Provides an initial pointer to the API function struct
* used by the driver. Can be NULL.
* @param l2 Network L2 layer for this network interface.
* @param l2_ctx_type Type of L2 context data.
* @param mtu Maximum transfer unit in bytes for this network interface.
*/
#define NET_DEVICE_DT_DEFINE(node_id, init_fn, pm, data, \
config, prio, api, l2, l2_ctx_type, mtu) \
Z_NET_DEVICE_INIT(node_id, Z_DEVICE_DT_DEV_ID(node_id), \
DEVICE_DT_NAME(node_id), init_fn, pm, data, \
config, prio, api, l2, l2_ctx_type, mtu)
/**
* @brief Like NET_DEVICE_DT_DEFINE for an instance of a DT_DRV_COMPAT compatible
*
* @param inst instance number. This is replaced by
* <tt>DT_DRV_COMPAT(inst)</tt> in the call to NET_DEVICE_DT_DEFINE.
*
* @param ... other parameters as expected by NET_DEVICE_DT_DEFINE.
*/
#define NET_DEVICE_DT_INST_DEFINE(inst, ...) \
NET_DEVICE_DT_DEFINE(DT_DRV_INST(inst), __VA_ARGS__)
/**
* @brief Create multiple network interfaces and bind them to network device.
* If your network device needs more than one instance of a network interface,
* use this macro below and provide a different instance suffix each time
* (0, 1, 2, ... or a, b, c ... whatever works for you)
*
* @param dev_id Network device id.
* @param name The name this instance of the driver exposes to
* the system.
* @param instance Instance identifier.
* @param init_fn Address to the init function of the driver.
* @param pm Reference to struct pm_device associated with the device.
* (optional).
* @param data Pointer to the device's private data.
* @param config The address to the structure containing the
* configuration information for this instance of the driver.
* @param prio The initialization level at which configuration occurs.
* @param api Provides an initial pointer to the API function struct
* used by the driver. Can be NULL.
* @param l2 Network L2 layer for this network interface.
* @param l2_ctx_type Type of L2 context data.
* @param mtu Maximum transfer unit in bytes for this network interface.
*/
#define NET_DEVICE_INIT_INSTANCE(dev_id, name, instance, init_fn, pm, \
data, config, prio, api, l2, \
l2_ctx_type, mtu) \
Z_NET_DEVICE_INIT_INSTANCE(DT_INVALID_NODE, dev_id, name, \
instance, init_fn, pm, data, config, \
prio, api, l2, l2_ctx_type, mtu)
/**
 * @brief Like NET_DEVICE_INIT_INSTANCE but taking metadata from a devicetree.
 * Create multiple network interfaces and bind them to network device.
 * If your network device needs more than one instance of a network interface,
 * use this macro below and provide a different instance suffix each time
 * (0, 1, 2, ... or a, b, c ... whatever works for you)
 *
 * @param node_id The devicetree node identifier.
 * @param instance Instance identifier.
 * @param init_fn Address to the init function of the driver.
 * @param pm Reference to struct pm_device associated with the device.
 * (optional).
 * @param data Pointer to the device's private data.
 * @param config The address to the structure containing the
 * configuration information for this instance of the driver.
 * @param prio The initialization level at which configuration occurs.
 * @param api Provides an initial pointer to the API function struct
 * used by the driver. Can be NULL.
 * @param l2 Network L2 layer for this network interface.
 * @param l2_ctx_type Type of L2 context data.
 * @param mtu Maximum transfer unit in bytes for this network interface.
 */
#define NET_DEVICE_DT_DEFINE_INSTANCE(node_id, instance, init_fn, pm,	\
				      data, config, prio, api, l2,	\
				      l2_ctx_type, mtu)			\
	Z_NET_DEVICE_INIT_INSTANCE(node_id,				\
				   Z_DEVICE_DT_DEV_ID(node_id),		\
				   DEVICE_DT_NAME(node_id), instance,	\
				   init_fn, pm, data, config, prio,	\
				   api, l2, l2_ctx_type, mtu)
/**
* @brief Like NET_DEVICE_DT_DEFINE_INSTANCE for an instance of a DT_DRV_COMPAT
* compatible
*
* @param inst instance number. This is replaced by
* <tt>DT_DRV_COMPAT(inst)</tt> in the call to NET_DEVICE_DT_DEFINE_INSTANCE.
*
* @param ... other parameters as expected by NET_DEVICE_DT_DEFINE_INSTANCE.
*/
#define NET_DEVICE_DT_INST_DEFINE_INSTANCE(inst, ...) \
NET_DEVICE_DT_DEFINE_INSTANCE(DT_DRV_INST(inst), __VA_ARGS__)
#define Z_NET_DEVICE_OFFLOAD_INIT(node_id, dev_id, name, init_fn, pm, \
data, config, prio, api, mtu) \
Z_DEVICE_STATE_DEFINE(dev_id); \
Z_DEVICE_DEFINE(node_id, dev_id, name, init_fn, pm, data, \
config, POST_KERNEL, prio, api, \
&Z_DEVICE_STATE_NAME(dev_id)); \
NET_IF_OFFLOAD_INIT(dev_id, 0, mtu)
/**
 * @brief Create an offloaded network interface and bind it to network device.
 * The offloaded network interface is implemented by a device vendor HAL or
 * similar.
 *
 * @param dev_id Network device id.
 * @param name The name this instance of the driver exposes to
 * the system.
 * @param init_fn Address to the init function of the driver.
 * @param pm Reference to struct pm_device associated with the device.
 * (optional).
 * @param data Pointer to the device's private data.
 * @param config The address to the structure containing the
 * configuration information for this instance of the driver.
 * @param prio The initialization level at which configuration occurs.
 * @param api Provides an initial pointer to the API function struct
 * used by the driver. Can be NULL.
 * @param mtu Maximum transfer unit in bytes for this network interface.
 */
#define NET_DEVICE_OFFLOAD_INIT(dev_id, name, init_fn, pm, data,	\
				config, prio, api, mtu)			\
	Z_NET_DEVICE_OFFLOAD_INIT(DT_INVALID_NODE, dev_id, name,	\
				  init_fn, pm, data, config, prio, api,	\
				  mtu)
/**
 * @brief Like NET_DEVICE_OFFLOAD_INIT but taking metadata from a devicetree
 * node. Create an offloaded network interface and bind it to network device.
 * The offloaded network interface is implemented by a device vendor HAL or
 * similar.
 *
 * @param node_id The devicetree node identifier.
 * @param init_fn Address to the init function of the driver.
 * @param pm Reference to struct pm_device associated with the device.
 * (optional).
 * @param data Pointer to the device's private data.
 * @param config The address to the structure containing the
 * configuration information for this instance of the driver.
 * @param prio The initialization level at which configuration occurs.
 * @param api Provides an initial pointer to the API function struct
 * used by the driver. Can be NULL.
 * @param mtu Maximum transfer unit in bytes for this network interface.
 */
#define NET_DEVICE_DT_OFFLOAD_DEFINE(node_id, init_fn, pm, data,	\
				     config, prio, api, mtu)		\
	Z_NET_DEVICE_OFFLOAD_INIT(node_id, Z_DEVICE_DT_DEV_ID(node_id),	\
				  DEVICE_DT_NAME(node_id), init_fn, pm,	\
				  data, config, prio, api, mtu)
/**
* @brief Like NET_DEVICE_DT_OFFLOAD_DEFINE for an instance of a DT_DRV_COMPAT
* compatible
*
* @param inst instance number. This is replaced by
* <tt>DT_DRV_COMPAT(inst)</tt> in the call to NET_DEVICE_DT_OFFLOAD_DEFINE.
*
* @param ... other parameters as expected by NET_DEVICE_DT_OFFLOAD_DEFINE.
*/
#define NET_DEVICE_DT_INST_OFFLOAD_DEFINE(inst, ...) \
NET_DEVICE_DT_OFFLOAD_DEFINE(DT_DRV_INST(inst), __VA_ARGS__)
/**
 * @brief Count the number of network interfaces.
 *
 * @details The count is derived from the _net_if_list_start and
 * _net_if_list_end symbols that bound the array of net_if instances,
 * divided by the size of one instance.
 *
 * @param[out] _dst Pointer to location where result is written.
 */
#define NET_IFACE_COUNT(_dst) \
		do {							\
			extern struct net_if _net_if_list_start[];	\
			extern struct net_if _net_if_list_end[];	\
			*(_dst) = ((uintptr_t)_net_if_list_end -	\
				   (uintptr_t)_net_if_list_start) /	\
				sizeof(struct net_if);			\
		} while (0)
#ifdef __cplusplus
}
#endif
#include <zephyr/syscalls/net_if.h>
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_NET_IF_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_if.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 23,229 |
```objective-c
/*
*
*/
/**
* @file
* @brief IEEE 802.15.4 Management interface public header
*
* @note All references to the standard in this file cite IEEE 802.15.4-2020.
*/
#ifndef ZEPHYR_INCLUDE_NET_IEEE802154_MGMT_H_
#define ZEPHYR_INCLUDE_NET_IEEE802154_MGMT_H_
#include <zephyr/net/ieee802154.h>
#include <zephyr/net/net_mgmt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup ieee802154_mgmt IEEE 802.15.4 Net Management
* @since 1.0
* @version 0.8.0
* @ingroup ieee802154
*
* @brief IEEE 802.15.4 net management library
*
* @details The IEEE 802.15.4 net management library provides runtime
* configuration features that applications can interface with directly.
*
* Most of these commands are also accessible via shell commands. See the
* shell's help feature (`shell> ieee802154 help`).
*
* @note All section, table and figure references are to the IEEE 802.15.4-2020
* standard.
*
* @{
*/
/**
* @cond INTERNAL_HIDDEN
*/
#define _NET_IEEE802154_LAYER NET_MGMT_LAYER_L2
#define _NET_IEEE802154_CODE 0x154
#define _NET_IEEE802154_BASE (NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_IEEE802154_LAYER) |\
NET_MGMT_LAYER_CODE(_NET_IEEE802154_CODE))
#define _NET_IEEE802154_EVENT (_NET_IEEE802154_BASE | NET_MGMT_EVENT_BIT)
enum net_request_ieee802154_cmd {
NET_REQUEST_IEEE802154_CMD_SET_ACK = 1,
NET_REQUEST_IEEE802154_CMD_UNSET_ACK,
NET_REQUEST_IEEE802154_CMD_PASSIVE_SCAN,
NET_REQUEST_IEEE802154_CMD_ACTIVE_SCAN,
NET_REQUEST_IEEE802154_CMD_CANCEL_SCAN,
NET_REQUEST_IEEE802154_CMD_ASSOCIATE,
NET_REQUEST_IEEE802154_CMD_DISASSOCIATE,
NET_REQUEST_IEEE802154_CMD_SET_CHANNEL,
NET_REQUEST_IEEE802154_CMD_GET_CHANNEL,
NET_REQUEST_IEEE802154_CMD_SET_PAN_ID,
NET_REQUEST_IEEE802154_CMD_GET_PAN_ID,
NET_REQUEST_IEEE802154_CMD_SET_EXT_ADDR,
NET_REQUEST_IEEE802154_CMD_GET_EXT_ADDR,
NET_REQUEST_IEEE802154_CMD_SET_SHORT_ADDR,
NET_REQUEST_IEEE802154_CMD_GET_SHORT_ADDR,
NET_REQUEST_IEEE802154_CMD_GET_TX_POWER,
NET_REQUEST_IEEE802154_CMD_SET_TX_POWER,
NET_REQUEST_IEEE802154_CMD_SET_SECURITY_SETTINGS,
NET_REQUEST_IEEE802154_CMD_GET_SECURITY_SETTINGS,
};
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @name Command Macros
*
* @brief IEEE 802.15.4 net management commands.
*
* @details These IEEE 802.15.4 subsystem net management commands can be called
* by applications via @ref net_mgmt macro.
*
* All attributes and parameters are given in CPU byte order (scalars) or big
* endian (byte arrays) unless otherwise specified.
*
* The following IEEE 802.15.4 MAC management service primitives are referenced
* in this enumeration:
* - MLME-ASSOCIATE.request, see section 8.2.3
* - MLME-DISASSOCIATE.request, see section 8.2.4
* - MLME-SET/GET.request, see section 8.2.6
* - MLME-SCAN.request, see section 8.2.11
*
* The following IEEE 802.15.4 MAC data service primitives are referenced in
* this enumeration:
* - MLME-DATA.request, see section 8.3.2
*
* MAC PIB attributes (mac.../sec...): see sections 8.4.3 and 9.5.
* PHY PIB attributes (phy...): see section 11.3.
* Both are accessed through MLME-SET/GET primitives.
*
* @{
*/
/** Sets AckTx for all subsequent MLME-DATA (aka TX) requests. */
#define NET_REQUEST_IEEE802154_SET_ACK (_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_SET_ACK)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_SET_ACK);
/** Unsets AckTx for all subsequent MLME-DATA requests. */
#define NET_REQUEST_IEEE802154_UNSET_ACK \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_UNSET_ACK)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_UNSET_ACK);
/**
* MLME-SCAN(PASSIVE, ...) request
*
* See @ref ieee802154_req_params for associated command parameters.
*/
#define NET_REQUEST_IEEE802154_PASSIVE_SCAN \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_PASSIVE_SCAN)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_PASSIVE_SCAN);
/**
* MLME-SCAN(ACTIVE, ...) request
*
* See @ref ieee802154_req_params for associated command parameters.
*/
#define NET_REQUEST_IEEE802154_ACTIVE_SCAN \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_ACTIVE_SCAN)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_ACTIVE_SCAN);
/** Cancels an ongoing MLME-SCAN(...) command (non-standard). */
#define NET_REQUEST_IEEE802154_CANCEL_SCAN \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_CANCEL_SCAN)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_CANCEL_SCAN);
/** MLME-ASSOCIATE(...) request */
#define NET_REQUEST_IEEE802154_ASSOCIATE \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_ASSOCIATE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_ASSOCIATE);
/** MLME-DISASSOCIATE(...) request */
#define NET_REQUEST_IEEE802154_DISASSOCIATE \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_DISASSOCIATE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_DISASSOCIATE);
/** MLME-SET(phyCurrentChannel) request */
#define NET_REQUEST_IEEE802154_SET_CHANNEL \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_SET_CHANNEL)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_SET_CHANNEL);
/** MLME-GET(phyCurrentChannel) request */
#define NET_REQUEST_IEEE802154_GET_CHANNEL \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_GET_CHANNEL)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_GET_CHANNEL);
/** MLME-SET(macPanId) request */
#define NET_REQUEST_IEEE802154_SET_PAN_ID \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_SET_PAN_ID)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_SET_PAN_ID);
/** MLME-GET(macPanId) request */
#define NET_REQUEST_IEEE802154_GET_PAN_ID \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_GET_PAN_ID)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_GET_PAN_ID);
/**
* Sets the extended interface address (non-standard), see sections 7.1
* and 8.4.3.1, in big endian byte order
*/
#define NET_REQUEST_IEEE802154_SET_EXT_ADDR \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_SET_EXT_ADDR)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_SET_EXT_ADDR);
/** like MLME-GET(macExtendedAddress) but in big endian byte order */
#define NET_REQUEST_IEEE802154_GET_EXT_ADDR \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_GET_EXT_ADDR)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_GET_EXT_ADDR);
/** MLME-SET(macShortAddress) request, only allowed for coordinators */
#define NET_REQUEST_IEEE802154_SET_SHORT_ADDR \
	(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_SET_SHORT_ADDR)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_SET_SHORT_ADDR);
/** MLME-GET(macShortAddress) request */
#define NET_REQUEST_IEEE802154_GET_SHORT_ADDR \
	(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_GET_SHORT_ADDR)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_GET_SHORT_ADDR);
/**
 * MLME-GET(phyUnicastTxPower/phyBroadcastTxPower) request (currently
 * not distinguished)
 */
#define NET_REQUEST_IEEE802154_GET_TX_POWER \
	(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_GET_TX_POWER)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_GET_TX_POWER);
/** MLME-SET(phyUnicastTxPower/phyBroadcastTxPower) request */
#define NET_REQUEST_IEEE802154_SET_TX_POWER \
	(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_SET_TX_POWER)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_SET_TX_POWER);
#ifdef CONFIG_NET_L2_IEEE802154_SECURITY
/**
* Configures basic sec* MAC PIB attributes, implies
* macSecurityEnabled=true.
*
* See @ref ieee802154_security_params for associated command parameters.
*/
#define NET_REQUEST_IEEE802154_SET_SECURITY_SETTINGS \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_SET_SECURITY_SETTINGS)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_SET_SECURITY_SETTINGS);
/**
* Gets the configured sec* attributes.
*
* See @ref ieee802154_security_params for associated command parameters.
*/
#define NET_REQUEST_IEEE802154_GET_SECURITY_SETTINGS \
(_NET_IEEE802154_BASE | NET_REQUEST_IEEE802154_CMD_GET_SECURITY_SETTINGS)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_IEEE802154_GET_SECURITY_SETTINGS);
#endif /* CONFIG_NET_L2_IEEE802154_SECURITY */
/**
* @}
*/
/**
* @cond INTERNAL_HIDDEN
*/
enum net_event_ieee802154_cmd {
NET_EVENT_IEEE802154_CMD_SCAN_RESULT = 1,
};
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @name Event Macros
*
* @brief IEEE 802.15.4 net management events.
*
* @details These IEEE 802.15.4 subsystem net management events can be
* subscribed to by applications via @ref net_mgmt_init_event_callback, @ref
* net_mgmt_add_event_callback and @ref net_mgmt_del_event_callback.
*
* @{
*/
/**
* Signals the result of the @ref NET_REQUEST_IEEE802154_ACTIVE_SCAN or @ref
* NET_REQUEST_IEEE802154_PASSIVE_SCAN net management commands.
*
* See @ref ieee802154_req_params for associated event parameters.
*/
#define NET_EVENT_IEEE802154_SCAN_RESULT \
(_NET_IEEE802154_EVENT | NET_EVENT_IEEE802154_CMD_SCAN_RESULT)
/**
* @}
*/
/**
* @cond INTERNAL_HIDDEN
*/
/* Channel-set helpers: @p _chan is 1-based; @p _channel_set is a bitmap
 * with bit (n - 1) representing channel n. Macro parameters are fully
 * parenthesized so the expansion stays correct when callers pass
 * compound expressions (e.g. `a ^ b` or `c | 2`).
 */
#define IEEE802154_IS_CHAN_SCANNED(_channel_set, _chan)	\
	((_channel_set) & BIT((_chan) - 1))
#define IEEE802154_IS_CHAN_UNSCANNED(_channel_set, _chan)	\
	(!IEEE802154_IS_CHAN_SCANNED(_channel_set, _chan))
#define IEEE802154_ALL_CHANNELS UINT32_MAX
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Scanning parameters
*
* Used to request a scan and get results as well, see section 8.2.11.2
*/
struct ieee802154_req_params {
/** The set of channels to scan, use above macros to manage it */
uint32_t channel_set;
/** Duration of scan, per-channel, in milliseconds */
uint32_t duration;
/** Current channel in use as a result */
uint16_t channel; /* in CPU byte order */
/** Current pan_id in use as a result */
uint16_t pan_id; /* in CPU byte order */
/** Result address */
union {
uint16_t short_addr; /**< in CPU byte order */
uint8_t addr[IEEE802154_MAX_ADDR_LENGTH]; /**< in big endian */
};
/** length of address */
uint8_t len;
/** Link quality information, between 0 and 255 */
uint8_t lqi;
/** Flag if association is permitted by the coordinator */
bool association_permitted;
/** Additional payload of the beacon if any.*/
uint8_t *beacon_payload;
/** Length of the additional payload. */
size_t beacon_payload_len;
};
/**
* @brief Security parameters
*
* Used to setup the link-layer security settings,
* see tables 9-9 and 9-10 in section 9.5.
*/
struct ieee802154_security_params {
uint8_t key[16]; /**< secKeyDescriptor.secKey */
uint8_t key_len; /**< Key length of 16 bytes is mandatory for standards conformance */
uint8_t key_mode : 2; /**< secKeyIdMode */
uint8_t level : 3; /**< Used instead of a frame-specific SecurityLevel parameter when
* constructing the auxiliary security header
*/
uint8_t _unused : 3; /**< unused value (ignore) */
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_IEEE802154_MGMT_H_ */
``` | /content/code_sandbox/include/zephyr/net/ieee802154_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,930 |
```objective-c
/** @file
* @brief NET_MGMT socket definitions.
*
* Definitions for NET_MGMT socket support.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKET_NET_MGMT_H_
#define ZEPHYR_INCLUDE_NET_SOCKET_NET_MGMT_H_
#include <zephyr/types.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Socket NET_MGMT library
* @defgroup socket_net_mgmt Socket NET_MGMT library
* @since 2.0
* @version 0.1.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
/* Protocols of the protocol family PF_NET_MGMT */
#define NET_MGMT_EVENT_PROTO 0x01
/* Socket NET_MGMT options */
#define SOL_NET_MGMT_BASE 100
#define SOL_NET_MGMT_RAW (SOL_NET_MGMT_BASE + 1)
/** @endcond */
/**
* struct sockaddr_nm - The sockaddr structure for NET_MGMT sockets
*
* Similar concepts are used as in Linux AF_NETLINK. The NETLINK name is not
* used in order to avoid confusion between Zephyr and Linux as the
* implementations are different.
*
* The socket domain (address family) is AF_NET_MGMT, and the type of socket
* is either SOCK_RAW or SOCK_DGRAM, because this is a datagram-oriented
* service.
*
* The protocol (protocol type) selects for which feature the socket is used.
*
* When used with bind(), the nm_pid field of the sockaddr_nm can be
* filled with the calling thread' own id. The nm_pid serves here as the local
* address of this net_mgmt socket. The application is responsible for picking
* a unique integer value to fill in nm_pid.
*/
struct sockaddr_nm {
/** AF_NET_MGMT address family. */
sa_family_t nm_family;
/** Network interface related to this address */
int nm_ifindex;
/** Thread id or similar that is used to separate the different
* sockets. Application can decide how the pid is constructed.
*/
uintptr_t nm_pid;
/** net_mgmt mask */
uint32_t nm_mask;
};
/**
* Each network management message is prefixed with this header.
*/
struct net_mgmt_msghdr {
/** Network management version */
uint32_t nm_msg_version;
/** Length of the data */
uint32_t nm_msg_len;
/** The actual message data follows */
uint8_t nm_msg[];
};
/**
* Version of the message is placed to the header. Currently we have
* following versions.
*
* Network management message versions:
*
* 0x0001 : The net_mgmt event info message follows directly
* after the header.
*/
#define NET_MGMT_SOCKET_VERSION_1 0x0001
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_SOCKET_NET_MGMT_H_ */
``` | /content/code_sandbox/include/zephyr/net/socket_net_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 634 |
```objective-c
/** @file
* @brief Wi-Fi Network manager API
*
* This file contains the Wi-Fi network manager API. These APIs are used by the
* any network management application to register as a Wi-Fi network manager.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_ZEPHYR_NET_WIFI_NM_H_
#define ZEPHYR_INCLUDE_ZEPHYR_NET_WIFI_NM_H_
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/wifi_mgmt.h>
/**
* @brief Wi-Fi Network manager API
* @defgroup wifi_nm Wi-Fi Network Manager API
* @since 3.5
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/** Types of Wi-Fi interface */
enum wifi_nm_iface_type {
/** IEEE 802.11 Wi-Fi Station */
WIFI_TYPE_STA = 0,
/** IEEE 802.11 Wi-Fi Soft AP */
WIFI_TYPE_SAP,
};
/**
* @brief WiFi Network Managed interfaces
*/
struct wifi_nm_mgd_iface {
/** Wi-Fi interface type */
unsigned char type;
/** Managed net interfaces */
struct net_if *iface;
};
/**
* @brief WiFi Network manager instance
*/
struct wifi_nm_instance {
/** Name of the Network manager instance */
const char *name;
/** Wi-Fi Management operations */
const struct wifi_mgmt_ops *ops;
/** List of Managed interfaces */
struct wifi_nm_mgd_iface mgd_ifaces[CONFIG_WIFI_NM_MAX_MANAGED_INTERFACES];
};
/** @cond INTERNAL_HIDDEN */
#define WIFI_NM_NAME(name) wifi_nm_##name
#define DEFINE_WIFI_NM_INSTANCE(_name, _ops) \
static STRUCT_SECTION_ITERABLE(wifi_nm_instance, WIFI_NM_NAME(_name)) = { \
.name = STRINGIFY(_name), \
.ops = _ops, \
.mgd_ifaces = {}, \
}
/** @endcond */
/**
 * @brief Get a Network manager instance for a given name
 *
 * @param name Name of the Network manager instance
 *
 * @return Pointer to the Network manager instance.
 */
struct wifi_nm_instance *wifi_nm_get_instance(const char *name);
/**
 * @brief Get a Network manager instance for a given interface
 *
 * @param iface Interface
 *
 * @return Pointer to the Network manager instance managing @p iface.
 */
struct wifi_nm_instance *wifi_nm_get_instance_iface(struct net_if *iface);
/**
 * @brief Get a Wi-Fi type for a given interface
 *
 * @param iface Interface
 *
 * @return Wi-Fi interface type (see wifi_nm_iface_type).
 */
unsigned char wifi_nm_get_type_iface(struct net_if *iface);
/**
* @brief Check if the interface is a Wi-Fi station interface
*
* @param iface Interface
*
* @retval true If the interface is a Wi-Fi station interface.
*
*/
bool wifi_nm_iface_is_sta(struct net_if *iface);
/**
* @brief Check if the interface is a Wi-Fi Soft AP interface
*
* @param iface Interface
*
* @retval true If the interface is a Wi-Fi Soft AP interface.
*
*/
bool wifi_nm_iface_is_sap(struct net_if *iface);
/**
* @brief Register a managed interface
*
* @param nm Pointer to Network manager instance
* @param iface Managed interface
*
* @retval 0 If successful.
* @retval -EINVAL If invalid parameters were passed.
* @retval -ENOTSUP If the interface is not a Wi-Fi interface.
* @retval -ENOMEM If the maximum number of managed interfaces has been reached.
*/
int wifi_nm_register_mgd_iface(struct wifi_nm_instance *nm, struct net_if *iface);
/**
* @brief Register a managed interface
*
* @param nm Pointer to Network manager instance
* @param type Wi-Fi type
* @param iface Managed interface
*
* @retval 0 If successful.
* @retval -EINVAL If invalid parameters were passed.
* @retval -ENOTSUP If the interface is not a Wi-Fi interface.
* @retval -ENOMEM If the maximum number of managed interfaces has been reached.
*/
int wifi_nm_register_mgd_type_iface(struct wifi_nm_instance *nm,
enum wifi_nm_iface_type type, struct net_if *iface);
/**
* @brief Unregister managed interface
*
* @param nm Pointer to Network manager instance
* @param iface Interface
* @return int 0 for OK; -EINVAL for invalid parameters; -ENOENT if interface is not registered
* with the Network manager.
*/
int wifi_nm_unregister_mgd_iface(struct wifi_nm_instance *nm, struct net_if *iface);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ZEPHYR_NET_WIFI_NM_H_ */
``` | /content/code_sandbox/include/zephyr/net/wifi_nm.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 991 |
```objective-c
/*
*
*/
/**
* @file
*
* @brief CoAP implementation for Zephyr.
*/
#ifndef ZEPHYR_INCLUDE_NET_COAP_H_
#define ZEPHYR_INCLUDE_NET_COAP_H_
/**
* @brief COAP library
* @defgroup coap COAP Library
* @since 1.10
* @version 0.8.0
* @ingroup networking
* @{
*/
#include <zephyr/types.h>
#include <stddef.h>
#include <stdbool.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/slist.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Set of CoAP packet options we are aware of.
 *
 * Users may add options other than these to their packets, provided
 * they know how to format them correctly. The only restriction is
 * that all options must be added to a packet in numeric order.
 *
 * Refer to RFC 7252, section 12.2 for more information (option
 * numbers are assigned in the IANA "CoAP Option Numbers" registry).
 */
enum coap_option_num {
	COAP_OPTION_IF_MATCH = 1,        /**< If-Match */
	COAP_OPTION_URI_HOST = 3,        /**< Uri-Host */
	COAP_OPTION_ETAG = 4,            /**< ETag */
	COAP_OPTION_IF_NONE_MATCH = 5,   /**< If-None-Match */
	COAP_OPTION_OBSERVE = 6,         /**< Observe (RFC 7641) */
	COAP_OPTION_URI_PORT = 7,        /**< Uri-Port */
	COAP_OPTION_LOCATION_PATH = 8,   /**< Location-Path */
	COAP_OPTION_URI_PATH = 11,       /**< Uri-Path */
	COAP_OPTION_CONTENT_FORMAT = 12, /**< Content-Format */
	COAP_OPTION_MAX_AGE = 14,        /**< Max-Age */
	COAP_OPTION_URI_QUERY = 15,      /**< Uri-Query */
	COAP_OPTION_ACCEPT = 17,         /**< Accept */
	COAP_OPTION_LOCATION_QUERY = 20, /**< Location-Query */
	COAP_OPTION_BLOCK2 = 23,         /**< Block2 (RFC 7959) */
	COAP_OPTION_BLOCK1 = 27,         /**< Block1 (RFC 7959) */
	COAP_OPTION_SIZE2 = 28,          /**< Size2 (RFC 7959) */
	COAP_OPTION_PROXY_URI = 35,      /**< Proxy-Uri */
	COAP_OPTION_PROXY_SCHEME = 39,   /**< Proxy-Scheme */
	COAP_OPTION_SIZE1 = 60,          /**< Size1 */
	COAP_OPTION_ECHO = 252,          /**< Echo (RFC 9175) */
	COAP_OPTION_REQUEST_TAG = 292    /**< Request-Tag (RFC 9175) */
};
/**
 * @brief Available request methods.
 *
 * To be used when creating a request or a response.
 *
 * GET/POST/PUT/DELETE come from RFC 7252; FETCH/PATCH/IPATCH were
 * added by RFC 8132.
 */
enum coap_method {
	COAP_METHOD_GET = 1,    /**< GET */
	COAP_METHOD_POST = 2,   /**< POST */
	COAP_METHOD_PUT = 3,    /**< PUT */
	COAP_METHOD_DELETE = 4, /**< DELETE */
	COAP_METHOD_FETCH = 5,  /**< FETCH (RFC 8132) */
	COAP_METHOD_PATCH = 6,  /**< PATCH (RFC 8132) */
	COAP_METHOD_IPATCH = 7, /**< IPATCH (RFC 8132) */
};
/** @cond INTERNAL_HIDDEN */
/* Low 3 bits of the CoAP code field carry the request method (see enum coap_method). */
#define COAP_REQUEST_MASK 0x07
/* CoAP protocol version 1, the only version defined by RFC 7252. */
#define COAP_VERSION_1 1U
/* 24-bit maximum for the Observe option value (it is at most 3 bytes on the wire). */
#define COAP_OBSERVE_MAX_AGE 0xFFFFFF
/** @endcond */
/**
 * @brief CoAP packets may be of one of these types.
 *
 * See RFC 7252, sections 4.2 and 4.3 for the message-type semantics.
 */
enum coap_msgtype {
	/**
	 * Confirmable message.
	 *
	 * The packet is a request or response the destination end-point must
	 * acknowledge.
	 */
	COAP_TYPE_CON = 0,
	/**
	 * Non-confirmable message.
	 *
	 * The packet is a request or response that doesn't
	 * require acknowledgements.
	 */
	COAP_TYPE_NON_CON = 1,
	/**
	 * Acknowledge.
	 *
	 * Response to a confirmable message.
	 */
	COAP_TYPE_ACK = 2,
	/**
	 * Reset.
	 *
	 * Rejecting a packet for any reason is done by sending a message
	 * of this type.
	 */
	COAP_TYPE_RESET = 3
};
/**
 * Utility macro to create a CoAP response code.
 *
 * The code field packs the class into the upper 3 bits and the detail
 * into the lower 5 bits (RFC 7252, section 3).
 *
 * @param class Class of the response code (ex. 2, 4, 5, ...)
 * @param det Detail of the response code
 * @return Response code literal
 */
#define COAP_MAKE_RESPONSE_CODE(class, det) (((class) << 5) | (det))
/**
 * @brief Set of response codes available for a response packet.
 *
 * To be used when creating a response.
 */
enum coap_response_code {
	/** 2.00 - OK */
	COAP_RESPONSE_CODE_OK = COAP_MAKE_RESPONSE_CODE(2, 0),
	/** 2.01 - Created */
	COAP_RESPONSE_CODE_CREATED = COAP_MAKE_RESPONSE_CODE(2, 1),
	/** 2.02 - Deleted */
	COAP_RESPONSE_CODE_DELETED = COAP_MAKE_RESPONSE_CODE(2, 2),
	/** 2.03 - Valid */
	COAP_RESPONSE_CODE_VALID = COAP_MAKE_RESPONSE_CODE(2, 3),
	/** 2.04 - Changed */
	COAP_RESPONSE_CODE_CHANGED = COAP_MAKE_RESPONSE_CODE(2, 4),
	/** 2.05 - Content */
	COAP_RESPONSE_CODE_CONTENT = COAP_MAKE_RESPONSE_CODE(2, 5),
	/** 2.31 - Continue */
	COAP_RESPONSE_CODE_CONTINUE = COAP_MAKE_RESPONSE_CODE(2, 31),
	/** 4.00 - Bad Request */
	COAP_RESPONSE_CODE_BAD_REQUEST = COAP_MAKE_RESPONSE_CODE(4, 0),
	/** 4.01 - Unauthorized */
	COAP_RESPONSE_CODE_UNAUTHORIZED = COAP_MAKE_RESPONSE_CODE(4, 1),
	/** 4.02 - Bad Option */
	COAP_RESPONSE_CODE_BAD_OPTION = COAP_MAKE_RESPONSE_CODE(4, 2),
	/** 4.03 - Forbidden */
	COAP_RESPONSE_CODE_FORBIDDEN = COAP_MAKE_RESPONSE_CODE(4, 3),
	/** 4.04 - Not Found */
	COAP_RESPONSE_CODE_NOT_FOUND = COAP_MAKE_RESPONSE_CODE(4, 4),
	/** 4.05 - Method Not Allowed */
	COAP_RESPONSE_CODE_NOT_ALLOWED = COAP_MAKE_RESPONSE_CODE(4, 5),
	/** 4.06 - Not Acceptable */
	COAP_RESPONSE_CODE_NOT_ACCEPTABLE = COAP_MAKE_RESPONSE_CODE(4, 6),
	/** 4.08 - Request Entity Incomplete */
	COAP_RESPONSE_CODE_INCOMPLETE = COAP_MAKE_RESPONSE_CODE(4, 8),
	/** 4.09 - Conflict */
	COAP_RESPONSE_CODE_CONFLICT = COAP_MAKE_RESPONSE_CODE(4, 9),
	/** 4.12 - Precondition Failed */
	COAP_RESPONSE_CODE_PRECONDITION_FAILED = COAP_MAKE_RESPONSE_CODE(4, 12),
	/** 4.13 - Request Entity Too Large */
	COAP_RESPONSE_CODE_REQUEST_TOO_LARGE = COAP_MAKE_RESPONSE_CODE(4, 13),
	/** 4.15 - Unsupported Content-Format */
	COAP_RESPONSE_CODE_UNSUPPORTED_CONTENT_FORMAT =
		COAP_MAKE_RESPONSE_CODE(4, 15),
	/** 4.22 - Unprocessable Entity */
	COAP_RESPONSE_CODE_UNPROCESSABLE_ENTITY = COAP_MAKE_RESPONSE_CODE(4, 22),
	/** 4.29 - Too Many Requests */
	COAP_RESPONSE_CODE_TOO_MANY_REQUESTS = COAP_MAKE_RESPONSE_CODE(4, 29),
	/** 5.00 - Internal Server Error */
	COAP_RESPONSE_CODE_INTERNAL_ERROR = COAP_MAKE_RESPONSE_CODE(5, 0),
	/** 5.01 - Not Implemented */
	COAP_RESPONSE_CODE_NOT_IMPLEMENTED = COAP_MAKE_RESPONSE_CODE(5, 1),
	/** 5.02 - Bad Gateway */
	COAP_RESPONSE_CODE_BAD_GATEWAY = COAP_MAKE_RESPONSE_CODE(5, 2),
	/** 5.03 - Service Unavailable */
	COAP_RESPONSE_CODE_SERVICE_UNAVAILABLE = COAP_MAKE_RESPONSE_CODE(5, 3),
	/** 5.04 - Gateway Timeout */
	COAP_RESPONSE_CODE_GATEWAY_TIMEOUT = COAP_MAKE_RESPONSE_CODE(5, 4),
	/** 5.05 - Proxying Not Supported */
	COAP_RESPONSE_CODE_PROXYING_NOT_SUPPORTED =
		COAP_MAKE_RESPONSE_CODE(5, 5)
};
/** @cond INTERNAL_HIDDEN */
/* 0.00 Empty message code (RFC 7252, section 4.1). */
#define COAP_CODE_EMPTY (0)
/* Maximum token length in bytes; the 4-bit TKL field allows 0-8 (RFC 7252, section 3). */
#define COAP_TOKEN_MAX_LEN 8UL
/** @endcond */
/**
 * @brief Set of Content-Format option values for CoAP.
 *
 * To be used when encoding or decoding a Content-Format option.
 *
 * Values are assigned in the IANA "CoAP Content-Formats" registry
 * (RFC 7252, section 12.3).
 */
enum coap_content_format {
	COAP_CONTENT_FORMAT_TEXT_PLAIN = 0,            /**< text/plain;charset=utf-8 */
	COAP_CONTENT_FORMAT_APP_LINK_FORMAT = 40,      /**< application/link-format */
	COAP_CONTENT_FORMAT_APP_XML = 41,              /**< application/xml */
	COAP_CONTENT_FORMAT_APP_OCTET_STREAM = 42,     /**< application/octet-stream */
	COAP_CONTENT_FORMAT_APP_EXI = 47,              /**< application/exi */
	COAP_CONTENT_FORMAT_APP_JSON = 50,             /**< application/json */
	COAP_CONTENT_FORMAT_APP_JSON_PATCH_JSON = 51,  /**< application/json-patch+json */
	COAP_CONTENT_FORMAT_APP_MERGE_PATCH_JSON = 52, /**< application/merge-patch+json */
	COAP_CONTENT_FORMAT_APP_CBOR = 60              /**< application/cbor */
};
/** @cond INTERNAL_HIDDEN */
/* Block option value helpers. RFC 7959 packs the option as NUM | M | SZX:
 * bits 4.. hold the block number (NUM), bit 3 the "more" flag (M), and
 * bits 0-2 the block size exponent (SZX).
 */
#define GET_BLOCK_NUM(v) ((v) >> 4)
#define GET_BLOCK_SIZE(v) (((v) & 0x7))
#define GET_MORE(v) (!!((v) & 0x08))
/** @endcond */
struct coap_observer;
struct coap_packet;
struct coap_pending;
struct coap_reply;
struct coap_resource;
/**
 * @typedef coap_method_t
 * @brief Type of the callback being called when a resource's method is
 * invoked by the remote entity.
 *
 * @param resource Resource whose method was invoked
 * @param request The incoming request packet
 * @param addr Address of the remote peer
 * @param addr_len Length of @a addr
 */
typedef int (*coap_method_t)(struct coap_resource *resource,
			     struct coap_packet *request,
			     struct sockaddr *addr, socklen_t addr_len);
/**
 * @typedef coap_notify_t
 * @brief Type of the callback being called when a resource has observers
 * to be informed when an update happens.
 *
 * @param resource Resource that was updated
 * @param observer Observer to be notified of the update
 */
typedef void (*coap_notify_t)(struct coap_resource *resource,
			      struct coap_observer *observer);
/**
 * @brief Description of CoAP resource.
 *
 * CoAP servers often want to register resources, so that clients can act on
 * them, by fetching their state or requesting updates to them.
 */
struct coap_resource {
	/** Which function to be called for each CoAP method */
	coap_method_t get, post, put, del, fetch, patch, ipatch;
	/** Notify function to call */
	coap_notify_t notify;
	/** Resource path, as a null-terminated array of path segments */
	const char * const *path;
	/** User specific opaque data */
	void *user_data;
	/** List of resource observers */
	sys_slist_t observers;
	/** Resource age.
	 * NOTE(review): looks like internal bookkeeping for Observe
	 * notifications — confirm against the implementation before use.
	 */
	int age;
};
/**
* @brief Represents a remote device that is observing a local resource.
*/
struct coap_observer {
/** Observer list node */
sys_snode_t list;
/** Observer connection end point information */
struct sockaddr addr;
/** Observer token */
uint8_t token[8];
/** Extended token length */
uint8_t tkl;
};
/**
 * @brief Representation of a CoAP Packet.
 *
 * The structure does not own @ref data; the caller-supplied buffer must
 * remain valid for as long as the packet is used (see coap_packet_parse()).
 */
struct coap_packet {
	uint8_t *data;    /**< User allocated buffer */
	uint16_t offset;  /**< CoAP lib maintains offset while adding data */
	uint16_t max_len; /**< Max CoAP packet data length */
	uint8_t hdr_len;  /**< CoAP header length */
	uint16_t opt_len; /**< Total options length (delta + len + value) */
	uint16_t delta;   /**< Used for delta calculation in CoAP packet */
#if defined(CONFIG_COAP_KEEP_USER_DATA) || defined(DOXYGEN)
	/**
	 * Application specific user data.
	 * Only available when @kconfig{CONFIG_COAP_KEEP_USER_DATA} is enabled.
	 */
	void *user_data;
#endif
};
/**
 * @brief Representation of a CoAP option.
 */
struct coap_option {
	uint16_t delta; /**< Option delta */
#if defined(CONFIG_COAP_EXTENDED_OPTIONS_LEN)
	uint16_t len;   /**< Option length */
	/** Option value */
	uint8_t value[CONFIG_COAP_EXTENDED_OPTIONS_LEN_VALUE];
#else
	uint8_t len;       /**< Option length */
	uint8_t value[12]; /**< Option value */
#endif
};
/**
 * @typedef coap_reply_t
 * @brief Helper function to be called when a response matches a
 * pending request.
 * When sending blocks, the callback is only executed when the
 * reply of the last block is received.
 * i.e. it is not called when the code of the reply is 'continue' (2.31).
 *
 * @param response The received response
 * @param reply The reply descriptor that matched the response
 * @param from Address from which the response was received
 */
typedef int (*coap_reply_t)(const struct coap_packet *response,
			    struct coap_reply *reply,
			    const struct sockaddr *from);
/**
 * @brief CoAP transmission parameters.
 */
struct coap_transmission_parameters {
	/** Initial ACK timeout. Value is used as a base value to retry pending CoAP packets.
	 * NOTE(review): presumably milliseconds, matching coap_pending::timeout — confirm.
	 */
	uint32_t ack_timeout;
	/** Set CoAP retry backoff factor. A value of 200 means a factor of 2.0. */
	uint16_t coap_backoff_percent;
	/** Maximum number of retransmissions. */
	uint8_t max_retransmission;
};
/**
 * @brief Represents a request awaiting for an acknowledgment (ACK).
 */
struct coap_pending {
	struct sockaddr addr; /**< Remote address */
	int64_t t0;           /**< Time when the request was sent */
	uint32_t timeout;     /**< Timeout in ms */
	uint16_t id;          /**< Message id */
	/** User allocated buffer; not owned, must outlive this entry
	 * (see coap_pending_init()).
	 */
	uint8_t *data;
	uint16_t len;    /**< Length of the CoAP packet */
	uint8_t retries; /**< Number of times the request has been sent */
	struct coap_transmission_parameters params; /**< Transmission parameters */
};
/**
* @brief Represents the handler for the reply of a request, it is
* also used when observing resources.
*/
struct coap_reply {
/** CoAP reply callback */
coap_reply_t reply;
/** User specific opaque data */
void *user_data;
/** Reply age */
int age;
/** Reply id */
uint16_t id;
/** Reply token */
uint8_t token[8];
/** Extended token length */
uint8_t tkl;
};
/**
* @brief Returns the version present in a CoAP packet.
*
* @param cpkt CoAP packet representation
*
* @return the CoAP version in packet
*/
uint8_t coap_header_get_version(const struct coap_packet *cpkt);
/**
* @brief Returns the type of the CoAP packet.
*
* @param cpkt CoAP packet representation
*
* @return the type of the packet
*/
uint8_t coap_header_get_type(const struct coap_packet *cpkt);
/**
* @brief Returns the token (if any) in the CoAP packet.
*
* @param cpkt CoAP packet representation
* @param token Where to store the token, must point to a buffer containing
* at least COAP_TOKEN_MAX_LEN bytes
*
* @return Token length in the CoAP packet (0 - COAP_TOKEN_MAX_LEN).
*/
uint8_t coap_header_get_token(const struct coap_packet *cpkt, uint8_t *token);
/**
* @brief Returns the code of the CoAP packet.
*
* @param cpkt CoAP packet representation
*
* @return the code present in the packet
*/
uint8_t coap_header_get_code(const struct coap_packet *cpkt);
/**
* @brief Modifies the code of the CoAP packet.
*
* @param cpkt CoAP packet representation
* @param code CoAP code
* @return 0 on success, -EINVAL on failure
*/
int coap_header_set_code(const struct coap_packet *cpkt, uint8_t code);
/**
* @brief Returns the message id associated with the CoAP packet.
*
* @param cpkt CoAP packet representation
*
* @return the message id present in the packet
*/
uint16_t coap_header_get_id(const struct coap_packet *cpkt);
/**
* @brief Returns the data pointer and length of the CoAP packet.
*
* @param cpkt CoAP packet representation
* @param len Total length of CoAP payload
*
* @return data pointer and length if payload exists
* NULL pointer and length set to 0 in case there is no payload
*/
const uint8_t *coap_packet_get_payload(const struct coap_packet *cpkt,
uint16_t *len);
/**
* @brief Verify if CoAP URI path matches with provided options.
*
* @param path Null-terminated array of strings.
* @param options Parsed options from coap_packet_parse()
* @param opt_num Number of options
*
* @return true if the CoAP URI path matches,
* false otherwise.
*/
bool coap_uri_path_match(const char * const *path,
struct coap_option *options,
uint8_t opt_num);
/**
* @brief Parses the CoAP packet in data, validating it and
* initializing @a cpkt. @a data must remain valid while @a cpkt is used.
*
* @param cpkt Packet to be initialized from received @a data.
* @param data Data containing a CoAP packet, its @a data pointer is
* positioned on the start of the CoAP packet.
* @param len Length of the data
* @param options Parse options and cache its details.
* @param opt_num Number of options
*
* @retval 0 in case of success.
* @retval -EINVAL in case of invalid input args.
* @retval -EBADMSG in case of malformed coap packet header.
* @retval -EILSEQ in case of malformed coap options.
*/
int coap_packet_parse(struct coap_packet *cpkt, uint8_t *data, uint16_t len,
struct coap_option *options, uint8_t opt_num);
/**
* @brief Parses provided coap path (with/without query) or query and appends
* that as options to the @a cpkt.
*
* @param cpkt Packet to append path and query options for.
* @param path Null-terminated string of coap path, query or both.
*
* @retval 0 in case of success or negative in case of error.
*/
int coap_packet_set_path(struct coap_packet *cpkt, const char *path);
/**
* @brief Creates a new CoAP Packet from input data.
*
* @param cpkt New packet to be initialized using the storage from @a data.
* @param data Data that will contain a CoAP packet information
* @param max_len Maximum allowable length of data
* @param ver CoAP header version
* @param type CoAP header type
* @param token_len CoAP header token length
* @param token CoAP header token
* @param code CoAP header code
* @param id CoAP header message id
*
* @return 0 in case of success or negative in case of error.
*/
int coap_packet_init(struct coap_packet *cpkt, uint8_t *data, uint16_t max_len,
uint8_t ver, uint8_t type, uint8_t token_len,
const uint8_t *token, uint8_t code, uint16_t id);
/**
* @brief Create a new CoAP Acknowledgment message for given request.
*
* This function works like @ref coap_packet_init, filling CoAP header type,
* CoAP header token, and CoAP header message id fields according to
* acknowledgment rules.
*
* @param cpkt New packet to be initialized using the storage from @a data.
* @param req CoAP request packet that is being acknowledged
* @param data Data that will contain a CoAP packet information
* @param max_len Maximum allowable length of data
* @param code CoAP header code
*
* @return 0 in case of success or negative in case of error.
*/
int coap_ack_init(struct coap_packet *cpkt, const struct coap_packet *req,
uint8_t *data, uint16_t max_len, uint8_t code);
/**
* @brief Returns a randomly generated array of 8 bytes, that can be
* used as a message's token.
*
* @return a 8-byte pseudo-random token.
*/
uint8_t *coap_next_token(void);
/**
* @brief Helper to generate message ids
*
* @return a new message id
*/
uint16_t coap_next_id(void);
/**
* @brief Return the values associated with the option of value @a
* code.
*
* @param cpkt CoAP packet representation
* @param code Option number to look for
* @param options Array of #coap_option where to store the value
* of the options found
* @param veclen Number of elements in the options array
*
* @return The number of options found in packet matching code,
* negative on error.
*/
int coap_find_options(const struct coap_packet *cpkt, uint16_t code,
struct coap_option *options, uint16_t veclen);
/**
* @brief Appends an option to the packet.
*
* Note: options can be added out of numeric order of their codes. But
* it's more efficient to add them in order.
*
* @param cpkt Packet to be updated
* @param code Option code to add to the packet, see #coap_option_num
* @param value Pointer to the value of the option, will be copied to the
* packet
* @param len Size of the data to be added
*
* @return 0 in case of success or negative in case of error.
*/
int coap_packet_append_option(struct coap_packet *cpkt, uint16_t code,
const uint8_t *value, uint16_t len);
/**
* @brief Remove an option from the packet.
*
* @param cpkt Packet to be updated
* @param code Option code to remove from the packet, see #coap_option_num
*
* @return 0 in case of success or negative in case of error.
*/
int coap_packet_remove_option(struct coap_packet *cpkt, uint16_t code);
/**
* @brief Converts an option to its integer representation.
*
* Assumes that the number is encoded in the network byte order in the
* option.
*
* @param option Pointer to the option value, retrieved by
* coap_find_options()
*
* @return The integer representation of the option
*/
unsigned int coap_option_value_to_int(const struct coap_option *option);
/**
* @brief Appends an integer value option to the packet.
*
* The option must be added in numeric order of their codes, and the
* least amount of bytes will be used to encode the value.
*
* @param cpkt Packet to be updated
* @param code Option code to add to the packet, see #coap_option_num
* @param val Integer value to be added
*
* @return 0 in case of success or negative in case of error.
*/
int coap_append_option_int(struct coap_packet *cpkt, uint16_t code,
unsigned int val);
/**
* @brief Append payload marker to CoAP packet
*
* @param cpkt Packet to append the payload marker (0xFF)
*
* @return 0 in case of success or negative in case of error.
*/
int coap_packet_append_payload_marker(struct coap_packet *cpkt);
/**
* @brief Append payload to CoAP packet
*
* @param cpkt Packet to append the payload
* @param payload CoAP packet payload
* @param payload_len CoAP packet payload len
*
* @return 0 in case of success or negative in case of error.
*/
int coap_packet_append_payload(struct coap_packet *cpkt, const uint8_t *payload,
uint16_t payload_len);
/**
* @brief Check if a CoAP packet is a CoAP request.
*
* @param cpkt Packet to be checked.
*
* @return true if the packet is a request,
* false otherwise.
*/
bool coap_packet_is_request(const struct coap_packet *cpkt);
/**
* @brief When a request is received, call the appropriate methods of
* the matching resources.
*
* @param cpkt Packet received
* @param resources Array of known resources
* @param resources_len Number of resources in the array
* @param options Parsed options from coap_packet_parse()
* @param opt_num Number of options
* @param addr Peer address
* @param addr_len Peer address length
*
* @retval >= 0 in case of success.
* @retval -ENOTSUP in case of invalid request code.
* @retval -EPERM in case resource handler is not implemented.
* @retval -ENOENT in case the resource is not found.
*/
int coap_handle_request_len(struct coap_packet *cpkt,
struct coap_resource *resources,
size_t resources_len,
struct coap_option *options,
uint8_t opt_num,
struct sockaddr *addr, socklen_t addr_len);
/**
* @brief When a request is received, call the appropriate methods of
* the matching resources.
*
* @param cpkt Packet received
* @param resources Array of known resources (terminated with empty resource)
* @param options Parsed options from coap_packet_parse()
* @param opt_num Number of options
* @param addr Peer address
* @param addr_len Peer address length
*
* @retval >= 0 in case of success.
* @retval -ENOTSUP in case of invalid request code.
* @retval -EPERM in case resource handler is not implemented.
* @retval -ENOENT in case the resource is not found.
*/
int coap_handle_request(struct coap_packet *cpkt,
struct coap_resource *resources,
struct coap_option *options,
uint8_t opt_num,
struct sockaddr *addr, socklen_t addr_len);
/**
 * Represents the size of each block that will be transferred using
 * block-wise transfers [RFC7959]:
 *
 * Each entry maps directly to the value that is used in the wire.
 */
enum coap_block_size {
	COAP_BLOCK_16,   /**< 16-byte block size */
	COAP_BLOCK_32,   /**< 32-byte block size */
	COAP_BLOCK_64,   /**< 64-byte block size */
	COAP_BLOCK_128,  /**< 128-byte block size */
	COAP_BLOCK_256,  /**< 256-byte block size */
	COAP_BLOCK_512,  /**< 512-byte block size */
	COAP_BLOCK_1024, /**< 1024-byte block size */
};
/**
 * @brief Helper for converting the enumeration to the size expressed
 * in bytes.
 *
 * @param block_size The block size to be converted
 *
 * @return The size in bytes that the block_size represents
 */
static inline uint16_t coap_block_size_to_bytes(
	enum coap_block_size block_size)
{
	/* COAP_BLOCK_16 is 16 bytes; each subsequent entry doubles. */
	return (uint16_t)(16U << block_size);
}
/**
* @brief Helper for converting block size in bytes to enumeration.
*
* NOTE: Only valid CoAP block sizes map correctly.
*
* @param bytes CoAP block size in bytes.
* @return enum coap_block_size
*/
static inline enum coap_block_size coap_bytes_to_block_size(uint16_t bytes)
{
int sz = u32_count_trailing_zeros(bytes) - 4;
if (sz < COAP_BLOCK_16) {
return COAP_BLOCK_16;
}
if (sz > COAP_BLOCK_1024) {
return COAP_BLOCK_1024;
}
return (enum coap_block_size)sz;
}
/**
 * @brief Represents the current state of a block-wise transaction.
 */
struct coap_block_context {
	/** Total size of the block-wise transaction */
	size_t total_size;
	/** Current progress, in bytes, of the block-wise transaction
	 * (advanced by coap_next_block() and friends)
	 */
	size_t current;
	/** Block size */
	enum coap_block_size block_size;
};
/**
* @brief Initializes the context of a block-wise transfer.
*
* @param ctx The context to be initialized
* @param block_size The size of the block
* @param total_size The total size of the transfer, if known
*
* @return 0 in case of success or negative in case of error.
*/
int coap_block_transfer_init(struct coap_block_context *ctx,
enum coap_block_size block_size,
size_t total_size);
/**
* @brief Append BLOCK1 or BLOCK2 option to the packet.
*
* If the CoAP packet is a request then BLOCK1 is appended
* otherwise BLOCK2 is appended.
*
* @param cpkt Packet to be updated
* @param ctx Block context from which to retrieve the
* information for the block option
*
* @return 0 in case of success or negative in case of error.
*/
int coap_append_descriptive_block_option(struct coap_packet *cpkt, struct coap_block_context *ctx);
/**
* @brief Check if a descriptive block option is set in the packet.
*
* If the CoAP packet is a request then an available BLOCK1 option
* would be checked otherwise a BLOCK2 option would be checked.
*
* @param cpkt Packet to be checked.
*
* @return true if the corresponding block option is set,
* false otherwise.
*/
bool coap_has_descriptive_block_option(struct coap_packet *cpkt);
/**
* @brief Remove BLOCK1 or BLOCK2 option from the packet.
*
* If the CoAP packet is a request then BLOCK1 is removed
* otherwise BLOCK2 is removed.
*
* @param cpkt Packet to be updated.
*
* @return 0 in case of success or negative in case of error.
*/
int coap_remove_descriptive_block_option(struct coap_packet *cpkt);
/**
* @brief Check if BLOCK1 or BLOCK2 option has more flag set
*
* @param cpkt Packet to be checked.
* @return true If more flag is set in BLOCK1 or BLOCK2
* @return false If MORE flag is not set or BLOCK header not found.
*/
bool coap_block_has_more(struct coap_packet *cpkt);
/**
* @brief Append BLOCK1 option to the packet.
*
* @param cpkt Packet to be updated
* @param ctx Block context from which to retrieve the
* information for the Block1 option
*
* @return 0 in case of success or negative in case of error.
*/
int coap_append_block1_option(struct coap_packet *cpkt,
struct coap_block_context *ctx);
/**
* @brief Append BLOCK2 option to the packet.
*
* @param cpkt Packet to be updated
* @param ctx Block context from which to retrieve the
* information for the Block2 option
*
* @return 0 in case of success or negative in case of error.
*/
int coap_append_block2_option(struct coap_packet *cpkt,
struct coap_block_context *ctx);
/**
* @brief Append SIZE1 option to the packet.
*
* @param cpkt Packet to be updated
* @param ctx Block context from which to retrieve the
* information for the Size1 option
*
* @return 0 in case of success or negative in case of error.
*/
int coap_append_size1_option(struct coap_packet *cpkt,
struct coap_block_context *ctx);
/**
* @brief Append SIZE2 option to the packet.
*
* @param cpkt Packet to be updated
* @param ctx Block context from which to retrieve the
* information for the Size2 option
*
* @return 0 in case of success or negative in case of error.
*/
int coap_append_size2_option(struct coap_packet *cpkt,
struct coap_block_context *ctx);
/**
* @brief Get the integer representation of a CoAP option.
*
* @param cpkt Packet to be inspected
* @param code CoAP option code
*
* @return Integer value >= 0 in case of success or negative in case
* of error.
*/
int coap_get_option_int(const struct coap_packet *cpkt, uint16_t code);
/**
* @brief Get the block size, more flag and block number from the
* CoAP block1 option.
*
* @param cpkt Packet to be inspected
* @param has_more Is set to the value of the more flag
* @param block_number Is set to the number of the block
*
* @return Integer value of the block size in case of success
* or negative in case of error.
*/
int coap_get_block1_option(const struct coap_packet *cpkt, bool *has_more, uint32_t *block_number);
/**
* @brief Get values from CoAP block2 option.
*
* Decode block number, more flag and block size from option.
*
* @param cpkt Packet to be inspected
* @param has_more Is set to the value of the more flag
* @param block_number Is set to the number of the block
*
* @return Integer value of the block size in case of success
* or negative in case of error.
*/
int coap_get_block2_option(const struct coap_packet *cpkt, bool *has_more,
uint32_t *block_number);
/**
* @brief Retrieves BLOCK{1,2} and SIZE{1,2} from @a cpkt and updates
* @a ctx accordingly.
*
* @param cpkt Packet in which to look for block-wise transfers options
* @param ctx Block context to be updated
*
* @return 0 in case of success or negative in case of error.
*/
int coap_update_from_block(const struct coap_packet *cpkt,
struct coap_block_context *ctx);
/**
* @brief Updates @a ctx according to @a option set in @a cpkt
* so after this is called the current entry indicates the correct
* offset in the body of data being transferred.
*
* @param cpkt Packet in which to look for block-wise transfers options
* @param ctx Block context to be updated
* @param option Either COAP_OPTION_BLOCK1 or COAP_OPTION_BLOCK2
*
* @return The offset in the block-wise transfer, 0 if the transfer
* has finished or a negative value in case of an error.
*/
int coap_next_block_for_option(const struct coap_packet *cpkt,
struct coap_block_context *ctx,
enum coap_option_num option);
/**
* @brief Updates @a ctx so after this is called the current entry
* indicates the correct offset in the body of data being
* transferred.
*
* @param cpkt Packet in which to look for block-wise transfers options
* @param ctx Block context to be updated
*
* @return The offset in the block-wise transfer, 0 if the transfer
* has finished.
*/
size_t coap_next_block(const struct coap_packet *cpkt,
struct coap_block_context *ctx);
/**
* @brief Indicates that the remote device referenced by @a addr, with
* @a request, wants to observe a resource.
*
* @param observer Observer to be initialized
* @param request Request on which the observer will be based
* @param addr Address of the remote device
*/
void coap_observer_init(struct coap_observer *observer,
const struct coap_packet *request,
const struct sockaddr *addr);
/**
 * @brief After the observer is initialized, associate the observer
 * with a resource.
*
* @param resource Resource to add an observer
* @param observer Observer to be added
*
* @return true if this is the first observer added to this resource.
*/
bool coap_register_observer(struct coap_resource *resource,
struct coap_observer *observer);
/**
* @brief Remove this observer from the list of registered observers
* of that resource.
*
* @param resource Resource in which to remove the observer
* @param observer Observer to be removed
*
* @return true if the observer was found and removed.
*/
bool coap_remove_observer(struct coap_resource *resource,
struct coap_observer *observer);
/**
* @brief Returns the observer that matches address @a addr
* and has token @a token.
*
* @param observers Pointer to the array of observers
* @param len Size of the array of observers
* @param addr Address of the endpoint observing a resource
* @param token Pointer to the token
* @param token_len Length of valid bytes in the token
*
* @return A pointer to a observer if a match is found, NULL
* otherwise.
*/
struct coap_observer *coap_find_observer(
struct coap_observer *observers, size_t len,
const struct sockaddr *addr,
const uint8_t *token, uint8_t token_len);
/**
* @brief Returns the observer that matches address @a addr.
*
* @param observers Pointer to the array of observers
* @param len Size of the array of observers
* @param addr Address of the endpoint observing a resource
*
* @note The function coap_find_observer() should be preferred
* if both the observer's address and token are known.
*
* @return A pointer to a observer if a match is found, NULL
* otherwise.
*/
struct coap_observer *coap_find_observer_by_addr(
struct coap_observer *observers, size_t len,
const struct sockaddr *addr);
/**
* @brief Returns the observer that has token @a token.
*
* @param observers Pointer to the array of observers
* @param len Size of the array of observers
* @param token Pointer to the token
* @param token_len Length of valid bytes in the token
*
* @note The function coap_find_observer() should be preferred
* if both the observer's address and token are known.
*
* @return A pointer to a observer if a match is found, NULL
* otherwise.
*/
struct coap_observer *coap_find_observer_by_token(
struct coap_observer *observers, size_t len,
const uint8_t *token, uint8_t token_len);
/**
* @brief Returns the next available observer representation.
*
* @param observers Pointer to the array of observers
* @param len Size of the array of observers
*
* @return A pointer to a observer if there's an available observer,
* NULL otherwise.
*/
struct coap_observer *coap_observer_next_unused(
struct coap_observer *observers, size_t len);
/**
* @brief Indicates that a reply is expected for @a request.
*
* @param reply Reply structure to be initialized
* @param request Request from which @a reply will be based
*/
void coap_reply_init(struct coap_reply *reply,
const struct coap_packet *request);
/**
* @brief Initialize a pending request with a request.
*
 * The request's fields are copied into the pending struct, so @a
 * request doesn't have to live for as long as the pending struct
 * lives, but the buffer behind "data" must remain valid for at least
 * that long.
*
* @param pending Structure representing the waiting for a
* confirmation message, initialized with data from @a request
* @param request Message waiting for confirmation
* @param addr Address to send the retransmission
* @param params Pointer to the CoAP transmission parameters struct,
* or NULL to use default values
*
* @return 0 in case of success or negative in case of error.
*/
int coap_pending_init(struct coap_pending *pending,
const struct coap_packet *request,
const struct sockaddr *addr,
const struct coap_transmission_parameters *params);
/**
* @brief Returns the next available pending struct, that can be used
* to track the retransmission status of a request.
*
* @param pendings Pointer to the array of #coap_pending structures
* @param len Size of the array of #coap_pending structures
*
* @return pointer to a free #coap_pending structure, NULL in case
* none could be found.
*/
struct coap_pending *coap_pending_next_unused(
struct coap_pending *pendings, size_t len);
/**
* @brief Returns the next available reply struct, so it can be used
* to track replies and notifications received.
*
* @param replies Pointer to the array of #coap_reply structures
* @param len Size of the array of #coap_reply structures
*
* @return pointer to a free #coap_reply structure, NULL in case
* none could be found.
*/
struct coap_reply *coap_reply_next_unused(
struct coap_reply *replies, size_t len);
/**
 * @brief After a response is received, returns whether any
 * matching pending request exists. User has to clear all pending
 * retransmissions related to that response by calling
 * coap_pending_clear().
 *
 * @param response The received response
 * @param pendings Pointer to the array of #coap_pending structures
 * @param len Size of the array of #coap_pending structures
*
* @return pointer to the associated #coap_pending structure, NULL in
* case none could be found.
*/
struct coap_pending *coap_pending_received(
const struct coap_packet *response,
struct coap_pending *pendings, size_t len);
/**
* @brief After a response is received, call coap_reply_t handler
* registered in #coap_reply structure
*
* @param response A response received
* @param from Address from which the response was received
* @param replies Pointer to the array of #coap_reply structures
* @param len Size of the array of #coap_reply structures
*
* @return Pointer to the reply matching the packet received, NULL if
* none could be found.
*/
struct coap_reply *coap_response_received(
const struct coap_packet *response,
const struct sockaddr *from,
struct coap_reply *replies, size_t len);
/**
* @brief Returns the next pending about to expire, pending->timeout
* informs how many ms to next expiration.
*
* @param pendings Pointer to the array of #coap_pending structures
* @param len Size of the array of #coap_pending structures
*
* @return The next #coap_pending to expire, NULL if none is about to
* expire.
*/
struct coap_pending *coap_pending_next_to_expire(
struct coap_pending *pendings, size_t len);
/**
* @brief After a request is sent, user may want to cycle the pending
* retransmission so the timeout is updated.
*
* @param pending Pending representation to have its timeout updated
*
* @return false if this is the last retransmission.
*/
bool coap_pending_cycle(struct coap_pending *pending);
/**
* @brief Cancels the pending retransmission, so it again becomes
* available.
*
* @param pending Pending representation to be canceled
*/
void coap_pending_clear(struct coap_pending *pending);
/**
* @brief Cancels all pending retransmissions, so they become
* available again.
*
* @param pendings Pointer to the array of #coap_pending structures
* @param len Size of the array of #coap_pending structures
*/
void coap_pendings_clear(struct coap_pending *pendings, size_t len);
/**
* @brief Count number of pending requests.
*
 * @param pendings Array of pending requests.
 * @param len Number of elements in array.
* @return count of elements where timeout is not zero.
*/
size_t coap_pendings_count(struct coap_pending *pendings, size_t len);
/**
* @brief Cancels awaiting for this reply, so it becomes available
* again. User responsibility to free the memory associated with data.
*
* @param reply The reply to be canceled
*/
void coap_reply_clear(struct coap_reply *reply);
/**
* @brief Cancels all replies, so they become available again.
*
* @param replies Pointer to the array of #coap_reply structures
* @param len Size of the array of #coap_reply structures
*/
void coap_replies_clear(struct coap_reply *replies, size_t len);
/**
* @brief Indicates that this resource was updated and that the @a
* notify callback should be called for every registered observer.
*
* @param resource Resource that was updated
*
* @return 0 in case of success or negative in case of error.
*/
int coap_resource_notify(struct coap_resource *resource);
/**
* @brief Returns if this request is enabling observing a resource.
*
* @param request Request to be checked
*
* @return True if the request is enabling observing a resource, False
* otherwise
*/
bool coap_request_is_observe(const struct coap_packet *request);
/**
* @brief Get currently active CoAP transmission parameters.
*
* @return CoAP transmission parameters structure.
*/
struct coap_transmission_parameters coap_get_transmission_parameters(void);
/**
* @brief Set CoAP transmission parameters.
*
* @param params Pointer to the transmission parameters structure.
*/
void coap_set_transmission_parameters(const struct coap_transmission_parameters *params);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_COAP_H_ */
``` | /content/code_sandbox/include/zephyr/net/coap.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 9,725 |
```objective-c
/*
*
*/
/**
* @file
* @brief Definitions for IEEE 802.3 management interface
*/
#ifndef ZEPHYR_INCLUDE_NET_MDIO_H_
#define ZEPHYR_INCLUDE_NET_MDIO_H_
/**
* @brief Definitions for IEEE 802.3 management interface
* @defgroup ethernet_mdio IEEE 802.3 management interface
* @since 3.5
* @version 0.8.0
* @ingroup ethernet
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/** MDIO transaction operation code */
/** MDIO transaction operation code.
 *
 * Clause 22 and Clause 45 management frames reuse the same numeric
 * opcode values with different meanings (e.g. the value 2 is a read
 * in Clause 22 but post-read-increment-address in Clause 45), hence
 * the separate C22/C45 enumerators below.
 */
enum mdio_opcode {
	/** IEEE 802.3 22.2.4.5.4 write operation */
	MDIO_OP_C22_WRITE = 1,
	/** IEEE 802.3 22.2.4.5.4 read operation */
	MDIO_OP_C22_READ = 2,
	/** IEEE 802.3 45.3.4 address operation */
	MDIO_OP_C45_ADDRESS = 0,
	/** IEEE 802.3 45.3.4 write operation */
	MDIO_OP_C45_WRITE = 1,
	/** IEEE 802.3 45.3.4 post-read-increment-address operation */
	MDIO_OP_C45_READ_INC = 2,
	/** IEEE 802.3 45.3.4 read operation */
	MDIO_OP_C45_READ = 3
};
/* MDIO Manageable Device addresses */
/** Physical Medium Attachment / Physical Medium Dependent */
#define MDIO_MMD_PMAPMD 0x01U
/** WAN Interface Sublayer */
#define MDIO_MMD_WIS 0x02U
/** Physical Coding Sublayer */
#define MDIO_MMD_PCS 0x03U
/** PHY Extender Sublayer */
#define MDIO_MMD_PHYXS 0x04U
/** DTE Extender Sublayer */
#define MDIO_MMD_DTEXS 0x05U
/** Transmission Convergence */
#define MDIO_MMD_TC 0x06U
/** Auto-negotiation */
#define MDIO_MMD_AN 0x07U
/** Separated PMA (1) */
#define MDIO_MMD_SEPARATED_PMA1 0x08U
/** Separated PMA (2) */
#define MDIO_MMD_SEPARATED_PMA2 0x09U
/** Separated PMA (3) */
#define MDIO_MMD_SEPARATED_PMA3 0x0AU
/** Separated PMA (4) */
#define MDIO_MMD_SEPARATED_PMA4 0x0BU
/** Clause 22 extension */
#define MDIO_MMD_C22EXT 0x1DU
/** Vendor Specific 1 */
#define MDIO_MMD_VENDOR_SPECIFIC1 0x1EU
/** Vendor Specific 2 */
#define MDIO_MMD_VENDOR_SPECIFIC2 0x1FU
/* MDIO generic registers */
/** Control 1 */
#define MDIO_CTRL1 0x0000U
/** Status 1 */
#define MDIO_STAT1 0x0001U
/** Device identifier (1) */
#define MDIO_DEVID1 0x0002U
/** Device identifier (2) */
#define MDIO_DEVID2 0x0003U
/** Speed ability */
#define MDIO_SPEED 0x0004U
/** Devices in package (1) */
#define MDIO_DEVS1 0x0005U
/** Devices in package (2) */
#define MDIO_DEVS2 0x0006U
/** Control 2 */
#define MDIO_CTRL2 0x0007U
/** Status 2 */
#define MDIO_STAT2 0x0008U
/** Package identifier (1) */
#define MDIO_PKGID1 0x000EU
/** Package identifier (2) */
#define MDIO_PKGID2 0x000FU
/* PCS Register: EEE capability Register */
#define MDIO_PCS_EEE_CAP 0x0014U
/* Auto-negotiation Register: EEE advertisement Register */
#define MDIO_AN_EEE_ADV 0x003CU
/* BASE-T1 registers */
/** BASE-T1 Auto-negotiation control */
#define MDIO_AN_T1_CTRL 0x0200U
/** BASE-T1 Auto-negotiation status */
#define MDIO_AN_T1_STAT 0x0201U
/** BASE-T1 Auto-negotiation advertisement register [15:0] */
#define MDIO_AN_T1_ADV_L 0x0202U
/** BASE-T1 Auto-negotiation advertisement register [31:16] */
#define MDIO_AN_T1_ADV_M 0x0203U
/** BASE-T1 Auto-negotiation advertisement register [47:32] */
#define MDIO_AN_T1_ADV_H 0x0204U
/** BASE-T1 PMA/PMD control register */
#define MDIO_PMA_PMD_BT1_CTRL 0x0834U
/* BASE-T1 Auto-negotiation Control register */
/** Auto-negotiation Restart */
#define MDIO_AN_T1_CTRL_RESTART BIT(9)
/** Auto-negotiation Enable */
#define MDIO_AN_T1_CTRL_EN BIT(12)
/* BASE-T1 Auto-negotiation Status register */
/** Link Status */
#define MDIO_AN_T1_STAT_LINK_STATUS BIT(2)
/** Auto-negotiation Ability */
#define MDIO_AN_T1_STAT_ABLE BIT(3)
/** Auto-negotiation Remote Fault */
#define MDIO_AN_T1_STAT_REMOTE_FAULT BIT(4)
/** Auto-negotiation Complete */
#define MDIO_AN_T1_STAT_COMPLETE BIT(5)
/** Page Received */
#define MDIO_AN_T1_STAT_PAGE_RX BIT(6)
/* BASE-T1 Auto-negotiation Advertisement register [15:0] */
/** Pause Ability */
#define MDIO_AN_T1_ADV_L_PAUSE_CAP BIT(10)
/** Pause Ability */
#define MDIO_AN_T1_ADV_L_PAUSE_ASYM BIT(11)
/** Force Master/Slave Configuration */
#define MDIO_AN_T1_ADV_L_FORCE_MS BIT(12)
/** Remote Fault */
#define MDIO_AN_T1_ADV_L_REMOTE_FAULT BIT(13)
/** Acknowledge (ACK) */
#define MDIO_AN_T1_ADV_L_ACK BIT(14)
/** Next Page Request */
#define MDIO_AN_T1_ADV_L_NEXT_PAGE_REQ BIT(15)
/* BASE-T1 Auto-negotiation Advertisement register [31:16] */
/** 10BASE-T1L Ability */
#define MDIO_AN_T1_ADV_M_B10L BIT(14)
/** Master/slave Configuration */
#define MDIO_AN_T1_ADV_M_MST BIT(4)
/* BASE-T1 Auto-negotiation Advertisement register [47:32] */
/** 10BASE-T1L High Level Transmit Operating Mode Request */
#define MDIO_AN_T1_ADV_H_10L_TX_HI_REQ BIT(12)
/** 10BASE-T1L High Level Transmit Operating Mode Ability */
#define MDIO_AN_T1_ADV_H_10L_TX_HI BIT(13)
/* BASE-T1 PMA/PMD control register */
/** BASE-T1 master/slave configuration */
#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST BIT(14)
/* 10BASE-T1L registers */
/** 10BASE-T1L PMA control */
#define MDIO_PMA_B10L_CTRL 0x08F6U
/** 10BASE-T1L PMA status */
#define MDIO_PMA_B10L_STAT 0x08F7U
/** 10BASE-T1L PMA link status */
#define MDIO_PMA_B10L_LINK_STAT 0x8302U
/** 10BASE-T1L PCS control */
#define MDIO_PCS_B10L_CTRL 0x08E6U
/** 10BASE-T1L PCS status */
#define MDIO_PCS_B10L_STAT 0x08E7U
/* 10BASE-T1L PMA control register */
/** 10BASE-T1L Transmit Disable Mode */
#define MDIO_PMA_B10L_CTRL_TX_DIS_MODE_EN BIT(14)
/** 10BASE-T1L Transmit Voltage Amplitude Control */
#define MDIO_PMA_B10L_CTRL_TX_LVL_HI BIT(12)
/** 10BASE-T1L EEE Enable */
#define MDIO_PMA_B10L_CTRL_EEE BIT(10)
/** 10BASE-T1L PMA Loopback */
#define MDIO_PMA_B10L_CTRL_LB_PMA_LOC_EN BIT(0)
/* 10BASE-T1L PMA status register */
/** 10BASE-T1L PMA receive link up */
#define MDIO_PMA_B10L_STAT_LINK BIT(0)
/** 10BASE-T1L Fault condition detected */
#define MDIO_PMA_B10L_STAT_FAULT BIT(1)
/** 10BASE-T1L Receive polarity is reversed */
#define MDIO_PMA_B10L_STAT_POLARITY BIT(2)
/** 10BASE-T1L Able to detect fault on receive path */
#define MDIO_PMA_B10L_STAT_RECV_FAULT BIT(9)
/** 10BASE-T1L PHY has EEE ability */
#define MDIO_PMA_B10L_STAT_EEE BIT(10)
/** 10BASE-T1L PMA has low-power ability */
#define MDIO_PMA_B10L_STAT_LOW_POWER BIT(11)
/** 10BASE-T1L PHY has 2.4 Vpp operating mode ability */
#define MDIO_PMA_B10L_STAT_2V4_ABLE BIT(12)
/** 10BASE-T1L PHY has loopback ability */
#define MDIO_PMA_B10L_STAT_LB_ABLE BIT(13)
/* 10BASE-T1L PMA link status*/
/** 10BASE-T1L Remote Receiver Status OK Latch Low */
#define MDIO_PMA_B10L_LINK_STAT_REM_RCVR_STAT_OK_LL BIT(9)
/** 10BASE-T1L Remote Receiver Status OK */
#define MDIO_PMA_B10L_LINK_STAT_REM_RCVR_STAT_OK BIT(8)
/** 10BASE-T1L Local Receiver Status OK Latch Low */
#define MDIO_PMA_B10L_LINK_STAT_LOC_RCVR_STAT_OK_LL BIT(7)
/** 10BASE-T1L Local Receiver Status OK */
#define MDIO_PMA_B10L_LINK_STAT_LOC_RCVR_STAT_OK BIT(6)
/** 10BASE-T1L Descrambler Status OK Latch Low */
#define MDIO_PMA_B10L_LINK_STAT_DSCR_STAT_OK_LL BIT(5)
/** 10BASE-T1L Descrambler Status OK */
#define MDIO_PMA_B10L_LINK_STAT_DSCR_STAT_OK BIT(4)
/** 10BASE-T1L Link Status OK Latch Low */
#define MDIO_PMA_B10L_LINK_STAT_LINK_STAT_OK_LL BIT(1)
/** 10BASE-T1L Link Status OK */
#define MDIO_PMA_B10L_LINK_STAT_LINK_STAT_OK BIT(0)
/* 10BASE-T1L PCS control */
/** 10BASE-T1L PCS Loopback Enable */
#define MDIO_PCS_B10L_CTRL_LB_PCS_EN BIT(14)
/* 10BASE-T1L PCS status */
/** 10BASE-T1L PCS Descrambler Status */
#define MDIO_PCS_B10L_STAT_DSCR_STAT_OK_LL BIT(2)
/* Auto-negotiation Register: EEE advertisement Register */
/** Advertise 1000T capability */
#define MDIO_AN_EEE_ADV_1000T BIT(2)
/** Advertise 100TX capability */
#define MDIO_AN_EEE_ADV_100TX BIT(1)
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_MDIO_H_ */
``` | /content/code_sandbox/include/zephyr/net/mdio.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,518 |
```objective-c
/** @file
* @brief Network core definitions
*
* Definitions for networking support.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_CORE_H_
#define ZEPHYR_INCLUDE_NET_NET_CORE_H_
#include <stdbool.h>
#include <string.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/kernel.h>
#include <zephyr/net/net_timeout.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Networking
* @defgroup networking Networking
* @since 1.0
* @version 1.0.0
* @ingroup connectivity
* @{
* @}
*/
/**
* @brief Network core library
* @defgroup net_core Network Core Library
* @since 1.0
* @version 1.0.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
/* Network subsystem logging helpers */
#ifdef CONFIG_THREAD_NAME
#define NET_DBG(fmt, ...) LOG_DBG("(%s): " fmt, \
k_thread_name_get(k_current_get()), \
##__VA_ARGS__)
#else
#define NET_DBG(fmt, ...) LOG_DBG("(%p): " fmt, k_current_get(), \
##__VA_ARGS__)
#endif /* CONFIG_THREAD_NAME */
#define NET_ERR(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_WARN(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_INFO(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__)
#define NET_HEXDUMP_DBG(_data, _length, _str) LOG_HEXDUMP_DBG(_data, _length, _str)
#define NET_HEXDUMP_ERR(_data, _length, _str) LOG_HEXDUMP_ERR(_data, _length, _str)
#define NET_HEXDUMP_WARN(_data, _length, _str) LOG_HEXDUMP_WRN(_data, _length, _str)
#define NET_HEXDUMP_INFO(_data, _length, _str) LOG_HEXDUMP_INF(_data, _length, _str)
#define NET_ASSERT(cond, ...) __ASSERT(cond, "" __VA_ARGS__)
/* This needs to be here in order to avoid circular include dependency between
* net_pkt.h and net_if.h
*/
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
#if !defined(NET_PKT_DETAIL_STATS_COUNT)
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
#if defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
#define NET_PKT_DETAIL_STATS_COUNT 4
#else
#define NET_PKT_DETAIL_STATS_COUNT 3
#endif /* CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
#else
#define NET_PKT_DETAIL_STATS_COUNT 4
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL */
#endif /* !NET_PKT_DETAIL_STATS_COUNT */
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
/** @endcond */
struct net_buf;
struct net_pkt;
struct net_context;
struct net_if;
/**
* @brief Net Verdict
*/
/**
 * @brief Net Verdict
 *
 * Return value used by packet-processing functions to tell the caller
 * what should happen to the network packet next.
 */
enum net_verdict {
	/** Packet has been taken care of (handler consumed it). */
	NET_OK,
	/** Packet has not been touched, other part should decide about its
	 * fate.
	 */
	NET_CONTINUE,
	/** Packet must be dropped. */
	NET_DROP,
};
/**
* @brief Called by lower network stack or network device driver when
* a network packet has been received. The function will push the packet up in
* the network stack for further processing.
*
* @param iface Network interface where the packet was received.
* @param pkt Network packet data.
*
* @return 0 if ok, <0 if error.
*/
int net_recv_data(struct net_if *iface, struct net_pkt *pkt);
/**
* @brief Send data to network.
*
* @details Send data to network. This should not be used normally by
* applications as it requires that the network packet is properly
* constructed.
*
* @param pkt Network packet.
*
* @return 0 if ok, <0 if error. If <0 is returned, then the caller needs
* to unref the pkt in order to avoid memory leak.
*/
int net_send_data(struct net_pkt *pkt);
/** @cond INTERNAL_HIDDEN */
/* Some helper defines for traffic class support */
#if defined(CONFIG_NET_TC_TX_COUNT) && defined(CONFIG_NET_TC_RX_COUNT)
#define NET_TC_TX_COUNT CONFIG_NET_TC_TX_COUNT
#define NET_TC_RX_COUNT CONFIG_NET_TC_RX_COUNT
#if NET_TC_TX_COUNT > NET_TC_RX_COUNT
#define NET_TC_COUNT NET_TC_TX_COUNT
#else
#define NET_TC_COUNT NET_TC_RX_COUNT
#endif
#else /* CONFIG_NET_TC_TX_COUNT && CONFIG_NET_TC_RX_COUNT */
#define NET_TC_TX_COUNT 0
#define NET_TC_RX_COUNT 0
#define NET_TC_COUNT 0
#endif /* CONFIG_NET_TC_TX_COUNT && CONFIG_NET_TC_RX_COUNT */
/** @endcond */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_NET_CORE_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_core.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,055 |
```objective-c
/*
*
*/
/**
* @file
* @brief WiFi L2 stack public header
*/
#ifndef ZEPHYR_INCLUDE_NET_WIFI_MGMT_H_
#define ZEPHYR_INCLUDE_NET_WIFI_MGMT_H_
#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/wifi.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/offloaded_netdev.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @addtogroup wifi_mgmt
* @{
*/
/* Management part definitions */
/** @cond INTERNAL_HIDDEN */
#define _NET_WIFI_LAYER NET_MGMT_LAYER_L2
#define _NET_WIFI_CODE 0x156
#define _NET_WIFI_BASE (NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_WIFI_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_WIFI_CODE))
#define _NET_WIFI_EVENT (_NET_WIFI_BASE | NET_MGMT_EVENT_BIT)
#ifdef CONFIG_WIFI_MGMT_SCAN_SSID_FILT_MAX
#define WIFI_MGMT_SCAN_SSID_FILT_MAX CONFIG_WIFI_MGMT_SCAN_SSID_FILT_MAX
#else
#define WIFI_MGMT_SCAN_SSID_FILT_MAX 1
#endif /* CONFIG_WIFI_MGMT_SCAN_SSID_FILT_MAX */
#ifdef CONFIG_WIFI_MGMT_SCAN_CHAN_MAX_MANUAL
#define WIFI_MGMT_SCAN_CHAN_MAX_MANUAL CONFIG_WIFI_MGMT_SCAN_CHAN_MAX_MANUAL
#else
#define WIFI_MGMT_SCAN_CHAN_MAX_MANUAL 1
#endif /* CONFIG_WIFI_MGMT_SCAN_CHAN_MAX_MANUAL */
#define WIFI_MGMT_BAND_STR_SIZE_MAX 8
#define WIFI_MGMT_SCAN_MAX_BSS_CNT 65535
#define WIFI_MGMT_SKIP_INACTIVITY_POLL IS_ENABLED(CONFIG_WIFI_MGMT_AP_STA_SKIP_INACTIVITY_POLL)
/** @endcond */
/** @brief Wi-Fi management commands */
enum net_request_wifi_cmd {
/** Scan for Wi-Fi networks */
NET_REQUEST_WIFI_CMD_SCAN = 1,
/** Connect to a Wi-Fi network */
NET_REQUEST_WIFI_CMD_CONNECT,
/** Disconnect from a Wi-Fi network */
NET_REQUEST_WIFI_CMD_DISCONNECT,
/** Enable AP mode */
NET_REQUEST_WIFI_CMD_AP_ENABLE,
/** Disable AP mode */
NET_REQUEST_WIFI_CMD_AP_DISABLE,
/** Get interface status */
NET_REQUEST_WIFI_CMD_IFACE_STATUS,
/** Set power save status */
NET_REQUEST_WIFI_CMD_PS,
/** Setup or teardown TWT flow */
NET_REQUEST_WIFI_CMD_TWT,
/** Get power save config */
NET_REQUEST_WIFI_CMD_PS_CONFIG,
/** Set or get regulatory domain */
NET_REQUEST_WIFI_CMD_REG_DOMAIN,
/** Set or get Mode of operation */
NET_REQUEST_WIFI_CMD_MODE,
/** Set or get packet filter setting for current mode */
NET_REQUEST_WIFI_CMD_PACKET_FILTER,
/** Set or get Wi-Fi channel for Monitor or TX-Injection mode */
NET_REQUEST_WIFI_CMD_CHANNEL,
/** Disconnect a STA from AP */
NET_REQUEST_WIFI_CMD_AP_STA_DISCONNECT,
/** Get Wi-Fi driver and Firmware versions */
NET_REQUEST_WIFI_CMD_VERSION,
/** Get Wi-Fi latest connection parameters */
NET_REQUEST_WIFI_CMD_CONN_PARAMS,
/** Set RTS threshold */
NET_REQUEST_WIFI_CMD_RTS_THRESHOLD,
/** Configure AP parameter */
NET_REQUEST_WIFI_CMD_AP_CONFIG_PARAM,
/** DPP actions */
NET_REQUEST_WIFI_CMD_DPP,
#ifdef CONFIG_WIFI_NM_WPA_SUPPLICANT_WNM
/** BSS transition management query */
NET_REQUEST_WIFI_CMD_BTM_QUERY,
#endif
/** Flush PMKSA cache entries */
NET_REQUEST_WIFI_CMD_PMKSA_FLUSH,
/** Set enterprise mode credential */
NET_REQUEST_WIFI_CMD_ENTERPRISE_CREDS,
/** @cond INTERNAL_HIDDEN */
NET_REQUEST_WIFI_CMD_MAX
/** @endcond */
};
/** Request a Wi-Fi scan */
#define NET_REQUEST_WIFI_SCAN \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_SCAN)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_SCAN);
/** Request a Wi-Fi connect */
#define NET_REQUEST_WIFI_CONNECT \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_CONNECT)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_CONNECT);
/** Request a Wi-Fi disconnect */
#define NET_REQUEST_WIFI_DISCONNECT \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_DISCONNECT)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_DISCONNECT);
/** Request a Wi-Fi access point enable */
#define NET_REQUEST_WIFI_AP_ENABLE \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_AP_ENABLE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_AP_ENABLE);
/** Request a Wi-Fi access point disable */
#define NET_REQUEST_WIFI_AP_DISABLE \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_AP_DISABLE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_AP_DISABLE);
/** Request a Wi-Fi network interface status */
#define NET_REQUEST_WIFI_IFACE_STATUS \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_IFACE_STATUS)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_IFACE_STATUS);
/** Request a Wi-Fi power save */
#define NET_REQUEST_WIFI_PS \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_PS)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_PS);
/** Request a Wi-Fi TWT */
#define NET_REQUEST_WIFI_TWT \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_TWT)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_TWT);
/** Request a Wi-Fi power save configuration */
#define NET_REQUEST_WIFI_PS_CONFIG \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_PS_CONFIG)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_PS_CONFIG);
/** Request a Wi-Fi regulatory domain */
#define NET_REQUEST_WIFI_REG_DOMAIN \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_REG_DOMAIN)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_REG_DOMAIN);
/** Request current Wi-Fi mode */
#define NET_REQUEST_WIFI_MODE \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_MODE)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_MODE);
/** Request Wi-Fi packet filter */
#define NET_REQUEST_WIFI_PACKET_FILTER \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_PACKET_FILTER)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_PACKET_FILTER);
/** Request a Wi-Fi channel */
#define NET_REQUEST_WIFI_CHANNEL \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_CHANNEL)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_CHANNEL);
/** Request a Wi-Fi access point to disconnect a station */
#define NET_REQUEST_WIFI_AP_STA_DISCONNECT \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_AP_STA_DISCONNECT)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_AP_STA_DISCONNECT);
/** Request a Wi-Fi version */
#define NET_REQUEST_WIFI_VERSION \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_VERSION)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_VERSION);
/** Request a Wi-Fi connection parameters */
#define NET_REQUEST_WIFI_CONN_PARAMS \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_CONN_PARAMS)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_CONN_PARAMS);
/** Request a Wi-Fi RTS threshold */
#define NET_REQUEST_WIFI_RTS_THRESHOLD \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_RTS_THRESHOLD)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_RTS_THRESHOLD);
/** Request a Wi-Fi AP parameters configuration */
#define NET_REQUEST_WIFI_AP_CONFIG_PARAM \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_AP_CONFIG_PARAM)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_AP_CONFIG_PARAM);
/** Request a Wi-Fi DPP operation */
#define NET_REQUEST_WIFI_DPP \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_DPP)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_DPP);
#ifdef CONFIG_WIFI_NM_WPA_SUPPLICANT_WNM
/** Request a Wi-Fi BTM query */
#define NET_REQUEST_WIFI_BTM_QUERY (_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_BTM_QUERY)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_BTM_QUERY);
#endif
/** Request a Wi-Fi PMKSA cache entries flush */
#define NET_REQUEST_WIFI_PMKSA_FLUSH \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_PMKSA_FLUSH)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_PMKSA_FLUSH);
/** Set Wi-Fi enterprise mode CA/client Cert and key */
#define NET_REQUEST_WIFI_ENTERPRISE_CREDS \
(_NET_WIFI_BASE | NET_REQUEST_WIFI_CMD_ENTERPRISE_CREDS)
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_WIFI_ENTERPRISE_CREDS);
/** @brief Wi-Fi management events */
enum net_event_wifi_cmd {
/** Scan results available */
NET_EVENT_WIFI_CMD_SCAN_RESULT = 1,
/** Scan done */
NET_EVENT_WIFI_CMD_SCAN_DONE,
/** Connect result */
NET_EVENT_WIFI_CMD_CONNECT_RESULT,
/** Disconnect result */
NET_EVENT_WIFI_CMD_DISCONNECT_RESULT,
/** Interface status */
NET_EVENT_WIFI_CMD_IFACE_STATUS,
/** TWT events */
NET_EVENT_WIFI_CMD_TWT,
/** TWT sleep status: awake or sleeping, can be used by application
* to determine if it can send data or not.
*/
NET_EVENT_WIFI_CMD_TWT_SLEEP_STATE,
/** Raw scan results available */
NET_EVENT_WIFI_CMD_RAW_SCAN_RESULT,
/** Disconnect complete */
NET_EVENT_WIFI_CMD_DISCONNECT_COMPLETE,
/** AP mode enable result */
NET_EVENT_WIFI_CMD_AP_ENABLE_RESULT,
/** AP mode disable result */
NET_EVENT_WIFI_CMD_AP_DISABLE_RESULT,
/** STA connected to AP */
NET_EVENT_WIFI_CMD_AP_STA_CONNECTED,
/** STA disconnected from AP */
NET_EVENT_WIFI_CMD_AP_STA_DISCONNECTED,
};
/** Event emitted for Wi-Fi scan result */
#define NET_EVENT_WIFI_SCAN_RESULT \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_SCAN_RESULT)
/** Event emitted when Wi-Fi scan is done */
#define NET_EVENT_WIFI_SCAN_DONE \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_SCAN_DONE)
/** Event emitted for Wi-Fi connect result */
#define NET_EVENT_WIFI_CONNECT_RESULT \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_CONNECT_RESULT)
/** Event emitted for Wi-Fi disconnect result */
#define NET_EVENT_WIFI_DISCONNECT_RESULT \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_DISCONNECT_RESULT)
/** Event emitted for Wi-Fi network interface status */
#define NET_EVENT_WIFI_IFACE_STATUS \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_IFACE_STATUS)
/** Event emitted for Wi-Fi TWT information */
#define NET_EVENT_WIFI_TWT \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_TWT)
/** Event emitted for Wi-Fi TWT sleep state */
#define NET_EVENT_WIFI_TWT_SLEEP_STATE \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_TWT_SLEEP_STATE)
/** Event emitted for Wi-Fi raw scan result */
#define NET_EVENT_WIFI_RAW_SCAN_RESULT \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_RAW_SCAN_RESULT)
/** Event emitted when Wi-Fi disconnect is completed */
#define NET_EVENT_WIFI_DISCONNECT_COMPLETE \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_DISCONNECT_COMPLETE)
/** Event emitted for Wi-Fi access point enable result */
#define NET_EVENT_WIFI_AP_ENABLE_RESULT \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_AP_ENABLE_RESULT)
/** Event emitted for Wi-Fi access point disable result */
#define NET_EVENT_WIFI_AP_DISABLE_RESULT \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_AP_DISABLE_RESULT)
/** Event emitted when Wi-Fi station is connected in AP mode */
#define NET_EVENT_WIFI_AP_STA_CONNECTED \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_AP_STA_CONNECTED)
/** Event emitted when Wi-Fi station is disconnected from AP */
#define NET_EVENT_WIFI_AP_STA_DISCONNECTED \
(_NET_WIFI_EVENT | NET_EVENT_WIFI_CMD_AP_STA_DISCONNECTED)
/** @brief Wi-Fi version */
/** @brief Wi-Fi version
 *
 * Driver and firmware version strings as reported via
 * NET_REQUEST_WIFI_VERSION.
 */
struct wifi_version {
	/** Driver version.
	 * NOTE(review): string lifetime/ownership is driver-defined — confirm
	 * before caching the pointer.
	 */
	const char *drv_version;
	/** Firmware version */
	const char *fw_version;
};
/**
* @brief Wi-Fi structure to uniquely identify a band-channel pair
*/
/**
 * @brief Wi-Fi structure to uniquely identify a band-channel pair
 */
struct wifi_band_channel {
	/** Frequency band (values follow ::wifi_frequency_bands) */
	uint8_t band;
	/** Channel number within the band */
	uint8_t channel;
};
/**
* @brief Wi-Fi scan parameters structure.
* Used to specify parameters which can control how the Wi-Fi scan
* is performed.
*/
struct wifi_scan_params {
/** Scan type, see enum wifi_scan_type.
*
* The scan_type is only a hint to the underlying Wi-Fi chip for the
* preferred mode of scan. The actual mode of scan can depend on factors
* such as the Wi-Fi chip implementation support, regulatory domain
* restrictions etc.
*/
enum wifi_scan_type scan_type;
/** Bitmap of bands to be scanned.
* Refer to ::wifi_frequency_bands for bit position of each band.
*/
uint8_t bands;
/** Active scan dwell time (in ms) on a channel.
*/
uint16_t dwell_time_active;
/** Passive scan dwell time (in ms) on a channel.
*/
uint16_t dwell_time_passive;
/** Array of SSID strings to scan.
*/
const char *ssids[WIFI_MGMT_SCAN_SSID_FILT_MAX];
/** Specifies the maximum number of scan results to return. These results would be the
* BSSIDS with the best RSSI values, in all the scanned channels. This should only be
* used to limit the number of returned scan results, and cannot be counted upon to limit
* the scan time, since the underlying Wi-Fi chip might have to scan all the channels to
* find the max_bss_cnt number of APs with the best signal strengths. A value of 0
* signifies that there is no restriction on the number of scan results to be returned.
*/
uint16_t max_bss_cnt;
/** Channel information array indexed on Wi-Fi frequency bands and channels within that
* band.
* E.g. to scan channel 6 and 11 on the 2.4 GHz band, channel 36 on the 5 GHz band:
* @code{.c}
* chan[0] = {WIFI_FREQ_BAND_2_4_GHZ, 6};
* chan[1] = {WIFI_FREQ_BAND_2_4_GHZ, 11};
* chan[2] = {WIFI_FREQ_BAND_5_GHZ, 36};
* @endcode
*
* This list specifies the channels to be __considered for scan__. The underlying
* Wi-Fi chip can silently omit some channels due to various reasons such as channels
* not conforming to regulatory restrictions etc. The invoker of the API should
* ensure that the channels specified follow regulatory rules.
*/
struct wifi_band_channel band_chan[WIFI_MGMT_SCAN_CHAN_MAX_MANUAL];
};
/** @brief Wi-Fi scan result, each result is provided to the net_mgmt_event_callback
* via its info attribute (see net_mgmt.h)
*/
/** @brief Wi-Fi scan result, each result is provided to the net_mgmt_event_callback
 * via its info attribute (see net_mgmt.h)
 */
struct wifi_scan_result {
	/** SSID (not necessarily NUL-terminated — see ssid_length) */
	uint8_t ssid[WIFI_SSID_MAX_LEN];
	/** Number of valid bytes in ssid */
	uint8_t ssid_length;
	/** Frequency band */
	uint8_t band;
	/** Channel */
	uint8_t channel;
	/** Security type */
	enum wifi_security_type security;
	/** MFP options */
	enum wifi_mfp_options mfp;
	/** RSSI (NOTE(review): presumably in dBm — confirm against driver) */
	int8_t rssi;
	/** BSSID */
	uint8_t mac[WIFI_MAC_ADDR_LEN];
	/** BSSID length */
	uint8_t mac_length;
};
/** @brief Wi-Fi connect request parameters */
/** @brief Wi-Fi connect request parameters */
struct wifi_connect_req_params {
	/** SSID */
	const uint8_t *ssid;
	/** SSID length */
	uint8_t ssid_length; /* Max 32 */
	/** Pre-shared key */
	const uint8_t *psk;
	/** Pre-shared key length */
	uint8_t psk_length; /* Min 8 - Max 64 */
	/** SAE password (same as PSK but with no length restrictions), optional */
	const uint8_t *sae_password;
	/** SAE password length */
	uint8_t sae_password_length; /* No length restrictions */
	/** Frequency band */
	uint8_t band;
	/** Channel */
	uint8_t channel;
	/** Security type */
	enum wifi_security_type security;
	/** MFP options */
	enum wifi_mfp_options mfp;
	/** BSSID */
	uint8_t bssid[WIFI_MAC_ADDR_LEN];
	/** Connect timeout in seconds, SYS_FOREVER_MS for no timeout.
	 * NOTE(review): doc says seconds but the sentinel is SYS_FOREVER_MS —
	 * confirm the unit with the implementation.
	 */
	int timeout;
	/** anonymous identity (enterprise mode) */
	const uint8_t *anon_id;
	/** anon_id length */
	uint8_t aid_length; /* Max 64 */
	/** Private key passwd for enterprise mode */
	const uint8_t *key_passwd;
	/** Private key passwd length */
	uint8_t key_passwd_length; /* Max 128 */
};
/** @brief Wi-Fi connect result codes. To be overlaid on top of \ref wifi_status
 * in the connect result event for detailed status.
 */
enum wifi_conn_status {
	/** Connection successful */
	WIFI_STATUS_CONN_SUCCESS = 0,
	/** Connection failed - generic failure */
	WIFI_STATUS_CONN_FAIL,
	/** Connection failed - wrong password
	 * Possible reasons for a 4-way handshake failure that we can guess are:
	 * 1) Incorrect key
	 * 2) EAPoL frames lost causing timeout
	 *
	 * #1 is the likely cause, so, we convey to the user that it is due to
	 * a wrong passphrase/password.
	 */
	WIFI_STATUS_CONN_WRONG_PASSWORD,
	/** Connection timed out */
	WIFI_STATUS_CONN_TIMEOUT,
	/** Connection failed - AP not found */
	WIFI_STATUS_CONN_AP_NOT_FOUND,
	/** Last connection status */
	WIFI_STATUS_CONN_LAST_STATUS,
	/** First disconnection status code: disconnect reasons (see enum
	 * wifi_disconn_reason) are overlaid starting at this value.
	 */
	WIFI_STATUS_DISCONN_FIRST_STATUS = WIFI_STATUS_CONN_LAST_STATUS,
};
/** @brief Wi-Fi disconnect reason codes. To be overlaid on top of \ref wifi_status
 * in the disconnect result event for detailed reason.
 */
enum wifi_disconn_reason {
	/** Success, overload status as reason */
	WIFI_REASON_DISCONN_SUCCESS = 0,
	/** Unspecified reason */
	WIFI_REASON_DISCONN_UNSPECIFIED,
	/** Disconnected due to user request */
	WIFI_REASON_DISCONN_USER_REQUEST,
	/** Disconnected due to AP leaving */
	WIFI_REASON_DISCONN_AP_LEAVING,
	/** Disconnected due to inactivity */
	WIFI_REASON_DISCONN_INACTIVITY,
};
/** @brief Wi-Fi AP mode result codes. To be overlaid on top of \ref wifi_status
 * in the AP mode enable or disable result event for detailed status.
 */
enum wifi_ap_status {
	/** AP mode enable or disable successful */
	WIFI_STATUS_AP_SUCCESS = 0,
	/** AP mode enable or disable failed - generic failure */
	WIFI_STATUS_AP_FAIL,
	/** AP mode enable failed - channel not supported */
	WIFI_STATUS_AP_CHANNEL_NOT_SUPPORTED,
	/** AP mode enable failed - channel not allowed */
	WIFI_STATUS_AP_CHANNEL_NOT_ALLOWED,
	/** AP mode enable failed - SSID not allowed */
	WIFI_STATUS_AP_SSID_NOT_ALLOWED,
	/** AP mode enable failed - authentication type not supported */
	WIFI_STATUS_AP_AUTH_TYPE_NOT_SUPPORTED,
	/** AP mode enable failed - operation not supported */
	WIFI_STATUS_AP_OP_NOT_SUPPORTED,
	/** AP mode enable failed - operation not permitted */
	WIFI_STATUS_AP_OP_NOT_PERMITTED,
};
/** @brief Generic Wi-Fi status for commands and events.
 *
 * The raw integer status is overlaid with the detailed result enums
 * (connect, disconnect, AP) depending on the event being reported.
 */
struct wifi_status {
	union {
		/** Status value */
		int status;
		/** Connection status */
		enum wifi_conn_status conn_status;
		/** Disconnection reason status */
		enum wifi_disconn_reason disconn_reason;
		/** Access point status */
		enum wifi_ap_status ap_status;
	};
};
/** @brief Wi-Fi interface status */
struct wifi_iface_status {
	/** Interface state, see enum wifi_iface_state */
	int state;
	/** SSID length */
	unsigned int ssid_len;
	/** SSID */
	char ssid[WIFI_SSID_MAX_LEN];
	/** BSSID */
	char bssid[WIFI_MAC_ADDR_LEN];
	/** Frequency band */
	enum wifi_frequency_bands band;
	/** Channel */
	unsigned int channel;
	/** Interface mode, see enum wifi_iface_mode */
	enum wifi_iface_mode iface_mode;
	/** Link mode, see enum wifi_link_mode */
	enum wifi_link_mode link_mode;
	/** Security type, see enum wifi_security_type */
	enum wifi_security_type security;
	/** MFP options, see enum wifi_mfp_options */
	enum wifi_mfp_options mfp;
	/** RSSI */
	int rssi;
	/** DTIM period */
	unsigned char dtim_period;
	/** Beacon interval */
	unsigned short beacon_interval;
	/** Is TWT capable? */
	bool twt_capable;
	/** The current 802.11 PHY data rate */
	int current_phy_rate;
};
/** @brief Wi-Fi power save parameters */
struct wifi_ps_params {
	/** Power save state */
	enum wifi_ps enabled;
	/** Listen interval */
	unsigned short listen_interval;
	/** Wi-Fi power save wakeup mode */
	enum wifi_ps_wakeup_mode wakeup_mode;
	/** Wi-Fi power save mode */
	enum wifi_ps_mode mode;
	/** Wi-Fi power save timeout
	 *
	 * This is the time out to wait after sending a TX packet
	 * before going back to power save (in ms) to receive any replies
	 * from the AP. Zero means this feature is disabled.
	 *
	 * It's a tradeoff between power consumption and latency.
	 */
	unsigned int timeout_ms;
	/** Wi-Fi power save parameter being set/queried, see enum wifi_ps_param_type */
	enum wifi_ps_param_type type;
	/** Wi-Fi power save fail reason, filled in when an operation fails */
	enum wifi_config_ps_param_fail_reason fail_reason;
};
/** @brief Wi-Fi TWT parameters */
struct wifi_twt_params {
	/** TWT operation, see enum wifi_twt_operation */
	enum wifi_twt_operation operation;
	/** TWT negotiation type, see enum wifi_twt_negotiation_type */
	enum wifi_twt_negotiation_type negotiation_type;
	/** TWT setup command, see enum wifi_twt_setup_cmd */
	enum wifi_twt_setup_cmd setup_cmd;
	/** TWT setup response status, see enum wifi_twt_setup_resp_status */
	enum wifi_twt_setup_resp_status resp_status;
	/** TWT teardown cmd status, see enum wifi_twt_teardown_status */
	enum wifi_twt_teardown_status teardown_status;
	/** Dialog token, used to map requests to responses */
	uint8_t dialog_token;
	/** Flow ID, used to map setup with teardown */
	uint8_t flow_id;
	union {
		/** Setup specific parameters */
		struct {
			/** Interval = Wake up time + Sleeping time */
			uint64_t twt_interval;
			/** True when this device is the responder, false when requestor */
			bool responder;
			/** Trigger enabled or disabled */
			bool trigger;
			/** Implicit or explicit */
			bool implicit;
			/** Announced or unannounced */
			bool announce;
			/** Wake up time */
			uint32_t twt_wake_interval;
			/** Wake ahead notification is sent earlier than
			 * TWT Service period (SP) start based on this duration.
			 * This should give applications ample time to
			 * prepare the data before TWT SP starts.
			 */
			uint32_t twt_wake_ahead_duration;
		} setup;
		/** Teardown specific parameters */
		struct {
			/** Teardown all flows */
			bool teardown_all;
		} teardown;
	};
	/** TWT fail reason, see enum wifi_twt_fail_reason */
	enum wifi_twt_fail_reason fail_reason;
};
/** @cond INTERNAL_HIDDEN */
/* Flow ID is only 3 bits */
#define WIFI_MAX_TWT_FLOWS 8
#define WIFI_MAX_TWT_INTERVAL_US (LONG_MAX - 1)
/* 256 (u8) * 1TU (1024 us) */
#define WIFI_MAX_TWT_WAKE_INTERVAL_US 262144
#define WIFI_MAX_TWT_WAKE_AHEAD_DURATION_US (LONG_MAX - 1)
/** @endcond */
/** @brief Wi-Fi TWT flow information */
struct wifi_twt_flow_info {
	/** Interval = Wake up time + Sleeping time */
	uint64_t  twt_interval;
	/** Dialog token, used to map requests to responses */
	uint8_t dialog_token;
	/** Flow ID, used to map setup with teardown */
	uint8_t flow_id;
	/** TWT negotiation type, see enum wifi_twt_negotiation_type */
	enum wifi_twt_negotiation_type negotiation_type;
	/** True when this device is the responder, false when requestor */
	bool responder;
	/** Trigger enabled or disabled */
	bool trigger;
	/** Implicit or explicit */
	bool implicit;
	/** Announced or unannounced */
	bool announce;
	/** Wake up time */
	uint32_t twt_wake_interval;
	/** Wake ahead duration */
	uint32_t twt_wake_ahead_duration;
};
/** Wi-Fi enterprise mode credentials */
struct wifi_enterprise_creds_params {
	/** CA certificate */
	uint8_t *ca_cert;
	/** CA certificate length */
	uint32_t ca_cert_len;
	/** Client certificate */
	uint8_t *client_cert;
	/** Client certificate length */
	uint32_t client_cert_len;
	/** Client key */
	uint8_t *client_key;
	/** Client key length */
	uint32_t client_key_len;
};
/** @brief Wi-Fi power save configuration */
struct wifi_ps_config {
	/** Number of TWT flows */
	char num_twt_flows;
	/** TWT flow details */
	struct wifi_twt_flow_info twt_flows[WIFI_MAX_TWT_FLOWS];
	/** Power save configuration */
	struct wifi_ps_params ps_params;
};
/** @brief Generic get/set operation for any command */
enum wifi_mgmt_op {
	/** Get operation */
	WIFI_MGMT_GET = 0,
	/** Set operation */
	WIFI_MGMT_SET = 1,
};
/** Max regulatory channel number */
#define MAX_REG_CHAN_NUM  42
/** @brief Per-channel regulatory attributes */
struct wifi_reg_chan_info {
	/** Center frequency in MHz */
	unsigned short center_frequency;
	/** Maximum transmission power (in dBm) */
	unsigned short max_power:8;
	/** Is channel supported or not */
	unsigned short supported:1;
	/** Passive transmissions only */
	unsigned short passive_only:1;
	/** Is a DFS channel */
	unsigned short dfs:1;
} __packed;
/** @brief Regulatory domain information or configuration */
struct wifi_reg_domain {
	/** Regulatory domain operation (get or set) */
	enum wifi_mgmt_op oper;
	/** Ignore all other regulatory hints over this one */
	bool force;
	/** Country code: ISO/IEC 3166-1 alpha-2 */
	uint8_t country_code[WIFI_COUNTRY_CODE_LEN];
	/** Number of channels supported */
	unsigned int num_channels;
	/** Channels information */
	struct wifi_reg_chan_info *chan_info;
};
/** @brief Wi-Fi TWT sleep states */
enum wifi_twt_sleep_state {
	/** TWT sleep state: sleeping */
	WIFI_TWT_STATE_SLEEP = 0,
	/** TWT sleep state: awake */
	WIFI_TWT_STATE_AWAKE = 1,
};
#if defined(CONFIG_WIFI_MGMT_RAW_SCAN_RESULTS) || defined(__DOXYGEN__)
/** @brief Wi-Fi raw scan result */
struct wifi_raw_scan_result {
	/** RSSI */
	int8_t rssi;
	/** Frame length */
	int frame_length;
	/** Frequency */
	unsigned short frequency;
	/** Raw scan data, truncated to CONFIG_WIFI_MGMT_RAW_SCAN_RESULT_LENGTH bytes */
	uint8_t data[CONFIG_WIFI_MGMT_RAW_SCAN_RESULT_LENGTH];
};
#endif /* CONFIG_WIFI_MGMT_RAW_SCAN_RESULTS */
/** @brief AP mode - connected STA details */
struct wifi_ap_sta_info {
	/** Link mode, see enum wifi_link_mode */
	enum wifi_link_mode link_mode;
	/** MAC address */
	uint8_t mac[WIFI_MAC_ADDR_LEN];
	/** MAC address length */
	uint8_t mac_length;
	/** Is the STA TWT capable? */
	bool twt_capable;
};
/** @cond INTERNAL_HIDDEN */
/* Union of all event payloads, used only to compute the maximum
 * event info size at compile time.
 */
union wifi_mgmt_events {
	struct wifi_scan_result scan_result;
	struct wifi_status connect_status;
	struct wifi_iface_status iface_status;
#ifdef CONFIG_WIFI_MGMT_RAW_SCAN_RESULTS
	struct wifi_raw_scan_result raw_scan_result;
#endif /* CONFIG_WIFI_MGMT_RAW_SCAN_RESULTS */
	struct wifi_twt_params twt_params;
	struct wifi_ap_sta_info ap_sta_info;
};
/** @endcond */
/** @brief Wi-Fi mode setup */
struct wifi_mode_info {
	/** Mode setting for a specific mode of operation */
	uint8_t mode;
	/** Interface index */
	uint8_t if_index;
	/** Get or set operation */
	enum wifi_mgmt_op oper;
};
/** @brief Wi-Fi filter setting for monitor, promiscuous, TX-injection modes */
struct wifi_filter_info {
	/** Filter setting */
	uint8_t filter;
	/** Interface index */
	uint8_t if_index;
	/** Filter buffer size */
	uint16_t buffer_size;
	/** Get or set operation */
	enum wifi_mgmt_op oper;
};
/** @brief Wi-Fi channel setting for monitor and TX-injection modes */
struct wifi_channel_info {
	/** Channel value to set */
	uint16_t channel;
	/** Interface index */
	uint8_t if_index;
	/** Get or set operation */
	enum wifi_mgmt_op oper;
};
/** @cond INTERNAL_HIDDEN */
#define WIFI_AP_STA_MAX_INACTIVITY (LONG_MAX - 1)
/** @endcond */
/** @brief Wi-Fi AP configuration parameter */
struct wifi_ap_config_params {
	/** Parameter used to identify the different AP parameters */
	enum wifi_ap_config_param type;
	/** Parameter used for setting maximum inactivity duration for stations */
	uint32_t max_inactivity;
	/** Parameter used for setting maximum number of stations */
	uint32_t max_num_sta;
};
/** @brief Wi-Fi DPP configuration parameter */
/** Wi-Fi DPP QR-CODE in string max len for SHA512 */
#define WIFI_DPP_QRCODE_MAX_LEN 255
/** Wi-Fi DPP operations */
enum wifi_dpp_op {
	/** Unset invalid operation */
	WIFI_DPP_OP_INVALID = 0,
	/** Add configurator */
	WIFI_DPP_CONFIGURATOR_ADD,
	/** Start DPP auth as configurator or enrollee */
	WIFI_DPP_AUTH_INIT,
	/** Scan qr_code as parameter */
	WIFI_DPP_QR_CODE,
	/** Start DPP chirp to send DPP announcement */
	WIFI_DPP_CHIRP,
	/** Listen on specific frequency */
	WIFI_DPP_LISTEN,
	/** Generate a bootstrap like qrcode */
	WIFI_DPP_BOOTSTRAP_GEN,
	/** Get a bootstrap uri for external device to scan */
	WIFI_DPP_BOOTSTRAP_GET_URI,
	/** Set configurator parameters */
	WIFI_DPP_SET_CONF_PARAM,
	/** Set DPP rx response wait timeout */
	WIFI_DPP_SET_WAIT_RESP_TIME
};
/** Wi-Fi DPP crypto Elliptic Curves */
enum wifi_dpp_curves {
	/** Unset, default use P-256 */
	WIFI_DPP_CURVES_DEFAULT = 0,
	/** prime256v1 */
	WIFI_DPP_CURVES_P_256,
	/** secp384r1 */
	WIFI_DPP_CURVES_P_384,
	/** secp521r1
	 * NOTE(review): the constant is named P_512 while the curve it
	 * documents is secp521r1 (P-521); the name is kept for ABI/API
	 * compatibility.
	 */
	WIFI_DPP_CURVES_P_512,
	/** brainpoolP256r1 */
	WIFI_DPP_CURVES_BP_256,
	/** brainpoolP384r1 */
	WIFI_DPP_CURVES_BP_384,
	/** brainpoolP512r1 */
	WIFI_DPP_CURVES_BP_512
};
/** Wi-Fi DPP role */
enum wifi_dpp_role {
	/** Unset role */
	WIFI_DPP_ROLE_UNSET = 0,
	/** Configurator passes AP config to enrollee */
	WIFI_DPP_ROLE_CONFIGURATOR,
	/** Enrollee gets AP config and connect to AP */
	WIFI_DPP_ROLE_ENROLLEE,
	/** Both configurator and enrollee might be chosen */
	WIFI_DPP_ROLE_EITHER
};
/** Wi-Fi DPP security type
 *
 * Currently only the DPP AKM is supported.
 */
enum wifi_dpp_conf {
	/** Unset conf */
	WIFI_DPP_CONF_UNSET = 0,
	/** conf=sta-dpp, AKM DPP only for sta */
	WIFI_DPP_CONF_STA,
	/** conf=ap-dpp, AKM DPP only for ap */
	WIFI_DPP_CONF_AP,
	/** conf=query, query for AKM */
	WIFI_DPP_CONF_QUERY
};
/** Wi-Fi DPP bootstrap type
 *
 * Currently QR-CODE is the default and only supported type.
 */
enum wifi_dpp_bootstrap_type {
	/** Unset type */
	WIFI_DPP_BOOTSTRAP_TYPE_UNSET = 0,
	/** qrcode */
	WIFI_DPP_BOOTSTRAP_TYPE_QRCODE,
	/** pkex */
	WIFI_DPP_BOOTSTRAP_TYPE_PKEX,
	/** nfc */
	WIFI_DPP_BOOTSTRAP_TYPE_NFC_URI
};
/** Wi-Fi DPP params for various operations
 */
struct wifi_dpp_params {
	/** Operation enum, see enum wifi_dpp_op */
	int action;
	union {
		/** Params to add DPP configurator */
		struct wifi_dpp_configurator_add_params {
			/** ECP curves for private key */
			int curve;
			/** ECP curves for net access key */
			int net_access_key_curve;
		} configurator_add;
		/** Params to initiate a DPP auth procedure */
		struct wifi_dpp_auth_init_params {
			/** Peer bootstrap id */
			int peer;
			/** Configuration parameter id */
			int configurator;
			/** Role configurator or enrollee */
			int role;
			/** Security type */
			int conf;
			/** SSID in string */
			char ssid[WIFI_SSID_MAX_LEN + 1];
		} auth_init;
		/** Params to do DPP chirp */
		struct wifi_dpp_chirp_params {
			/** Own bootstrap id */
			int id;
			/** Chirp on frequency */
			int freq;
		} chirp;
		/** Params to do DPP listen */
		struct wifi_dpp_listen_params {
			/** Listen on frequency */
			int freq;
			/** Role configurator or enrollee */
			int role;
		} listen;
		/** Params to generate a DPP bootstrap */
		struct wifi_dpp_bootstrap_gen_params {
			/** Bootstrap type */
			int type;
			/** Own operating class */
			int op_class;
			/** Own working channel */
			int chan;
			/** ECP curves */
			int curve;
			/** Own mac address */
			uint8_t mac[WIFI_MAC_ADDR_LEN];
		} bootstrap_gen;
		/** Params to set specific DPP configurator */
		struct wifi_dpp_configurator_set_params {
			/** Peer bootstrap id */
			int peer;
			/** Configuration parameter id */
			int configurator;
			/** Role configurator or enrollee */
			int role;
			/** Security type */
			int conf;
			/** ECP curves for private key */
			int curve;
			/** ECP curves for net access key */
			int net_access_key_curve;
			/** SSID in string */
			char ssid[WIFI_SSID_MAX_LEN + 1];
		} configurator_set;
		/** Bootstrap get uri id */
		int id;
		/** Timeout for DPP frame response rx */
		int dpp_resp_wait_time;
		/** DPP QR-CODE, max for SHA512 */
		uint8_t dpp_qr_code[WIFI_DPP_QRCODE_MAX_LEN + 1];
		/** Request response reusing request buffer.
		 * So once a request is sent, buffer will be
		 * fulfilled by response
		 */
		char resp[WIFI_DPP_QRCODE_MAX_LEN + 1];
	};
};
#include <zephyr/net/net_if.h>
/** Scan result callback
 *
 * Invoked by the driver once for each scan result found.
 *
 * @param iface Network interface
 * @param status Scan result status
 * @param entry Scan result entry
 */
typedef void (*scan_result_cb_t)(struct net_if *iface, int status,
				 struct wifi_scan_result *entry);
#ifdef CONFIG_WIFI_MGMT_RAW_SCAN_RESULTS
/** Raw scan result callback
 *
 * Invoked by the driver once for each raw scan result found.
 *
 * @param iface Network interface
 * @param status Raw scan result status
 * @param entry Raw scan result entry
 */
typedef void (*raw_scan_result_cb_t)(struct net_if *iface, int status,
				     struct wifi_raw_scan_result *entry);
#endif /* CONFIG_WIFI_MGMT_RAW_SCAN_RESULTS */
/** Wi-Fi management API */
struct wifi_mgmt_ops {
	/** Scan for Wi-Fi networks
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param params Scan parameters
	 * @param cb Callback to be called for each result
	 *           cb parameter is the cb that should be called for each
	 *           result by the driver. The wifi mgmt part will take care of
	 *           raising the necessary event etc.
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*scan)(const struct device *dev,
		    struct wifi_scan_params *params,
		    scan_result_cb_t cb);
	/** Connect to a Wi-Fi network
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param params Connect parameters
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*connect)(const struct device *dev,
		       struct wifi_connect_req_params *params);
	/** Disconnect from a Wi-Fi network
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*disconnect)(const struct device *dev);
	/** Enable AP mode
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param params AP mode parameters
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*ap_enable)(const struct device *dev,
			 struct wifi_connect_req_params *params);
	/** Disable AP mode
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*ap_disable)(const struct device *dev);
	/** Disconnect a STA from AP
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param mac MAC address of the STA to disconnect
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*ap_sta_disconnect)(const struct device *dev, const uint8_t *mac);
	/** Get interface status
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param status Interface status
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*iface_status)(const struct device *dev, struct wifi_iface_status *status);
#if defined(CONFIG_NET_STATISTICS_WIFI) || defined(__DOXYGEN__)
	/** Get Wi-Fi statistics
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param stats Wi-Fi statistics
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*get_stats)(const struct device *dev, struct net_stats_wifi *stats);
	/** Reset Wi-Fi statistics
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*reset_stats)(const struct device *dev);
#endif /* CONFIG_NET_STATISTICS_WIFI */
	/** Set power save status
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param params Power save parameters
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*set_power_save)(const struct device *dev, struct wifi_ps_params *params);
	/** Setup or teardown TWT flow
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param params TWT parameters
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*set_twt)(const struct device *dev, struct wifi_twt_params *params);
	/** Get power save config
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param config Power save config
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*get_power_save_config)(const struct device *dev, struct wifi_ps_config *config);
	/** Set or get regulatory domain
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param reg_domain Regulatory domain
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*reg_domain)(const struct device *dev, struct wifi_reg_domain *reg_domain);
	/** Set or get packet filter settings for monitor and promiscuous modes
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param filter Packet filter settings
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*filter)(const struct device *dev, struct wifi_filter_info *filter);
	/** Set or get mode of operation
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param mode Mode settings
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*mode)(const struct device *dev, struct wifi_mode_info *mode);
	/** Set or get current channel of operation
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param channel Channel settings
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*channel)(const struct device *dev, struct wifi_channel_info *channel);
#ifdef CONFIG_WIFI_NM_WPA_SUPPLICANT_WNM
	/** Send BTM query
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param reason Query reason
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*btm_query)(const struct device *dev, uint8_t reason);
#endif
	/** Get Version of WiFi driver and Firmware
	 *
	 * The driver that implements the get_version function must not use stack to allocate the
	 * version information pointers that are returned as params struct members.
	 * The version pointer parameters should point to a static memory either in ROM (preferred)
	 * or in RAM.
	 *
	 * @param dev Pointer to the device structure for the driver instance
	 * @param params Version parameters
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*get_version)(const struct device *dev, struct wifi_version *params);
	/** Get Wi-Fi connection parameters recently used
	 *
	 * @param dev Pointer to the device structure for the driver instance
	 * @param params the Wi-Fi connection parameters recently used
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*get_conn_params)(const struct device *dev, struct wifi_connect_req_params *params);
	/** Set RTS threshold value
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param rts_threshold RTS threshold value
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*set_rts_threshold)(const struct device *dev, unsigned int rts_threshold);
	/** Configure AP parameter
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param params AP mode parameter configuration parameter info
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*ap_config_params)(const struct device *dev, struct wifi_ap_config_params *params);
	/** Dispatch DPP operations by action enum, with or without arguments in string format
	 *
	 * @param dev Pointer to the device structure for the driver instance
	 * @param params DPP action enum and parameters in string
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*dpp_dispatch)(const struct device *dev, struct wifi_dpp_params *params);
	/** Flush PMKSA cache entries
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 *
	 * @return 0 if ok, < 0 if error
	 */
	int (*pmksa_flush)(const struct device *dev);
	/** Set Wi-Fi enterprise mode CA/client Cert and key
	 *
	 * @param dev Pointer to the device structure for the driver instance.
	 * @param creds Pointer to the CA/client Cert and key.
	 *
	 * @return 0 if ok, < 0 if error
	 */
#ifdef CONFIG_WIFI_NM_WPA_SUPPLICANT_CRYPTO_ENTERPRISE
	int (*enterprise_creds)(const struct device *dev,
				struct wifi_enterprise_creds_params *creds);
#endif
};
/** Wi-Fi management offload API */
struct net_wifi_mgmt_offload {
	/**
	 * Must remain the first member: a network device provides a pointer
	 * to a net_if_api structure, so a pointer to this structure must be
	 * castable to a net_if_api pointer, i.e. wifi_iface must sit at
	 * offset 0 (enforced by a BUILD_ASSERT below).
	 */
#if defined(CONFIG_WIFI_USE_NATIVE_NETWORKING) || defined(__DOXYGEN__)
	/** Ethernet API */
	struct ethernet_api wifi_iface;
#else
	/** Offloaded network device API */
	struct offloaded_if_api wifi_iface;
#endif
	/** Wi-Fi management API */
	const struct wifi_mgmt_ops *const wifi_mgmt_api;
#if defined(CONFIG_WIFI_NM_WPA_SUPPLICANT) || defined(__DOXYGEN__)
	/** Wi-Fi supplicant driver API */
	const void *wifi_drv_ops;
#endif
};
#if defined(CONFIG_WIFI_NM_WPA_SUPPLICANT)
/* Make sure wifi_drv_ops is after wifi_mgmt_api */
BUILD_ASSERT(offsetof(struct net_wifi_mgmt_offload, wifi_mgmt_api) <
	     offsetof(struct net_wifi_mgmt_offload, wifi_drv_ops));
#endif
/* Make sure that the network interface API is properly setup inside
 * Wifi mgmt offload API struct (it is the first one).
 */
BUILD_ASSERT(offsetof(struct net_wifi_mgmt_offload, wifi_iface) == 0);
/** Wi-Fi management connect result event
 *
 * @param iface Network interface
 * @param status Connect result status
 */
void wifi_mgmt_raise_connect_result_event(struct net_if *iface, int status);
/** Wi-Fi management disconnect result event
 *
 * @param iface Network interface
 * @param status Disconnect result status
 */
void wifi_mgmt_raise_disconnect_result_event(struct net_if *iface, int status);
/** Wi-Fi management interface status event
 *
 * @param iface Network interface
 * @param iface_status Interface status
 */
void wifi_mgmt_raise_iface_status_event(struct net_if *iface,
					struct wifi_iface_status *iface_status);
/** Wi-Fi management TWT event
 *
 * @param iface Network interface
 * @param twt_params TWT parameters
 */
void wifi_mgmt_raise_twt_event(struct net_if *iface,
			       struct wifi_twt_params *twt_params);
/** Wi-Fi management TWT sleep state event
 *
 * @param iface Network interface
 * @param twt_sleep_state TWT sleep state, see enum wifi_twt_sleep_state
 */
void wifi_mgmt_raise_twt_sleep_state(struct net_if *iface, int twt_sleep_state);
#if defined(CONFIG_WIFI_MGMT_RAW_SCAN_RESULTS) || defined(__DOXYGEN__)
/** Wi-Fi management raw scan result event
 *
 * @param iface Network interface
 * @param raw_scan_info Raw scan result
 */
void wifi_mgmt_raise_raw_scan_result_event(struct net_if *iface,
					   struct wifi_raw_scan_result *raw_scan_info);
#endif /* CONFIG_WIFI_MGMT_RAW_SCAN_RESULTS */
/** Wi-Fi management disconnect complete event
 *
 * @param iface Network interface
 * @param status Disconnect complete status
 */
void wifi_mgmt_raise_disconnect_complete_event(struct net_if *iface, int status);
/** Wi-Fi management AP mode enable result event
 *
 * @param iface Network interface
 * @param status AP mode enable result status
 */
void wifi_mgmt_raise_ap_enable_result_event(struct net_if *iface, enum wifi_ap_status status);
/** Wi-Fi management AP mode disable result event
 *
 * @param iface Network interface
 * @param status AP mode disable result status
 */
void wifi_mgmt_raise_ap_disable_result_event(struct net_if *iface, enum wifi_ap_status status);
/** Wi-Fi management AP mode STA connected event
 *
 * @param iface Network interface
 * @param sta_info STA information
 */
void wifi_mgmt_raise_ap_sta_connected_event(struct net_if *iface,
					    struct wifi_ap_sta_info *sta_info);
/** Wi-Fi management AP mode STA disconnected event
 *
 * @param iface Network interface
 * @param sta_info STA information
 */
void wifi_mgmt_raise_ap_sta_disconnected_event(struct net_if *iface,
					       struct wifi_ap_sta_info *sta_info);
/**
 * @}
 */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_WIFI_MGMT_H_ */
``` | /content/code_sandbox/include/zephyr/net/wifi_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,330 |
```objective-c
/*
*
*/
/**
* @file
* @brief This file extends interface of ieee802154_radio.h for OpenThread.
*/
#ifndef ZEPHYR_INCLUDE_NET_IEEE802154_RADIO_OPENTHREAD_H_
#define ZEPHYR_INCLUDE_NET_IEEE802154_RADIO_OPENTHREAD_H_
#include <zephyr/net/ieee802154_radio.h>
/**
 * OpenThread specific capabilities of ieee802154 driver.
 * This type extends @ref ieee802154_hw_caps.
 */
enum ieee802154_openthread_hw_caps {
	/** Capability to transmit with @ref IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA
	 *  mode.
	 */
	IEEE802154_OPENTHREAD_HW_MULTIPLE_CCA = BIT(IEEE802154_HW_CAPS_BITS_PRIV_START),
};
/** @brief TX mode */
enum ieee802154_openthread_tx_mode {
	/**
	 * The @ref IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA mode allows sending
	 * a scheduled packet if the channel is reported idle after at most
	 * 1 + max_extra_cca_attempts CCAs performed back-to-back.
	 *
	 * This mode is a non-standard experimental OpenThread feature. It allows transmission
	 * of a packet within a certain time window.
	 * The earliest transmission time is specified as in the other TXTIME modes:
	 * When the first CCA reports an idle channel then the first symbol of the packet's PHR
	 * SHALL be present at the local antenna at the time represented by the scheduled
	 * TX timestamp (referred to as T_tx below).
	 *
	 * If the first CCA reports a busy channel, then additional CCAs up to
	 * max_extra_cca_attempts will be done until one of them reports an idle channel and
	 * the packet is sent out or the max number of attempts is reached in which case
	 * the transmission fails.
	 *
	 * The timing of these additional CCAs depends on the capabilities of the driver
	 * which reports them in the T_recca and T_ccatx driver attributes
	 * (see @ref IEEE802154_OPENTHREAD_ATTR_T_RECCA and
	 * @ref IEEE802154_OPENTHREAD_ATTR_T_CCATX). Based on these attributes the upper layer
	 * can calculate the latest point in time (T_txmax) that the first symbol of the scheduled
	 * packet's PHR SHALL be present at the local antenna:
	 *
	 * T_maxtxdelay = max_extra_cca_attempts * (aCcaTime + T_recca) - T_recca + T_ccatx
	 * T_txmax = T_tx + T_maxtxdelay
	 *
	 * See IEEE 802.15.4-2020, section 11.3, table 11-1 for the definition of aCcaTime.
	 *
	 * Drivers implementing this TX mode SHOULD keep T_recca and T_ccatx as short as possible.
	 * T_ccatx SHALL be less than or equal aTurnaroundTime as defined in ibid.,
	 * section 11.3, table 11-1.
	 *
	 * CCA SHALL be executed as defined by the phyCcaMode PHY PIB attribute (see ibid.,
	 * section 11.3, table 11-2).
	 *
	 * Requires IEEE802154_OPENTHREAD_HW_MULTIPLE_CCA capability.
	 */
	IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA = IEEE802154_TX_MODE_PRIV_START
};
/**
 * OpenThread specific configuration types of ieee802154 driver.
 * This type extends @ref ieee802154_config_type.
 */
enum ieee802154_openthread_config_type {
	/** Allows configuring the extra CCAs for a transmission requested with mode
	 *  @ref IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA.
	 *  Requires IEEE802154_OPENTHREAD_HW_MULTIPLE_CCA capability.
	 */
	IEEE802154_OPENTHREAD_CONFIG_MAX_EXTRA_CCA_ATTEMPTS = IEEE802154_CONFIG_PRIV_START
};
/**
 * Thread vendor OUI for vendor specific header or nested information elements,
 * see IEEE 802.15.4-2020, sections 7.4.2.2 and 7.4.4.30.
 *
 * in little endian
 */
#define IEEE802154_OPENTHREAD_THREAD_IE_VENDOR_OUI { 0x9b, 0xb8, 0xea }
/** length of IEEE 802.15.4-2020 vendor OUIs */
#define IEEE802154_OPENTHREAD_VENDOR_OUI_LEN 3
/** OpenThread specific configuration data of ieee802154 driver. */
struct ieee802154_openthread_config {
	union {
		/** Common configuration */
		struct ieee802154_config common;
		/** ``IEEE802154_OPENTHREAD_CONFIG_MAX_EXTRA_CCA_ATTEMPTS``
		 *
		 *  The maximum number of extra CCAs to be performed when transmission is
		 *  requested with mode @ref IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA.
		 */
		uint8_t max_extra_cca_attempts;
	};
};
/**
 * OpenThread specific attributes of ieee802154 driver.
 * This type extends @ref ieee802154_attr.
 */
enum ieee802154_openthread_attr {
	/** Attribute: Maximum time between consecutive CCAs performed back-to-back.
	 *
	 *  This is the attribute for the T_recca parameter mentioned for
	 *  @ref IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA.
	 *  Time is expressed in microseconds.
	 */
	IEEE802154_OPENTHREAD_ATTR_T_RECCA = IEEE802154_ATTR_PRIV_START,
	/** Attribute: Maximum time between detection of CCA idle channel and the moment of
	 *  start of SHR at the local antenna.
	 *
	 *  This is the attribute for the T_ccatx parameter mentioned for
	 *  @ref IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA.
	 *  Time is expressed in microseconds.
	 */
	IEEE802154_OPENTHREAD_ATTR_T_CCATX
};
/**
 * OpenThread specific attribute value data of ieee802154 driver.
 * This type extends @ref ieee802154_attr_value.
 */
struct ieee802154_openthread_attr_value {
	union {
		/** Common attribute value */
		struct ieee802154_attr_value common;
		/** @brief Attribute value for @ref IEEE802154_OPENTHREAD_ATTR_T_RECCA */
		uint16_t t_recca;
		/** @brief Attribute value for @ref IEEE802154_OPENTHREAD_ATTR_T_CCATX */
		uint16_t t_ccatx;
	};
};
#endif /* ZEPHYR_INCLUDE_NET_IEEE802154_RADIO_OPENTHREAD_H_ */
``` | /content/code_sandbox/include/zephyr/net/ieee802154_radio_openthread.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,463 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public functions for the Precision Time Protocol.
*
* References are to version 2019 of IEEE 1588, ("PTP")
*/
#ifndef ZEPHYR_INCLUDE_NET_PTP_H_
#define ZEPHYR_INCLUDE_NET_PTP_H_
/**
* @brief Precision Time Protocol (PTP) support
* @defgroup ptp PTP support
* @since 3.7
* @version 0.1.0
* @ingroup networking
* @{
*/
#include <zephyr/net/ptp_time.h>
#ifdef __cplusplus
extern "C" {
#endif
#define PTP_MAJOR_VERSION 2 /**< Major PTP Version */
#define PTP_MINOR_VERSION 1 /**< Minor PTP Version */
/* Packs minorVersionPTP into the upper nibble and versionPTP (major) into
 * the lower nibble of a single byte — NOTE(review): confirm this matches
 * the IEEE 1588-2019 header field layout expected by the transport code.
 */
#define PTP_VERSION (PTP_MINOR_VERSION << 4 | PTP_MAJOR_VERSION) /**< PTP version IEEE-1588:2019 */
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_PTP_H_ */
``` | /content/code_sandbox/include/zephyr/net/ptp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 214 |
```objective-c
/*
*
*/
/** @file
 * @brief mDNS responder API
 *
 * This file contains the mDNS responder API. These APIs are used by
 * applications to register mDNS records.
 */
#ifndef ZEPHYR_INCLUDE_NET_MDNS_RESPONDER_H_
#define ZEPHYR_INCLUDE_NET_MDNS_RESPONDER_H_
#include <stddef.h>
#include <zephyr/net/dns_sd.h>
/**
 * @brief Register a contiguous array of @ref dns_sd_rec records.
 *
 * mDNS responder will start with iteration over mDNS records registered using
 * @ref DNS_SD_REGISTER_SERVICE (if any) and then go over external records.
 *
 * @param records A pointer to an array of mDNS records. It is stored internally
 *                without copying the content so it must be kept valid. It can
 *                be set to NULL, e.g. before freeing the memory block.
 * @param count The number of elements
 * @return 0 for OK; -EINVAL for invalid parameters.
 */
int mdns_responder_set_ext_records(const struct dns_sd_rec *records, size_t count);
#endif /* ZEPHYR_INCLUDE_NET_MDNS_RESPONDER_H_ */
``` | /content/code_sandbox/include/zephyr/net/mdns_responder.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 247 |
```objective-c
/** @file
* @brief DHCPv4 Client Handler
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_DHCPV4_H_
#define ZEPHYR_INCLUDE_NET_DHCPV4_H_
#include <zephyr/sys/slist.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief DHCPv4
* @defgroup dhcpv4 DHCPv4
* @since 1.7
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
/** Current state of DHCPv4 client address negotiation.
*
 * Additions, removals and reorders in this definition must be
* reflected within corresponding changes to net_dhcpv4_state_name.
*/
/* Client states, presumably following the RFC 2131 client state
 * machine — TODO(review): confirm against the DHCPv4 client source.
 */
enum net_dhcpv4_state {
	NET_DHCPV4_DISABLED,   /* Client is not running on the interface */
	NET_DHCPV4_INIT,       /* Client start-up state */
	NET_DHCPV4_SELECTING,  /* Waiting for a server offer */
	NET_DHCPV4_REQUESTING, /* Requesting an offered address */
	NET_DHCPV4_RENEWING,   /* Renewing the current lease */
	NET_DHCPV4_REBINDING,  /* Rebinding the lease with any server */
	NET_DHCPV4_BOUND,      /* Address acquired and in use */
	NET_DHCPV4_DECLINE,    /* Declining the offered address */
} __packed;
/** @endcond */
/**
* @brief DHCPv4 message types
*
 * These enumerations represent RFC2131 defined msg type codes, hence
* they should not be renumbered.
*
 * Additions, removals and reorders in this definition must be reflected
* within corresponding changes to net_dhcpv4_msg_type_name.
*/
enum net_dhcpv4_msg_type {
NET_DHCPV4_MSG_TYPE_DISCOVER = 1, /**< Discover message */
NET_DHCPV4_MSG_TYPE_OFFER = 2, /**< Offer message */
NET_DHCPV4_MSG_TYPE_REQUEST = 3, /**< Request message */
NET_DHCPV4_MSG_TYPE_DECLINE = 4, /**< Decline message */
NET_DHCPV4_MSG_TYPE_ACK = 5, /**< Acknowledge message */
NET_DHCPV4_MSG_TYPE_NAK = 6, /**< Negative acknowledge message */
NET_DHCPV4_MSG_TYPE_RELEASE = 7, /**< Release message */
NET_DHCPV4_MSG_TYPE_INFORM = 8, /**< Inform message */
};
struct net_dhcpv4_option_callback;
/**
* @typedef net_dhcpv4_option_callback_handler_t
* @brief Define the application callback handler function signature
*
* @param cb Original struct net_dhcpv4_option_callback owning this handler
* @param length The length of data returned by the server. If this is
* greater than cb->max_length, only cb->max_length bytes
* will be available in cb->data
* @param msg_type Type of DHCP message that triggered the callback
* @param iface The interface on which the DHCP message was received
*
* Note: cb pointer can be used to retrieve private data through
* CONTAINER_OF() if original struct net_dhcpv4_option_callback is stored in
* another private structure.
*/
typedef void (*net_dhcpv4_option_callback_handler_t)(struct net_dhcpv4_option_callback *cb,
size_t length,
enum net_dhcpv4_msg_type msg_type,
struct net_if *iface);
/** @cond INTERNAL_HIDDEN */
/**
* @brief DHCP option callback structure
*
* Used to register a callback in the DHCPv4 client callback list.
* As many callbacks as needed can be added as long as each of them
* are unique pointers of struct net_dhcpv4_option_callback.
* Beware such structure should not be allocated on stack.
*
* Note: To help setting it, see net_dhcpv4_init_option_callback() below
*/
struct net_dhcpv4_option_callback {
/** This is meant to be used internally and the user should not
* mess with it.
*/
sys_snode_t node;
/** Actual callback function being called when relevant. */
net_dhcpv4_option_callback_handler_t handler;
/** The DHCP option this callback is attached to. */
uint8_t option;
/** Maximum length of data buffer. */
size_t max_length;
/** Pointer to a buffer of size max_length that is used to store the
* option data.
*/
void *data;
};
/** @endcond */
/**
* @brief Helper to initialize a struct net_dhcpv4_option_callback properly
* @param callback A valid Application's callback structure pointer.
* @param handler A valid handler function pointer.
* @param option The DHCP option the callback responds to.
* @param data A pointer to a buffer for max_length bytes.
* @param max_length The maximum length of the data returned.
*/
static inline void net_dhcpv4_init_option_callback(struct net_dhcpv4_option_callback *callback,
						   net_dhcpv4_option_callback_handler_t handler,
						   uint8_t option,
						   void *data,
						   size_t max_length)
{
	__ASSERT(callback, "Callback pointer should not be NULL");
	__ASSERT(handler, "Callback handler pointer should not be NULL");
	__ASSERT(data, "Data pointer should not be NULL");

	/* Populate only the user-visible fields; the internal list node is
	 * left untouched and is set up upon registration.
	 */
	callback->option = option;
	callback->max_length = max_length;
	callback->data = data;
	callback->handler = handler;
}
/**
* @brief Add an application callback.
* @param cb A valid application's callback structure pointer.
* @return 0 if successful, negative errno code on failure.
*/
int net_dhcpv4_add_option_callback(struct net_dhcpv4_option_callback *cb);
/**
* @brief Remove an application callback.
* @param cb A valid application's callback structure pointer.
* @return 0 if successful, negative errno code on failure.
*/
int net_dhcpv4_remove_option_callback(struct net_dhcpv4_option_callback *cb);
/**
* @brief Helper to initialize a struct net_dhcpv4_option_callback for encapsulated vendor-specific
* options properly
* @param callback A valid Application's callback structure pointer.
* @param handler A valid handler function pointer.
* @param option The DHCP encapsulated vendor-specific option the callback responds to.
* @param data A pointer to a buffer for max_length bytes.
* @param max_length The maximum length of the data returned.
*/
static inline void
net_dhcpv4_init_option_vendor_callback(struct net_dhcpv4_option_callback *callback,
				       net_dhcpv4_option_callback_handler_t handler, uint8_t option,
				       void *data, size_t max_length)
{
	__ASSERT(callback, "Callback pointer should not be NULL");
	__ASSERT(handler, "Callback handler pointer should not be NULL");
	__ASSERT(data, "Data pointer should not be NULL");

	/* Same initialization as net_dhcpv4_init_option_callback(); the
	 * vendor-specific option callback uses the identical structure.
	 */
	callback->data = data;
	callback->max_length = max_length;
	callback->handler = handler;
	callback->option = option;
}
/**
* @brief Add an application callback for encapsulated vendor-specific options.
* @param cb A valid application's callback structure pointer.
* @return 0 if successful, negative errno code on failure.
*/
int net_dhcpv4_add_option_vendor_callback(struct net_dhcpv4_option_callback *cb);
/**
* @brief Remove an application callback for encapsulated vendor-specific options.
* @param cb A valid application's callback structure pointer.
* @return 0 if successful, negative errno code on failure.
*/
int net_dhcpv4_remove_option_vendor_callback(struct net_dhcpv4_option_callback *cb);
/**
* @brief Start DHCPv4 client on an iface
*
* @details Start DHCPv4 client on a given interface. DHCPv4 client
* will start negotiation for IPv4 address. Once the negotiation is
* success IPv4 address details will be added to interface.
*
* @param iface A valid pointer on an interface
*/
void net_dhcpv4_start(struct net_if *iface);
/**
* @brief Stop DHCPv4 client on an iface
*
* @details Stop DHCPv4 client on a given interface. DHCPv4 client
* will remove all configuration obtained from a DHCP server from the
* interface and stop any further negotiation with the server.
*
* @param iface A valid pointer on an interface
*/
void net_dhcpv4_stop(struct net_if *iface);
/**
* @brief Restart DHCPv4 client on an iface
*
* @details Restart DHCPv4 client on a given interface. DHCPv4 client
* will restart the state machine without any of the initial delays
* used in start.
*
* @param iface A valid pointer on an interface
*/
void net_dhcpv4_restart(struct net_if *iface);
/** @cond INTERNAL_HIDDEN */
/**
* @brief DHCPv4 state name
*
* @internal
*/
const char *net_dhcpv4_state_name(enum net_dhcpv4_state state);
/** @endcond */
/**
* @brief Return a text representation of the msg_type
*
* @param msg_type The msg_type to be converted to text
* @return A text representation of msg_type
*/
const char *net_dhcpv4_msg_type_name(enum net_dhcpv4_msg_type msg_type);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_DHCPV4_H_ */
``` | /content/code_sandbox/include/zephyr/net/dhcpv4.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,957 |
```objective-c
/*
*
*/
/**
* @file zperf.h
*
* @brief Zperf API
* @defgroup zperf Zperf API
* @since 3.3
* @version 0.8.0
* @ingroup networking
* @{
*/
#ifndef ZEPHYR_INCLUDE_NET_ZPERF_H_
#define ZEPHYR_INCLUDE_NET_ZPERF_H_
#include <zephyr/net/net_ip.h>
#include <zephyr/net/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @cond INTERNAL_HIDDEN */
/* Session lifecycle events reported through the zperf_callback. */
enum zperf_status {
	ZPERF_SESSION_STARTED,         /* Session has started */
	ZPERF_SESSION_PERIODIC_RESULT, /* Intermediate periodic results available */
	ZPERF_SESSION_FINISHED,        /* Session completed successfully */
	ZPERF_SESSION_ERROR            /* Session terminated with an error */
} __packed;
struct zperf_upload_params {
	struct sockaddr peer_addr;   /* Address of the peer to upload to */
	uint32_t duration_ms;        /* Upload duration in milliseconds */
	uint32_t rate_kbps;          /* Target transfer rate in kilobits per second */
	uint16_t packet_size;        /* Size of a single packet, in bytes */
	char if_name[IFNAMSIZ];      /* Name of the network interface to use */
	struct {
		uint8_t tos;         /* NOTE(review): presumably the IP TOS/DSCP value - confirm */
		int tcp_nodelay;     /* Non-zero to set TCP_NODELAY (TCP uploads) */
		int priority;        /* Socket priority */
		uint32_t report_interval_ms; /* Interval for ZPERF_SESSION_PERIODIC_RESULT reports */
	} options;
};
struct zperf_download_params {
	uint16_t port;           /* Port to listen on */
	struct sockaddr addr;    /* Local address to bind to */
	char if_name[IFNAMSIZ];  /* Name of the network interface to use */
};
/** @endcond */
/** Performance results */
struct zperf_results {
uint32_t nb_packets_sent; /**< Number of packets sent */
uint32_t nb_packets_rcvd; /**< Number of packets received */
uint32_t nb_packets_lost; /**< Number of packets lost */
uint32_t nb_packets_outorder; /**< Number of packets out of order */
uint64_t total_len; /**< Total length of the transferred data */
uint64_t time_in_us; /**< Total time of the transfer in microseconds */
uint32_t jitter_in_us; /**< Jitter in microseconds */
uint64_t client_time_in_us; /**< Client connection time in microseconds */
uint32_t packet_size; /**< Packet size */
uint32_t nb_packets_errors; /**< Number of packet errors */
};
/**
* @brief Zperf callback function used for asynchronous operations.
*
* @param status Session status.
* @param result Session results. May be NULL for certain events.
* @param user_data A pointer to the user provided data.
*/
typedef void (*zperf_callback)(enum zperf_status status,
struct zperf_results *result,
void *user_data);
/**
* @brief Synchronous UDP upload operation. The function blocks until the upload
* is complete.
*
* @param param Upload parameters.
* @param result Session results.
*
* @return 0 if session completed successfully, a negative error code otherwise.
*/
int zperf_udp_upload(const struct zperf_upload_params *param,
struct zperf_results *result);
/**
* @brief Synchronous TCP upload operation. The function blocks until the upload
* is complete.
*
* @param param Upload parameters.
* @param result Session results.
*
* @return 0 if session completed successfully, a negative error code otherwise.
*/
int zperf_tcp_upload(const struct zperf_upload_params *param,
struct zperf_results *result);
/**
* @brief Asynchronous UDP upload operation.
*
* @note Only one asynchronous upload can be performed at a time.
*
* @param param Upload parameters.
* @param callback Session results callback.
* @param user_data A pointer to the user data to be provided with the callback.
*
* @return 0 if session was scheduled successfully, a negative error code
* otherwise.
*/
int zperf_udp_upload_async(const struct zperf_upload_params *param,
zperf_callback callback, void *user_data);
/**
* @brief Asynchronous TCP upload operation.
*
* @note Only one asynchronous upload can be performed at a time.
*
* @param param Upload parameters.
* @param callback Session results callback.
* @param user_data A pointer to the user data to be provided with the callback.
*
* @return 0 if session was scheduled successfully, a negative error code
* otherwise.
*/
int zperf_tcp_upload_async(const struct zperf_upload_params *param,
zperf_callback callback, void *user_data);
/**
* @brief Start UDP server.
*
* @note Only one UDP server instance can run at a time.
*
* @param param Download parameters.
* @param callback Session results callback.
* @param user_data A pointer to the user data to be provided with the callback.
*
* @return 0 if server was started, a negative error code otherwise.
*/
int zperf_udp_download(const struct zperf_download_params *param,
zperf_callback callback, void *user_data);
/**
* @brief Start TCP server.
*
* @note Only one TCP server instance can run at a time.
*
* @param param Download parameters.
* @param callback Session results callback.
* @param user_data A pointer to the user data to be provided with the callback.
*
* @return 0 if server was started, a negative error code otherwise.
*/
int zperf_tcp_download(const struct zperf_download_params *param,
zperf_callback callback, void *user_data);
/**
* @brief Stop UDP server.
*
* @return 0 if server was stopped successfully, a negative error code otherwise.
*/
int zperf_udp_download_stop(void);
/**
* @brief Stop TCP server.
*
* @return 0 if server was stopped successfully, a negative error code otherwise.
*/
int zperf_tcp_download_stop(void);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_ZPERF_H_ */
``` | /content/code_sandbox/include/zephyr/net/zperf.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,149 |
```objective-c
/** @file
* @brief SocketCAN utilities.
*
* Utilities for SocketCAN support.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_SOCKETCAN_UTILS_H_
#define ZEPHYR_INCLUDE_NET_SOCKETCAN_UTILS_H_
#include <zephyr/drivers/can.h>
#include <zephyr/net/socketcan.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief SocketCAN utilities
* @addtogroup socket_can
* @{
*/
/**
* @brief Translate a @a socketcan_frame struct to a @a can_frame struct.
*
 * @param sframe Pointer to socketcan_frame struct.
* @param zframe Pointer to can_frame struct.
*/
static inline void socketcan_to_can_frame(const struct socketcan_frame *sframe,
					  struct can_frame *zframe)
{
	memset(zframe, 0, sizeof(*zframe));

	/* EFF (bit 31) and RTR (bit 30) are encoded in the SocketCAN ID */
	if ((sframe->can_id & BIT(31)) != 0) {
		zframe->flags |= CAN_FRAME_IDE;
	}
	if ((sframe->can_id & BIT(30)) != 0) {
		zframe->flags |= CAN_FRAME_RTR;
	}
	if ((sframe->flags & CANFD_FDF) != 0) {
		zframe->flags |= CAN_FRAME_FDF;
	}
	if ((sframe->flags & CANFD_BRS) != 0) {
		zframe->flags |= CAN_FRAME_BRS;
	}

	zframe->id = sframe->can_id & BIT_MASK(29);
	zframe->dlc = can_bytes_to_dlc(sframe->len);

	/* Remote frames carry no payload */
	if ((zframe->flags & CAN_FRAME_RTR) == 0U) {
		size_t nbytes = MIN(sframe->len,
				    MIN(sizeof(sframe->data), sizeof(zframe->data)));

		memcpy(zframe->data, sframe->data, nbytes);
	}
}
/**
* @brief Translate a @a can_frame struct to a @a socketcan_frame struct.
*
* @param zframe Pointer to can_frame struct.
* @param sframe Pointer to socketcan_frame struct.
*/
static inline void socketcan_from_can_frame(const struct can_frame *zframe,
					    struct socketcan_frame *sframe)
{
	memset(sframe, 0, sizeof(*sframe));

	/* Fold the IDE/RTR flags back into the SocketCAN ID bits */
	sframe->can_id = zframe->id |
			 ((zframe->flags & CAN_FRAME_IDE) != 0 ? BIT(31) : 0) |
			 ((zframe->flags & CAN_FRAME_RTR) != 0 ? BIT(30) : 0);
	sframe->len = can_dlc_to_bytes(zframe->dlc);

	sframe->flags |= (zframe->flags & CAN_FRAME_FDF) != 0 ? CANFD_FDF : 0;
	sframe->flags |= (zframe->flags & CAN_FRAME_BRS) != 0 ? CANFD_BRS : 0;

	/* Remote frames carry no payload */
	if ((zframe->flags & CAN_FRAME_RTR) == 0U) {
		size_t nbytes = MIN(sframe->len,
				    MIN(sizeof(zframe->data), sizeof(sframe->data)));

		memcpy(sframe->data, zframe->data, nbytes);
	}
}
/**
* @brief Translate a @a socketcan_filter struct to a @a can_filter struct.
*
* @param sfilter Pointer to socketcan_filter struct.
* @param zfilter Pointer to can_filter struct.
*/
static inline void socketcan_to_can_filter(const struct socketcan_filter *sfilter,
					   struct can_filter *zfilter)
{
	memset(zfilter, 0, sizeof(*zfilter));

	/* Bit 31 of the SocketCAN ID selects extended (29-bit) identifiers */
	if ((sfilter->can_id & BIT(31)) != 0) {
		zfilter->flags |= CAN_FILTER_IDE;
	}
	zfilter->id = sfilter->can_id & BIT_MASK(29);
	zfilter->mask = sfilter->can_mask & BIT_MASK(29);
}
/**
* @brief Translate a @a can_filter struct to a @a socketcan_filter struct.
*
* @param zfilter Pointer to can_filter struct.
* @param sfilter Pointer to socketcan_filter struct.
*/
static inline void socketcan_from_can_filter(const struct can_filter *zfilter,
					     struct socketcan_filter *sfilter)
{
	bool ide = (zfilter->flags & CAN_FILTER_IDE) != 0;

	memset(sfilter, 0, sizeof(*sfilter));

	/* Mirror the IDE flag into bit 31 of both the ID and the mask */
	sfilter->can_id = zfilter->id | (ide ? BIT(31) : 0);
	sfilter->can_mask = zfilter->mask | (ide ? BIT(31) : 0);

	if (!IS_ENABLED(CONFIG_CAN_ACCEPT_RTR)) {
		/* Match on the RTR bit as well so remote frames are rejected */
		sfilter->can_mask |= BIT(30);
	}
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_SOCKETCAN_UTILS_H_ */
``` | /content/code_sandbox/include/zephyr/net/socketcan_utils.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,016 |
```objective-c
/*
*
*/
/**
* @file
* @brief Network Management API public header
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_MGMT_H_
#define ZEPHYR_INCLUDE_NET_NET_MGMT_H_
#include <zephyr/sys/__assert.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_event.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network Management
* @defgroup net_mgmt Network Management
* @since 1.7
* @version 1.0.0
* @ingroup networking
* @{
*/
struct net_if;
/** @cond INTERNAL_HIDDEN */
/**
* @brief NET MGMT event mask basics, normalizing parts of bit fields
*/
#define NET_MGMT_EVENT_MASK 0x80000000
#define NET_MGMT_ON_IFACE_MASK 0x40000000
#define NET_MGMT_LAYER_MASK 0x30000000
#define NET_MGMT_SYNC_EVENT_MASK 0x08000000
#define NET_MGMT_LAYER_CODE_MASK 0x07FF0000
#define NET_MGMT_COMMAND_MASK 0x0000FFFF
#define NET_MGMT_EVENT_BIT BIT(31)
#define NET_MGMT_IFACE_BIT BIT(30)
#define NET_MGMT_SYNC_EVENT_BIT BIT(27)
#define NET_MGMT_LAYER(_layer) (_layer << 28)
#define NET_MGMT_LAYER_CODE(_code) (_code << 16)
#define NET_MGMT_EVENT(mgmt_request) \
(mgmt_request & NET_MGMT_EVENT_MASK)
#define NET_MGMT_ON_IFACE(mgmt_request) \
(mgmt_request & NET_MGMT_ON_IFACE_MASK)
#define NET_MGMT_EVENT_SYNCHRONOUS(mgmt_request) \
(mgmt_request & NET_MGMT_SYNC_EVENT_MASK)
#define NET_MGMT_GET_LAYER(mgmt_request) \
((mgmt_request & NET_MGMT_LAYER_MASK) >> 28)
#define NET_MGMT_GET_LAYER_CODE(mgmt_request) \
((mgmt_request & NET_MGMT_LAYER_CODE_MASK) >> 16)
#define NET_MGMT_GET_COMMAND(mgmt_request) \
(mgmt_request & NET_MGMT_COMMAND_MASK)
/* Useful generic definitions */
#define NET_MGMT_LAYER_L2 1
#define NET_MGMT_LAYER_L3 2
#define NET_MGMT_LAYER_L4 3
/** @endcond */
/**
* @typedef net_mgmt_request_handler_t
* @brief Signature which all Net MGMT request handler need to follow
* @param mgmt_request The exact request value the handler is being called
* through
* @param iface A valid pointer on struct net_if if the request is meant
* to be tied to a network interface. NULL otherwise.
* @param data A valid pointer on a data understood by the handler.
* NULL otherwise.
* @param len Length in byte of the memory pointed by data.
*/
typedef int (*net_mgmt_request_handler_t)(uint32_t mgmt_request,
struct net_if *iface,
void *data, size_t len);
/**
* @brief Generate a network management event.
*
* @param _mgmt_request Management event identifier
* @param _iface Network interface
* @param _data Any additional data for the event
* @param _len Length of the additional data.
*/
#define net_mgmt(_mgmt_request, _iface, _data, _len) \
net_mgmt_##_mgmt_request(_mgmt_request, _iface, _data, _len)
/**
* @brief Declare a request handler function for the given network event.
*
* @param _mgmt_request Management event identifier
*/
#define NET_MGMT_DEFINE_REQUEST_HANDLER(_mgmt_request) \
extern int net_mgmt_##_mgmt_request(uint32_t mgmt_request, \
struct net_if *iface, \
void *data, size_t len)
/**
* @brief Create a request handler function for the given network event.
*
* @param _mgmt_request Management event identifier
* @param _func Function for handling this event
*/
#define NET_MGMT_REGISTER_REQUEST_HANDLER(_mgmt_request, _func) \
FUNC_ALIAS(_func, net_mgmt_##_mgmt_request, int)
struct net_mgmt_event_callback;
/**
* @typedef net_mgmt_event_handler_t
* @brief Define the user's callback handler function signature
* @param cb Original struct net_mgmt_event_callback owning this handler.
* @param mgmt_event The network event being notified.
* @param iface A pointer on a struct net_if to which the event belongs to,
* if it's an event on an iface. NULL otherwise.
*/
typedef void (*net_mgmt_event_handler_t)(struct net_mgmt_event_callback *cb,
uint32_t mgmt_event,
struct net_if *iface);
/**
* @brief Network Management event callback structure
* Used to register a callback into the network management event part, in order
* to let the owner of this struct to get network event notification based on
* given event mask.
*/
struct net_mgmt_event_callback {
	/** Meant to be used internally, to insert the callback into a list.
	 * So nobody should mess with it.
	 */
	sys_snode_t node;

	union {
		/** Actual callback function being used to notify the owner
		 */
		net_mgmt_event_handler_t handler;

		/** Semaphore meant to be used internally for the synchronous
		 * net_mgmt_event_wait() function.
		 */
		struct k_sem *sync_call;
	};

#ifdef CONFIG_NET_MGMT_EVENT_INFO
	/* Pointer to and length of additional event information.
	 * NOTE(review): presumably filled in internally when a synchronous
	 * waiter receives an event carrying info - confirm against the
	 * net_mgmt implementation.
	 */
	const void *info;
	size_t info_length;
#endif

	/** A mask of network events on which the above handler should be
	 * called in case those events come. Such mask can be modified
	 * whenever necessary by the owner, and thus will affect the handler
	 * being called or not.
	 */
	union {
		/** A mask of network events on which the above handler should
		 * be called in case those events come.
		 * Note that only the command part is treated as a mask,
		 * matching one to several commands. Layer and layer code will
		 * be made of an exact match. This means that in order to
		 * receive events from multiple layers, one must have multiple
		 * listeners registered, one for each layer being listened.
		 */
		uint32_t event_mask;

		/** Internal place holder when a synchronous event wait is
		 * successfully unlocked on a event.
		 */
		uint32_t raised_event;
	};
};
/**
* @typedef net_mgmt_event_static_handler_t
* @brief Define the user's callback handler function signature
* @param mgmt_event The network event being notified.
* @param iface A pointer on a struct net_if to which the event belongs to,
* if it's an event on an iface. NULL otherwise.
* @param info A valid pointer on a data understood by the handler.
* NULL otherwise.
* @param info_length Length in bytes of the memory pointed by @p info.
* @param user_data Data provided by the user to the handler.
*/
typedef void (*net_mgmt_event_static_handler_t)(uint32_t mgmt_event,
struct net_if *iface,
void *info, size_t info_length,
void *user_data);
/** @cond INTERNAL_HIDDEN */
/* Structure for event handler registered at compile time */
struct net_mgmt_event_static_handler {
uint32_t event_mask;
net_mgmt_event_static_handler_t handler;
void *user_data;
};
/** @endcond */
/**
* @brief Define a static network event handler.
* @param _name Name of the event handler.
* @param _event_mask A mask of network events on which the passed handler should
* be called in case those events come.
* Note that only the command part is treated as a mask,
* matching one to several commands. Layer and layer code will
* be made of an exact match. This means that in order to
* receive events from multiple layers, one must have multiple
* listeners registered, one for each layer being listened.
* @param _func The function to be called upon network events being emitted.
* @param _user_data User data passed to the handler being called on network events.
*/
#define NET_MGMT_REGISTER_EVENT_HANDLER(_name, _event_mask, _func, _user_data) \
const STRUCT_SECTION_ITERABLE(net_mgmt_event_static_handler, _name) = { \
.event_mask = _event_mask, \
.handler = _func, \
.user_data = (void *)_user_data, \
}
/**
* @brief Helper to initialize a struct net_mgmt_event_callback properly
* @param cb A valid application's callback structure pointer.
* @param handler A valid handler function pointer.
* @param mgmt_event_mask A mask of relevant events for the handler
*/
#ifdef CONFIG_NET_MGMT_EVENT
static inline
void net_mgmt_init_event_callback(struct net_mgmt_event_callback *cb,
				  net_mgmt_event_handler_t handler,
				  uint32_t mgmt_event_mask)
{
	__ASSERT(cb, "Callback pointer should not be NULL");
	__ASSERT(handler, "Handler pointer should not be NULL");

	/* Only the user-visible fields are initialized; the internal list
	 * node is set up when the callback is registered via
	 * net_mgmt_add_event_callback().
	 */
	cb->handler = handler;
	cb->event_mask = mgmt_event_mask;
}
#else
#define net_mgmt_init_event_callback(...)
#endif
/**
* @brief Add a user callback
* @param cb A valid pointer on user's callback to add.
*/
#ifdef CONFIG_NET_MGMT_EVENT
void net_mgmt_add_event_callback(struct net_mgmt_event_callback *cb);
#else
#define net_mgmt_add_event_callback(...)
#endif
/**
* @brief Delete a user callback
* @param cb A valid pointer on user's callback to delete.
*/
#ifdef CONFIG_NET_MGMT_EVENT
void net_mgmt_del_event_callback(struct net_mgmt_event_callback *cb);
#else
#define net_mgmt_del_event_callback(...)
#endif
/**
* @brief Used by the system to notify an event.
* @param mgmt_event The actual network event code to notify
* @param iface a valid pointer on a struct net_if if only the event is
* based on an iface. NULL otherwise.
* @param info A valid pointer on the information you want to pass along
* with the event. NULL otherwise. Note the data pointed there is
* normalized by the related event.
* @param length size of the data pointed by info pointer.
*
* Note: info and length are disabled if CONFIG_NET_MGMT_EVENT_INFO
* is not defined.
*/
#if defined(CONFIG_NET_MGMT_EVENT)
void net_mgmt_event_notify_with_info(uint32_t mgmt_event, struct net_if *iface,
const void *info, size_t length);
#else
#define net_mgmt_event_notify_with_info(...)
#endif
/**
* @brief Used by the system to notify an event without any additional information.
* @param mgmt_event The actual network event code to notify
* @param iface A valid pointer on a struct net_if if only the event is
* based on an iface. NULL otherwise.
*/
#if defined(CONFIG_NET_MGMT_EVENT)
static inline void net_mgmt_event_notify(uint32_t mgmt_event,
					 struct net_if *iface)
{
	/* Convenience wrapper: notify with no additional event info. */
	net_mgmt_event_notify_with_info(mgmt_event, iface, NULL, 0);
}
#else
#define net_mgmt_event_notify(...)
#endif
/**
* @brief Used to wait synchronously on an event mask
* @param mgmt_event_mask A mask of relevant events to wait on.
* @param raised_event a pointer on a uint32_t to get which event from
* the mask generated the event. Can be NULL if the caller is not
* interested in that information.
* @param iface a pointer on a place holder for the iface on which the
* event has originated from. This is valid if only the event mask
* has bit NET_MGMT_IFACE_BIT set relevantly, depending on events
* the caller wants to listen to.
* @param info a valid pointer if user wants to get the information the
* event might bring along. NULL otherwise.
* @param info_length tells how long the info memory area is. Only valid if
* the info is not NULL.
* @param timeout A timeout delay. K_FOREVER can be used to wait indefinitely.
*
* @return 0 on success, a negative error code otherwise. -ETIMEDOUT will
* be specifically returned if the timeout kick-in instead of an
* actual event.
*/
#ifdef CONFIG_NET_MGMT_EVENT
int net_mgmt_event_wait(uint32_t mgmt_event_mask,
uint32_t *raised_event,
struct net_if **iface,
const void **info,
size_t *info_length,
k_timeout_t timeout);
#else
static inline int net_mgmt_event_wait(uint32_t mgmt_event_mask,
				      uint32_t *raised_event,
				      struct net_if **iface,
				      const void **info,
				      size_t *info_length,
				      k_timeout_t timeout)
{
	/* Stub used when CONFIG_NET_MGMT_EVENT is disabled: no events are
	 * ever generated, so report immediate success without touching any
	 * of the output parameters.
	 */
	return 0;
}
#endif
/**
* @brief Used to wait synchronously on an event mask for a specific iface
* @param iface a pointer on a valid network interface to listen event to
* @param mgmt_event_mask A mask of relevant events to wait on. Listened
* to events should be relevant to iface events and thus have the bit
* NET_MGMT_IFACE_BIT set.
* @param raised_event a pointer on a uint32_t to get which event from
* the mask generated the event. Can be NULL if the caller is not
* interested in that information.
* @param info a valid pointer if user wants to get the information the
* event might bring along. NULL otherwise.
* @param info_length tells how long the info memory area is. Only valid if
* the info is not NULL.
* @param timeout A timeout delay. K_FOREVER can be used to wait indefinitely.
*
* @return 0 on success, a negative error code otherwise. -ETIMEDOUT will
* be specifically returned if the timeout kick-in instead of an
* actual event.
*/
#ifdef CONFIG_NET_MGMT_EVENT
int net_mgmt_event_wait_on_iface(struct net_if *iface,
uint32_t mgmt_event_mask,
uint32_t *raised_event,
const void **info,
size_t *info_length,
k_timeout_t timeout);
#else
static inline int net_mgmt_event_wait_on_iface(struct net_if *iface,
					       uint32_t mgmt_event_mask,
					       uint32_t *raised_event,
					       const void **info,
					       size_t *info_length,
					       k_timeout_t timeout)
{
	/* Stub used when CONFIG_NET_MGMT_EVENT is disabled: no events are
	 * ever generated, so report immediate success without touching any
	 * of the output parameters.
	 */
	return 0;
}
#endif
/**
* @brief Used by the core of the network stack to initialize the network
* event processing.
*/
#ifdef CONFIG_NET_MGMT_EVENT
void net_mgmt_event_init(void);
#else
#define net_mgmt_event_init(...)
#endif /* CONFIG_NET_MGMT_EVENT */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_NET_MGMT_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_mgmt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,176 |
```objective-c
/** @file
* @brief Network statistics
*
* Network statistics data. This should only be enabled when
* debugging as it consumes memory.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_STATS_H_
#define ZEPHYR_INCLUDE_NET_NET_STATS_H_
#include <zephyr/types.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_mgmt.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network statistics library
* @defgroup net_stats Network Statistics Library
* @since 1.5
* @version 0.8.0
* @ingroup networking
* @{
*/
/**
* @typedef net_stats_t
* @brief Network statistics counter
*/
typedef uint32_t net_stats_t;
/**
* @brief Number of bytes sent and received.
*/
struct net_stats_bytes {
/** Number of bytes sent */
net_stats_t sent;
/** Number of bytes received */
net_stats_t received;
};
/**
* @brief Number of network packets sent and received.
*/
struct net_stats_pkts {
/** Number of packets sent */
net_stats_t tx;
/** Number of packets received */
net_stats_t rx;
};
/**
* @brief IP layer statistics
*/
struct net_stats_ip {
/** Number of received packets at the IP layer. */
net_stats_t recv;
/** Number of sent packets at the IP layer. */
net_stats_t sent;
/** Number of forwarded packets at the IP layer. */
net_stats_t forwarded;
/** Number of dropped packets at the IP layer. */
net_stats_t drop;
};
/**
* @brief IP layer error statistics
*/
struct net_stats_ip_errors {
/** Number of packets dropped due to wrong IP version
* or header length.
*/
net_stats_t vhlerr;
/** Number of packets dropped due to wrong IP length, high byte. */
net_stats_t hblenerr;
/** Number of packets dropped due to wrong IP length, low byte. */
net_stats_t lblenerr;
/** Number of packets dropped because they were IP fragments. */
net_stats_t fragerr;
/** Number of packets dropped due to IP checksum errors. */
net_stats_t chkerr;
/** Number of packets dropped because they were neither ICMP,
* UDP nor TCP.
*/
net_stats_t protoerr;
};
/**
 * @brief ICMP statistics
 */
struct net_stats_icmp {
/** Number of received ICMP packets. */
net_stats_t recv;
/** Number of sent ICMP packets. */
net_stats_t sent;
/** Number of dropped ICMP packets. */
net_stats_t drop;
/** Number of ICMP packets with a wrong type. */
net_stats_t typeerr;
/** Number of ICMP packets with a bad checksum. */
net_stats_t chkerr;
};
/**
 * @brief TCP statistics
 */
struct net_stats_tcp {
/** Amount of received and sent TCP application data. */
struct net_stats_bytes bytes;
/** Amount of retransmitted data.
 * NOTE(review): presumably a byte count, while rexmit below counts
 * segments - confirm in the TCP implementation.
 */
net_stats_t resent;
/** Number of dropped packets at the TCP layer. */
net_stats_t drop;
/** Number of received TCP segments. */
net_stats_t recv;
/** Number of sent TCP segments. */
net_stats_t sent;
/** Number of dropped TCP segments. */
net_stats_t seg_drop;
/** Number of TCP segments with a bad checksum. */
net_stats_t chkerr;
/** Number of received TCP segments with a bad ACK number. */
net_stats_t ackerr;
/** Number of received bad TCP RST (reset) segments. */
net_stats_t rsterr;
/** Number of received TCP RST (reset) segments. */
net_stats_t rst;
/** Number of retransmitted TCP segments. */
net_stats_t rexmit;
/** Number of dropped connection attempts because too few connections
 * were available.
 */
net_stats_t conndrop;
/** Number of connection attempts for closed ports, triggering a RST. */
net_stats_t connrst;
};
/**
 * @brief UDP statistics
 */
struct net_stats_udp {
/** Number of dropped UDP segments. */
net_stats_t drop;
/** Number of received UDP segments. */
net_stats_t recv;
/** Number of sent UDP segments. */
net_stats_t sent;
/** Number of UDP segments with a bad checksum. */
net_stats_t chkerr;
};
/**
 * @brief IPv6 neighbor discovery statistics
 */
struct net_stats_ipv6_nd {
/** Number of dropped IPv6 neighbor discovery packets. */
net_stats_t drop;
/** Number of received IPv6 neighbor discovery packets. */
net_stats_t recv;
/** Number of sent IPv6 neighbor discovery packets. */
net_stats_t sent;
};
/**
 * @brief IPv6 multicast listener daemon statistics
 */
struct net_stats_ipv6_mld {
/** Number of received IPv6 MLD queries. */
net_stats_t recv;
/** Number of sent IPv6 MLD reports. */
net_stats_t sent;
/** Number of dropped IPv6 MLD packets. */
net_stats_t drop;
};
/**
 * @brief IPv4 IGMP daemon statistics
 */
struct net_stats_ipv4_igmp {
/** Number of received IPv4 IGMP queries. */
net_stats_t recv;
/** Number of sent IPv4 IGMP reports. */
net_stats_t sent;
/** Number of dropped IPv4 IGMP packets. */
net_stats_t drop;
};
/**
 * @brief Network packet transfer times for calculating average TX time
 */
struct net_stats_tx_time {
/** Sum of network packet transfer times.
 * NOTE(review): units are not visible in this header - presumably
 * system ticks or microseconds; confirm against the stack's TX path.
 */
uint64_t sum;
/** Number of network packets transferred. */
net_stats_t count;
};
/**
 * @brief Network packet receive times for calculating average RX time
 */
struct net_stats_rx_time {
/** Sum of network packet receive times. */
uint64_t sum;
/** Number of network packets received. */
net_stats_t count;
};
/** @cond INTERNAL_HIDDEN */
/* The per-traffic-class arrays in struct net_stats_tc below are
 * dimensioned with these values, so force a minimum of 1 even when no
 * traffic classes are configured to keep the array sizes valid.
 */
#if NET_TC_TX_COUNT == 0
#define NET_TC_TX_STATS_COUNT 1
#else
#define NET_TC_TX_STATS_COUNT NET_TC_TX_COUNT
#endif
#if NET_TC_RX_COUNT == 0
#define NET_TC_RX_STATS_COUNT 1
#else
#define NET_TC_RX_STATS_COUNT NET_TC_RX_COUNT
#endif
/** @endcond */
/**
 * @brief Traffic class statistics
 *
 * One array slot per configured traffic class; at least one slot is
 * always present (see NET_TC_TX_STATS_COUNT / NET_TC_RX_STATS_COUNT).
 */
struct net_stats_tc {
/** TX statistics for each traffic class */
struct {
/** Helper for calculating average TX time statistics */
struct net_stats_tx_time tx_time;
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
/** Detailed TX time statistics inside network stack */
struct net_stats_tx_time
tx_time_detail[NET_PKT_DETAIL_STATS_COUNT];
#endif
/** Number of packets sent for this traffic class */
net_stats_t pkts;
/** Number of bytes sent for this traffic class */
net_stats_t bytes;
/** Priority of this traffic class */
uint8_t priority;
} sent[NET_TC_TX_STATS_COUNT];
/** RX statistics for each traffic class */
struct {
/** Helper for calculating average RX time statistics */
struct net_stats_rx_time rx_time;
#if defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
/** Detailed RX time statistics inside network stack */
struct net_stats_rx_time
rx_time_detail[NET_PKT_DETAIL_STATS_COUNT];
#endif
/** Number of packets received for this traffic class */
net_stats_t pkts;
/** Number of bytes received for this traffic class */
net_stats_t bytes;
/** Priority of this traffic class */
uint8_t priority;
} recv[NET_TC_RX_STATS_COUNT];
};
/**
 * @brief Power management statistics
 */
struct net_stats_pm {
/** Total suspend time.
 * NOTE(review): time base/units are not visible in this header -
 * presumably the same units as last_suspend_time; confirm in PM code.
 */
uint64_t overall_suspend_time;
/** How many times we were suspended */
net_stats_t suspend_count;
/** How long the last suspend took */
uint32_t last_suspend_time;
/** Network interface last suspend start time */
uint32_t start_time;
};
/**
 * @brief All network statistics in one struct.
 *
 * Members guarded by a CONFIG_* option below exist only when the
 * corresponding statistics option is enabled.
 */
struct net_stats {
/** Count of malformed packets or packets we do not have handler for */
net_stats_t processing_error;
/**
 * This calculates amount of data transferred through all the
 * network interfaces.
 */
struct net_stats_bytes bytes;
/** IP layer errors */
struct net_stats_ip_errors ip_errors;
#if defined(CONFIG_NET_STATISTICS_IPV6)
/** IPv6 statistics */
struct net_stats_ip ipv6;
#endif
#if defined(CONFIG_NET_STATISTICS_IPV4)
/** IPv4 statistics */
struct net_stats_ip ipv4;
#endif
#if defined(CONFIG_NET_STATISTICS_ICMP)
/** ICMP statistics */
struct net_stats_icmp icmp;
#endif
#if defined(CONFIG_NET_STATISTICS_TCP)
/** TCP statistics */
struct net_stats_tcp tcp;
#endif
#if defined(CONFIG_NET_STATISTICS_UDP)
/** UDP statistics */
struct net_stats_udp udp;
#endif
#if defined(CONFIG_NET_STATISTICS_IPV6_ND)
/** IPv6 neighbor discovery statistics */
struct net_stats_ipv6_nd ipv6_nd;
#endif
#if defined(CONFIG_NET_STATISTICS_MLD)
/** IPv6 MLD statistics */
struct net_stats_ipv6_mld ipv6_mld;
#endif
#if defined(CONFIG_NET_STATISTICS_IGMP)
/** IPv4 IGMP statistics */
struct net_stats_ipv4_igmp ipv4_igmp;
#endif
#if NET_TC_COUNT > 1
/** Traffic class statistics */
struct net_stats_tc tc;
#endif
#if defined(CONFIG_NET_PKT_TXTIME_STATS)
/** Network packet TX time statistics */
struct net_stats_tx_time tx_time;
#endif
#if defined(CONFIG_NET_PKT_RXTIME_STATS)
/** Network packet RX time statistics */
struct net_stats_rx_time rx_time;
#endif
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
/** Network packet TX time detail statistics */
struct net_stats_tx_time tx_time_detail[NET_PKT_DETAIL_STATS_COUNT];
#endif
#if defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
/** Network packet RX time detail statistics */
struct net_stats_rx_time rx_time_detail[NET_PKT_DETAIL_STATS_COUNT];
#endif
#if defined(CONFIG_NET_STATISTICS_POWER_MANAGEMENT)
/** Power management statistics */
struct net_stats_pm pm;
#endif
};
/**
 * @brief Ethernet error statistics
 */
struct net_stats_eth_errors {
/** Number of RX length errors */
net_stats_t rx_length_errors;
/** Number of RX overrun errors */
net_stats_t rx_over_errors;
/** Number of RX CRC errors */
net_stats_t rx_crc_errors;
/** Number of RX frame errors */
net_stats_t rx_frame_errors;
/** Number of RX net_pkt allocation errors */
net_stats_t rx_no_buffer_count;
/** Number of RX missed errors */
net_stats_t rx_missed_errors;
/** Number of RX long length errors */
net_stats_t rx_long_length_errors;
/** Number of RX short length errors */
net_stats_t rx_short_length_errors;
/** Number of RX buffer align errors */
net_stats_t rx_align_errors;
/** Number of RX DMA failed errors */
net_stats_t rx_dma_failed;
/** Number of RX net_buf allocation errors */
net_stats_t rx_buf_alloc_failed;
/** Number of TX aborted errors */
net_stats_t tx_aborted_errors;
/** Number of TX carrier errors */
net_stats_t tx_carrier_errors;
/** Number of TX FIFO errors */
net_stats_t tx_fifo_errors;
/** Number of TX heartbeat errors */
net_stats_t tx_heartbeat_errors;
/** Number of TX window errors */
net_stats_t tx_window_errors;
/** Number of TX DMA failed errors */
net_stats_t tx_dma_failed;
/** Number of uncorrected ECC errors */
net_stats_t uncorr_ecc_errors;
/** Number of corrected ECC errors */
net_stats_t corr_ecc_errors;
};
/**
 * @brief Ethernet flow control statistics
 */
struct net_stats_eth_flow {
/** Number of XON flow control frames received */
net_stats_t rx_flow_control_xon;
/** Number of XOFF flow control frames received */
net_stats_t rx_flow_control_xoff;
/** Number of XON flow control frames sent */
net_stats_t tx_flow_control_xon;
/** Number of XOFF flow control frames sent */
net_stats_t tx_flow_control_xoff;
};
/**
 * @brief Ethernet checksum statistics
 */
struct net_stats_eth_csum {
/** Number of good RX checksum offloading */
net_stats_t rx_csum_offload_good;
/** Number of failed RX checksum offloading */
net_stats_t rx_csum_offload_errors;
};
/**
 * @brief Ethernet hardware timestamp statistics
 */
struct net_stats_eth_hw_timestamp {
/** Number of RX hardware timestamps cleared */
net_stats_t rx_hwtstamp_cleared;
/** Number of TX hardware timestamp timeouts */
net_stats_t tx_hwtstamp_timeouts;
/** Number of TX hardware timestamps skipped */
net_stats_t tx_hwtstamp_skipped;
};
#ifdef CONFIG_NET_STATISTICS_ETHERNET_VENDOR
/**
 * @brief Ethernet vendor specific statistics
 */
struct net_stats_eth_vendor {
const char * const key; /**< Key name of vendor statistics */
uint32_t value; /**< Value of the statistics key */
};
#endif
/**
 * @brief All Ethernet specific statistics
 */
struct net_stats_eth {
/** Total number of bytes received and sent */
struct net_stats_bytes bytes;
/** Total number of packets received and sent */
struct net_stats_pkts pkts;
/** Total number of broadcast packets received and sent */
struct net_stats_pkts broadcast;
/** Total number of multicast packets received and sent */
struct net_stats_pkts multicast;
/** Total number of errors in RX and TX */
struct net_stats_pkts errors;
/** Detailed RX and TX error statistics (see net_stats_eth_errors) */
struct net_stats_eth_errors error_details;
/** Flow control (XON/XOFF) statistics for RX and TX */
struct net_stats_eth_flow flow_control;
/** Checksum offload statistics for RX */
struct net_stats_eth_csum csum;
/** Hardware timestamp statistics for RX and TX */
struct net_stats_eth_hw_timestamp hw_timestamp;
/** Total number of collisions */
net_stats_t collisions;
/** Total number of dropped TX packets */
net_stats_t tx_dropped;
/** Total number of TX timeout errors */
net_stats_t tx_timeout_count;
/** Total number of TX queue restarts */
net_stats_t tx_restart_queue;
/** Total number of RX unknown protocol packets */
net_stats_t unknown_protocol;
#ifdef CONFIG_NET_STATISTICS_ETHERNET_VENDOR
/** Array is terminated with an entry containing a NULL key */
struct net_stats_eth_vendor *vendor;
#endif
};
/**
 * @brief All PPP specific statistics
 */
struct net_stats_ppp {
/** Total number of bytes received and sent */
struct net_stats_bytes bytes;
/** Total number of packets received and sent */
struct net_stats_pkts pkts;
/** Number of received and dropped PPP frames. */
net_stats_t drop;
/** Number of received PPP frames with a bad checksum. */
net_stats_t chkerr;
};
/**
 * @brief All Wi-Fi management statistics
 */
struct net_stats_sta_mgmt {
/** Number of received beacons */
net_stats_t beacons_rx;
/** Number of missed beacons */
net_stats_t beacons_miss;
};
/**
 * @brief All Wi-Fi specific statistics
 */
struct net_stats_wifi {
/** Station management statistics (beacon counters) */
struct net_stats_sta_mgmt sta_mgmt;
/** Total number of bytes received and sent */
struct net_stats_bytes bytes;
/** Total number of packets received and sent */
struct net_stats_pkts pkts;
/** Total number of broadcast packets received and sent */
struct net_stats_pkts broadcast;
/** Total number of multicast packets received and sent */
struct net_stats_pkts multicast;
/** Total number of errors in RX and TX */
struct net_stats_pkts errors;
/** Total number of unicast packets received and sent */
struct net_stats_pkts unicast;
/** Total number of RX and TX overruns (dropped packets). */
net_stats_t overrun_count;
};
#if defined(CONFIG_NET_STATISTICS_USER_API)
/* Management part definitions */
/** @cond INTERNAL_HIDDEN */
#define _NET_STATS_LAYER NET_MGMT_LAYER_L3
#define _NET_STATS_CODE 0x101
#define _NET_STATS_BASE (NET_MGMT_LAYER(_NET_STATS_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_STATS_CODE))
enum net_request_stats_cmd {
NET_REQUEST_STATS_CMD_GET_ALL = 1,
NET_REQUEST_STATS_CMD_GET_PROCESSING_ERROR,
NET_REQUEST_STATS_CMD_GET_BYTES,
NET_REQUEST_STATS_CMD_GET_IP_ERRORS,
NET_REQUEST_STATS_CMD_GET_IPV4,
NET_REQUEST_STATS_CMD_GET_IPV6,
NET_REQUEST_STATS_CMD_GET_IPV6_ND,
NET_REQUEST_STATS_CMD_GET_ICMP,
NET_REQUEST_STATS_CMD_GET_UDP,
NET_REQUEST_STATS_CMD_GET_TCP,
NET_REQUEST_STATS_CMD_GET_ETHERNET,
NET_REQUEST_STATS_CMD_GET_PPP,
NET_REQUEST_STATS_CMD_GET_PM,
NET_REQUEST_STATS_CMD_GET_WIFI,
NET_REQUEST_STATS_CMD_RESET_WIFI,
};
/** @endcond */
/** Request all network statistics */
#define NET_REQUEST_STATS_GET_ALL \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_ALL)
/** Request all processing error statistics */
#define NET_REQUEST_STATS_GET_PROCESSING_ERROR \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_PROCESSING_ERROR)
/** Request number of received and sent bytes */
#define NET_REQUEST_STATS_GET_BYTES \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_BYTES)
/** Request IP error statistics */
#define NET_REQUEST_STATS_GET_IP_ERRORS \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_IP_ERRORS)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_ALL);
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_PROCESSING_ERROR);
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_BYTES);
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_IP_ERRORS);
/** @endcond */
#if defined(CONFIG_NET_STATISTICS_IPV4)
/** Request IPv4 statistics */
#define NET_REQUEST_STATS_GET_IPV4 \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_IPV4)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_IPV4);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_IPV4 */
#if defined(CONFIG_NET_STATISTICS_IPV6)
/** Request IPv6 statistics */
#define NET_REQUEST_STATS_GET_IPV6 \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_IPV6)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_IPV6);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_IPV6 */
#if defined(CONFIG_NET_STATISTICS_IPV6_ND)
/** Request IPv6 neighbor discovery statistics */
#define NET_REQUEST_STATS_GET_IPV6_ND \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_IPV6_ND)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_IPV6_ND);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_IPV6_ND */
#if defined(CONFIG_NET_STATISTICS_ICMP)
/** Request ICMPv4 and ICMPv6 statistics */
#define NET_REQUEST_STATS_GET_ICMP \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_ICMP)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_ICMP);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_ICMP */
#if defined(CONFIG_NET_STATISTICS_UDP)
/** Request UDP statistics */
#define NET_REQUEST_STATS_GET_UDP \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_UDP)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_UDP);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_UDP */
#if defined(CONFIG_NET_STATISTICS_TCP)
/** Request TCP statistics */
#define NET_REQUEST_STATS_GET_TCP \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_TCP)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_TCP);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_TCP */
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
/** Request Ethernet statistics */
#define NET_REQUEST_STATS_GET_ETHERNET \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_ETHERNET)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_ETHERNET);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_ETHERNET */
#if defined(CONFIG_NET_STATISTICS_PPP)
/** Request PPP statistics */
#define NET_REQUEST_STATS_GET_PPP \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_PPP)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_PPP);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_PPP */
#if defined(CONFIG_NET_STATISTICS_POWER_MANAGEMENT)
/** Request network power management statistics */
#define NET_REQUEST_STATS_GET_PM \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_PM)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_PM);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_POWER_MANAGEMENT */
#if defined(CONFIG_NET_STATISTICS_WIFI)
/** Request Wi-Fi statistics */
#define NET_REQUEST_STATS_GET_WIFI \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_WIFI)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_WIFI);
/** @endcond */
/** Reset Wi-Fi statistics */
#define NET_REQUEST_STATS_RESET_WIFI \
(_NET_STATS_BASE | NET_REQUEST_STATS_CMD_RESET_WIFI)
/** @cond INTERNAL_HIDDEN */
NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_RESET_WIFI);
/** @endcond */
#endif /* CONFIG_NET_STATISTICS_WIFI */
/* The PM and Wi-Fi requests above use _NET_STATS_BASE and values from
 * enum net_request_stats_cmd, both declared only when
 * CONFIG_NET_STATISTICS_USER_API is enabled, so they must stay inside
 * this guard.
 */
#endif /* CONFIG_NET_STATISTICS_USER_API */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_NET_STATS_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_stats.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,537 |
```objective-c
/*
*
*/
/**
* @file
* @brief Network Events code public header
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_EVENT_H_
#define ZEPHYR_INCLUDE_NET_NET_EVENT_H_
#include <zephyr/net/net_ip.h>
#include <zephyr/net/hostname.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @addtogroup net_mgmt
* @{
*/
/** @cond INTERNAL_HIDDEN */
/* Network Interface events */
#define _NET_IF_LAYER NET_MGMT_LAYER_L2
#define _NET_IF_CORE_CODE 0x001
#define _NET_EVENT_IF_BASE (NET_MGMT_EVENT_BIT | \
NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_IF_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_IF_CORE_CODE))
enum net_event_if_cmd {
NET_EVENT_IF_CMD_DOWN = 1,
NET_EVENT_IF_CMD_UP,
NET_EVENT_IF_CMD_ADMIN_DOWN,
NET_EVENT_IF_CMD_ADMIN_UP,
};
/* IPv6 Events */
#define _NET_IPV6_LAYER NET_MGMT_LAYER_L3
#define _NET_IPV6_CORE_CODE 0x060
#define _NET_EVENT_IPV6_BASE (NET_MGMT_EVENT_BIT | \
NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_IPV6_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_IPV6_CORE_CODE))
enum net_event_ipv6_cmd {
NET_EVENT_IPV6_CMD_ADDR_ADD = 1,
NET_EVENT_IPV6_CMD_ADDR_DEL,
NET_EVENT_IPV6_CMD_MADDR_ADD,
NET_EVENT_IPV6_CMD_MADDR_DEL,
NET_EVENT_IPV6_CMD_PREFIX_ADD,
NET_EVENT_IPV6_CMD_PREFIX_DEL,
NET_EVENT_IPV6_CMD_MCAST_JOIN,
NET_EVENT_IPV6_CMD_MCAST_LEAVE,
NET_EVENT_IPV6_CMD_ROUTER_ADD,
NET_EVENT_IPV6_CMD_ROUTER_DEL,
NET_EVENT_IPV6_CMD_ROUTE_ADD,
NET_EVENT_IPV6_CMD_ROUTE_DEL,
NET_EVENT_IPV6_CMD_DAD_SUCCEED,
NET_EVENT_IPV6_CMD_DAD_FAILED,
NET_EVENT_IPV6_CMD_NBR_ADD,
NET_EVENT_IPV6_CMD_NBR_DEL,
NET_EVENT_IPV6_CMD_DHCP_START,
NET_EVENT_IPV6_CMD_DHCP_BOUND,
NET_EVENT_IPV6_CMD_DHCP_STOP,
NET_EVENT_IPV6_CMD_ADDR_DEPRECATED,
NET_EVENT_IPV6_CMD_PE_ENABLED,
NET_EVENT_IPV6_CMD_PE_DISABLED,
NET_EVENT_IPV6_CMD_PE_FILTER_ADD,
NET_EVENT_IPV6_CMD_PE_FILTER_DEL,
};
/* IPv4 Events*/
#define _NET_IPV4_LAYER NET_MGMT_LAYER_L3
#define _NET_IPV4_CORE_CODE 0x004
#define _NET_EVENT_IPV4_BASE (NET_MGMT_EVENT_BIT | \
NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_IPV4_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_IPV4_CORE_CODE))
enum net_event_ipv4_cmd {
NET_EVENT_IPV4_CMD_ADDR_ADD = 1,
NET_EVENT_IPV4_CMD_ADDR_DEL,
NET_EVENT_IPV4_CMD_MADDR_ADD,
NET_EVENT_IPV4_CMD_MADDR_DEL,
NET_EVENT_IPV4_CMD_ROUTER_ADD,
NET_EVENT_IPV4_CMD_ROUTER_DEL,
NET_EVENT_IPV4_CMD_DHCP_START,
NET_EVENT_IPV4_CMD_DHCP_BOUND,
NET_EVENT_IPV4_CMD_DHCP_STOP,
NET_EVENT_IPV4_CMD_MCAST_JOIN,
NET_EVENT_IPV4_CMD_MCAST_LEAVE,
NET_EVENT_IPV4_CMD_ACD_SUCCEED,
NET_EVENT_IPV4_CMD_ACD_FAILED,
NET_EVENT_IPV4_CMD_ACD_CONFLICT,
};
/* L4 network events */
#define _NET_L4_LAYER NET_MGMT_LAYER_L4
#define _NET_L4_CORE_CODE 0x114
#define _NET_EVENT_L4_BASE (NET_MGMT_EVENT_BIT | \
NET_MGMT_IFACE_BIT | \
NET_MGMT_LAYER(_NET_L4_LAYER) | \
NET_MGMT_LAYER_CODE(_NET_L4_CORE_CODE))
enum net_event_l4_cmd {
NET_EVENT_L4_CMD_CONNECTED = 1,
NET_EVENT_L4_CMD_DISCONNECTED,
NET_EVENT_L4_CMD_IPV4_CONNECTED,
NET_EVENT_L4_CMD_IPV4_DISCONNECTED,
NET_EVENT_L4_CMD_IPV6_CONNECTED,
NET_EVENT_L4_CMD_IPV6_DISCONNECTED,
NET_EVENT_L4_CMD_DNS_SERVER_ADD,
NET_EVENT_L4_CMD_DNS_SERVER_DEL,
NET_EVENT_L4_CMD_HOSTNAME_CHANGED,
NET_EVENT_L4_CMD_CAPTURE_STARTED,
NET_EVENT_L4_CMD_CAPTURE_STOPPED,
};
/** @endcond */
/** Event emitted when the network interface goes down. */
#define NET_EVENT_IF_DOWN \
(_NET_EVENT_IF_BASE | NET_EVENT_IF_CMD_DOWN)
/** Event emitted when the network interface goes up. */
#define NET_EVENT_IF_UP \
(_NET_EVENT_IF_BASE | NET_EVENT_IF_CMD_UP)
/** Event emitted when the network interface is taken down manually. */
#define NET_EVENT_IF_ADMIN_DOWN \
(_NET_EVENT_IF_BASE | NET_EVENT_IF_CMD_ADMIN_DOWN)
/** Event emitted when the network interface goes up manually. */
#define NET_EVENT_IF_ADMIN_UP \
(_NET_EVENT_IF_BASE | NET_EVENT_IF_CMD_ADMIN_UP)
/** Event emitted when an IPv6 address is added to the system. */
#define NET_EVENT_IPV6_ADDR_ADD \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_ADDR_ADD)
/** Event emitted when an IPv6 address is removed from the system. */
#define NET_EVENT_IPV6_ADDR_DEL \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_ADDR_DEL)
/** Event emitted when an IPv6 multicast address is added to the system. */
#define NET_EVENT_IPV6_MADDR_ADD \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_MADDR_ADD)
/** Event emitted when an IPv6 multicast address is removed from the system. */
#define NET_EVENT_IPV6_MADDR_DEL \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_MADDR_DEL)
/** Event emitted when an IPv6 prefix is added to the system. */
#define NET_EVENT_IPV6_PREFIX_ADD \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_PREFIX_ADD)
/** Event emitted when an IPv6 prefix is removed from the system. */
#define NET_EVENT_IPV6_PREFIX_DEL \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_PREFIX_DEL)
/** Event emitted when an IPv6 multicast group is joined. */
#define NET_EVENT_IPV6_MCAST_JOIN \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_MCAST_JOIN)
/** Event emitted when an IPv6 multicast group is left. */
#define NET_EVENT_IPV6_MCAST_LEAVE \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_MCAST_LEAVE)
/** Event emitted when an IPv6 router is added to the system. */
#define NET_EVENT_IPV6_ROUTER_ADD \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_ROUTER_ADD)
/** Event emitted when an IPv6 router is removed from the system. */
#define NET_EVENT_IPV6_ROUTER_DEL \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_ROUTER_DEL)
/** Event emitted when an IPv6 route is added to the system. */
#define NET_EVENT_IPV6_ROUTE_ADD \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_ROUTE_ADD)
/** Event emitted when an IPv6 route is removed from the system. */
#define NET_EVENT_IPV6_ROUTE_DEL \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_ROUTE_DEL)
/** Event emitted when an IPv6 duplicate address detection succeeds. */
#define NET_EVENT_IPV6_DAD_SUCCEED \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_DAD_SUCCEED)
/** Event emitted when an IPv6 duplicate address detection fails. */
#define NET_EVENT_IPV6_DAD_FAILED \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_DAD_FAILED)
/** Event emitted when an IPv6 neighbor is added to the system. */
#define NET_EVENT_IPV6_NBR_ADD \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_NBR_ADD)
/** Event emitted when an IPv6 neighbor is removed from the system. */
#define NET_EVENT_IPV6_NBR_DEL \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_NBR_DEL)
/** Event emitted when an IPv6 DHCP client starts. */
#define NET_EVENT_IPV6_DHCP_START \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_DHCP_START)
/** Event emitted when an IPv6 DHCP client address is bound. */
#define NET_EVENT_IPV6_DHCP_BOUND \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_DHCP_BOUND)
/** Event emitted when an IPv6 DHCP client is stopped. */
#define NET_EVENT_IPV6_DHCP_STOP \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_DHCP_STOP)
/** IPv6 address is deprecated. */
#define NET_EVENT_IPV6_ADDR_DEPRECATED \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_ADDR_DEPRECATED)
/** IPv6 Privacy extension is enabled. */
#define NET_EVENT_IPV6_PE_ENABLED \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_PE_ENABLED)
/** IPv6 Privacy extension is disabled. */
#define NET_EVENT_IPV6_PE_DISABLED \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_PE_DISABLED)
/** IPv6 Privacy extension filter is added. */
#define NET_EVENT_IPV6_PE_FILTER_ADD \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_PE_FILTER_ADD)
/** IPv6 Privacy extension filter is removed. */
#define NET_EVENT_IPV6_PE_FILTER_DEL \
(_NET_EVENT_IPV6_BASE | NET_EVENT_IPV6_CMD_PE_FILTER_DEL)
/** Event emitted when an IPv4 address is added to the system. */
#define NET_EVENT_IPV4_ADDR_ADD \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_ADDR_ADD)
/** Event emitted when an IPv4 address is removed from the system. */
#define NET_EVENT_IPV4_ADDR_DEL \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_ADDR_DEL)
/** Event emitted when an IPv4 multicast address is added to the system. */
#define NET_EVENT_IPV4_MADDR_ADD \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_MADDR_ADD)
/** Event emitted when an IPv4 multicast address is removed from the system. */
#define NET_EVENT_IPV4_MADDR_DEL \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_MADDR_DEL)
/** Event emitted when an IPv4 router is added to the system. */
#define NET_EVENT_IPV4_ROUTER_ADD \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_ROUTER_ADD)
/** Event emitted when an IPv4 router is removed from the system. */
#define NET_EVENT_IPV4_ROUTER_DEL \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_ROUTER_DEL)
/** Event emitted when an IPv4 DHCP client is started. */
#define NET_EVENT_IPV4_DHCP_START \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_DHCP_START)
/** Event emitted when an IPv4 DHCP client address is bound. */
#define NET_EVENT_IPV4_DHCP_BOUND \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_DHCP_BOUND)
/** Event emitted when an IPv4 DHCP client is stopped. */
#define NET_EVENT_IPV4_DHCP_STOP \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_DHCP_STOP)
/** Event emitted when an IPv4 multicast group is joined. */
#define NET_EVENT_IPV4_MCAST_JOIN \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_MCAST_JOIN)
/** Event emitted when an IPv4 multicast group is left. */
#define NET_EVENT_IPV4_MCAST_LEAVE \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_MCAST_LEAVE)
/** Event emitted when an IPv4 address conflict detection succeeds. */
#define NET_EVENT_IPV4_ACD_SUCCEED \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_ACD_SUCCEED)
/** Event emitted when an IPv4 address conflict detection fails. */
#define NET_EVENT_IPV4_ACD_FAILED \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_ACD_FAILED)
/** Event emitted when an IPv4 address conflict was detected after the address
* was confirmed as safe to use. It's up to the application to determine on
* how to act in such case.
*/
#define NET_EVENT_IPV4_ACD_CONFLICT \
(_NET_EVENT_IPV4_BASE | NET_EVENT_IPV4_CMD_ACD_CONFLICT)
/** Event emitted when the system is considered to be connected.
* The connected in this context means that the network interface is up,
* and the interface has either IPv4 or IPv6 address assigned to it.
*/
#define NET_EVENT_L4_CONNECTED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_CONNECTED)
/** Event emitted when the system is no longer connected.
* Typically this means that network connectivity is lost either by
* the network interface is going down, or the interface has no longer
* an IP address etc.
*/
#define NET_EVENT_L4_DISCONNECTED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_DISCONNECTED)
/** Event raised when IPv4 network connectivity is available. */
#define NET_EVENT_L4_IPV4_CONNECTED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_IPV4_CONNECTED)
/** Event emitted when IPv4 network connectivity is lost. */
#define NET_EVENT_L4_IPV4_DISCONNECTED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_IPV4_DISCONNECTED)
/** Event emitted when IPv6 network connectivity is available. */
#define NET_EVENT_L4_IPV6_CONNECTED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_IPV6_CONNECTED)
/** Event emitted when IPv6 network connectivity is lost. */
#define NET_EVENT_L4_IPV6_DISCONNECTED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_IPV6_DISCONNECTED)
/** Event emitted when a DNS server is added to the system. */
#define NET_EVENT_DNS_SERVER_ADD \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_DNS_SERVER_ADD)
/** Event emitted when a DNS server is removed from the system. */
#define NET_EVENT_DNS_SERVER_DEL \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_DNS_SERVER_DEL)
/** Event emitted when the system hostname is changed. */
#define NET_EVENT_HOSTNAME_CHANGED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_HOSTNAME_CHANGED)
/** Network packet capture is started. */
#define NET_EVENT_CAPTURE_STARTED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_CAPTURE_STARTED)
/** Network packet capture is stopped. */
#define NET_EVENT_CAPTURE_STOPPED \
(_NET_EVENT_L4_BASE | NET_EVENT_L4_CMD_CAPTURE_STOPPED)
/**
 * @brief Network Management event information structure
 * Used to pass information on network events like
 * NET_EVENT_IPV6_ADDR_ADD,
 * NET_EVENT_IPV6_ADDR_DEL,
 * NET_EVENT_IPV6_MADDR_ADD and
 * NET_EVENT_IPV6_MADDR_DEL
 * when CONFIG_NET_MGMT_EVENT_INFO is enabled and the event generator
 * passes the information.
 */
struct net_event_ipv6_addr {
/** IPv6 address related to this event */
struct in6_addr addr;
};
/**
 * @brief Network Management event information structure
 * Used to pass information on network events like
 * NET_EVENT_IPV6_NBR_ADD and
 * NET_EVENT_IPV6_NBR_DEL
 * when CONFIG_NET_MGMT_EVENT_INFO is enabled and the event generator
 * passes the information.
 * @note idx will be '-1' in case of NET_EVENT_IPV6_NBR_DEL event.
 */
struct net_event_ipv6_nbr {
/** Neighbor IPv6 address */
struct in6_addr addr;
/** Neighbor index in cache */
int idx;
};
/**
 * @brief Network Management event information structure
 * Used to pass information on network events like
 * NET_EVENT_IPV6_ROUTE_ADD and
 * NET_EVENT_IPV6_ROUTE_DEL
 * when CONFIG_NET_MGMT_EVENT_INFO is enabled and the event generator
 * passes the information.
 */
struct net_event_ipv6_route {
/** IPv6 address of the next hop */
struct in6_addr nexthop;
/** IPv6 address or prefix of the route */
struct in6_addr addr;
/** IPv6 prefix length */
uint8_t prefix_len;
};
/**
 * @brief Network Management event information structure
 * Used to pass information on network events like
 * NET_EVENT_IPV6_PREFIX_ADD and
 * NET_EVENT_IPV6_PREFIX_DEL
 * when CONFIG_NET_MGMT_EVENT_INFO is enabled and the event generator
 * passes the information.
 */
struct net_event_ipv6_prefix {
/** IPv6 prefix */
struct in6_addr addr;
/** IPv6 prefix length */
uint8_t len;
/** IPv6 prefix lifetime in seconds */
uint32_t lifetime;
};
/**
 * @brief Network Management event information structure
 * Used to pass information on NET_EVENT_HOSTNAME_CHANGED event when
 * CONFIG_NET_MGMT_EVENT_INFO is enabled and the event generator passes
 * the information.
 */
struct net_event_l4_hostname {
/** New hostname */
char hostname[NET_HOSTNAME_SIZE];
};
/**
 * @brief Network Management event information structure
 * Used to pass information on network events like
 * NET_EVENT_IPV6_PE_FILTER_ADD and
 * NET_EVENT_IPV6_PE_FILTER_DEL
 * when CONFIG_NET_MGMT_EVENT_INFO is enabled and the event generator
 * passes the information.
 *
 * This is only available if CONFIG_NET_IPV6_PE_FILTER_PREFIX_COUNT is >0.
 */
struct net_event_ipv6_pe_filter {
/** IPv6 address of privacy extension filter */
struct in6_addr prefix;
/** IPv6 filter deny or allow list */
bool is_deny_list;
};
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_NET_EVENT_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_event.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,885 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_LWM2M_PATH_H_
#define ZEPHYR_INCLUDE_NET_LWM2M_PATH_H_
/**
* @file lwm2m.h
*
* @brief LwM2M path helper macros
*
* @defgroup lwm2m_path_helpers LwM2M path helper macros
* @since 2.5
* @version 0.8.0
* @ingroup lwm2m_api
* @{
*/
/**
* @brief Generate LwM2M string paths using numeric components.
*
* Accepts at least one and up to four arguments. Each argument will be
* stringified by the pre-processor, so calling this with non-literals will
* likely not do what you want.
*
* For example:
*
* @code{c}
* #define MY_OBJ_ID 3
* LWM2M_PATH(MY_OBJ_ID, 0, 1)
* @endcode
*
* would evaluate to "3/0/1", while
*
* @code{c}
* int x = 3;
* LWM2M_PATH(x, 0, 1)
* @endcode
*
* evaluates to "x/0/1".
*/
#define LWM2M_PATH(...) \
LWM2M_PATH_MACRO(__VA_ARGS__, LWM2M_PATH4, LWM2M_PATH3, \
LWM2M_PATH2, LWM2M_PATH1)(__VA_ARGS__)
/** @cond INTERNAL_HIDDEN */
/* Internal helper macros for the LWM2M_PATH macro */
/* NOTE(review): LWM2M_PATH_VA_NUM_ARGS and its _IMPL helper are not
 * referenced by LWM2M_PATH in this file (argument selection is done
 * via LWM2M_PATH_MACRO below) - confirm there are no external users
 * before removing them.
 */
#define LWM2M_PATH_VA_NUM_ARGS(...) \
LWM2M_PATH_VA_NUM_ARGS_IMPL(__VA_ARGS__, 5, 4, 3, 2, 1)
#define LWM2M_PATH_VA_NUM_ARGS_IMPL(_1, _2, _3, _4, N, ...) N
/* Each helper stringifies its arguments and joins them with '/'. */
#define LWM2M_PATH1(_x) #_x
#define LWM2M_PATH2(_x, _y) #_x "/" #_y
#define LWM2M_PATH3(_x, _y, _z) #_x "/" #_y "/" #_z
#define LWM2M_PATH4(_a, _x, _y, _z) #_a "/" #_x "/" #_y "/" #_z
/* Selects the right LWM2M_PATHn for 1..4 arguments. */
#define LWM2M_PATH_MACRO(_1, _2, _3, _4, NAME, ...) NAME
/** @endcond */
/**
* @brief Initialize LwM2M object structure
*
* Accepts at least one and up to four arguments. Fill up @ref lwm2m_obj_path structure
* and sets the level.
*
* For example:
*
* @code{c}
* struct lwm2m_obj_path p = LWM2M_OBJ(MY_OBJ, 0, RESOURCE);
* @endcode
*
* Can also be used in place of function argument to return the structure allocated from stack
*
* @code{c}
* lwm2m_notify_observer_path(&LWM2M_OBJ(MY_OBJ, inst_id, RESOURCE));
* @endcode
*
*/
#define LWM2M_OBJ(...) \
	GET_OBJ_MACRO(__VA_ARGS__, LWM2M_OBJ4, LWM2M_OBJ3, LWM2M_OBJ2, LWM2M_OBJ1)(__VA_ARGS__)
/** @cond INTERNAL_HIDDEN */
/* Internal helper macros for the LWM2M_OBJ macro.
 *
 * Same argument-counting dispatch as LWM2M_PATH: NAME resolves to the
 * LWM2M_OBJ<N> helper matching the number of arguments (1..4).
 */
#define GET_OBJ_MACRO(_1, _2, _3, _4, NAME, ...) NAME
/* Compound-literal initializers covering 1..4 path levels:
 * object id / object instance id / resource id / resource instance id.
 * The .level field records how many components were supplied.
 */
#define LWM2M_OBJ1(oi) (struct lwm2m_obj_path) {.obj_id = oi, .level = 1}
#define LWM2M_OBJ2(oi, oii) (struct lwm2m_obj_path) {.obj_id = oi, .obj_inst_id = oii, .level = 2}
#define LWM2M_OBJ3(oi, oii, ri) (struct lwm2m_obj_path) \
	{.obj_id = oi, .obj_inst_id = oii, .res_id = ri, .level = 3}
#define LWM2M_OBJ4(oi, oii, ri, rii) (struct lwm2m_obj_path) \
	{.obj_id = oi, .obj_inst_id = oii, .res_id = ri, .res_inst_id = rii, .level = 4}
/** @endcond */
/** @} */
#endif /* ZEPHYR_INCLUDE_NET_LWM2M_PATH_H_ */
``` | /content/code_sandbox/include/zephyr/net/lwm2m_path.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 971 |
```objective-c
/** @file
* @brief DNS resolving library
*
* An API for applications to resolve a DNS name.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_DNS_RESOLVE_H_
#define ZEPHYR_INCLUDE_NET_DNS_RESOLVE_H_
#include <zephyr/kernel.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/socket_poll.h>
#include <zephyr/net/net_core.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief DNS resolving library
* @defgroup dns_resolve DNS Resolve Library
* @since 1.8
* @version 0.8.0
* @ingroup networking
* @{
*/
/**
* DNS query type enum
*/
enum dns_query_type {
/** IPv4 query */
DNS_QUERY_TYPE_A = 1,
/** IPv6 query */
DNS_QUERY_TYPE_AAAA = 28
};
/** Max size of the resolved name. */
#ifndef DNS_MAX_NAME_SIZE
#define DNS_MAX_NAME_SIZE 20
#endif
/** @cond INTERNAL_HIDDEN */
#define DNS_BUF_TIMEOUT K_MSEC(500) /* ms */
/* This value is recommended by RFC 1035 */
#define DNS_RESOLVER_MAX_BUF_SIZE 512
/* Make sure that we can compile things even if CONFIG_DNS_RESOLVER
* is not enabled.
*/
#if defined(CONFIG_DNS_RESOLVER_MAX_SERVERS)
#define DNS_RESOLVER_MAX_SERVERS CONFIG_DNS_RESOLVER_MAX_SERVERS
#else
#define DNS_RESOLVER_MAX_SERVERS 0
#endif
#if defined(CONFIG_DNS_NUM_CONCUR_QUERIES)
#define DNS_NUM_CONCUR_QUERIES CONFIG_DNS_NUM_CONCUR_QUERIES
#else
#define DNS_NUM_CONCUR_QUERIES 1
#endif
#if defined(CONFIG_NET_IF_MAX_IPV6_COUNT)
#define MAX_IPV6_IFACE_COUNT CONFIG_NET_IF_MAX_IPV6_COUNT
#else
#define MAX_IPV6_IFACE_COUNT 1
#endif
#if defined(CONFIG_NET_IF_MAX_IPV4_COUNT)
#define MAX_IPV4_IFACE_COUNT CONFIG_NET_IF_MAX_IPV4_COUNT
#else
#define MAX_IPV4_IFACE_COUNT 1
#endif
/* If mDNS is enabled, then add some extra well known multicast servers to the
* server list.
*/
#if defined(CONFIG_MDNS_RESOLVER)
#if defined(CONFIG_NET_IPV6) && defined(CONFIG_NET_IPV4)
#define MDNS_SERVER_COUNT 2
#else
#define MDNS_SERVER_COUNT 1
#endif /* CONFIG_NET_IPV6 && CONFIG_NET_IPV4 */
#else
#define MDNS_SERVER_COUNT 0
#endif /* CONFIG_MDNS_RESOLVER */
/* If LLMNR is enabled, then add some extra well known multicast servers to the
* server list.
*/
#if defined(CONFIG_LLMNR_RESOLVER)
#if defined(CONFIG_NET_IPV6) && defined(CONFIG_NET_IPV4)
#define LLMNR_SERVER_COUNT 2
#else
#define LLMNR_SERVER_COUNT 1
#endif /* CONFIG_NET_IPV6 && CONFIG_NET_IPV4 */
#else
#define LLMNR_SERVER_COUNT 0
#endif /* CONFIG_LLMNR_RESOLVER */
#define DNS_MAX_MCAST_SERVERS (MDNS_SERVER_COUNT + LLMNR_SERVER_COUNT)
#if defined(CONFIG_MDNS_RESPONDER)
#if defined(CONFIG_NET_IPV6)
#define MDNS_MAX_IPV6_IFACE_COUNT CONFIG_NET_IF_MAX_IPV6_COUNT
#else
#define MDNS_MAX_IPV6_IFACE_COUNT 0
#endif /* CONFIG_NET_IPV6 */
#if defined(CONFIG_NET_IPV4)
#define MDNS_MAX_IPV4_IFACE_COUNT CONFIG_NET_IF_MAX_IPV4_COUNT
#else
#define MDNS_MAX_IPV4_IFACE_COUNT 0
#endif /* CONFIG_NET_IPV4 */
#define MDNS_MAX_POLL (MDNS_MAX_IPV4_IFACE_COUNT + MDNS_MAX_IPV6_IFACE_COUNT)
#else
#define MDNS_MAX_POLL 0
#endif /* CONFIG_MDNS_RESPONDER */
#if defined(CONFIG_LLMNR_RESPONDER)
#if defined(CONFIG_NET_IPV6) && defined(CONFIG_NET_IPV4)
#define LLMNR_MAX_POLL 2
#else
#define LLMNR_MAX_POLL 1
#endif
#else
#define LLMNR_MAX_POLL 0
#endif /* CONFIG_LLMNR_RESPONDER */
#define DNS_RESOLVER_MAX_POLL (DNS_RESOLVER_MAX_SERVERS + DNS_MAX_MCAST_SERVERS)
/** How many sockets the dispatcher is able to poll. */
#define DNS_DISPATCHER_MAX_POLL (DNS_RESOLVER_MAX_POLL + MDNS_MAX_POLL + LLMNR_MAX_POLL)
#if defined(CONFIG_NET_SOCKETS_POLL_MAX)
BUILD_ASSERT(CONFIG_NET_SOCKETS_POLL_MAX >= DNS_DISPATCHER_MAX_POLL,
"CONFIG_NET_SOCKETS_POLL_MAX must be larger than " STRINGIFY(DNS_DISPATCHER_MAX_POLL));
#endif
/** @brief What is the type of the socket given to DNS socket dispatcher,
* resolver or responder.
*/
enum dns_socket_type {
DNS_SOCKET_RESOLVER = 1, /**< Socket is used for resolving (client type) */
DNS_SOCKET_RESPONDER = 2 /**< Socket is used for responding (server type) */
};
struct dns_resolve_context;
struct mdns_responder_context;
/**
* @typedef dns_socket_dispatcher_cb
* @brief Callback used when the DNS socket dispatcher has found a handler for
* this type of socket.
*
* @param ctx DNS resolve or mDNS responder context.
* @param sock Socket which is seeing traffic.
* @param addr Socket address of the peer that sent the DNS packet.
* @param addrlen Length of the socket address.
* @param buf Pointer to data buffer containing the DNS message.
* @param data_len Length of the data in buffer chain.
*
* @return 0 if ok, <0 if error
*/
typedef int (*dns_socket_dispatcher_cb)(void *ctx, int sock,
struct sockaddr *addr, size_t addrlen,
struct net_buf *buf, size_t data_len);
/** @brief DNS socket dispatcher context. */
struct dns_socket_dispatcher {
/** slist node for the different dispatcher contexts */
sys_snode_t node;
/** Socket service for this dispatcher instance */
const struct net_socket_service_desc *svc;
/** DNS resolver context that contains information needed by the
* resolver/responder handler, or mDNS responder context.
*/
union {
void *ctx;
struct dns_resolve_context *resolve_ctx;
struct mdns_responder_context *mdns_ctx;
};
/** Type of the socket (resolver / responder) */
enum dns_socket_type type;
/** Local endpoint address (used when binding the socket) */
struct sockaddr local_addr;
/** DNS socket dispatcher callback is called for incoming traffic */
dns_socket_dispatcher_cb cb;
/** Socket descriptors to poll */
struct zsock_pollfd *fds;
/** Length of the poll array */
int fds_len;
/** Local socket to dispatch */
int sock;
/** There can be two contexts to dispatch. This points to the other
* context if sharing the socket between resolver / responder.
*/
struct dns_socket_dispatcher *pair;
/** Mutex lock protecting access to this dispatcher context */
struct k_mutex lock;
/** Buffer allocation timeout */
k_timeout_t buf_timeout;
};
/* Per-socket context for the mDNS responder (internal API; this section is
 * inside the INTERNAL_HIDDEN cond above).
 */
struct mdns_responder_context {
	/* Server address; presumably the address the responder socket is
	 * bound to — confirm against the mDNS responder implementation.
	 */
	struct sockaddr server_addr;
	/* Dispatcher sharing the socket between resolver and responder */
	struct dns_socket_dispatcher dispatcher;
	/* Poll descriptor for the single responder socket */
	struct zsock_pollfd fds[1];
	/* Responder socket descriptor */
	int sock;
};
/**
* @brief Register a DNS dispatcher socket. Each code wanting to use
* the dispatcher needs to create the dispatcher context and call
* this function.
*
* @param ctx DNS socket dispatcher context.
*
 * @return 0 if ok, <0 if error
*/
int dns_dispatcher_register(struct dns_socket_dispatcher *ctx);
/**
* @brief Unregister a DNS dispatcher socket. Called when the
* resolver/responder no longer needs to receive traffic for the
* socket.
*
* @param ctx DNS socket dispatcher context.
*
 * @return 0 if ok, <0 if error
*/
int dns_dispatcher_unregister(struct dns_socket_dispatcher *ctx);
/** @endcond */
/**
* Address info struct is passed to callback that gets all the results.
*/
struct dns_addrinfo {
/** IP address information */
struct sockaddr ai_addr;
/** Length of the ai_addr field */
socklen_t ai_addrlen;
/** Address family of the address information */
uint8_t ai_family;
/** Canonical name of the address */
char ai_canonname[DNS_MAX_NAME_SIZE + 1];
};
/**
* Status values for the callback.
*/
enum dns_resolve_status {
/** Invalid value for `ai_flags' field */
DNS_EAI_BADFLAGS = -1,
/** NAME or SERVICE is unknown */
DNS_EAI_NONAME = -2,
/** Temporary failure in name resolution */
DNS_EAI_AGAIN = -3,
/** Non-recoverable failure in name res */
DNS_EAI_FAIL = -4,
/** No address associated with NAME */
DNS_EAI_NODATA = -5,
/** `ai_family' not supported */
DNS_EAI_FAMILY = -6,
/** `ai_socktype' not supported */
DNS_EAI_SOCKTYPE = -7,
/** SRV not supported for `ai_socktype' */
DNS_EAI_SERVICE = -8,
/** Address family for NAME not supported */
DNS_EAI_ADDRFAMILY = -9,
/** Memory allocation failure */
DNS_EAI_MEMORY = -10,
/** System error returned in `errno' */
DNS_EAI_SYSTEM = -11,
/** Argument buffer overflow */
DNS_EAI_OVERFLOW = -12,
/** Processing request in progress */
DNS_EAI_INPROGRESS = -100,
/** Request canceled */
DNS_EAI_CANCELED = -101,
/** Request not canceled */
DNS_EAI_NOTCANCELED = -102,
/** All requests done */
DNS_EAI_ALLDONE = -103,
/** IDN encoding failed */
DNS_EAI_IDN_ENCODE = -105,
};
/**
* @typedef dns_resolve_cb_t
* @brief DNS resolve callback
*
* @details The DNS resolve callback is called after a successful
* DNS resolving. The resolver can call this callback multiple times, one
* for each resolved address.
*
* @param status The status of the query:
* DNS_EAI_INPROGRESS returned for each resolved address
* DNS_EAI_ALLDONE mark end of the resolving, info is set to NULL in
* this case
* DNS_EAI_CANCELED if the query was canceled manually or timeout happened
* DNS_EAI_FAIL if the name cannot be resolved by the server
* DNS_EAI_NODATA if there is no such name
* other values means that an error happened.
* @param info Query results are stored here.
* @param user_data The user data given in dns_resolve_name() call.
*/
typedef void (*dns_resolve_cb_t)(enum dns_resolve_status status,
struct dns_addrinfo *info,
void *user_data);
/** @cond INTERNAL_HIDDEN */
enum dns_resolve_context_state {
DNS_RESOLVE_CONTEXT_ACTIVE,
DNS_RESOLVE_CONTEXT_DEACTIVATING,
DNS_RESOLVE_CONTEXT_INACTIVE,
};
/** @endcond */
/**
* DNS resolve context structure.
*/
struct dns_resolve_context {
/** List of configured DNS servers */
struct dns_server {
/** DNS server information */
struct sockaddr dns_server;
/** Connection to the DNS server */
int sock;
/** Is this server mDNS one */
uint8_t is_mdns : 1;
/** Is this server LLMNR one */
uint8_t is_llmnr : 1;
/** @cond INTERNAL_HIDDEN */
/** Dispatch DNS data between resolver and responder */
struct dns_socket_dispatcher dispatcher;
/** @endcond */
} servers[DNS_RESOLVER_MAX_POLL];
/** @cond INTERNAL_HIDDEN */
/** Socket polling for each server connection */
struct zsock_pollfd fds[DNS_RESOLVER_MAX_POLL];
/** @endcond */
/** Prevent concurrent access */
struct k_mutex lock;
/** This timeout is also used when a buffer is required from the
* buffer pools.
*/
k_timeout_t buf_timeout;
/** Result callbacks. We have multiple callbacks here so that it is
* possible to do multiple queries at the same time.
*
* Contents of this structure can be inspected and changed only when
* the lock is held.
*/
struct dns_pending_query {
/** Timeout timer */
struct k_work_delayable timer;
/** Back pointer to ctx, needed in timeout handler */
struct dns_resolve_context *ctx;
/** Result callback.
*
* A null value indicates the slot is not in use.
*/
dns_resolve_cb_t cb;
/** User data */
void *user_data;
/** TX timeout */
k_timeout_t timeout;
/** String containing the thing to resolve like www.example.com
*
* This is set to a non-null value when the query is started,
* and is not used thereafter.
*
* If the query completed at a point where the work item was
* still pending the pointer is cleared to indicate that the
* query is complete, but release of the query slot will be
* deferred until a request for a slot determines that the
* work item has been released.
*/
const char *query;
/** Query type */
enum dns_query_type query_type;
/** DNS id of this query */
uint16_t id;
/** Hash of the DNS name + query type we are querying.
* This hash is calculated so we can match the response that
* we are receiving. This is needed mainly for mDNS which is
* setting the DNS id to 0, which means that the id alone
* cannot be used to find correct pending query.
*/
uint16_t query_hash;
} queries[DNS_NUM_CONCUR_QUERIES];
/** Is this context in use */
enum dns_resolve_context_state state;
};
/**
* @brief Init DNS resolving context.
*
* @details This function sets the DNS server address and initializes the
* DNS context that is used by the actual resolver. DNS server addresses
* can be specified either in textual form, or as struct sockaddr (or both).
* Note that the recommended way to resolve DNS names is to use
* the dns_get_addr_info() API. In that case user does not need to
* call dns_resolve_init() as the DNS servers are already setup by the system.
*
* @param ctx DNS context. If the context variable is allocated from
* the stack, then the variable needs to be valid for the whole duration of
* the resolving. Caller does not need to fill the variable beforehand or
* edit the context afterwards.
* @param dns_servers_str DNS server addresses using textual strings. The
* array is NULL terminated. The port number can be given in the string.
* Syntax for the server addresses with or without port numbers:
* IPv4 : 10.0.9.1
* IPv4 + port : 10.0.9.1:5353
* IPv6 : 2001:db8::22:42
* IPv6 + port : [2001:db8::22:42]:5353
* @param dns_servers_sa DNS server addresses as struct sockaddr. The array
* is NULL terminated. Port numbers are optional in struct sockaddr, the
* default will be used if set to 0.
*
* @return 0 if ok, <0 if error.
*/
int dns_resolve_init(struct dns_resolve_context *ctx,
const char *dns_servers_str[],
const struct sockaddr *dns_servers_sa[]);
/**
* @brief Init DNS resolving context with default Kconfig options.
*
* @param ctx DNS context.
*
* @return 0 if ok, <0 if error.
*/
int dns_resolve_init_default(struct dns_resolve_context *ctx);
/**
* @brief Close DNS resolving context.
*
* @details This releases DNS resolving context and marks the context unusable.
* Caller must call the dns_resolve_init() again to make context usable.
*
* @param ctx DNS context
*
* @return 0 if ok, <0 if error.
*/
int dns_resolve_close(struct dns_resolve_context *ctx);
/**
* @brief Reconfigure DNS resolving context.
*
* @details Reconfigures DNS context with new server list.
*
* @param ctx DNS context
* @param servers_str DNS server addresses using textual strings. The
* array is NULL terminated. The port number can be given in the string.
* Syntax for the server addresses with or without port numbers:
* IPv4 : 10.0.9.1
* IPv4 + port : 10.0.9.1:5353
* IPv6 : 2001:db8::22:42
* IPv6 + port : [2001:db8::22:42]:5353
* @param servers_sa DNS server addresses as struct sockaddr. The array
* is NULL terminated. Port numbers are optional in struct sockaddr, the
* default will be used if set to 0.
*
* @return 0 if ok, <0 if error.
*/
int dns_resolve_reconfigure(struct dns_resolve_context *ctx,
const char *servers_str[],
const struct sockaddr *servers_sa[]);
/**
* @brief Cancel a pending DNS query.
*
* @details This releases DNS resources used by a pending query.
*
* @param ctx DNS context
* @param dns_id DNS id of the pending query
*
* @return 0 if ok, <0 if error.
*/
int dns_resolve_cancel(struct dns_resolve_context *ctx,
uint16_t dns_id);
/**
* @brief Cancel a pending DNS query using id, name and type.
*
* @details This releases DNS resources used by a pending query.
*
* @param ctx DNS context
* @param dns_id DNS id of the pending query
* @param query_name Name of the resource we are trying to query (hostname)
* @param query_type Type of the query (A or AAAA)
*
* @return 0 if ok, <0 if error.
*/
int dns_resolve_cancel_with_name(struct dns_resolve_context *ctx,
uint16_t dns_id,
const char *query_name,
enum dns_query_type query_type);
/**
* @brief Resolve DNS name.
*
* @details This function can be used to resolve e.g., IPv4 or IPv6 address.
* Note that this is asynchronous call, the function will return immediately
* and system will call the callback after resolving has finished or timeout
* has occurred.
* We might send the query to multiple servers (if there are more than one
* server configured), but we only use the result of the first received
* response.
*
* @param ctx DNS context
* @param query What the caller wants to resolve.
* @param type What kind of data the caller wants to get.
* @param dns_id DNS id is returned to the caller. This is needed if one
* wishes to cancel the query. This can be set to NULL if there is no need
* to cancel the query.
* @param cb Callback to call after the resolving has finished or timeout
* has happened.
* @param user_data The user data.
* @param timeout The timeout value for the query. Possible values:
* SYS_FOREVER_MS: the query is tried forever, user needs to cancel it
* manually if it takes too long time to finish
* >0: start the query and let the system timeout it after specified ms
*
* @return 0 if resolving was started ok, < 0 otherwise
*/
int dns_resolve_name(struct dns_resolve_context *ctx,
const char *query,
enum dns_query_type type,
uint16_t *dns_id,
dns_resolve_cb_t cb,
void *user_data,
int32_t timeout);
/**
* @brief Get default DNS context.
*
* @details The system level DNS context uses DNS servers that are
* defined in project config file. If no DNS servers are defined by the
* user, then resolving DNS names using default DNS context will do nothing.
* The configuration options are described in subsys/net/lib/dns/Kconfig file.
*
* @return Default DNS context.
*/
struct dns_resolve_context *dns_resolve_get_default(void);
/**
* @brief Get IP address info from DNS.
*
* @details This function can be used to resolve e.g., IPv4 or IPv6 address.
* Note that this is asynchronous call, the function will return immediately
* and system will call the callback after resolving has finished or timeout
* has occurred.
* We might send the query to multiple servers (if there are more than one
* server configured), but we only use the result of the first received
* response.
* This variant uses system wide DNS servers.
*
* @param query What the caller wants to resolve.
* @param type What kind of data the caller wants to get.
* @param dns_id DNS id is returned to the caller. This is needed if one
* wishes to cancel the query. This can be set to NULL if there is no need
* to cancel the query.
* @param cb Callback to call after the resolving has finished or timeout
* has happened.
* @param user_data The user data.
* @param timeout The timeout value for the connection. Possible values:
* SYS_FOREVER_MS: the query is tried forever, user needs to cancel it
* manually if it takes too long time to finish
* >0: start the query and let the system timeout it after specified ms
*
* @return 0 if resolving was started ok, < 0 otherwise
*/
static inline int dns_get_addr_info(const char *query,
				    enum dns_query_type type,
				    uint16_t *dns_id,
				    dns_resolve_cb_t cb,
				    void *user_data,
				    int32_t timeout)
{
	/* Convenience wrapper: resolve using the system-wide default
	 * DNS context instead of a caller-supplied one.
	 */
	struct dns_resolve_context *def_ctx = dns_resolve_get_default();

	return dns_resolve_name(def_ctx, query, type, dns_id,
				cb, user_data, timeout);
}
/**
* @brief Cancel a pending DNS query.
*
* @details This releases DNS resources used by a pending query.
*
* @param dns_id DNS id of the pending query
*
* @return 0 if ok, <0 if error.
*/
static inline int dns_cancel_addr_info(uint16_t dns_id)
{
	/* Cancel the query on the system-wide default DNS context. */
	struct dns_resolve_context *def_ctx = dns_resolve_get_default();

	return dns_resolve_cancel(def_ctx, dns_id);
}
/**
* @}
*/
/** @cond INTERNAL_HIDDEN */
/**
* @brief Initialize DNS subsystem.
*/
#if defined(CONFIG_DNS_RESOLVER_AUTO_INIT)
void dns_init_resolver(void);
#else
#define dns_init_resolver(...)
#endif /* CONFIG_DNS_RESOLVER_AUTO_INIT */
/** @endcond */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_DNS_RESOLVE_H_ */
``` | /content/code_sandbox/include/zephyr/net/dns_resolve.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,826 |
```objective-c
/** @file
* @brief Network packet buffer descriptor API
*
* Network data is passed between different parts of the stack via
* net_buf struct.
*/
/*
*
*/
/* Data buffer API - used for all data to/from net */
#ifndef ZEPHYR_INCLUDE_NET_NET_PKT_H_
#define ZEPHYR_INCLUDE_NET_NET_PKT_H_
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/net/buf.h>
#if defined(CONFIG_IEEE802154)
#include <zephyr/net/ieee802154_pkt.h>
#endif
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_linkaddr.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_context.h>
#include <zephyr/net/net_time.h>
#include <zephyr/net/ethernet_vlan.h>
#include <zephyr/net/ptp_time.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network packet management library
* @defgroup net_pkt Network Packet Library
* @since 1.5
* @version 0.8.0
* @ingroup networking
* @{
*/
struct net_context;
/** @cond INTERNAL_HIDDEN */
/* buffer cursor used in net_pkt */
struct net_pkt_cursor {
/** Current net_buf pointer by the cursor */
struct net_buf *buf;
/** Current position in the data buffer of the net_buf */
uint8_t *pos;
};
/** @endcond */
/**
* @brief Network packet.
*
* Note that if you add new fields into net_pkt, remember to update
* net_pkt_clone() function.
*/
struct net_pkt {
/**
* The fifo is used by RX/TX threads and by socket layer. The net_pkt
* is queued via fifo to the processing thread.
*/
intptr_t fifo;
/** Slab pointer from where it belongs to */
struct k_mem_slab *slab;
/** buffer holding the packet */
union {
struct net_buf *frags; /**< buffer fragment */
struct net_buf *buffer; /**< alias to a buffer fragment */
};
/** Internal buffer iterator used for reading/writing */
struct net_pkt_cursor cursor;
/** Network connection context */
struct net_context *context;
/** Network interface */
struct net_if *iface;
/** @cond ignore */
#if defined(CONFIG_NET_TCP)
/** Allow placing the packet into sys_slist_t */
sys_snode_t next;
#endif
#if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
struct net_if *orig_iface; /* Original network interface */
#endif
#if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
/**
* TX or RX timestamp if available
*
* For packets that have been sent over the medium, the timestamp refers
* to the time the message timestamp point was encountered at the
* reference plane.
*
* Unsent packages can be scheduled by setting the timestamp to a future
* point in time.
*
* All timestamps refer to the network subsystem's local clock.
*
* See @ref net_ptp_time for definitions of local clock, message
* timestamp point and reference plane. See @ref net_time_t for
* semantics of the network reference clock.
*
* TODO: Replace with net_time_t to decouple from PTP.
*/
struct net_ptp_time timestamp;
#endif
#if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS) || \
defined(CONFIG_TRACING_NET_CORE)
struct {
/** Create time in cycles */
uint32_t create_time;
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
/** Collect extra statistics for net_pkt processing
* from various points in the IP stack. See networking
* documentation where these points are located and how
* to interpret the results.
*/
struct {
uint32_t stat[NET_PKT_DETAIL_STATS_COUNT];
int count;
} detail;
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
};
#endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
/** Reference counter */
atomic_t atomic_ref;
/* Filled by layer 2 when network packet is received. */
struct net_linkaddr lladdr_src;
struct net_linkaddr lladdr_dst;
uint16_t ll_proto_type;
#if defined(CONFIG_NET_IP)
uint8_t ip_hdr_len; /* pre-filled in order to avoid func call */
#endif
uint8_t overwrite : 1; /* Is packet content being overwritten? */
uint8_t eof : 1; /* Last packet before EOF */
uint8_t ptp_pkt : 1; /* For outgoing packet: is this packet
* a L2 PTP packet.
* Used only if defined (CONFIG_NET_L2_PTP)
*/
uint8_t forwarding : 1; /* Are we forwarding this pkt
* Used only if defined(CONFIG_NET_ROUTE)
*/
uint8_t family : 3; /* Address family, see net_ip.h */
/* bitfield byte alignment boundary */
#if defined(CONFIG_NET_IPV4_ACD)
uint8_t ipv4_acd_arp_msg : 1; /* Is this pkt IPv4 conflict detection ARP
* message.
* Note: family needs to be
* AF_INET.
*/
#endif
#if defined(CONFIG_NET_LLDP)
uint8_t lldp_pkt : 1; /* Is this pkt an LLDP message.
* Note: family needs to be
* AF_UNSPEC.
*/
#endif
uint8_t ppp_msg : 1; /* This is a PPP message */
uint8_t captured : 1; /* Set to 1 if this packet is already being
* captured
*/
uint8_t l2_bridged : 1; /* set to 1 if this packet comes from a bridge
* and already contains its L2 header to be
* preserved. Useful only if
* defined(CONFIG_NET_ETHERNET_BRIDGE).
*/
uint8_t l2_processed : 1; /* Set to 1 if this packet has already been
* processed by the L2
*/
uint8_t chksum_done : 1; /* Checksum has already been computed for
* the packet.
*/
#if defined(CONFIG_NET_IP_FRAGMENT)
uint8_t ip_reassembled : 1; /* Packet is a reassembled IP packet. */
#endif
#if defined(CONFIG_NET_PKT_TIMESTAMP)
	uint8_t tx_timestamping : 1; /**< Timestamp transmitted packet */
	uint8_t rx_timestamping : 1; /**< Timestamp received packet */
#endif
/* bitfield byte alignment boundary */
#if defined(CONFIG_NET_IP)
union {
/* IPv6 hop limit or IPv4 ttl for this network packet.
* The value is shared between IPv6 and IPv4.
*/
#if defined(CONFIG_NET_IPV6)
uint8_t ipv6_hop_limit;
#endif
#if defined(CONFIG_NET_IPV4)
uint8_t ipv4_ttl;
#endif
};
union {
#if defined(CONFIG_NET_IPV4)
uint8_t ipv4_opts_len; /* length of IPv4 header options */
#endif
#if defined(CONFIG_NET_IPV6)
uint16_t ipv6_ext_len; /* length of extension headers */
#endif
};
#if defined(CONFIG_NET_IP_FRAGMENT)
union {
#if defined(CONFIG_NET_IPV4_FRAGMENT)
struct {
uint16_t flags; /* Fragment offset and M (More Fragment) flag */
uint16_t id; /* Fragment ID */
} ipv4_fragment;
#endif /* CONFIG_NET_IPV4_FRAGMENT */
#if defined(CONFIG_NET_IPV6_FRAGMENT)
struct {
uint16_t flags; /* Fragment offset and M (More Fragment) flag */
uint32_t id; /* Fragment id */
uint16_t hdr_start; /* Where starts the fragment header */
} ipv6_fragment;
#endif /* CONFIG_NET_IPV6_FRAGMENT */
};
#endif /* CONFIG_NET_IP_FRAGMENT */
#if defined(CONFIG_NET_IPV6)
/* Where is the start of the last header before payload data
* in IPv6 packet. This is offset value from start of the IPv6
* packet. Note that this value should be updated by who ever
* adds IPv6 extension headers to the network packet.
*/
uint16_t ipv6_prev_hdr_start;
uint8_t ipv6_ext_opt_len; /* IPv6 ND option length */
uint8_t ipv6_next_hdr; /* What is the very first next header */
#endif /* CONFIG_NET_IPV6 */
#if defined(CONFIG_NET_IP_DSCP_ECN)
/** IPv4/IPv6 Differentiated Services Code Point value. */
uint8_t ip_dscp : 6;
/** IPv4/IPv6 Explicit Congestion Notification value. */
uint8_t ip_ecn : 2;
#endif /* CONFIG_NET_IP_DSCP_ECN */
#endif /* CONFIG_NET_IP */
#if defined(CONFIG_NET_VLAN)
/* VLAN TCI (Tag Control Information). This contains the Priority
* Code Point (PCP), Drop Eligible Indicator (DEI) and VLAN
* Identifier (VID, called more commonly VLAN tag). This value is
* kept in host byte order.
*/
uint16_t vlan_tci;
#endif /* CONFIG_NET_VLAN */
#if defined(NET_PKT_HAS_CONTROL_BLOCK)
/* TODO: Evolve this into a union of orthogonal
* control block declarations if further L2
* stacks require L2-specific attributes.
*/
#if defined(CONFIG_IEEE802154)
/* The following structure requires a 4-byte alignment
* boundary to avoid padding.
*/
struct net_pkt_cb_ieee802154 cb;
#endif /* CONFIG_IEEE802154 */
#endif /* NET_PKT_HAS_CONTROL_BLOCK */
/** Network packet priority, can be left out in which case packet
* is not prioritised.
*/
uint8_t priority;
#if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
/* Remote address of the received packet. This is only used by
* network interfaces with an offloaded TCP/IP stack, or if we
* have network tunneling in use.
*/
union {
struct sockaddr remote;
/* This will make sure that there is enough storage to store
* the address struct. The access to value is via remote
* address.
*/
struct sockaddr_storage remote_storage;
};
#endif /* CONFIG_NET_OFFLOAD */
#if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
/* Tell the capture api that this is a captured packet */
uint8_t cooked_mode_pkt : 1;
#endif /* CONFIG_NET_CAPTURE_COOKED_MODE */
	/** @endcond */
};
/** @cond ignore */
/* The interface real ll address */
/* Link-layer address of the network interface this packet is attached to. */
static inline struct net_linkaddr *net_pkt_lladdr_if(struct net_pkt *pkt)
{
	return net_if_get_link_addr(pkt->iface);
}

/* Get the network connection context owning this packet (may be NULL). */
static inline struct net_context *net_pkt_context(struct net_pkt *pkt)
{
	return pkt->context;
}

/* Attach a network connection context to this packet. */
static inline void net_pkt_set_context(struct net_pkt *pkt,
				       struct net_context *ctx)
{
	pkt->context = ctx;
}

/* Get the network interface this packet is bound to. */
static inline struct net_if *net_pkt_iface(struct net_pkt *pkt)
{
	return pkt->iface;
}

/* Bind the packet to a network interface and propagate the interface's
 * link-layer address type to the packet's src/dst link addresses.
 */
static inline void net_pkt_set_iface(struct net_pkt *pkt, struct net_if *iface)
{
	pkt->iface = iface;
	/* If the network interface is set in pkt, then also set the type of
	 * the network address that is stored in pkt. This is done here so
	 * that the address type is properly set and is not forgotten.
	 */
	if (iface) {
		uint8_t type = net_if_get_link_addr(iface)->type;
		pkt->lladdr_src.type = type;
		pkt->lladdr_dst.type = type;
	}
}

/* Interface the packet originally arrived on. Distinct from
 * net_pkt_iface() only when routing or Ethernet bridging is enabled;
 * otherwise it just aliases the current interface.
 */
static inline struct net_if *net_pkt_orig_iface(struct net_pkt *pkt)
{
#if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
	return pkt->orig_iface;
#else
	return pkt->iface;
#endif
}

/* Record the originating interface (no-op unless routing/bridging). */
static inline void net_pkt_set_orig_iface(struct net_pkt *pkt,
					  struct net_if *iface)
{
#if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
	pkt->orig_iface = iface;
#else
	ARG_UNUSED(pkt);
	ARG_UNUSED(iface);
#endif
}
/* Address family of the packet (AF_INET, AF_INET6, ... see net_ip.h). */
static inline uint8_t net_pkt_family(struct net_pkt *pkt)
{
	return pkt->family;
}

/* Set the packet's address family. */
static inline void net_pkt_set_family(struct net_pkt *pkt, uint8_t family)
{
	pkt->family = family;
}

/* Whether this is an L2 PTP packet (used with CONFIG_NET_L2_PTP). */
static inline bool net_pkt_is_ptp(struct net_pkt *pkt)
{
	return !!(pkt->ptp_pkt);
}

/* Mark/unmark the packet as an L2 PTP packet. */
static inline void net_pkt_set_ptp(struct net_pkt *pkt, bool is_ptp)
{
	pkt->ptp_pkt = is_ptp;
}

/* Whether TX timestamping is requested for this packet.
 * Always false if CONFIG_NET_PKT_TIMESTAMP is disabled.
 */
static inline bool net_pkt_is_tx_timestamping(struct net_pkt *pkt)
{
#if defined(CONFIG_NET_PKT_TIMESTAMP)
	return !!(pkt->tx_timestamping);
#else
	ARG_UNUSED(pkt);

	return false;
#endif
}

/* Request/clear TX timestamping (no-op without CONFIG_NET_PKT_TIMESTAMP). */
static inline void net_pkt_set_tx_timestamping(struct net_pkt *pkt, bool is_timestamping)
{
#if defined(CONFIG_NET_PKT_TIMESTAMP)
	pkt->tx_timestamping = is_timestamping;
#else
	ARG_UNUSED(pkt);
	ARG_UNUSED(is_timestamping);
#endif
}

/* Whether RX timestamping is requested for this packet.
 * Always false if CONFIG_NET_PKT_TIMESTAMP is disabled.
 */
static inline bool net_pkt_is_rx_timestamping(struct net_pkt *pkt)
{
#if defined(CONFIG_NET_PKT_TIMESTAMP)
	return !!(pkt->rx_timestamping);
#else
	ARG_UNUSED(pkt);

	return false;
#endif
}

/* Request/clear RX timestamping (no-op without CONFIG_NET_PKT_TIMESTAMP). */
static inline void net_pkt_set_rx_timestamping(struct net_pkt *pkt, bool is_timestamping)
{
#if defined(CONFIG_NET_PKT_TIMESTAMP)
	pkt->rx_timestamping = is_timestamping;
#else
	ARG_UNUSED(pkt);
	ARG_UNUSED(is_timestamping);
#endif
}

/* Whether the packet is already being captured (net capture API). */
static inline bool net_pkt_is_captured(struct net_pkt *pkt)
{
	return !!(pkt->captured);
}

/* Mark/unmark the packet as captured. */
static inline void net_pkt_set_captured(struct net_pkt *pkt, bool is_captured)
{
	pkt->captured = is_captured;
}

/* Whether the packet came from a bridge with its L2 header preserved.
 * Always false unless CONFIG_NET_ETHERNET_BRIDGE is enabled.
 */
static inline bool net_pkt_is_l2_bridged(struct net_pkt *pkt)
{
	return IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE) ? !!(pkt->l2_bridged) : 0;
}

/* Mark/unmark the packet as L2-bridged (no-op without bridging support). */
static inline void net_pkt_set_l2_bridged(struct net_pkt *pkt, bool is_l2_bridged)
{
	if (IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE)) {
		pkt->l2_bridged = is_l2_bridged;
	}
}

/* Whether the L2 layer has already processed this packet. */
static inline bool net_pkt_is_l2_processed(struct net_pkt *pkt)
{
	return !!(pkt->l2_processed);
}

/* Mark/unmark the packet as processed by the L2 layer. */
static inline void net_pkt_set_l2_processed(struct net_pkt *pkt,
					    bool is_l2_processed)
{
	pkt->l2_processed = is_l2_processed;
}

/* Whether the checksum has already been computed for this packet. */
static inline bool net_pkt_is_chksum_done(struct net_pkt *pkt)
{
	return !!(pkt->chksum_done);
}

/* Mark/unmark the packet's checksum as already computed. */
static inline void net_pkt_set_chksum_done(struct net_pkt *pkt,
					   bool is_chksum_done)
{
	pkt->chksum_done = is_chksum_done;
}
/* Cached IP header length (pre-filled to avoid a function call).
 * Returns 0 when CONFIG_NET_IP is disabled.
 */
static inline uint8_t net_pkt_ip_hdr_len(struct net_pkt *pkt)
{
#if defined(CONFIG_NET_IP)
	return pkt->ip_hdr_len;
#else
	ARG_UNUSED(pkt);

	return 0;
#endif
}

/* Cache the IP header length (no-op without CONFIG_NET_IP). */
static inline void net_pkt_set_ip_hdr_len(struct net_pkt *pkt, uint8_t len)
{
#if defined(CONFIG_NET_IP)
	pkt->ip_hdr_len = len;
#else
	ARG_UNUSED(pkt);
	ARG_UNUSED(len);
#endif
}

/* IPv4/IPv6 Differentiated Services Code Point of this packet.
 * Returns 0 when CONFIG_NET_IP_DSCP_ECN is disabled.
 */
static inline uint8_t net_pkt_ip_dscp(struct net_pkt *pkt)
{
#if defined(CONFIG_NET_IP_DSCP_ECN)
	return pkt->ip_dscp;
#else
	ARG_UNUSED(pkt);

	return 0;
#endif
}

/* Set the DSCP value (no-op without CONFIG_NET_IP_DSCP_ECN). */
static inline void net_pkt_set_ip_dscp(struct net_pkt *pkt, uint8_t dscp)
{
#if defined(CONFIG_NET_IP_DSCP_ECN)
	pkt->ip_dscp = dscp;
#else
	ARG_UNUSED(pkt);
	ARG_UNUSED(dscp);
#endif
}

/* IPv4/IPv6 Explicit Congestion Notification bits of this packet.
 * Returns 0 when CONFIG_NET_IP_DSCP_ECN is disabled.
 */
static inline uint8_t net_pkt_ip_ecn(struct net_pkt *pkt)
{
#if defined(CONFIG_NET_IP_DSCP_ECN)
	return pkt->ip_ecn;
#else
	ARG_UNUSED(pkt);

	return 0;
#endif
}

/* Set the ECN bits (no-op without CONFIG_NET_IP_DSCP_ECN). */
static inline void net_pkt_set_ip_ecn(struct net_pkt *pkt, uint8_t ecn)
{
#if defined(CONFIG_NET_IP_DSCP_ECN)
	pkt->ip_ecn = ecn;
#else
	ARG_UNUSED(pkt);
	ARG_UNUSED(ecn);
#endif
}

/* Whether this is the last packet before EOF (1) or not (0). */
static inline uint8_t net_pkt_eof(struct net_pkt *pkt)
{
	return pkt->eof;
}

/* Mark/unmark this packet as the last one before EOF. */
static inline void net_pkt_set_eof(struct net_pkt *pkt, bool eof)
{
	pkt->eof = eof;
}

/* Whether this packet is being forwarded (used with CONFIG_NET_ROUTE). */
static inline bool net_pkt_forwarding(struct net_pkt *pkt)
{
	return !!(pkt->forwarding);
}

/* Mark/unmark this packet as being forwarded. */
static inline void net_pkt_set_forwarding(struct net_pkt *pkt, bool forward)
{
	pkt->forwarding = forward;
}
#if defined(CONFIG_NET_IPV4)
/* IPv4 Time-To-Live value carried with this packet. */
static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
{
	return pkt->ipv4_ttl;
}
static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
					uint8_t ttl)
{
	pkt->ipv4_ttl = ttl;
}
/* Length (bytes) of the IPv4 options area following the fixed header. */
static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
{
	return pkt->ipv4_opts_len;
}
static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
					     uint8_t opts_len)
{
	pkt->ipv4_opts_len = opts_len;
}
#else
/* Stubs used when CONFIG_NET_IPV4 is disabled: getters return 0,
 * setters are no-ops.
 */
static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
					uint8_t ttl)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(ttl);
}
static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
					     uint8_t opts_len)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(opts_len);
}
#endif
#if defined(CONFIG_NET_IPV6)
/* Length (bytes) of IPv6 extension header options in this packet. */
static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
{
	return pkt->ipv6_ext_opt_len;
}
static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
						uint8_t len)
{
	pkt->ipv6_ext_opt_len = len;
}
/* Next-header value from the IPv6 header chain. */
static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
{
	return pkt->ipv6_next_hdr;
}
static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
					     uint8_t next_hdr)
{
	pkt->ipv6_next_hdr = next_hdr;
}
/* Total length (bytes) of IPv6 extension headers. */
static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
{
	return pkt->ipv6_ext_len;
}
static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
{
	pkt->ipv6_ext_len = len;
}
/* Offset of the previous extension header start (used when walking or
 * rewriting the header chain).
 */
static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
{
	return pkt->ipv6_prev_hdr_start;
}
static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
					     uint16_t offset)
{
	pkt->ipv6_prev_hdr_start = offset;
}
/* IPv6 hop limit carried with this packet. */
static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
{
	return pkt->ipv6_hop_limit;
}
static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
					      uint8_t hop_limit)
{
	pkt->ipv6_hop_limit = hop_limit;
}
#else /* CONFIG_NET_IPV6 */
/* Stubs used when CONFIG_NET_IPV6 is disabled: getters return 0,
 * setters are no-ops.
 */
static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
						uint8_t len)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(len);
}
static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
					     uint8_t next_hdr)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(next_hdr);
}
static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(len);
}
static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
					     uint16_t offset)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(offset);
}
static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
					      uint8_t hop_limit)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(hop_limit);
}
#endif /* CONFIG_NET_IPV6 */
/* Family-agnostic option/extension length helper. NOTE: when both IPv6 and
 * IPv4 are enabled this returns the IPv6 extension headers length (IPv6
 * branch wins in the #if chain), not the IPv4 options length.
 */
static inline uint16_t net_pkt_ip_opts_len(struct net_pkt *pkt)
{
#if defined(CONFIG_NET_IPV6)
	return pkt->ipv6_ext_len;
#elif defined(CONFIG_NET_IPV4)
	return pkt->ipv4_opts_len;
#else
	ARG_UNUSED(pkt);
	return 0;
#endif
}
#if defined(CONFIG_NET_IPV4_FRAGMENT)
static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
{
return (pkt->ipv4_fragment.flags & NET_IPV4_FRAGH_OFFSET_MASK) * 8;
}
static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
{
return (pkt->ipv4_fragment.flags & NET_IPV4_MORE_FRAG_MASK) != 0;
}
static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
{
pkt->ipv4_fragment.flags = flags;
}
static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
{
return pkt->ipv4_fragment.id;
}
static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
{
pkt->ipv4_fragment.id = id;
}
#else /* CONFIG_NET_IPV4_FRAGMENT */
static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return 0;
}
static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return 0;
}
static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
{
ARG_UNUSED(pkt);
ARG_UNUSED(flags);
}
static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return 0;
}
static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
{
ARG_UNUSED(pkt);
ARG_UNUSED(id);
}
#endif /* CONFIG_NET_IPV4_FRAGMENT */
#if defined(CONFIG_NET_IPV6_FRAGMENT)
static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
{
return pkt->ipv6_fragment.hdr_start;
}
static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
uint16_t start)
{
pkt->ipv6_fragment.hdr_start = start;
}
static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
{
return pkt->ipv6_fragment.flags & NET_IPV6_FRAGH_OFFSET_MASK;
}
static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
{
return (pkt->ipv6_fragment.flags & 0x01) != 0;
}
static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
uint16_t flags)
{
pkt->ipv6_fragment.flags = flags;
}
static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
{
return pkt->ipv6_fragment.id;
}
static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
uint32_t id)
{
pkt->ipv6_fragment.id = id;
}
#else /* CONFIG_NET_IPV6_FRAGMENT */
static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return 0;
}
static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
uint16_t start)
{
ARG_UNUSED(pkt);
ARG_UNUSED(start);
}
static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return 0;
}
static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return 0;
}
static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
uint16_t flags)
{
ARG_UNUSED(pkt);
ARG_UNUSED(flags);
}
static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return 0;
}
static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
uint32_t id)
{
ARG_UNUSED(pkt);
ARG_UNUSED(id);
}
#endif /* CONFIG_NET_IPV6_FRAGMENT */
#if defined(CONFIG_NET_IP_FRAGMENT)
/* Whether this packet was produced by IP reassembly. */
static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
{
	return !!(pkt->ip_reassembled);
}
static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
					      bool reassembled)
{
	pkt->ip_reassembled = reassembled;
}
#else /* CONFIG_NET_IP_FRAGMENT */
/* Stubs used when CONFIG_NET_IP_FRAGMENT is disabled. */
static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return false;
}
static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
					      bool reassembled)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(reassembled);
}
#endif /* CONFIG_NET_IP_FRAGMENT */
/* Packet priority (traffic class) carried with the packet. */
static inline uint8_t net_pkt_priority(struct net_pkt *pkt)
{
	return pkt->priority;
}
static inline void net_pkt_set_priority(struct net_pkt *pkt,
					uint8_t priority)
{
	pkt->priority = priority;
}
#if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
/* Whether this packet belongs to a "cooked mode" capture interface. */
static inline bool net_pkt_is_cooked_mode(struct net_pkt *pkt)
{
	return pkt->cooked_mode_pkt;
}
static inline void net_pkt_set_cooked_mode(struct net_pkt *pkt, bool value)
{
	pkt->cooked_mode_pkt = value;
}
#else
/* Stubs used when CONFIG_NET_CAPTURE_COOKED_MODE is disabled. */
static inline bool net_pkt_is_cooked_mode(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return false;
}
static inline void net_pkt_set_cooked_mode(struct net_pkt *pkt, bool value)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(value);
}
#endif /* CONFIG_NET_CAPTURE_COOKED_MODE */
#if defined(CONFIG_NET_VLAN)
/* VLAN ID (VID) portion of the stored TCI. */
static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
{
	return net_eth_vlan_get_vid(pkt->vlan_tci);
}
static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
{
	pkt->vlan_tci = net_eth_vlan_set_vid(pkt->vlan_tci, tag);
}
/* Priority Code Point (PCP) portion of the stored TCI. */
static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
{
	return net_eth_vlan_get_pcp(pkt->vlan_tci);
}
static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt,
					     uint8_t priority)
{
	pkt->vlan_tci = net_eth_vlan_set_pcp(pkt->vlan_tci, priority);
}
/* Drop Eligible Indicator (DEI) bit of the stored TCI. */
static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
{
	return net_eth_vlan_get_dei(pkt->vlan_tci);
}
static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
{
	pkt->vlan_tci = net_eth_vlan_set_dei(pkt->vlan_tci, dei);
}
/* Raw 16-bit Tag Control Information accessors. */
static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
{
	pkt->vlan_tci = tci;
}
static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
{
	return pkt->vlan_tci;
}
#else
/* Stubs used when CONFIG_NET_VLAN is disabled. */
static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return NET_VLAN_TAG_UNSPEC;
}
static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(tag);
}
static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
/* Fix: this stub was missing, so any caller built without CONFIG_NET_VLAN
 * failed to link/compile although every other VLAN accessor has a stub.
 */
static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt,
					     uint8_t priority)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(priority);
}
static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return false;
}
static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(dei);
}
static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return NET_VLAN_TAG_UNSPEC; /* assumes priority is 0 */
}
static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(tci);
}
#endif
#if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
/* Pointer to the packet's PTP timestamp storage. */
static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
{
	return &pkt->timestamp;
}
/* Copy the given timestamp into the packet (field-by-field copy). */
static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
					 struct net_ptp_time *timestamp)
{
	pkt->timestamp.second = timestamp->second;
	pkt->timestamp.nanosecond = timestamp->nanosecond;
}
/* Packet timestamp expressed as nanoseconds. */
static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
{
	return net_ptp_time_to_ns(&pkt->timestamp);
}
static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
{
	pkt->timestamp = ns_to_net_ptp_time(timestamp);
}
#else
/* Stubs used when neither timestamping option is enabled. */
static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return NULL;
}
static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
					 struct net_ptp_time *timestamp)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(timestamp);
}
static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(timestamp);
}
#endif /* CONFIG_NET_PKT_TIMESTAMP || CONFIG_NET_PKT_TXTIME */
#if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS) || \
	defined(CONFIG_TRACING_NET_CORE)
/* Creation time of the packet, used by RX/TX time statistics and tracing.
 * Unit is whatever the caller stored (presumably cycle count -- confirm
 * against the code that sets it).
 */
static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
{
	return pkt->create_time;
}
static inline void net_pkt_set_create_time(struct net_pkt *pkt,
					   uint32_t create_time)
{
	pkt->create_time = create_time;
}
#else
static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0U;
}
static inline void net_pkt_set_create_time(struct net_pkt *pkt,
					   uint32_t create_time)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(create_time);
}
#endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS ||
	* CONFIG_TRACING_NET_CORE
	*/
/**
 * @deprecated Use @ref net_pkt_timestamp or @ref net_pkt_timestamp_ns instead.
 *
 * @brief Get the expected TX time of the packet in nanoseconds.
 * Returns 0 when CONFIG_NET_PKT_TXTIME is disabled.
 */
static inline uint64_t net_pkt_txtime(struct net_pkt *pkt)
{
#if defined(CONFIG_NET_PKT_TXTIME)
	return pkt->timestamp.second * NSEC_PER_SEC + pkt->timestamp.nanosecond;
#else
	ARG_UNUSED(pkt);
	return 0;
#endif /* CONFIG_NET_PKT_TXTIME */
}
/**
 * @deprecated Use @ref net_pkt_set_timestamp or @ref net_pkt_set_timestamp_ns
 * instead.
 *
 * @brief Set the expected TX time, splitting the nanosecond value into
 * the second/nanosecond timestamp fields. No-op without CONFIG_NET_PKT_TXTIME.
 */
static inline void net_pkt_set_txtime(struct net_pkt *pkt, uint64_t txtime)
{
#if defined(CONFIG_NET_PKT_TXTIME)
	pkt->timestamp.second = txtime / NSEC_PER_SEC;
	pkt->timestamp.nanosecond = txtime % NSEC_PER_SEC;
#else
	ARG_UNUSED(pkt);
	ARG_UNUSED(txtime);
#endif /* CONFIG_NET_PKT_TXTIME */
}
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
/* Array of per-stage tick samples collected for detailed time statistics. */
static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
{
	return pkt->detail.stat;
}
/* Number of tick samples stored so far. */
static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
{
	return pkt->detail.count;
}
/* Clear all collected samples and the counter. */
static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
{
	memset(&pkt->detail, 0, sizeof(pkt->detail));
}
/* Append one tick sample; logs and drops the sample when the fixed-size
 * array is already full.
 */
static ALWAYS_INLINE void net_pkt_set_stats_tick(struct net_pkt *pkt,
						 uint32_t tick)
{
	if (pkt->detail.count >= NET_PKT_DETAIL_STATS_COUNT) {
		NET_ERR("Detail stats count overflow (%d >= %d)",
			pkt->detail.count, NET_PKT_DETAIL_STATS_COUNT);
		return;
	}
	pkt->detail.stat[pkt->detail.count++] = tick;
}
#define net_pkt_set_tx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
#define net_pkt_set_rx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
#else
/* Stubs used when detailed time statistics are disabled. */
static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return NULL;
}
static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return 0;
}
static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
}
static inline void net_pkt_set_stats_tick(struct net_pkt *pkt, uint32_t tick)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(tick);
}
#define net_pkt_set_tx_stats_tick(pkt, tick)
#define net_pkt_set_rx_stats_tick(pkt, tick)
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
/* Total payload length: sum of all fragment lengths in the chain. */
static inline size_t net_pkt_get_len(struct net_pkt *pkt)
{
	return net_buf_frags_len(pkt->frags);
}
/* Pointer to the data of the first fragment. Caller must ensure the
 * fragment chain is non-empty.
 */
static inline uint8_t *net_pkt_data(struct net_pkt *pkt)
{
	return pkt->frags->data;
}
/* Pointer to the start of the IP header (same as the first fragment data). */
static inline uint8_t *net_pkt_ip_data(struct net_pkt *pkt)
{
	return pkt->frags->data;
}
/* True when the packet carries no payload: no buffer, no fragment data, or
 * a zero-length first buffer. NOTE(review): mixes pkt->buffer and pkt->frags
 * -- presumably these alias in struct net_pkt; confirm against the struct.
 */
static inline bool net_pkt_is_empty(struct net_pkt *pkt)
{
	return !pkt->buffer || !net_pkt_data(pkt) || pkt->buffer->len == 0;
}
/* Source/destination link-layer address storage of this packet. */
static inline struct net_linkaddr *net_pkt_lladdr_src(struct net_pkt *pkt)
{
	return &pkt->lladdr_src;
}
static inline struct net_linkaddr *net_pkt_lladdr_dst(struct net_pkt *pkt)
{
	return &pkt->lladdr_dst;
}
/* Swap the source and destination link-layer address pointers. */
static inline void net_pkt_lladdr_swap(struct net_pkt *pkt)
{
	uint8_t *addr = net_pkt_lladdr_src(pkt)->addr;
	net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_dst(pkt)->addr;
	net_pkt_lladdr_dst(pkt)->addr = addr;
}
/* Clear the link-layer address. Note: only the *source* address is
 * cleared here; the destination is left untouched.
 */
static inline void net_pkt_lladdr_clear(struct net_pkt *pkt)
{
	net_pkt_lladdr_src(pkt)->addr = NULL;
	net_pkt_lladdr_src(pkt)->len = 0U;
}
/* Link-layer protocol type (e.g. Ethernet EtherType) of this packet. */
static inline uint16_t net_pkt_ll_proto_type(struct net_pkt *pkt)
{
	return pkt->ll_proto_type;
}
static inline void net_pkt_set_ll_proto_type(struct net_pkt *pkt, uint16_t type)
{
	pkt->ll_proto_type = type;
}
#if defined(CONFIG_NET_IPV4_ACD)
/* Whether this packet is an IPv4 Address Conflict Detection ARP message. */
static inline bool net_pkt_ipv4_acd(struct net_pkt *pkt)
{
	return !!(pkt->ipv4_acd_arp_msg);
}
static inline void net_pkt_set_ipv4_acd(struct net_pkt *pkt,
					bool is_acd_arp_msg)
{
	pkt->ipv4_acd_arp_msg = is_acd_arp_msg;
}
#else /* CONFIG_NET_IPV4_ACD */
static inline bool net_pkt_ipv4_acd(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return false;
}
static inline void net_pkt_set_ipv4_acd(struct net_pkt *pkt,
					bool is_acd_arp_msg)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(is_acd_arp_msg);
}
#endif /* CONFIG_NET_IPV4_ACD */
#if defined(CONFIG_NET_LLDP)
/* Whether this packet is an LLDP frame. */
static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
{
	return !!(pkt->lldp_pkt);
}
static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
{
	pkt->lldp_pkt = is_lldp;
}
#else
static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return false;
}
static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(is_lldp);
}
#endif /* CONFIG_NET_LLDP */
#if defined(CONFIG_NET_L2_PPP)
/* Whether this packet is a PPP protocol message. */
static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
{
	return !!(pkt->ppp_msg);
}
static inline void net_pkt_set_ppp(struct net_pkt *pkt,
				   bool is_ppp_msg)
{
	pkt->ppp_msg = is_ppp_msg;
}
#else /* CONFIG_NET_L2_PPP */
static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return false;
}
static inline void net_pkt_set_ppp(struct net_pkt *pkt,
				   bool is_ppp_msg)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(is_ppp_msg);
}
#endif /* CONFIG_NET_L2_PPP */
#if defined(NET_PKT_HAS_CONTROL_BLOCK)
/* Opaque per-packet control block storage for upper layers; NULL when the
 * control block is not compiled in.
 */
static inline void *net_pkt_cb(struct net_pkt *pkt)
{
	return &pkt->cb;
}
#else
static inline void *net_pkt_cb(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return NULL;
}
#endif
/* Convenience casts to view the first fragment data as an IP header. */
#define NET_IPV6_HDR(pkt) ((struct net_ipv6_hdr *)net_pkt_ip_data(pkt))
#define NET_IPV4_HDR(pkt) ((struct net_ipv4_hdr *)net_pkt_ip_data(pkt))
/* Fill the IPv6 source address of the packet header by asking the interface
 * of the packet's context to select a suitable source address.
 */
static inline void net_pkt_set_src_ipv6_addr(struct net_pkt *pkt)
{
	net_if_ipv6_select_src_addr(net_context_get_iface(
					    net_pkt_context(pkt)),
				    (struct in6_addr *)NET_IPV6_HDR(pkt)->src);
}
/* Overwrite mode: when set, read/write cursor operations replace existing
 * data instead of appending (semantics per the net_pkt cursor API).
 */
static inline void net_pkt_set_overwrite(struct net_pkt *pkt, bool overwrite)
{
	pkt->overwrite = overwrite;
}
static inline bool net_pkt_is_being_overwritten(struct net_pkt *pkt)
{
	return !!(pkt->overwrite);
}
#ifdef CONFIG_NET_PKT_FILTER
/* Packet-filter hooks; real implementations live in the filter module. */
bool net_pkt_filter_send_ok(struct net_pkt *pkt);
bool net_pkt_filter_recv_ok(struct net_pkt *pkt);
#else
/* Without CONFIG_NET_PKT_FILTER every packet is allowed through. */
static inline bool net_pkt_filter_send_ok(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return true;
}
static inline bool net_pkt_filter_recv_ok(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return true;
}
#endif /* CONFIG_NET_PKT_FILTER */
#if defined(CONFIG_NET_PKT_FILTER) && \
	(defined(CONFIG_NET_PKT_FILTER_IPV4_HOOK) || defined(CONFIG_NET_PKT_FILTER_IPV6_HOOK))
bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt);
#else
static inline bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return true;
}
#endif /* CONFIG_NET_PKT_FILTER_IPV4_HOOK || CONFIG_NET_PKT_FILTER_IPV6_HOOK */
#if defined(CONFIG_NET_PKT_FILTER) && defined(CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK)
bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt);
#else
static inline bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt)
{
	ARG_UNUSED(pkt);
	return true;
}
#endif /* CONFIG_NET_PKT_FILTER && CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK */
#if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
/* Remote peer address stored with the packet (offload / IP-in-IP use). */
static inline struct sockaddr *net_pkt_remote_address(struct net_pkt *pkt)
{
	return &pkt->remote;
}
/* Copy the given address into the packet. Caller must pass a length not
 * exceeding sizeof(pkt->remote); the length is not validated here.
 */
static inline void net_pkt_set_remote_address(struct net_pkt *pkt,
					      struct sockaddr *address,
					      socklen_t len)
{
	memcpy(&pkt->remote, address, len);
}
#endif /* CONFIG_NET_OFFLOAD || CONFIG_NET_L2_IPIP */
/* @endcond */
/**
* @brief Create a net_pkt slab
*
* A net_pkt slab is used to store meta-information about
* network packets. It must be coupled with a data fragment pool
* (@ref NET_PKT_DATA_POOL_DEFINE) used to store the actual
* packet data. The macro can be used by an application to define
* additional custom per-context TX packet slabs (see
* net_context_setup_pools()).
*
* @param name Name of the slab.
* @param count Number of net_pkt in this slab.
*/
#define NET_PKT_SLAB_DEFINE(name, count) \
K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4)
/** @cond INTERNAL_HIDDEN */
/* Backward compatibility macro */
#define NET_PKT_TX_SLAB_DEFINE(name, count) NET_PKT_SLAB_DEFINE(name, count)
/** @endcond */
/**
* @brief Create a data fragment net_buf pool
*
* A net_buf pool is used to store actual data for
* network packets. It must be coupled with a net_pkt slab
* (@ref NET_PKT_SLAB_DEFINE) used to store the packet
* meta-information. The macro can be used by an application to
* define additional custom per-context TX packet pools (see
* net_context_setup_pools()).
*
* @param name Name of the pool.
* @param count Number of net_buf in this pool.
*/
#define NET_PKT_DATA_POOL_DEFINE(name, count) \
NET_BUF_POOL_DEFINE(name, count, CONFIG_NET_BUF_DATA_SIZE, \
0, NULL)
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
(CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG)
#define NET_PKT_DEBUG_ENABLED
#endif
#if defined(NET_PKT_DEBUG_ENABLED)
/* Debug versions of the net_pkt functions that are used when tracking
* buffer usage.
*/
struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
size_t min_len,
k_timeout_t timeout,
const char *caller,
int line);
#define net_pkt_get_reserve_data(pool, min_len, timeout) \
net_pkt_get_reserve_data_debug(pool, min_len, timeout, __func__, __LINE__)
struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len,
k_timeout_t timeout,
const char *caller,
int line);
#define net_pkt_get_reserve_rx_data(min_len, timeout) \
net_pkt_get_reserve_rx_data_debug(min_len, timeout, __func__, __LINE__)
struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len,
k_timeout_t timeout,
const char *caller,
int line);
#define net_pkt_get_reserve_tx_data(min_len, timeout) \
net_pkt_get_reserve_tx_data_debug(min_len, timeout, __func__, __LINE__)
struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
k_timeout_t timeout,
const char *caller, int line);
#define net_pkt_get_frag(pkt, min_len, timeout) \
net_pkt_get_frag_debug(pkt, min_len, timeout, __func__, __LINE__)
void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line);
#define net_pkt_unref(pkt) net_pkt_unref_debug(pkt, __func__, __LINE__)
struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
int line);
#define net_pkt_ref(pkt) net_pkt_ref_debug(pkt, __func__, __LINE__)
struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
const char *caller, int line);
#define net_pkt_frag_ref(frag) net_pkt_frag_ref_debug(frag, __func__, __LINE__)
void net_pkt_frag_unref_debug(struct net_buf *frag,
const char *caller, int line);
#define net_pkt_frag_unref(frag) \
net_pkt_frag_unref_debug(frag, __func__, __LINE__)
struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
struct net_buf *parent,
struct net_buf *frag,
const char *caller, int line);
#define net_pkt_frag_del(pkt, parent, frag) \
net_pkt_frag_del_debug(pkt, parent, frag, __func__, __LINE__)
void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
const char *caller, int line);
#define net_pkt_frag_add(pkt, frag) \
net_pkt_frag_add_debug(pkt, frag, __func__, __LINE__)
void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
const char *caller, int line);
#define net_pkt_frag_insert(pkt, frag) \
net_pkt_frag_insert_debug(pkt, frag, __func__, __LINE__)
#endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC ||
* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
*/
/** @endcond */
/**
* @brief Print fragment list and the fragment sizes
*
* @details Only available if debugging is activated.
*
* @param pkt Network pkt.
*/
#if defined(NET_PKT_DEBUG_ENABLED)
void net_pkt_print_frags(struct net_pkt *pkt);
#else
#define net_pkt_print_frags(pkt)
#endif
/**
* @brief Get a data buffer from a given pool.
*
* @details Normally this version is not useful for applications
* but is mainly used by network fragmentation code.
*
* @param pool The net_buf pool to use.
* @param min_len Minimum length of the requested fragment.
* @param timeout Affects the action taken should the net buf pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified time.
*
* @return Network buffer if successful, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
size_t min_len, k_timeout_t timeout);
#endif
/**
* @brief Get RX DATA buffer from pool.
* Normally you should use net_pkt_get_frag() instead.
*
* @details Normally this version is not useful for applications
* but is mainly used by network fragmentation code.
*
* @param min_len Minimum length of the requested fragment.
* @param timeout Affects the action taken should the net buf pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified time.
*
* @return Network buffer if successful, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout);
#endif
/**
* @brief Get TX DATA buffer from pool.
* Normally you should use net_pkt_get_frag() instead.
*
* @details Normally this version is not useful for applications
* but is mainly used by network fragmentation code.
*
* @param min_len Minimum length of the requested fragment.
* @param timeout Affects the action taken should the net buf pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified time.
*
* @return Network buffer if successful, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout);
#endif
/**
* @brief Get a data fragment that might be from user specific
* buffer pool or from global DATA pool.
*
* @param pkt Network packet.
* @param min_len Minimum length of the requested fragment.
* @param timeout Affects the action taken should the net buf pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified time.
*
* @return Network buffer if successful, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
k_timeout_t timeout);
#endif
/**
* @brief Place packet back into the available packets slab
*
* @details Releases the packet to other use. This needs to be
* called by application after it has finished with the packet.
*
* @param pkt Network packet to release.
*
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
void net_pkt_unref(struct net_pkt *pkt);
#endif
/**
* @brief Increase the packet ref count
*
* @details Mark the packet to be used still.
*
* @param pkt Network packet to ref.
*
* @return Network packet if successful, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_ref(struct net_pkt *pkt);
#endif
/**
* @brief Increase the packet fragment ref count
*
* @details Mark the fragment to be used still.
*
* @param frag Network fragment to ref.
*
* @return a pointer on the referenced Network fragment.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_buf *net_pkt_frag_ref(struct net_buf *frag);
#endif
/**
* @brief Decrease the packet fragment ref count
*
* @param frag Network fragment to unref.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
void net_pkt_frag_unref(struct net_buf *frag);
#endif
/**
* @brief Delete existing fragment from a packet
*
* @param pkt Network packet from which frag belongs to.
* @param parent parent fragment of frag, or NULL if none.
* @param frag Fragment to delete.
*
* @return Pointer to the following fragment, or NULL if it had no
* further fragments.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
struct net_buf *parent,
struct net_buf *frag);
#endif
/**
* @brief Add a fragment to a packet at the end of its fragment list
*
* @param pkt pkt Network packet where to add the fragment
* @param frag Fragment to add
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag);
#endif
/**
* @brief Insert a fragment to a packet at the beginning of its fragment list
*
* @param pkt pkt Network packet where to insert the fragment
* @param frag Fragment to insert
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag);
#endif
/**
* @brief Compact the fragment list of a packet.
*
* @details After this there is no more any free space in individual fragments.
* @param pkt Network packet.
*/
void net_pkt_compact(struct net_pkt *pkt);
/**
* @brief Get information about predefined RX, TX and DATA pools.
*
* @param rx Pointer to RX pool is returned.
* @param tx Pointer to TX pool is returned.
* @param rx_data Pointer to RX DATA pool is returned.
* @param tx_data Pointer to TX DATA pool is returned.
*/
void net_pkt_get_info(struct k_mem_slab **rx,
struct k_mem_slab **tx,
struct net_buf_pool **rx_data,
struct net_buf_pool **tx_data);
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
/**
* @brief Debug helper to print out the buffer allocations
*/
void net_pkt_print(void);
typedef void (*net_pkt_allocs_cb_t)(struct net_pkt *pkt,
struct net_buf *buf,
const char *func_alloc,
int line_alloc,
const char *func_free,
int line_free,
bool in_use,
void *user_data);
void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data);
const char *net_pkt_slab2str(struct k_mem_slab *slab);
const char *net_pkt_pool2str(struct net_buf_pool *pool);
#else
#define net_pkt_print(...)
#endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
/* New allocator, and API are defined below.
* This will be simpler when time will come to get rid of former API above.
*/
#if defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
const char *caller, int line);
#define net_pkt_alloc(_timeout) \
net_pkt_alloc_debug(_timeout, __func__, __LINE__)
struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
k_timeout_t timeout,
const char *caller, int line);
#define net_pkt_alloc_from_slab(_slab, _timeout) \
net_pkt_alloc_from_slab_debug(_slab, _timeout, __func__, __LINE__)
struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
const char *caller, int line);
#define net_pkt_rx_alloc(_timeout) \
net_pkt_rx_alloc_debug(_timeout, __func__, __LINE__)
struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
k_timeout_t timeout,
const char *caller,
int line);
#define net_pkt_alloc_on_iface(_iface, _timeout) \
net_pkt_alloc_on_iface_debug(_iface, _timeout, __func__, __LINE__)
struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
k_timeout_t timeout,
const char *caller,
int line);
#define net_pkt_rx_alloc_on_iface(_iface, _timeout) \
net_pkt_rx_alloc_on_iface_debug(_iface, _timeout, \
__func__, __LINE__)
int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
size_t size,
enum net_ip_protocol proto,
k_timeout_t timeout,
const char *caller, int line);
#define net_pkt_alloc_buffer(_pkt, _size, _proto, _timeout) \
net_pkt_alloc_buffer_debug(_pkt, _size, _proto, _timeout, \
__func__, __LINE__)
int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
k_timeout_t timeout,
const char *caller, int line);
#define net_pkt_alloc_buffer_raw(_pkt, _size, _timeout) \
net_pkt_alloc_buffer_raw_debug(_pkt, _size, _timeout, \
__func__, __LINE__)
struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
size_t size,
sa_family_t family,
enum net_ip_protocol proto,
k_timeout_t timeout,
const char *caller,
int line);
#define net_pkt_alloc_with_buffer(_iface, _size, _family, \
_proto, _timeout) \
net_pkt_alloc_with_buffer_debug(_iface, _size, _family, \
_proto, _timeout, \
__func__, __LINE__)
struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
size_t size,
sa_family_t family,
enum net_ip_protocol proto,
k_timeout_t timeout,
const char *caller,
int line);
#define net_pkt_rx_alloc_with_buffer(_iface, _size, _family, \
_proto, _timeout) \
net_pkt_rx_alloc_with_buffer_debug(_iface, _size, _family, \
_proto, _timeout, \
__func__, __LINE__)
#endif /* NET_PKT_DEBUG_ENABLED */
/** @endcond */
/**
* @brief Allocate an initialized net_pkt
*
* @details for the time being, 2 pools are used. One for TX and one for RX.
* This allocator has to be used for TX.
*
* @param timeout Maximum time to wait for an allocation.
*
* @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_alloc(k_timeout_t timeout);
#endif
/**
* @brief Allocate an initialized net_pkt from a specific slab
*
* @details unlike net_pkt_alloc() which uses core slabs, this one will use
* an external slab (see NET_PKT_SLAB_DEFINE()).
* Do _not_ use it unless you know what you are doing. Basically, only
* net_context should be using this, in order to allocate packet and
* then buffer on its local slab/pool (if any).
*
* @param slab The slab to use for allocating the packet
* @param timeout Maximum time to wait for an allocation.
*
* @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
k_timeout_t timeout);
#endif
/**
* @brief Allocate an initialized net_pkt for RX
*
* @details for the time being, 2 pools are used. One for TX and one for RX.
* This allocator has to be used for RX.
*
* @param timeout Maximum time to wait for an allocation.
*
* @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout);
#endif
/**
* @brief Allocate a network packet for a specific network interface.
*
* @param iface The network interface the packet is supposed to go through.
* @param timeout Maximum time to wait for an allocation.
*
* @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
k_timeout_t timeout);
/** @cond INTERNAL_HIDDEN */
/* Same as above but specifically for RX packet */
struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
k_timeout_t timeout);
/** @endcond */
#endif
/**
* @brief Allocate buffer for a net_pkt
*
 * @details This allocator takes into account the space necessary for headers,
 *          MTU, and any existing buffer. Beware that, due to all these
 *          criteria, the allocated size might be smaller or bigger than
 *          the requested one.
*
* @param pkt The network packet requiring buffer to be allocated.
* @param size The size of buffer being requested.
* @param proto The IP protocol type (can be 0 for none).
* @param timeout Maximum time to wait for an allocation.
*
* @return 0 on success, negative errno code otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
int net_pkt_alloc_buffer(struct net_pkt *pkt,
size_t size,
enum net_ip_protocol proto,
k_timeout_t timeout);
#endif
/**
* @brief Allocate buffer for a net_pkt, of specified size, w/o any additional
* preconditions
*
 * @details The actual buffer size may be larger than the requested one if
 *          fixed-size buffers are in use.
*
* @param pkt The network packet requiring buffer to be allocated.
* @param size The size of buffer being requested.
* @param timeout Maximum time to wait for an allocation.
*
* @return 0 on success, negative errno code otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
k_timeout_t timeout);
#endif
/**
* @brief Allocate a network packet and buffer at once
*
* @param iface The network interface the packet is supposed to go through.
* @param size The size of buffer.
* @param family The family to which the packet belongs.
* @param proto The IP protocol type (can be 0 for none).
* @param timeout Maximum time to wait for an allocation.
*
* @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
*/
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
size_t size,
sa_family_t family,
enum net_ip_protocol proto,
k_timeout_t timeout);
/** @cond INTERNAL_HIDDEN */
/* Same as above but specifically for RX packet */
struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
size_t size,
sa_family_t family,
enum net_ip_protocol proto,
k_timeout_t timeout);
/** @endcond */
#endif
/**
* @brief Append a buffer in packet
*
* @param pkt Network packet where to append the buffer
* @param buffer Buffer to append
*/
void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer);
/**
* @brief Get available buffer space from a pkt
*
* @note Reserved bytes (headroom) in any of the fragments are not considered to
* be available.
*
* @param pkt The net_pkt which buffer availability should be evaluated
*
* @return the amount of buffer available
*/
size_t net_pkt_available_buffer(struct net_pkt *pkt);
/**
* @brief Get available buffer space for payload from a pkt
*
* @note Reserved bytes (headroom) in any of the fragments are not considered to
* be available.
*
* @details Unlike net_pkt_available_buffer(), this will take into account
* the headers space.
*
* @param pkt The net_pkt which payload buffer availability should
* be evaluated
* @param proto The IP protocol type (can be 0 for none).
*
* @return the amount of buffer available for payload
*/
size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
enum net_ip_protocol proto);
/**
* @brief Trim net_pkt buffer
*
* @details This will basically check for unused buffers and deallocate
* them relevantly
*
* @param pkt The net_pkt which buffer will be trimmed
*/
void net_pkt_trim_buffer(struct net_pkt *pkt);
/**
* @brief Remove @a length bytes from tail of packet
*
* @details This function does not take packet cursor into account. It is a
* helper to remove unneeded bytes from tail of packet (like appended
* CRC). It takes care of buffer deallocation if removed bytes span
* whole buffer(s).
*
* @param pkt Network packet
* @param length Number of bytes to be removed
*
* @retval 0 On success.
* @retval -EINVAL If packet length is shorter than @a length.
*/
int net_pkt_remove_tail(struct net_pkt *pkt, size_t length);
/**
* @brief Initialize net_pkt cursor
*
* @details This will initialize the net_pkt cursor from its buffer.
*
* @param pkt The net_pkt whose cursor is going to be initialized
*/
void net_pkt_cursor_init(struct net_pkt *pkt);
/**
 * @brief Backup net_pkt cursor
 *
 * Saves the current cursor of @p pkt into @p backup so that it can later
 * be restored with net_pkt_cursor_restore().
 *
 * @param pkt The net_pkt whose cursor is going to be backed up
 * @param backup The cursor where to backup net_pkt cursor
 */
static inline void net_pkt_cursor_backup(struct net_pkt *pkt,
					 struct net_pkt_cursor *backup)
{
	/* The two fields are independent, so copy order does not matter */
	backup->pos = pkt->cursor.pos;
	backup->buf = pkt->cursor.buf;
}
/**
 * @brief Restore net_pkt cursor from a backup
 *
 * Overwrites the cursor of @p pkt with the values previously saved by
 * net_pkt_cursor_backup().
 *
 * @param pkt The net_pkt whose cursor is going to be restored
 * @param backup The cursor from where to restore net_pkt cursor
 */
static inline void net_pkt_cursor_restore(struct net_pkt *pkt,
					  struct net_pkt_cursor *backup)
{
	/* The two fields are independent, so copy order does not matter */
	pkt->cursor.pos = backup->pos;
	pkt->cursor.buf = backup->buf;
}
/**
* @brief Returns current position of the cursor
*
* @param pkt The net_pkt whose cursor position is going to be returned
*
* @return cursor's position
*/
static inline void *net_pkt_cursor_get_pos(struct net_pkt *pkt)
{
return pkt->cursor.pos;
}
/**
* @brief Skip some data from a net_pkt
*
* @details net_pkt's cursor should be properly initialized
* Cursor position will be updated after the operation.
* Depending on the value of pkt->overwrite bit, this function
* will affect the buffer length or not. If it's true, it will
* advance the cursor to the requested length. If it's false,
* it will do the same but if the cursor was already also at the
* end of existing data, it will increment the buffer length.
* So in this case, its behavior is just like net_pkt_write or
* net_pkt_memset, difference being that it will not affect the
* buffer content itself (which may be just garbage then).
*
* @param pkt The net_pkt whose cursor will be updated to skip given
* amount of data from the buffer.
* @param length Amount of data to skip in the buffer
*
* @return 0 in success, negative errno code otherwise.
*/
int net_pkt_skip(struct net_pkt *pkt, size_t length);
/**
* @brief Memset some data in a net_pkt
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip.
* Cursor position will be updated after the operation.
*
* @param pkt The net_pkt whose buffer to fill starting at the current
* cursor position.
* @param byte The byte to write in memory
* @param length Amount of data to memset with given byte
*
* @return 0 in success, negative errno code otherwise.
*/
int net_pkt_memset(struct net_pkt *pkt, int byte, size_t length);
/**
* @brief Copy data from a packet into another one.
*
* @details Both net_pkt cursors should be properly initialized and,
* if needed, positioned using net_pkt_skip.
* The cursors will be updated after the operation.
*
* @param pkt_dst Destination network packet.
* @param pkt_src Source network packet.
* @param length Length of data to be copied.
*
* @return 0 on success, negative errno code otherwise.
*/
int net_pkt_copy(struct net_pkt *pkt_dst,
struct net_pkt *pkt_src,
size_t length);
/**
* @brief Clone pkt and its buffer. The cloned packet will be allocated on
* the same pool as the original one.
*
* @param pkt Original pkt to be cloned
* @param timeout Timeout to wait for free buffer
*
* @return NULL if error, cloned packet otherwise.
*/
struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout);
/**
* @brief Clone pkt and its buffer. The cloned packet will be allocated on
 * the RX packet pool.
*
* @param pkt Original pkt to be cloned
* @param timeout Timeout to wait for free buffer
*
* @return NULL if error, cloned packet otherwise.
*/
struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout);
/**
* @brief Clone pkt and increase the refcount of its buffer.
*
* @param pkt Original pkt to be shallow cloned
* @param timeout Timeout to wait for free packet
*
* @return NULL if error, cloned packet otherwise.
*/
struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt,
k_timeout_t timeout);
/**
* @brief Read some data from a net_pkt
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip.
* Cursor position will be updated after the operation.
*
* @param pkt The network packet from where to read some data
* @param data The destination buffer where to copy the data
* @param length The amount of data to copy
*
* @return 0 on success, negative errno code otherwise.
*/
int net_pkt_read(struct net_pkt *pkt, void *data, size_t length);
/**
 * @brief Read a byte (uint8_t) from a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt The network packet from where to read
 * @param data The destination uint8_t where to copy the data
 *
 * @return 0 on success, negative errno code otherwise.
 */
static inline int net_pkt_read_u8(struct net_pkt *pkt, uint8_t *data)
{
	/* Thin wrapper: a single byte read through the generic reader */
	return net_pkt_read(pkt, data, sizeof(uint8_t));
}
/**
* @brief Read uint16_t big endian data from a net_pkt
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip.
* Cursor position will be updated after the operation.
*
* @param pkt The network packet from where to read
* @param data The destination uint16_t where to copy the data
*
* @return 0 on success, negative errno code otherwise.
*/
int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data);
/**
* @brief Read uint16_t little endian data from a net_pkt
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip.
* Cursor position will be updated after the operation.
*
* @param pkt The network packet from where to read
* @param data The destination uint16_t where to copy the data
*
* @return 0 on success, negative errno code otherwise.
*/
int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data);
/**
* @brief Read uint32_t big endian data from a net_pkt
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip.
* Cursor position will be updated after the operation.
*
* @param pkt The network packet from where to read
* @param data The destination uint32_t where to copy the data
*
* @return 0 on success, negative errno code otherwise.
*/
int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data);
/**
* @brief Write data into a net_pkt
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip.
* Cursor position will be updated after the operation.
*
* @param pkt The network packet where to write
* @param data Data to be written
* @param length Length of the data to be written
*
* @return 0 on success, negative errno code otherwise.
*/
int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length);
/**
 * @brief Write a byte (uint8_t) data to a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt The network packet where to write
 * @param data The uint8_t value to write
 *
 * @return 0 on success, negative errno code otherwise.
 */
static inline int net_pkt_write_u8(struct net_pkt *pkt, uint8_t data)
{
	/* data is a by-value copy, so its address can safely be passed on */
	return net_pkt_write(pkt, &data, sizeof(data));
}
/**
 * @brief Write a uint16_t big endian data to a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt The network packet where to write
 * @param data The uint16_t value in host byte order to write
 *
 * @return 0 on success, negative errno code otherwise.
 */
static inline int net_pkt_write_be16(struct net_pkt *pkt, uint16_t data)
{
	/* Convert to network (big endian) byte order before writing */
	const uint16_t big_endian = htons(data);

	return net_pkt_write(pkt, &big_endian, sizeof(big_endian));
}
/**
 * @brief Write a uint32_t big endian data to a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt The network packet where to write
 * @param data The uint32_t value in host byte order to write
 *
 * @return 0 on success, negative errno code otherwise.
 */
static inline int net_pkt_write_be32(struct net_pkt *pkt, uint32_t data)
{
	/* Convert to network (big endian) byte order before writing */
	const uint32_t big_endian = htonl(data);

	return net_pkt_write(pkt, &big_endian, sizeof(big_endian));
}
/**
 * @brief Write a uint32_t little endian data to a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt The network packet where to write
 * @param data The uint32_t value in host byte order to write
 *
 * @return 0 on success, negative errno code otherwise.
 */
static inline int net_pkt_write_le32(struct net_pkt *pkt, uint32_t data)
{
	/* Convert to little endian byte order before writing */
	const uint32_t little_endian = sys_cpu_to_le32(data);

	return net_pkt_write(pkt, &little_endian, sizeof(little_endian));
}
/**
 * @brief Write a uint16_t little endian data to a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt The network packet where to write
 * @param data The uint16_t value in host byte order to write
 *
 * @return 0 on success, negative errno code otherwise.
 */
static inline int net_pkt_write_le16(struct net_pkt *pkt, uint16_t data)
{
	/* Convert to little endian byte order before writing */
	const uint16_t little_endian = sys_cpu_to_le16(data);

	return net_pkt_write(pkt, &little_endian, sizeof(little_endian));
}
/**
* @brief Get the amount of data which can be read from current cursor position
*
* @param pkt Network packet
*
* @return Amount of data which can be read from current pkt cursor
*/
size_t net_pkt_remaining_data(struct net_pkt *pkt);
/**
* @brief Update the overall length of a packet
*
 * @details Unlike net_pkt_pull() below, this does not take the packet cursor
 *          into account. It is mainly a helper dedicated to the IPv4 and IPv6
 *          input functions. It shrinks the overall length of the packet to
 *          the given value.
*
* @param pkt Network packet
* @param length The new length of the packet
*
* @return 0 on success, negative errno code otherwise.
*/
int net_pkt_update_length(struct net_pkt *pkt, size_t length);
/**
* @brief Remove data from the packet at current location
*
* @details net_pkt's cursor should be properly initialized and,
* eventually, properly positioned using net_pkt_skip/read/write.
* Note that net_pkt's cursor is reset by this function.
*
* @param pkt Network packet
* @param length Number of bytes to be removed
*
* @return 0 on success, negative errno code otherwise.
*/
int net_pkt_pull(struct net_pkt *pkt, size_t length);
/**
* @brief Get the actual offset in the packet from its cursor
*
* @param pkt Network packet.
*
* @return a valid offset on success, 0 otherwise as there is nothing that
* can be done to evaluate the offset.
*/
uint16_t net_pkt_get_current_offset(struct net_pkt *pkt);
/**
* @brief Check if a data size could fit contiguously
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip.
*
* @param pkt Network packet.
* @param size The size to check for contiguity
*
* @return true if that is the case, false otherwise.
*/
bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size);
/**
* Get the contiguous buffer space
*
* @param pkt Network packet
*
* @return The available contiguous buffer space in bytes starting from the
* current cursor position. 0 in case of an error.
*/
size_t net_pkt_get_contiguous_len(struct net_pkt *pkt);
/** @cond INTERNAL_HIDDEN */
/* Descriptor used by net_pkt_get_data()/net_pkt_set_data() to access a
 * fixed-size piece of packet data (typically a protocol header) in a
 * contiguous way.
 */
struct net_pkt_data_access {
#if !defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
	/* Bounce buffer used when the data spans several fragments; the
	 * CONTIGUOUS variant below sets it to NULL (no bounce buffer).
	 */
	void *data;
#endif
	/* Size in bytes of the data described by this accessor */
	const size_t size;
};
#if defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
/* Headers are guaranteed contiguous: only the size needs to be recorded */
#define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
	struct net_pkt_data_access _name = {			\
		.size = sizeof(_type),				\
	}
#define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
	NET_PKT_DATA_ACCESS_DEFINE(_name, _type)
#else
/* Also defines a backing variable of _type to serve as bounce buffer */
#define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
	_type _hdr_##_name;					\
	struct net_pkt_data_access _name = {			\
		.data = &_hdr_##_name,				\
		.size = sizeof(_type),				\
	}
/* NULL data pointer: caller requires the data to already be contiguous */
#define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
	struct net_pkt_data_access _name = {			\
		.data = NULL,					\
		.size = sizeof(_type),				\
	}
#endif /* CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS */
/** @endcond */
/**
* @brief Get data from a network packet in a contiguous way
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip. Unlike other functions,
* cursor position will not be updated after the operation.
*
* @param pkt The network packet from where to get the data.
* @param access A pointer to a valid net_pkt_data_access describing the
* data to get in a contiguous way.
*
* @return a pointer to the requested contiguous data, NULL otherwise.
*/
void *net_pkt_get_data(struct net_pkt *pkt,
struct net_pkt_data_access *access);
/**
* @brief Set contiguous data into a network packet
*
* @details net_pkt's cursor should be properly initialized and,
* if needed, positioned using net_pkt_skip.
* Cursor position will be updated after the operation.
*
* @param pkt The network packet to where the data should be set.
* @param access A pointer to a valid net_pkt_data_access describing the
* data to set.
*
* @return 0 on success, a negative errno otherwise.
*/
int net_pkt_set_data(struct net_pkt *pkt,
struct net_pkt_data_access *access);
/**
 * @brief Acknowledge previously contiguous data taken from a network packet
 *
 * Advances the packet cursor past the data described by @p access.
 * Packet needs to be set to overwrite mode.
 *
 * @param pkt The network packet whose cursor should be advanced
 * @param access The data access descriptor whose size is consumed
 *
 * @return 0 on success, negative errno code otherwise.
 */
static inline int net_pkt_acknowledge_data(struct net_pkt *pkt,
					   struct net_pkt_data_access *access)
{
	const size_t consumed = access->size;

	return net_pkt_skip(pkt, consumed);
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_NET_PKT_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_pkt.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 16,857 |
```objective-c
/*
*
*/
/**
* @file
* @brief SNTP (Simple Network Time Protocol)
*/
#ifndef ZEPHYR_INCLUDE_NET_SNTP_H_
#define ZEPHYR_INCLUDE_NET_SNTP_H_
#include <zephyr/net/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Simple Network Time Protocol API
* @defgroup sntp SNTP
* @since 1.10
* @version 0.8.0
* @ingroup networking
* @{
*/
/** Time as returned by SNTP API, fractional seconds since 1 Jan 1970 */
struct sntp_time {
uint64_t seconds; /**< Second value */
uint32_t fraction; /**< Fractional seconds value */
#if defined(CONFIG_SNTP_UNCERTAINTY)
uint64_t uptime_us; /**< Uptime in microseconds */
uint32_t uncertainty_us; /**< Uncertainty in microseconds */
#endif
};
/** SNTP context */
struct sntp_ctx {
/** @cond INTERNAL_HIDDEN */
struct {
struct zsock_pollfd fds[1];
int nfds;
int fd;
} sock;
/** @endcond */
/** Timestamp when the request was sent from client to server.
* This is used to check if the originated timestamp in the server
* reply matches the one in client request.
*/
struct sntp_time expected_orig_ts;
};
/**
* @brief Initialize SNTP context
*
* @param ctx Address of sntp context.
* @param addr IP address of NTP/SNTP server.
* @param addr_len IP address length of NTP/SNTP server.
*
* @return 0 if ok, <0 if error.
*/
int sntp_init(struct sntp_ctx *ctx, struct sockaddr *addr,
socklen_t addr_len);
/**
* @brief Perform SNTP query
*
* @param ctx Address of sntp context.
* @param timeout Timeout of waiting for sntp response (in milliseconds).
* @param time Timestamp including integer and fractional seconds since
* 1 Jan 1970 (output).
*
* @return 0 if ok, <0 if error (-ETIMEDOUT if timeout).
*/
int sntp_query(struct sntp_ctx *ctx, uint32_t timeout,
struct sntp_time *time);
/**
* @brief Release SNTP context
*
* @param ctx Address of sntp context.
*/
void sntp_close(struct sntp_ctx *ctx);
/**
* @brief Convenience function to query SNTP in one-shot fashion
*
* Convenience wrapper which calls getaddrinfo(), sntp_init(),
* sntp_query(), and sntp_close().
*
* @param server Address of server in format addr[:port]
* @param timeout Query timeout
* @param ts Timestamp including integer and fractional seconds since
* 1 Jan 1970 (output).
*
* @return 0 if ok, <0 if error (-ETIMEDOUT if timeout).
*/
int sntp_simple(const char *server, uint32_t timeout,
struct sntp_time *ts);
/**
* @brief Convenience function to query SNTP in one-shot fashion
* using a pre-initialized address struct
*
* Convenience wrapper which calls sntp_init(), sntp_query() and
* sntp_close().
*
* @param addr IP address of NTP/SNTP server.
* @param addr_len IP address length of NTP/SNTP server.
* @param timeout Query timeout
* @param ts Timestamp including integer and fractional seconds since
* 1 Jan 1970 (output).
*
* @return 0 if ok, <0 if error (-ETIMEDOUT if timeout).
*/
int sntp_simple_addr(struct sockaddr *addr, socklen_t addr_len, uint32_t timeout,
struct sntp_time *ts);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif
``` | /content/code_sandbox/include/zephyr/net/sntp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 807 |
```objective-c
/** @file
* @brief Offloaded network device iface API
*
* This is not to be included by the application.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_OFFLOADED_NETDEV_H_
#define ZEPHYR_INCLUDE_OFFLOADED_NETDEV_H_
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/net/net_if.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Offloaded Net Devices
* @defgroup offloaded_netdev Offloaded Net Devices
* @since 3.4
* @version 0.8.0
* @ingroup networking
* @{
*/
/** Types of offloaded netdev L2 */
enum offloaded_net_if_types {
/** Unknown, device hasn't register a type */
L2_OFFLOADED_NET_IF_TYPE_UNKNOWN,
/** Ethernet devices */
L2_OFFLOADED_NET_IF_TYPE_ETHERNET,
/** Modem */
L2_OFFLOADED_NET_IF_TYPE_MODEM,
/** IEEE 802.11 Wi-Fi */
L2_OFFLOADED_NET_IF_TYPE_WIFI,
};
/**
* @brief Extended net_if_api for offloaded ifaces/network devices, allowing handling of
* admin up/down state changes
*/
struct offloaded_if_api {
/**
* The net_if_api must be placed in first position in this
* struct so that we are compatible with network interface API.
*/
struct net_if_api iface_api;
/** Enable or disable the device (in response to admin state change) */
int (*enable)(const struct net_if *iface, bool state);
/** Types of offloaded net device */
enum offloaded_net_if_types (*get_type)(void);
};
/* Ensure offloaded_if_api is compatible with net_if_api */
BUILD_ASSERT(offsetof(struct offloaded_if_api, iface_api) == 0);
/**
* @brief Check if the offloaded network interface supports Wi-Fi.
*
* @param iface Pointer to network interface
*
* @return True if interface supports Wi-Fi, False otherwise.
*/
static inline bool net_off_is_wifi_offloaded(struct net_if *iface)
{
const struct offloaded_if_api *api = (const struct offloaded_if_api *)
net_if_get_device(iface)->api;
return api->get_type && api->get_type() == L2_OFFLOADED_NET_IF_TYPE_WIFI;
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_OFFLOADED_NETDEV_H_ */
``` | /content/code_sandbox/include/zephyr/net/offloaded_netdev.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 527 |
```objective-c
/*
*
*/
/** @file tftp.h
*
* @brief TFTP Client Implementation
*
* @defgroup tftp_client TFTP Client library
* @since 2.3
* @version 0.1.0
* @ingroup networking
* @{
*/
#ifndef ZEPHYR_INCLUDE_NET_TFTP_H_
#define ZEPHYR_INCLUDE_NET_TFTP_H_
#include <zephyr/kernel.h>
#include <zephyr/net/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* RFC1350: the file is sent in fixed length blocks of 512 bytes.
* Each data packet contains one block of data, and must be acknowledged
* by an acknowledgment packet before the next packet can be sent.
* A data packet of less than 512 bytes signals termination of a transfer.
*/
#define TFTP_BLOCK_SIZE 512
/**
* RFC1350: For non-request TFTP message, the header contains 2-byte operation
* code plus 2-byte block number or error code.
*/
#define TFTP_HEADER_SIZE 4
/** Maximum amount of data that can be sent or received */
#define TFTPC_MAX_BUF_SIZE (TFTP_BLOCK_SIZE + TFTP_HEADER_SIZE)
/**
* @name TFTP client error codes.
* @{
*/
#define TFTPC_SUCCESS 0 /**< Success. */
#define TFTPC_DUPLICATE_DATA -1 /**< Duplicate data received. */
#define TFTPC_BUFFER_OVERFLOW -2 /**< User buffer is too small. */
#define TFTPC_UNKNOWN_FAILURE -3 /**< Unknown failure. */
#define TFTPC_REMOTE_ERROR -4 /**< Remote server error. */
#define TFTPC_RETRIES_EXHAUSTED -5 /**< Retries exhausted. */
/**
* @}
*/
/**
* @brief TFTP Asynchronous Events notified to the application from the module
* through the callback registered by the application.
*/
enum tftp_evt_type {
/**
* DATA event when data is received from remote server.
*
* @note DATA event structure contains payload data and size.
*/
TFTP_EVT_DATA,
/**
* ERROR event when error is received from remote server.
*
* @note ERROR event structure contains error code and message.
*/
TFTP_EVT_ERROR
};
/** @brief Parameters for data event. */
struct tftp_data_param {
uint8_t *data_ptr; /**< Pointer to binary data. */
uint32_t len; /**< Length of binary data. */
};
/** @brief Parameters for error event. */
struct tftp_error_param {
char *msg; /**< Error message. */
int code; /**< Error code. */
};
/**
* @brief Defines event parameters notified along with asynchronous events
* to the application.
*/
union tftp_evt_param {
/** Parameters accompanying TFTP_EVT_DATA event. */
struct tftp_data_param data;
/** Parameters accompanying TFTP_EVT_ERROR event. */
struct tftp_error_param error;
};
/** @brief Defines TFTP asynchronous event notified to the application. */
struct tftp_evt {
/** Identifies the event. */
enum tftp_evt_type type;
/** Contains parameters (if any) accompanying the event. */
union tftp_evt_param param;
};
/**
* @typedef tftp_callback_t
*
* @brief TFTP event notification callback registered by the application.
*
* @param[in] evt Event description along with result and associated
* parameters (if any).
*/
typedef void (*tftp_callback_t)(const struct tftp_evt *evt);
/**
* @brief TFTP client definition to maintain information relevant to the
* client.
*
* @note Application must initialize `server` and `callback` before calling
* GET or PUT API with the `tftpc` structure.
*/
struct tftpc {
/** Socket address pointing to the remote TFTP server */
struct sockaddr server;
/** Event notification callback. No notification if NULL */
tftp_callback_t callback;
/** Buffer for internal usage */
uint8_t tftp_buf[TFTPC_MAX_BUF_SIZE];
};
/**
* @brief This function gets data from a "file" on the remote server.
*
* @param client Client information of type @ref tftpc.
* @param remote_file Name of the remote file to get.
* @param mode TFTP Client "mode" setting.
*
* @retval The size of data being received if the operation completed successfully.
* @retval TFTPC_BUFFER_OVERFLOW if the file is larger than the user buffer.
* @retval TFTPC_REMOTE_ERROR if the server failed to process our request.
* @retval TFTPC_RETRIES_EXHAUSTED if the client timed out waiting for server.
* @retval -EINVAL if `client` is NULL.
*
* @note This function blocks until the transfer is completed or network error happens. The
* integrity of the `client` structure must be ensured until the function returns.
*/
int tftp_get(struct tftpc *client,
const char *remote_file, const char *mode);
/**
* @brief This function puts data to a "file" on the remote server.
*
* @param client Client information of type @ref tftpc.
* @param remote_file Name of the remote file to put.
* @param mode TFTP Client "mode" setting.
* @param user_buf Data buffer containing the data to put.
* @param user_buf_size Length of the data to put.
*
* @retval The size of data being sent if the operation completed successfully.
* @retval TFTPC_REMOTE_ERROR if the server failed to process our request.
* @retval TFTPC_RETRIES_EXHAUSTED if the client timed out waiting for server.
* @retval -EINVAL if `client` or `user_buf` is NULL or if `user_buf_size` is zero.
*
* @note This function blocks until the transfer is completed or network error happens. The
* integrity of the `client` structure must be ensured until the function returns.
*/
int tftp_put(struct tftpc *client,
const char *remote_file, const char *mode,
const uint8_t *user_buf, uint32_t user_buf_size);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_TFTP_H_ */
/** @} */
``` | /content/code_sandbox/include/zephyr/net/tftp.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,301 |
```objective-c
/** @file
* @brief Network packet filtering public header file
*
* The network packet filtering provides a mechanism for deciding the fate
* of an incoming or outgoing packet based on a set of basic rules.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_PKT_FILTER_H_
#define ZEPHYR_INCLUDE_NET_PKT_FILTER_H_
#include <limits.h>
#include <stdbool.h>
#include <zephyr/sys/slist.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/ethernet.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network Packet Filter API
* @defgroup net_pkt_filter Network Packet Filter API
* @since 3.0
* @version 0.8.0
* @ingroup networking
* @{
*/
/** @cond INTERNAL_HIDDEN */
struct npf_test;
typedef bool (npf_test_fn_t)(struct npf_test *test, struct net_pkt *pkt);
/** @endcond */
/** @brief common filter test structure to be embedded into larger structures */
struct npf_test {
npf_test_fn_t *fn; /**< packet condition test function */
};
/** @brief filter rule structure */
struct npf_rule {
sys_snode_t node; /**< Slist rule list node */
enum net_verdict result; /**< result if all tests pass */
uint32_t nb_tests; /**< number of tests for this rule */
struct npf_test *tests[]; /**< pointers to @ref npf_test instances */
};
/** @brief Default rule list termination for accepting a packet */
extern struct npf_rule npf_default_ok;
/** @brief Default rule list termination for rejecting a packet */
extern struct npf_rule npf_default_drop;
/** @brief rule set for a given test location */
struct npf_rule_list {
sys_slist_t rule_head; /**< List head */
struct k_spinlock lock; /**< Lock protecting the list access */
};
/** @brief rule list applied to outgoing packets */
extern struct npf_rule_list npf_send_rules;
/** @brief rule list applied to incoming packets */
extern struct npf_rule_list npf_recv_rules;
/** @brief rule list applied for local incoming packets */
extern struct npf_rule_list npf_local_in_recv_rules;
/** @brief rule list applied for IPv4 incoming packets */
extern struct npf_rule_list npf_ipv4_recv_rules;
/** @brief rule list applied for IPv6 incoming packets */
extern struct npf_rule_list npf_ipv6_recv_rules;
/**
* @brief Insert a rule at the front of given rule list
*
* @param rules the affected rule list
* @param rule the rule to be inserted
*/
void npf_insert_rule(struct npf_rule_list *rules, struct npf_rule *rule);
/**
* @brief Append a rule at the end of given rule list
*
* @param rules the affected rule list
* @param rule the rule to be appended
*/
void npf_append_rule(struct npf_rule_list *rules, struct npf_rule *rule);
/**
* @brief Remove a rule from the given rule list
*
* @param rules the affected rule list
* @param rule the rule to be removed
* @retval true if given rule was found in the rule list and removed
*/
bool npf_remove_rule(struct npf_rule_list *rules, struct npf_rule *rule);
/**
* @brief Remove all rules from the given rule list
*
* @param rules the affected rule list
* @retval true if at least one rule was removed from the rule list
*/
bool npf_remove_all_rules(struct npf_rule_list *rules);
/** @cond INTERNAL_HIDDEN */
/* convenience shortcuts */
#define npf_insert_send_rule(rule) npf_insert_rule(&npf_send_rules, rule)
#define npf_insert_recv_rule(rule) npf_insert_rule(&npf_recv_rules, rule)
#define npf_append_send_rule(rule) npf_append_rule(&npf_send_rules, rule)
#define npf_append_recv_rule(rule) npf_append_rule(&npf_recv_rules, rule)
#define npf_remove_send_rule(rule) npf_remove_rule(&npf_send_rules, rule)
#define npf_remove_recv_rule(rule) npf_remove_rule(&npf_recv_rules, rule)
#define npf_remove_all_send_rules() npf_remove_all_rules(&npf_send_rules)
#define npf_remove_all_recv_rules() npf_remove_all_rules(&npf_recv_rules)
#ifdef CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK
#define npf_insert_local_in_recv_rule(rule) npf_insert_rule(&npf_local_in_recv_rules, rule)
#define npf_append_local_in_recv_rule(rule) npf_append_rule(&npf_local_in_recv_rules, rule)
#define npf_remove_local_in_recv_rule(rule) npf_remove_rule(&npf_local_in_recv_rules, rule)
#define npf_remove_all_local_in_recv_rules() npf_remove_all_rules(&npf_local_in_recv_rules)
#endif /* CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK */
#ifdef CONFIG_NET_PKT_FILTER_IPV4_HOOK
#define npf_insert_ipv4_recv_rule(rule) npf_insert_rule(&npf_ipv4_recv_rules, rule)
#define npf_append_ipv4_recv_rule(rule) npf_append_rule(&npf_ipv4_recv_rules, rule)
#define npf_remove_ipv4_recv_rule(rule) npf_remove_rule(&npf_ipv4_recv_rules, rule)
#define npf_remove_all_ipv4_recv_rules() npf_remove_all_rules(&npf_ipv4_recv_rules)
#endif /* CONFIG_NET_PKT_FILTER_IPV4_HOOK */
#ifdef CONFIG_NET_PKT_FILTER_IPV6_HOOK
#define npf_insert_ipv6_recv_rule(rule) npf_insert_rule(&npf_ipv6_recv_rules, rule)
#define npf_append_ipv6_recv_rule(rule) npf_append_rule(&npf_ipv6_recv_rules, rule)
#define npf_remove_ipv6_recv_rule(rule) npf_remove_rule(&npf_ipv6_recv_rules, rule)
#define npf_remove_all_ipv6_recv_rules() npf_remove_all_rules(&npf_ipv6_recv_rules)
#endif /* CONFIG_NET_PKT_FILTER_IPV6_HOOK */
/** @endcond */
/**
* @brief Statically define one packet filter rule
*
* This creates a rule from a variable amount of filter conditions.
* This rule can then be inserted or appended to the rule list for a given
* network packet path.
*
* Example:
*
* @code{.c}
*
* static NPF_SIZE_MAX(maxsize_200, 200);
* static NPF_ETH_TYPE_MATCH(ip_packet, NET_ETH_PTYPE_IP);
*
* static NPF_RULE(small_ip_pkt, NET_OK, ip_packet, maxsize_200);
*
* void install_my_filter(void)
* {
* npf_insert_recv_rule(&npf_default_drop);
* npf_insert_recv_rule(&small_ip_pkt);
* }
*
* @endcode
*
* The above would accept IP packets that are 200 bytes or smaller, and drop
* all other packets.
*
* Another (less efficient) way to create the same result could be:
*
* @code{.c}
*
* static NPF_SIZE_MIN(minsize_201, 201);
* static NPF_ETH_TYPE_UNMATCH(not_ip_packet, NET_ETH_PTYPE_IP);
*
* static NPF_RULE(reject_big_pkts, NET_DROP, minsize_201);
* static NPF_RULE(reject_non_ip, NET_DROP, not_ip_packet);
*
* void install_my_filter(void) {
* npf_append_recv_rule(&reject_big_pkts);
* npf_append_recv_rule(&reject_non_ip);
* npf_append_recv_rule(&npf_default_ok);
* }
*
* @endcode
*
* The first rule in the list for which all conditions are true determines
* the fate of the packet. If one condition is false then the next rule in
* the list is evaluated.
*
* @param _name Name for this rule.
* @param _result Fate of the packet if all conditions are true, either
* <tt>NET_OK</tt> or <tt>NET_DROP</tt>.
* @param ... List of conditions for this rule.
*/
/* NUM_VA_ARGS_LESS_1(__VA_ARGS__) + 1 evaluates to the number of variadic
 * arguments, i.e. the number of test conditions supplied to this rule.
 */
#define NPF_RULE(_name, _result, ...) \
	struct npf_rule _name = { \
		.result = (_result), \
		.nb_tests = NUM_VA_ARGS_LESS_1(__VA_ARGS__) + 1, \
		.tests = { FOR_EACH(Z_NPF_TEST_ADDR, (,), __VA_ARGS__) }, \
	}

/* Expands a condition object name into the address of its embedded
 * struct npf_test, as stored in npf_rule::tests[].
 */
#define Z_NPF_TEST_ADDR(arg) &arg.test
/** @} */
/**
* @defgroup npf_basic_cond Basic Filter Conditions
* @since 3.0
* @version 0.8.0
* @ingroup net_pkt_filter
* @{
*/
/** @cond INTERNAL_HIDDEN */
struct npf_test_iface {
struct npf_test test;
struct net_if *iface;
};
extern npf_test_fn_t npf_iface_match;
extern npf_test_fn_t npf_iface_unmatch;
extern npf_test_fn_t npf_orig_iface_match;
extern npf_test_fn_t npf_orig_iface_unmatch;
/** @endcond */
/**
* @brief Statically define an "interface match" packet filter condition
*
* @param _name Name of the condition
* @param _iface Interface to match
*/
#define NPF_IFACE_MATCH(_name, _iface) \
struct npf_test_iface _name = { \
.iface = (_iface), \
.test.fn = npf_iface_match, \
}
/**
* @brief Statically define an "interface unmatch" packet filter condition
*
* @param _name Name of the condition
* @param _iface Interface to exclude
*/
#define NPF_IFACE_UNMATCH(_name, _iface) \
struct npf_test_iface _name = { \
.iface = (_iface), \
.test.fn = npf_iface_unmatch, \
}
/**
* @brief Statically define an "orig interface match" packet filter condition
*
* @param _name Name of the condition
* @param _iface Interface to match
*/
#define NPF_ORIG_IFACE_MATCH(_name, _iface) \
struct npf_test_iface _name = { \
.iface = (_iface), \
.test.fn = npf_orig_iface_match, \
}
/**
* @brief Statically define an "orig interface unmatch" packet filter condition
*
* @param _name Name of the condition
* @param _iface Interface to exclude
*/
#define NPF_ORIG_IFACE_UNMATCH(_name, _iface) \
struct npf_test_iface _name = { \
.iface = (_iface), \
.test.fn = npf_orig_iface_unmatch, \
}
/** @cond INTERNAL_HIDDEN */
struct npf_test_size_bounds {
struct npf_test test;
size_t min;
size_t max;
};
extern npf_test_fn_t npf_size_inbounds;
/** @endcond */
/**
* @brief Statically define a "data minimum size" packet filter condition
*
* @param _name Name of the condition
* @param _size Lower bound of the packet's data size
*/
#define NPF_SIZE_MIN(_name, _size) \
struct npf_test_size_bounds _name = { \
.min = (_size), \
.max = SIZE_MAX, \
.test.fn = npf_size_inbounds, \
}
/**
* @brief Statically define a "data maximum size" packet filter condition
*
* @param _name Name of the condition
* @param _size Higher bound of the packet's data size
*/
#define NPF_SIZE_MAX(_name, _size) \
struct npf_test_size_bounds _name = { \
.min = 0, \
.max = (_size), \
.test.fn = npf_size_inbounds, \
}
/**
* @brief Statically define a "data bounded size" packet filter condition
*
* @param _name Name of the condition
* @param _min_size Lower bound of the packet's data size
* @param _max_size Higher bound of the packet's data size
*/
#define NPF_SIZE_BOUNDS(_name, _min_size, _max_size) \
struct npf_test_size_bounds _name = { \
.min = (_min_size), \
.max = (_max_size), \
.test.fn = npf_size_inbounds, \
}
/** @cond INTERNAL_HIDDEN */
struct npf_test_ip {
struct npf_test test;
uint8_t addr_family;
void *ipaddr;
uint32_t ipaddr_num;
};
extern npf_test_fn_t npf_ip_src_addr_match;
extern npf_test_fn_t npf_ip_src_addr_unmatch;
/** @endcond */
/**
 * @brief Statically define a "ip address allowlist" packet filter condition
 *
 * This tests if the packet source ip address matches any of the ip
 * addresses contained in the provided set.
 *
 * @param _name Name of the condition
 * @param _ip_addr_array Array of <tt>struct in_addr</tt> or <tt>struct in6_addr</tt> items to test
 *against
 * @param _ip_addr_num number of IP addresses in the array
 * @param _af Addresses family type (AF_INET / AF_INET6) in the array
 */
#define NPF_IP_SRC_ADDR_ALLOWLIST(_name, _ip_addr_array, _ip_addr_num, _af) \
	struct npf_test_ip _name = { \
		.addr_family = (_af), \
		.ipaddr = (_ip_addr_array), \
		.ipaddr_num = (_ip_addr_num), \
		.test.fn = npf_ip_src_addr_match, \
	}
/**
 * @brief Statically define a "ip address blocklist" packet filter condition
 *
 * This tests if the packet source ip address matches none of the ip
 * addresses contained in the provided set.
 *
 * @param _name Name of the condition
 * @param _ip_addr_array Array of <tt>struct in_addr</tt> or <tt>struct in6_addr</tt> items to test
 *against
 * @param _ip_addr_num number of IP addresses in the array
 * @param _af Addresses family type (AF_INET / AF_INET6) in the array
 */
#define NPF_IP_SRC_ADDR_BLOCKLIST(_name, _ip_addr_array, _ip_addr_num, _af) \
	struct npf_test_ip _name = { \
		.addr_family = (_af), \
		.ipaddr = (_ip_addr_array), \
		.ipaddr_num = (_ip_addr_num), \
		.test.fn = npf_ip_src_addr_unmatch, \
	}
/** @} */
/**
* @defgroup npf_eth_cond Ethernet Filter Conditions
* @ingroup net_pkt_filter
* @since 3.0
* @version 0.8.0
* @{
*/
/** @cond INTERNAL_HIDDEN */
struct npf_test_eth_addr {
struct npf_test test;
unsigned int nb_addresses;
struct net_eth_addr *addresses;
struct net_eth_addr mask;
};
extern npf_test_fn_t npf_eth_src_addr_match;
extern npf_test_fn_t npf_eth_src_addr_unmatch;
extern npf_test_fn_t npf_eth_dst_addr_match;
extern npf_test_fn_t npf_eth_dst_addr_unmatch;
/** @endcond */
/**
* @brief Statically define a "source address match" packet filter condition
*
* This tests if the packet source address matches any of the Ethernet
* addresses contained in the provided set.
*
* @param _name Name of the condition
* @param _addr_array Array of <tt>struct net_eth_addr</tt> items to test against
*/
#define NPF_ETH_SRC_ADDR_MATCH(_name, _addr_array) \
struct npf_test_eth_addr _name = { \
.addresses = (_addr_array), \
.nb_addresses = ARRAY_SIZE(_addr_array), \
.test.fn = npf_eth_src_addr_match, \
.mask.addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, \
}
/**
* @brief Statically define a "source address unmatch" packet filter condition
*
* This tests if the packet source address matches none of the Ethernet
* addresses contained in the provided set.
*
* @param _name Name of the condition
* @param _addr_array Array of <tt>struct net_eth_addr</tt> items to test against
*/
#define NPF_ETH_SRC_ADDR_UNMATCH(_name, _addr_array) \
struct npf_test_eth_addr _name = { \
.addresses = (_addr_array), \
.nb_addresses = ARRAY_SIZE(_addr_array), \
.test.fn = npf_eth_src_addr_unmatch, \
.mask.addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, \
}
/**
* @brief Statically define a "destination address match" packet filter condition
*
* This tests if the packet destination address matches any of the Ethernet
* addresses contained in the provided set.
*
* @param _name Name of the condition
* @param _addr_array Array of <tt>struct net_eth_addr</tt> items to test against
*/
#define NPF_ETH_DST_ADDR_MATCH(_name, _addr_array) \
struct npf_test_eth_addr _name = { \
.addresses = (_addr_array), \
.nb_addresses = ARRAY_SIZE(_addr_array), \
.test.fn = npf_eth_dst_addr_match, \
.mask.addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, \
}
/**
* @brief Statically define a "destination address unmatch" packet filter condition
*
* This tests if the packet destination address matches none of the Ethernet
* addresses contained in the provided set.
*
* @param _name Name of the condition
* @param _addr_array Array of <tt>struct net_eth_addr</tt> items to test against
*/
#define NPF_ETH_DST_ADDR_UNMATCH(_name, _addr_array) \
struct npf_test_eth_addr _name = { \
.addresses = (_addr_array), \
.nb_addresses = ARRAY_SIZE(_addr_array), \
.test.fn = npf_eth_dst_addr_unmatch, \
.mask.addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, \
}
/**
* @brief Statically define a "source address match with mask" packet filter condition
*
* This tests if the packet source address matches any of the Ethernet
* addresses contained in the provided set after applying specified mask.
*
* @param _name Name of the condition
* @param _addr_array Array of <tt>struct net_eth_addr</tt> items to test against
* @param ... up to 6 mask bytes
*/
#define NPF_ETH_SRC_ADDR_MASK_MATCH(_name, _addr_array, ...) \
struct npf_test_eth_addr _name = { \
.addresses = (_addr_array), \
.nb_addresses = ARRAY_SIZE(_addr_array), \
.mask.addr = { __VA_ARGS__ }, \
.test.fn = npf_eth_src_addr_match, \
}
/**
* @brief Statically define a "destination address match with mask" packet filter condition
*
* This tests if the packet destination address matches any of the Ethernet
* addresses contained in the provided set after applying specified mask.
*
* @param _name Name of the condition
* @param _addr_array Array of <tt>struct net_eth_addr</tt> items to test against
* @param ... up to 6 mask bytes
*/
#define NPF_ETH_DST_ADDR_MASK_MATCH(_name, _addr_array, ...) \
struct npf_test_eth_addr _name = { \
.addresses = (_addr_array), \
.nb_addresses = ARRAY_SIZE(_addr_array), \
.mask.addr = { __VA_ARGS__ }, \
.test.fn = npf_eth_dst_addr_match, \
}
/** @cond INTERNAL_HIDDEN */
struct npf_test_eth_type {
struct npf_test test;
uint16_t type; /* type in network order */
};
extern npf_test_fn_t npf_eth_type_match;
extern npf_test_fn_t npf_eth_type_unmatch;
/** @endcond */
/**
* @brief Statically define an "Ethernet type match" packet filter condition
*
* @param _name Name of the condition
* @param _type Ethernet type to match
*/
#define NPF_ETH_TYPE_MATCH(_name, _type) \
struct npf_test_eth_type _name = { \
.type = htons(_type), \
.test.fn = npf_eth_type_match, \
}
/**
* @brief Statically define an "Ethernet type unmatch" packet filter condition
*
* @param _name Name of the condition
* @param _type Ethernet type to exclude
*/
#define NPF_ETH_TYPE_UNMATCH(_name, _type) \
struct npf_test_eth_type _name = { \
.type = htons(_type), \
.test.fn = npf_eth_type_unmatch, \
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_PKT_FILTER_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_pkt_filter.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,522 |
```objective-c
/** @file
* @brief Buffer management.
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_BUF_H_
#define ZEPHYR_INCLUDE_NET_BUF_H_
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/iterable_sections.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network buffer library
* @defgroup net_buf Network Buffer Library
* @since 1.0
* @version 1.0.0
* @ingroup networking
* @{
*/
/* Alignment needed for various parts of the buffer definition */
#if CONFIG_NET_BUF_ALIGNMENT == 0
#define __net_buf_align __aligned(sizeof(void *))
#else
#define __net_buf_align __aligned(CONFIG_NET_BUF_ALIGNMENT)
#endif
/**
* @brief Define a net_buf_simple stack variable.
*
* This is a helper macro which is used to define a net_buf_simple object
* on the stack.
*
* @param _name Name of the net_buf_simple object.
* @param _size Maximum data storage for the buffer.
*/
#define NET_BUF_SIMPLE_DEFINE(_name, _size) \
uint8_t net_buf_data_##_name[_size]; \
struct net_buf_simple _name = { \
.data = net_buf_data_##_name, \
.len = 0, \
.size = _size, \
.__buf = net_buf_data_##_name, \
}
/**
*
* @brief Define a static net_buf_simple variable.
*
* This is a helper macro which is used to define a static net_buf_simple
* object.
*
* @param _name Name of the net_buf_simple object.
* @param _size Maximum data storage for the buffer.
*/
#define NET_BUF_SIMPLE_DEFINE_STATIC(_name, _size) \
static __noinit uint8_t net_buf_data_##_name[_size]; \
static struct net_buf_simple _name = { \
.data = net_buf_data_##_name, \
.len = 0, \
.size = _size, \
.__buf = net_buf_data_##_name, \
}
/**
* @brief Simple network buffer representation.
*
* This is a simpler variant of the net_buf object (in fact net_buf uses
* net_buf_simple internally). It doesn't provide any kind of reference
* counting, user data, dynamic allocation, or in general the ability to
* pass through kernel objects such as FIFOs.
*
* The main use of this is for scenarios where the meta-data of the normal
* net_buf isn't needed and causes too much overhead. This could be e.g.
* when the buffer only needs to be allocated on the stack or when the
* access to and lifetime of the buffer is well controlled and constrained.
*/
struct net_buf_simple {
/** Pointer to the start of data in the buffer. */
uint8_t *data;
/**
* Length of the data behind the data pointer.
*
* To determine the max length, use net_buf_simple_max_len(), not #size!
*/
uint16_t len;
/** Amount of data that net_buf_simple#__buf can store. */
uint16_t size;
/** Start of the data storage. Not to be accessed directly
* (the data pointer should be used instead).
*/
uint8_t *__buf;
};
/**
 *
 * @brief Define a net_buf_simple stack variable and get a pointer to it.
 *
 * This is a helper macro which is used to define a net_buf_simple object on
 * the stack and then get a pointer to it as follows:
 *
 *     struct net_buf_simple *my_buf = NET_BUF_SIMPLE(10);
 *
 * After creating the object it needs to be initialized by calling
 * net_buf_simple_init().
 *
 * @param _size Maximum data storage for the buffer.
 *
 * @return Pointer to stack-allocated net_buf_simple object.
 */
#define NET_BUF_SIMPLE(_size) \
	((struct net_buf_simple *)(&(struct { \
		struct net_buf_simple buf; \
		uint8_t data[_size]; \
	}) { \
		.buf.size = _size, \
	}))
/**
 * @brief Initialize a net_buf_simple object.
 *
 * This needs to be called after creating a net_buf_simple object using
 * the NET_BUF_SIMPLE macro.
 *
 * @param buf Buffer to initialize.
 * @param reserve_head Headroom to reserve.
 */
static inline void net_buf_simple_init(struct net_buf_simple *buf,
				       size_t reserve_head)
{
	/* Objects created with the NET_BUF_SIMPLE() macro leave __buf NULL;
	 * their storage area is laid out directly behind the structure.
	 */
	if (buf->__buf == NULL) {
		buf->__buf = (uint8_t *)buf + sizeof(*buf);
	}

	buf->len = 0U;
	buf->data = &buf->__buf[reserve_head];
}
/**
 * @brief Initialize a net_buf_simple object with data.
 *
 * Initializes the buffer object with external data.
 *
 * @param buf Buffer to initialize.
 * @param data External data pointer
 * @param size Amount of data the pointed-to data buffer is able to fit.
 */
void net_buf_simple_init_with_data(struct net_buf_simple *buf,
				   void *data, size_t size);
/**
 * @brief Reset buffer
 *
 * Reset buffer data so it can be reused for other purposes.
 *
 * @param buf Buffer to reset.
 */
static inline void net_buf_simple_reset(struct net_buf_simple *buf)
{
	/* Rewind the data pointer to the start of storage and drop the
	 * current contents.
	 */
	buf->data = buf->__buf;
	buf->len = 0U;
}
/**
* Clone buffer state, using the same data buffer.
*
* Initializes a buffer to point to the same data as an existing buffer.
* Allows operations on the same data without altering the length and
* offset of the original.
*
* @param original Buffer to clone.
* @param clone The new clone.
*/
void net_buf_simple_clone(const struct net_buf_simple *original,
struct net_buf_simple *clone);
/**
* @brief Prepare data to be added at the end of the buffer
*
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param len Number of bytes to increment the length with.
*
* @return The original tail of the buffer.
*/
void *net_buf_simple_add(struct net_buf_simple *buf, size_t len);
/**
* @brief Copy given number of bytes from memory to the end of the buffer
*
* Increments the data length of the buffer to account for more data at the
* end.
*
* @param buf Buffer to update.
* @param mem Location of data to be added.
* @param len Length of data to be added
*
* @return The original tail of the buffer.
*/
void *net_buf_simple_add_mem(struct net_buf_simple *buf, const void *mem,
size_t len);
/**
* @brief Add (8-bit) byte at the end of the buffer
*
* Increments the data length of the buffer to account for more data at the
* end.
*
* @param buf Buffer to update.
* @param val byte value to be added.
*
* @return Pointer to the value added
*/
uint8_t *net_buf_simple_add_u8(struct net_buf_simple *buf, uint8_t val);
/**
* @brief Add 16-bit value at the end of the buffer
*
* Adds 16-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 16-bit value to be added.
*/
void net_buf_simple_add_le16(struct net_buf_simple *buf, uint16_t val);
/**
* @brief Add 16-bit value at the end of the buffer
*
* Adds 16-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 16-bit value to be added.
*/
void net_buf_simple_add_be16(struct net_buf_simple *buf, uint16_t val);
/**
* @brief Add 24-bit value at the end of the buffer
*
* Adds 24-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 24-bit value to be added.
*/
void net_buf_simple_add_le24(struct net_buf_simple *buf, uint32_t val);
/**
* @brief Add 24-bit value at the end of the buffer
*
* Adds 24-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 24-bit value to be added.
*/
void net_buf_simple_add_be24(struct net_buf_simple *buf, uint32_t val);
/**
* @brief Add 32-bit value at the end of the buffer
*
* Adds 32-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 32-bit value to be added.
*/
void net_buf_simple_add_le32(struct net_buf_simple *buf, uint32_t val);
/**
* @brief Add 32-bit value at the end of the buffer
*
* Adds 32-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 32-bit value to be added.
*/
void net_buf_simple_add_be32(struct net_buf_simple *buf, uint32_t val);
/**
* @brief Add 40-bit value at the end of the buffer
*
* Adds 40-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 40-bit value to be added.
*/
void net_buf_simple_add_le40(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Add 40-bit value at the end of the buffer
*
* Adds 40-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 40-bit value to be added.
*/
void net_buf_simple_add_be40(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Add 48-bit value at the end of the buffer
*
* Adds 48-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 48-bit value to be added.
*/
void net_buf_simple_add_le48(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Add 48-bit value at the end of the buffer
*
* Adds 48-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 48-bit value to be added.
*/
void net_buf_simple_add_be48(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Add 64-bit value at the end of the buffer
*
* Adds 64-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 64-bit value to be added.
*/
void net_buf_simple_add_le64(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Add 64-bit value at the end of the buffer
*
* Adds 64-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 64-bit value to be added.
*/
void net_buf_simple_add_be64(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Remove data from the end of the buffer.
*
* Removes data from the end of the buffer by modifying the buffer length.
*
* @param buf Buffer to update.
* @param len Number of bytes to remove.
*
* @return New end of the buffer data.
*/
void *net_buf_simple_remove_mem(struct net_buf_simple *buf, size_t len);
/**
* @brief Remove a 8-bit value from the end of the buffer
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 8-bit values.
*
* @param buf A valid pointer on a buffer.
*
* @return The 8-bit removed value
*/
uint8_t net_buf_simple_remove_u8(struct net_buf_simple *buf);
/**
* @brief Remove and convert 16 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 16-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 16-bit value converted from little endian to host endian.
*/
uint16_t net_buf_simple_remove_le16(struct net_buf_simple *buf);
/**
* @brief Remove and convert 16 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 16-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 16-bit value converted from big endian to host endian.
*/
uint16_t net_buf_simple_remove_be16(struct net_buf_simple *buf);
/**
* @brief Remove and convert 24 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 24-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 24-bit value converted from little endian to host endian.
*/
uint32_t net_buf_simple_remove_le24(struct net_buf_simple *buf);
/**
* @brief Remove and convert 24 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 24-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 24-bit value converted from big endian to host endian.
*/
uint32_t net_buf_simple_remove_be24(struct net_buf_simple *buf);
/**
* @brief Remove and convert 32 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 32-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 32-bit value converted from little endian to host endian.
*/
uint32_t net_buf_simple_remove_le32(struct net_buf_simple *buf);
/**
* @brief Remove and convert 32 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 32-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 32-bit value converted from big endian to host endian.
*/
uint32_t net_buf_simple_remove_be32(struct net_buf_simple *buf);
/**
* @brief Remove and convert 40 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 40-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 40-bit value converted from little endian to host endian.
*/
uint64_t net_buf_simple_remove_le40(struct net_buf_simple *buf);
/**
* @brief Remove and convert 40 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 40-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 40-bit value converted from big endian to host endian.
*/
uint64_t net_buf_simple_remove_be40(struct net_buf_simple *buf);
/**
* @brief Remove and convert 48 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 48-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 48-bit value converted from little endian to host endian.
*/
uint64_t net_buf_simple_remove_le48(struct net_buf_simple *buf);
/**
* @brief Remove and convert 48 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 48-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 48-bit value converted from big endian to host endian.
*/
uint64_t net_buf_simple_remove_be48(struct net_buf_simple *buf);
/**
* @brief Remove and convert 64 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 64-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 64-bit value converted from little endian to host endian.
*/
uint64_t net_buf_simple_remove_le64(struct net_buf_simple *buf);
/**
* @brief Remove and convert 64 bits from the end of the buffer.
*
* Same idea as with net_buf_simple_remove_mem(), but a helper for operating
* on 64-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 64-bit value converted from big endian to host endian.
*/
uint64_t net_buf_simple_remove_be64(struct net_buf_simple *buf);
/**
* @brief Prepare data to be added to the start of the buffer
*
* Modifies the data pointer and buffer length to account for more data
* in the beginning of the buffer.
*
* @param buf Buffer to update.
* @param len Number of bytes to add to the beginning.
*
* @return The new beginning of the buffer data.
*/
void *net_buf_simple_push(struct net_buf_simple *buf, size_t len);
/**
* @brief Copy given number of bytes from memory to the start of the buffer.
*
* Modifies the data pointer and buffer length to account for more data
* in the beginning of the buffer.
*
* @param buf Buffer to update.
* @param mem Location of data to be added.
* @param len Length of data to be added.
*
* @return The new beginning of the buffer data.
*/
void *net_buf_simple_push_mem(struct net_buf_simple *buf, const void *mem,
size_t len);
/**
* @brief Push 16-bit value to the beginning of the buffer
*
* Adds 16-bit value in little endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 16-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_le16(struct net_buf_simple *buf, uint16_t val);
/**
* @brief Push 16-bit value to the beginning of the buffer
*
* Adds 16-bit value in big endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 16-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_be16(struct net_buf_simple *buf, uint16_t val);
/**
 * @brief Push 8-bit value to the beginning of the buffer
 *
 * Adds 8-bit value to the beginning of the buffer.
 *
 * @param buf Buffer to update.
 * @param val 8-bit value to be pushed to the buffer.
 */
void net_buf_simple_push_u8(struct net_buf_simple *buf, uint8_t val);
/**
* @brief Push 24-bit value to the beginning of the buffer
*
* Adds 24-bit value in little endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 24-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_le24(struct net_buf_simple *buf, uint32_t val);
/**
* @brief Push 24-bit value to the beginning of the buffer
*
* Adds 24-bit value in big endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 24-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_be24(struct net_buf_simple *buf, uint32_t val);
/**
* @brief Push 32-bit value to the beginning of the buffer
*
* Adds 32-bit value in little endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 32-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_le32(struct net_buf_simple *buf, uint32_t val);
/**
* @brief Push 32-bit value to the beginning of the buffer
*
* Adds 32-bit value in big endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 32-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_be32(struct net_buf_simple *buf, uint32_t val);
/**
* @brief Push 40-bit value to the beginning of the buffer
*
* Adds 40-bit value in little endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 40-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_le40(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Push 40-bit value to the beginning of the buffer
*
* Adds 40-bit value in big endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 40-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_be40(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Push 48-bit value to the beginning of the buffer
*
* Adds 48-bit value in little endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 48-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_le48(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Push 48-bit value to the beginning of the buffer
*
* Adds 48-bit value in big endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 48-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_be48(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Push 64-bit value to the beginning of the buffer
*
* Adds 64-bit value in little endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 64-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_le64(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Push 64-bit value to the beginning of the buffer
*
* Adds 64-bit value in big endian format to the beginning of the
* buffer.
*
* @param buf Buffer to update.
* @param val 64-bit value to be pushed to the buffer.
*/
void net_buf_simple_push_be64(struct net_buf_simple *buf, uint64_t val);
/**
* @brief Remove data from the beginning of the buffer.
*
* Removes data from the beginning of the buffer by modifying the data
* pointer and buffer length.
*
* @param buf Buffer to update.
* @param len Number of bytes to remove.
*
* @return New beginning of the buffer data.
*/
void *net_buf_simple_pull(struct net_buf_simple *buf, size_t len);
/**
* @brief Remove data from the beginning of the buffer.
*
* Removes data from the beginning of the buffer by modifying the data
* pointer and buffer length.
*
* @param buf Buffer to update.
* @param len Number of bytes to remove.
*
* @return Pointer to the old location of the buffer data.
*/
void *net_buf_simple_pull_mem(struct net_buf_simple *buf, size_t len);
/**
 * @brief Remove an 8-bit value from the beginning of the buffer
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 8-bit values.
*
* @param buf A valid pointer on a buffer.
*
* @return The 8-bit removed value
*/
uint8_t net_buf_simple_pull_u8(struct net_buf_simple *buf);
/**
* @brief Remove and convert 16 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 16-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 16-bit value converted from little endian to host endian.
*/
uint16_t net_buf_simple_pull_le16(struct net_buf_simple *buf);
/**
* @brief Remove and convert 16 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 16-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 16-bit value converted from big endian to host endian.
*/
uint16_t net_buf_simple_pull_be16(struct net_buf_simple *buf);
/**
* @brief Remove and convert 24 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 24-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 24-bit value converted from little endian to host endian.
*/
uint32_t net_buf_simple_pull_le24(struct net_buf_simple *buf);
/**
* @brief Remove and convert 24 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 24-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 24-bit value converted from big endian to host endian.
*/
uint32_t net_buf_simple_pull_be24(struct net_buf_simple *buf);
/**
* @brief Remove and convert 32 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 32-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 32-bit value converted from little endian to host endian.
*/
uint32_t net_buf_simple_pull_le32(struct net_buf_simple *buf);
/**
* @brief Remove and convert 32 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 32-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 32-bit value converted from big endian to host endian.
*/
uint32_t net_buf_simple_pull_be32(struct net_buf_simple *buf);
/**
* @brief Remove and convert 40 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 40-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 40-bit value converted from little endian to host endian.
*/
uint64_t net_buf_simple_pull_le40(struct net_buf_simple *buf);
/**
* @brief Remove and convert 40 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 40-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 40-bit value converted from big endian to host endian.
*/
uint64_t net_buf_simple_pull_be40(struct net_buf_simple *buf);
/**
* @brief Remove and convert 48 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 48-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 48-bit value converted from little endian to host endian.
*/
uint64_t net_buf_simple_pull_le48(struct net_buf_simple *buf);
/**
* @brief Remove and convert 48 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 48-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 48-bit value converted from big endian to host endian.
*/
uint64_t net_buf_simple_pull_be48(struct net_buf_simple *buf);
/**
* @brief Remove and convert 64 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 64-bit little endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 64-bit value converted from little endian to host endian.
*/
uint64_t net_buf_simple_pull_le64(struct net_buf_simple *buf);
/**
* @brief Remove and convert 64 bits from the beginning of the buffer.
*
* Same idea as with net_buf_simple_pull(), but a helper for operating
* on 64-bit big endian data.
*
* @param buf A valid pointer on a buffer.
*
* @return 64-bit value converted from big endian to host endian.
*/
uint64_t net_buf_simple_pull_be64(struct net_buf_simple *buf);
/**
* @brief Get the tail pointer for a buffer.
*
* Get a pointer to the end of the data in a buffer.
*
* @param buf Buffer.
*
* @return Tail pointer for the buffer.
*/
static inline uint8_t *net_buf_simple_tail(const struct net_buf_simple *buf)
{
	/* One past the last byte of valid data. */
	return &buf->data[buf->len];
}
/**
* @brief Check buffer headroom.
*
* Check how much free space there is in the beginning of the buffer.
*
 * @param buf A valid pointer on a buffer
*
* @return Number of bytes available in the beginning of the buffer.
*/
size_t net_buf_simple_headroom(const struct net_buf_simple *buf);
/**
* @brief Check buffer tailroom.
*
* Check how much free space there is at the end of the buffer.
*
* @param buf A valid pointer on a buffer
*
* @return Number of bytes available at the end of the buffer.
*/
size_t net_buf_simple_tailroom(const struct net_buf_simple *buf);
/**
* @brief Check maximum net_buf_simple::len value.
*
* This value is depending on the number of bytes being reserved as headroom.
*
* @param buf A valid pointer on a buffer
*
* @return Number of bytes usable behind the net_buf_simple::data pointer.
*/
uint16_t net_buf_simple_max_len(const struct net_buf_simple *buf);
/**
* @brief Parsing state of a buffer.
*
* This is used for temporarily storing the parsing state of a buffer
* while giving control of the parsing to a routine which we don't
* control.
*/
struct net_buf_simple_state {
	/** Offset of the data pointer from the beginning of the storage,
	 * i.e. the buffer's headroom at the time the state was saved
	 * (see net_buf_simple_save()).
	 */
	uint16_t offset;
	/** Length of data */
	uint16_t len;
};
/**
* @brief Save the parsing state of a buffer.
*
* Saves the parsing state of a buffer so it can be restored later.
*
* @param buf Buffer from which the state should be saved.
* @param state Storage for the state.
*/
static inline void net_buf_simple_save(const struct net_buf_simple *buf,
				       struct net_buf_simple_state *state)
{
	/* The saved offset is simply the buffer's current headroom. */
	state->len = buf->len;
	state->offset = (uint16_t)net_buf_simple_headroom(buf);
}
/**
* @brief Restore the parsing state of a buffer.
*
* Restores the parsing state of a buffer from a state previously stored
* by net_buf_simple_save().
*
* @param buf Buffer to which the state should be restored.
* @param state Stored state.
*/
static inline void net_buf_simple_restore(struct net_buf_simple *buf,
					  struct net_buf_simple_state *state)
{
	/* Re-derive the data pointer from the saved headroom offset. */
	buf->len = state->len;
	buf->data = &buf->__buf[state->offset];
}
/**
* Flag indicating that the buffer's associated data pointer, points to
* externally allocated memory. Therefore once ref goes down to zero, the
* pointed data will not need to be deallocated. This never needs to be
* explicitly set or unset by the net_buf API user. Such net_buf is
* exclusively instantiated via net_buf_alloc_with_data() function.
* Reference count mechanism however will behave the same way, and ref
 * count going to 0 will free the net_buf but not the data pointer in it.
*/
#define NET_BUF_EXTERNAL_DATA BIT(0)
/**
* @brief Network buffer representation.
*
* This struct is used to represent network buffers. Such buffers are
* normally defined through the NET_BUF_POOL_*_DEFINE() APIs and allocated
* using the net_buf_alloc() API.
*/
struct net_buf {
	/** Allow placing the buffer into sys_slist_t */
	sys_snode_t node;
	/** Fragments associated with this buffer. */
	struct net_buf *frags;
	/** Reference count. */
	uint8_t ref;
	/** Bit-field of buffer flags (e.g. NET_BUF_EXTERNAL_DATA). */
	uint8_t flags;
	/** Where the buffer should go when freed up. */
	uint8_t pool_id;
	/** Size of user data on this buffer */
	uint8_t user_data_size;
	/** Union for convenience access to the net_buf_simple members, also
	 * preserving the old API.
	 */
	union {
		/* The ABI of this struct must match net_buf_simple */
		struct {
			/** Pointer to the start of data in the buffer. */
			uint8_t *data;
			/** Length of the data behind the data pointer. */
			uint16_t len;
			/** Amount of data that this buffer can store. */
			uint16_t size;
			/** Start of the data storage. Not to be accessed
			 * directly (the data pointer should be used
			 * instead).
			 */
			uint8_t *__buf;
		};
		/** @cond INTERNAL_HIDDEN */
		struct net_buf_simple b;
		/** @endcond */
	};
	/** System metadata for this buffer. Cleared on allocation.
	 * Flexible array member; the actual per-buffer size is given by
	 * user_data_size.
	 */
	uint8_t user_data[] __net_buf_align;
};
/** @cond INTERNAL_HIDDEN */
/* Allocator callback table for a buffer pool's data payloads.
 * NOTE(review): callback semantics below inferred from names and from
 * the unref() call in net_buf_destroy() -- confirm against the net_buf
 * implementation.
 */
struct net_buf_data_cb {
	/* Allocate payload data for @a buf; may adjust *size. */
	uint8_t * __must_check (*alloc)(struct net_buf *buf, size_t *size,
					k_timeout_t timeout);
	/* Presumably takes an additional reference on @a data. */
	uint8_t * __must_check (*ref)(struct net_buf *buf, uint8_t *data);
	/* Release @a data; invoked from net_buf_destroy() for pool-owned data. */
	void (*unref)(struct net_buf *buf, uint8_t *data);
};
struct net_buf_data_alloc {
	/* Allocator callback table. */
	const struct net_buf_data_cb *cb;
	/* Allocator-specific context (e.g. a k_heap or fixed-chunk descriptor). */
	void *alloc_data;
	/* Largest single allocation; 0 for variable-size (heap-backed) pools. */
	size_t max_alloc_size;
};
/** @endcond */
/**
* @brief Network buffer pool representation.
*
* This struct is used to represent a pool of network buffers.
*/
struct net_buf_pool {
	/** LIFO to place the buffer into when free */
	struct k_lifo free;
	/** To prevent concurrent access/modifications */
	struct k_spinlock lock;
	/** Number of buffers in pool */
	const uint16_t buf_count;
	/** Number of uninitialized buffers */
	uint16_t uninit_count;
	/** Size of user data allocated to this pool */
	uint8_t user_data_size;
#if defined(CONFIG_NET_BUF_POOL_USAGE)
	/** Amount of available buffers in the pool. */
	atomic_t avail_count;
	/** Total size of the pool. */
	const uint16_t pool_size;
	/** Name of the pool. Used when printing pool information. */
	const char *name;
#endif /* CONFIG_NET_BUF_POOL_USAGE */
	/** Optional destroy callback when buffer is freed. If set, the
	 * callback is responsible for eventually calling net_buf_destroy()
	 * to finish returning the buffer to the pool.
	 */
	void (*const destroy)(struct net_buf *buf);
	/** Data allocation handlers. */
	const struct net_buf_data_alloc *alloc;
	/** Start of buffer storage array */
	struct net_buf * const __bufs;
};
/** @cond INTERNAL_HIDDEN */
/* Expands to the usage-statistics member initializers (avail_count, name)
 * when CONFIG_NET_BUF_POOL_USAGE is enabled; expands to nothing otherwise.
 */
#define NET_BUF_POOL_USAGE_INIT(_pool, _count) \
	IF_ENABLED(CONFIG_NET_BUF_POOL_USAGE, (.avail_count = ATOMIC_INIT(_count),)) \
	IF_ENABLED(CONFIG_NET_BUF_POOL_USAGE, (.name = STRINGIFY(_pool),))
/* Static initializer for a struct net_buf_pool. All buffers start out
 * uninitialized (uninit_count == buf_count).
 */
#define NET_BUF_POOL_INITIALIZER(_pool, _alloc, _bufs, _count, _ud_size, _destroy) \
	{ \
		.free = Z_LIFO_INITIALIZER(_pool.free), \
		.lock = { }, \
		.buf_count = _count, \
		.uninit_count = _count, \
		.user_data_size = _ud_size, \
		NET_BUF_POOL_USAGE_INIT(_pool, _count) \
		.destroy = _destroy, \
		.alloc = _alloc, \
		.__bufs = (struct net_buf *)_bufs, \
	}
/* Internal helper: defines the static backing storage for a pool's buffers.
 * Each array element is a struct net_buf followed by _ud_size bytes of user
 * data; the BUILD_ASSERTs verify that this layout exactly matches
 * struct net_buf with its flexible user_data[] member appended.
 */
#define _NET_BUF_ARRAY_DEFINE(_name, _count, _ud_size) \
	struct _net_buf_##_name { uint8_t b[sizeof(struct net_buf)]; \
				  uint8_t ud[_ud_size]; } __net_buf_align; \
	BUILD_ASSERT(_ud_size <= UINT8_MAX); \
	BUILD_ASSERT(offsetof(struct net_buf, user_data) == \
		     offsetof(struct _net_buf_##_name, ud), "Invalid offset"); \
	BUILD_ASSERT(__alignof__(struct net_buf) == \
		     __alignof__(struct _net_buf_##_name), "Invalid alignment"); \
	BUILD_ASSERT(sizeof(struct _net_buf_##_name) == \
		     ROUND_UP(sizeof(struct net_buf) + _ud_size, __alignof__(struct net_buf)), \
		     "Size cannot be determined"); \
	static struct _net_buf_##_name _net_buf_##_name[_count] __noinit
extern const struct net_buf_data_alloc net_buf_heap_alloc;
/** @endcond */
/**
 *
 * @brief Define a new pool for buffers using the heap for the data.
 *
 * Defines a net_buf_pool struct and the necessary memory storage (array of
 * structs) for the needed amount of buffers. After this, the buffers can be
 * accessed from the pool through net_buf_alloc. The pool is defined as a
 * static variable, so if it needs to be exported outside the current module
 * this needs to happen with the help of a separate pointer rather than an
 * extern declaration.
 *
 * The data payload of the buffers will be allocated from the heap using
 * k_malloc, so CONFIG_HEAP_MEM_POOL_SIZE must be set to a positive value.
 * This kind of pool does not support blocking on the data allocation, so
 * the timeout passed to net_buf_alloc will be always treated as K_NO_WAIT
 * when trying to allocate the data. This means that allocation failures,
 * i.e. NULL returns, must always be handled cleanly.
 *
 * If provided with a custom destroy callback, this callback is
 * responsible for eventually calling net_buf_destroy() to complete the
 * process of returning the buffer to the pool.
 *
 * @note The pool object is placed in the net_buf_pool iterable section
 *       (see STRUCT_SECTION_ITERABLE and net_buf_pool_get()).
 *
 * @param _name Name of the pool variable.
 * @param _count Number of buffers in the pool.
 * @param _ud_size User data space to reserve per buffer.
 * @param _destroy Optional destroy callback when buffer is freed.
 */
#define NET_BUF_POOL_HEAP_DEFINE(_name, _count, _ud_size, _destroy) \
	_NET_BUF_ARRAY_DEFINE(_name, _count, _ud_size); \
	static STRUCT_SECTION_ITERABLE(net_buf_pool, _name) = \
		NET_BUF_POOL_INITIALIZER(_name, &net_buf_heap_alloc, \
					 _net_buf_##_name, _count, _ud_size, \
					 _destroy)
/** @cond INTERNAL_HIDDEN */
/* Descriptor for the fixed-size data allocator: points at the pool's
 * statically allocated array of fixed-size payload chunks.
 */
struct net_buf_pool_fixed {
	uint8_t *data_pool;
};
extern const struct net_buf_data_cb net_buf_fixed_cb;
/** @endcond */
/**
 *
 * @brief Define a new pool for buffers based on fixed-size data
 *
 * Defines a net_buf_pool struct and the necessary memory storage (array of
 * structs) for the needed amount of buffers. After this, the buffers can be
 * accessed from the pool through net_buf_alloc. The pool is defined as a
 * static variable, so if it needs to be exported outside the current module
 * this needs to happen with the help of a separate pointer rather than an
 * extern declaration.
 *
 * The data payload of the buffers will be allocated from a byte array
 * of fixed sized chunks. This kind of pool does not support blocking on
 * the data allocation, so the timeout passed to net_buf_alloc will be
 * always treated as K_NO_WAIT when trying to allocate the data. This means
 * that allocation failures, i.e. NULL returns, must always be handled
 * cleanly.
 *
 * If provided with a custom destroy callback, this callback is
 * responsible for eventually calling net_buf_destroy() to complete the
 * process of returning the buffer to the pool.
 *
 * @note Buffers from this pool can hold at most @a _data_size bytes of
 *       payload (this is also the allocator's max_alloc_size).
 *
 * @param _name Name of the pool variable.
 * @param _count Number of buffers in the pool.
 * @param _data_size Maximum data payload per buffer.
 * @param _ud_size User data space to reserve per buffer.
 * @param _destroy Optional destroy callback when buffer is freed.
 */
#define NET_BUF_POOL_FIXED_DEFINE(_name, _count, _data_size, _ud_size, _destroy) \
	_NET_BUF_ARRAY_DEFINE(_name, _count, _ud_size); \
	static uint8_t __noinit net_buf_data_##_name[_count][_data_size] __net_buf_align; \
	static const struct net_buf_pool_fixed net_buf_fixed_##_name = { \
		.data_pool = (uint8_t *)net_buf_data_##_name, \
	}; \
	static const struct net_buf_data_alloc net_buf_fixed_alloc_##_name = { \
		.cb = &net_buf_fixed_cb, \
		.alloc_data = (void *)&net_buf_fixed_##_name, \
		.max_alloc_size = _data_size, \
	}; \
	static STRUCT_SECTION_ITERABLE(net_buf_pool, _name) = \
		NET_BUF_POOL_INITIALIZER(_name, &net_buf_fixed_alloc_##_name, \
					 _net_buf_##_name, _count, _ud_size, \
					 _destroy)
/** @cond INTERNAL_HIDDEN */
extern const struct net_buf_data_cb net_buf_var_cb;
/** @endcond */
/**
 *
 * @brief Define a new pool for buffers with variable size payloads
 *
 * Defines a net_buf_pool struct and the necessary memory storage (array of
 * structs) for the needed amount of buffers. After this, the buffers can be
 * accessed from the pool through net_buf_alloc. The pool is defined as a
 * static variable, so if it needs to be exported outside the current module
 * this needs to happen with the help of a separate pointer rather than an
 * extern declaration.
 *
 * The data payload of the buffers will be based on a memory pool from which
 * variable size payloads may be allocated.
 *
 * If provided with a custom destroy callback, this callback is
 * responsible for eventually calling net_buf_destroy() to complete the
 * process of returning the buffer to the pool.
 *
 * @note The backing memory is a k_heap named net_buf_mem_pool_<name>,
 *       sized to @a _data_size bytes.
 *
 * @param _name Name of the pool variable.
 * @param _count Number of buffers in the pool.
 * @param _data_size Total amount of memory available for data payloads.
 * @param _ud_size User data space to reserve per buffer.
 * @param _destroy Optional destroy callback when buffer is freed.
 */
#define NET_BUF_POOL_VAR_DEFINE(_name, _count, _data_size, _ud_size, _destroy) \
	_NET_BUF_ARRAY_DEFINE(_name, _count, _ud_size); \
	K_HEAP_DEFINE(net_buf_mem_pool_##_name, _data_size); \
	static const struct net_buf_data_alloc net_buf_data_alloc_##_name = { \
		.cb = &net_buf_var_cb, \
		.alloc_data = &net_buf_mem_pool_##_name, \
		.max_alloc_size = 0, \
	}; \
	static STRUCT_SECTION_ITERABLE(net_buf_pool, _name) = \
		NET_BUF_POOL_INITIALIZER(_name, &net_buf_data_alloc_##_name, \
					 _net_buf_##_name, _count, _ud_size, \
					 _destroy)
/**
 *
 * @brief Define a new pool for buffers
 *
 * Defines a net_buf_pool struct and the necessary memory storage (array of
 * structs) for the needed amount of buffers. After this, the buffers can be
 * accessed from the pool through net_buf_alloc. The pool is defined as a
 * static variable, so if it needs to be exported outside the current module
 * this needs to happen with the help of a separate pointer rather than an
 * extern declaration.
 *
 * If provided with a custom destroy callback, this callback is
 * responsible for eventually calling net_buf_destroy() to complete the
 * process of returning the buffer to the pool.
 *
 * @see NET_BUF_POOL_FIXED_DEFINE, which this macro expands to.
 *
 * @param _name Name of the pool variable.
 * @param _count Number of buffers in the pool.
 * @param _size Maximum data size for each buffer.
 * @param _ud_size Amount of user data space to reserve.
 * @param _destroy Optional destroy callback when buffer is freed.
 */
#define NET_BUF_POOL_DEFINE(_name, _count, _size, _ud_size, _destroy) \
	NET_BUF_POOL_FIXED_DEFINE(_name, _count, _size, _ud_size, _destroy)
/**
* @brief Looks up a pool based on its ID.
*
* @param id Pool ID (e.g. from buf->pool_id).
*
* @return Pointer to pool.
*/
struct net_buf_pool *net_buf_pool_get(int id);
/**
* @brief Get a zero-based index for a buffer.
*
* This function will translate a buffer into a zero-based index,
* based on its placement in its buffer pool. This can be useful if you
* want to associate an external array of meta-data contexts with the
* buffers of a pool.
*
* @param buf Network buffer.
*
* @return Zero-based index for the buffer.
*/
int net_buf_id(const struct net_buf *buf);
/**
* @brief Allocate a new fixed buffer from a pool.
*
* @note Some types of data allocators do not support
* blocking (such as the HEAP type). In this case it's still possible
* for net_buf_alloc() to fail (return NULL) even if it was given
* K_FOREVER.
*
* @note The timeout value will be overridden to K_NO_WAIT if called from the
* system workqueue.
*
* @param pool Which pool to allocate the buffer from.
* @param timeout Affects the action taken should the pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait until the specified
* timeout.
*
* @return New buffer or NULL if out of buffers.
*/
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf * __must_check net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
k_timeout_t timeout,
const char *func,
int line);
#define net_buf_alloc_fixed(_pool, _timeout) \
net_buf_alloc_fixed_debug(_pool, _timeout, __func__, __LINE__)
#else
struct net_buf * __must_check net_buf_alloc_fixed(struct net_buf_pool *pool,
k_timeout_t timeout);
#endif
/**
* @copydetails net_buf_alloc_fixed
*/
static inline struct net_buf * __must_check net_buf_alloc(struct net_buf_pool *pool,
							  k_timeout_t timeout)
{
	/* Plain alias for the fixed-size allocator. */
	struct net_buf *buf;

	buf = net_buf_alloc_fixed(pool, timeout);

	return buf;
}
/**
* @brief Allocate a new variable length buffer from a pool.
*
* @note Some types of data allocators do not support
* blocking (such as the HEAP type). In this case it's still possible
* for net_buf_alloc() to fail (return NULL) even if it was given
* K_FOREVER.
*
* @note The timeout value will be overridden to K_NO_WAIT if called from the
* system workqueue.
*
* @param pool Which pool to allocate the buffer from.
* @param size Amount of data the buffer must be able to fit.
* @param timeout Affects the action taken should the pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait until the specified
* timeout.
*
* @return New buffer or NULL if out of buffers.
*/
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf * __must_check net_buf_alloc_len_debug(struct net_buf_pool *pool,
size_t size,
k_timeout_t timeout,
const char *func,
int line);
#define net_buf_alloc_len(_pool, _size, _timeout) \
net_buf_alloc_len_debug(_pool, _size, _timeout, __func__, __LINE__)
#else
struct net_buf * __must_check net_buf_alloc_len(struct net_buf_pool *pool,
size_t size,
k_timeout_t timeout);
#endif
/**
* @brief Allocate a new buffer from a pool but with external data pointer.
*
* Allocate a new buffer from a pool, where the data pointer comes from the
* user and not from the pool.
*
* @note Some types of data allocators do not support
* blocking (such as the HEAP type). In this case it's still possible
* for net_buf_alloc() to fail (return NULL) even if it was given
* K_FOREVER.
*
* @note The timeout value will be overridden to K_NO_WAIT if called from the
* system workqueue.
*
* @param pool Which pool to allocate the buffer from.
* @param data External data pointer
 * @param size Amount of data the pointed data buffer is able to fit.
* @param timeout Affects the action taken should the pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait until the specified
* timeout.
*
* @return New buffer or NULL if out of buffers.
*/
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf * __must_check net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
void *data, size_t size,
k_timeout_t timeout,
const char *func, int line);
#define net_buf_alloc_with_data(_pool, _data_, _size, _timeout) \
net_buf_alloc_with_data_debug(_pool, _data_, _size, _timeout, \
__func__, __LINE__)
#else
struct net_buf * __must_check net_buf_alloc_with_data(struct net_buf_pool *pool,
void *data, size_t size,
k_timeout_t timeout);
#endif
/**
* @brief Get a buffer from a FIFO.
*
* @deprecated Use @a k_fifo_get() instead.
*
* @param fifo Which FIFO to take the buffer from.
* @param timeout Affects the action taken should the FIFO be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then wait as
* long as necessary. Otherwise, wait until the specified timeout.
*
* @return New buffer or NULL if the FIFO is empty.
*/
#if defined(CONFIG_NET_BUF_LOG)
__deprecated struct net_buf * __must_check net_buf_get_debug(struct k_fifo *fifo,
k_timeout_t timeout,
const char *func, int line);
#define net_buf_get(_fifo, _timeout) \
net_buf_get_debug(_fifo, _timeout, __func__, __LINE__)
#else
__deprecated struct net_buf * __must_check net_buf_get(struct k_fifo *fifo,
k_timeout_t timeout);
#endif
/**
* @brief Destroy buffer from custom destroy callback
*
* This helper is only intended to be used from custom destroy callbacks.
* If no custom destroy callback is given to NET_BUF_POOL_*_DEFINE() then
* there is no need to use this API.
*
* @param buf Buffer to destroy.
*/
static inline void net_buf_destroy(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	uint8_t *data = buf->__buf;

	if (data != NULL) {
		if ((buf->flags & NET_BUF_EXTERNAL_DATA) == 0U) {
			/* Pool-owned payload: hand it back to the allocator. */
			pool->alloc->cb->unref(buf, data);
		}
		buf->__buf = NULL;
	}

	/* Return the buffer struct itself to the pool's free LIFO. */
	k_lifo_put(&pool->free, buf);
}
/**
* @brief Reset buffer
*
* Reset buffer data and flags so it can be reused for other purposes.
*
* @param buf Buffer to reset.
*/
void net_buf_reset(struct net_buf *buf);
/**
* @brief Initialize buffer with the given headroom.
*
* The buffer is not expected to contain any data when this API is called.
*
* @param buf Buffer to initialize.
* @param reserve How much headroom to reserve.
*/
void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve);
/**
* @brief Put a buffer into a list
*
* @param list Which list to append the buffer to.
* @param buf Buffer.
*/
void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf);
/**
* @brief Get a buffer from a list.
*
* @param list Which list to take the buffer from.
*
 * @return New buffer or NULL if the list is empty.
*/
struct net_buf * __must_check net_buf_slist_get(sys_slist_t *list);
/**
* @brief Put a buffer to the end of a FIFO.
*
* @deprecated Use @a k_fifo_put() instead.
*
* @param fifo Which FIFO to put the buffer to.
* @param buf Buffer.
*/
__deprecated void net_buf_put(struct k_fifo *fifo, struct net_buf *buf);
/**
* @brief Decrements the reference count of a buffer.
*
* The buffer is put back into the pool if the reference count reaches zero.
*
* @param buf A valid pointer on a buffer
*/
#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line);
#define net_buf_unref(_buf) \
net_buf_unref_debug(_buf, __func__, __LINE__)
#else
void net_buf_unref(struct net_buf *buf);
#endif
/**
* @brief Increment the reference count of a buffer.
*
* @param buf A valid pointer on a buffer
*
* @return the buffer newly referenced
*/
struct net_buf * __must_check net_buf_ref(struct net_buf *buf);
/**
* @brief Clone buffer
*
* Duplicate given buffer including any (user) data and headers currently stored.
*
* @param buf A valid pointer on a buffer
* @param timeout Affects the action taken should the pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait until the specified
* timeout.
*
* @return Cloned buffer or NULL if out of buffers.
*/
struct net_buf * __must_check net_buf_clone(struct net_buf *buf,
k_timeout_t timeout);
/**
* @brief Get a pointer to the user data of a buffer.
*
* @param buf A valid pointer on a buffer
*
* @return Pointer to the user data of the buffer.
*/
static inline void * __must_check net_buf_user_data(const struct net_buf *buf)
{
	/* Cast away const on the flexible user_data[] member. */
	void *ud = (void *)buf->user_data;

	return ud;
}
/**
* @brief Copy user data from one to another buffer.
*
 * @param dst A valid pointer to a buffer getting its user data overwritten.
 * @param src A valid pointer to a buffer getting its user data copied. User data size must be
 * equal to or exceed that of @a dst.
*
* @return 0 on success or negative error number on failure.
*/
int net_buf_user_data_copy(struct net_buf *dst, const struct net_buf *src);
/**
* @brief Initialize buffer with the given headroom.
*
* The buffer is not expected to contain any data when this API is called.
*
* @param buf Buffer to initialize.
* @param reserve How much headroom to reserve.
*/
static inline void net_buf_reserve(struct net_buf *buf, size_t reserve)
{
	struct net_buf_simple *simple = &buf->b;

	net_buf_simple_reserve(simple, reserve);
}
/**
* @brief Prepare data to be added at the end of the buffer
*
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param len Number of bytes to increment the length with.
*
* @return The original tail of the buffer.
*/
static inline void *net_buf_add(struct net_buf *buf, size_t len)
{
	struct net_buf_simple *simple = &buf->b;

	return net_buf_simple_add(simple, len);
}
/**
* @brief Copies the given number of bytes to the end of the buffer
*
* Increments the data length of the buffer to account for more data at
* the end.
*
* @param buf Buffer to update.
* @param mem Location of data to be added.
* @param len Length of data to be added
*
* @return The original tail of the buffer.
*/
static inline void *net_buf_add_mem(struct net_buf *buf, const void *mem,
				    size_t len)
{
	struct net_buf_simple *simple = &buf->b;

	return net_buf_simple_add_mem(simple, mem, len);
}
/**
* @brief Add (8-bit) byte at the end of the buffer
*
* Increments the data length of the buffer to account for more data at
* the end.
*
* @param buf Buffer to update.
* @param val byte value to be added.
*
* @return Pointer to the value added
*/
static inline uint8_t *net_buf_add_u8(struct net_buf *buf, uint8_t val)
{
	struct net_buf_simple *simple = &buf->b;

	return net_buf_simple_add_u8(simple, val);
}
/**
* @brief Add 16-bit value at the end of the buffer
*
* Adds 16-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 16-bit value to be added.
*/
static inline void net_buf_add_le16(struct net_buf *buf, uint16_t val)
{
	struct net_buf_simple *simple = &buf->b;

	net_buf_simple_add_le16(simple, val);
}
/**
* @brief Add 16-bit value at the end of the buffer
*
* Adds 16-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 16-bit value to be added.
*/
static inline void net_buf_add_be16(struct net_buf *buf, uint16_t val)
{
net_buf_simple_add_be16(&buf->b, val);
}
/**
* @brief Add 24-bit value at the end of the buffer
*
* Adds 24-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 24-bit value to be added.
*/
static inline void net_buf_add_le24(struct net_buf *buf, uint32_t val)
{
net_buf_simple_add_le24(&buf->b, val);
}
/**
* @brief Add 24-bit value at the end of the buffer
*
* Adds 24-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 24-bit value to be added.
*/
static inline void net_buf_add_be24(struct net_buf *buf, uint32_t val)
{
net_buf_simple_add_be24(&buf->b, val);
}
/**
* @brief Add 32-bit value at the end of the buffer
*
* Adds 32-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 32-bit value to be added.
*/
static inline void net_buf_add_le32(struct net_buf *buf, uint32_t val)
{
net_buf_simple_add_le32(&buf->b, val);
}
/**
* @brief Add 32-bit value at the end of the buffer
*
* Adds 32-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 32-bit value to be added.
*/
static inline void net_buf_add_be32(struct net_buf *buf, uint32_t val)
{
net_buf_simple_add_be32(&buf->b, val);
}
/**
* @brief Add 40-bit value at the end of the buffer
*
* Adds 40-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 40-bit value to be added.
*/
static inline void net_buf_add_le40(struct net_buf *buf, uint64_t val)
{
net_buf_simple_add_le40(&buf->b, val);
}
/**
* @brief Add 40-bit value at the end of the buffer
*
* Adds 40-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 40-bit value to be added.
*/
static inline void net_buf_add_be40(struct net_buf *buf, uint64_t val)
{
net_buf_simple_add_be40(&buf->b, val);
}
/**
* @brief Add 48-bit value at the end of the buffer
*
* Adds 48-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 48-bit value to be added.
*/
static inline void net_buf_add_le48(struct net_buf *buf, uint64_t val)
{
net_buf_simple_add_le48(&buf->b, val);
}
/**
* @brief Add 48-bit value at the end of the buffer
*
* Adds 48-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 48-bit value to be added.
*/
static inline void net_buf_add_be48(struct net_buf *buf, uint64_t val)
{
net_buf_simple_add_be48(&buf->b, val);
}
/**
* @brief Add 64-bit value at the end of the buffer
*
* Adds 64-bit value in little endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 64-bit value to be added.
*/
static inline void net_buf_add_le64(struct net_buf *buf, uint64_t val)
{
net_buf_simple_add_le64(&buf->b, val);
}
/**
* @brief Add 64-bit value at the end of the buffer
*
* Adds 64-bit value in big endian format at the end of buffer.
* Increments the data length of a buffer to account for more data
* at the end.
*
* @param buf Buffer to update.
* @param val 64-bit value to be added.
*/
static inline void net_buf_add_be64(struct net_buf *buf, uint64_t val)
{
net_buf_simple_add_be64(&buf->b, val);
}
/**
 * @brief Remove data from the end of the buffer.
 *
 * Removes data from the end of the buffer by modifying the buffer length.
 *
 * @param buf Buffer to update.
 * @param len Number of bytes to remove.
 *
 * @return New end of the buffer data.
 */
static inline void *net_buf_remove_mem(struct net_buf *buf, size_t len)
{
	return net_buf_simple_remove_mem(&buf->b, len);
}

/**
 * @brief Remove a 8-bit value from the end of the buffer
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 8-bit values.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return The 8-bit removed value.
 */
static inline uint8_t net_buf_remove_u8(struct net_buf *buf)
{
	return net_buf_simple_remove_u8(&buf->b);
}

/**
 * @brief Remove and convert 16 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 16-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 16-bit value converted from little endian to host endian.
 */
static inline uint16_t net_buf_remove_le16(struct net_buf *buf)
{
	return net_buf_simple_remove_le16(&buf->b);
}

/**
 * @brief Remove and convert 16 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 16-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 16-bit value converted from big endian to host endian.
 */
static inline uint16_t net_buf_remove_be16(struct net_buf *buf)
{
	return net_buf_simple_remove_be16(&buf->b);
}

/**
 * @brief Remove and convert 24 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 24-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 24-bit value converted from big endian to host endian.
 */
static inline uint32_t net_buf_remove_be24(struct net_buf *buf)
{
	return net_buf_simple_remove_be24(&buf->b);
}

/**
 * @brief Remove and convert 24 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 24-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 24-bit value converted from little endian to host endian.
 */
static inline uint32_t net_buf_remove_le24(struct net_buf *buf)
{
	return net_buf_simple_remove_le24(&buf->b);
}

/**
 * @brief Remove and convert 32 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 32-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 32-bit value converted from little endian to host endian.
 */
static inline uint32_t net_buf_remove_le32(struct net_buf *buf)
{
	return net_buf_simple_remove_le32(&buf->b);
}

/**
 * @brief Remove and convert 32 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 32-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 32-bit value converted from big endian to host endian.
 */
static inline uint32_t net_buf_remove_be32(struct net_buf *buf)
{
	return net_buf_simple_remove_be32(&buf->b);
}

/**
 * @brief Remove and convert 40 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 40-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 40-bit value converted from little endian to host endian.
 */
static inline uint64_t net_buf_remove_le40(struct net_buf *buf)
{
	return net_buf_simple_remove_le40(&buf->b);
}

/**
 * @brief Remove and convert 40 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 40-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 40-bit value converted from big endian to host endian.
 */
static inline uint64_t net_buf_remove_be40(struct net_buf *buf)
{
	return net_buf_simple_remove_be40(&buf->b);
}

/**
 * @brief Remove and convert 48 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 48-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 48-bit value converted from little endian to host endian.
 */
static inline uint64_t net_buf_remove_le48(struct net_buf *buf)
{
	return net_buf_simple_remove_le48(&buf->b);
}

/**
 * @brief Remove and convert 48 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 48-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 48-bit value converted from big endian to host endian.
 */
static inline uint64_t net_buf_remove_be48(struct net_buf *buf)
{
	return net_buf_simple_remove_be48(&buf->b);
}

/**
 * @brief Remove and convert 64 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 64-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 64-bit value converted from little endian to host endian.
 */
static inline uint64_t net_buf_remove_le64(struct net_buf *buf)
{
	return net_buf_simple_remove_le64(&buf->b);
}

/**
 * @brief Remove and convert 64 bits from the end of the buffer.
 *
 * Same idea as with net_buf_remove_mem(), but a helper for operating on
 * 64-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 64-bit value converted from big endian to host endian.
 */
static inline uint64_t net_buf_remove_be64(struct net_buf *buf)
{
	return net_buf_simple_remove_be64(&buf->b);
}
/**
 * @brief Prepare data to be added at the start of the buffer
 *
 * Modifies the data pointer and buffer length to account for more data
 * in the beginning of the buffer.
 *
 * @param buf Buffer to update.
 * @param len Number of bytes to add to the beginning.
 *
 * @return The new beginning of the buffer data.
 */
static inline void *net_buf_push(struct net_buf *buf, size_t len)
{
	return net_buf_simple_push(&buf->b, len);
}

/**
 * @brief Copies the given number of bytes to the start of the buffer
 *
 * Modifies the data pointer and buffer length to account for more data
 * in the beginning of the buffer.
 *
 * @param buf Buffer to update.
 * @param mem Location of data to be added.
 * @param len Length of data to be added.
 *
 * @return The new beginning of the buffer data.
 */
static inline void *net_buf_push_mem(struct net_buf *buf, const void *mem,
				     size_t len)
{
	return net_buf_simple_push_mem(&buf->b, mem, len);
}

/**
 * @brief Push 8-bit value to the beginning of the buffer
 *
 * Adds 8-bit value to the beginning of the buffer.
 *
 * @param buf Buffer to update.
 * @param val 8-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_u8(struct net_buf *buf, uint8_t val)
{
	net_buf_simple_push_u8(&buf->b, val);
}

/**
 * @brief Push 16-bit value to the beginning of the buffer
 *
 * Adds 16-bit value in little endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 16-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_le16(struct net_buf *buf, uint16_t val)
{
	net_buf_simple_push_le16(&buf->b, val);
}

/**
 * @brief Push 16-bit value to the beginning of the buffer
 *
 * Adds 16-bit value in big endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 16-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_be16(struct net_buf *buf, uint16_t val)
{
	net_buf_simple_push_be16(&buf->b, val);
}

/**
 * @brief Push 24-bit value to the beginning of the buffer
 *
 * Adds 24-bit value in little endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 24-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_le24(struct net_buf *buf, uint32_t val)
{
	net_buf_simple_push_le24(&buf->b, val);
}

/**
 * @brief Push 24-bit value to the beginning of the buffer
 *
 * Adds 24-bit value in big endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 24-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_be24(struct net_buf *buf, uint32_t val)
{
	net_buf_simple_push_be24(&buf->b, val);
}

/**
 * @brief Push 32-bit value to the beginning of the buffer
 *
 * Adds 32-bit value in little endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 32-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_le32(struct net_buf *buf, uint32_t val)
{
	net_buf_simple_push_le32(&buf->b, val);
}

/**
 * @brief Push 32-bit value to the beginning of the buffer
 *
 * Adds 32-bit value in big endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 32-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_be32(struct net_buf *buf, uint32_t val)
{
	net_buf_simple_push_be32(&buf->b, val);
}

/**
 * @brief Push 40-bit value to the beginning of the buffer
 *
 * Adds 40-bit value in little endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 40-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_le40(struct net_buf *buf, uint64_t val)
{
	net_buf_simple_push_le40(&buf->b, val);
}

/**
 * @brief Push 40-bit value to the beginning of the buffer
 *
 * Adds 40-bit value in big endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 40-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_be40(struct net_buf *buf, uint64_t val)
{
	net_buf_simple_push_be40(&buf->b, val);
}

/**
 * @brief Push 48-bit value to the beginning of the buffer
 *
 * Adds 48-bit value in little endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 48-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_le48(struct net_buf *buf, uint64_t val)
{
	net_buf_simple_push_le48(&buf->b, val);
}

/**
 * @brief Push 48-bit value to the beginning of the buffer
 *
 * Adds 48-bit value in big endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 48-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_be48(struct net_buf *buf, uint64_t val)
{
	net_buf_simple_push_be48(&buf->b, val);
}

/**
 * @brief Push 64-bit value to the beginning of the buffer
 *
 * Adds 64-bit value in little endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 64-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_le64(struct net_buf *buf, uint64_t val)
{
	net_buf_simple_push_le64(&buf->b, val);
}

/**
 * @brief Push 64-bit value to the beginning of the buffer
 *
 * Adds 64-bit value in big endian format to the beginning of the
 * buffer.
 *
 * @param buf Buffer to update.
 * @param val 64-bit value to be pushed to the buffer.
 */
static inline void net_buf_push_be64(struct net_buf *buf, uint64_t val)
{
	net_buf_simple_push_be64(&buf->b, val);
}
/**
 * @brief Remove data from the beginning of the buffer.
 *
 * Removes data from the beginning of the buffer by modifying the data
 * pointer and buffer length.
 *
 * @param buf Buffer to update.
 * @param len Number of bytes to remove.
 *
 * @return New beginning of the buffer data.
 */
static inline void *net_buf_pull(struct net_buf *buf, size_t len)
{
	return net_buf_simple_pull(&buf->b, len);
}

/**
 * @brief Remove data from the beginning of the buffer.
 *
 * Removes data from the beginning of the buffer by modifying the data
 * pointer and buffer length.
 *
 * @param buf Buffer to update.
 * @param len Number of bytes to remove.
 *
 * @return Pointer to the old beginning of the buffer data.
 */
static inline void *net_buf_pull_mem(struct net_buf *buf, size_t len)
{
	return net_buf_simple_pull_mem(&buf->b, len);
}

/**
 * @brief Remove a 8-bit value from the beginning of the buffer
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 8-bit values.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return The 8-bit removed value.
 */
static inline uint8_t net_buf_pull_u8(struct net_buf *buf)
{
	return net_buf_simple_pull_u8(&buf->b);
}

/**
 * @brief Remove and convert 16 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 16-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 16-bit value converted from little endian to host endian.
 */
static inline uint16_t net_buf_pull_le16(struct net_buf *buf)
{
	return net_buf_simple_pull_le16(&buf->b);
}

/**
 * @brief Remove and convert 16 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 16-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 16-bit value converted from big endian to host endian.
 */
static inline uint16_t net_buf_pull_be16(struct net_buf *buf)
{
	return net_buf_simple_pull_be16(&buf->b);
}

/**
 * @brief Remove and convert 24 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 24-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 24-bit value converted from little endian to host endian.
 */
static inline uint32_t net_buf_pull_le24(struct net_buf *buf)
{
	return net_buf_simple_pull_le24(&buf->b);
}

/**
 * @brief Remove and convert 24 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 24-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 24-bit value converted from big endian to host endian.
 */
static inline uint32_t net_buf_pull_be24(struct net_buf *buf)
{
	return net_buf_simple_pull_be24(&buf->b);
}

/**
 * @brief Remove and convert 32 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 32-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 32-bit value converted from little endian to host endian.
 */
static inline uint32_t net_buf_pull_le32(struct net_buf *buf)
{
	return net_buf_simple_pull_le32(&buf->b);
}

/**
 * @brief Remove and convert 32 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 32-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 32-bit value converted from big endian to host endian.
 */
static inline uint32_t net_buf_pull_be32(struct net_buf *buf)
{
	return net_buf_simple_pull_be32(&buf->b);
}

/**
 * @brief Remove and convert 40 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 40-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 40-bit value converted from little endian to host endian.
 */
static inline uint64_t net_buf_pull_le40(struct net_buf *buf)
{
	return net_buf_simple_pull_le40(&buf->b);
}

/**
 * @brief Remove and convert 40 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 40-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 40-bit value converted from big endian to host endian.
 */
static inline uint64_t net_buf_pull_be40(struct net_buf *buf)
{
	return net_buf_simple_pull_be40(&buf->b);
}

/**
 * @brief Remove and convert 48 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 48-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 48-bit value converted from little endian to host endian.
 */
static inline uint64_t net_buf_pull_le48(struct net_buf *buf)
{
	return net_buf_simple_pull_le48(&buf->b);
}

/**
 * @brief Remove and convert 48 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 48-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 48-bit value converted from big endian to host endian.
 */
static inline uint64_t net_buf_pull_be48(struct net_buf *buf)
{
	return net_buf_simple_pull_be48(&buf->b);
}

/**
 * @brief Remove and convert 64 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 64-bit little endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 64-bit value converted from little endian to host endian.
 */
static inline uint64_t net_buf_pull_le64(struct net_buf *buf)
{
	return net_buf_simple_pull_le64(&buf->b);
}

/**
 * @brief Remove and convert 64 bits from the beginning of the buffer.
 *
 * Same idea as with net_buf_pull(), but a helper for operating on
 * 64-bit big endian data.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return 64-bit value converted from big endian to host endian.
 */
static inline uint64_t net_buf_pull_be64(struct net_buf *buf)
{
	return net_buf_simple_pull_be64(&buf->b);
}
/**
 * @brief Check buffer tailroom.
 *
 * Check how much free space there is at the end of the buffer.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return Number of bytes available at the end of the buffer.
 */
static inline size_t net_buf_tailroom(const struct net_buf *buf)
{
	return net_buf_simple_tailroom(&buf->b);
}

/**
 * @brief Check buffer headroom.
 *
 * Check how much free space there is in the beginning of the buffer.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return Number of bytes available in the beginning of the buffer.
 */
static inline size_t net_buf_headroom(const struct net_buf *buf)
{
	return net_buf_simple_headroom(&buf->b);
}

/**
 * @brief Check maximum net_buf::len value.
 *
 * This value is depending on the number of bytes being reserved as headroom.
 *
 * @param buf A valid pointer on a buffer.
 *
 * @return Number of bytes usable behind the net_buf::data pointer.
 */
static inline uint16_t net_buf_max_len(const struct net_buf *buf)
{
	return net_buf_simple_max_len(&buf->b);
}

/**
 * @brief Get the tail pointer for a buffer.
 *
 * Get a pointer to the end of the data in a buffer.
 *
 * @param buf Buffer.
 *
 * @return Tail pointer for the buffer.
 */
static inline uint8_t *net_buf_tail(const struct net_buf *buf)
{
	return net_buf_simple_tail(&buf->b);
}
/**
 * @brief Find the last fragment in the fragment list.
 *
 * @param frags Head of the fragment list.
 *
 * @return Pointer to last fragment in the list.
 */
struct net_buf *net_buf_frag_last(struct net_buf *frags);

/**
 * @brief Insert a new fragment to a chain of bufs.
 *
 * Insert a new fragment into the buffer fragments list after the parent.
 *
 * Note: This function takes ownership of the fragment reference so the
 * caller is not required to unref.
 *
 * @param parent Parent buffer/fragment.
 * @param frag Fragment to insert.
 */
void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag);

/**
 * @brief Add a new fragment to the end of a chain of bufs.
 *
 * Append a new fragment into the buffer fragments list.
 *
 * Note: This function takes ownership of the fragment reference so the
 * caller is not required to unref.
 *
 * @param head Head of the fragment chain.
 * @param frag Fragment to add.
 *
 * @return New head of the fragment chain. Either head (if head
 *         was non-NULL) or frag (if head was NULL).
 */
struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag);

/**
 * @brief Delete existing fragment from a chain of bufs.
 *
 * @param parent Parent buffer/fragment, or NULL if there is no parent.
 * @param frag Fragment to delete.
 *
 * @return Pointer to the buffer following the fragment, or NULL if it
 *         had no further fragments.
 */
#if defined(CONFIG_NET_BUF_LOG)
/* Debug variant records the calling site for buffer-tracking logs. */
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
				       struct net_buf *frag,
				       const char *func, int line);
#define net_buf_frag_del(_parent, _frag) \
	net_buf_frag_del_debug(_parent, _frag, __func__, __LINE__)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag);
#endif
/**
 * @brief Copy bytes from net_buf chain starting at offset to linear buffer
 *
 * Copy (extract) @a len bytes from @a src net_buf chain, starting from @a
 * offset in it, to a linear buffer @a dst. Return number of bytes actually
 * copied, which may be less than requested, if net_buf chain doesn't have
 * enough data, or destination buffer is too small.
 *
 * @param dst Destination buffer
 * @param dst_len Destination buffer length
 * @param src Source net_buf chain
 * @param offset Starting offset to copy from
 * @param len Number of bytes to copy
 * @return number of bytes actually copied
 */
size_t net_buf_linearize(void *dst, size_t dst_len,
			 const struct net_buf *src, size_t offset, size_t len);

/**
 * @typedef net_buf_allocator_cb
 * @brief Network buffer allocator callback.
 *
 * @details The allocator callback is called when net_buf_append_bytes
 * needs to allocate a new net_buf.
 *
 * @param timeout Affects the action taken should the net buf pool be empty.
 *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
 *        wait as long as necessary. Otherwise, wait until the specified
 *        timeout.
 * @param user_data The user data given in net_buf_append_bytes call.
 * @return pointer to allocated net_buf or NULL on error.
 */
typedef struct net_buf * __must_check (*net_buf_allocator_cb)(k_timeout_t timeout,
							      void *user_data);

/**
 * @brief Append data to a list of net_buf
 *
 * @details Append data to a net_buf. If there is not enough space in the
 * net_buf then more net_buf will be added, unless there are no free net_buf
 * and timeout occurs. If no allocator is provided it attempts to allocate from
 * the same pool as the original buffer.
 *
 * @param buf Network buffer.
 * @param len Total length of input data
 * @param value Data to be added
 * @param timeout Timeout is passed to the net_buf allocator callback.
 * @param allocate_cb When a new net_buf is required, use this callback.
 * @param user_data A user data pointer to be supplied to the allocate_cb.
 *        This pointer can be anything from a mem_pool or a net_pkt, the
 *        logic is left up to the allocate_cb function.
 *
 * @return Length of data actually added. This may be less than input
 *         length if other timeout than K_FOREVER was used, and there
 *         were no free fragments in a pool to accommodate all data.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
			    const void *value, k_timeout_t timeout,
			    net_buf_allocator_cb allocate_cb, void *user_data);

/**
 * @brief Match data with a net_buf's content
 *
 * @details Compare data with a content of a net_buf. Provide information about
 * the number of bytes matching between both. If needed, traverse
 * through multiple buffer fragments.
 *
 * @param buf Network buffer
 * @param offset Starting offset to compare from
 * @param data Data buffer for comparison
 * @param len Number of bytes to compare
 *
 * @return The number of bytes compared before the first difference.
 */
size_t net_buf_data_match(const struct net_buf *buf, size_t offset, const void *data, size_t len);
/**
 * @brief Skip N number of bytes in a net_buf
 *
 * @details Skip N number of bytes starting from fragment's offset. If the total
 * length of data is placed in multiple fragments, this function will skip from
 * all fragments until it reaches N number of bytes. Any fully skipped buffers
 * are removed from the net_buf list.
 *
 * @param buf Network buffer.
 * @param len Total length of data to be skipped.
 *
 * @return Pointer to the first buffer that still contains data after the
 *         skip, or NULL if the whole chain was consumed (or @a buf was
 *         NULL to begin with).
 */
static inline struct net_buf *net_buf_skip(struct net_buf *buf, size_t len)
{
	/* Pull one byte at a time; when a fragment is drained, unlink and
	 * unref it via net_buf_frag_del() and continue with the next one.
	 */
	while (buf && len--) {
		net_buf_pull_u8(buf);
		if (!buf->len) {
			buf = net_buf_frag_del(NULL, buf);
		}
	}
	return buf;
}
/**
* @brief Calculate amount of bytes stored in fragments.
*
* Calculates the total amount of data stored in the given buffer and the
* fragments linked to it.
*
* @param buf Buffer to start off with.
*
* @return Number of bytes in the buffer and its fragments.
*/
static inline size_t net_buf_frags_len(const struct net_buf *buf)
{
size_t bytes = 0;
while (buf) {
bytes += buf->len;
buf = buf->frags;
}
return bytes;
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_BUF_H_ */
``` | /content/code_sandbox/include/zephyr/net/buf.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 19,964 |
```objective-c
/*
*
*/
/** @file
* @brief CAN bus socket API definitions.
*/
#ifndef ZEPHYR_INCLUDE_NET_CANBUS_H_
#define ZEPHYR_INCLUDE_NET_CANBUS_H_
#include <zephyr/types.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_if.h>
#include <zephyr/drivers/can.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* CAN L2 network driver API.
*/
struct canbus_api {
	/**
	 * The net_if_api must be placed in first position in this
	 * struct so that we are compatible with network interface API.
	 */
	struct net_if_api iface_api;
	/** Send a CAN packet by socket */
	int (*send)(const struct device *dev, struct net_pkt *pkt);
	/** Close the related CAN socket */
	void (*close)(const struct device *dev, int filter_id);
	/** Set socket CAN option */
	int (*setsockopt)(const struct device *dev, void *obj, int level,
			  int optname,
			  const void *optval, socklen_t optlen);
	/** Get socket CAN option */
	/* NOTE(review): optval is const-qualified here even though a
	 * getsockopt-style call conventionally writes the option value back
	 * through optval -- confirm implementations cast away const or that
	 * the const qualifier is intentional.
	 */
	int (*getsockopt)(const struct device *dev, void *obj, int level,
			  int optname,
			  const void *optval, socklen_t *optlen);
};
/* Make sure that the network interface API is properly setup inside
 * CANBUS API struct (it is the first one).
 */
BUILD_ASSERT(offsetof(struct canbus_api, iface_api) == 0);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_CANBUS_H_ */
``` | /content/code_sandbox/include/zephyr/net/canbus.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 341 |
```objective-c
/*
*
*/
/**
* @file
* @brief Public API for offloading IP stack
*/
#ifndef ZEPHYR_INCLUDE_NET_NET_OFFLOAD_H_
#define ZEPHYR_INCLUDE_NET_NET_OFFLOAD_H_
/**
* @brief Network offloading interface
* @defgroup net_offload Network Offloading Interface
* @since 1.7
* @version 0.8.0
* @ingroup networking
* @{
*/
#include <zephyr/net/buf.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_context.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(CONFIG_NET_OFFLOAD)
/** @cond INTERNAL_HIDDEN */
/* Convert a kernel timeout to the int32_t millisecond encoding used by
 * the offload API: 0 means "no wait", -1 means "wait forever", any other
 * value is the timeout in milliseconds.
 */
static inline int32_t timeout_to_int32(k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		return 0;
	}

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return -1;
	}

	return k_ticks_to_ms_floor32(timeout.ticks);
}
/** @endcond */
/** For return parameters and return values of the elements in this
 * struct, see similarly named functions in net_context.h
 */
struct net_offload {
	/**
	 * This function is called when the socket is to be opened.
	 */
	int (*get)(sa_family_t family,
		   enum net_sock_type type,
		   enum net_ip_protocol ip_proto,
		   struct net_context **context);

	/**
	 * This function is called when user wants to bind to local IP address.
	 */
	int (*bind)(struct net_context *context,
		    const struct sockaddr *addr,
		    socklen_t addrlen);

	/**
	 * This function is called when user wants to mark the socket
	 * to be a listening one.
	 */
	int (*listen)(struct net_context *context, int backlog);

	/**
	 * This function is called when user wants to create a connection
	 * to a peer host.
	 */
	int (*connect)(struct net_context *context,
		       const struct sockaddr *addr,
		       socklen_t addrlen,
		       net_context_connect_cb_t cb,
		       int32_t timeout,
		       void *user_data);

	/**
	 * This function is called when user wants to accept a connection
	 * being established.
	 */
	int (*accept)(struct net_context *context,
		      net_tcp_accept_cb_t cb,
		      int32_t timeout,
		      void *user_data);

	/**
	 * This function is called when user wants to send data to peer host.
	 */
	int (*send)(struct net_pkt *pkt,
		    net_context_send_cb_t cb,
		    int32_t timeout,
		    void *user_data);

	/**
	 * This function is called when user wants to send data to peer host
	 * using the given destination address.
	 */
	int (*sendto)(struct net_pkt *pkt,
		      const struct sockaddr *dst_addr,
		      socklen_t addrlen,
		      net_context_send_cb_t cb,
		      int32_t timeout,
		      void *user_data);

	/**
	 * This function is called when user wants to receive data from peer
	 * host.
	 */
	int (*recv)(struct net_context *context,
		    net_context_recv_cb_t cb,
		    int32_t timeout,
		    void *user_data);

	/**
	 * This function is called when user wants to close the socket.
	 */
	int (*put)(struct net_context *context);
};
/**
 * @brief Get a network socket/context from the offloaded IP stack.
 *
 * @details A network socket defines the connection 5-tuple (protocol,
 * remote address, remote port, source address and source port). This
 * is similar to the BSD socket() function.
 *
 * @param iface Network interface where the offloaded IP stack can be
 * reached.
 * @param family IP address family (AF_INET or AF_INET6)
 * @param type Type of the socket, SOCK_STREAM or SOCK_DGRAM
 * @param ip_proto IP protocol, IPPROTO_UDP or IPPROTO_TCP
 * @param context The allocated context is returned to the caller.
 *
 * @return 0 if ok, < 0 if error
 */
static inline int net_offload_get(struct net_if *iface,
				  sa_family_t family,
				  enum net_sock_type type,
				  enum net_ip_protocol ip_proto,
				  struct net_context **context)
{
	struct net_offload *api;

	NET_ASSERT(iface);

	api = net_if_offload(iface);
	NET_ASSERT(api);
	NET_ASSERT(api->get);

	return api->get(family, type, ip_proto, context);
}
/**
 * @brief Assign a socket a local address.
 *
 * @details This is similar to the BSD bind() function.
 *
 * @param iface Network interface where the offloaded IP stack can be
 * reached.
 * @param context The context to be assigned.
 * @param addr Address to be assigned.
 * @param addrlen Length of the address.
 *
 * @return 0 if ok, < 0 if error
 */
static inline int net_offload_bind(struct net_if *iface,
				   struct net_context *context,
				   const struct sockaddr *addr,
				   socklen_t addrlen)
{
	struct net_offload *api;

	NET_ASSERT(iface);

	api = net_if_offload(iface);
	NET_ASSERT(api);
	NET_ASSERT(api->bind);

	return api->bind(context, addr, addrlen);
}
/**
* @brief Mark the context as a listening one.
*
* @details This is similar as BSD listen() function.
*
* @param iface Network interface where the offloaded IP stack can be
* reached.
* @param context The context to use.
* @param backlog The size of the pending connections backlog.
*
* @return 0 if ok, < 0 if error
*/
static inline int net_offload_listen(struct net_if *iface,
struct net_context *context,
int backlog)
{
NET_ASSERT(iface);
NET_ASSERT(net_if_offload(iface));
NET_ASSERT(net_if_offload(iface)->listen);
return net_if_offload(iface)->listen(context, backlog);
}
/**
 * @brief Create a network connection.
 *
 * @details Create a network connection to the host specified by addr.
 * After the connection is established, the user-supplied callback (cb)
 * is executed. cb is called even if the timeout was set to K_FOREVER.
 * cb is not called if the timeout expires. For datagram sockets
 * (SOCK_DGRAM), this function only sets the peer address.
 * This function is similar to the BSD connect() function.
 *
 * @param iface Network interface where the offloaded IP stack can be
 * reached.
 * @param context The network context.
 * @param addr The peer address to connect to.
 * @param addrlen Peer address length.
 * @param cb Callback function. Set to NULL if not required.
 * @param timeout The timeout value for the connection. Possible values:
 * * K_NO_WAIT: this function will return immediately,
 * * K_FOREVER: this function will block until the
 *   connection is established,
 * * >0: this function will wait the specified ms.
 * @param user_data Data passed to the callback function.
 *
 * @return 0 on success.
 * @return -EINVAL if an invalid parameter is passed as an argument.
 * @return -ENOTSUP if the operation is not supported or implemented.
 */
static inline int net_offload_connect(struct net_if *iface,
				      struct net_context *context,
				      const struct sockaddr *addr,
				      socklen_t addrlen,
				      net_context_connect_cb_t cb,
				      k_timeout_t timeout,
				      void *user_data)
{
	struct net_offload *api;

	NET_ASSERT(iface);

	api = net_if_offload(iface);
	NET_ASSERT(api);
	NET_ASSERT(api->connect);

	return api->connect(context, addr, addrlen, cb,
			    timeout_to_int32(timeout), user_data);
}
/**
 * @brief Accept a network connection attempt.
 *
 * @details Accept a connection being established. This function
 * will return immediately if the timeout is set to K_NO_WAIT.
 * In this case the context will call the supplied callback whenever
 * there is a connection established to this context. This is a
 * "register handler and forget" type of call (async).
 * If the timeout is set to K_FOREVER, the function will wait
 * until the connection is established. Timeout value > 0 will wait as
 * many ms.
 * After the connection is established a caller-supplied callback is called.
 * The callback is called even if timeout was set to K_FOREVER, the
 * callback is called before this function will return in this case.
 * The callback is not called if the timeout expires.
 * This is similar to the BSD accept() function.
 *
 * @param iface Network interface where the offloaded IP stack can be
 * reached.
 * @param context The context to use.
 * @param cb Caller-supplied callback function.
 * @param timeout Timeout for the connection. Possible values
 * are K_FOREVER, K_NO_WAIT, >0.
 * @param user_data Caller-supplied user data.
 *
 * @return 0 if ok, < 0 if error
 */
static inline int net_offload_accept(struct net_if *iface,
				     struct net_context *context,
				     net_tcp_accept_cb_t cb,
				     k_timeout_t timeout,
				     void *user_data)
{
	struct net_offload *api;

	NET_ASSERT(iface);

	api = net_if_offload(iface);
	NET_ASSERT(api);
	NET_ASSERT(api->accept);

	return api->accept(context, cb, timeout_to_int32(timeout), user_data);
}
/**
 * @brief Send a network packet to a peer.
 *
 * @details This function can be used to send network data to a peer
 * connection. This function will return immediately if the timeout
 * is set to K_NO_WAIT. If the timeout is set to K_FOREVER, the function
 * will wait until the network packet is sent. Timeout value > 0 will
 * wait as many ms. After the network packet is sent,
 * a caller-supplied callback is called. The callback is called even
 * if timeout was set to K_FOREVER, the callback is called
 * before this function will return in this case. The callback is not
 * called if the timeout expires. For context of type SOCK_DGRAM,
 * the destination address must have been set by the call to
 * net_context_connect().
 * This is similar to the BSD send() function.
 *
 * @param iface Network interface where the offloaded IP stack can be
 * reached.
 * @param pkt The network packet to send.
 * @param cb Caller-supplied callback function.
 * @param timeout Timeout for the connection. Possible values
 * are K_FOREVER, K_NO_WAIT, >0.
 * @param user_data Caller-supplied user data.
 *
 * @return 0 if ok, < 0 if error
 */
static inline int net_offload_send(struct net_if *iface,
				   struct net_pkt *pkt,
				   net_context_send_cb_t cb,
				   k_timeout_t timeout,
				   void *user_data)
{
	struct net_offload *api;

	NET_ASSERT(iface);

	api = net_if_offload(iface);
	NET_ASSERT(api);
	NET_ASSERT(api->send);

	return api->send(pkt, cb, timeout_to_int32(timeout), user_data);
}
/**
 * @brief Send a network packet to a peer specified by address.
 *
 * @details This function can be used to send network data to a peer
 * specified by address. This variant can only be used for datagram
 * connections of type SOCK_DGRAM. This function will return immediately
 * if the timeout is set to K_NO_WAIT. If the timeout is set to K_FOREVER,
 * the function will wait until the network packet is sent. Timeout
 * value > 0 will wait as many ms. After the network packet
 * is sent, a caller-supplied callback is called. The callback is called
 * even if timeout was set to K_FOREVER, the callback is called
 * before this function will return. The callback is not called if the
 * timeout expires.
 * This is similar to the BSD sendto() function.
 *
 * @param iface Network interface where the offloaded IP stack can be
 * reached.
 * @param pkt The network packet to send.
 * @param dst_addr Destination address. This will override the address
 * already set in network packet.
 * @param addrlen Length of the address.
 * @param cb Caller-supplied callback function.
 * @param timeout Timeout for the connection. Possible values
 * are K_FOREVER, K_NO_WAIT, >0.
 * @param user_data Caller-supplied user data.
 *
 * @return 0 if ok, < 0 if error
 */
static inline int net_offload_sendto(struct net_if *iface,
				     struct net_pkt *pkt,
				     const struct sockaddr *dst_addr,
				     socklen_t addrlen,
				     net_context_send_cb_t cb,
				     k_timeout_t timeout,
				     void *user_data)
{
	struct net_offload *api;

	NET_ASSERT(iface);

	api = net_if_offload(iface);
	NET_ASSERT(api);
	NET_ASSERT(api->sendto);

	return api->sendto(pkt, dst_addr, addrlen, cb,
			   timeout_to_int32(timeout), user_data);
}
/**
 * @brief Receive network data from a peer specified by context.
 *
 * @details This function can be used to register a callback function
 * that is called by the network stack when network data has been received
 * for this context. As this function registers a callback, there
 * is no need to call this function multiple times if timeout is set to
 * K_NO_WAIT.
 * If callback function or user data changes, then the function can be called
 * multiple times to register new values.
 * This function will return immediately if the timeout is set to K_NO_WAIT.
 * If the timeout is set to K_FOREVER, the function will wait until the
 * network packet is received. Timeout value > 0 will wait as many ms.
 * After the network packet is received, a caller-supplied callback is
 * called. The callback is called even if timeout was set to K_FOREVER,
 * the callback is called before this function will return in this case.
 * The callback is not called if the timeout expires. The timeout functionality
 * can be compiled out if synchronous behavior is not needed. The sync call
 * logic requires some memory that can be saved if only async way of call is
 * used. If CONFIG_NET_CONTEXT_SYNC_RECV is not set, then the timeout parameter
 * value is ignored.
 * This is similar to the BSD recv() function.
 *
 * @param iface Network interface where the offloaded IP stack can be
 * reached.
 * @param context The network context to use.
 * @param cb Caller-supplied callback function.
 * @param timeout Caller-supplied timeout. Possible values
 * are K_FOREVER, K_NO_WAIT, >0.
 * @param user_data Caller-supplied user data.
 *
 * @return 0 if ok, < 0 if error
 */
static inline int net_offload_recv(struct net_if *iface,
				   struct net_context *context,
				   net_context_recv_cb_t cb,
				   k_timeout_t timeout,
				   void *user_data)
{
	struct net_offload *api;

	NET_ASSERT(iface);

	api = net_if_offload(iface);
	NET_ASSERT(api);
	NET_ASSERT(api->recv);

	return api->recv(context, cb, timeout_to_int32(timeout), user_data);
}
/**
* @brief Free/close a network context.
*
* @details This releases the context. It is not possible to
* send or receive data via this context after this call.
* This is similar as BSD shutdown() function.
*
* @param iface Network interface where the offloaded IP stack can be
* reached.
* @param context The context to be closed.
*
* @return 0 if ok, < 0 if error
*/
static inline int net_offload_put(struct net_if *iface,
struct net_context *context)
{
NET_ASSERT(iface);
NET_ASSERT(net_if_offload(iface));
NET_ASSERT(net_if_offload(iface)->put);
return net_if_offload(iface)->put(context);
}
#else
/** @cond INTERNAL_HIDDEN */
/* CONFIG_NET_OFFLOAD is not enabled: provide no-op stubs so that
 * callers can be compiled unconditionally. The (void) casts mark the
 * parameters as intentionally unused, silencing -Wunused-parameter
 * warnings in consumers built with -Wextra (same intent as the
 * ARG_UNUSED() markers used by the hostname stubs).
 */
static inline int net_offload_get(struct net_if *iface,
				  sa_family_t family,
				  enum net_sock_type type,
				  enum net_ip_protocol ip_proto,
				  struct net_context **context)
{
	(void)iface;
	(void)family;
	(void)type;
	(void)ip_proto;
	(void)context;

	return 0;
}
static inline int net_offload_bind(struct net_if *iface,
				   struct net_context *context,
				   const struct sockaddr *addr,
				   socklen_t addrlen)
{
	(void)iface;
	(void)context;
	(void)addr;
	(void)addrlen;

	return 0;
}
static inline int net_offload_listen(struct net_if *iface,
				     struct net_context *context,
				     int backlog)
{
	(void)iface;
	(void)context;
	(void)backlog;

	return 0;
}
static inline int net_offload_connect(struct net_if *iface,
				      struct net_context *context,
				      const struct sockaddr *addr,
				      socklen_t addrlen,
				      net_context_connect_cb_t cb,
				      k_timeout_t timeout,
				      void *user_data)
{
	(void)iface;
	(void)context;
	(void)addr;
	(void)addrlen;
	(void)cb;
	(void)timeout;
	(void)user_data;

	return 0;
}
static inline int net_offload_accept(struct net_if *iface,
				     struct net_context *context,
				     net_tcp_accept_cb_t cb,
				     k_timeout_t timeout,
				     void *user_data)
{
	(void)iface;
	(void)context;
	(void)cb;
	(void)timeout;
	(void)user_data;

	return 0;
}
static inline int net_offload_send(struct net_if *iface,
				   struct net_pkt *pkt,
				   net_context_send_cb_t cb,
				   k_timeout_t timeout,
				   void *user_data)
{
	(void)iface;
	(void)pkt;
	(void)cb;
	(void)timeout;
	(void)user_data;

	return 0;
}
static inline int net_offload_sendto(struct net_if *iface,
				     struct net_pkt *pkt,
				     const struct sockaddr *dst_addr,
				     socklen_t addrlen,
				     net_context_send_cb_t cb,
				     k_timeout_t timeout,
				     void *user_data)
{
	(void)iface;
	(void)pkt;
	(void)dst_addr;
	(void)addrlen;
	(void)cb;
	(void)timeout;
	(void)user_data;

	return 0;
}
static inline int net_offload_recv(struct net_if *iface,
				   struct net_context *context,
				   net_context_recv_cb_t cb,
				   k_timeout_t timeout,
				   void *user_data)
{
	(void)iface;
	(void)context;
	(void)cb;
	(void)timeout;
	(void)user_data;

	return 0;
}
static inline int net_offload_put(struct net_if *iface,
				  struct net_context *context)
{
	(void)iface;
	(void)context;

	return 0;
}
/** @endcond */
#endif /* CONFIG_NET_OFFLOAD */
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_NET_NET_OFFLOAD_H_ */
``` | /content/code_sandbox/include/zephyr/net/net_offload.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,901 |
```objective-c
/** @file
* @brief Hostname configuration definitions
*/
/*
*
*/
#ifndef ZEPHYR_INCLUDE_NET_HOSTNAME_H_
#define ZEPHYR_INCLUDE_NET_HOSTNAME_H_
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Network hostname configuration library
* @defgroup net_hostname Network Hostname Library
* @since 1.10
* @version 0.8.0
* @ingroup networking
* @{
*/
#if defined(CONFIG_NET_HOSTNAME_MAX_LEN)
/** Maximum hostname length. Uses the user-configured value, but never
 * less than what the default hostname (plus the unique postfix, when
 * CONFIG_NET_HOSTNAME_UNIQUE is enabled) requires.
 */
#define NET_HOSTNAME_MAX_LEN \
	MAX(CONFIG_NET_HOSTNAME_MAX_LEN, \
	    (sizeof(CONFIG_NET_HOSTNAME) - 1 + \
	     (IS_ENABLED(CONFIG_NET_HOSTNAME_UNIQUE) ? sizeof("0011223344556677") - 1 : 0)))
#else
/** Maximum hostname length */
#define NET_HOSTNAME_MAX_LEN \
	(sizeof(CONFIG_NET_HOSTNAME) - 1 + \
	 (IS_ENABLED(CONFIG_NET_HOSTNAME_UNIQUE) ? sizeof("0011223344556677") - 1 : 0))
#endif
/** @cond INTERNAL_HIDDEN */
#if defined(CONFIG_NET_HOSTNAME_ENABLE)
/* Buffer size needed to store the hostname including the NUL
 * terminator. Parenthesized so the macro expands safely inside
 * larger expressions (e.g. NET_HOSTNAME_SIZE * 2).
 */
#define NET_HOSTNAME_SIZE (NET_HOSTNAME_MAX_LEN + 1)
#else
#define NET_HOSTNAME_SIZE 1
#endif
/** @endcond */
/**
 * @brief Get the device hostname
 *
 * @details Return pointer to device hostname.
 *
 * @return Pointer to hostname or NULL if not set.
 */
#if defined(CONFIG_NET_HOSTNAME_ENABLE)
const char *net_hostname_get(void);
#else
static inline const char *net_hostname_get(void)
{
	/* Hostname support disabled: return a fixed default so callers
	 * always get a valid, NUL-terminated string (never NULL here).
	 */
	return "zephyr";
}
#endif /* CONFIG_NET_HOSTNAME_ENABLE */
/**
 * @brief Set the device hostname
 *
 * @param host new hostname as char array.
 * @param len Length of the hostname array.
 *
 * @return 0 if ok, <0 on error
 */
#if defined(CONFIG_NET_HOSTNAME_DYNAMIC)
int net_hostname_set(char *host, size_t len);
#else
static inline int net_hostname_set(char *host, size_t len)
{
	/* Dynamic hostname support disabled: parameters are
	 * intentionally unused. The casts keep -Wunused-parameter
	 * quiet, matching the other stubs in this file.
	 */
	(void)host;
	(void)len;

	return -ENOTSUP;
}
#endif
/**
 * @brief Initialize and set the device hostname.
 *
 */
#if defined(CONFIG_NET_HOSTNAME_ENABLE)
void net_hostname_init(void);
#else
static inline void net_hostname_init(void)
{
	/* No-op when hostname support is disabled. */
}
#endif /* CONFIG_NET_HOSTNAME_ENABLE */
/**
 * @brief Set the device hostname postfix
 *
 * @details Convert the hostname postfix to hexadecimal value and set the
 * device hostname with the converted value. This is only used if
 * CONFIG_NET_HOSTNAME_UNIQUE is set.
 *
 * @param hostname_postfix Usually link address. The function will convert this
 * to a hexadecimal string.
 * @param postfix_len Length of the hostname_postfix array.
 *
 * @return 0 if ok, <0 if error
 */
#if defined(CONFIG_NET_HOSTNAME_UNIQUE)
int net_hostname_set_postfix(const uint8_t *hostname_postfix,
			     int postfix_len);
#else
static inline int net_hostname_set_postfix(const uint8_t *hostname_postfix,
					   int postfix_len)
{
	ARG_UNUSED(hostname_postfix);
	ARG_UNUSED(postfix_len);
	/* NOTE(review): the stub returns -EMSGSIZE rather than -ENOTSUP;
	 * confirm callers depend on this exact value before changing it.
	 */
	return -EMSGSIZE;
}
#endif /* CONFIG_NET_HOSTNAME_UNIQUE */
/**
 * @brief Set the postfix string for the network hostname.
 *
 * @details Set the hostname postfix string for the network hostname as is,
 * without any conversion. This is only used if CONFIG_NET_HOSTNAME_UNIQUE
 * is set. The function checks that the combined length of the default
 * hostname (defined by CONFIG_NET_HOSTNAME) and the postfix does not exceed
 * NET_HOSTNAME_MAX_LEN. If the postfix is too long, the function returns an
 * error.
 *
 * @param hostname_postfix Pointer to the postfix string to be appended to the network hostname.
 * @param postfix_len Length of the hostname_postfix array.
 *
 * @return 0 if ok, <0 if error
 */
#if defined(CONFIG_NET_HOSTNAME_UNIQUE)
int net_hostname_set_postfix_str(const uint8_t *hostname_postfix,
				 int postfix_len);
#else
static inline int net_hostname_set_postfix_str(const uint8_t *hostname_postfix,
					       int postfix_len)
{
	ARG_UNUSED(hostname_postfix);
	ARG_UNUSED(postfix_len);
	/* Stub mirrors net_hostname_set_postfix(): returns -EMSGSIZE
	 * when CONFIG_NET_HOSTNAME_UNIQUE is disabled.
	 */
	return -EMSGSIZE;
}
#endif /* CONFIG_NET_HOSTNAME_UNIQUE */
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_NET_HOSTNAME_H_ */
``` | /content/code_sandbox/include/zephyr/net/hostname.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 916 |
```objective-c
/*
*
*/
/**
* @file
* @brief API for monitoring network connections and interfaces.
*/
#ifndef ZEPHYR_INCLUDE_CONN_MGR_H_
#define ZEPHYR_INCLUDE_CONN_MGR_H_
#ifdef __cplusplus
extern "C" {
#endif
#if defined(CONFIG_NET_CONNECTION_MANAGER) || defined(__DOXYGEN__)
/**
* @brief Connection Manager API
* @defgroup conn_mgr Connection Manager API
* @since 2.0
* @version 0.1.0
* @ingroup networking
* @{
*/
struct net_if;
struct net_l2;
/**
* @brief Resend either NET_L4_CONNECTED or NET_L4_DISCONNECTED depending on whether connectivity
* is currently available.
*/
void conn_mgr_mon_resend_status(void);
/**
* @brief Mark an iface to be ignored by conn_mgr.
*
* Ignoring an iface forces conn_mgr to consider it unready/disconnected.
*
* This means that events related to the iface connecting/disconnecting will not be fired,
* and if the iface was connected before being ignored, events will be fired as though it
* disconnected at that moment.
*
* @param iface iface to be ignored.
*/
void conn_mgr_ignore_iface(struct net_if *iface);
/**
* @brief Watch (stop ignoring) an iface.
*
 * conn_mgr will no longer be forced to consider the iface unready/disconnected.
*
* Events related to the iface connecting/disconnecting will no longer be blocked,
* and if the iface was connected before being watched, events will be fired as though
* it connected in that moment.
*
* All ifaces default to watched at boot.
*
* @param iface iface to no longer ignore.
*/
void conn_mgr_watch_iface(struct net_if *iface);
/**
* @brief Check whether the provided iface is currently ignored.
*
* @param iface The iface to check.
* @retval true if the iface is being ignored by conn_mgr.
* @retval false if the iface is being watched by conn_mgr.
*/
bool conn_mgr_is_iface_ignored(struct net_if *iface);
/**
* @brief Mark an L2 to be ignored by conn_mgr.
*
* This is a wrapper for conn_mgr_ignore_iface that ignores all ifaces that use the L2.
*
* @param l2 L2 to be ignored.
*/
void conn_mgr_ignore_l2(const struct net_l2 *l2);
/**
* @brief Watch (stop ignoring) an L2.
*
* This is a wrapper for conn_mgr_watch_iface that watches all ifaces that use the L2.
*
* @param l2 L2 to watch.
*/
void conn_mgr_watch_l2(const struct net_l2 *l2);
/**
* @}
*/
#else
#define conn_mgr_mon_resend_status(...)
#define conn_mgr_ignore_iface(...)
#define conn_mgr_watch_iface(...)
#define conn_mgr_ignore_l2(...)
#define conn_mgr_watch_l2(...)
#endif /* CONFIG_NET_CONNECTION_MANAGER */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_CONN_MGR_H_ */
``` | /content/code_sandbox/include/zephyr/net/conn_mgr_monitor.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 620 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.