text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```objective-c
/*
 *
 */

/**
 * @file
 * @brief Frame sizes and payload structures for the u-blox UBX configuration
 *        protocol (UBX-CFG class messages), used by the u-blox GNSS driver.
 */

#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/modem/ubx.h>
#include "gnss_u_blox_protocol_defines.h"

#ifndef ZEPHYR_U_BLOX_PROTOCOL_
#define ZEPHYR_U_BLOX_PROTOCOL_

/* Number of entries in the ubx_baudrate[] table below. */
#define UBX_BAUDRATE_COUNT 9

/* When a configuration frame is sent, the device requires some delay to reflect the changes. */
/* TODO: check what is the precise waiting time for each message. */
#define UBX_CFG_RST_WAIT_MS 6000
#define UBX_CFG_GNSS_WAIT_MS 6000
#define UBX_CFG_NAV5_WAIT_MS 6000

/* Baudrates supported by the modem; table is defined in the protocol .c file. */
extern const uint32_t ubx_baudrate[UBX_BAUDRATE_COUNT];

/* Payload sizes (bytes) of the UBX-CFG messages handled by this driver.
 * The *_FRM_SZ macros add the fixed UBX frame overhead
 * (UBX_FRM_SZ_WO_PAYLOAD: sync chars, class/id, length, checksum).
 */
#define UBX_FRM_GET_PAYLOAD_SZ 0
#define UBX_CFG_ACK_PAYLOAD_SZ 2
#define UBX_CFG_NAK_PAYLOAD_SZ 2
#define UBX_CFG_RATE_PAYLOAD_SZ 6
#define UBX_CFG_PRT_POLL_PAYLOAD_SZ 1
#define UBX_CFG_PRT_POLL_FRM_SZ (UBX_FRM_SZ_WO_PAYLOAD + UBX_CFG_PRT_POLL_PAYLOAD_SZ)
#define UBX_CFG_PRT_SET_PAYLOAD_SZ 20
#define UBX_CFG_PRT_SET_FRM_SZ (UBX_FRM_SZ_WO_PAYLOAD + UBX_CFG_PRT_SET_PAYLOAD_SZ)
#define UBX_CFG_RST_PAYLOAD_SZ 4
#define UBX_CFG_RST_FRM_SZ (UBX_FRM_SZ_WO_PAYLOAD + UBX_CFG_RST_PAYLOAD_SZ)
#define UBX_CFG_NAV5_PAYLOAD_SZ 36
#define UBX_CFG_NAV5_FRM_SZ (UBX_FRM_SZ_WO_PAYLOAD + UBX_CFG_NAV5_PAYLOAD_SZ)
#define UBX_CFG_MSG_PAYLOAD_SZ 3
#define UBX_CFG_MSG_FRM_SZ (UBX_FRM_SZ_WO_PAYLOAD + UBX_CFG_MSG_PAYLOAD_SZ)
/* CFG-GNSS payload: 4-byte fixed header plus 8 bytes per config block. */
#define UBX_CFG_GNSS_PAYLOAD_INIT_SZ 4
#define UBX_CFG_GNSS_PAYLOAD_CFG_BLK_SZ 8
#define UBX_CFG_GNSS_PAYLOAD_SZ(n) \
	(UBX_CFG_GNSS_PAYLOAD_INIT_SZ + UBX_CFG_GNSS_PAYLOAD_CFG_BLK_SZ * n)
#define UBX_CFG_GNSS_FRM_SZ(n) (UBX_FRM_SZ_WO_PAYLOAD + UBX_CFG_GNSS_PAYLOAD_SZ(n))

/* Build a UBX frame for (msg_cls, msg_id, payload) into ubx_frame and
 * validate it against ubx_frame_size.
 * NOTE(review): return semantics (frame length vs negative errno) are defined
 * in the implementation file — confirm there.
 */
int ubx_create_and_validate_frame(uint8_t *ubx_frame, uint16_t ubx_frame_size, uint8_t msg_cls,
				  uint8_t msg_id, const void *payload, uint16_t payload_size);

/* UBX-ACK-ACK / UBX-ACK-NAK payload: identifies the acknowledged message. */
struct ubx_cfg_ack_payload {
	uint8_t message_class;
	uint8_t message_id;
};

void ubx_cfg_ack_payload_default(struct ubx_cfg_ack_payload *payload);

#define UBX_CFG_RATE_TIME_REF_UTC 0 /* Align measurements to UTC time. */
#define UBX_CFG_RATE_TIME_REF_GPS 1 /* Align measurements to GPS time. */
#define UBX_CFG_RATE_TIME_REF_GLO 2 /* Align measurements to GLONASS time. */
#define UBX_CFG_RATE_TIME_REF_BDS 3 /* Align measurements to BeiDou time. */
#define UBX_CFG_RATE_TIME_REF_GAL 4 /* Align measurements to Galileo time. */

/* UBX-CFG-RATE payload: measurement/navigation rate configuration. */
struct ubx_cfg_rate_payload {
	uint16_t meas_rate_ms;	/* measurement interval in milliseconds */
	uint16_t nav_rate;	/* navigation solutions per measurement cycle */
	uint16_t time_ref;	/* one of UBX_CFG_RATE_TIME_REF_* */
};

void ubx_cfg_rate_payload_default(struct ubx_cfg_rate_payload *payload);

/* UBX-CFG-PRT poll payload: selects which port to poll. */
struct ubx_cfg_prt_poll_payload {
	uint8_t port_id;
};

void ubx_cfg_prt_poll_payload_default(struct ubx_cfg_prt_poll_payload *payload);

/* Protocol masks and UART mode bits for UBX-CFG-PRT. */
#define UBX_CFG_PRT_IN_PROTO_UBX BIT(0)
#define UBX_CFG_PRT_IN_PROTO_NMEA BIT(1)
#define UBX_CFG_PRT_IN_PROTO_RTCM BIT(2)
#define UBX_CFG_PRT_IN_PROTO_RTCM3 BIT(5)
#define UBX_CFG_PRT_OUT_PROTO_UBX BIT(0)
#define UBX_CFG_PRT_OUT_PROTO_NMEA BIT(1)
#define UBX_CFG_PRT_OUT_PROTO_RTCM3 BIT(5)

#define UBX_CFG_PRT_PORT_MODE_CHAR_LEN_5 0U
#define UBX_CFG_PRT_PORT_MODE_CHAR_LEN_6 BIT(6)
#define UBX_CFG_PRT_PORT_MODE_CHAR_LEN_7 BIT(7)
#define UBX_CFG_PRT_PORT_MODE_CHAR_LEN_8 (BIT(6) | BIT(7))

#define UBX_CFG_PRT_PORT_MODE_PARITY_EVEN 0U
#define UBX_CFG_PRT_PORT_MODE_PARITY_ODD BIT(9)
#define UBX_CFG_PRT_PORT_MODE_PARITY_NONE BIT(11)

#define UBX_CFG_PRT_PORT_MODE_STOP_BITS_1 0U
#define UBX_CFG_PRT_PORT_MODE_STOP_BITS_1_HALF BIT(12)
#define UBX_CFG_PRT_PORT_MODE_STOP_BITS_2 BIT(13)
#define UBX_CFG_PRT_PORT_MODE_STOP_BITS_HALF (BIT(12) | BIT(13))

#define UBX_CFG_PRT_RESERVED0 0x00

#define UBX_CFG_PRT_TX_READY_PIN_CONF_DEFAULT 0x0000
#define UBX_CFG_PRT_TX_READY_PIN_CONF_EN BIT(0)
#define UBX_CFG_PRT_TX_READY_PIN_CONF_POL_LOW BIT(1)
#define UBX_CFG_PRT_TX_READY_PIN_CONF_POL_HIGH 0U

#define UBX_CFG_PRT_RESERVED1 0x00

#define UBX_CFG_PRT_FLAGS_DEFAULT 0x0000
#define UBX_CFG_PRT_FLAGS_EXTENDED_TX_TIMEOUT BIT(0)

/* UBX-CFG-PRT set payload: full UART/port configuration. */
struct ubx_cfg_prt_set_payload {
	uint8_t port_id;
	uint8_t reserved0;
	uint16_t tx_ready_pin_conf;
	uint32_t port_mode;		/* char length/parity/stop bits, UBX_CFG_PRT_PORT_MODE_* */
	uint32_t baudrate;
	uint16_t in_proto_mask;		/* UBX_CFG_PRT_IN_PROTO_* */
	uint16_t out_proto_mask;	/* UBX_CFG_PRT_OUT_PROTO_* */
	uint16_t flags;
	uint8_t reserved1;
};

void ubx_cfg_prt_set_payload_default(struct ubx_cfg_prt_set_payload *payload);

/* UBX-CFG-RST: battery-backed RAM masks and reset modes. */
#define UBX_CFG_RST_NAV_BBR_MASK_HOT_START 0x0000
#define UBX_CFG_RST_NAV_BBR_MASK_WARM_START 0x0001
#define UBX_CFG_RST_NAV_BBR_MASK_COLD_START 0xFFFF

#define UBX_CFG_RST_RESET_MODE_HARD_RESET 0x00
#define UBX_CFG_RST_RESET_MODE_CONTROLLED_SOFT_RESET 0x01
#define UBX_CFG_RST_RESET_MODE_CONTROLLED_SOFT_RESET_GNSS_ONLY 0x02
#define UBX_CFG_RST_RESET_MODE_HARD_RESET_AFTER_SHUTDOWN 0x04
#define UBX_CFG_RST_RESET_MODE_CONTROLLED_GNSS_STOP 0x08
#define UBX_CFG_RST_RESET_MODE_CONTROLLED_GNSS_START 0x09

#define UBX_CFG_RST_RESERVED0 0x00

/* UBX-CFG-RST payload. */
struct ubx_cfg_rst_payload {
	uint16_t nav_bbr_mask;	/* UBX_CFG_RST_NAV_BBR_MASK_* */
	uint8_t reset_mode;	/* UBX_CFG_RST_RESET_MODE_* */
	uint8_t reserved0;
};

void ubx_cfg_rst_payload_default(struct ubx_cfg_rst_payload *payload);

/* UBX-CFG-NAV5 defaults applied by ubx_cfg_nav5_payload_default(). */
#define UBX_CFG_NAV5_MASK_ALL 0x05FF
#define UBX_CFG_NAV5_FIX_MODE_DEFAULT UBX_FIX_AUTO_FIX
#define UBX_CFG_NAV5_FIXED_ALT_DEFAULT 0
#define UBX_CFG_NAV5_FIXED_ALT_VAR_DEFAULT 1U
#define UBX_CFG_NAV5_MIN_ELEV_DEFAULT 5
#define UBX_CFG_NAV5_DR_LIMIT_DEFAULT 3U
#define UBX_CFG_NAV5_P_DOP_DEFAULT 100U
#define UBX_CFG_NAV5_T_DOP_DEFAULT 100U
#define UBX_CFG_NAV5_P_ACC_DEFAULT 100U
#define UBX_CFG_NAV5_T_ACC_DEFAULT 350U
#define UBX_CFG_NAV5_STATIC_HOLD_THRESHOLD_DEFAULT 0U
#define UBX_CFG_NAV5_DGNSS_TIMEOUT_DEFAULT 60U
#define UBX_CFG_NAV5_CNO_THRESHOLD_NUM_SVS_DEFAULT 0U
#define UBX_CFG_NAV5_CNO_THRESHOLD_DEFAULT 0U
#define UBX_CFG_NAV5_RESERVED0 0U
#define UBX_CFG_NAV5_STATIC_HOLD_DIST_THRESHOLD 0U
#define UBX_CFG_NAV5_UTC_STANDARD_DEFAULT UBX_UTC_AUTOUTC

/* UBX-CFG-NAV5 payload: navigation engine settings. */
struct ubx_cfg_nav5_payload {
	uint16_t mask;		/* selects which fields below are applied */
	uint8_t dyn_model;
	uint8_t fix_mode;
	int32_t fixed_alt;
	uint32_t fixed_alt_var;
	int8_t min_elev;
	uint8_t dr_limit;
	uint16_t p_dop;
	uint16_t t_dop;
	uint16_t p_acc;
	uint16_t t_acc;
	uint8_t static_hold_threshold;
	uint8_t dgnss_timeout;
	uint8_t cno_threshold_num_svs;
	uint8_t cno_threshold;
	uint16_t reserved0;
	uint16_t static_hold_dist_threshold;
	uint8_t utc_standard;
};

void ubx_cfg_nav5_payload_default(struct ubx_cfg_nav5_payload *payload);

/* UBX-CFG-GNSS defaults and per-system signal-configuration flag values.
 * The signal bits in a config block's flags field depend on that block's
 * gnss_id, hence one set of macros per system below.
 */
#define UBX_CFG_GNSS_MSG_VER 0x00
#define UBX_CFG_GNSS_NUM_TRK_CH_HW_DEFAULT 0x31
#define UBX_CFG_GNSS_NUM_TRK_CH_USE_DEFAULT 0x31
#define UBX_CFG_GNSS_RESERVED0 0x00
#define UBX_CFG_GNSS_FLAG_ENABLE BIT(0)
#define UBX_CFG_GNSS_FLAG_DISABLE 0U
#define UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT 16
/* When gnss_id is 0 (GPS) */
#define UBX_CFG_GNSS_FLAG_SGN_CNF_GPS_L1C_A (0x01 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_GPS_L2C (0x10 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_GPS_L5 (0x20 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
/* When gnss_id is 1 (SBAS) */
#define UBX_CFG_GNSS_FLAG_SGN_CNF_SBAS_L1C_A (0x01 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
/* When gnss_id is 2 (Galileo) */
#define UBX_CFG_GNSS_FLAG_SGN_CNF_GALILEO_E1 (0x01 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_GALILEO_E5A (0x10 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_GALILEO_E5B (0x20 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
/* When gnss_id is 3 (BeiDou) */
#define UBX_CFG_GNSS_FLAG_SGN_CNF_BEIDOU_B1I (0x01 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_BEIDOU_B2I (0x10 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_BEIDOU_B2A (0x80 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
/* When gnss_id is 4 (IMES) */
#define UBX_CFG_GNSS_FLAG_SGN_CNF_IMES_L1 (0x01 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
/* When gnss_id is 5 (QZSS) */
#define UBX_CFG_GNSS_FLAG_SGN_CNF_QZSS_L1C_A (0x01 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_QZSS_L1S (0x04 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_QZSS_L2C (0x10 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_QZSS_L5 (0x20 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
/* When gnss_id is 6 (GLONASS) */
#define UBX_CFG_GNSS_FLAG_SGN_CNF_GLONASS_L1 (0x01 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)
#define UBX_CFG_GNSS_FLAG_SGN_CNF_GLONASS_L2 (0x10 << UBX_CFG_GNSS_FLAG_SGN_CNF_SHIFT)

/* One per-system configuration block of a UBX-CFG-GNSS payload. */
struct ubx_cfg_gnss_payload_config_block {
	uint8_t gnss_id;
	uint8_t num_res_trk_ch;
	uint8_t max_num_trk_ch;
	uint8_t reserved0;
	uint32_t flags;		/* enable bit plus UBX_CFG_GNSS_FLAG_SGN_CNF_* for gnss_id */
};

/* UBX-CFG-GNSS payload with trailing flexible array of config blocks;
 * allocate with UBX_CFG_GNSS_PAYLOAD_SZ(num_config_blocks).
 */
struct ubx_cfg_gnss_payload {
	uint8_t msg_ver;
	uint8_t num_trk_ch_hw;
	uint8_t num_trk_ch_use;
	uint8_t num_config_blocks;
	struct ubx_cfg_gnss_payload_config_block config_blocks[];
};

void ubx_cfg_gnss_payload_default(struct ubx_cfg_gnss_payload *payload);

#define UBX_CFG_MSG_RATE_DEFAULT 1

/* UBX-CFG-MSG payload: per-message output rate. */
struct ubx_cfg_msg_payload {
	uint8_t message_class;
	uint8_t message_id;
	uint8_t rate;
};

void ubx_cfg_msg_payload_default(struct ubx_cfg_msg_payload *payload);

#endif /* ZEPHYR_U_BLOX_PROTOCOL_ */
```
/content/code_sandbox/drivers/gnss/gnss_u_blox_protocol/gnss_u_blox_protocol.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,889
```c
/*
 *
 */

/*
 * MMC disk driver using zephyr SD subsystem
 */

#define DT_DRV_COMPAT zephyr_mmc_disk

#include <zephyr/sd/mmc.h>
#include <zephyr/drivers/disk.h>

/* Card state as tracked by this disk shim. */
enum sd_status {
	SD_UNINIT,
	SD_ERROR,
	SD_OK,
};

struct mmc_config {
	const struct device *host_controller;	/* SDHC host the card sits behind */
	uint8_t bus_width;			/* data bus width from devicetree */
};

struct mmc_data {
	struct sd_card card;
	enum sd_status status;
	char *name;	/* disk name registered with the disk-access layer */
};

/* Initialize the card through the SD subsystem and record the result
 * in data->status. Returns 0 on success, negative sd_init() error otherwise.
 */
static int disk_mmc_access_init(struct disk_info *disk)
{
	const struct device *dev = disk->dev;
	const struct mmc_config *cfg = dev->config;
	struct mmc_data *data = dev->data;
	int ret;

	ret = sd_init(cfg->host_controller, &data->card);
	if (ret) {
		data->status = SD_ERROR;
		return ret;
	}

	data->status = SD_OK;
	return 0;
}

/* Map the internal card state onto the disk-access status codes. */
static int disk_mmc_access_status(struct disk_info *disk)
{
	const struct device *dev = disk->dev;
	struct mmc_data *data = dev->data;

	if (data->status == SD_OK) {
		return DISK_STATUS_OK;
	} else {
		return DISK_STATUS_UNINIT;
	}
}

/* Read `count` sectors starting at `sector` into `buf`. */
static int disk_mmc_access_read(struct disk_info *disk, uint8_t *buf, uint32_t sector,
				uint32_t count)
{
	const struct device *dev = disk->dev;
	struct mmc_data *data = dev->data;

	return mmc_read_blocks(&data->card, buf, sector, count);
}

/* Write `count` sectors starting at `sector` from `buf`. */
static int disk_mmc_access_write(struct disk_info *disk, const uint8_t *buf, uint32_t sector,
				 uint32_t count)
{
	const struct device *dev = disk->dev;
	struct mmc_data *data = dev->data;

	return mmc_write_blocks(&data->card, buf, sector, count);
}

/* Handle disk IOCTLs; INIT/DEINIT are intercepted here, everything else
 * is forwarded to the SD subsystem.
 */
static int disk_mmc_access_ioctl(struct disk_info *disk, uint8_t cmd, void *buf)
{
	const struct device *dev = disk->dev;
	struct mmc_data *data = dev->data;

	switch (cmd) {
	case DISK_IOCTL_CTRL_INIT:
		return disk_mmc_access_init(disk);
	case DISK_IOCTL_CTRL_DEINIT:
		/* Best-effort flush; the sync result is deliberately ignored. */
		mmc_ioctl(&data->card, DISK_IOCTL_CTRL_SYNC, NULL);
		/* sd_init() will toggle power to MMC, so we can just mark
		 * disk as uninitialized
		 */
		data->status = SD_UNINIT;
		return 0;
	default:
		return mmc_ioctl(&data->card, cmd, buf);
	}
	/* Unreachable: every case above returns. */
	return 0;
}

static const struct disk_operations mmc_disk_ops = {
	.init = disk_mmc_access_init,
	.status = disk_mmc_access_status,
	.read = disk_mmc_access_read,
	.write = disk_mmc_access_write,
	.ioctl = disk_mmc_access_ioctl,
};

/* NOTE(review): a single static disk_info is shared by all instances;
 * disk_mmc_init() overwrites .dev/.name per instance, so only one
 * zephyr,mmc-disk node appears to be supported — confirm.
 */
static struct disk_info mmc_disk = {
	.ops = &mmc_disk_ops,
};

/* Device init hook: record configuration and register the disk. */
static int disk_mmc_init(const struct device *dev)
{
	struct mmc_data *data = dev->data;
	const struct mmc_config *config = dev->config;

	data->status = SD_UNINIT;
	data->card.bus_width = config->bus_width;
	mmc_disk.dev = dev;
	mmc_disk.name = data->name;

	return disk_access_register(&mmc_disk);
}

#define DISK_ACCESS_MMC_INIT(n)						\
	static const struct mmc_config mmc_config_##n = {		\
		.host_controller = DEVICE_DT_GET(DT_INST_PARENT(n)),	\
		.bus_width = DT_INST_PROP(n, bus_width),		\
	};								\
									\
	static struct mmc_data mmc_data_##n = {				\
		.name = CONFIG_MMC_VOLUME_NAME,				\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			&disk_mmc_init,					\
			NULL,						\
			&mmc_data_##n,					\
			&mmc_config_##n,				\
			POST_KERNEL,					\
			CONFIG_SD_INIT_PRIORITY,			\
			NULL);

DT_INST_FOREACH_STATUS_OKAY(DISK_ACCESS_MMC_INIT)
```
/content/code_sandbox/drivers/disk/mmc_subsys.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
886
```c
/*
 *
 */

/*
 * SDMMC disk driver using zephyr SD subsystem
 */

#define DT_DRV_COMPAT zephyr_sdmmc_disk

#include <zephyr/sd/sdmmc.h>
#include <zephyr/drivers/disk.h>

/* Card state as tracked by this disk shim. */
enum sd_status {
	SD_UNINIT,
	SD_ERROR,
	SD_OK,
};

struct sdmmc_config {
	const struct device *host_controller;	/* SDHC host the card sits behind */
};

struct sdmmc_data {
	struct sd_card card;
	enum sd_status status;
	char *name;	/* disk name registered with the disk-access layer */
};

/* Initialize the card through the SD subsystem after checking card
 * presence; records the result in data->status.
 */
static int disk_sdmmc_access_init(struct disk_info *disk)
{
	const struct device *dev = disk->dev;
	const struct sdmmc_config *cfg = dev->config;
	struct sdmmc_data *data = dev->data;
	int ret;

	if (!sd_is_card_present(cfg->host_controller)) {
		return DISK_STATUS_NOMEDIA;
	}

	ret = sd_init(cfg->host_controller, &data->card);
	if (ret) {
		data->status = SD_ERROR;
		return ret;
	}

	data->status = SD_OK;
	return 0;
}

/* Map card presence and internal state onto disk-access status codes. */
static int disk_sdmmc_access_status(struct disk_info *disk)
{
	const struct device *dev = disk->dev;
	const struct sdmmc_config *cfg = dev->config;
	struct sdmmc_data *data = dev->data;

	if (!sd_is_card_present(cfg->host_controller)) {
		return DISK_STATUS_NOMEDIA;
	}

	if (data->status == SD_OK) {
		return DISK_STATUS_OK;
	} else {
		return DISK_STATUS_UNINIT;
	}
}

/* Read `count` sectors starting at `sector` into `buf`. */
static int disk_sdmmc_access_read(struct disk_info *disk, uint8_t *buf, uint32_t sector,
				  uint32_t count)
{
	const struct device *dev = disk->dev;
	struct sdmmc_data *data = dev->data;

	return sdmmc_read_blocks(&data->card, buf, sector, count);
}

/* Write `count` sectors starting at `sector` from `buf`. */
static int disk_sdmmc_access_write(struct disk_info *disk, const uint8_t *buf, uint32_t sector,
				   uint32_t count)
{
	const struct device *dev = disk->dev;
	struct sdmmc_data *data = dev->data;

	return sdmmc_write_blocks(&data->card, buf, sector, count);
}

/* Handle disk IOCTLs; INIT/DEINIT are intercepted here, everything else
 * is forwarded to the SD subsystem.
 */
static int disk_sdmmc_access_ioctl(struct disk_info *disk, uint8_t cmd, void *buf)
{
	const struct device *dev = disk->dev;
	struct sdmmc_data *data = dev->data;

	switch (cmd) {
	case DISK_IOCTL_CTRL_INIT:
		return disk_sdmmc_access_init(disk);
	case DISK_IOCTL_CTRL_DEINIT:
		/* Card will be uninitialized after DEINIT */
		data->status = SD_UNINIT;
		return sdmmc_ioctl(&data->card, DISK_IOCTL_CTRL_DEINIT, NULL);
	default:
		return sdmmc_ioctl(&data->card, cmd, buf);
	}
	/* Unreachable: every case above returns. */
	return 0;
}

static const struct disk_operations sdmmc_disk_ops = {
	.init = disk_sdmmc_access_init,
	.status = disk_sdmmc_access_status,
	.read = disk_sdmmc_access_read,
	.write = disk_sdmmc_access_write,
	.ioctl = disk_sdmmc_access_ioctl,
};

/* NOTE(review): a single static disk_info is shared by all instances;
 * disk_sdmmc_init() overwrites .dev/.name per instance, so only one
 * zephyr,sdmmc-disk node appears to be supported — confirm.
 */
static struct disk_info sdmmc_disk = {
	.ops = &sdmmc_disk_ops,
};

/* Device init hook: record state and register the disk. */
static int disk_sdmmc_init(const struct device *dev)
{
	struct sdmmc_data *data = dev->data;

	data->status = SD_UNINIT;
	sdmmc_disk.dev = dev;
	sdmmc_disk.name = data->name;

	return disk_access_register(&sdmmc_disk);
}

#define DISK_ACCESS_SDMMC_INIT(n)					\
	static const struct sdmmc_config sdmmc_config_##n = {		\
		.host_controller = DEVICE_DT_GET(DT_INST_PARENT(n)),	\
	};								\
									\
	static struct sdmmc_data sdmmc_data_##n = {			\
		.name = CONFIG_SDMMC_VOLUME_NAME,			\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			&disk_sdmmc_init,				\
			NULL,						\
			&sdmmc_data_##n,				\
			&sdmmc_config_##n,				\
			POST_KERNEL,					\
			CONFIG_SD_INIT_PRIORITY,			\
			NULL);

DT_INST_FOREACH_STATUS_OKAY(DISK_ACCESS_SDMMC_INIT)
```
/content/code_sandbox/drivers/disk/sdmmc_subsys.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
906
```c /* * */ #include <string.h> #include <zephyr/types.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/disk.h> #include <errno.h> #include <zephyr/init.h> #include <zephyr/device.h> #include <zephyr/drivers/flash.h> #include <zephyr/storage/flash_map.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flashdisk, CONFIG_FLASHDISK_LOG_LEVEL); #if defined(CONFIG_FLASH_HAS_EXPLICIT_ERASE) && \ defined(CONFIG_FLASH_HAS_NO_EXPLICIT_ERASE) #define DISK_ERASE_RUNTIME_CHECK #endif struct flashdisk_data { struct disk_info info; struct k_mutex lock; const unsigned int area_id; const off_t offset; uint8_t *const cache; const size_t cache_size; const size_t size; const size_t sector_size; size_t page_size; off_t cached_addr; bool cache_valid; bool cache_dirty; bool erase_required; }; #define GET_SIZE_TO_BOUNDARY(start, block_size) \ (block_size - (start & (block_size - 1))) /* * The default block size is used for devices not requiring erase. * It defaults to 512 as this is most widely used sector size * on storage devices. */ #define DEFAULT_BLOCK_SIZE 512 static inline bool flashdisk_with_erase(const struct flashdisk_data *ctx) { ARG_UNUSED(ctx); #if CONFIG_FLASH_HAS_EXPLICIT_ERASE #if CONFIG_FLASH_HAS_NO_EXPLICIT_ERASE return ctx->erase_required; #else return true; #endif #endif return false; } static inline void flashdisk_probe_erase(struct flashdisk_data *ctx) { #if defined(DISK_ERASE_RUNTIME_CHECK) ctx->erase_required = flash_params_get_erase_cap(flash_get_parameters(ctx->info.dev)) & FLASH_ERASE_C_EXPLICIT; #else ARG_UNUSED(ctx); #endif } static int disk_flash_access_status(struct disk_info *disk) { LOG_DBG("status : %s", disk->dev ? 
"okay" : "no media"); if (!disk->dev) { return DISK_STATUS_NOMEDIA; } return DISK_STATUS_OK; } static int flashdisk_init_runtime(struct flashdisk_data *ctx, const struct flash_area *fap) { int rc; struct flash_pages_info page; off_t offset; flashdisk_probe_erase(ctx); if (IS_ENABLED(CONFIG_FLASHDISK_VERIFY_PAGE_LAYOUT) && flashdisk_with_erase(ctx)) { rc = flash_get_page_info_by_offs(ctx->info.dev, ctx->offset, &page); if (rc < 0) { LOG_ERR("Error %d while getting page info", rc); return rc; } ctx->page_size = page.size; } else { ctx->page_size = DEFAULT_BLOCK_SIZE; } LOG_INF("Initialize device %s", ctx->info.name); LOG_INF("offset %lx, sector size %zu, page size %zu, volume size %zu", (long)ctx->offset, ctx->sector_size, ctx->page_size, ctx->size); if (ctx->cache_size == 0) { /* Read-only flashdisk, no flash partition constraints */ LOG_INF("%s is read-only", ctx->info.name); return 0; } if (IS_ENABLED(CONFIG_FLASHDISK_VERIFY_PAGE_LAYOUT) && flashdisk_with_erase(ctx)) { if (ctx->offset != page.start_offset) { LOG_ERR("Disk %s does not start at page boundary", ctx->info.name); return -EINVAL; } offset = ctx->offset + page.size; while (offset < ctx->offset + ctx->size) { rc = flash_get_page_info_by_offs(ctx->info.dev, offset, &page); if (rc < 0) { LOG_ERR("Error %d getting page info at offset %lx", rc, offset); return rc; } if (page.size != ctx->page_size) { LOG_ERR("Non-uniform page size is not supported"); return rc; } offset += page.size; } if (offset != ctx->offset + ctx->size) { LOG_ERR("Last page crossess disk %s boundary", ctx->info.name); return -EINVAL; } } if (ctx->page_size > ctx->cache_size) { LOG_ERR("Cache too small (%zu needs %zu)", ctx->cache_size, ctx->page_size); return -ENOMEM; } return 0; } static int disk_flash_access_init(struct disk_info *disk) { struct flashdisk_data *ctx; const struct flash_area *fap; int rc; ctx = CONTAINER_OF(disk, struct flashdisk_data, info); rc = flash_area_open(ctx->area_id, &fap); if (rc < 0) { LOG_ERR("Flash area %u 
open error %d", ctx->area_id, rc); return rc; } k_mutex_lock(&ctx->lock, K_FOREVER); disk->dev = flash_area_get_device(fap); rc = flashdisk_init_runtime(ctx, fap); if (rc < 0) { flash_area_close(fap); } k_mutex_unlock(&ctx->lock); return rc; } static bool sectors_in_range(struct flashdisk_data *ctx, uint32_t start_sector, uint32_t sector_count) { uint32_t start, end; start = ctx->offset + (start_sector * ctx->sector_size); end = start + (sector_count * ctx->sector_size); if ((end >= start) && (start >= ctx->offset) && (end <= ctx->offset + ctx->size)) { return true; } LOG_ERR("sector start %" PRIu32 " count %" PRIu32 " outside partition boundary", start_sector, sector_count); return false; } static int disk_flash_access_read(struct disk_info *disk, uint8_t *buff, uint32_t start_sector, uint32_t sector_count) { struct flashdisk_data *ctx; off_t fl_addr; uint32_t remaining; uint32_t offset; uint32_t len; int rc = 0; ctx = CONTAINER_OF(disk, struct flashdisk_data, info); if (!sectors_in_range(ctx, start_sector, sector_count)) { return -EINVAL; } fl_addr = ctx->offset + start_sector * ctx->sector_size; remaining = (sector_count * ctx->sector_size); k_mutex_lock(&ctx->lock, K_FOREVER); /* Operate on page addresses to easily check for cached data */ offset = fl_addr & (ctx->page_size - 1); fl_addr = ROUND_DOWN(fl_addr, ctx->page_size); /* Read up to page boundary on first iteration */ len = ctx->page_size - offset; while (remaining) { if (remaining < len) { len = remaining; } if (ctx->cache_valid && ctx->cached_addr == fl_addr) { memcpy(buff, &ctx->cache[offset], len); } else if (flash_read(disk->dev, fl_addr + offset, buff, len) < 0) { rc = -EIO; goto end; } fl_addr += ctx->page_size; remaining -= len; buff += len; /* Try to read whole page on next iteration */ len = ctx->page_size; offset = 0; } end: k_mutex_unlock(&ctx->lock); return rc; } static int flashdisk_cache_commit(struct flashdisk_data *ctx) { if (!ctx->cache_valid || !ctx->cache_dirty) { /* Either no cached 
data or cache matches flash data */ return 0; } if (flashdisk_with_erase(ctx)) { if (flash_erase(ctx->info.dev, ctx->cached_addr, ctx->page_size) < 0) { return -EIO; } } /* write data to flash */ if (flash_write(ctx->info.dev, ctx->cached_addr, ctx->cache, ctx->page_size) < 0) { return -EIO; } ctx->cache_dirty = false; return 0; } static int flashdisk_cache_load(struct flashdisk_data *ctx, off_t fl_addr) { int rc; __ASSERT_NO_MSG((fl_addr & (ctx->page_size - 1)) == 0); if (ctx->cache_valid) { if (ctx->cached_addr == fl_addr) { /* Page is already cached */ return 0; } /* Different page is in cache, commit it first */ rc = flashdisk_cache_commit(ctx); if (rc < 0) { /* Failed to commit dirty page, abort */ return rc; } } /* Load page into cache */ ctx->cache_valid = false; ctx->cache_dirty = false; ctx->cached_addr = fl_addr; rc = flash_read(ctx->info.dev, fl_addr, ctx->cache, ctx->page_size); if (rc == 0) { /* Successfully loaded into cache, mark as valid */ ctx->cache_valid = true; return 0; } return -EIO; } /* input size is either less or equal to a block size (ctx->page_size) * and write data never spans across adjacent blocks. */ static int flashdisk_cache_write(struct flashdisk_data *ctx, off_t start_addr, uint32_t size, const void *buff) { int rc; off_t fl_addr; uint32_t offset; /* adjust offset if starting address is not erase-aligned address */ offset = start_addr & (ctx->page_size - 1); /* always align starting address for flash cache operations */ fl_addr = ROUND_DOWN(start_addr, ctx->page_size); /* when writing full page the address must be page aligned * when writing partial page user data must be within a single page */ __ASSERT_NO_MSG(fl_addr + ctx->page_size >= start_addr + size); rc = flashdisk_cache_load(ctx, fl_addr); if (rc < 0) { return rc; } /* Do not mark cache as dirty if data to be written matches cache. * If cache is already dirty, copy data to cache without compare. 
*/ if (ctx->cache_dirty || memcmp(&ctx->cache[offset], buff, size)) { /* Update cache and mark it as dirty */ memcpy(&ctx->cache[offset], buff, size); ctx->cache_dirty = true; } return 0; } static int disk_flash_access_write(struct disk_info *disk, const uint8_t *buff, uint32_t start_sector, uint32_t sector_count) { struct flashdisk_data *ctx; off_t fl_addr; uint32_t remaining; uint32_t size; int rc = 0; ctx = CONTAINER_OF(disk, struct flashdisk_data, info); if (ctx->cache_size == 0) { return -ENOTSUP; } if (!sectors_in_range(ctx, start_sector, sector_count)) { return -EINVAL; } fl_addr = ctx->offset + start_sector * ctx->sector_size; remaining = (sector_count * ctx->sector_size); k_mutex_lock(&ctx->lock, K_FOREVER); /* check if start address is erased-aligned address */ if (fl_addr & (ctx->page_size - 1)) { off_t block_bnd; /* not aligned */ /* check if the size goes over flash block boundary */ block_bnd = fl_addr + ctx->page_size; block_bnd = block_bnd & ~(ctx->page_size - 1); if ((fl_addr + remaining) <= block_bnd) { /* not over block boundary (a partial block also) */ if (flashdisk_cache_write(ctx, fl_addr, remaining, buff) < 0) { rc = -EIO; } goto end; } /* write goes over block boundary */ size = GET_SIZE_TO_BOUNDARY(fl_addr, ctx->page_size); /* write first partial block */ if (flashdisk_cache_write(ctx, fl_addr, size, buff) < 0) { rc = -EIO; goto end; } fl_addr += size; remaining -= size; buff += size; } /* start is an erase-aligned address */ while (remaining) { if (remaining < ctx->page_size) { break; } if (flashdisk_cache_write(ctx, fl_addr, ctx->page_size, buff) < 0) { rc = -EIO; goto end; } fl_addr += ctx->page_size; remaining -= ctx->page_size; buff += ctx->page_size; } /* remaining partial block */ if (remaining) { if (flashdisk_cache_write(ctx, fl_addr, remaining, buff) < 0) { rc = -EIO; goto end; } } end: k_mutex_unlock(&ctx->lock); return 0; } static int disk_flash_access_ioctl(struct disk_info *disk, uint8_t cmd, void *buff) { int rc; struct 
flashdisk_data *ctx; ctx = CONTAINER_OF(disk, struct flashdisk_data, info); switch (cmd) { case DISK_IOCTL_CTRL_DEINIT: case DISK_IOCTL_CTRL_SYNC: k_mutex_lock(&ctx->lock, K_FOREVER); rc = flashdisk_cache_commit(ctx); k_mutex_unlock(&ctx->lock); return rc; case DISK_IOCTL_GET_SECTOR_COUNT: *(uint32_t *)buff = ctx->size / ctx->sector_size; return 0; case DISK_IOCTL_GET_SECTOR_SIZE: *(uint32_t *)buff = ctx->sector_size; return 0; case DISK_IOCTL_GET_ERASE_BLOCK_SZ: /* in sectors */ k_mutex_lock(&ctx->lock, K_FOREVER); *(uint32_t *)buff = ctx->page_size / ctx->sector_size; k_mutex_unlock(&ctx->lock); return 0; case DISK_IOCTL_CTRL_INIT: return disk_flash_access_init(disk); default: break; } return -EINVAL; } static const struct disk_operations flash_disk_ops = { .init = disk_flash_access_init, .status = disk_flash_access_status, .read = disk_flash_access_read, .write = disk_flash_access_write, .ioctl = disk_flash_access_ioctl, }; #define DT_DRV_COMPAT zephyr_flash_disk #define PARTITION_PHANDLE(n) DT_PHANDLE_BY_IDX(DT_DRV_INST(n), partition, 0) /* Force cache size to 0 if partition is read-only */ #define CACHE_SIZE(n) (DT_INST_PROP(n, cache_size) * !DT_PROP(PARTITION_PHANDLE(n), read_only)) #define DEFINE_FLASHDISKS_CACHE(n) \ static uint8_t __aligned(4) flashdisk##n##_cache[CACHE_SIZE(n)]; DT_INST_FOREACH_STATUS_OKAY(DEFINE_FLASHDISKS_CACHE) #define DEFINE_FLASHDISKS_DEVICE(n) \ { \ .info = { \ .ops = &flash_disk_ops, \ .name = DT_INST_PROP(n, disk_name), \ }, \ .area_id = DT_FIXED_PARTITION_ID(PARTITION_PHANDLE(n)), \ .offset = DT_REG_ADDR(PARTITION_PHANDLE(n)), \ .cache = flashdisk##n##_cache, \ .cache_size = sizeof(flashdisk##n##_cache), \ .size = DT_REG_SIZE(PARTITION_PHANDLE(n)), \ .sector_size = DT_INST_PROP(n, sector_size), \ }, static struct flashdisk_data flash_disks[] = { DT_INST_FOREACH_STATUS_OKAY(DEFINE_FLASHDISKS_DEVICE) }; #define VERIFY_CACHE_SIZE_IS_NOT_ZERO_IF_NOT_READ_ONLY(n) \ COND_CODE_1(DT_PROP(PARTITION_PHANDLE(n), read_only), \ (/* cache-size 
is not used for read-only disks */), \ (BUILD_ASSERT(DT_INST_PROP(n, cache_size) != 0, \ "Devicetree node " DT_NODE_PATH(DT_DRV_INST(n)) \ " must have non-zero cache-size");)) DT_INST_FOREACH_STATUS_OKAY(VERIFY_CACHE_SIZE_IS_NOT_ZERO_IF_NOT_READ_ONLY) #define VERIFY_CACHE_SIZE_IS_MULTIPLY_OF_SECTOR_SIZE(n) \ BUILD_ASSERT(DT_INST_PROP(n, cache_size) % DT_INST_PROP(n, sector_size) == 0, \ "Devicetree node " DT_NODE_PATH(DT_DRV_INST(n)) \ " has cache size which is not a multiple of its sector size"); DT_INST_FOREACH_STATUS_OKAY(VERIFY_CACHE_SIZE_IS_MULTIPLY_OF_SECTOR_SIZE) static int disk_flash_init(void) { int err = 0; for (int i = 0; i < ARRAY_SIZE(flash_disks); i++) { int rc; k_mutex_init(&flash_disks[i].lock); rc = disk_access_register(&flash_disks[i].info); if (rc < 0) { LOG_ERR("Failed to register disk %s error %d", flash_disks[i].info.name, rc); err = rc; } } return err; } SYS_INIT(disk_flash_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); ```
/content/code_sandbox/drivers/disk/flashdisk.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,788
```unknown DT_STM32_SDMMC_HAS_DMA := $(dt_nodelabel_has_prop,sdmmc,dmas) config DISK_DRIVER_SDMMC bool "SDMMC card driver" depends on DT_HAS_ZEPHYR_SDMMC_DISK_ENABLED || \ DT_HAS_ST_STM32_SDMMC_ENABLED default y help SDMMC card driver. if DISK_DRIVER_SDMMC config SD_INIT_PRIORITY int "Init priority" default 90 help SDMMC controller driver initialization priority. config SDMMC_VOLUME_NAME string "SDMMC Disk mount point or drive name" default "SD" if FAT_FILESYSTEM_ELM default "SDMMC" help Disk name as per file system naming guidelines. config SDMMC_SUBSYS bool "SDMMC access via SD subsystem" select SDMMC_STACK default y depends on DT_HAS_ZEPHYR_SDMMC_DISK_ENABLED help Enable SDMMC access via SD subsystem. config SDMMC_STM32 bool "STM32 SDMMC driver" default y depends on DT_HAS_ST_STM32_SDMMC_ENABLED select USE_STM32_HAL_SD if !SDMMC_STM32_EMMC select USE_STM32_HAL_SD_EX if !SDMMC_STM32_EMMC && SOC_SERIES_STM32L4X select USE_STM32_HAL_MMC if SDMMC_STM32_EMMC select USE_STM32_HAL_MMC_EX if SDMMC_STM32_EMMC && SOC_SERIES_STM32L4X select USE_STM32_LL_SDMMC select USE_STM32_HAL_DMA if (SOC_SERIES_STM32L4X || SOC_SERIES_STM32F7X || SOC_SERIES_STM32F4X) select DMA if ($(DT_STM32_SDMMC_HAS_DMA) && SOC_SERIES_STM32F4X) select RESET help File system on sdmmc accessed through stm32 sdmmc. config SDMMC_STM32_HWFC bool "STM32 SDMMC Hardware Flow control" depends on SDMMC_STM32 depends on SOC_SERIES_STM32H5X || \ SOC_SERIES_STM32H7X || \ SOC_SERIES_STM32F7X || \ SOC_SERIES_STM32L4X || \ SOC_SERIES_STM32L5X help Enable SDMMC Hardware Flow Control to avoid FIFO underrun (TX mode) and overrun (RX mode) errors. config SDMMC_STM32_EMMC bool "STM32 SDMMC eMMC mode" depends on SDMMC_STM32 help Use eMMC instead of SDIO (SD card) protocol. These are similar, but not entirely the same. config SDMMC_STM32_CLOCK_CHECK bool "Runtime SDMMC 48MHz clock check" depends on SDMMC_STM32 default y help Enable SDMMC clock 48MHz configuration runtime check. 
In specific cases, this check might provide a wrong verdict and should be disabled. module = SDMMC module-str = sdmmc source "subsys/logging/Kconfig.template.log_config" endif # DISK_DRIVER_SDMMC ```
/content/code_sandbox/drivers/disk/Kconfig.sdmmc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
677
```unknown menuconfig DISK_DRIVERS bool "Disk drivers" help Disk Driver configuration if DISK_DRIVERS source "drivers/disk/Kconfig.ram" source "drivers/disk/Kconfig.flash" source "drivers/disk/Kconfig.sdmmc" source "drivers/disk/Kconfig.mmc" source "drivers/disk/Kconfig.loopback" rsource "nvme/Kconfig" endif # DISK_DRIVERS ```
/content/code_sandbox/drivers/disk/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
91
```unknown config DISK_DRIVER_MMC bool "MMC card driver" depends on DT_HAS_ZEPHYR_MMC_DISK_ENABLED default y help MMC card driver. if DISK_DRIVER_MMC config SD_INIT_PRIORITY int "Init priority" default 90 help MMC controller driver initialization priority. config MMC_VOLUME_NAME string "MMC Disk mount point or drive name" default "SD" if FAT_FILESYSTEM_ELM default "MMC" help Disk name as per file system naming guidelines. config MMC_SUBSYS bool "MMC access via SD subsystem" select MMC_STACK default y depends on DT_HAS_ZEPHYR_MMC_DISK_ENABLED help Enable MMC access via SD subsystem. module = MMC module-str = mmc source "subsys/logging/Kconfig.template.log_config" endif # DISK_DRIVER_MMC ```
/content/code_sandbox/drivers/disk/Kconfig.mmc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
183
```c /* * */ #include <string.h> #include <zephyr/types.h> #include <zephyr/drivers/disk.h> #include <errno.h> #include <zephyr/init.h> #include <zephyr/device.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ramdisk, CONFIG_RAMDISK_LOG_LEVEL); struct ram_disk_data { struct disk_info info; const size_t sector_size; const size_t sector_count; uint8_t *const buf; }; struct ram_disk_config { const size_t sector_size; const size_t sector_count; const size_t size; uint8_t *const buf; }; static void *lba_to_address(const struct device *dev, uint32_t lba) { const struct ram_disk_config *config = dev->config; return &config->buf[lba * config->sector_size]; } static int disk_ram_access_status(struct disk_info *disk) { return DISK_STATUS_OK; } static int disk_ram_access_read(struct disk_info *disk, uint8_t *buff, uint32_t sector, uint32_t count) { const struct device *dev = disk->dev; const struct ram_disk_config *config = dev->config; uint32_t last_sector = sector + count; if (last_sector < sector || last_sector > config->sector_count) { LOG_ERR("Sector %" PRIu32 " is outside the range %zu", last_sector, config->sector_count); return -EIO; } memcpy(buff, lba_to_address(dev, sector), count * config->sector_size); return 0; } static int disk_ram_access_write(struct disk_info *disk, const uint8_t *buff, uint32_t sector, uint32_t count) { const struct device *dev = disk->dev; const struct ram_disk_config *config = dev->config; uint32_t last_sector = sector + count; if (last_sector < sector || last_sector > config->sector_count) { LOG_ERR("Sector %" PRIu32 " is outside the range %zu", last_sector, config->sector_count); return -EIO; } memcpy(lba_to_address(dev, sector), buff, count * config->sector_size); return 0; } static int disk_ram_access_ioctl(struct disk_info *disk, uint8_t cmd, void *buff) { const struct ram_disk_config *config = disk->dev->config; switch (cmd) { case DISK_IOCTL_CTRL_SYNC: break; case DISK_IOCTL_GET_SECTOR_COUNT: *(uint32_t *)buff = 
config->sector_count; break; case DISK_IOCTL_GET_SECTOR_SIZE: *(uint32_t *)buff = config->sector_size; break; case DISK_IOCTL_GET_ERASE_BLOCK_SZ: *(uint32_t *)buff = 1U; break; case DISK_IOCTL_CTRL_INIT: case DISK_IOCTL_CTRL_DEINIT: break; default: return -EINVAL; } return 0; } static int disk_ram_access_init(struct disk_info *disk) { return disk_ram_access_ioctl(disk, DISK_IOCTL_CTRL_INIT, NULL); } static int disk_ram_init(const struct device *dev) { struct disk_info *info = dev->data; info->dev = dev; return disk_access_register(info); } static const struct disk_operations ram_disk_ops = { .init = disk_ram_access_init, .status = disk_ram_access_status, .read = disk_ram_access_read, .write = disk_ram_access_write, .ioctl = disk_ram_access_ioctl, }; #define DT_DRV_COMPAT zephyr_ram_disk #define RAMDISK_DEVICE_SIZE(n) \ (DT_INST_PROP(n, sector_size) * DT_INST_PROP(n, sector_count)) #define RAMDISK_DEVICE_CONFIG_DEFINE_MEMREG(n) \ BUILD_ASSERT(RAMDISK_DEVICE_SIZE(n) <= \ DT_REG_SIZE(DT_INST_PHANDLE(n, ram_region)), \ "Disk size is smaller than memory region"); \ \ static struct ram_disk_config disk_config_##n = { \ .sector_size = DT_INST_PROP(n, sector_size), \ .sector_count = DT_INST_PROP(n, sector_count), \ .size = RAMDISK_DEVICE_SIZE(n), \ .buf = UINT_TO_POINTER(DT_REG_ADDR(DT_INST_PHANDLE(n, ram_region))), \ } #define RAMDISK_DEVICE_CONFIG_DEFINE_LOCAL(n) \ static uint8_t disk_buf_##n[DT_INST_PROP(n, sector_size) * \ DT_INST_PROP(n, sector_count)]; \ \ static struct ram_disk_config disk_config_##n = { \ .sector_size = DT_INST_PROP(n, sector_size), \ .sector_count = DT_INST_PROP(n, sector_count), \ .size = RAMDISK_DEVICE_SIZE(n), \ .buf = disk_buf_##n, \ } #define RAMDISK_DEVICE_CONFIG_DEFINE(n) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(n, ram_region), \ (RAMDISK_DEVICE_CONFIG_DEFINE_MEMREG(n)), \ (RAMDISK_DEVICE_CONFIG_DEFINE_LOCAL(n))) #define RAMDISK_DEVICE_DEFINE(n) \ \ static struct disk_info disk_info_##n = { \ .name = DT_INST_PROP(n, disk_name), \ .ops = 
&ram_disk_ops, \ }; \ \ RAMDISK_DEVICE_CONFIG_DEFINE(n); \ \ DEVICE_DT_INST_DEFINE(n, disk_ram_init, NULL, \ &disk_info_##n, &disk_config_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &ram_disk_ops); DT_INST_FOREACH_STATUS_OKAY(RAMDISK_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/disk/ramdisk.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,260
```unknown config DISK_DRIVER_FLASH bool "Flash" depends on DT_HAS_ZEPHYR_FLASH_DISK_ENABLED default y select FLASH select FLASH_MAP help Flash device is used for the file system. if DISK_DRIVER_FLASH config FLASHDISK_VERIFY_PAGE_LAYOUT bool "Verify flashdisk partition layout" default y help Enable runtime zephyr,flash-disk partition page layout constraints verification. Disable to reduce code size. module = FLASHDISK module-str = flashdisk source "subsys/logging/Kconfig.template.log_config" endif # DISK_DRIVER_FLASH ```
/content/code_sandbox/drivers/disk/Kconfig.flash
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
128
```c /* * */ #define DT_DRV_COMPAT st_stm32_sdmmc #include <zephyr/devicetree.h> #include <zephyr/drivers/disk.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/reset.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <soc.h> #include <stm32_ll_rcc.h> LOG_MODULE_REGISTER(stm32_sdmmc, CONFIG_SDMMC_LOG_LEVEL); #define STM32_SDMMC_USE_DMA DT_NODE_HAS_PROP(DT_DRV_INST(0), dmas) #if STM32_SDMMC_USE_DMA #include <zephyr/drivers/dma.h> #include <zephyr/drivers/dma/dma_stm32.h> #include <stm32_ll_dma.h> #endif #ifndef MMC_TypeDef #define MMC_TypeDef SDMMC_TypeDef #endif #ifndef SDMMC_BUS_WIDE_1B #define SDMMC_BUS_WIDE_1B SDIO_BUS_WIDE_1B #endif #ifndef SDMMC_BUS_WIDE_4B #define SDMMC_BUS_WIDE_4B SDIO_BUS_WIDE_4B #endif #ifndef SDMMC_BUS_WIDE_8B #define SDMMC_BUS_WIDE_8B SDIO_BUS_WIDE_8B #endif typedef void (*irq_config_func_t)(const struct device *dev); #if STM32_SDMMC_USE_DMA static const uint32_t table_priority[] = { DMA_PRIORITY_LOW, DMA_PRIORITY_MEDIUM, DMA_PRIORITY_HIGH, DMA_PRIORITY_VERY_HIGH }; struct sdmmc_dma_stream { const struct device *dev; uint32_t channel; uint32_t channel_nb; DMA_TypeDef *reg; struct dma_config cfg; }; #endif #ifdef CONFIG_SDMMC_STM32_EMMC typedef MMC_HandleTypeDef HandleTypeDef; typedef HAL_MMC_CardInfoTypeDef CardInfoTypeDef; #else typedef SD_HandleTypeDef HandleTypeDef; typedef HAL_SD_CardInfoTypeDef CardInfoTypeDef; #endif struct stm32_sdmmc_priv { irq_config_func_t irq_config; struct k_sem thread_lock; struct k_sem sync; HandleTypeDef hsd; int status; struct k_work work; struct gpio_callback cd_cb; struct gpio_dt_spec cd; struct gpio_dt_spec pe; struct stm32_pclken *pclken; const struct pinctrl_dev_config *pcfg; const struct reset_dt_spec reset; #if STM32_SDMMC_USE_DMA struct sdmmc_dma_stream dma_rx; struct sdmmc_dma_stream dma_tx; #endif }; #ifdef CONFIG_SDMMC_STM32_HWFC 
static void stm32_sdmmc_fc_enable(struct stm32_sdmmc_priv *priv) { MMC_TypeDef *sdmmcx = priv->hsd.Instance; sdmmcx->CLKCR |= SDMMC_CLKCR_HWFC_EN; } #endif static void stm32_sdmmc_isr(const struct device *dev) { struct stm32_sdmmc_priv *priv = dev->data; #ifdef CONFIG_SDMMC_STM32_EMMC HAL_MMC_IRQHandler(&priv->hsd); #else HAL_SD_IRQHandler(&priv->hsd); #endif } #define DEFINE_HAL_CALLBACK(name) \ void name(HandleTypeDef *hsd) \ { \ struct stm32_sdmmc_priv *priv = CONTAINER_OF(hsd, struct stm32_sdmmc_priv, hsd); \ \ priv->status = hsd->ErrorCode; \ \ k_sem_give(&priv->sync); \ } #ifdef CONFIG_SDMMC_STM32_EMMC DEFINE_HAL_CALLBACK(HAL_MMC_TxCpltCallback); DEFINE_HAL_CALLBACK(HAL_MMC_RxCpltCallback); DEFINE_HAL_CALLBACK(HAL_MMC_ErrorCallback); #else DEFINE_HAL_CALLBACK(HAL_SD_TxCpltCallback); DEFINE_HAL_CALLBACK(HAL_SD_RxCpltCallback); DEFINE_HAL_CALLBACK(HAL_SD_ErrorCallback); #endif static int stm32_sdmmc_clock_enable(struct stm32_sdmmc_priv *priv) { const struct device *clock; /* HSI48 Clock is enabled through using the device tree */ clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (DT_INST_NUM_CLOCKS(0) > 1) { if (clock_control_configure(clock, (clock_control_subsys_t)&priv->pclken[1], NULL) != 0) { LOG_ERR("Failed to enable SDMMC domain clock"); return -EIO; } } if (IS_ENABLED(CONFIG_SDMMC_STM32_CLOCK_CHECK)) { uint32_t sdmmc_clock_rate; if (clock_control_get_rate(clock, (clock_control_subsys_t)&priv->pclken[1], &sdmmc_clock_rate) != 0) { LOG_ERR("Failed to get SDMMC domain clock rate"); return -EIO; } if (sdmmc_clock_rate != MHZ(48)) { LOG_ERR("SDMMC Clock is not 48MHz (%d)", sdmmc_clock_rate); return -ENOTSUP; } } /* Enable the APB clock for stm32_sdmmc */ return clock_control_on(clock, (clock_control_subsys_t)&priv->pclken[0]); } #if !defined(CONFIG_SDMMC_STM32_EMMC) static int stm32_sdmmc_clock_disable(struct stm32_sdmmc_priv *priv) { const struct device *clock; clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); return clock_control_off(clock, 
(clock_control_subsys_t)&priv->pclken); } #endif #if STM32_SDMMC_USE_DMA static void stm32_sdmmc_dma_cb(const struct device *dev, void *arg, uint32_t channel, int status) { DMA_HandleTypeDef *hdma = arg; if (status != 0) { LOG_ERR("DMA callback error with channel %d.", channel); } HAL_DMA_IRQHandler(hdma); } static int stm32_sdmmc_configure_dma(DMA_HandleTypeDef *handle, struct sdmmc_dma_stream *dma) { int ret; if (!device_is_ready(dma->dev)) { LOG_ERR("Failed to get dma dev"); return -ENODEV; } dma->cfg.user_data = handle; ret = dma_config(dma->dev, dma->channel, &dma->cfg); if (ret != 0) { LOG_ERR("Failed to conig"); return ret; } handle->Instance = __LL_DMA_GET_STREAM_INSTANCE(dma->reg, dma->channel_nb); handle->Init.Channel = dma->cfg.dma_slot * DMA_CHANNEL_1; handle->Init.PeriphInc = DMA_PINC_DISABLE; handle->Init.MemInc = DMA_MINC_ENABLE; handle->Init.PeriphDataAlignment = DMA_PDATAALIGN_WORD; handle->Init.MemDataAlignment = DMA_MDATAALIGN_WORD; handle->Init.Mode = DMA_PFCTRL; handle->Init.Priority = table_priority[dma->cfg.channel_priority], handle->Init.FIFOMode = DMA_FIFOMODE_ENABLE; handle->Init.FIFOThreshold = DMA_FIFO_THRESHOLD_FULL; handle->Init.MemBurst = DMA_MBURST_INC4; handle->Init.PeriphBurst = DMA_PBURST_INC4; return ret; } static int stm32_sdmmc_dma_init(struct stm32_sdmmc_priv *priv) { static DMA_HandleTypeDef dma_tx_handle; static DMA_HandleTypeDef dma_rx_handle; int err; LOG_DBG("using dma"); err = stm32_sdmmc_configure_dma(&dma_tx_handle, &priv->dma_tx); if (err) { LOG_ERR("failed to init tx dma"); return err; } __HAL_LINKDMA(&priv->hsd, hdmatx, dma_tx_handle); HAL_DMA_DeInit(&dma_tx_handle); HAL_DMA_Init(&dma_tx_handle); err = stm32_sdmmc_configure_dma(&dma_rx_handle, &priv->dma_rx); if (err) { LOG_ERR("failed to init rx dma"); return err; } __HAL_LINKDMA(&priv->hsd, hdmarx, dma_rx_handle); HAL_DMA_DeInit(&dma_rx_handle); HAL_DMA_Init(&dma_rx_handle); return err; } #endif static int stm32_sdmmc_access_init(struct disk_info *disk) { const 
struct device *dev = disk->dev; struct stm32_sdmmc_priv *priv = dev->data; int err; if (priv->status == DISK_STATUS_NOMEDIA) { return -ENODEV; } #if STM32_SDMMC_USE_DMA err = stm32_sdmmc_dma_init(priv); if (err) { LOG_ERR("DMA init failed"); return err; } #endif err = stm32_sdmmc_clock_enable(priv); if (err) { LOG_ERR("failed to init clocks"); return err; } err = reset_line_toggle_dt(&priv->reset); if (err) { LOG_ERR("failed to reset peripheral"); return err; } #ifdef CONFIG_SDMMC_STM32_EMMC err = HAL_MMC_Init(&priv->hsd); #else err = HAL_SD_Init(&priv->hsd); #endif if (err != HAL_OK) { LOG_ERR("failed to init stm32_sdmmc (ErrorCode 0x%X)", priv->hsd.ErrorCode); return -EIO; } #ifdef CONFIG_SDMMC_STM32_HWFC stm32_sdmmc_fc_enable(priv); #endif priv->status = DISK_STATUS_OK; return 0; } static int stm32_sdmmc_access_deinit(struct stm32_sdmmc_priv *priv) { #if defined(CONFIG_SDMMC_STM32_EMMC) HAL_MMC_DeInit(&priv->hsd); #else HAL_SD_DeInit(&priv->hsd); stm32_sdmmc_clock_disable(priv); #endif priv->status = DISK_STATUS_UNINIT; return 0; } static int stm32_sdmmc_access_status(struct disk_info *disk) { const struct device *dev = disk->dev; struct stm32_sdmmc_priv *priv = dev->data; return priv->status; } static int stm32_sdmmc_is_card_in_transfer(HandleTypeDef *hsd) { #ifdef CONFIG_SDMMC_STM32_EMMC return HAL_MMC_GetCardState(hsd) == HAL_MMC_CARD_TRANSFER; #else return HAL_SD_GetCardState(hsd) == HAL_SD_CARD_TRANSFER; #endif } static int stm32_sdmmc_read_blocks(HandleTypeDef *hsd, uint8_t *data_buf, uint32_t start_sector, uint32_t num_sector) { #if STM32_SDMMC_USE_DMA || IS_ENABLED(DT_PROP(DT_DRV_INST(0), idma)) #ifdef CONFIG_SDMMC_STM32_EMMC return HAL_MMC_ReadBlocks_DMA(hsd, data_buf, start_sector, num_sector); #else return HAL_SD_ReadBlocks_DMA(hsd, data_buf, start_sector, num_sector); #endif #else #ifdef CONFIG_SDMMC_STM32_EMMC return HAL_MMC_ReadBlocks_IT(hsd, data_buf, start_sector, num_sector); #else return HAL_SD_ReadBlocks_IT(hsd, data_buf, start_sector, 
num_sector); #endif #endif } static int stm32_sdmmc_access_read(struct disk_info *disk, uint8_t *data_buf, uint32_t start_sector, uint32_t num_sector) { const struct device *dev = disk->dev; struct stm32_sdmmc_priv *priv = dev->data; int err; k_sem_take(&priv->thread_lock, K_FOREVER); err = stm32_sdmmc_read_blocks(&priv->hsd, data_buf, start_sector, num_sector); if (err != HAL_OK) { LOG_ERR("sd read block failed %d", err); err = -EIO; goto end; } k_sem_take(&priv->sync, K_FOREVER); if (priv->status != DISK_STATUS_OK) { LOG_ERR("sd read error %d", priv->status); err = -EIO; goto end; } while (!stm32_sdmmc_is_card_in_transfer(&priv->hsd)) { } end: k_sem_give(&priv->thread_lock); return err; } static int stm32_sdmmc_write_blocks(HandleTypeDef *hsd, uint8_t *data_buf, uint32_t start_sector, uint32_t num_sector) { #if STM32_SDMMC_USE_DMA || IS_ENABLED(DT_PROP(DT_DRV_INST(0), idma)) #ifdef CONFIG_SDMMC_STM32_EMMC return HAL_MMC_WriteBlocks_DMA(hsd, data_buf, start_sector, num_sector); #else return HAL_SD_WriteBlocks_DMA(hsd, data_buf, start_sector, num_sector); #endif #else #ifdef CONFIG_SDMMC_STM32_EMMC return HAL_MMC_WriteBlocks_IT(hsd, data_buf, start_sector, num_sector); #else return HAL_SD_WriteBlocks_IT(hsd, data_buf, start_sector, num_sector); #endif #endif } static int stm32_sdmmc_access_write(struct disk_info *disk, const uint8_t *data_buf, uint32_t start_sector, uint32_t num_sector) { const struct device *dev = disk->dev; struct stm32_sdmmc_priv *priv = dev->data; int err; k_sem_take(&priv->thread_lock, K_FOREVER); err = stm32_sdmmc_write_blocks(&priv->hsd, (uint8_t *)data_buf, start_sector, num_sector); if (err != HAL_OK) { LOG_ERR("sd write block failed %d", err); err = -EIO; goto end; } k_sem_take(&priv->sync, K_FOREVER); if (priv->status != DISK_STATUS_OK) { LOG_ERR("sd write error %d", priv->status); err = -EIO; goto end; } while (!stm32_sdmmc_is_card_in_transfer(&priv->hsd)) { } end: k_sem_give(&priv->thread_lock); return err; } static int 
stm32_sdmmc_get_card_info(HandleTypeDef *hsd, CardInfoTypeDef *info) { #ifdef CONFIG_SDMMC_STM32_EMMC return HAL_MMC_GetCardInfo(hsd, info); #else return HAL_SD_GetCardInfo(hsd, info); #endif } static int stm32_sdmmc_access_ioctl(struct disk_info *disk, uint8_t cmd, void *buff) { const struct device *dev = disk->dev; struct stm32_sdmmc_priv *priv = dev->data; CardInfoTypeDef info; int err; switch (cmd) { case DISK_IOCTL_GET_SECTOR_COUNT: err = stm32_sdmmc_get_card_info(&priv->hsd, &info); if (err != HAL_OK) { return -EIO; } *(uint32_t *)buff = info.LogBlockNbr; break; case DISK_IOCTL_GET_SECTOR_SIZE: err = stm32_sdmmc_get_card_info(&priv->hsd, &info); if (err != HAL_OK) { return -EIO; } *(uint32_t *)buff = info.LogBlockSize; break; case DISK_IOCTL_GET_ERASE_BLOCK_SZ: *(uint32_t *)buff = 1; break; case DISK_IOCTL_CTRL_SYNC: /* we use a blocking API, so nothing to do for sync */ break; case DISK_IOCTL_CTRL_INIT: return stm32_sdmmc_access_init(disk); case DISK_IOCTL_CTRL_DEINIT: return stm32_sdmmc_access_deinit(priv); default: return -EINVAL; } return 0; } static const struct disk_operations stm32_sdmmc_ops = { .init = stm32_sdmmc_access_init, .status = stm32_sdmmc_access_status, .read = stm32_sdmmc_access_read, .write = stm32_sdmmc_access_write, .ioctl = stm32_sdmmc_access_ioctl, }; static struct disk_info stm32_sdmmc_info = { .name = CONFIG_SDMMC_VOLUME_NAME, .ops = &stm32_sdmmc_ops, }; #ifdef CONFIG_SDMMC_STM32_EMMC static bool stm32_sdmmc_card_present(struct stm32_sdmmc_priv *priv) { return true; } #else /* CONFIG_SDMMC_STM32_EMMC */ /* * Check if the card is present or not. If no card detect gpio is set, assume * the card is present. If reading the gpio fails for some reason, assume the * card is there. 
*/ static bool stm32_sdmmc_card_present(struct stm32_sdmmc_priv *priv) { int err; if (!priv->cd.port) { return true; } err = gpio_pin_get_dt(&priv->cd); if (err < 0) { LOG_WRN("reading card detect failed %d", err); return true; } return err; } static void stm32_sdmmc_cd_handler(struct k_work *item) { struct stm32_sdmmc_priv *priv = CONTAINER_OF(item, struct stm32_sdmmc_priv, work); if (stm32_sdmmc_card_present(priv)) { LOG_DBG("card inserted"); priv->status = DISK_STATUS_UNINIT; } else { LOG_DBG("card removed"); stm32_sdmmc_access_deinit(priv); priv->status = DISK_STATUS_NOMEDIA; } } static void stm32_sdmmc_cd_callback(const struct device *gpiodev, struct gpio_callback *cb, uint32_t pin) { struct stm32_sdmmc_priv *priv = CONTAINER_OF(cb, struct stm32_sdmmc_priv, cd_cb); k_work_submit(&priv->work); } static int stm32_sdmmc_card_detect_init(struct stm32_sdmmc_priv *priv) { int err; if (!priv->cd.port) { return 0; } if (!gpio_is_ready_dt(&priv->cd)) { return -ENODEV; } gpio_init_callback(&priv->cd_cb, stm32_sdmmc_cd_callback, 1 << priv->cd.pin); err = gpio_add_callback(priv->cd.port, &priv->cd_cb); if (err) { return err; } err = gpio_pin_configure_dt(&priv->cd, GPIO_INPUT); if (err) { goto remove_callback; } err = gpio_pin_interrupt_configure_dt(&priv->cd, GPIO_INT_EDGE_BOTH); if (err) { goto unconfigure_pin; } return 0; unconfigure_pin: gpio_pin_configure_dt(&priv->cd, GPIO_DISCONNECTED); remove_callback: gpio_remove_callback(priv->cd.port, &priv->cd_cb); return err; } static int stm32_sdmmc_card_detect_uninit(struct stm32_sdmmc_priv *priv) { if (!priv->cd.port) { return 0; } gpio_pin_interrupt_configure_dt(&priv->cd, GPIO_INT_MODE_DISABLED); gpio_pin_configure_dt(&priv->cd, GPIO_DISCONNECTED); gpio_remove_callback(priv->cd.port, &priv->cd_cb); return 0; } #endif /* !CONFIG_SDMMC_STM32_EMMC */ static int stm32_sdmmc_pwr_init(struct stm32_sdmmc_priv *priv) { int err; if (!priv->pe.port) { return 0; } if (!gpio_is_ready_dt(&priv->pe)) { return -ENODEV; } err = 
gpio_pin_configure_dt(&priv->pe, GPIO_OUTPUT_ACTIVE); if (err) { return err; } k_sleep(K_MSEC(50)); return 0; } static int stm32_sdmmc_pwr_uninit(struct stm32_sdmmc_priv *priv) { if (!priv->pe.port) { return 0; } gpio_pin_configure_dt(&priv->pe, GPIO_DISCONNECTED); return 0; } static int disk_stm32_sdmmc_init(const struct device *dev) { struct stm32_sdmmc_priv *priv = dev->data; const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); int err; if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } if (!device_is_ready(priv->reset.dev)) { LOG_ERR("reset control device not ready"); return -ENODEV; } /* Configure dt provided device signals when available */ err = pinctrl_apply_state(priv->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } priv->irq_config(dev); /* Initialize semaphores */ k_sem_init(&priv->thread_lock, 1, 1); k_sem_init(&priv->sync, 0, 1); #if !defined(CONFIG_SDMMC_STM32_EMMC) k_work_init(&priv->work, stm32_sdmmc_cd_handler); err = stm32_sdmmc_card_detect_init(priv); if (err) { return err; } #endif err = stm32_sdmmc_pwr_init(priv); if (err) { goto err_card_detect; } if (stm32_sdmmc_card_present(priv)) { priv->status = DISK_STATUS_UNINIT; } else { priv->status = DISK_STATUS_NOMEDIA; } stm32_sdmmc_info.dev = dev; err = disk_access_register(&stm32_sdmmc_info); if (err) { goto err_pwr; } return 0; err_pwr: stm32_sdmmc_pwr_uninit(priv); err_card_detect: #if !defined(CONFIG_SDMMC_STM32_EMMC) stm32_sdmmc_card_detect_uninit(priv); #endif return err; } #if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) #if STM32_SDMMC_USE_DMA #define SDMMC_DMA_CHANNEL_INIT(dir, dir_cap) \ .dev = DEVICE_DT_GET(STM32_DMA_CTLR(0, dir)), \ .channel = DT_INST_DMAS_CELL_BY_NAME(0, dir, channel), \ .channel_nb = DT_DMAS_CELL_BY_NAME( \ DT_DRV_INST(0), dir, channel), \ .reg = (DMA_TypeDef *)DT_REG_ADDR( \ DT_PHANDLE_BY_NAME(DT_DRV_INST(0), dmas, dir)), \ .cfg = { \ .dma_slot = STM32_DMA_SLOT(0, dir, slot), \ .channel_priority = 
STM32_DMA_CONFIG_PRIORITY( \ STM32_DMA_CHANNEL_CONFIG(0, dir)), \ .dma_callback = stm32_sdmmc_dma_cb, \ .linked_channel = STM32_DMA_HAL_OVERRIDE, \ }, \ #define SDMMC_DMA_CHANNEL(dir, DIR) \ .dma_##dir = { \ COND_CODE_1(DT_INST_DMAS_HAS_NAME(0, dir), \ (SDMMC_DMA_CHANNEL_INIT(dir, DIR)), \ (NULL)) \ }, #else #define SDMMC_DMA_CHANNEL(dir, DIR) #endif PINCTRL_DT_INST_DEFINE(0); static void stm32_sdmmc_irq_config_func(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), stm32_sdmmc_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } #if DT_INST_PROP(0, bus_width) == 1 #define SDMMC_BUS_WIDTH SDMMC_BUS_WIDE_1B #elif DT_INST_PROP(0, bus_width) == 4 #define SDMMC_BUS_WIDTH SDMMC_BUS_WIDE_4B #elif DT_INST_PROP(0, bus_width) == 8 #define SDMMC_BUS_WIDTH SDMMC_BUS_WIDE_8B #endif /* DT_INST_PROP(0, bus_width) */ static struct stm32_pclken pclken_sdmmc[] = STM32_DT_INST_CLOCKS(0); static struct stm32_sdmmc_priv stm32_sdmmc_priv_1 = { .irq_config = stm32_sdmmc_irq_config_func, .hsd = { .Instance = (MMC_TypeDef *)DT_INST_REG_ADDR(0), .Init.BusWide = SDMMC_BUS_WIDTH, #if DT_INST_NODE_HAS_PROP(0, clk_div) .Init.ClockDiv = DT_INST_PROP(0, clk_div), #endif }, #if DT_INST_NODE_HAS_PROP(0, cd_gpios) .cd = GPIO_DT_SPEC_INST_GET(0, cd_gpios), #endif #if DT_INST_NODE_HAS_PROP(0, pwr_gpios) .pe = GPIO_DT_SPEC_INST_GET(0, pwr_gpios), #endif .pclken = pclken_sdmmc, .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), .reset = RESET_DT_SPEC_INST_GET(0), SDMMC_DMA_CHANNEL(rx, RX) SDMMC_DMA_CHANNEL(tx, TX) }; DEVICE_DT_INST_DEFINE(0, disk_stm32_sdmmc_init, NULL, &stm32_sdmmc_priv_1, NULL, POST_KERNEL, CONFIG_SD_INIT_PRIORITY, NULL); #endif ```
/content/code_sandbox/drivers/disk/sdmmc_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,364
```c /* * * Derived from FreeBSD original driver made by Jim Harris */ #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <stdio.h> #include "nvme.h" uint32_t nvme_namespace_get_sector_size(struct nvme_namespace *ns) { uint8_t flbas_fmt, lbads; flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) & NVME_NS_DATA_FLBAS_FORMAT_MASK; lbads = (ns->data.lbaf[flbas_fmt] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) & NVME_NS_DATA_LBAF_LBADS_MASK; return 1 << lbads; } uint64_t nvme_namespace_get_num_sectors(struct nvme_namespace *ns) { return ns->data.nsze; } uint64_t nvme_namespace_get_size(struct nvme_namespace *ns) { return nvme_namespace_get_num_sectors(ns) * nvme_namespace_get_sector_size(ns); } uint32_t nvme_namespace_get_flags(struct nvme_namespace *ns) { return ns->flags; } const char *nvme_namespace_get_serial_number(struct nvme_namespace *ns) { return (const char *)ns->ctrlr->cdata.sn; } const char *nvme_namespace_get_model_number(struct nvme_namespace *ns) { return (const char *)ns->ctrlr->cdata.mn; } const struct nvme_namespace_data * nvme_namespace_get_data(struct nvme_namespace *ns) { return &ns->data; } uint32_t nvme_namespace_get_stripesize(struct nvme_namespace *ns) { if (((ns->data.nsfeat >> NVME_NS_DATA_NSFEAT_NPVALID_SHIFT) & NVME_NS_DATA_NSFEAT_NPVALID_MASK) != 0) { uint32_t ss = nvme_namespace_get_sector_size(ns); if (ns->data.npwa != 0) { return (ns->data.npwa + 1) * ss; } else if (ns->data.npwg != 0) { return (ns->data.npwg + 1) * ss; } } return ns->boundary; } int nvme_namespace_construct(struct nvme_namespace *ns, uint32_t id, struct nvme_controller *ctrlr) { struct nvme_completion_poll_status status = NVME_CPL_STATUS_POLL_INIT(status); uint8_t flbas_fmt; uint8_t vwc_present; ns->ctrlr = ctrlr; ns->id = id; nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if 
(nvme_cpl_status_is_error(&status)) { LOG_DBG("Identifying NS id %d failed", id); return -EIO; } nvme_namespace_data_swapbytes(&ns->data); if (nvme_namespace_get_num_sectors(ns) == 0) { LOG_DBG("Namespace %d not present", id); return -ENODEV; } flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) & NVME_NS_DATA_FLBAS_FORMAT_MASK; /* Note: format is a 0-based value, so > is appropriate here not >=. */ if (flbas_fmt > ns->data.nlbaf) { LOG_DBG("NS id %d: lba format %d exceeds number supported (%d)", id, flbas_fmt, ns->data.nlbaf + 1); return -EIO; } ns->boundary = ns->data.noiob * nvme_namespace_get_sector_size(ns); if (nvme_controller_has_dataset_mgmt(ctrlr)) { ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED; } vwc_present = (ctrlr->cdata.vwc >> NVME_CTRLR_DATA_VWC_PRESENT_SHIFT) & NVME_CTRLR_DATA_VWC_PRESENT_MASK; if (vwc_present) { ns->flags |= NVME_NS_FLUSH_SUPPORTED; } snprintf(ns->name, NVME_NAMESPACE_NAME_MAX_LENGTH, "nvme%dn%d", ctrlr->id, ns->id-1); if (nvme_namespace_disk_setup(ns, &ns->disk) != 0) { LOG_ERR("Could not register no disk subsystem"); } return 0; } ```
/content/code_sandbox/drivers/disk/nvme/nvme_namespace.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
984
```objective-c /* */ #ifndef ZEPHYR_DRIVERS_DISK_NVME_NHME_HELPERS_H_ #define ZEPHYR_DRIVERS_DISK_NVME_NHME_HELPERS_H_ #define NVME_GONE 0xfffffffful /* * Macros to deal with NVME revisions, as defined VS register */ #define NVME_REV(x, y) (((x) << 16) | ((y) << 8)) #define NVME_MAJOR(r) (((r) >> 16) & 0xffff) #define NVME_MINOR(r) (((r) >> 8) & 0xff) /* * Use to mark a command to apply to all namespaces, or to retrieve global * log pages. */ #define NVME_GLOBAL_NAMESPACE_TAG ((uint32_t)0xFFFFFFFF) /* Many items are expressed in terms of power of two times MPS */ #define NVME_MPS_SHIFT 12 /* Register field definitions */ #define NVME_CAP_LO_REG_MQES_SHIFT (0) #define NVME_CAP_LO_REG_MQES_MASK (0xFFFF) #define NVME_CAP_LO_REG_CQR_SHIFT (16) #define NVME_CAP_LO_REG_CQR_MASK (0x1) #define NVME_CAP_LO_REG_AMS_SHIFT (17) #define NVME_CAP_LO_REG_AMS_MASK (0x3) #define NVME_CAP_LO_REG_TO_SHIFT (24) #define NVME_CAP_LO_REG_TO_MASK (0xFF) #define NVME_CAP_LO_MQES(x) \ (((x) >> NVME_CAP_LO_REG_MQES_SHIFT) & NVME_CAP_LO_REG_MQES_MASK) #define NVME_CAP_LO_CQR(x) \ (((x) >> NVME_CAP_LO_REG_CQR_SHIFT) & NVME_CAP_LO_REG_CQR_MASK) #define NVME_CAP_LO_AMS(x) \ (((x) >> NVME_CAP_LO_REG_AMS_SHIFT) & NVME_CAP_LO_REG_AMS_MASK) #define NVME_CAP_LO_TO(x) \ (((x) >> NVME_CAP_LO_REG_TO_SHIFT) & NVME_CAP_LO_REG_TO_MASK) #define NVME_CAP_HI_REG_DSTRD_SHIFT (0) #define NVME_CAP_HI_REG_DSTRD_MASK (0xF) #define NVME_CAP_HI_REG_NSSRS_SHIFT (4) #define NVME_CAP_HI_REG_NSSRS_MASK (0x1) #define NVME_CAP_HI_REG_CSS_SHIFT (5) #define NVME_CAP_HI_REG_CSS_MASK (0xff) #define NVME_CAP_HI_REG_CSS_NVM_SHIFT (5) #define NVME_CAP_HI_REG_CSS_NVM_MASK (0x1) #define NVME_CAP_HI_REG_BPS_SHIFT (13) #define NVME_CAP_HI_REG_BPS_MASK (0x1) #define NVME_CAP_HI_REG_MPSMIN_SHIFT (16) #define NVME_CAP_HI_REG_MPSMIN_MASK (0xF) #define NVME_CAP_HI_REG_MPSMAX_SHIFT (20) #define NVME_CAP_HI_REG_MPSMAX_MASK (0xF) #define NVME_CAP_HI_REG_PMRS_SHIFT (24) #define NVME_CAP_HI_REG_PMRS_MASK (0x1) #define 
NVME_CAP_HI_REG_CMBS_SHIFT (25) #define NVME_CAP_HI_REG_CMBS_MASK (0x1) #define NVME_CAP_HI_DSTRD(x) \ (((x) >> NVME_CAP_HI_REG_DSTRD_SHIFT) & NVME_CAP_HI_REG_DSTRD_MASK) #define NVME_CAP_HI_NSSRS(x) \ (((x) >> NVME_CAP_HI_REG_NSSRS_SHIFT) & NVME_CAP_HI_REG_NSSRS_MASK) #define NVME_CAP_HI_CSS(x) \ (((x) >> NVME_CAP_HI_REG_CSS_SHIFT) & NVME_CAP_HI_REG_CSS_MASK) #define NVME_CAP_HI_CSS_NVM(x) \ (((x) >> NVME_CAP_HI_REG_CSS_NVM_SHIFT) & NVME_CAP_HI_REG_CSS_NVM_MASK) #define NVME_CAP_HI_BPS(x) \ (((x) >> NVME_CAP_HI_REG_BPS_SHIFT) & NVME_CAP_HI_REG_BPS_MASK) #define NVME_CAP_HI_MPSMIN(x) \ (((x) >> NVME_CAP_HI_REG_MPSMIN_SHIFT) & NVME_CAP_HI_REG_MPSMIN_MASK) #define NVME_CAP_HI_MPSMAX(x) \ (((x) >> NVME_CAP_HI_REG_MPSMAX_SHIFT) & NVME_CAP_HI_REG_MPSMAX_MASK) #define NVME_CAP_HI_PMRS(x) \ (((x) >> NVME_CAP_HI_REG_PMRS_SHIFT) & NVME_CAP_HI_REG_PMRS_MASK) #define NVME_CAP_HI_CMBS(x) \ (((x) >> NVME_CAP_HI_REG_CMBS_SHIFT) & NVME_CAP_HI_REG_CMBS_MASK) #define NVME_CC_REG_EN_SHIFT (0) #define NVME_CC_REG_EN_MASK (0x1) #define NVME_CC_REG_CSS_SHIFT (4) #define NVME_CC_REG_CSS_MASK (0x7) #define NVME_CC_REG_MPS_SHIFT (7) #define NVME_CC_REG_MPS_MASK (0xF) #define NVME_CC_REG_AMS_SHIFT (11) #define NVME_CC_REG_AMS_MASK (0x7) #define NVME_CC_REG_SHN_SHIFT (14) #define NVME_CC_REG_SHN_MASK (0x3) #define NVME_CC_REG_IOSQES_SHIFT (16) #define NVME_CC_REG_IOSQES_MASK (0xF) #define NVME_CC_REG_IOCQES_SHIFT (20) #define NVME_CC_REG_IOCQES_MASK (0xF) #define NVME_CSTS_REG_RDY_SHIFT (0) #define NVME_CSTS_REG_RDY_MASK (0x1) #define NVME_CSTS_REG_CFS_SHIFT (1) #define NVME_CSTS_REG_CFS_MASK (0x1) #define NVME_CSTS_REG_SHST_SHIFT (2) #define NVME_CSTS_REG_SHST_MASK (0x3) #define NVME_CSTS_REG_NVSRO_SHIFT (4) #define NVME_CSTS_REG_NVSRO_MASK (0x1) #define NVME_CSTS_REG_PP_SHIFT (5) #define NVME_CSTS_REG_PP_MASK (0x1) #define NVME_CSTS_GET_SHST(csts) \ (((csts) >> NVME_CSTS_REG_SHST_SHIFT) & NVME_CSTS_REG_SHST_MASK) #define NVME_AQA_REG_ASQS_SHIFT (0) #define NVME_AQA_REG_ASQS_MASK (0xFFF) 
#define NVME_AQA_REG_ACQS_SHIFT (16) #define NVME_AQA_REG_ACQS_MASK (0xFFF) #define NVME_PMRCAP_REG_RDS_SHIFT (3) #define NVME_PMRCAP_REG_RDS_MASK (0x1) #define NVME_PMRCAP_REG_WDS_SHIFT (4) #define NVME_PMRCAP_REG_WDS_MASK (0x1) #define NVME_PMRCAP_REG_BIR_SHIFT (5) #define NVME_PMRCAP_REG_BIR_MASK (0x7) #define NVME_PMRCAP_REG_PMRTU_SHIFT (8) #define NVME_PMRCAP_REG_PMRTU_MASK (0x3) #define NVME_PMRCAP_REG_PMRWBM_SHIFT (10) #define NVME_PMRCAP_REG_PMRWBM_MASK (0xf) #define NVME_PMRCAP_REG_PMRTO_SHIFT (16) #define NVME_PMRCAP_REG_PMRTO_MASK (0xff) #define NVME_PMRCAP_REG_CMSS_SHIFT (24) #define NVME_PMRCAP_REG_CMSS_MASK (0x1) #define NVME_PMRCAP_RDS(x) \ (((x) >> NVME_PMRCAP_REG_RDS_SHIFT) & NVME_PMRCAP_REG_RDS_MASK) #define NVME_PMRCAP_WDS(x) \ (((x) >> NVME_PMRCAP_REG_WDS_SHIFT) & NVME_PMRCAP_REG_WDS_MASK) #define NVME_PMRCAP_BIR(x) \ (((x) >> NVME_PMRCAP_REG_BIR_SHIFT) & NVME_PMRCAP_REG_BIR_MASK) #define NVME_PMRCAP_PMRTU(x) \ (((x) >> NVME_PMRCAP_REG_PMRTU_SHIFT) & NVME_PMRCAP_REG_PMRTU_MASK) #define NVME_PMRCAP_PMRWBM(x) \ (((x) >> NVME_PMRCAP_REG_PMRWBM_SHIFT) & NVME_PMRCAP_REG_PMRWBM_MASK) #define NVME_PMRCAP_PMRTO(x) \ (((x) >> NVME_PMRCAP_REG_PMRTO_SHIFT) & NVME_PMRCAP_REG_PMRTO_MASK) #define NVME_PMRCAP_CMSS(x) \ (((x) >> NVME_PMRCAP_REG_CMSS_SHIFT) & NVME_PMRCAP_REG_CMSS_MASK) /* Command field definitions */ #define NVME_CMD_FUSE_SHIFT (8) #define NVME_CMD_FUSE_MASK (0x3) #define NVME_STATUS_P_SHIFT (0) #define NVME_STATUS_P_MASK (0x1) #define NVME_STATUS_SC_SHIFT (1) #define NVME_STATUS_SC_MASK (0xFF) #define NVME_STATUS_SCT_SHIFT (9) #define NVME_STATUS_SCT_MASK (0x7) #define NVME_STATUS_CRD_SHIFT (12) #define NVME_STATUS_CRD_MASK (0x3) #define NVME_STATUS_M_SHIFT (14) #define NVME_STATUS_M_MASK (0x1) #define NVME_STATUS_DNR_SHIFT (15) #define NVME_STATUS_DNR_MASK (0x1) #define NVME_STATUS_GET_P(st) \ (((st) >> NVME_STATUS_P_SHIFT) & NVME_STATUS_P_MASK) #define NVME_STATUS_GET_SC(st) \ (((st) >> NVME_STATUS_SC_SHIFT) & NVME_STATUS_SC_MASK) #define 
NVME_STATUS_GET_SCT(st) \ (((st) >> NVME_STATUS_SCT_SHIFT) & NVME_STATUS_SCT_MASK) #define NVME_STATUS_GET_CRD(st) \ (((st) >> NVME_STATUS_CRD_SHIFT) & NVME_STATUS_CRD_MASK) #define NVME_STATUS_GET_M(st) \ (((st) >> NVME_STATUS_M_SHIFT) & NVME_STATUS_M_MASK) #define NVME_STATUS_GET_DNR(st) \ (((st) >> NVME_STATUS_DNR_SHIFT) & NVME_STATUS_DNR_MASK) /** Controller Multi-path I/O and Namespace Sharing Capabilities */ /* More then one port */ #define NVME_CTRLR_DATA_MIC_MPORTS_SHIFT (0) #define NVME_CTRLR_DATA_MIC_MPORTS_MASK (0x1) /* More then one controller */ #define NVME_CTRLR_DATA_MIC_MCTRLRS_SHIFT (1) #define NVME_CTRLR_DATA_MIC_MCTRLRS_MASK (0x1) /* SR-IOV Virtual Function */ #define NVME_CTRLR_DATA_MIC_SRIOVVF_SHIFT (2) #define NVME_CTRLR_DATA_MIC_SRIOVVF_MASK (0x1) /* Asymmetric Namespace Access Reporting */ #define NVME_CTRLR_DATA_MIC_ANAR_SHIFT (3) #define NVME_CTRLR_DATA_MIC_ANAR_MASK (0x1) /** OAES - Optional Asynchronous Events Supported */ /* supports Namespace Attribute Notices event */ #define NVME_CTRLR_DATA_OAES_NS_ATTR_SHIFT (8) #define NVME_CTRLR_DATA_OAES_NS_ATTR_MASK (0x1) /* supports Firmware Activation Notices event */ #define NVME_CTRLR_DATA_OAES_FW_ACTIVATE_SHIFT (9) #define NVME_CTRLR_DATA_OAES_FW_ACTIVATE_MASK (0x1) /* supports Asymmetric Namespace Access Change Notices event */ #define NVME_CTRLR_DATA_OAES_ASYM_NS_CHANGE_SHIFT (11) #define NVME_CTRLR_DATA_OAES_ASYM_NS_CHANGE_MASK (0x1) /* supports Predictable Latency Event Aggregate Log Change Notices event */ #define NVME_CTRLR_DATA_OAES_PREDICT_LATENCY_SHIFT (12) #define NVME_CTRLR_DATA_OAES_PREDICT_LATENCY_MASK (0x1) /* supports LBA Status Information Notices event */ #define NVME_CTRLR_DATA_OAES_LBA_STATUS_SHIFT (13) #define NVME_CTRLR_DATA_OAES_LBA_STATUS_MASK (0x1) /* supports Endurance Group Event Aggregate Log Page Changes Notices event */ #define NVME_CTRLR_DATA_OAES_ENDURANCE_GROUP_SHIFT (14) #define NVME_CTRLR_DATA_OAES_ENDURANCE_GROUP_MASK (0x1) /* supports Normal NVM Subsystem 
Shutdown event */ #define NVME_CTRLR_DATA_OAES_NORMAL_SHUTDOWN_SHIFT (15) #define NVME_CTRLR_DATA_OAES_NORMAL_SHUTDOWN_MASK (0x1) /* supports Zone Descriptor Changed Notices event */ #define NVME_CTRLR_DATA_OAES_ZONE_DESC_CHANGE_SHIFT (27) #define NVME_CTRLR_DATA_OAES_ZONE_DESC_CHANGE_MASK (0x1) /* supports Discovery Log Page Change Notification event */ #define NVME_CTRLR_DATA_OAES_LOG_PAGE_CHANGE_SHIFT (31) #define NVME_CTRLR_DATA_OAES_LOG_PAGE_CHANGE_MASK (0x1) /** OACS - optional admin command support */ /* supports security send/receive commands */ #define NVME_CTRLR_DATA_OACS_SECURITY_SHIFT (0) #define NVME_CTRLR_DATA_OACS_SECURITY_MASK (0x1) /* supports format nvm command */ #define NVME_CTRLR_DATA_OACS_FORMAT_SHIFT (1) #define NVME_CTRLR_DATA_OACS_FORMAT_MASK (0x1) /* supports firmware activate/download commands */ #define NVME_CTRLR_DATA_OACS_FIRMWARE_SHIFT (2) #define NVME_CTRLR_DATA_OACS_FIRMWARE_MASK (0x1) /* supports namespace management commands */ #define NVME_CTRLR_DATA_OACS_NSMGMT_SHIFT (3) #define NVME_CTRLR_DATA_OACS_NSMGMT_MASK (0x1) /* supports Device Self-test command */ #define NVME_CTRLR_DATA_OACS_SELFTEST_SHIFT (4) #define NVME_CTRLR_DATA_OACS_SELFTEST_MASK (0x1) /* supports Directives */ #define NVME_CTRLR_DATA_OACS_DIRECTIVES_SHIFT (5) #define NVME_CTRLR_DATA_OACS_DIRECTIVES_MASK (0x1) /* supports NVMe-MI Send/Receive */ #define NVME_CTRLR_DATA_OACS_NVMEMI_SHIFT (6) #define NVME_CTRLR_DATA_OACS_NVMEMI_MASK (0x1) /* supports Virtualization Management */ #define NVME_CTRLR_DATA_OACS_VM_SHIFT (7) #define NVME_CTRLR_DATA_OACS_VM_MASK (0x1) /* supports Doorbell Buffer Config */ #define NVME_CTRLR_DATA_OACS_DBBUFFER_SHIFT (8) #define NVME_CTRLR_DATA_OACS_DBBUFFER_MASK (0x1) /* supports Get LBA Status */ #define NVME_CTRLR_DATA_OACS_GETLBA_SHIFT (9) #define NVME_CTRLR_DATA_OACS_GETLBA_MASK (0x1) /** firmware updates */ /* first slot is read-only */ #define NVME_CTRLR_DATA_FRMW_SLOT1_RO_SHIFT (0) #define NVME_CTRLR_DATA_FRMW_SLOT1_RO_MASK (0x1) 
/* number of firmware slots */ #define NVME_CTRLR_DATA_FRMW_NUM_SLOTS_SHIFT (1) #define NVME_CTRLR_DATA_FRMW_NUM_SLOTS_MASK (0x7) /* firmware activation without reset */ #define NVME_CTRLR_DATA_FRMW_ACT_WO_RESET_SHIFT (4) #define NVME_CTRLR_DATA_FRMW_ACT_WO_RESET_MASK (0x1) /** log page attributes */ /* per namespace smart/health log page */ #define NVME_CTRLR_DATA_LPA_NS_SMART_SHIFT (0) #define NVME_CTRLR_DATA_LPA_NS_SMART_MASK (0x1) /** AVSCC - admin vendor specific command configuration */ /* admin vendor specific commands use spec format */ #define NVME_CTRLR_DATA_AVSCC_SPEC_FORMAT_SHIFT (0) #define NVME_CTRLR_DATA_AVSCC_SPEC_FORMAT_MASK (0x1) /** Autonomous Power State Transition Attributes */ /* Autonomous Power State Transitions supported */ #define NVME_CTRLR_DATA_APSTA_APST_SUPP_SHIFT (0) #define NVME_CTRLR_DATA_APSTA_APST_SUPP_MASK (0x1) /** Sanitize Capabilities */ /* Crypto Erase Support */ #define NVME_CTRLR_DATA_SANICAP_CES_SHIFT (0) #define NVME_CTRLR_DATA_SANICAP_CES_MASK (0x1) /* Block Erase Support */ #define NVME_CTRLR_DATA_SANICAP_BES_SHIFT (1) #define NVME_CTRLR_DATA_SANICAP_BES_MASK (0x1) /* Overwrite Support */ #define NVME_CTRLR_DATA_SANICAP_OWS_SHIFT (2) #define NVME_CTRLR_DATA_SANICAP_OWS_MASK (0x1) /* No-Deallocate Inhibited */ #define NVME_CTRLR_DATA_SANICAP_NDI_SHIFT (29) #define NVME_CTRLR_DATA_SANICAP_NDI_MASK (0x1) /* No-Deallocate Modifies Media After Sanitize */ #define NVME_CTRLR_DATA_SANICAP_NODMMAS_SHIFT (30) #define NVME_CTRLR_DATA_SANICAP_NODMMAS_MASK (0x3) #define NVME_CTRLR_DATA_SANICAP_NODMMAS_UNDEF (0) #define NVME_CTRLR_DATA_SANICAP_NODMMAS_NO (1) #define NVME_CTRLR_DATA_SANICAP_NODMMAS_YES (2) /** submission queue entry size */ #define NVME_CTRLR_DATA_SQES_MIN_SHIFT (0) #define NVME_CTRLR_DATA_SQES_MIN_MASK (0xF) #define NVME_CTRLR_DATA_SQES_MAX_SHIFT (4) #define NVME_CTRLR_DATA_SQES_MAX_MASK (0xF) /** completion queue entry size */ #define NVME_CTRLR_DATA_CQES_MIN_SHIFT (0) #define NVME_CTRLR_DATA_CQES_MIN_MASK (0xF) 
#define NVME_CTRLR_DATA_CQES_MAX_SHIFT (4) #define NVME_CTRLR_DATA_CQES_MAX_MASK (0xF) /** optional nvm command support */ #define NVME_CTRLR_DATA_ONCS_COMPARE_SHIFT (0) #define NVME_CTRLR_DATA_ONCS_COMPARE_MASK (0x1) #define NVME_CTRLR_DATA_ONCS_WRITE_UNC_SHIFT (1) #define NVME_CTRLR_DATA_ONCS_WRITE_UNC_MASK (0x1) #define NVME_CTRLR_DATA_ONCS_DSM_SHIFT (2) #define NVME_CTRLR_DATA_ONCS_DSM_MASK (0x1) #define NVME_CTRLR_DATA_ONCS_WRZERO_SHIFT (3) #define NVME_CTRLR_DATA_ONCS_WRZERO_MASK (0x1) #define NVME_CTRLR_DATA_ONCS_SAVEFEAT_SHIFT (4) #define NVME_CTRLR_DATA_ONCS_SAVEFEAT_MASK (0x1) #define NVME_CTRLR_DATA_ONCS_RESERV_SHIFT (5) #define NVME_CTRLR_DATA_ONCS_RESERV_MASK (0x1) #define NVME_CTRLR_DATA_ONCS_TIMESTAMP_SHIFT (6) #define NVME_CTRLR_DATA_ONCS_TIMESTAMP_MASK (0x1) #define NVME_CTRLR_DATA_ONCS_VERIFY_SHIFT (7) #define NVME_CTRLR_DATA_ONCS_VERIFY_MASK (0x1) /** Fused Operation Support */ #define NVME_CTRLR_DATA_FUSES_CNW_SHIFT (0) #define NVME_CTRLR_DATA_FUSES_CNW_MASK (0x1) /** Format NVM Attributes */ #define NVME_CTRLR_DATA_FNA_FORMAT_ALL_SHIFT (0) #define NVME_CTRLR_DATA_FNA_FORMAT_ALL_MASK (0x1) #define NVME_CTRLR_DATA_FNA_ERASE_ALL_SHIFT (1) #define NVME_CTRLR_DATA_FNA_ERASE_ALL_MASK (0x1) #define NVME_CTRLR_DATA_FNA_CRYPTO_ERASE_SHIFT (2) #define NVME_CTRLR_DATA_FNA_CRYPTO_ERASE_MASK (0x1) /** volatile write cache */ /* volatile write cache present */ #define NVME_CTRLR_DATA_VWC_PRESENT_SHIFT (0) #define NVME_CTRLR_DATA_VWC_PRESENT_MASK (0x1) /* flush all namespaces supported */ #define NVME_CTRLR_DATA_VWC_ALL_SHIFT (1) #define NVME_CTRLR_DATA_VWC_ALL_MASK (0x3) #define NVME_CTRLR_DATA_VWC_ALL_UNKNOWN (0) #define NVME_CTRLR_DATA_VWC_ALL_NO (2) #define NVME_CTRLR_DATA_VWC_ALL_YES (3) /** namespace features */ /* thin provisioning */ #define NVME_NS_DATA_NSFEAT_THIN_PROV_SHIFT (0) #define NVME_NS_DATA_NSFEAT_THIN_PROV_MASK (0x1) /* NAWUN, NAWUPF, and NACWU fields are valid */ #define NVME_NS_DATA_NSFEAT_NA_FIELDS_SHIFT (1) #define 
NVME_NS_DATA_NSFEAT_NA_FIELDS_MASK (0x1) /* Deallocated or Unwritten Logical Block errors supported */ #define NVME_NS_DATA_NSFEAT_DEALLOC_SHIFT (2) #define NVME_NS_DATA_NSFEAT_DEALLOC_MASK (0x1) /* NGUID and EUI64 fields are not reusable */ #define NVME_NS_DATA_NSFEAT_NO_ID_REUSE_SHIFT (3) #define NVME_NS_DATA_NSFEAT_NO_ID_REUSE_MASK (0x1) /* NPWG, NPWA, NPDG, NPDA, and NOWS are valid */ #define NVME_NS_DATA_NSFEAT_NPVALID_SHIFT (4) #define NVME_NS_DATA_NSFEAT_NPVALID_MASK (0x1) /** formatted lba size */ #define NVME_NS_DATA_FLBAS_FORMAT_SHIFT (0) #define NVME_NS_DATA_FLBAS_FORMAT_MASK (0xF) #define NVME_NS_DATA_FLBAS_EXTENDED_SHIFT (4) #define NVME_NS_DATA_FLBAS_EXTENDED_MASK (0x1) /** metadata capabilities */ /* metadata can be transferred as part of data prp list */ #define NVME_NS_DATA_MC_EXTENDED_SHIFT (0) #define NVME_NS_DATA_MC_EXTENDED_MASK (0x1) /* metadata can be transferred with separate metadata pointer */ #define NVME_NS_DATA_MC_POINTER_SHIFT (1) #define NVME_NS_DATA_MC_POINTER_MASK (0x1) /** end-to-end data protection capabilities */ /* protection information type 1 */ #define NVME_NS_DATA_DPC_PIT1_SHIFT (0) #define NVME_NS_DATA_DPC_PIT1_MASK (0x1) /* protection information type 2 */ #define NVME_NS_DATA_DPC_PIT2_SHIFT (1) #define NVME_NS_DATA_DPC_PIT2_MASK (0x1) /* protection information type 3 */ #define NVME_NS_DATA_DPC_PIT3_SHIFT (2) #define NVME_NS_DATA_DPC_PIT3_MASK (0x1) /* first eight bytes of metadata */ #define NVME_NS_DATA_DPC_MD_START_SHIFT (3) #define NVME_NS_DATA_DPC_MD_START_MASK (0x1) /* last eight bytes of metadata */ #define NVME_NS_DATA_DPC_MD_END_SHIFT (4) #define NVME_NS_DATA_DPC_MD_END_MASK (0x1) /** end-to-end data protection type settings */ /* protection information type */ #define NVME_NS_DATA_DPS_PIT_SHIFT (0) #define NVME_NS_DATA_DPS_PIT_MASK (0x7) /* 1 == protection info transferred at start of metadata */ /* 0 == protection info transferred at end of metadata */ #define NVME_NS_DATA_DPS_MD_START_SHIFT (3) #define 
NVME_NS_DATA_DPS_MD_START_MASK (0x1) /** Namespace Multi-path I/O and Namespace Sharing Capabilities */ /* the namespace may be attached to two or more controllers */ #define NVME_NS_DATA_NMIC_MAY_BE_SHARED_SHIFT (0) #define NVME_NS_DATA_NMIC_MAY_BE_SHARED_MASK (0x1) /** Reservation Capabilities */ /* Persist Through Power Loss */ #define NVME_NS_DATA_RESCAP_PTPL_SHIFT (0) #define NVME_NS_DATA_RESCAP_PTPL_MASK (0x1) /* supports the Write Exclusive */ #define NVME_NS_DATA_RESCAP_WR_EX_SHIFT (1) #define NVME_NS_DATA_RESCAP_WR_EX_MASK (0x1) /* supports the Exclusive Access */ #define NVME_NS_DATA_RESCAP_EX_AC_SHIFT (2) #define NVME_NS_DATA_RESCAP_EX_AC_MASK (0x1) /* supports the Write Exclusive Registrants Only */ #define NVME_NS_DATA_RESCAP_WR_EX_RO_SHIFT (3) #define NVME_NS_DATA_RESCAP_WR_EX_RO_MASK (0x1) /* supports the Exclusive Access - Registrants Only */ #define NVME_NS_DATA_RESCAP_EX_AC_RO_SHIFT (4) #define NVME_NS_DATA_RESCAP_EX_AC_RO_MASK (0x1) /* supports the Write Exclusive All Registrants */ #define NVME_NS_DATA_RESCAP_WR_EX_AR_SHIFT (5) #define NVME_NS_DATA_RESCAP_WR_EX_AR_MASK (0x1) /* supports the Exclusive Access - All Registrants */ #define NVME_NS_DATA_RESCAP_EX_AC_AR_SHIFT (6) #define NVME_NS_DATA_RESCAP_EX_AC_AR_MASK (0x1) /* Ignore Existing Key is used as defined in revision 1.3 or later */ #define NVME_NS_DATA_RESCAP_IEKEY13_SHIFT (7) #define NVME_NS_DATA_RESCAP_IEKEY13_MASK (0x1) /** Format Progress Indicator */ /* percentage of the Format NVM command that remains to be completed */ #define NVME_NS_DATA_FPI_PERC_SHIFT (0) #define NVME_NS_DATA_FPI_PERC_MASK (0x7f) /* namespace supports the Format Progress Indicator */ #define NVME_NS_DATA_FPI_SUPP_SHIFT (7) #define NVME_NS_DATA_FPI_SUPP_MASK (0x1) /** Deallocate Logical Block Features */ /* deallocated logical block read behavior */ #define NVME_NS_DATA_DLFEAT_READ_SHIFT (0) #define NVME_NS_DATA_DLFEAT_READ_MASK (0x07) #define NVME_NS_DATA_DLFEAT_READ_NR (0x00) #define 
NVME_NS_DATA_DLFEAT_READ_00 (0x01) #define NVME_NS_DATA_DLFEAT_READ_FF (0x02) /* supports the Deallocate bit in the Write Zeroes */ #define NVME_NS_DATA_DLFEAT_DWZ_SHIFT (3) #define NVME_NS_DATA_DLFEAT_DWZ_MASK (0x01) /* Guard field for deallocated logical blocks is set to the CRC */ #define NVME_NS_DATA_DLFEAT_GCRC_SHIFT (4) #define NVME_NS_DATA_DLFEAT_GCRC_MASK (0x01) /** lba format support */ /* metadata size */ #define NVME_NS_DATA_LBAF_MS_SHIFT (0) #define NVME_NS_DATA_LBAF_MS_MASK (0xFFFF) /* lba data size */ #define NVME_NS_DATA_LBAF_LBADS_SHIFT (16) #define NVME_NS_DATA_LBAF_LBADS_MASK (0xFF) /* relative performance */ #define NVME_NS_DATA_LBAF_RP_SHIFT (24) #define NVME_NS_DATA_LBAF_RP_MASK (0x3) enum nvme_critical_warning_state { NVME_CRIT_WARN_ST_AVAILABLE_SPARE = 0x1, NVME_CRIT_WARN_ST_TEMPERATURE = 0x2, NVME_CRIT_WARN_ST_DEVICE_RELIABILITY = 0x4, NVME_CRIT_WARN_ST_READ_ONLY = 0x8, NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP = 0x10, }; #define NVME_CRIT_WARN_ST_RESERVED_MASK (0xE0) #define NVME_ASYNC_EVENT_NS_ATTRIBUTE (0x100) #define NVME_ASYNC_EVENT_FW_ACTIVATE (0x200) /* Helper macro to combine *_MASK and *_SHIFT defines */ #define NVMEB(name) (name##_MASK << name##_SHIFT) /* CC register SHN field values */ enum shn_value { NVME_SHN_NORMAL = 0x1, NVME_SHN_ABRUPT = 0x2, }; /* CSTS register SHST field values */ enum shst_value { NVME_SHST_NORMAL = 0x0, NVME_SHST_OCCURRING = 0x1, NVME_SHST_COMPLETE = 0x2, }; #define nvme_mmio_offsetof(reg) \ offsetof(struct nvme_registers, reg) #define nvme_mmio_read_4(b_a, reg) \ sys_read32((mm_reg_t)b_a + nvme_mmio_offsetof(reg)) #define nvme_mmio_write_4(b_a, reg, val) \ sys_write32(val, (mm_reg_t)b_a + nvme_mmio_offsetof(reg)) #define nvme_mmio_write_8(b_a, reg, val) \ do { \ sys_write32(val & 0xFFFFFFFF, \ (mm_reg_t)b_a + nvme_mmio_offsetof(reg)); \ sys_write32((val & 0xFFFFFFFF00000000ULL) >> 32, \ (mm_reg_t)b_a + nvme_mmio_offsetof(reg) + 4); \ } while (0) #define NVME_IS_BUFFER_DWORD_ALIGNED(_buf_addr) 
(!((uintptr_t)_buf_addr & 0x3)) #endif /* ZEPHYR_DRIVERS_DISK_NVME_NHME_HELPERS_H_ */ ```
/content/code_sandbox/drivers/disk/nvme/nvme_helpers.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,674
```c /* */ #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include "nvme.h" static int nvme_disk_status(struct disk_info *disk) { return 0; } static int nvme_disk_read(struct disk_info *disk, uint8_t *data_buf, uint32_t start_sector, uint32_t num_sector) { struct nvme_namespace *ns = CONTAINER_OF(disk->name, struct nvme_namespace, name[0]); struct nvme_completion_poll_status status = NVME_CPL_STATUS_POLL_INIT(status); struct nvme_request *request; uint32_t payload_size; int ret = 0; if (!NVME_IS_BUFFER_DWORD_ALIGNED(data_buf)) { LOG_WRN("Data buffer pointer needs to be 4-bytes aligned"); return -EINVAL; } nvme_lock(disk->dev); payload_size = num_sector * nvme_namespace_get_sector_size(ns); request = nvme_allocate_request_vaddr((void *)data_buf, payload_size, nvme_completion_poll_cb, &status); if (request == NULL) { ret = -ENOMEM; goto out; } nvme_namespace_read_cmd(&request->cmd, ns->id, start_sector, num_sector); /* We use only the first ioq atm * ToDo: use smp cpu id and use it to select ioq */ nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request); nvme_completion_poll(&status); if (nvme_cpl_status_is_error(&status)) { LOG_WRN("Reading at sector %u (count %d) on disk %s failed", start_sector, num_sector, ns->name); nvme_completion_print(&status.cpl); ret = -EIO; } out: nvme_unlock(disk->dev); return ret; } static int nvme_disk_write(struct disk_info *disk, const uint8_t *data_buf, uint32_t start_sector, uint32_t num_sector) { struct nvme_namespace *ns = CONTAINER_OF(disk->name, struct nvme_namespace, name[0]); struct nvme_completion_poll_status status = NVME_CPL_STATUS_POLL_INIT(status); struct nvme_request *request; uint32_t payload_size; int ret = 0; if (!NVME_IS_BUFFER_DWORD_ALIGNED(data_buf)) { LOG_WRN("Data buffer pointer needs to be 4-bytes aligned"); return -EINVAL; } nvme_lock(disk->dev); payload_size = num_sector * nvme_namespace_get_sector_size(ns); request = 
nvme_allocate_request_vaddr((void *)data_buf, payload_size, nvme_completion_poll_cb, &status); if (request == NULL) { ret = -ENOMEM; goto out; } nvme_namespace_write_cmd(&request->cmd, ns->id, start_sector, num_sector); /* We use only the first ioq atm * ToDo: use smp cpu id and use it to select ioq */ nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request); nvme_completion_poll(&status); if (nvme_cpl_status_is_error(&status)) { LOG_WRN("Writing at sector %u (count %d) on disk %s failed", start_sector, num_sector, ns->name); nvme_completion_print(&status.cpl); ret = -EIO; } out: nvme_unlock(disk->dev); return ret; } static int nvme_disk_flush(struct nvme_namespace *ns) { struct nvme_completion_poll_status status = NVME_CPL_STATUS_POLL_INIT(status); struct nvme_request *request; request = nvme_allocate_request_null(nvme_completion_poll_cb, &status); if (request == NULL) { return -ENOMEM; } nvme_namespace_flush_cmd(&request->cmd, ns->id); /* We use only the first ioq * ToDo: use smp cpu id and use it to select ioq */ nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request); nvme_completion_poll(&status); if (nvme_cpl_status_is_error(&status)) { LOG_ERR("Flushing disk %s failed", ns->name); nvme_completion_print(&status.cpl); return -EIO; } return 0; } static int nvme_disk_ioctl(struct disk_info *disk, uint8_t cmd, void *buff) { struct nvme_namespace *ns = CONTAINER_OF(disk->name, struct nvme_namespace, name[0]); int ret = 0; nvme_lock(disk->dev); switch (cmd) { case DISK_IOCTL_GET_SECTOR_COUNT: if (!buff) { ret = -EINVAL; break; } *(uint32_t *)buff = nvme_namespace_get_num_sectors(ns); break; case DISK_IOCTL_GET_SECTOR_SIZE: if (!buff) { ret = -EINVAL; break; } *(uint32_t *)buff = nvme_namespace_get_sector_size(ns); break; case DISK_IOCTL_GET_ERASE_BLOCK_SZ: if (!buff) { ret = -EINVAL; break; } *(uint32_t *)buff = nvme_namespace_get_sector_size(ns); break; case DISK_IOCTL_CTRL_DEINIT: case DISK_IOCTL_CTRL_SYNC: ret = nvme_disk_flush(ns); break; case 
DISK_IOCTL_CTRL_INIT: ret = 0; break; default: ret = -EINVAL; } nvme_unlock(disk->dev); return ret; } static int nvme_disk_init(struct disk_info *disk) { return nvme_disk_ioctl(disk, DISK_IOCTL_CTRL_INIT, NULL); } static const struct disk_operations nvme_disk_ops = { .init = nvme_disk_init, .status = nvme_disk_status, .read = nvme_disk_read, .write = nvme_disk_write, .ioctl = nvme_disk_ioctl, }; int nvme_namespace_disk_setup(struct nvme_namespace *ns, struct disk_info *disk) { disk->name = ns->name; disk->ops = &nvme_disk_ops; disk->dev = ns->ctrlr->dev; return disk_access_register(disk); } ```
/content/code_sandbox/drivers/disk/nvme/nvme_disk.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,408
```objective-c /* * * Derived from FreeBSD original driver made by Jim Harris * with contributions from Alexander Motin and Wojciech Macek */ #ifndef ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_ #define ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_ #include <zephyr/sys/slist.h> #include <zephyr/sys/byteorder.h> struct nvme_command { /* dword 0 */ struct _cdw0 { uint8_t opc; /* opcode */ uint8_t fuse : 2; /* fused operation */ uint8_t rsvd : 4; /* reserved */ uint8_t psdt : 2; /* PRP or SGL for Data Transfer */ uint16_t cid; /* command identifier */ } cdw0; /* dword 1 */ uint32_t nsid; /* namespace identifier */ /* dword 2-3 */ uint32_t cdw2; uint32_t cdw3; /* dword 4-5 */ uint64_t mptr; /* metadata pointer */ /* dword 6-7 and 8-9 */ struct _dptr { uint64_t prp1; /* prp entry 1 */ uint64_t prp2; /* prp entry 2 */ } dptr; /* data pointer */ /* dword 10 */ union { uint32_t cdw10; /* command-specific */ uint32_t ndt; /* Number of Dwords in Data transfer */ }; /* dword 11 */ union { uint32_t cdw11; /* command-specific */ uint32_t ndm; /* Number of Dwords in Metadata transfer */ }; /* dword 12-15 */ uint32_t cdw12; /* command-specific */ uint32_t cdw13; /* command-specific */ uint32_t cdw14; /* command-specific */ uint32_t cdw15; /* command-specific */ }; struct nvme_completion { /* dword 0 */ uint32_t cdw0; /* command-specific */ /* dword 1 */ uint32_t rsvd; /* dword 2 */ uint16_t sqhd; /* submission queue head pointer */ uint16_t sqid; /* submission queue identifier */ /* dword 3 */ uint16_t cid; /* command identifier */ uint16_t status; } __aligned(8); struct nvme_completion_poll_status { int status; struct nvme_completion cpl; struct k_sem sem; }; /* status code types */ enum nvme_status_code_type { NVME_SCT_GENERIC = 0x0, NVME_SCT_COMMAND_SPECIFIC = 0x1, NVME_SCT_MEDIA_ERROR = 0x2, NVME_SCT_PATH_RELATED = 0x3, /* 0x3-0x6 - reserved */ NVME_SCT_VENDOR_SPECIFIC = 0x7, }; /* generic command status codes */ enum nvme_generic_command_status_code { NVME_SC_SUCCESS = 0x00, 
NVME_SC_INVALID_OPCODE = 0x01, NVME_SC_INVALID_FIELD = 0x02, NVME_SC_COMMAND_ID_CONFLICT = 0x03, NVME_SC_DATA_TRANSFER_ERROR = 0x04, NVME_SC_ABORTED_POWER_LOSS = 0x05, NVME_SC_INTERNAL_DEVICE_ERROR = 0x06, NVME_SC_ABORTED_BY_REQUEST = 0x07, NVME_SC_ABORTED_SQ_DELETION = 0x08, NVME_SC_ABORTED_FAILED_FUSED = 0x09, NVME_SC_ABORTED_MISSING_FUSED = 0x0a, NVME_SC_INVALID_NAMESPACE_OR_FORMAT = 0x0b, NVME_SC_COMMAND_SEQUENCE_ERROR = 0x0c, NVME_SC_INVALID_SGL_SEGMENT_DESCR = 0x0d, NVME_SC_INVALID_NUMBER_OF_SGL_DESCR = 0x0e, NVME_SC_DATA_SGL_LENGTH_INVALID = 0x0f, NVME_SC_METADATA_SGL_LENGTH_INVALID = 0x10, NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID = 0x11, NVME_SC_INVALID_USE_OF_CMB = 0x12, NVME_SC_PRP_OFFSET_INVALID = 0x13, NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED = 0x14, NVME_SC_OPERATION_DENIED = 0x15, NVME_SC_SGL_OFFSET_INVALID = 0x16, /* 0x17 - reserved */ NVME_SC_HOST_ID_INCONSISTENT_FORMAT = 0x18, NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED = 0x19, NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID = 0x1a, NVME_SC_ABORTED_DUE_TO_PREEMPT = 0x1b, NVME_SC_SANITIZE_FAILED = 0x1c, NVME_SC_SANITIZE_IN_PROGRESS = 0x1d, NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID = 0x1e, NVME_SC_NOT_SUPPORTED_IN_CMB = 0x1f, NVME_SC_NAMESPACE_IS_WRITE_PROTECTED = 0x20, NVME_SC_COMMAND_INTERRUPTED = 0x21, NVME_SC_TRANSIENT_TRANSPORT_ERROR = 0x22, NVME_SC_LBA_OUT_OF_RANGE = 0x80, NVME_SC_CAPACITY_EXCEEDED = 0x81, NVME_SC_NAMESPACE_NOT_READY = 0x82, NVME_SC_RESERVATION_CONFLICT = 0x83, NVME_SC_FORMAT_IN_PROGRESS = 0x84, }; /* command specific status codes */ enum nvme_command_specific_status_code { NVME_SC_COMPLETION_QUEUE_INVALID = 0x00, NVME_SC_INVALID_QUEUE_IDENTIFIER = 0x01, NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED = 0x02, NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED = 0x03, /* 0x04 - reserved */ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05, NVME_SC_INVALID_FIRMWARE_SLOT = 0x06, NVME_SC_INVALID_FIRMWARE_IMAGE = 0x07, NVME_SC_INVALID_INTERRUPT_VECTOR = 0x08, NVME_SC_INVALID_LOG_PAGE = 0x09, NVME_SC_INVALID_FORMAT = 0x0a, 
NVME_SC_FIRMWARE_REQUIRES_RESET = 0x0b, NVME_SC_INVALID_QUEUE_DELETION = 0x0c, NVME_SC_FEATURE_NOT_SAVEABLE = 0x0d, NVME_SC_FEATURE_NOT_CHANGEABLE = 0x0e, NVME_SC_FEATURE_NOT_NS_SPECIFIC = 0x0f, NVME_SC_FW_ACT_REQUIRES_NVMS_RESET = 0x10, NVME_SC_FW_ACT_REQUIRES_RESET = 0x11, NVME_SC_FW_ACT_REQUIRES_TIME = 0x12, NVME_SC_FW_ACT_PROHIBITED = 0x13, NVME_SC_OVERLAPPING_RANGE = 0x14, NVME_SC_NS_INSUFFICIENT_CAPACITY = 0x15, NVME_SC_NS_ID_UNAVAILABLE = 0x16, /* 0x17 - reserved */ NVME_SC_NS_ALREADY_ATTACHED = 0x18, NVME_SC_NS_IS_PRIVATE = 0x19, NVME_SC_NS_NOT_ATTACHED = 0x1a, NVME_SC_THIN_PROV_NOT_SUPPORTED = 0x1b, NVME_SC_CTRLR_LIST_INVALID = 0x1c, NVME_SC_SELF_TEST_IN_PROGRESS = 0x1d, NVME_SC_BOOT_PART_WRITE_PROHIB = 0x1e, NVME_SC_INVALID_CTRLR_ID = 0x1f, NVME_SC_INVALID_SEC_CTRLR_STATE = 0x20, NVME_SC_INVALID_NUM_OF_CTRLR_RESRC = 0x21, NVME_SC_INVALID_RESOURCE_ID = 0x22, NVME_SC_SANITIZE_PROHIBITED_WPMRE = 0x23, NVME_SC_ANA_GROUP_ID_INVALID = 0x24, NVME_SC_ANA_ATTACH_FAILED = 0x25, NVME_SC_CONFLICTING_ATTRIBUTES = 0x80, NVME_SC_INVALID_PROTECTION_INFO = 0x81, NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE = 0x82, }; /* media error status codes */ enum nvme_media_error_status_code { NVME_SC_WRITE_FAULTS = 0x80, NVME_SC_UNRECOVERED_READ_ERROR = 0x81, NVME_SC_GUARD_CHECK_ERROR = 0x82, NVME_SC_APPLICATION_TAG_CHECK_ERROR = 0x83, NVME_SC_REFERENCE_TAG_CHECK_ERROR = 0x84, NVME_SC_COMPARE_FAILURE = 0x85, NVME_SC_ACCESS_DENIED = 0x86, NVME_SC_DEALLOCATED_OR_UNWRITTEN = 0x87, }; /* path related status codes */ enum nvme_path_related_status_code { NVME_SC_INTERNAL_PATH_ERROR = 0x00, NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS = 0x01, NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE = 0x02, NVME_SC_ASYMMETRIC_ACCESS_TRANSITION = 0x03, NVME_SC_CONTROLLER_PATHING_ERROR = 0x60, NVME_SC_HOST_PATHING_ERROR = 0x70, NVME_SC_COMMAND_ABORTED_BY_HOST = 0x71, }; /* admin opcodes */ enum nvme_admin_opcode { NVME_OPC_DELETE_IO_SQ = 0x00, NVME_OPC_CREATE_IO_SQ = 0x01, NVME_OPC_GET_LOG_PAGE = 0x02, /* 0x03 - reserved 
*/ NVME_OPC_DELETE_IO_CQ = 0x04, NVME_OPC_CREATE_IO_CQ = 0x05, NVME_OPC_IDENTIFY = 0x06, /* 0x07 - reserved */ NVME_OPC_ABORT = 0x08, NVME_OPC_SET_FEATURES = 0x09, NVME_OPC_GET_FEATURES = 0x0a, /* 0x0b - reserved */ NVME_OPC_ASYNC_EVENT_REQUEST = 0x0c, NVME_OPC_NAMESPACE_MANAGEMENT = 0x0d, /* 0x0e-0x0f - reserved */ NVME_OPC_FIRMWARE_ACTIVATE = 0x10, NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD = 0x11, /* 0x12-0x13 - reserved */ NVME_OPC_DEVICE_SELF_TEST = 0x14, NVME_OPC_NAMESPACE_ATTACHMENT = 0x15, /* 0x16-0x17 - reserved */ NVME_OPC_KEEP_ALIVE = 0x18, NVME_OPC_DIRECTIVE_SEND = 0x19, NVME_OPC_DIRECTIVE_RECEIVE = 0x1a, /* 0x1b - reserved */ NVME_OPC_VIRTUALIZATION_MANAGEMENT = 0x1c, NVME_OPC_NVME_MI_SEND = 0x1d, NVME_OPC_NVME_MI_RECEIVE = 0x1e, /* 0x1f-0x7b - reserved */ NVME_OPC_DOORBELL_BUFFER_CONFIG = 0x7c, NVME_OPC_FORMAT_NVM = 0x80, NVME_OPC_SECURITY_SEND = 0x81, NVME_OPC_SECURITY_RECEIVE = 0x82, /* 0x83 - reserved */ NVME_OPC_SANITIZE = 0x84, /* 0x85 - reserved */ NVME_OPC_GET_LBA_STATUS = 0x86, }; /* nvme nvm opcodes */ enum nvme_nvm_opcode { NVME_OPC_FLUSH = 0x00, NVME_OPC_WRITE = 0x01, NVME_OPC_READ = 0x02, /* 0x03 - reserved */ NVME_OPC_WRITE_UNCORRECTABLE = 0x04, NVME_OPC_COMPARE = 0x05, /* 0x06-0x07 - reserved */ NVME_OPC_WRITE_ZEROES = 0x08, NVME_OPC_DATASET_MANAGEMENT = 0x09, /* 0x0a-0x0b - reserved */ NVME_OPC_VERIFY = 0x0c, NVME_OPC_RESERVATION_REGISTER = 0x0d, NVME_OPC_RESERVATION_REPORT = 0x0e, /* 0x0f-0x10 - reserved */ NVME_OPC_RESERVATION_ACQUIRE = 0x11, /* 0x12-0x14 - reserved */ NVME_OPC_RESERVATION_RELEASE = 0x15, }; enum nvme_feature { /* 0x00 - reserved */ NVME_FEAT_ARBITRATION = 0x01, NVME_FEAT_POWER_MANAGEMENT = 0x02, NVME_FEAT_LBA_RANGE_TYPE = 0x03, NVME_FEAT_TEMPERATURE_THRESHOLD = 0x04, NVME_FEAT_ERROR_RECOVERY = 0x05, NVME_FEAT_VOLATILE_WRITE_CACHE = 0x06, NVME_FEAT_NUMBER_OF_QUEUES = 0x07, NVME_FEAT_INTERRUPT_COALESCING = 0x08, NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09, NVME_FEAT_WRITE_ATOMICITY = 0x0A, NVME_FEAT_ASYNC_EVENT_CONFIGURATION 
= 0x0B, NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION = 0x0C, NVME_FEAT_HOST_MEMORY_BUFFER = 0x0D, NVME_FEAT_TIMESTAMP = 0x0E, NVME_FEAT_KEEP_ALIVE_TIMER = 0x0F, NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT = 0x10, NVME_FEAT_NON_OP_POWER_STATE_CONFIG = 0x11, NVME_FEAT_READ_RECOVERY_LEVEL_CONFIG = 0x12, NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG = 0x13, NVME_FEAT_PREDICTABLE_LATENCY_MODE_WINDOW = 0x14, NVME_FEAT_LBA_STATUS_INFORMATION_ATTRIBUTES = 0x15, NVME_FEAT_HOST_BEHAVIOR_SUPPORT = 0x16, NVME_FEAT_SANITIZE_CONFIG = 0x17, NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION = 0x18, /* 0x19-0x77 - reserved */ /* 0x78-0x7f - NVMe Management Interface */ NVME_FEAT_SOFTWARE_PROGRESS_MARKER = 0x80, NVME_FEAT_HOST_IDENTIFIER = 0x81, NVME_FEAT_RESERVATION_NOTIFICATION_MASK = 0x82, NVME_FEAT_RESERVATION_PERSISTENCE = 0x83, NVME_FEAT_NAMESPACE_WRITE_PROTECTION_CONFIG = 0x84, /* 0x85-0xBF - command set specific (reserved) */ /* 0xC0-0xFF - vendor specific */ }; #if !defined(CONFIG_DCACHE_LINE_SIZE) || (CONFIG_DCACHE_LINE_SIZE == 0) #define CACHE_LINE_SIZE (64) #else #define CACHE_LINE_SIZE CONFIG_DCACHE_LINE_SIZE #endif #define NVME_PBAO_MASK (CONFIG_MMU_PAGE_SIZE - 1) #define NVME_PRP_NEXT_PAGE(_addr) \ ((_addr & ~NVME_PBAO_MASK) + CONFIG_MMU_PAGE_SIZE) struct nvme_prp_list { uintptr_t prp[CONFIG_MMU_PAGE_SIZE / sizeof(uintptr_t)] __aligned(CONFIG_MMU_PAGE_SIZE); sys_dnode_t node; }; struct nvme_cmd_qpair { struct nvme_controller *ctrlr; uint32_t id; uint32_t num_entries; uint32_t sq_tdbl_off; uint32_t cq_hdbl_off; uint32_t phase; uint32_t sq_head; uint32_t sq_tail; uint32_t cq_head; int64_t num_cmds; int64_t num_intr_handler_calls; int64_t num_retries; int64_t num_failures; int64_t num_ignored; struct nvme_command *cmd; struct nvme_completion *cpl; uintptr_t cmd_bus_addr; uintptr_t cpl_bus_addr; uint16_t vector; } __aligned(CACHE_LINE_SIZE); typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *); enum nvme_request_type { NVME_REQUEST_NULL = 1, NVME_REQUEST_VADDR = 2, }; 
struct nvme_request { struct nvme_command cmd; struct nvme_cmd_qpair *qpair; uint32_t type; uint32_t req_start; int32_t retries; void *payload; uint32_t payload_size; nvme_cb_fn_t cb_fn; void *cb_arg; struct nvme_prp_list *prp_list; sys_dnode_t node; }; void nvme_cmd_init(void); void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl); #ifdef CONFIG_NVME_LOG_LEVEL_DBG void nvme_completion_print(const struct nvme_completion *cpl); #else #define nvme_completion_print(...) #endif /* CONFIG_NVME_LOG_LEVEL_DBG */ void nvme_cmd_request_free(struct nvme_request *request); struct nvme_request *nvme_cmd_request_alloc(void); int nvme_cmd_qpair_setup(struct nvme_cmd_qpair *qpair, struct nvme_controller *ctrlr, uint32_t id); void nvme_cmd_qpair_reset(struct nvme_cmd_qpair *qpair); int nvme_cmd_qpair_submit_request(struct nvme_cmd_qpair *qpair, struct nvme_request *request); int nvme_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload, nvme_cb_fn_t cb_fn, void *cb_arg); int nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, nvme_cb_fn_t cb_fn, void *cb_arg); int nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid, void *payload, nvme_cb_fn_t cb_fn, void *cb_arg); int nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr, struct nvme_cmd_qpair *io_queue, nvme_cb_fn_t cb_fn, void *cb_arg); int nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr, struct nvme_cmd_qpair *io_queue, nvme_cb_fn_t cb_fn, void *cb_arg); int nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr, struct nvme_cmd_qpair *io_queue, nvme_cb_fn_t cb_fn, void *cb_arg); int nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr, struct nvme_cmd_qpair *io_queue, nvme_cb_fn_t cb_fn, void *cb_arg); int nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature, uint32_t cdw11, uint32_t cdw12, uint32_t cdw13, uint32_t cdw14, uint32_t cdw15, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn, void 
*cb_arg); int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature, uint32_t cdw11, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg); int nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr, uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg); static inline struct nvme_request *nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; request = nvme_cmd_request_alloc(); if (request != NULL) { request->cb_fn = cb_fn; request->cb_arg = cb_arg; } return request; } static inline struct nvme_request *nvme_allocate_request_vaddr(void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; request = nvme_allocate_request(cb_fn, cb_arg); if (request != NULL) { request->type = NVME_REQUEST_VADDR; request->payload = payload; request->payload_size = payload_size; } return request; } static inline struct nvme_request *nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; request = nvme_allocate_request(cb_fn, cb_arg); if (request != NULL) { request->type = NVME_REQUEST_NULL; } return request; } /* * Command building helper functions * These functions assume allocator zeros out cmd structure */ static inline void nvme_namespace_flush_cmd(struct nvme_command *cmd, uint32_t nsid) { cmd->cdw0.opc = NVME_OPC_FLUSH; cmd->nsid = sys_cpu_to_le32(nsid); } static inline void nvme_namespace_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd, uint32_t nsid, uint64_t lba, uint32_t count) { cmd->cdw0.opc = rwcmd; cmd->nsid = sys_cpu_to_le32(nsid); cmd->cdw10 = sys_cpu_to_le32(lba & 0xffffffffu); cmd->cdw11 = sys_cpu_to_le32(lba >> 32); cmd->cdw12 = sys_cpu_to_le32(count-1); } static inline void nvme_namespace_write_cmd(struct nvme_command *cmd, uint32_t nsid, uint64_t lba, uint32_t count) { nvme_namespace_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count); } static inline void nvme_namespace_read_cmd(struct nvme_command *cmd, uint32_t 
nsid, uint64_t lba, uint32_t count) { nvme_namespace_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count); } static inline void nvme_completion_swapbytes(struct nvme_completion *cpl) { #if _BYTE_ORDER != _LITTLE_ENDIAN cpl->cdw0 = sys_le32_to_cpu(cpl->cdw0); /* omit rsvd1 */ cpl->sqhd = sys_le16_to_cpu(cpl->sqhd); cpl->sqid = sys_le16_to_cpu(cpl->sqid); /* omit cid */ cpl->status = sys_le16_to_cpu(s->status); #else ARG_UNUSED(cpl); #endif } static inline void nvme_completion_poll(struct nvme_completion_poll_status *status) { k_sem_take(&status->sem, K_FOREVER); } #define NVME_CPL_STATUS_POLL_INIT(cpl_status) \ { \ .status = 0, \ .sem = Z_SEM_INITIALIZER(cpl_status.sem, 0, 1), \ } static inline void nvme_cpl_status_poll_init(struct nvme_completion_poll_status *status) { status->status = 0; k_sem_init(&status->sem, 0, 1); } #define nvme_completion_is_error(cpl) \ ((NVME_STATUS_GET_SC((cpl)->status) != 0) | \ (NVME_STATUS_GET_SCT((cpl)->status) != 0)) static inline bool nvme_cpl_status_is_error(struct nvme_completion_poll_status *status) { return ((status->status != 0) || nvme_completion_is_error(&status->cpl)); } #endif /* ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_ */ ```
/content/code_sandbox/drivers/disk/nvme/nvme_cmd.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,658
```c /* * * Derived from FreeBSD original driver made by Jim Harris */ #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <string.h> #include "nvme.h" #include "nvme_helpers.h" int nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; request = nvme_allocate_request_vaddr( &ctrlr->cdata, sizeof(struct nvme_controller_data), cb_fn, cb_arg); if (!request) { return -ENOMEM; } memset(&request->cmd, 0, sizeof(request->cmd)); request->cmd.cdw0.opc = NVME_OPC_IDENTIFY; request->cmd.cdw10 = sys_cpu_to_le32(1); return nvme_cmd_qpair_submit_request(ctrlr->adminq, request); } int nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid, void *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; request = nvme_allocate_request_vaddr( payload, sizeof(struct nvme_namespace_data), cb_fn, cb_arg); if (!request) { return -ENOMEM; } request->cmd.cdw0.opc = NVME_OPC_IDENTIFY; /* * TODO: create an identify command data structure */ request->cmd.nsid = sys_cpu_to_le32(nsid); return nvme_cmd_qpair_submit_request(ctrlr->adminq, request); } int nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr, struct nvme_cmd_qpair *io_queue, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; struct nvme_command *cmd; request = nvme_allocate_request_null(cb_fn, cb_arg); if (!request) { return -ENOMEM; } cmd = &request->cmd; cmd->cdw0.opc = NVME_OPC_CREATE_IO_CQ; /* * TODO: create a create io completion queue command data * structure. 
*/ cmd->cdw10 = sys_cpu_to_le32(((io_queue->num_entries-1) << 16) | io_queue->id); /* 0x3 = interrupts enabled | physically contiguous */ cmd->cdw11 = sys_cpu_to_le32((io_queue->vector << 16) | 0x3); cmd->dptr.prp1 = sys_cpu_to_le64(io_queue->cpl_bus_addr); return nvme_cmd_qpair_submit_request(ctrlr->adminq, request); } int nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr, struct nvme_cmd_qpair *io_queue, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; struct nvme_command *cmd; request = nvme_allocate_request_null(cb_fn, cb_arg); if (!request) { return -ENOMEM; } cmd = &request->cmd; cmd->cdw0.opc = NVME_OPC_CREATE_IO_SQ; /* * TODO: create a create io submission queue command data * structure. */ cmd->cdw10 = sys_cpu_to_le32(((io_queue->num_entries - 1) << 16) | io_queue->id); /* 0x1 = physically contiguous */ cmd->cdw11 = sys_cpu_to_le32((io_queue->id << 16) | 0x1); cmd->dptr.prp1 = sys_cpu_to_le64(io_queue->cmd_bus_addr); return nvme_cmd_qpair_submit_request(ctrlr->adminq, request); } int nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr, struct nvme_cmd_qpair *io_queue, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; struct nvme_command *cmd; request = nvme_allocate_request_null(cb_fn, cb_arg); if (!request) { return -ENOMEM; } cmd = &request->cmd; cmd->cdw0.opc = NVME_OPC_DELETE_IO_CQ; /* * TODO: create a delete io completion queue command data * structure. */ cmd->cdw10 = sys_cpu_to_le32(io_queue->id); return nvme_cmd_qpair_submit_request(ctrlr->adminq, request); } int nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr, struct nvme_cmd_qpair *io_queue, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; struct nvme_command *cmd; request = nvme_allocate_request_null(cb_fn, cb_arg); if (!request) { return -ENOMEM; } cmd = &request->cmd; cmd->cdw0.opc = NVME_OPC_DELETE_IO_SQ; /* * TODO: create a delete io submission queue command data * structure. 
*/ cmd->cdw10 = sys_cpu_to_le32(io_queue->id); return nvme_cmd_qpair_submit_request(ctrlr->adminq, request); } int nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature, uint32_t cdw11, uint32_t cdw12, uint32_t cdw13, uint32_t cdw14, uint32_t cdw15, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; struct nvme_command *cmd; request = nvme_allocate_request_null(cb_fn, cb_arg); if (!request) { return -ENOMEM; } cmd = &request->cmd; cmd->cdw0.opc = NVME_OPC_SET_FEATURES; cmd->cdw10 = sys_cpu_to_le32(feature); cmd->cdw11 = sys_cpu_to_le32(cdw11); cmd->cdw12 = sys_cpu_to_le32(cdw12); cmd->cdw13 = sys_cpu_to_le32(cdw13); cmd->cdw14 = sys_cpu_to_le32(cdw14); cmd->cdw15 = sys_cpu_to_le32(cdw15); return nvme_cmd_qpair_submit_request(ctrlr->adminq, request); } int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature, uint32_t cdw11, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *request; struct nvme_command *cmd; request = nvme_allocate_request_null(cb_fn, cb_arg); if (!request) { return -ENOMEM; } cmd = &request->cmd; cmd->cdw0.opc = NVME_OPC_GET_FEATURES; cmd->cdw10 = sys_cpu_to_le32(feature); cmd->cdw11 = sys_cpu_to_le32(cdw11); return nvme_cmd_qpair_submit_request(ctrlr->adminq, request); } int nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr, uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg) { uint32_t cdw11; cdw11 = ((num_queues - 1) << 16) | (num_queues - 1); return nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11, 0, 0, 0, 0, NULL, 0, cb_fn, cb_arg); } ```
/content/code_sandbox/drivers/disk/nvme/nvme_controller_cmd.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,700
```unknown
menuconfig NVME
	bool "NVMe disk"
	depends on PCIE
	select PCIE_MSI_X
	select PCIE_MSI_MULTI_VECTOR
	help
	  NVMe disk(s) might be present on the system through PCIe, enable
	  this driver to support these. It will enable MSI-X and MSI
	  multi-vector support.

if NVME

config NVME_ADMIN_ENTRIES
	int "Number of admin queue entries"
	range 2 4096
	default 256
	help
	  This sets the number of allocated admin queue entries.
	  Do not touch this unless you know what you are doing.

config NVME_IO_QUEUES
	int "Number of IO queues"
	range 1 65536
	default 1
	help
	  This sets the number of allocated I/O queues.
	  Do not touch this unless you know what you are doing.

config NVME_IO_ENTRIES
	int "Number of IO queue entries"
	range 2 65536
	default 256
	help
	  This sets the number of allocated IO queue entries.
	  Do not touch this unless you know what you are doing.

config NVME_RETRY_COUNT
	int "Retry count"
	default 2
	help
	  This sets the number of possible retries per-request.
	  Do not touch this unless you know what you are doing.

config NVME_REQUEST_TIMEOUT
	int "Timeout period for NVMe request"
	range 5 120
	default 5
	help
	  This sets the waiting time for a request to succeed.
	  Do not touch this unless you know what you are doing.

config NVME_PRP_LIST_AMOUNT
	int "Number of allocated PRP list"
	default 2
	help
	  This sets the number of pre-allocated PRP lists. Each list can
	  be used in an NVMe command to address memory where to read or
	  write data. Each PRP list is one page in size, so be careful
	  with this number as it may consume a significant amount of
	  memory.

config NVME_MAX_NAMESPACES
	int "Maximum namespace to allocate"
	range 1 16
	default 1
	help
	  This sets the maximum number of namespaces to allocate. Note
	  that far fewer may actually be instantiated at runtime. This
	  option can be used to reduce the memory usage a bit then.

config NVME_INT_PRIORITY
	int "Interrupt priority"
	default 2
	help
	  Interrupt priority used for the MSI-X generated interrupts.

module = NVME module-str = nvme source "subsys/logging/Kconfig.template.log_config" endif # NVME ```
/content/code_sandbox/drivers/disk/nvme/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
531
```c /* * * Derived from FreeBSD original driver made by Jim Harris * with contributions from Alexander Motin, Wojciech Macek, and Warner Losh */ #define DT_DRV_COMPAT nvme_controller #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nvme, CONFIG_NVME_LOG_LEVEL); #include <errno.h> #include <zephyr/kernel.h> #include <soc.h> #include <zephyr/device.h> #include <zephyr/init.h> #include "nvme_helpers.h" #include "nvme.h" static int nvme_controller_wait_for_ready(const struct device *dev, const int desired_val) { struct nvme_controller *nvme_ctrlr = dev->data; mm_reg_t regs = DEVICE_MMIO_GET(dev); int timeout = sys_clock_tick_get_32() + k_ms_to_ticks_ceil32(nvme_ctrlr->ready_timeout_in_ms); uint32_t delta_t = USEC_PER_MSEC; uint32_t csts; while (1) { csts = nvme_mmio_read_4(regs, csts); if (csts == NVME_GONE) { LOG_ERR("Controller is unreachable"); return -EIO; } if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK) == desired_val) { break; } if ((int64_t)timeout - sys_clock_tick_get_32() < 0) { LOG_ERR("Timeout error"); return -EIO; } k_busy_wait(delta_t); delta_t = MIN((MSEC_PER_SEC * USEC_PER_MSEC), delta_t * 3 / 2); } return 0; } static int nvme_controller_disable(const struct device *dev) { mm_reg_t regs = DEVICE_MMIO_GET(dev); uint32_t cc, csts; uint8_t enabled, ready; int err; cc = nvme_mmio_read_4(regs, cc); csts = nvme_mmio_read_4(regs, csts); ready = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK; enabled = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK; if (enabled == 0) { /* Wait for RDY == 0 or timeout & fail */ if (ready == 0) { return 0; } return nvme_controller_wait_for_ready(dev, 0); } if (ready == 0) { /* EN == 1, wait for RDY == 1 or timeout & fail */ err = nvme_controller_wait_for_ready(dev, 1); if (err != 0) { return err; } } cc &= ~NVME_CC_REG_EN_MASK; nvme_mmio_write_4(regs, cc, cc); return nvme_controller_wait_for_ready(dev, 0); } static int nvme_controller_enable(const struct device *dev) { struct nvme_controller 
*nvme_ctrlr = dev->data; mm_reg_t regs = DEVICE_MMIO_GET(dev); uint8_t enabled, ready; uint32_t cc, csts; int err; cc = nvme_mmio_read_4(regs, cc); csts = nvme_mmio_read_4(regs, csts); ready = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK; enabled = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK; if (enabled == 1) { if (ready == 1) { LOG_DBG("Already enabled"); return 0; } return nvme_controller_wait_for_ready(dev, 1); } /* EN == 0 already wait for RDY == 0 or timeout & fail */ err = nvme_controller_wait_for_ready(dev, 0); if (err != 0) { return err; } /* Initialization values for CC */ cc = 0; cc |= 1 << NVME_CC_REG_EN_SHIFT; cc |= 0 << NVME_CC_REG_CSS_SHIFT; cc |= 0 << NVME_CC_REG_AMS_SHIFT; cc |= 0 << NVME_CC_REG_SHN_SHIFT; cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */ cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */ cc |= nvme_ctrlr->mps << NVME_CC_REG_MPS_SHIFT; nvme_mmio_write_4(regs, cc, cc); return nvme_controller_wait_for_ready(dev, 1); } static int nvme_controller_setup_admin_queues(const struct device *dev) { struct nvme_controller *nvme_ctrlr = dev->data; mm_reg_t regs = DEVICE_MMIO_GET(dev); uint32_t aqa, qsize; nvme_cmd_qpair_reset(nvme_ctrlr->adminq); /* Admin queue is always id 0 */ if (nvme_cmd_qpair_setup(nvme_ctrlr->adminq, nvme_ctrlr, 0) != 0) { LOG_ERR("Admin cmd qpair setup failed"); return -EIO; } nvme_mmio_write_8(regs, asq, nvme_ctrlr->adminq->cmd_bus_addr); nvme_mmio_write_8(regs, acq, nvme_ctrlr->adminq->cpl_bus_addr); /* acqs and asqs are 0-based. 
*/ qsize = CONFIG_NVME_ADMIN_ENTRIES - 1; aqa = 0; aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT; aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT; nvme_mmio_write_4(regs, aqa, aqa); return 0; } static int nvme_controller_setup_io_queues(const struct device *dev) { struct nvme_controller *nvme_ctrlr = dev->data; struct nvme_completion_poll_status status; struct nvme_cmd_qpair *io_qpair; int cq_allocated, sq_allocated; int ret, idx; nvme_cpl_status_poll_init(&status); ret = nvme_ctrlr_cmd_set_num_queues(nvme_ctrlr, nvme_ctrlr->num_io_queues, nvme_completion_poll_cb, &status); if (ret != 0) { return ret; } nvme_completion_poll(&status); if (nvme_cpl_status_is_error(&status)) { LOG_ERR("Could not set IO num queues to %u", nvme_ctrlr->num_io_queues); nvme_completion_print(&status.cpl); return -EIO; } /* * Data in cdw0 is 0-based. * Lower 16-bits indicate number of submission queues allocated. * Upper 16-bits indicate number of completion queues allocated. */ sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1; cq_allocated = (status.cpl.cdw0 >> 16) + 1; /* * Controller may allocate more queues than we requested, * so use the minimum of the number requested and what was * actually allocated. 
*/ nvme_ctrlr->num_io_queues = MIN(nvme_ctrlr->num_io_queues, sq_allocated); nvme_ctrlr->num_io_queues = MIN(nvme_ctrlr->num_io_queues, cq_allocated); for (idx = 0; idx < nvme_ctrlr->num_io_queues; idx++) { io_qpair = &nvme_ctrlr->ioq[idx]; if (nvme_cmd_qpair_setup(io_qpair, nvme_ctrlr, idx+1) != 0) { LOG_ERR("IO cmd qpair %u setup failed", idx+1); return -EIO; } nvme_cmd_qpair_reset(io_qpair); nvme_cpl_status_poll_init(&status); ret = nvme_ctrlr_cmd_create_io_cq(nvme_ctrlr, io_qpair, nvme_completion_poll_cb, &status); if (ret != 0) { return ret; } nvme_completion_poll(&status); if (nvme_cpl_status_is_error(&status)) { LOG_ERR("IO CQ creation failed"); nvme_completion_print(&status.cpl); return -EIO; } nvme_cpl_status_poll_init(&status); ret = nvme_ctrlr_cmd_create_io_sq(nvme_ctrlr, io_qpair, nvme_completion_poll_cb, &status); if (ret != 0) { return ret; } nvme_completion_poll(&status); if (nvme_cpl_status_is_error(&status)) { LOG_ERR("IO CQ creation failed"); nvme_completion_print(&status.cpl); return -EIO; } } return 0; } static void nvme_controller_gather_info(const struct device *dev) { struct nvme_controller *nvme_ctrlr = dev->data; mm_reg_t regs = DEVICE_MMIO_GET(dev); uint32_t cap_lo, cap_hi, to, vs, pmrcap; nvme_ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(regs, cap_lo); LOG_DBG("CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u", cap_lo, NVME_CAP_LO_MQES(cap_lo), NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "", NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "", (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "", (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "", NVME_CAP_LO_TO(cap_lo)); nvme_ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(regs, cap_hi); LOG_DBG("CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, " "MPSMIN %u, MPSMAX %u%s%s", cap_hi, NVME_CAP_HI_DSTRD(cap_hi), NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "", NVME_CAP_HI_CSS(cap_hi), NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "", NVME_CAP_HI_MPSMIN(cap_hi), NVME_CAP_HI_MPSMAX(cap_hi), NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "", NVME_CAP_HI_CMBS(cap_hi) ? 
", CMBS" : ""); vs = nvme_mmio_read_4(regs, vs); LOG_DBG("Version: 0x%08x: %d.%d", vs, NVME_MAJOR(vs), NVME_MINOR(vs)); if (NVME_CAP_HI_PMRS(cap_hi)) { pmrcap = nvme_mmio_read_4(regs, pmrcap); LOG_DBG("PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, " "PMRWBM %x, PMRTO %u%s", pmrcap, NVME_PMRCAP_BIR(pmrcap), NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "", NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "", NVME_PMRCAP_PMRTU(pmrcap), NVME_PMRCAP_PMRWBM(pmrcap), NVME_PMRCAP_PMRTO(pmrcap), NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : ""); } nvme_ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2; nvme_ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi); nvme_ctrlr->page_size = 1 << (NVME_MPS_SHIFT + nvme_ctrlr->mps); LOG_DBG("MPS: %u - Page Size: %u bytes", nvme_ctrlr->mps, nvme_ctrlr->page_size); /* Get ready timeout value from controller, in units of 500ms. */ to = NVME_CAP_LO_TO(cap_lo) + 1; nvme_ctrlr->ready_timeout_in_ms = to * 500; /* Cap transfers by the maximum addressable by * page-sized PRP (4KB pages -> 2MB). * ToDo: it could be less -> take the minimum. 
*/ nvme_ctrlr->max_xfer_size = nvme_ctrlr->page_size / 8 * nvme_ctrlr->page_size; LOG_DBG("Max transfer size: %u bytes", nvme_ctrlr->max_xfer_size); } static int nvme_controller_pcie_configure(const struct device *dev) { const struct nvme_controller_config *nvme_ctrlr_cfg = dev->config; struct nvme_controller *nvme_ctrlr = dev->data; struct pcie_bar mbar_regs; uint8_t n_vectors; if (nvme_ctrlr_cfg->pcie->bdf == PCIE_BDF_NONE) { LOG_ERR("Controller not found"); return -ENODEV; } LOG_DBG("Configuring NVME controller ID %x:%x at %d:%x.%d", PCIE_ID_TO_VEND(nvme_ctrlr_cfg->pcie->id), PCIE_ID_TO_DEV(nvme_ctrlr_cfg->pcie->id), PCIE_BDF_TO_BUS(nvme_ctrlr_cfg->pcie->bdf), PCIE_BDF_TO_DEV(nvme_ctrlr_cfg->pcie->bdf), PCIE_BDF_TO_FUNC(nvme_ctrlr_cfg->pcie->bdf)); if (!pcie_get_mbar(nvme_ctrlr_cfg->pcie->bdf, NVME_PCIE_BAR_IDX, &mbar_regs)) { LOG_ERR("Could not get NVME registers"); return -EIO; } device_map(DEVICE_MMIO_RAM_PTR(dev), mbar_regs.phys_addr, mbar_regs.size, K_MEM_CACHE_NONE); /* Allocating vectors */ n_vectors = pcie_msi_vectors_allocate(nvme_ctrlr_cfg->pcie->bdf, CONFIG_NVME_INT_PRIORITY, nvme_ctrlr->vectors, NVME_PCIE_MSIX_VECTORS); if (n_vectors == 0) { LOG_ERR("Could not allocate %u MSI-X vectors", NVME_PCIE_MSIX_VECTORS); return -EIO; } /* Enabling MSI-X and the vectors */ if (!pcie_msi_enable(nvme_ctrlr_cfg->pcie->bdf, nvme_ctrlr->vectors, n_vectors, 0)) { LOG_ERR("Could not enable MSI-X"); return -EIO; } return 0; } static int nvme_controller_identify(struct nvme_controller *nvme_ctrlr) { struct nvme_completion_poll_status status = NVME_CPL_STATUS_POLL_INIT(status); nvme_ctrlr_cmd_identify_controller(nvme_ctrlr, nvme_completion_poll_cb, &status); nvme_completion_poll(&status); if (nvme_cpl_status_is_error(&status)) { LOG_ERR("Could not identify the controller"); nvme_completion_print(&status.cpl); return -EIO; } nvme_controller_data_swapbytes(&nvme_ctrlr->cdata); /* * Use MDTS to ensure our default max_xfer_size doesn't exceed what the * controller supports. 
*/ if (nvme_ctrlr->cdata.mdts > 0) { nvme_ctrlr->max_xfer_size = MIN(nvme_ctrlr->max_xfer_size, 1 << (nvme_ctrlr->cdata.mdts + NVME_MPS_SHIFT + NVME_CAP_HI_MPSMIN(nvme_ctrlr->cap_hi))); } return 0; } static void nvme_controller_setup_namespaces(struct nvme_controller *nvme_ctrlr) { uint32_t i; for (i = 0; i < MIN(nvme_ctrlr->cdata.nn, CONFIG_NVME_MAX_NAMESPACES); i++) { struct nvme_namespace *ns = &nvme_ctrlr->ns[i]; if (nvme_namespace_construct(ns, i+1, nvme_ctrlr) != 0) { break; } LOG_DBG("Namespace id %u setup and running", i); } } static int nvme_controller_init(const struct device *dev) { struct nvme_controller *nvme_ctrlr = dev->data; int ret; k_mutex_init(&nvme_ctrlr->lock); nvme_cmd_init(); nvme_ctrlr->dev = dev; ret = nvme_controller_pcie_configure(dev); if (ret != 0) { return ret; } nvme_controller_gather_info(dev); ret = nvme_controller_disable(dev); if (ret != 0) { LOG_ERR("Controller cannot be disabled"); return ret; } ret = nvme_controller_setup_admin_queues(dev); if (ret != 0) { return ret; } ret = nvme_controller_enable(dev); if (ret != 0) { LOG_ERR("Controller cannot be enabled"); return ret; } ret = nvme_controller_setup_io_queues(dev); if (ret != 0) { return ret; } ret = nvme_controller_identify(nvme_ctrlr); if (ret != 0) { return ret; } nvme_controller_setup_namespaces(nvme_ctrlr); return 0; } #define NVME_CONTROLLER_DEVICE_INIT(n) \ DEVICE_PCIE_INST_DECLARE(n); \ NVME_ADMINQ_ALLOCATE(n, CONFIG_NVME_ADMIN_ENTRIES); \ NVME_IOQ_ALLOCATE(n, CONFIG_NVME_IO_ENTRIES); \ \ static struct nvme_controller nvme_ctrlr_data_##n = { \ .id = n, \ .num_io_queues = CONFIG_NVME_IO_QUEUES, \ .adminq = &admin_##n, \ .ioq = &io_##n, \ }; \ \ static struct nvme_controller_config nvme_ctrlr_cfg_##n = \ { \ DEVICE_PCIE_INST_INIT(n, pcie), \ }; \ \ DEVICE_DT_INST_DEFINE(n, &nvme_controller_init, \ NULL, &nvme_ctrlr_data_##n, \ &nvme_ctrlr_cfg_##n, POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, NULL); DT_INST_FOREACH_STATUS_OKAY(NVME_CONTROLLER_DEVICE_INIT) ```
/content/code_sandbox/drivers/disk/nvme/nvme_controller.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,224
```objective-c /* * * Derived from FreeBSD original driver made by Jim Harris * with contributions from Alexander Motin and Wojciech Macek */ #ifndef ZEPHYR_DRIVERS_DISK_NVME_NHME_H_ #define ZEPHYR_DRIVERS_DISK_NVME_NVME_H_ #include "nvme_helpers.h" #include "nvme_cmd.h" #include "nvme_namespace.h" struct nvme_registers { uint32_t cap_lo; /* controller capabilities */ uint32_t cap_hi; uint32_t vs; /* version */ uint32_t intms; /* interrupt mask set */ uint32_t intmc; /* interrupt mask clear */ uint32_t cc; /* controller configuration */ uint32_t reserved1; uint32_t csts; /* controller status */ uint32_t nssr; /* NVM Subsystem Reset */ uint32_t aqa; /* admin queue attributes */ uint64_t asq; /* admin submission queue base addr */ uint64_t acq; /* admin completion queue base addr */ uint32_t cmbloc; /* Controller Memory Buffer Location */ uint32_t cmbsz; /* Controller Memory Buffer Size */ uint32_t bpinfo; /* Boot Partition Information */ uint32_t bprsel; /* Boot Partition Read Select */ uint64_t bpmbl; /* Boot Partition Memory Buffer Location */ uint64_t cmbmsc; /* Controller Memory Buffer Memory Space Control */ uint32_t cmbsts; /* Controller Memory Buffer Status */ uint8_t reserved3[3492]; /* 5Ch - DFFh */ uint32_t pmrcap; /* Persistent Memory Capabilities */ uint32_t pmrctl; /* Persistent Memory Region Control */ uint32_t pmrsts; /* Persistent Memory Region Status */ uint32_t pmrebs; /* Persistent Memory Region Elasticity Buffer Size */ uint32_t pmrswtp; /* Persistent Memory Region Sustained Write Throughput */ uint32_t pmrmsc_lo; /* Persistent Memory Region Controller Memory Space Control */ uint32_t pmrmsc_hi; uint8_t reserved4[484]; /* E1Ch - FFFh */ struct { uint32_t sq_tdbl; /* submission queue tail doorbell */ uint32_t cq_hdbl; /* completion queue head doorbell */ } doorbell[1]; }; struct nvme_power_state { /** Maximum Power */ uint16_t mp; uint8_t ps_rsvd1; /** Max Power Scale, Non-Operational State */ uint8_t mps_nops; /** Entry Latency */ uint32_t enlat; 
/** Exit Latency */ uint32_t exlat; /** Relative Read Throughput */ uint8_t rrt; /** Relative Read Latency */ uint8_t rrl; /** Relative Write Throughput */ uint8_t rwt; /** Relative Write Latency */ uint8_t rwl; /** Idle Power */ uint16_t idlp; /** Idle Power Scale */ uint8_t ips; uint8_t ps_rsvd8; /** Active Power */ uint16_t actp; /** Active Power Workload, Active Power Scale */ uint8_t apw_aps; uint8_t ps_rsvd10[9]; } __packed; #define NVME_SERIAL_NUMBER_LENGTH 20 #define NVME_MODEL_NUMBER_LENGTH 40 #define NVME_FIRMWARE_REVISION_LENGTH 8 struct nvme_controller_data { /* bytes 0-255: controller capabilities and features */ /** pci vendor id */ uint16_t vid; /** pci subsystem vendor id */ uint16_t ssvid; /** serial number */ uint8_t sn[NVME_SERIAL_NUMBER_LENGTH]; /** model number */ uint8_t mn[NVME_MODEL_NUMBER_LENGTH]; /** firmware revision */ uint8_t fr[NVME_FIRMWARE_REVISION_LENGTH]; /** recommended arbitration burst */ uint8_t rab; /** ieee oui identifier */ uint8_t ieee[3]; /** multi-interface capabilities */ uint8_t mic; /** maximum data transfer size */ uint8_t mdts; /** Controller ID */ uint16_t ctrlr_id; /** Version */ uint32_t ver; /** RTD3 Resume Latency */ uint32_t rtd3r; /** RTD3 Enter Latency */ uint32_t rtd3e; /** Optional Asynchronous Events Supported */ uint32_t oaes; /* bitfield really */ /** Controller Attributes */ uint32_t ctratt; /* bitfield really */ /** Read Recovery Levels Supported */ uint16_t rrls; uint8_t reserved1[9]; /** Controller Type */ uint8_t cntrltype; /** FRU Globally Unique Identifier */ uint8_t fguid[16]; /** Command Retry Delay Time 1 */ uint16_t crdt1; /** Command Retry Delay Time 2 */ uint16_t crdt2; /** Command Retry Delay Time 3 */ uint16_t crdt3; uint8_t reserved2[122]; /* bytes 256-511: admin command set attributes */ /** optional admin command support */ uint16_t oacs; /** abort command limit */ uint8_t acl; /** asynchronous event request limit */ uint8_t aerl; /** firmware updates */ uint8_t frmw; /** log page 
attributes */ uint8_t lpa; /** error log page entries */ uint8_t elpe; /** number of power states supported */ uint8_t npss; /** admin vendor specific command configuration */ uint8_t avscc; /** Autonomous Power State Transition Attributes */ uint8_t apsta; /** Warning Composite Temperature Threshold */ uint16_t wctemp; /** Critical Composite Temperature Threshold */ uint16_t cctemp; /** Maximum Time for Firmware Activation */ uint16_t mtfa; /** Host Memory Buffer Preferred Size */ uint32_t hmpre; /** Host Memory Buffer Minimum Size */ uint32_t hmmin; /** Name space capabilities */ struct { /* if nsmgmt, report tnvmcap and unvmcap */ uint8_t tnvmcap[16]; uint8_t unvmcap[16]; } __packed untncap; /** Replay Protected Memory Block Support */ uint32_t rpmbs; /* Really a bitfield */ /** Extended Device Self-test Time */ uint16_t edstt; /** Device Self-test Options */ uint8_t dsto; /* Really a bitfield */ /** Firmware Update Granularity */ uint8_t fwug; /** Keep Alive Support */ uint16_t kas; /** Host Controlled Thermal Management Attributes */ uint16_t hctma; /* Really a bitfield */ /** Minimum Thermal Management Temperature */ uint16_t mntmt; /** Maximum Thermal Management Temperature */ uint16_t mxtmt; /** Sanitize Capabilities */ uint32_t sanicap; /* Really a bitfield */ /** Host Memory Buffer Minimum Descriptor Entry Size */ uint32_t hmminds; /** Host Memory Maximum Descriptors Entries */ uint16_t hmmaxd; /** NVM Set Identifier Maximum */ uint16_t nsetidmax; /** Endurance Group Identifier Maximum */ uint16_t endgidmax; /** ANA Transition Time */ uint8_t anatt; /** Asymmetric Namespace Access Capabilities */ uint8_t anacap; /** ANA Group Identifier Maximum */ uint32_t anagrpmax; /** Number of ANA Group Identifiers */ uint32_t nanagrpid; /** Persistent Event Log Size */ uint32_t pels; uint8_t reserved3[156]; /* bytes 512-703: nvm command set attributes */ /** submission queue entry size */ uint8_t sqes; /** completion queue entry size */ uint8_t cqes; /** Maximum 
Outstanding Commands */ uint16_t maxcmd; /** number of namespaces */ uint32_t nn; /** optional nvm command support */ uint16_t oncs; /** fused operation support */ uint16_t fuses; /** format nvm attributes */ uint8_t fna; /** volatile write cache */ uint8_t vwc; /** Atomic Write Unit Normal */ uint16_t awun; /** Atomic Write Unit Power Fail */ uint16_t awupf; /** NVM Vendor Specific Command Configuration */ uint8_t nvscc; /** Namespace Write Protection Capabilities */ uint8_t nwpc; /** Atomic Compare & Write Unit */ uint16_t acwu; uint16_t reserved6; /** SGL Support */ uint32_t sgls; /** Maximum Number of Allowed Namespaces */ uint32_t mnan; /* bytes 540-767: Reserved */ uint8_t reserved7[224]; /** NVM Subsystem NVMe Qualified Name */ uint8_t subnqn[256]; /* bytes 1024-1791: Reserved */ uint8_t reserved8[768]; /* bytes 1792-2047: NVMe over Fabrics specification */ uint8_t reserved9[256]; /* bytes 2048-3071: power state descriptors */ struct nvme_power_state power_state[32]; /* bytes 3072-4095: vendor specific */ uint8_t vs[1024]; } __packed __aligned(4); static inline void nvme_controller_data_swapbytes(struct nvme_controller_data *s) { #if _BYTE_ORDER != _LITTLE_ENDIAN s->vid = sys_le16_to_cpu(s->vid); s->ssvid = sys_le16_to_cpu(s->ssvid); s->ctrlr_id = sys_le16_to_cpu(s->ctrlr_id); s->ver = sys_le32_to_cpu(s->ver); s->rtd3r = sys_le32_to_cpu(s->rtd3r); s->rtd3e = sys_le32_to_cpu(s->rtd3e); s->oaes = sys_le32_to_cpu(s->oaes); s->ctratt = sys_le32_to_cpu(s->ctratt); s->rrls = sys_le16_to_cpu(s->rrls); s->crdt1 = sys_le16_to_cpu(s->crdt1); s->crdt2 = sys_le16_to_cpu(s->crdt2); s->crdt3 = sys_le16_to_cpu(s->crdt3); s->oacs = sys_le16_to_cpu(s->oacs); s->wctemp = sys_le16_to_cpu(s->wctemp); s->cctemp = sys_le16_to_cpu(s->cctemp); s->mtfa = sys_le16_to_cpu(s->mtfa); s->hmpre = sys_le32_to_cpu(s->hmpre); s->hmmin = sys_le32_to_cpu(s->hmmin); s->rpmbs = sys_le32_to_cpu(s->rpmbs); s->edstt = sys_le16_to_cpu(s->edstt); s->kas = sys_le16_to_cpu(s->kas); s->hctma = 
sys_le16_to_cpu(s->hctma); s->mntmt = sys_le16_to_cpu(s->mntmt); s->mxtmt = sys_le16_to_cpu(s->mxtmt); s->sanicap = sys_le32_to_cpu(s->sanicap); s->hmminds = sys_le32_to_cpu(s->hmminds); s->hmmaxd = sys_le16_to_cpu(s->hmmaxd); s->nsetidmax = sys_le16_to_cpu(s->nsetidmax); s->endgidmax = sys_le16_to_cpu(s->endgidmax); s->anagrpmax = sys_le32_to_cpu(s->anagrpmax); s->nanagrpid = sys_le32_to_cpu(s->nanagrpid); s->pels = sys_le32_to_cpu(s->pels); s->maxcmd = sys_le16_to_cpu(s->maxcmd); s->nn = sys_le32_to_cpu(s->nn); s->oncs = sys_le16_to_cpu(s->oncs); s->fuses = sys_le16_to_cpu(s->fuses); s->awun = sys_le16_to_cpu(s->awun); s->awupf = sys_le16_to_cpu(s->awupf); s->acwu = sys_le16_to_cpu(s->acwu); s->sgls = sys_le32_to_cpu(s->sgls); s->mnan = sys_le32_to_cpu(s->mnan); #else ARG_UNUSED(s); #endif } #include <zephyr/device.h> #include <zephyr/drivers/pcie/pcie.h> #include <zephyr/drivers/pcie/msi.h> #define NVME_PCIE_BAR_IDX 0 #define NVME_REQUEST_AMOUNT (CONFIG_NVME_ADMIN_ENTRIES + \ CONFIG_NVME_IO_ENTRIES) /* admin queue + io queue(s) */ #define NVME_PCIE_MSIX_VECTORS 1 + CONFIG_NVME_IO_QUEUES #define NVME_QUEUE_ALLOCATE(name, n_entries) \ static struct nvme_command cmd_##name[n_entries] __aligned(0x1000); \ static struct nvme_completion cpl_##name[n_entries] __aligned(0x1000); \ \ static struct nvme_cmd_qpair name = { \ .num_entries = n_entries, \ .cmd = cmd_##name, \ .cpl = cpl_##name, \ } #define NVME_ADMINQ_ALLOCATE(n, n_entries) \ NVME_QUEUE_ALLOCATE(admin_##n, n_entries) #define NVME_IOQ_ALLOCATE(n, n_entries) \ NVME_QUEUE_ALLOCATE(io_##n, n_entries) struct nvme_controller_config { struct pcie_dev *pcie; }; struct nvme_controller { DEVICE_MMIO_RAM; const struct device *dev; struct k_mutex lock; uint32_t id; msi_vector_t vectors[NVME_PCIE_MSIX_VECTORS]; struct nvme_controller_data cdata; uint32_t num_io_queues; struct nvme_cmd_qpair *adminq; struct nvme_cmd_qpair *ioq; uint32_t ready_timeout_in_ms; /** LO and HI capacity mask */ uint32_t cap_lo; uint32_t cap_hi; 
/** Page size and log2(page_size) - 12 that we're currently using */ uint32_t page_size; uint32_t mps; /** doorbell stride */ uint32_t dstrd; /** maximum i/o size in bytes */ uint32_t max_xfer_size; struct nvme_namespace ns[CONFIG_NVME_MAX_NAMESPACES]; }; static inline bool nvme_controller_has_dataset_mgmt(struct nvme_controller *ctrlr) { /* Assumes cd was byte swapped by nvme_controller_data_swapbytes() */ return ((ctrlr->cdata.oncs >> NVME_CTRLR_DATA_ONCS_DSM_SHIFT) & NVME_CTRLR_DATA_ONCS_DSM_MASK); } static inline void nvme_lock(const struct device *dev) { struct nvme_controller *nvme_ctrlr = dev->data; k_mutex_lock(&nvme_ctrlr->lock, K_FOREVER); } static inline void nvme_unlock(const struct device *dev) { struct nvme_controller *nvme_ctrlr = dev->data; k_mutex_unlock(&nvme_ctrlr->lock); } #endif /* ZEPHYR_DRIVERS_DISK_NVME_NHME_H_ */ ```
/content/code_sandbox/drivers/disk/nvme/nvme.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,425
```objective-c
/*
 * Derived from FreeBSD original driver made by Jim Harris
 * with contributions from Alexander Motin and Wojciech Macek
 */

#ifndef ZEPHYR_DRIVERS_DISK_NVME_NVME_NAMESPACE_H_
#define ZEPHYR_DRIVERS_DISK_NVME_NVME_NAMESPACE_H_

#include <zephyr/drivers/disk.h>

/* Identify Namespace data structure (CNS 00h), little-endian on the
 * wire; convert with nvme_namespace_data_swapbytes() after transfer.
 */
struct nvme_namespace_data {
	/** namespace size */
	uint64_t		nsze;
	/** namespace capacity */
	uint64_t		ncap;
	/** namespace utilization */
	uint64_t		nuse;
	/** namespace features */
	uint8_t			nsfeat;
	/** number of lba formats */
	uint8_t			nlbaf;
	/** formatted lba size */
	uint8_t			flbas;
	/** metadata capabilities */
	uint8_t			mc;
	/** end-to-end data protection capabilities */
	uint8_t			dpc;
	/** end-to-end data protection type settings */
	uint8_t			dps;
	/** Namespace Multi-path I/O and Namespace Sharing Capabilities */
	uint8_t			nmic;
	/** Reservation Capabilities */
	uint8_t			rescap;
	/** Format Progress Indicator */
	uint8_t			fpi;
	/** Deallocate Logical Block Features */
	uint8_t			dlfeat;
	/** Namespace Atomic Write Unit Normal  */
	uint16_t		nawun;
	/** Namespace Atomic Write Unit Power Fail */
	uint16_t		nawupf;
	/** Namespace Atomic Compare & Write Unit */
	uint16_t		nacwu;
	/** Namespace Atomic Boundary Size Normal */
	uint16_t		nabsn;
	/** Namespace Atomic Boundary Offset */
	uint16_t		nabo;
	/** Namespace Atomic Boundary Size Power Fail */
	uint16_t		nabspf;
	/** Namespace Optimal IO Boundary */
	uint16_t		noiob;
	/** NVM Capacity */
	uint8_t			nvmcap[16];
	/** Namespace Preferred Write Granularity  */
	uint16_t		npwg;
	/** Namespace Preferred Write Alignment */
	uint16_t		npwa;
	/** Namespace Preferred Deallocate Granularity */
	uint16_t		npdg;
	/** Namespace Preferred Deallocate Alignment */
	uint16_t		npda;
	/** Namespace Optimal Write Size */
	uint16_t		nows;
	/* bytes 74-91: Reserved */
	uint8_t			reserved5[18];
	/** ANA Group Identifier */
	uint32_t		anagrpid;
	/* bytes 96-98: Reserved */
	uint8_t			reserved6[3];
	/** Namespace Attributes */
	uint8_t			nsattr;
	/** NVM Set Identifier */
	uint16_t		nvmsetid;
	/** Endurance Group Identifier */
	uint16_t		endgid;
	/** Namespace Globally Unique Identifier */
	uint8_t			nguid[16];
	/** IEEE Extended Unique Identifier */
	uint8_t			eui64[8];
	/** lba format support */
	uint32_t		lbaf[16];
	uint8_t			reserved7[192];
	uint8_t			vendor_specific[3712];
} __packed __aligned(4);

/* Convert the little-endian Identify Namespace data to host byte order.
 * No-op on little-endian targets.
 */
static inline
void nvme_namespace_data_swapbytes(struct nvme_namespace_data *s)
{
#if _BYTE_ORDER != _LITTLE_ENDIAN
	int i;

	s->nsze = sys_le64_to_cpu(s->nsze);
	s->ncap = sys_le64_to_cpu(s->ncap);
	s->nuse = sys_le64_to_cpu(s->nuse);
	s->nawun = sys_le16_to_cpu(s->nawun);
	s->nawupf = sys_le16_to_cpu(s->nawupf);
	s->nacwu = sys_le16_to_cpu(s->nacwu);
	s->nabsn = sys_le16_to_cpu(s->nabsn);
	s->nabo = sys_le16_to_cpu(s->nabo);
	s->nabspf = sys_le16_to_cpu(s->nabspf);
	s->noiob = sys_le16_to_cpu(s->noiob);
	s->npwg = sys_le16_to_cpu(s->npwg);
	s->npwa = sys_le16_to_cpu(s->npwa);
	s->npdg = sys_le16_to_cpu(s->npdg);
	s->npda = sys_le16_to_cpu(s->npda);
	s->nows = sys_le16_to_cpu(s->nows);
	s->anagrpid = sys_le32_to_cpu(s->anagrpid);
	s->nvmsetid = sys_le16_to_cpu(s->nvmsetid);
	s->endgid = sys_le16_to_cpu(s->endgid);
	for (i = 0; i < 16; i++) {
		s->lbaf[i] = sys_le32_to_cpu(s->lbaf[i]);
	}
#else
	ARG_UNUSED(s);
#endif
}

/* Readable identifier: nvme%%n%%\0 */
#define NVME_NAMESPACE_NAME_MAX_LENGTH 10

/* Runtime state of one namespace, exposed as a Zephyr disk. */
struct nvme_namespace {
	struct nvme_controller	*ctrlr;
	struct nvme_namespace_data data;
	struct disk_info	disk;
	uint32_t		id;
	uint32_t		flags;
	uint32_t		boundary;
	char			name[NVME_NAMESPACE_NAME_MAX_LENGTH];
};

enum nvme_namespace_flags {
	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
	NVME_NS_FLUSH_SUPPORTED		= 0x2,
};

/* Accessors for namespace geometry and identity. */
uint32_t nvme_namespace_get_sector_size(struct nvme_namespace *ns);

uint64_t nvme_namespace_get_num_sectors(struct nvme_namespace *ns);

uint64_t nvme_namespace_get_size(struct nvme_namespace *ns);

uint32_t nvme_namespace_get_flags(struct nvme_namespace *ns);

const char *nvme_namespace_get_serial_number(struct nvme_namespace *ns);

const char *nvme_namespace_get_model_number(struct nvme_namespace *ns);

const struct nvme_namespace_data *
nvme_namespace_get_data(struct nvme_namespace *ns);

uint32_t nvme_namespace_get_stripesize(struct nvme_namespace *ns);

/* Initialize namespace @p id (1-based) on @p ctrlr; 0 on success. */
int nvme_namespace_construct(struct nvme_namespace *ns,
			     uint32_t id,
			     struct nvme_controller *ctrlr);

/* Register the namespace with the Zephyr disk subsystem. */
int nvme_namespace_disk_setup(struct nvme_namespace *ns,
			      struct disk_info *disk);

#endif /* ZEPHYR_DRIVERS_DISK_NVME_NVME_NAMESPACE_H_ */
```
/content/code_sandbox/drivers/disk/nvme/nvme_namespace.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,327
```unknown
# Atmel SAM Static Memory Controller (SMC) driver option.
config MEMC_SAM_SMC
	bool "Atmel Static Memory Controller (SMC)"
	default y
	depends on DT_HAS_ATMEL_SAM_SMC_ENABLED
	help
	  Enable Atmel Static Memory Controller.
```
/content/code_sandbox/drivers/memc/Kconfig.sam
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
48
```c /* * * Derived from FreeBSD original driver made by Jim Harris * with contributions from Alexander Motin, Wojciech Macek, and Warner Losh */ #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/cache.h> #include <zephyr/sys/byteorder.h> #include <string.h> #include "nvme.h" #include "nvme_helpers.h" static struct nvme_prp_list prp_list_pool[CONFIG_NVME_PRP_LIST_AMOUNT]; static sys_dlist_t free_prp_list; static struct nvme_request request_pool[NVME_REQUEST_AMOUNT]; static sys_dlist_t free_request; static sys_dlist_t pending_request; static void request_timeout(struct k_work *work); static K_WORK_DELAYABLE_DEFINE(request_timer, request_timeout); #ifdef CONFIG_NVME_LOG_LEVEL_DBG struct nvme_status_string { uint16_t sc; const char *str; }; static struct nvme_status_string generic_status[] = { { NVME_SC_SUCCESS, "SUCCESS" }, { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" }, { NVME_SC_INVALID_FIELD, "INVALID_FIELD" }, { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" }, { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" }, { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" }, { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" }, { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" }, { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" }, { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" }, { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" }, { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" }, { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" }, { NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" }, { NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" }, { NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" }, { NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" }, { NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" }, { NVME_SC_INVALID_USE_OF_CMB, "INVALID USE 
OF CONTROLLER MEMORY BUFFER" }, { NVME_SC_PRP_OFFSET_INVALID, "PRP OFFSET INVALID" }, { NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" }, { NVME_SC_OPERATION_DENIED, "OPERATION DENIED" }, { NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" }, { NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" }, { NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" }, { NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" }, { NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" }, { NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" }, { NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" }, { NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL_DATA_BLOCK_GRANULARITY_INVALID" }, { NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" }, { NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" }, { NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" }, { NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" }, { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" }, { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" }, { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" }, { NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, { NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" }, { 0xFFFF, "GENERIC" } }; static struct nvme_status_string command_specific_status[] = { { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" }, { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" }, { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" }, { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" }, { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" }, { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" }, { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" }, { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" }, { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" }, { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" 
}, { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" }, { NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" }, { NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" }, { NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" }, { NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" }, { NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" }, { NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" }, { NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" }, { NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" }, { NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" }, { NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" }, { NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" }, { NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" }, { NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" }, { NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" }, { NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" }, { NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" }, { NVME_SC_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" }, { NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" }, { NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" }, { NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" }, { NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" }, { NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" }, { NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" }, { NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIED INVALID" }, { NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" }, { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" }, { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" }, { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" }, { 0xFFFF, "COMMAND SPECIFIC" } }; 
static struct nvme_status_string media_error_status[] = { { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" }, { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" }, { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" }, { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" }, { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" }, { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" }, { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" }, { NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" }, { 0xFFFF, "MEDIA ERROR" } }; static struct nvme_status_string path_related_status[] = { { NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" }, { NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" }, { NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" }, { NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" }, { NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" }, { NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" }, { NVME_SC_COMMAND_ABORTED_BY_HOST, "COMMAND ABORTED BY HOST" }, { 0xFFFF, "PATH RELATED" }, }; static const char *get_status_string(uint16_t sct, uint16_t sc) { struct nvme_status_string *entry; switch (sct) { case NVME_SCT_GENERIC: entry = generic_status; break; case NVME_SCT_COMMAND_SPECIFIC: entry = command_specific_status; break; case NVME_SCT_MEDIA_ERROR: entry = media_error_status; break; case NVME_SCT_PATH_RELATED: entry = path_related_status; break; case NVME_SCT_VENDOR_SPECIFIC: return "VENDOR SPECIFIC"; default: return "RESERVED"; } while (entry->sc != 0xFFFF) { if (entry->sc == sc) { return entry->str; } entry++; } return entry->str; } void nvme_completion_print(const struct nvme_completion *cpl) { uint8_t sct, sc, crd, m, dnr, p; sct = NVME_STATUS_GET_SCT(cpl->status); sc = NVME_STATUS_GET_SC(cpl->status); crd = NVME_STATUS_GET_CRD(cpl->status); m = NVME_STATUS_GET_M(cpl->status); dnr = NVME_STATUS_GET_DNR(cpl->status); p = 
NVME_STATUS_GET_P(cpl->status); LOG_DBG("%s (%02x/%02x) crd:%x m:%x dnr:%x p:%d " "sqid:%d cid:%d cdw0:%x\n", get_status_string(sct, sc), sct, sc, crd, m, dnr, p, cpl->sqid, cpl->cid, cpl->cdw0); } #endif /* CONFIG_NVME_LOG_LEVEL_DBG */ void nvme_cmd_init(void) { int idx; sys_dlist_init(&free_request); sys_dlist_init(&pending_request); sys_dlist_init(&free_prp_list); for (idx = 0; idx < NVME_REQUEST_AMOUNT; idx++) { sys_dlist_append(&free_request, &request_pool[idx].node); } for (idx = 0; idx < CONFIG_NVME_PRP_LIST_AMOUNT; idx++) { sys_dlist_append(&free_prp_list, &prp_list_pool[idx].node); } } static struct nvme_prp_list *nvme_prp_list_alloc(void) { sys_dnode_t *node; node = sys_dlist_peek_head(&free_prp_list); if (!node) { LOG_ERR("Could not allocate PRP list"); return NULL; } sys_dlist_remove(node); return CONTAINER_OF(node, struct nvme_prp_list, node); } static void nvme_prp_list_free(struct nvme_prp_list *prp_list) { memset(prp_list, 0, sizeof(struct nvme_prp_list)); sys_dlist_append(&free_prp_list, &prp_list->node); } void nvme_cmd_request_free(struct nvme_request *request) { if (sys_dnode_is_linked(&request->node)) { sys_dlist_remove(&request->node); } if (request->prp_list != NULL) { nvme_prp_list_free(request->prp_list); } memset(request, 0, sizeof(struct nvme_request)); sys_dlist_append(&free_request, &request->node); } struct nvme_request *nvme_cmd_request_alloc(void) { sys_dnode_t *node; node = sys_dlist_peek_head(&free_request); if (!node) { LOG_ERR("Could not allocate request"); return NULL; } sys_dlist_remove(node); return CONTAINER_OF(node, struct nvme_request, node); } static void nvme_cmd_register_request(struct nvme_request *request) { sys_dlist_append(&pending_request, &request->node); request->req_start = k_uptime_get_32(); if (!k_work_delayable_remaining_get(&request_timer)) { k_work_reschedule(&request_timer, K_SECONDS(CONFIG_NVME_REQUEST_TIMEOUT)); } } static void request_timeout(struct k_work *work) { uint32_t current = k_uptime_get_32(); 
struct nvme_request *request, *next; ARG_UNUSED(work); SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&pending_request, request, next, node) { if ((int32_t)(request->req_start + CONFIG_NVME_REQUEST_TIMEOUT - current) > 0) { break; } LOG_WRN("Request %p CID %u timed-out", request, request->cmd.cdw0.cid); /* ToDo: * - check CSTS for fatal fault * - reset hw otherwise if it's the case * - or check completion for missed interruption */ if (request->cb_fn) { request->cb_fn(request->cb_arg, NULL); } nvme_cmd_request_free(request); } if (request) { k_work_reschedule(&request_timer, K_SECONDS(request->req_start + CONFIG_NVME_REQUEST_TIMEOUT - current)); } } static bool nvme_completion_is_retry(const struct nvme_completion *cpl) { uint8_t sct, sc, dnr; sct = NVME_STATUS_GET_SCT(cpl->status); sc = NVME_STATUS_GET_SC(cpl->status); dnr = NVME_STATUS_GET_DNR(cpl->status); /* * TODO: spec is not clear how commands that are aborted due * to TLER will be marked. So for now, it seems * NAMESPACE_NOT_READY is the only case where we should * look at the DNR bit. Requests failed with ABORTED_BY_REQUEST * set the DNR bit correctly since the driver controls that. 
*/ switch (sct) { case NVME_SCT_GENERIC: switch (sc) { case NVME_SC_ABORTED_BY_REQUEST: case NVME_SC_NAMESPACE_NOT_READY: if (dnr) { return false; } return true; case NVME_SC_INVALID_OPCODE: case NVME_SC_INVALID_FIELD: case NVME_SC_COMMAND_ID_CONFLICT: case NVME_SC_DATA_TRANSFER_ERROR: case NVME_SC_ABORTED_POWER_LOSS: case NVME_SC_INTERNAL_DEVICE_ERROR: case NVME_SC_ABORTED_SQ_DELETION: case NVME_SC_ABORTED_FAILED_FUSED: case NVME_SC_ABORTED_MISSING_FUSED: case NVME_SC_INVALID_NAMESPACE_OR_FORMAT: case NVME_SC_COMMAND_SEQUENCE_ERROR: case NVME_SC_LBA_OUT_OF_RANGE: case NVME_SC_CAPACITY_EXCEEDED: default: return false; } case NVME_SCT_COMMAND_SPECIFIC: case NVME_SCT_MEDIA_ERROR: return false; case NVME_SCT_PATH_RELATED: switch (sc) { case NVME_SC_INTERNAL_PATH_ERROR: if (dnr) { return false; } return true; default: return false; } case NVME_SCT_VENDOR_SPECIFIC: default: return false; } } static void nvme_cmd_request_complete(struct nvme_request *request, struct nvme_completion *cpl) { bool error, retriable, retry; error = nvme_completion_is_error(cpl); retriable = nvme_completion_is_retry(cpl); retry = error && retriable && request->retries < CONFIG_NVME_RETRY_COUNT; if (retry) { LOG_DBG("CMD will be retried"); request->qpair->num_retries++; } if (error && (!retriable || (request->retries >= CONFIG_NVME_RETRY_COUNT))) { LOG_DBG("CMD error"); request->qpair->num_failures++; } if (cpl->cid != request->cmd.cdw0.cid) { LOG_ERR("cpl cid != cmd cid"); } if (retry) { LOG_DBG("Retrying CMD"); /* Let's remove it from pending... 
*/ sys_dlist_remove(&request->node); /* ...and re-submit, thus re-adding to pending */ nvme_cmd_qpair_submit_request(request->qpair, request); request->retries++; } else { LOG_DBG("Request %p CMD complete on %p/%p", request, request->cb_fn, request->cb_arg); if (request->cb_fn) { request->cb_fn(request->cb_arg, cpl); } nvme_cmd_request_free(request); } } static void nvme_cmd_qpair_process_completion(struct nvme_cmd_qpair *qpair) { struct nvme_request *request; struct nvme_completion cpl; int done = 0; if (qpair->num_intr_handler_calls == 0 && qpair->phase == 0) { LOG_WRN("Phase wrong for first interrupt call."); } qpair->num_intr_handler_calls++; while (1) { uint16_t status; status = sys_le16_to_cpu(qpair->cpl[qpair->cq_head].status); if (NVME_STATUS_GET_P(status) != qpair->phase) { break; } cpl = qpair->cpl[qpair->cq_head]; nvme_completion_swapbytes(&cpl); if (NVME_STATUS_GET_P(status) != NVME_STATUS_GET_P(cpl.status)) { LOG_WRN("Phase unexpectedly inconsistent"); } if (cpl.cid < NVME_REQUEST_AMOUNT) { request = &request_pool[cpl.cid]; } else { request = NULL; } done++; if (request != NULL) { nvme_cmd_request_complete(request, &cpl); qpair->sq_head = cpl.sqhd; } else { LOG_ERR("cpl (cid = %u) does not map to cmd", cpl.cid); } qpair->cq_head++; if (qpair->cq_head == qpair->num_entries) { qpair->cq_head = 0; qpair->phase = !qpair->phase; } } if (done != 0) { mm_reg_t regs = DEVICE_MMIO_GET(qpair->ctrlr->dev); sys_write32(qpair->cq_head, regs + qpair->cq_hdbl_off); } } static void nvme_cmd_qpair_msi_handler(const void *arg) { const struct nvme_cmd_qpair *qpair = arg; nvme_cmd_qpair_process_completion((struct nvme_cmd_qpair *)qpair); } int nvme_cmd_qpair_setup(struct nvme_cmd_qpair *qpair, struct nvme_controller *ctrlr, uint32_t id) { const struct nvme_controller_config *nvme_ctrlr_cfg = ctrlr->dev->config; qpair->ctrlr = ctrlr; qpair->id = id; qpair->vector = qpair->id; qpair->num_cmds = 0; qpair->num_intr_handler_calls = 0; qpair->num_retries = 0; 
qpair->num_failures = 0; qpair->num_ignored = 0; qpair->cmd_bus_addr = (uintptr_t)qpair->cmd; qpair->cpl_bus_addr = (uintptr_t)qpair->cpl; qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell) + (qpair->id << (ctrlr->dstrd + 1)); qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell) + (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd); if (!pcie_msi_vector_connect(nvme_ctrlr_cfg->pcie->bdf, &ctrlr->vectors[qpair->vector], nvme_cmd_qpair_msi_handler, qpair, 0)) { LOG_ERR("Failed to connect MSI-X vector %u", qpair->id); return -EIO; } LOG_DBG("CMD Qpair created ID %u, %u entries - cmd/cpl addr " "0x%lx/0x%lx - sq/cq offsets %u/%u", qpair->id, qpair->num_entries, qpair->cmd_bus_addr, qpair->cpl_bus_addr, qpair->sq_tdbl_off, qpair->cq_hdbl_off); return 0; } void nvme_cmd_qpair_reset(struct nvme_cmd_qpair *qpair) { qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0; /* * First time through the completion queue, HW will set phase * bit on completions to 1. So set this to 1 here, indicating * we're looking for a 1 to know which entries have completed. * we'll toggle the bit each time when the completion queue * rolls over. 
*/ qpair->phase = 1; memset(qpair->cmd, 0, qpair->num_entries * sizeof(struct nvme_command)); memset(qpair->cpl, 0, qpair->num_entries * sizeof(struct nvme_completion)); } static int nvme_cmd_qpair_fill_prp_list(struct nvme_cmd_qpair *qpair, struct nvme_request *request, int n_prp) { struct nvme_prp_list *prp_list; uintptr_t p_addr; int idx; prp_list = nvme_prp_list_alloc(); if (prp_list == NULL) { return -ENOMEM; } p_addr = (uintptr_t)request->payload; request->cmd.dptr.prp1 = (uint64_t)sys_cpu_to_le64(p_addr); request->cmd.dptr.prp2 = (uint64_t)sys_cpu_to_le64(&prp_list->prp); p_addr = NVME_PRP_NEXT_PAGE(p_addr); for (idx = 0; idx < n_prp; idx++) { prp_list->prp[idx] = (uint64_t)sys_cpu_to_le64(p_addr); p_addr = NVME_PRP_NEXT_PAGE(p_addr); } request->prp_list = prp_list; return 0; } static int compute_n_prp(uintptr_t addr, uint32_t size) { int n_prp; /* See Common Command Format, Data Pointer (DPTR) field */ n_prp = size / CONFIG_MMU_PAGE_SIZE; if (n_prp == 0) { n_prp = 1; } if (size != CONFIG_MMU_PAGE_SIZE) { size = size % CONFIG_MMU_PAGE_SIZE; } if (n_prp == 1) { if ((addr + (uintptr_t)size) > NVME_PRP_NEXT_PAGE(addr)) { n_prp++; } } else if (size > 0) { n_prp++; } return n_prp; } static int nvme_cmd_qpair_fill_dptr(struct nvme_cmd_qpair *qpair, struct nvme_request *request) { switch (request->type) { case NVME_REQUEST_NULL: break; case NVME_REQUEST_VADDR: int n_prp; if (request->payload_size > qpair->ctrlr->max_xfer_size) { LOG_ERR("VADDR request's payload too big"); return -EINVAL; } n_prp = compute_n_prp((uintptr_t)request->payload, request->payload_size); if (n_prp <= 2) { request->cmd.dptr.prp1 = (uint64_t)sys_cpu_to_le64(request->payload); if (n_prp == 2) { request->cmd.dptr.prp2 = (uint64_t)sys_cpu_to_le64( NVME_PRP_NEXT_PAGE( (uintptr_t)request->payload)); } else { request->cmd.dptr.prp2 = 0; } break; } return nvme_cmd_qpair_fill_prp_list(qpair, request, n_prp); default: break; } return 0; } int nvme_cmd_qpair_submit_request(struct nvme_cmd_qpair 
*qpair, struct nvme_request *request) { mm_reg_t regs = DEVICE_MMIO_GET(qpair->ctrlr->dev); int ret; request->qpair = qpair; request->cmd.cdw0.cid = sys_cpu_to_le16((uint16_t)(request - request_pool)); ret = nvme_cmd_qpair_fill_dptr(qpair, request); if (ret != 0) { nvme_cmd_request_free(request); return ret; } nvme_cmd_register_request(request); memcpy(&qpair->cmd[qpair->sq_tail], &request->cmd, sizeof(request->cmd)); qpair->sq_tail++; if (qpair->sq_tail == qpair->num_entries) { qpair->sq_tail = 0; } sys_write32(qpair->sq_tail, regs + qpair->sq_tdbl_off); qpair->num_cmds++; LOG_DBG("Request %p %llu submitted: CID %u - sq_tail %u", request, qpair->num_cmds, request->cmd.cdw0.cid, qpair->sq_tail - 1); return 0; } void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl) { struct nvme_completion_poll_status *status = arg; if (cpl != NULL) { memcpy(&status->cpl, cpl, sizeof(*cpl)); } else { status->status = -ETIMEDOUT; } k_sem_give(&status->sem); } ```
/content/code_sandbox/drivers/disk/nvme/nvme_cmd.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,956
```unknown
if DT_HAS_NXP_IMX_FLEXSPI_ENABLED

config MEMC_MCUX_FLEXSPI_W956A8MBYA
	bool "MCUX FlexSPI Winbond W956A8MBYA HyperRAM driver"
	default y
	depends on DT_HAS_NXP_IMX_FLEXSPI_W956A8MBYA_ENABLED
	select MEMC_MCUX_FLEXSPI

config MEMC_MCUX_FLEXSPI_S27KS0641
	bool "MCUX FlexSPI Cypress S27KS0641 HyperRAM driver"
	default y
	depends on DT_HAS_NXP_IMX_FLEXSPI_S27KS0641_ENABLED
	select MEMC_MCUX_FLEXSPI

config MEMC_MCUX_FLEXSPI_APS6408L
	bool "MCUX FlexSPI AP Memory APS6408L pSRAM driver"
	default y
	depends on DT_HAS_NXP_IMX_FLEXSPI_APS6408L_ENABLED
	select MEMC_MCUX_FLEXSPI

config MEMC_MCUX_FLEXSPI_IS66WVQ8M4
	bool "MCUX FlexSPI ISSI IS66WVQ8M4 pSRAM driver"
	default y
	depends on DT_HAS_NXP_IMX_FLEXSPI_IS66WVQ8M4_ENABLED
	select MEMC_MCUX_FLEXSPI

config MEMC_MCUX_FLEXSPI_INIT_PRIORITY
	int "MCUX FLEXSPI MEMC driver initialization priority"
	default MEMC_INIT_PRIORITY
	help
	  Initialization priority for FlexSPI MEMC driver. In cases where the
	  flash driver must initialize before the MEMC RAM driver,
	  initialization priorities can be set such that
	  MEMC_MCUX_FLEXSPI_INIT_PRIORITY < FLASH_INIT_PRIORITY <
	  MEMC_INIT_PRIORITY

config MEMC_MCUX_FLEXSPI_INIT_XIP
	bool "Initialize FLEXSPI when using device for XIP"
	help
	  Initialize the FLEXSPI device even when using it for XIP. If this
	  Kconfig is enabled, the user must ensure that the pin control state
	  used does not reconfigure the pins used to interface with the flash
	  device used for XIP, and that the configuration settings used for
	  the FLEXSPI are compatible with those needed for XIP from the flash
	  device.

config MEMC_MCUX_FLEXSPI
	bool
	select PINCTRL

endif # DT_HAS_NXP_IMX_FLEXSPI_ENABLED

if DT_HAS_NXP_FLEXRAM_ENABLED

config MEMC_NXP_FLEXRAM
	bool
	default y

config MEMC_NXP_FLEXRAM_MAGIC_ADDR_API
	bool "NXP FlexRAM magic addr API"
	help
	  Enable API to use flexRAM magic address functionality

config MEMC_NXP_FLEXRAM_ERROR_INTERRUPT
	bool "NXP FlexRAM error interrupt"
	help
	  Allow flexram to generate error interrupts

endif # DT_HAS_NXP_FLEXRAM_ENABLED
```
/content/code_sandbox/drivers/memc/Kconfig.mcux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
603
```c /* * */ #define DT_DRV_COMPAT renesas_smartbond_nor_psram #include <zephyr/device.h> #include <zephyr/kernel.h> #include <zephyr/irq.h> #include <DA1469xAB.h> #include <zephyr/pm/device.h> #include <da1469x_qspic.h> #include <da1469x_pd.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(smartbond_nor_psram, CONFIG_MEMC_LOG_LEVEL); #define CLK_AMBA_REG_SET_FIELD(_field, _var, _val) \ ((_var)) = \ ((_var) & ~(CRG_TOP_CLK_AMBA_REG_ ## _field ## _Msk)) | \ (((_val) << CRG_TOP_CLK_AMBA_REG_ ## _field ## _Pos) & \ CRG_TOP_CLK_AMBA_REG_ ## _field ## _Msk) #define QSPIC2_CTRLMODE_REG_SET_FIELD(_field, _var, _val) \ ((_var)) = \ ((_var) & ~(QSPIC2_QSPIC2_CTRLMODE_REG_ ## _field ## _Msk)) | \ (((_val) << QSPIC2_QSPIC2_CTRLMODE_REG_ ## _field ## _Pos) & \ QSPIC2_QSPIC2_CTRLMODE_REG_ ## _field ## _Msk) #define QSPIC2_BURSTCMDA_REG_SET_FIELD(_field, _var, _val) \ ((_var)) = \ ((_var) & ~(QSPIC2_QSPIC2_BURSTCMDA_REG_ ## _field ## _Msk)) | \ (((_val) << QSPIC2_QSPIC2_BURSTCMDA_REG_ ## _field ## _Pos) & \ QSPIC2_QSPIC2_BURSTCMDA_REG_ ## _field ## _Msk) #define QSPIC2_BURSTCMDB_REG_SET_FIELD(_field, _var, _val) \ ((_var)) = \ ((_var) & ~(QSPIC2_QSPIC2_BURSTCMDB_REG_ ## _field ## _Msk)) | \ (((_val) << QSPIC2_QSPIC2_BURSTCMDB_REG_ ## _field ## _Pos) & \ QSPIC2_QSPIC2_BURSTCMDB_REG_ ## _field ## _Msk) #define QSPIC2_AWRITECMD_REG_SET_FIELD(_field, _var, _val) \ ((_var)) = \ ((_var) & ~(QSPIC2_QSPIC2_AWRITECMD_REG_ ## _field ## _Msk)) | \ (((_val) << QSPIC2_QSPIC2_AWRITECMD_REG_ ## _field ## _Pos) & \ QSPIC2_QSPIC2_AWRITECMD_REG_ ## _field ## _Msk) static void memc_set_status(bool status, int clk_div) { unsigned int key; uint32_t clk_amba_reg; /* Clock AMBA register might be accessed by multiple driver classes */ key = irq_lock(); clk_amba_reg = CRG_TOP->CLK_AMBA_REG; if (status) { CLK_AMBA_REG_SET_FIELD(QSPI2_ENABLE, clk_amba_reg, 1); CLK_AMBA_REG_SET_FIELD(QSPI2_DIV, clk_amba_reg, clk_div); } else { CLK_AMBA_REG_SET_FIELD(QSPI2_ENABLE, clk_amba_reg, 0); } 
CRG_TOP->CLK_AMBA_REG = clk_amba_reg; irq_unlock(key); } static void memc_automode_configure(void) { uint32_t reg; reg = QSPIC2->QSPIC2_CTRLMODE_REG; QSPIC2_CTRLMODE_REG_SET_FIELD(QSPIC_SRAM_EN, reg, DT_INST_PROP(0, is_ram)); QSPIC2_CTRLMODE_REG_SET_FIELD(QSPIC_USE_32BA, reg, DT_INST_ENUM_IDX(0, addr_range)); QSPIC2_CTRLMODE_REG_SET_FIELD(QSPIC_CLK_MD, reg, DT_INST_ENUM_IDX(0, clock_mode)); QSPIC2_CTRLMODE_REG_SET_FIELD(QSPIC_AUTO_MD, reg, 1); QSPIC2->QSPIC2_CTRLMODE_REG = reg; reg = QSPIC2->QSPIC2_BURSTCMDA_REG; QSPIC2_BURSTCMDA_REG_SET_FIELD(QSPIC_DMY_TX_MD, reg, DT_INST_ENUM_IDX(0, rx_dummy_mode)); QSPIC2_BURSTCMDA_REG_SET_FIELD(QSPIC_ADR_TX_MD, reg, DT_INST_ENUM_IDX(0, rx_addr_mode)); QSPIC2_BURSTCMDA_REG_SET_FIELD(QSPIC_INST_TX_MD, reg, DT_INST_ENUM_IDX(0, rx_inst_mode)); #if DT_INST_PROP(0, extra_byte_enable) QSPIC2_BURSTCMDA_REG_SET_FIELD(QSPIC_EXT_TX_MD, reg, DT_INST_ENUM_IDX(0, rx_extra_mode)); #endif QSPIC2_BURSTCMDA_REG_SET_FIELD(QSPIC_INST, reg, DT_INST_PROP(0, read_cmd)); #if DT_INST_PROP(0, extra_byte_enable) QSPIC2_BURSTCMDA_REG_SET_FIELD(QSPIC_EXT_BYTE, reg, DT_INST_PROP(0, extra_byte)); #endif QSPIC2->QSPIC2_BURSTCMDA_REG = reg; reg = QSPIC2->QSPIC2_BURSTCMDB_REG; QSPIC2_BURSTCMDB_REG_SET_FIELD(QSPIC_DMY_NUM, reg, DT_INST_ENUM_IDX(0, dummy_bytes_count)); QSPIC2_BURSTCMDB_REG_SET_FIELD(QSPIC_DAT_RX_MD, reg, DT_INST_ENUM_IDX(0, rx_data_mode)); QSPIC2_BURSTCMDB_REG_SET_FIELD(QSPIC_INST_MD, reg, 0); QSPIC2_BURSTCMDB_REG_SET_FIELD(QSPIC_EXT_BYTE_EN, reg, DT_INST_PROP(0, extra_byte_enable)); QSPIC2->QSPIC2_BURSTCMDB_REG = reg; reg = QSPIC2->QSPIC2_AWRITECMD_REG; QSPIC2_AWRITECMD_REG_SET_FIELD(QSPIC_WR_DAT_TX_MD, reg, DT_INST_ENUM_IDX(0, tx_data_mode)); QSPIC2_AWRITECMD_REG_SET_FIELD(QSPIC_WR_ADR_TX_MD, reg, DT_INST_ENUM_IDX(0, tx_addr_mode)); QSPIC2_AWRITECMD_REG_SET_FIELD(QSPIC_WR_INST_TX_MD, reg, DT_INST_ENUM_IDX(0, tx_inst_mode)); QSPIC2_AWRITECMD_REG_SET_FIELD(QSPIC_WR_INST, reg, DT_INST_PROP(0, write_cmd)); QSPIC2->QSPIC2_AWRITECMD_REG = reg; } /* 
Read PSRAM/NOR device ID using JEDEC commands. */ static bool memc_jedec_read_and_verify_id(QSPIC_TYPE qspi_id) { uint16_t device_density; bool ret = 0; qspi_memory_id_t memory_id; da1469x_qspi_memory_jedec_read_id(qspi_id, &memory_id); device_density = DT_INST_PROP(0, dev_density); ret |= !(memory_id.id == DT_INST_PROP(0, dev_id)); ret |= !(memory_id.type == DT_INST_PROP(0, dev_type)); ret |= !((memory_id.density & (device_density >> 8)) == (device_density & 0xFF)); return ret; } static int memc_smartbond_init(const struct device *dev) { uint32_t qspic_ctrlmode_reg; /* First QSPI controller is enabled so registers can be accessed */ memc_set_status(true, DT_INST_PROP_OR(0, clock_div, 0)); /* Apply the min. required settings before performing any transaction in manual mode. */ qspic_ctrlmode_reg = QSPIC2->QSPIC2_CTRLMODE_REG; QSPIC2_CTRLMODE_REG_SET_FIELD(QSPIC_CLK_MD, qspic_ctrlmode_reg, DT_INST_ENUM_IDX(0, clock_mode)); QSPIC2_CTRLMODE_REG_SET_FIELD(QSPIC_AUTO_MD, qspic_ctrlmode_reg, 0); QSPIC2->QSPIC2_CTRLMODE_REG = qspic_ctrlmode_reg; /* Reset PSRAM/NOR device using JDEC commands */ da1469x_qspi_memory_jedec_reset(QSPIC2_ID); /* Wait till reset is completed */ k_usleep(DT_INST_PROP(0, reset_delay_us)); if (memc_jedec_read_and_verify_id(QSPIC2_ID)) { LOG_ERR("Device detection failed"); memc_set_status(false, 0); return -EINVAL; } #if DT_INST_PROP(0, enter_qpi_mode) da1469x_qspi_enter_exit_qpi_mode(QSPIC2_ID, true, DT_INST_PROP(0, enter_qpi_cmd)); #endif /* Should be called prior to switching to auto mode and when the quad bus is selected! */ da1469x_qspi_set_bus_mode(QSPIC2_ID, QSPI_BUS_MODE_QUAD); da1469x_pd_acquire(MCU_PD_DOMAIN_SYS); /* From this point onwards memory device should be seen as memory mapped device. 
*/ memc_automode_configure(); return 0; } #ifdef CONFIG_PM_DEVICE static int memc_smartbond_pm_action(const struct device *dev, enum pm_device_action action) { switch (action) { case PM_DEVICE_ACTION_SUSPEND: /* * CLK_AMBA_REG, that controlls QSPIC2, is retained during sleep * (resides in PD_AON). However, unused blocks should be disabled * to minimize power consumption at sleep. */ memc_set_status(false, 0); da1469x_pd_release(MCU_PD_DOMAIN_SYS); break; case PM_DEVICE_ACTION_RESUME: /* * Mainly, required when in PM runtime mode. When in PM static mode, * the device will block till an ongoing/pending AMBA bus transfer * completes. */ da1469x_pd_acquire(MCU_PD_DOMAIN_SYS); /* * QSPIC2 is powered by PD_SYS which is turned off during sleep and * so QSPIC2 auto mode re-initialization is required. * * XXX: It's assumed that memory device's power rail, that should * be 1V8P, is not turned off and so the device itsef does not * require re-initialization. Revisit this part if power settings * are changed in the future, that should include: * * 1. Powering off the memory device by turning off 1V8P * (valid for FLASH/PSRAM). * 2. Powering down the memory device so it enters the suspend/low-power * state during sleep (valid for FLASH/NOR devices). */ memc_set_status(true, DT_INST_PROP_OR(0, clock_div, 0)); memc_automode_configure(); default: return -ENOTSUP; } return 0; } #endif #define SMARTBOND_MEMC_INIT(inst) \ BUILD_ASSERT(inst == 0, "multiple instances are not permitted"); \ BUILD_ASSERT(DT_INST_PROP(inst, is_ram), \ "current driver version suports only PSRAM devices"); \ \ PM_DEVICE_DT_INST_DEFINE(inst, memc_smartbond_pm_action); \ \ DEVICE_DT_INST_DEFINE(inst, memc_smartbond_init, PM_DEVICE_DT_INST_GET(inst), \ NULL, NULL, \ POST_KERNEL, CONFIG_MEMC_INIT_PRIORITY, NULL); DT_INST_FOREACH_STATUS_OKAY(SMARTBOND_MEMC_INIT) ```
/content/code_sandbox/drivers/memc/memc_smartbond_nor_psram.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,648
```c /* * */ #define DT_DRV_COMPAT mspi_aps6404l #include <zephyr/kernel.h> #include <zephyr/pm/device.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/mspi.h> #if CONFIG_SOC_FAMILY_AMBIQ #include "mspi_ambiq.h" typedef struct mspi_ambiq_timing_cfg mspi_timing_cfg; typedef enum mspi_ambiq_timing_param mspi_timing_param; #else typedef struct mspi_timing_cfg mspi_timing_cfg; typedef enum mspi_timing_param mspi_timing_param; #endif LOG_MODULE_REGISTER(memc_mspi_aps6404l, CONFIG_MEMC_LOG_LEVEL); #define APM_VENDOR_ID 0xD #define APS6404L_WRITE 0x02 #define APS6404L_READ 0x03 #define APS6404L_FAST_READ 0x0B #define APS6404L_QUAD_MODE_ENTER 0x35 #define APS6404L_QUAD_WRITE 0x38 #define APS6404L_RESET_ENABLE 0x66 #define APS6404L_RESET_MEMORY 0x99 #define APS6404L_READ_ID 0x9F #define APS6404L_HALF_SLEEP_ENTER 0xC0 #define APS6404L_QUAD_READ 0xEB #define APS6404L_QUAD_MODE_EXIT 0xF5 struct memc_mspi_aps6404l_config { uint32_t port; uint32_t mem_size; const struct device *bus; struct mspi_dev_id dev_id; struct mspi_dev_cfg serial_cfg; struct mspi_dev_cfg quad_cfg; struct mspi_dev_cfg tar_dev_cfg; struct mspi_xip_cfg tar_xip_cfg; struct mspi_scramble_cfg tar_scramble_cfg; mspi_timing_cfg tar_timing_cfg; mspi_timing_param timing_cfg_mask; bool sw_multi_periph; }; struct memc_mspi_aps6404l_data { struct mspi_dev_cfg dev_cfg; struct mspi_xip_cfg xip_cfg; struct mspi_scramble_cfg scramble_cfg; mspi_timing_cfg timing_cfg; struct mspi_xfer trans; struct mspi_xfer_packet packet; struct k_sem lock; }; static int memc_mspi_aps6404l_command_write(const struct device *psram, uint8_t cmd, uint32_t addr, uint8_t *wdata, uint32_t length) { const struct memc_mspi_aps6404l_config *cfg = psram->config; struct memc_mspi_aps6404l_data *data = psram->data; int ret; uint8_t buffer[16]; data->packet.dir = MSPI_TX; data->packet.cmd = cmd; data->packet.address = addr; data->packet.data_buf = buffer; data->packet.num_bytes = 
length; data->trans.async = false; data->trans.xfer_mode = MSPI_PIO; data->trans.tx_dummy = 0; data->trans.cmd_length = 1; data->trans.addr_length = 0; data->trans.hold_ce = false; data->trans.packets = &data->packet; data->trans.num_packet = 1; data->trans.timeout = 10; if (wdata != NULL) { memcpy(buffer, wdata, length); } ret = mspi_transceive(cfg->bus, &cfg->dev_id, (const struct mspi_xfer *)&data->trans); if (ret) { LOG_ERR("MSPI write transaction failed with code: %d/%u", ret, __LINE__); return -EIO; } return ret; } static int memc_mspi_aps6404l_command_read(const struct device *psram, uint8_t cmd, uint32_t addr, uint8_t *rdata, uint32_t length) { const struct memc_mspi_aps6404l_config *cfg = psram->config; struct memc_mspi_aps6404l_data *data = psram->data; int ret; uint8_t buffer[16]; data->packet.dir = MSPI_RX; data->packet.cmd = cmd; data->packet.address = addr; data->packet.data_buf = buffer; data->packet.num_bytes = length; data->trans.async = false; data->trans.xfer_mode = MSPI_PIO; data->trans.rx_dummy = 0; data->trans.cmd_length = 1; data->trans.addr_length = 3; data->trans.hold_ce = false; data->trans.packets = &data->packet; data->trans.num_packet = 1; data->trans.timeout = 10; ret = mspi_transceive(cfg->bus, &cfg->dev_id, (const struct mspi_xfer *)&data->trans); if (ret) { LOG_ERR("MSPI read transaction failed with code: %d/%u", ret, __LINE__); return -EIO; } memcpy(rdata, buffer, length); return ret; } static void acquire(const struct device *psram) { const struct memc_mspi_aps6404l_config *cfg = psram->config; struct memc_mspi_aps6404l_data *data = psram->data; k_sem_take(&data->lock, K_FOREVER); if (cfg->sw_multi_periph) { while (mspi_dev_config(cfg->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_ALL, &data->dev_cfg)) { ; } } else { while (mspi_dev_config(cfg->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_NONE, NULL)) { ; } } } static void release(const struct device *psram) { const struct memc_mspi_aps6404l_config *cfg = psram->config; struct 
memc_mspi_aps6404l_data *data = psram->data; while (mspi_get_channel_status(cfg->bus, cfg->port)) { ; } k_sem_give(&data->lock); } static int memc_mspi_aps6404l_reset(const struct device *psram) { int ret; LOG_DBG("Resetting aps6404l/%u", __LINE__); ret = memc_mspi_aps6404l_command_write(psram, APS6404L_RESET_ENABLE, 0, NULL, 0); if (ret) { return ret; } ret = memc_mspi_aps6404l_command_write(psram, APS6404L_RESET_MEMORY, 0, NULL, 0); if (ret) { return ret; } /** We need to delay 5 ms to allow aps6404L pSRAM to reinitialize */ k_busy_wait(5000); return ret; } static int memc_mspi_aps6404l_get_vendor_id(const struct device *psram, uint8_t *vendor_id) { uint16_t buffer = 0; int ret; ret = memc_mspi_aps6404l_command_read(psram, APS6404L_READ_ID, 0, (uint8_t *)&buffer, 2); LOG_DBG("Read ID buff: %x/%u", buffer, __LINE__); *vendor_id = buffer & 0xff; return ret; } #if CONFIG_PM_DEVICE static int memc_mspi_aps6404l_half_sleep_enter(const struct device *psram) { int ret; LOG_DBG("Putting aps6404l to half sleep/%u", __LINE__); ret = memc_mspi_aps6404l_command_write(psram, APS6404L_HALF_SLEEP_ENTER, 0, NULL, 0); if (ret) { LOG_ERR("Failed to enter half sleep/%u", __LINE__); return ret; } /** Minimum half sleep duration tHS time */ k_busy_wait(4); return ret; } static int memc_mspi_aps6404l_half_sleep_exit(const struct device *psram) { const struct memc_mspi_aps6404l_config *cfg = psram->config; struct memc_mspi_aps6404l_data *data = psram->data; struct mspi_dev_cfg bkp = data->dev_cfg; int ret = 0; data->dev_cfg.freq = 48000000; mspi_dev_config(cfg->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_FREQUENCY, (const struct mspi_dev_cfg *)&data->dev_cfg); LOG_DBG("Waking up aps6404l from half sleep/%u", __LINE__); ret = memc_mspi_aps6404l_command_write(psram, 0, 0, NULL, 0); if (ret) { LOG_ERR("Failed to exit from half sleep/%u", __LINE__); return ret; } /** Minimum half sleep exit CE to CLK setup time */ k_busy_wait(100); data->dev_cfg = bkp; ret = mspi_dev_config(cfg->bus, 
&cfg->dev_id, MSPI_DEVICE_CONFIG_FREQUENCY, (const struct mspi_dev_cfg *)&data->dev_cfg); if (ret) { LOG_ERR("Failed to reconfigure MSPI after exiting half sleep/%u", __LINE__); return ret; } return ret; } static int memc_mspi_aps6404l_pm_action(const struct device *psram, enum pm_device_action action) { switch (action) { case PM_DEVICE_ACTION_RESUME: acquire(psram); memc_mspi_aps6404l_half_sleep_exit(psram); release(psram); break; case PM_DEVICE_ACTION_SUSPEND: acquire(psram); memc_mspi_aps6404l_half_sleep_enter(psram); release(psram); break; default: return -ENOTSUP; } return 0; } #endif /** IS_ENABLED(CONFIG_PM_DEVICE) */ static int memc_mspi_aps6404l_init(const struct device *psram) { const struct memc_mspi_aps6404l_config *cfg = psram->config; struct memc_mspi_aps6404l_data *data = psram->data; uint8_t vendor_id; if (!device_is_ready(cfg->bus)) { LOG_ERR("Controller device not ready/%u", __LINE__); return -ENODEV; } switch (cfg->tar_dev_cfg.io_mode) { case MSPI_IO_MODE_SINGLE: case MSPI_IO_MODE_QUAD: break; default: LOG_ERR("Bus mode %d not supported/%u", cfg->tar_dev_cfg.io_mode, __LINE__); return -EIO; } if (data->dev_cfg.io_mode == MSPI_IO_MODE_QUAD) { if (mspi_dev_config(cfg->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_ALL, &cfg->quad_cfg)) { LOG_ERR("Failed to config mspi controller/%u", __LINE__); return -EIO; } data->dev_cfg = cfg->quad_cfg; if (memc_mspi_aps6404l_reset(psram)) { LOG_ERR("Could not reset pSRAM/%u", __LINE__); return -EIO; } if (memc_mspi_aps6404l_command_write(psram, APS6404L_QUAD_MODE_EXIT, 0, NULL, 0)) { LOG_ERR("Could not exit quad mode/%u", __LINE__); return -EIO; } } if (mspi_dev_config(cfg->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_ALL, &cfg->serial_cfg)) { LOG_ERR("Failed to config mspi controller/%u", __LINE__); return -EIO; } data->dev_cfg = cfg->serial_cfg; if (memc_mspi_aps6404l_reset(psram)) { LOG_ERR("Could not reset pSRAM/%u", __LINE__); return -EIO; } if (memc_mspi_aps6404l_get_vendor_id(psram, &vendor_id)) { LOG_ERR("Could not 
read vendor id/%u", __LINE__); return -EIO; } LOG_DBG("Vendor id: 0x%0x", vendor_id); if (vendor_id != APM_VENDOR_ID) { LOG_WRN("Vendor ID does not match expected value of 0x%0x/%u", APM_VENDOR_ID, __LINE__); } if (cfg->tar_dev_cfg.io_mode == MSPI_IO_MODE_QUAD) { if (memc_mspi_aps6404l_command_write(psram, APS6404L_QUAD_MODE_ENTER, 0, NULL, 0)) { return -EIO; } } if (mspi_dev_config(cfg->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_ALL, &cfg->tar_dev_cfg)) { LOG_ERR("Failed to config mspi controller/%u", __LINE__); return -EIO; } data->dev_cfg = cfg->tar_dev_cfg; if (mspi_timing_config(cfg->bus, &cfg->dev_id, cfg->timing_cfg_mask, (void *)&cfg->tar_timing_cfg)) { LOG_ERR("Failed to config mspi timing/%u", __LINE__); return -EIO; } data->timing_cfg = cfg->tar_timing_cfg; if (cfg->tar_xip_cfg.enable) { if (mspi_xip_config(cfg->bus, &cfg->dev_id, &cfg->tar_xip_cfg)) { LOG_ERR("Failed to enable XIP/%u", __LINE__); return -EIO; } data->xip_cfg = cfg->tar_xip_cfg; } if (cfg->tar_scramble_cfg.enable) { if (mspi_scramble_config(cfg->bus, &cfg->dev_id, &cfg->tar_scramble_cfg)) { LOG_ERR("Failed to enable scrambling/%u", __LINE__); return -EIO; } data->scramble_cfg = cfg->tar_scramble_cfg; } release(psram); return 0; } #define MSPI_DEVICE_CONFIG_SERIAL(n) \ { \ .ce_num = DT_INST_PROP(n, mspi_hardware_ce_num), \ .freq = 12000000, \ .io_mode = MSPI_IO_MODE_SINGLE, \ .data_rate = MSPI_DATA_RATE_SINGLE, \ .cpp = MSPI_CPP_MODE_0, \ .endian = MSPI_XFER_LITTLE_ENDIAN, \ .ce_polarity = MSPI_CE_ACTIVE_LOW, \ .dqs_enable = false, \ .rx_dummy = 8, \ .tx_dummy = 0, \ .read_cmd = APS6404L_FAST_READ, \ .write_cmd = APS6404L_WRITE, \ .cmd_length = 1, \ .addr_length = 3, \ .mem_boundary = 1024, \ .time_to_break = 8, \ } #define MSPI_DEVICE_CONFIG_QUAD(n) \ { \ .ce_num = DT_INST_PROP(n, mspi_hardware_ce_num), \ .freq = 24000000, \ .io_mode = MSPI_IO_MODE_SINGLE, \ .data_rate = MSPI_DATA_RATE_SINGLE, \ .cpp = MSPI_CPP_MODE_0, \ .endian = MSPI_XFER_LITTLE_ENDIAN, \ .ce_polarity = MSPI_CE_ACTIVE_LOW, 
\ .dqs_enable = false, \ .rx_dummy = 6, \ .tx_dummy = 0, \ .read_cmd = APS6404L_QUAD_READ, \ .write_cmd = APS6404L_QUAD_WRITE, \ .cmd_length = 1, \ .addr_length = 3, \ .mem_boundary = 1024, \ .time_to_break = 4, \ } #if CONFIG_SOC_FAMILY_AMBIQ #define MSPI_TIMING_CONFIG(n) \ { \ .ui8WriteLatency = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 0), \ .ui8TurnAround = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 1), \ .bTxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 2), \ .bRxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 3), \ .bRxCap = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 4), \ .ui32TxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 5), \ .ui32RxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 6), \ .ui32RXDQSDelayEXT = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 7), \ } #define MSPI_TIMING_CONFIG_MASK(n) DT_INST_PROP(n, ambiq_timing_config_mask) #else #define MSPI_TIMING_CONFIG(n) #define MSPI_TIMING_CONFIG_MASK(n) #endif #define MEMC_MSPI_APS6404L(n) \ static const struct memc_mspi_aps6404l_config \ memc_mspi_aps6404l_config_##n = { \ .port = MSPI_PORT(n), \ .mem_size = DT_INST_PROP(n, size) / 8, \ .bus = DEVICE_DT_GET(DT_INST_BUS(n)), \ .dev_id = MSPI_DEVICE_ID_DT_INST(n), \ .serial_cfg = MSPI_DEVICE_CONFIG_SERIAL(n), \ .quad_cfg = MSPI_DEVICE_CONFIG_QUAD(n), \ .tar_dev_cfg = MSPI_DEVICE_CONFIG_DT_INST(n), \ .tar_xip_cfg = MSPI_XIP_CONFIG_DT_INST(n), \ .tar_scramble_cfg = MSPI_SCRAMBLE_CONFIG_DT_INST(n), \ .tar_timing_cfg = MSPI_TIMING_CONFIG(n), \ .timing_cfg_mask = MSPI_TIMING_CONFIG_MASK(n), \ .sw_multi_periph = DT_PROP(DT_INST_BUS(n), software_multiperipheral) \ }; \ static struct memc_mspi_aps6404l_data \ memc_mspi_aps6404l_data_##n = { \ .lock = Z_SEM_INITIALIZER(memc_mspi_aps6404l_data_##n.lock, 0, 1), \ }; \ PM_DEVICE_DT_INST_DEFINE(n, memc_mspi_aps6404l_pm_action); \ DEVICE_DT_INST_DEFINE(n, \ memc_mspi_aps6404l_init, \ PM_DEVICE_DT_INST_GET(n), \ &memc_mspi_aps6404l_data_##n, \ &memc_mspi_aps6404l_config_##n, \ POST_KERNEL, \ 
CONFIG_MEMC_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(MEMC_MSPI_APS6404L) ```
/content/code_sandbox/drivers/memc/memc_mspi_aps6404l.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,302
```objective-c /* * */ #include <zephyr/devicetree.h> #include <soc.h> #define FLEXRAM_DT_NODE DT_INST(0, nxp_flexram) #define IOMUXC_GPR_DT_NODE DT_NODELABEL(iomuxcgpr) #if defined(CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API) || \ defined(CONFIG_MEMC_NXP_FLEXRAM_ERROR_INTERRUPT) #define FLEXRAM_INTERRUPTS_USED #endif #if DT_PROP_HAS_IDX(FLEXRAM_DT_NODE, flexram_bank_spec, 0) #define FLEXRAM_RUNTIME_BANKS_USED 1 #endif #ifdef FLEXRAM_INTERRUPTS_USED enum memc_flexram_interrupt_cause { #ifdef CONFIG_MEMC_NXP_FLEXRAM_ERROR_INTERRUPT flexram_ocram_access_error, flexram_itcm_access_error, flexram_dtcm_access_error, #endif #ifdef CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API flexram_ocram_magic_addr, flexram_itcm_magic_addr, flexram_dtcm_magic_addr, #endif /* CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API */ }; typedef void (*flexram_callback_t)(enum memc_flexram_interrupt_cause, void *user_data); void memc_flexram_register_callback(flexram_callback_t callback, void *user_data); #endif /* FLEXRAM_INTERRUPTS_USED */ #ifdef FLEXRAM_RUNTIME_BANKS_USED /* * call from platform_init to set up flexram if using runtime map * must be inlined because cannot use stack */ #define GPR_FLEXRAM_REG_FILL(node_id, prop, idx) \ (((uint32_t)DT_PROP_BY_IDX(node_id, prop, idx)) << (2 * idx)) static inline void memc_flexram_dt_partition(void) { /* iomuxc_gpr must be const (in ROM region) because used in reconfiguring ram */ static IOMUXC_GPR_Type *const iomuxc_gpr = (IOMUXC_GPR_Type *)DT_REG_ADDR(IOMUXC_GPR_DT_NODE); /* do not create stack variables or use any data from ram in this function */ #if defined(CONFIG_SOC_SERIES_IMXRT11XX) iomuxc_gpr->GPR17 = (DT_FOREACH_PROP_ELEM_SEP(FLEXRAM_DT_NODE, flexram_bank_spec, GPR_FLEXRAM_REG_FILL, (+))) & 0xFFFF; iomuxc_gpr->GPR18 = (((DT_FOREACH_PROP_ELEM_SEP(FLEXRAM_DT_NODE, flexram_bank_spec, GPR_FLEXRAM_REG_FILL, (+)))) >> 16) & 0xFFFF; #elif defined(CONFIG_SOC_SERIES_IMXRT10XX) iomuxc_gpr->GPR17 = DT_FOREACH_PROP_ELEM_SEP(FLEXRAM_DT_NODE, flexram_bank_spec, 
GPR_FLEXRAM_REG_FILL, (+)); #endif iomuxc_gpr->GPR16 |= IOMUXC_GPR_GPR16_FLEXRAM_BANK_CFG_SEL_MASK; } #endif /* FLEXRAM_RUNTIME_BANKS_USED */ #ifdef CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API /** @brief Sets magic address for OCRAM * * Magic address allows core interrupt from FlexRAM when address * is accessed. * * @param ocram_addr: An address in OCRAM to set magic function on. * @retval 0 on success * @retval -EINVAL if ocram_addr is not in OCRAM * @retval -ENODEV if there is no OCRAM allocation in flexram */ int memc_flexram_set_ocram_magic_addr(uint32_t ocram_addr); /** @brief Sets magic address for ITCM * * Magic address allows core interrupt from FlexRAM when address * is accessed. * * @param itcm_addr: An address in ITCM to set magic function on. * @retval 0 on success * @retval -EINVAL if itcm_addr is not in ITCM * @retval -ENODEV if there is no ITCM allocation in flexram */ int memc_flexram_set_itcm_magic_addr(uint32_t itcm_addr); /** @brief Sets magic address for DTCM * * Magic address allows core interrupt from FlexRAM when address * is accessed. * * @param dtcm_addr: An address in DTCM to set magic function on. * @retval 0 on success * @retval -EINVAL if dtcm_addr is not in DTCM * @retval -ENODEV if there is no DTCM allocation in flexram */ int memc_flexram_set_dtcm_magic_addr(uint32_t dtcm_addr); #endif /* CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API */ ```
/content/code_sandbox/drivers/memc/memc_nxp_flexram.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
989
```unknown config MEMC_STM32 bool "STM32 Flexible Memory Controller (FMC)" default y depends on DT_HAS_ST_STM32_FMC_ENABLED || DT_HAS_ST_STM32H7_FMC_ENABLED help Enable STM32 Flexible Memory Controller. if MEMC_STM32 config MEMC_STM32_SDRAM bool "STM32 FMC SDRAM controller" default y depends on DT_HAS_ST_STM32_FMC_SDRAM_ENABLED select USE_STM32_LL_FMC select USE_STM32_HAL_SDRAM help Enable STM32 FMC SDRAM controller. config MEMC_STM32_NOR_PSRAM bool "STM32 FMC NOR/PSRAM controller" default y depends on DT_HAS_ST_STM32_FMC_NOR_PSRAM_ENABLED select USE_STM32_LL_FMC select USE_STM32_HAL_NOR select USE_STM32_HAL_SRAM help Enable STM32 FMC NOR/PSRAM controller. endif ```
/content/code_sandbox/drivers/memc/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
220
```c /* * */ #include "memc_nxp_flexram.h" #include <zephyr/dt-bindings/memory-controller/nxp,flexram.h> #include <zephyr/devicetree.h> #include <zephyr/init.h> #include <zephyr/sys/util.h> #include <errno.h> #include <zephyr/irq.h> #include "fsl_device_registers.h" #if defined(CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API) BUILD_ASSERT(DT_PROP(FLEXRAM_DT_NODE, flexram_has_magic_addr), "SOC does not support magic flexram addresses"); #endif #define BANK_SIZE (DT_PROP(FLEXRAM_DT_NODE, flexram_bank_size) * 1024) #define NUM_BANKS DT_PROP(FLEXRAM_DT_NODE, flexram_num_ram_banks) #define IS_CHILD_RAM_TYPE(node_id, compat) DT_NODE_HAS_COMPAT(node_id, compat) #define DOES_RAM_TYPE_EXIST(compat) \ DT_FOREACH_CHILD_SEP_VARGS(FLEXRAM_DT_NODE, IS_CHILD_RAM_TYPE, (+), compat) #if DOES_RAM_TYPE_EXIST(mmio_sram) #define FIND_OCRAM_NODE(node_id) \ COND_CODE_1(DT_NODE_HAS_COMPAT(node_id, mmio_sram), (node_id), ()) #define OCRAM_DT_NODE DT_FOREACH_CHILD(FLEXRAM_DT_NODE, FIND_OCRAM_NODE) #define OCRAM_START (DT_REG_ADDR(OCRAM_DT_NODE)) #define OCRAM_END (OCRAM_START + DT_REG_SIZE(OCRAM_DT_NODE)) #endif /* OCRAM */ #if DOES_RAM_TYPE_EXIST(nxp_imx_dtcm) #define FIND_DTCM_NODE(node_id) \ COND_CODE_1(DT_NODE_HAS_COMPAT(node_id, nxp_imx_dtcm), (node_id), ()) #define DTCM_DT_NODE DT_FOREACH_CHILD(FLEXRAM_DT_NODE, FIND_DTCM_NODE) #define DTCM_START (DT_REG_ADDR(DTCM_DT_NODE)) #define DTCM_END (DTCM_START + DT_REG_SIZE(DTCM_DT_NODE)) #endif /* DTCM */ #if DOES_RAM_TYPE_EXIST(nxp_imx_itcm) #define FIND_ITCM_NODE(node_id) \ COND_CODE_1(DT_NODE_HAS_COMPAT(node_id, nxp_imx_itcm), (node_id), ()) #define ITCM_DT_NODE DT_FOREACH_CHILD(FLEXRAM_DT_NODE, FIND_ITCM_NODE) #define ITCM_START (DT_REG_ADDR(ITCM_DT_NODE)) #define ITCM_END (ITCM_START + DT_REG_SIZE(ITCM_DT_NODE)) #endif /* ITCM */ #ifdef FLEXRAM_RUNTIME_BANKS_USED #define PLUS_ONE_BANK(node_id, prop, idx) 1 #define COUNT_BANKS \ DT_FOREACH_PROP_ELEM_SEP(FLEXRAM_DT_NODE, flexram_bank_spec, PLUS_ONE_BANK, (+)) BUILD_ASSERT(COUNT_BANKS == NUM_BANKS, 
"wrong number of flexram banks defined"); #ifdef OCRAM_DT_NODE #define ADD_BANK_IF_OCRAM(node_id, prop, idx) \ COND_CODE_1(IS_EQ(DT_PROP_BY_IDX(node_id, prop, idx), FLEXRAM_OCRAM), \ (BANK_SIZE), (0)) #define OCRAM_TOTAL \ DT_FOREACH_PROP_ELEM_SEP(FLEXRAM_DT_NODE, flexram_bank_spec, ADD_BANK_IF_OCRAM, (+)) BUILD_ASSERT((OCRAM_TOTAL) == DT_REG_SIZE(OCRAM_DT_NODE), "OCRAM node size is wrong"); #endif /* OCRAM */ #ifdef DTCM_DT_NODE #define ADD_BANK_IF_DTCM(node_id, prop, idx) \ COND_CODE_1(IS_EQ(DT_PROP_BY_IDX(node_id, prop, idx), FLEXRAM_DTCM), \ (BANK_SIZE), (0)) #define DTCM_TOTAL \ DT_FOREACH_PROP_ELEM_SEP(FLEXRAM_DT_NODE, flexram_bank_spec, ADD_BANK_IF_DTCM, (+)) BUILD_ASSERT((DTCM_TOTAL) == DT_REG_SIZE(DTCM_DT_NODE), "DTCM node size is wrong"); #endif /* DTCM */ #ifdef ITCM_DT_NODE #define ADD_BANK_IF_ITCM(node_id, prop, idx) \ COND_CODE_1(IS_EQ(DT_PROP_BY_IDX(node_id, prop, idx), FLEXRAM_ITCM), \ (BANK_SIZE), (0)) #define ITCM_TOTAL \ DT_FOREACH_PROP_ELEM_SEP(FLEXRAM_DT_NODE, flexram_bank_spec, ADD_BANK_IF_ITCM, (+)) BUILD_ASSERT((ITCM_TOTAL) == DT_REG_SIZE(ITCM_DT_NODE), "ITCM node size is wrong"); #endif /* ITCM */ #endif /* FLEXRAM_RUNTIME_BANKS_USED */ static FLEXRAM_Type *const base = (FLEXRAM_Type *) DT_REG_ADDR(FLEXRAM_DT_NODE); #ifdef FLEXRAM_INTERRUPTS_USED static flexram_callback_t flexram_callback; static void *flexram_user_data; void memc_flexram_register_callback(flexram_callback_t callback, void *user_data) { flexram_callback = callback; flexram_user_data = user_data; } static void nxp_flexram_isr(void *arg) { ARG_UNUSED(arg); if (flexram_callback == NULL) { return; } #if defined(CONFIG_MEMC_NXP_FLEXRAM_ERROR_INTERRUPT) if (base->INT_STATUS & FLEXRAM_INT_STATUS_OCRAM_ERR_STATUS_MASK) { base->INT_STATUS |= FLEXRAM_INT_STATUS_OCRAM_ERR_STATUS_MASK; flexram_callback(flexram_ocram_access_error, flexram_user_data); } if (base->INT_STATUS & FLEXRAM_INT_STATUS_DTCM_ERR_STATUS_MASK) { base->INT_STATUS |= FLEXRAM_INT_STATUS_DTCM_ERR_STATUS_MASK; 
flexram_callback(flexram_dtcm_access_error, flexram_user_data); } if (base->INT_STATUS & FLEXRAM_INT_STATUS_ITCM_ERR_STATUS_MASK) { base->INT_STATUS |= FLEXRAM_INT_STATUS_ITCM_ERR_STATUS_MASK; flexram_callback(flexram_itcm_access_error, flexram_user_data); } #endif /* CONFIG_MEMC_NXP_FLEXRAM_ERROR_INTERRUPT */ #if defined(CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API) if (base->INT_STATUS & FLEXRAM_INT_STATUS_OCRAM_MAM_STATUS_MASK) { base->INT_STATUS |= FLEXRAM_INT_STATUS_OCRAM_MAM_STATUS_MASK; flexram_callback(flexram_ocram_magic_addr, flexram_user_data); } if (base->INT_STATUS & FLEXRAM_INT_STATUS_DTCM_MAM_STATUS_MASK) { base->INT_STATUS |= FLEXRAM_INT_STATUS_DTCM_MAM_STATUS_MASK; flexram_callback(flexram_dtcm_magic_addr, flexram_user_data); } if (base->INT_STATUS & FLEXRAM_INT_STATUS_ITCM_MAM_STATUS_MASK) { base->INT_STATUS |= FLEXRAM_INT_STATUS_ITCM_MAM_STATUS_MASK; flexram_callback(flexram_itcm_magic_addr, flexram_user_data); } #endif /* CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API */ } #if defined(CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API) int memc_flexram_set_ocram_magic_addr(uint32_t ocram_addr) { #ifdef OCRAM_DT_NODE ocram_addr -= DT_REG_ADDR(OCRAM_DT_NODE); if (ocram_addr >= DT_REG_SIZE(OCRAM_DT_NODE)) { return -EINVAL; } base->OCRAM_MAGIC_ADDR &= ~FLEXRAM_OCRAM_MAGIC_ADDR_OCRAM_MAGIC_ADDR_MASK; base->OCRAM_MAGIC_ADDR |= FLEXRAM_OCRAM_MAGIC_ADDR_OCRAM_MAGIC_ADDR(ocram_addr); base->INT_STAT_EN |= FLEXRAM_INT_STAT_EN_OCRAM_MAM_STAT_EN_MASK; return 0; #else return -ENODEV; #endif } int memc_flexram_set_itcm_magic_addr(uint32_t itcm_addr) { #ifdef ITCM_DT_NODE itcm_addr -= DT_REG_ADDR(ITCM_DT_NODE); if (itcm_addr >= DT_REG_SIZE(ITCM_DT_NODE)) { return -EINVAL; } base->ITCM_MAGIC_ADDR &= ~FLEXRAM_ITCM_MAGIC_ADDR_ITCM_MAGIC_ADDR_MASK; base->ITCM_MAGIC_ADDR |= FLEXRAM_ITCM_MAGIC_ADDR_ITCM_MAGIC_ADDR(itcm_addr); base->INT_STAT_EN |= FLEXRAM_INT_STAT_EN_ITCM_MAM_STAT_EN_MASK; return 0; #else return -ENODEV; #endif } int memc_flexram_set_dtcm_magic_addr(uint32_t dtcm_addr) { 
#ifdef DTCM_DT_NODE dtcm_addr -= DT_REG_ADDR(DTCM_DT_NODE); if (dtcm_addr >= DT_REG_SIZE(DTCM_DT_NODE)) { return -EINVAL; } base->DTCM_MAGIC_ADDR &= ~FLEXRAM_DTCM_MAGIC_ADDR_DTCM_MAGIC_ADDR_MASK; base->DTCM_MAGIC_ADDR |= FLEXRAM_DTCM_MAGIC_ADDR_DTCM_MAGIC_ADDR(dtcm_addr); base->INT_STAT_EN |= FLEXRAM_INT_STAT_EN_DTCM_MAM_STAT_EN_MASK; return 0; #else return -ENODEV; #endif } #endif /* CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API */ #endif /* FLEXRAM_INTERRUPTS_USED */ static int nxp_flexram_init(void) { if (DT_PROP(FLEXRAM_DT_NODE, flexram_tcm_read_wait_mode)) { base->TCM_CTRL |= FLEXRAM_TCM_CTRL_TCM_WWAIT_EN_MASK; } if (DT_PROP(FLEXRAM_DT_NODE, flexram_tcm_write_wait_mode)) { base->TCM_CTRL |= FLEXRAM_TCM_CTRL_TCM_RWAIT_EN_MASK; } #if defined(CONFIG_MEMC_NXP_FLEXRAM_ERROR_INTERRUPT) base->INT_SIG_EN |= FLEXRAM_INT_SIG_EN_OCRAM_ERR_SIG_EN_MASK; base->INT_SIG_EN |= FLEXRAM_INT_SIG_EN_DTCM_ERR_SIG_EN_MASK; base->INT_SIG_EN |= FLEXRAM_INT_SIG_EN_ITCM_ERR_SIG_EN_MASK; base->INT_STAT_EN |= FLEXRAM_INT_STAT_EN_OCRAM_ERR_STAT_EN_MASK; base->INT_STAT_EN |= FLEXRAM_INT_STAT_EN_DTCM_ERR_STAT_EN_MASK; base->INT_STAT_EN |= FLEXRAM_INT_STAT_EN_ITCM_ERR_STAT_EN_MASK; #endif /* CONFIG_MEMC_NXP_FLEXRAM_ERROR_INTERRUPT */ #if defined(CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API) base->INT_SIG_EN |= FLEXRAM_INT_SIG_EN_OCRAM_MAM_SIG_EN_MASK; base->INT_SIG_EN |= FLEXRAM_INT_SIG_EN_DTCM_MAM_SIG_EN_MASK; base->INT_SIG_EN |= FLEXRAM_INT_SIG_EN_ITCM_MAM_SIG_EN_MASK; #endif /* CONFIG_MEMC_NXP_FLEXRAM_MAGIC_ADDR_API */ #ifdef FLEXRAM_INTERRUPTS_USED IRQ_CONNECT(DT_IRQN(FLEXRAM_DT_NODE), DT_IRQ(FLEXRAM_DT_NODE, priority), nxp_flexram_isr, NULL, 0); irq_enable(DT_IRQN(FLEXRAM_DT_NODE)); #endif /* FLEXRAM_INTERRUPTS_USED */ return 0; } SYS_INIT(nxp_flexram_init, EARLY, 0); ```
/content/code_sandbox/drivers/memc/memc_nxp_flexram.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,369
```c /* * */ #define DT_DRV_COMPAT atmel_sam_smc #include <zephyr/device.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include <soc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(memc_sam, CONFIG_MEMC_LOG_LEVEL); struct memc_smc_bank_config { uint32_t cs; uint32_t mode; uint32_t setup_timing; uint32_t pulse_timing; uint32_t cycle_timing; }; struct memc_smc_config { Smc *regs; size_t banks_len; const struct memc_smc_bank_config *banks; const struct atmel_sam_pmc_config clock_cfg; const struct pinctrl_dev_config *pcfg; }; static int memc_smc_init(const struct device *dev) { int ret; const struct memc_smc_config *cfg = dev->config; SmcCs_number *bank; /* Enable SMC clock in PMC */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&cfg->clock_cfg); ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } for (size_t i = 0U; i < cfg->banks_len; i++) { if (cfg->banks[i].cs >= SMCCS_NUMBER_NUMBER) { return -EINVAL; } bank = &cfg->regs->SMC_CS_NUMBER[cfg->banks[i].cs]; bank->SMC_SETUP = cfg->banks[i].setup_timing; bank->SMC_PULSE = cfg->banks[i].pulse_timing; bank->SMC_CYCLE = cfg->banks[i].cycle_timing; bank->SMC_MODE = cfg->banks[i].mode; } return 0; } #define SETUP_TIMING(node_id) \ SMC_SETUP_NWE_SETUP(DT_PROP_BY_IDX(node_id, atmel_smc_setup_timing, 0)) \ | SMC_SETUP_NCS_WR_SETUP(DT_PROP_BY_IDX(node_id, atmel_smc_setup_timing, 1)) \ | SMC_SETUP_NRD_SETUP(DT_PROP_BY_IDX(node_id, atmel_smc_setup_timing, 2)) \ | SMC_SETUP_NCS_RD_SETUP(DT_PROP_BY_IDX(node_id, atmel_smc_setup_timing, 3)) #define PULSE_TIMING(node_id) \ SMC_PULSE_NWE_PULSE(DT_PROP_BY_IDX(node_id, atmel_smc_pulse_timing, 0)) \ | SMC_PULSE_NCS_WR_PULSE(DT_PROP_BY_IDX(node_id, atmel_smc_pulse_timing, 1)) \ | SMC_PULSE_NRD_PULSE(DT_PROP_BY_IDX(node_id, atmel_smc_pulse_timing, 2)) \ | SMC_PULSE_NCS_RD_PULSE(DT_PROP_BY_IDX(node_id, atmel_smc_pulse_timing, 3)) #define CYCLE_TIMING(node_id) \ 
SMC_CYCLE_NWE_CYCLE(DT_PROP_BY_IDX(node_id, atmel_smc_cycle_timing, 0)) \ | SMC_CYCLE_NRD_CYCLE(DT_PROP_BY_IDX(node_id, atmel_smc_cycle_timing, 1)) #define BANK_CONFIG(node_id) \ { \ .cs = DT_REG_ADDR(node_id), \ .mode = COND_CODE_1(DT_ENUM_IDX(node_id, atmel_smc_write_mode), \ (SMC_MODE_WRITE_MODE), (0)) \ | COND_CODE_1(DT_ENUM_IDX(node_id, atmel_smc_read_mode), \ (SMC_MODE_READ_MODE), (0)), \ .setup_timing = SETUP_TIMING(node_id), \ .pulse_timing = PULSE_TIMING(node_id), \ .cycle_timing = CYCLE_TIMING(node_id), \ }, #define MEMC_SMC_DEFINE(inst) \ static const struct memc_smc_bank_config smc_bank_config_##inst[] = { \ DT_INST_FOREACH_CHILD(inst, BANK_CONFIG) \ }; \ PINCTRL_DT_INST_DEFINE(inst); \ static const struct memc_smc_config smc_config_##inst = { \ .regs = (Smc *)DT_INST_REG_ADDR(inst), \ .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(inst), \ .banks_len = ARRAY_SIZE(smc_bank_config_##inst), \ .banks = smc_bank_config_##inst, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ }; \ DEVICE_DT_INST_DEFINE(inst, memc_smc_init, NULL, NULL, \ &smc_config_##inst, POST_KERNEL, \ CONFIG_MEMC_INIT_PRIORITY, NULL); DT_INST_FOREACH_STATUS_OKAY(MEMC_SMC_DEFINE) ```
/content/code_sandbox/drivers/memc/memc_sam_smc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,102
```objective-c /* * * Based on: * - path_to_url (Apache-2.0) * - path_to_url (Apache-2.0) * - path_to_url (MIT) * */ #include <stdint.h> uint32_t ddr_phy_settings[1215] = { 0x01375642, /* DENALI_PHY_00_DATA */ 0x0004c008, /* DENALI_PHY_01_DATA */ 0x000000da, /* DENALI_PHY_02_DATA */ 0x00000000, /* DENALI_PHY_03_DATA */ 0x00000000, /* DENALI_PHY_04_DATA */ 0x00010000, /* DENALI_PHY_05_DATA */ 0x01DDDD90, /* DENALI_PHY_06_DATA */ 0x01DDDD90, /* DENALI_PHY_07_DATA */ 0x01030001, /* DENALI_PHY_08_DATA */ 0x01000000, /* DENALI_PHY_09_DATA */ 0x00c00000, /* DENALI_PHY_10_DATA */ 0x00000007, /* DENALI_PHY_11_DATA */ 0x00000000, /* DENALI_PHY_12_DATA */ 0x00000000, /* DENALI_PHY_13_DATA */ 0x04000408, /* DENALI_PHY_14_DATA */ 0x00000408, /* DENALI_PHY_15_DATA */ 0x00e4e400, /* DENALI_PHY_16_DATA */ 0x00000000, /* DENALI_PHY_17_DATA */ 0x00000000, /* DENALI_PHY_18_DATA */ 0x00000000, /* DENALI_PHY_19_DATA */ 0x00000000, /* DENALI_PHY_20_DATA */ 0x00000000, /* DENALI_PHY_21_DATA */ 0x00000000, /* DENALI_PHY_22_DATA */ 0x00000000, /* DENALI_PHY_23_DATA */ 0x00000000, /* DENALI_PHY_24_DATA */ 0x00000000, /* DENALI_PHY_25_DATA */ 0x00000000, /* DENALI_PHY_26_DATA */ 0x00000000, /* DENALI_PHY_27_DATA */ 0x00000000, /* DENALI_PHY_28_DATA */ 0x00000000, /* DENALI_PHY_29_DATA */ 0x00000000, /* DENALI_PHY_30_DATA */ 0x00000000, /* DENALI_PHY_31_DATA */ 0x00000000, /* DENALI_PHY_32_DATA */ 0x00200000, /* DENALI_PHY_33_DATA */ 0x00000000, /* DENALI_PHY_34_DATA */ 0x00000000, /* DENALI_PHY_35_DATA */ 0x00000000, /* DENALI_PHY_36_DATA */ 0x00000000, /* DENALI_PHY_37_DATA */ 0x00000000, /* DENALI_PHY_38_DATA */ 0x00000000, /* DENALI_PHY_39_DATA */ 0x02800280, /* DENALI_PHY_40_DATA */ 0x02800280, /* DENALI_PHY_41_DATA */ 0x02800280, /* DENALI_PHY_42_DATA */ 0x02800280, /* DENALI_PHY_43_DATA */ 0x00000280, /* DENALI_PHY_44_DATA */ 0x00000000, /* DENALI_PHY_45_DATA */ 0x00000000, /* DENALI_PHY_46_DATA */ 0x00000000, /* DENALI_PHY_47_DATA */ 0x00000000, /* DENALI_PHY_48_DATA */ 0x00000000, 
/* DENALI_PHY_49_DATA */ 0x00800080, /* DENALI_PHY_50_DATA */ 0x00800080, /* DENALI_PHY_51_DATA */ 0x00800080, /* DENALI_PHY_52_DATA */ 0x00800080, /* DENALI_PHY_53_DATA */ 0x00800080, /* DENALI_PHY_54_DATA */ 0x00800080, /* DENALI_PHY_55_DATA */ 0x00800080, /* DENALI_PHY_56_DATA */ 0x00800080, /* DENALI_PHY_57_DATA */ 0x00800080, /* DENALI_PHY_58_DATA */ 0x000100da, /* DENALI_PHY_59_DATA */ 0x01ff0010, /* DENALI_PHY_60_DATA */ 0x00000000, /* DENALI_PHY_61_DATA */ 0x00000000, /* DENALI_PHY_62_DATA */ 0x00000002, /* DENALI_PHY_63_DATA */ 0x51313152, /* DENALI_PHY_64_DATA */ 0x80013130, /* DENALI_PHY_65_DATA */ 0x02000080, /* DENALI_PHY_66_DATA */ 0x00100001, /* DENALI_PHY_67_DATA */ 0x0c064208, /* DENALI_PHY_68_DATA */ 0x000f0c0f, /* DENALI_PHY_69_DATA */ 0x01000140, /* DENALI_PHY_70_DATA */ 0x0000000c, /* DENALI_PHY_71_DATA */ 0x00000000, /* DENALI_PHY_72_DATA */ 0x00000000, /* DENALI_PHY_73_DATA */ 0x00000000, /* DENALI_PHY_74_DATA */ 0x00000000, /* DENALI_PHY_75_DATA */ 0x00000000, /* DENALI_PHY_76_DATA */ 0x00000000, /* DENALI_PHY_77_DATA */ 0x00000000, /* DENALI_PHY_78_DATA */ 0x00000000, /* DENALI_PHY_79_DATA */ 0x00000000, /* DENALI_PHY_80_DATA */ 0x00000000, /* DENALI_PHY_81_DATA */ 0x00000000, /* DENALI_PHY_82_DATA */ 0x00000000, /* DENALI_PHY_83_DATA */ 0x00000000, /* DENALI_PHY_84_DATA */ 0x00000000, /* DENALI_PHY_85_DATA */ 0x00000000, /* DENALI_PHY_86_DATA */ 0x00000000, /* DENALI_PHY_87_DATA */ 0x00000000, /* DENALI_PHY_88_DATA */ 0x00000000, /* DENALI_PHY_89_DATA */ 0x00000000, /* DENALI_PHY_90_DATA */ 0x00000000, /* DENALI_PHY_91_DATA */ 0x00000000, /* DENALI_PHY_92_DATA */ 0x00000000, /* DENALI_PHY_93_DATA */ 0x00000000, /* DENALI_PHY_94_DATA */ 0x00000000, /* DENALI_PHY_95_DATA */ 0x00000000, /* DENALI_PHY_96_DATA */ 0x00000000, /* DENALI_PHY_97_DATA */ 0x00000000, /* DENALI_PHY_98_DATA */ 0x00000000, /* DENALI_PHY_99_DATA */ 0x00000000, /* DENALI_PHY_100_DATA */ 0x00000000, /* DENALI_PHY_101_DATA */ 0x00000000, /* DENALI_PHY_102_DATA */ 
0x00000000, /* DENALI_PHY_103_DATA */ 0x00000000, /* DENALI_PHY_104_DATA */ 0x00000000, /* DENALI_PHY_105_DATA */ 0x00000000, /* DENALI_PHY_106_DATA */ 0x00000000, /* DENALI_PHY_107_DATA */ 0x00000000, /* DENALI_PHY_108_DATA */ 0x00000000, /* DENALI_PHY_109_DATA */ 0x00000000, /* DENALI_PHY_110_DATA */ 0x00000000, /* DENALI_PHY_111_DATA */ 0x00000000, /* DENALI_PHY_112_DATA */ 0x00000000, /* DENALI_PHY_113_DATA */ 0x00000000, /* DENALI_PHY_114_DATA */ 0x00000000, /* DENALI_PHY_115_DATA */ 0x00000000, /* DENALI_PHY_116_DATA */ 0x00000000, /* DENALI_PHY_117_DATA */ 0x00000000, /* DENALI_PHY_118_DATA */ 0x00000000, /* DENALI_PHY_119_DATA */ 0x00000000, /* DENALI_PHY_120_DATA */ 0x00000000, /* DENALI_PHY_121_DATA */ 0x00000000, /* DENALI_PHY_122_DATA */ 0x00000000, /* DENALI_PHY_123_DATA */ 0x00000000, /* DENALI_PHY_124_DATA */ 0x00000000, /* DENALI_PHY_125_DATA */ 0x00000000, /* DENALI_PHY_126_DATA */ 0x00000000, /* DENALI_PHY_127_DATA */ 0x40263571, /* DENALI_PHY_128_DATA */ 0x0004c008, /* DENALI_PHY_129_DATA */ 0x000000da, /* DENALI_PHY_130_DATA */ 0x00000000, /* DENALI_PHY_131_DATA */ 0x00000000, /* DENALI_PHY_132_DATA */ 0x00010000, /* DENALI_PHY_133_DATA */ 0x01DDDD90, /* DENALI_PHY_134_DATA */ 0x01DDDD90, /* DENALI_PHY_135_DATA */ 0x01030001, /* DENALI_PHY_136_DATA */ 0x01000000, /* DENALI_PHY_137_DATA */ 0x00c00000, /* DENALI_PHY_138_DATA */ 0x00000007, /* DENALI_PHY_139_DATA */ 0x00000000, /* DENALI_PHY_140_DATA */ 0x00000000, /* DENALI_PHY_141_DATA */ 0x04000408, /* DENALI_PHY_142_DATA */ 0x00000408, /* DENALI_PHY_143_DATA */ 0x00e4e400, /* DENALI_PHY_144_DATA */ 0x00000000, /* DENALI_PHY_145_DATA */ 0x00000000, /* DENALI_PHY_146_DATA */ 0x00000000, /* DENALI_PHY_147_DATA */ 0x00000000, /* DENALI_PHY_148_DATA */ 0x00000000, /* DENALI_PHY_149_DATA */ 0x00000000, /* DENALI_PHY_150_DATA */ 0x00000000, /* DENALI_PHY_151_DATA */ 0x00000000, /* DENALI_PHY_152_DATA */ 0x00000000, /* DENALI_PHY_153_DATA */ 0x00000000, /* DENALI_PHY_154_DATA */ 0x00000000, /* 
DENALI_PHY_155_DATA */ 0x00000000, /* DENALI_PHY_156_DATA */ 0x00000000, /* DENALI_PHY_157_DATA */ 0x00000000, /* DENALI_PHY_158_DATA */ 0x00000000, /* DENALI_PHY_159_DATA */ 0x00000000, /* DENALI_PHY_160_DATA */ 0x00200000, /* DENALI_PHY_161_DATA */ 0x00000000, /* DENALI_PHY_162_DATA */ 0x00000000, /* DENALI_PHY_163_DATA */ 0x00000000, /* DENALI_PHY_164_DATA */ 0x00000000, /* DENALI_PHY_165_DATA */ 0x00000000, /* DENALI_PHY_166_DATA */ 0x00000000, /* DENALI_PHY_167_DATA */ 0x02800280, /* DENALI_PHY_168_DATA */ 0x02800280, /* DENALI_PHY_169_DATA */ 0x02800280, /* DENALI_PHY_170_DATA */ 0x02800280, /* DENALI_PHY_171_DATA */ 0x00000280, /* DENALI_PHY_172_DATA */ 0x00000000, /* DENALI_PHY_173_DATA */ 0x00000000, /* DENALI_PHY_174_DATA */ 0x00000000, /* DENALI_PHY_175_DATA */ 0x00000000, /* DENALI_PHY_176_DATA */ 0x00000000, /* DENALI_PHY_177_DATA */ 0x00800080, /* DENALI_PHY_178_DATA */ 0x00800080, /* DENALI_PHY_179_DATA */ 0x00800080, /* DENALI_PHY_180_DATA */ 0x00800080, /* DENALI_PHY_181_DATA */ 0x00800080, /* DENALI_PHY_182_DATA */ 0x00800080, /* DENALI_PHY_183_DATA */ 0x00800080, /* DENALI_PHY_184_DATA */ 0x00800080, /* DENALI_PHY_185_DATA */ 0x00800080, /* DENALI_PHY_186_DATA */ 0x000100da, /* DENALI_PHY_187_DATA */ 0x01ff0010, /* DENALI_PHY_188_DATA */ 0x00000000, /* DENALI_PHY_189_DATA */ 0x00000000, /* DENALI_PHY_190_DATA */ 0x00000002, /* DENALI_PHY_191_DATA */ 0x51313152, /* DENALI_PHY_192_DATA */ 0x80013130, /* DENALI_PHY_193_DATA */ 0x02000080, /* DENALI_PHY_194_DATA */ 0x00100001, /* DENALI_PHY_195_DATA */ 0x0c064208, /* DENALI_PHY_196_DATA */ 0x000f0c0f, /* DENALI_PHY_197_DATA */ 0x01000140, /* DENALI_PHY_198_DATA */ 0x0000000c, /* DENALI_PHY_199_DATA */ 0x00000000, /* DENALI_PHY_200_DATA */ 0x00000000, /* DENALI_PHY_201_DATA */ 0x00000000, /* DENALI_PHY_202_DATA */ 0x00000000, /* DENALI_PHY_203_DATA */ 0x00000000, /* DENALI_PHY_204_DATA */ 0x00000000, /* DENALI_PHY_205_DATA */ 0x00000000, /* DENALI_PHY_206_DATA */ 0x00000000, /* DENALI_PHY_207_DATA */ 
0x00000000, /* DENALI_PHY_208_DATA */ 0x00000000, /* DENALI_PHY_209_DATA */ 0x00000000, /* DENALI_PHY_210_DATA */ 0x00000000, /* DENALI_PHY_211_DATA */ 0x00000000, /* DENALI_PHY_212_DATA */ 0x00000000, /* DENALI_PHY_213_DATA */ 0x00000000, /* DENALI_PHY_214_DATA */ 0x00000000, /* DENALI_PHY_215_DATA */ 0x00000000, /* DENALI_PHY_216_DATA */ 0x00000000, /* DENALI_PHY_217_DATA */ 0x00000000, /* DENALI_PHY_218_DATA */ 0x00000000, /* DENALI_PHY_219_DATA */ 0x00000000, /* DENALI_PHY_220_DATA */ 0x00000000, /* DENALI_PHY_221_DATA */ 0x00000000, /* DENALI_PHY_222_DATA */ 0x00000000, /* DENALI_PHY_223_DATA */ 0x00000000, /* DENALI_PHY_224_DATA */ 0x00000000, /* DENALI_PHY_225_DATA */ 0x00000000, /* DENALI_PHY_226_DATA */ 0x00000000, /* DENALI_PHY_227_DATA */ 0x00000000, /* DENALI_PHY_228_DATA */ 0x00000000, /* DENALI_PHY_229_DATA */ 0x00000000, /* DENALI_PHY_230_DATA */ 0x00000000, /* DENALI_PHY_231_DATA */ 0x00000000, /* DENALI_PHY_232_DATA */ 0x00000000, /* DENALI_PHY_233_DATA */ 0x00000000, /* DENALI_PHY_234_DATA */ 0x00000000, /* DENALI_PHY_235_DATA */ 0x00000000, /* DENALI_PHY_236_DATA */ 0x00000000, /* DENALI_PHY_237_DATA */ 0x00000000, /* DENALI_PHY_238_DATA */ 0x00000000, /* DENALI_PHY_239_DATA */ 0x00000000, /* DENALI_PHY_240_DATA */ 0x00000000, /* DENALI_PHY_241_DATA */ 0x00000000, /* DENALI_PHY_242_DATA */ 0x00000000, /* DENALI_PHY_243_DATA */ 0x00000000, /* DENALI_PHY_244_DATA */ 0x00000000, /* DENALI_PHY_245_DATA */ 0x00000000, /* DENALI_PHY_246_DATA */ 0x00000000, /* DENALI_PHY_247_DATA */ 0x00000000, /* DENALI_PHY_248_DATA */ 0x00000000, /* DENALI_PHY_249_DATA */ 0x00000000, /* DENALI_PHY_250_DATA */ 0x00000000, /* DENALI_PHY_251_DATA */ 0x00000000, /* DENALI_PHY_252_DATA */ 0x00000000, /* DENALI_PHY_253_DATA */ 0x00000000, /* DENALI_PHY_254_DATA */ 0x00000000, /* DENALI_PHY_255_DATA */ 0x46052371, /* DENALI_PHY_256_DATA */ 0x0004c008, /* DENALI_PHY_257_DATA */ 0x000000da, /* DENALI_PHY_258_DATA */ 0x00000000, /* DENALI_PHY_259_DATA */ 0x00000000, /* 
DENALI_PHY_260_DATA */ 0x00010000, /* DENALI_PHY_261_DATA */ 0x01DDDD90, /* DENALI_PHY_262_DATA */ 0x01DDDD90, /* DENALI_PHY_263_DATA */ 0x01030001, /* DENALI_PHY_264_DATA */ 0x01000000, /* DENALI_PHY_265_DATA */ 0x00c00000, /* DENALI_PHY_266_DATA */ 0x00000007, /* DENALI_PHY_267_DATA */ 0x00000000, /* DENALI_PHY_268_DATA */ 0x00000000, /* DENALI_PHY_269_DATA */ 0x04000408, /* DENALI_PHY_270_DATA */ 0x00000408, /* DENALI_PHY_271_DATA */ 0x00e4e400, /* DENALI_PHY_272_DATA */ 0x00000000, /* DENALI_PHY_273_DATA */ 0x00000000, /* DENALI_PHY_274_DATA */ 0x00000000, /* DENALI_PHY_275_DATA */ 0x00000000, /* DENALI_PHY_276_DATA */ 0x00000000, /* DENALI_PHY_277_DATA */ 0x00000000, /* DENALI_PHY_278_DATA */ 0x00000000, /* DENALI_PHY_279_DATA */ 0x00000000, /* DENALI_PHY_280_DATA */ 0x00000000, /* DENALI_PHY_281_DATA */ 0x00000000, /* DENALI_PHY_282_DATA */ 0x00000000, /* DENALI_PHY_283_DATA */ 0x00000000, /* DENALI_PHY_284_DATA */ 0x00000000, /* DENALI_PHY_285_DATA */ 0x00000000, /* DENALI_PHY_286_DATA */ 0x00000000, /* DENALI_PHY_287_DATA */ 0x00000000, /* DENALI_PHY_288_DATA */ 0x00200000, /* DENALI_PHY_289_DATA */ 0x00000000, /* DENALI_PHY_290_DATA */ 0x00000000, /* DENALI_PHY_291_DATA */ 0x00000000, /* DENALI_PHY_292_DATA */ 0x00000000, /* DENALI_PHY_293_DATA */ 0x00000000, /* DENALI_PHY_294_DATA */ 0x00000000, /* DENALI_PHY_295_DATA */ 0x02800280, /* DENALI_PHY_296_DATA */ 0x02800280, /* DENALI_PHY_297_DATA */ 0x02800280, /* DENALI_PHY_298_DATA */ 0x02800280, /* DENALI_PHY_299_DATA */ 0x00000280, /* DENALI_PHY_300_DATA */ 0x00000000, /* DENALI_PHY_301_DATA */ 0x00000000, /* DENALI_PHY_302_DATA */ 0x00000000, /* DENALI_PHY_303_DATA */ 0x00000000, /* DENALI_PHY_304_DATA */ 0x00000000, /* DENALI_PHY_305_DATA */ 0x00800080, /* DENALI_PHY_306_DATA */ 0x00800080, /* DENALI_PHY_307_DATA */ 0x00800080, /* DENALI_PHY_308_DATA */ 0x00800080, /* DENALI_PHY_309_DATA */ 0x00800080, /* DENALI_PHY_310_DATA */ 0x00800080, /* DENALI_PHY_311_DATA */ 0x00800080, /* DENALI_PHY_312_DATA */ 
0x00800080, /* DENALI_PHY_313_DATA */ 0x00800080, /* DENALI_PHY_314_DATA */ 0x000100da, /* DENALI_PHY_315_DATA */ 0x01ff0010, /* DENALI_PHY_316_DATA */ 0x00000000, /* DENALI_PHY_317_DATA */ 0x00000000, /* DENALI_PHY_318_DATA */ 0x00000002, /* DENALI_PHY_319_DATA */ 0x51313152, /* DENALI_PHY_320_DATA */ 0x80013130, /* DENALI_PHY_321_DATA */ 0x02000080, /* DENALI_PHY_322_DATA */ 0x00100001, /* DENALI_PHY_323_DATA */ 0x0c064208, /* DENALI_PHY_324_DATA */ 0x000f0c0f, /* DENALI_PHY_325_DATA */ 0x01000140, /* DENALI_PHY_326_DATA */ 0x0000000c, /* DENALI_PHY_327_DATA */ 0x00000000, /* DENALI_PHY_328_DATA */ 0x00000000, /* DENALI_PHY_329_DATA */ 0x00000000, /* DENALI_PHY_330_DATA */ 0x00000000, /* DENALI_PHY_331_DATA */ 0x00000000, /* DENALI_PHY_332_DATA */ 0x00000000, /* DENALI_PHY_333_DATA */ 0x00000000, /* DENALI_PHY_334_DATA */ 0x00000000, /* DENALI_PHY_335_DATA */ 0x00000000, /* DENALI_PHY_336_DATA */ 0x00000000, /* DENALI_PHY_337_DATA */ 0x00000000, /* DENALI_PHY_338_DATA */ 0x00000000, /* DENALI_PHY_339_DATA */ 0x00000000, /* DENALI_PHY_340_DATA */ 0x00000000, /* DENALI_PHY_341_DATA */ 0x00000000, /* DENALI_PHY_342_DATA */ 0x00000000, /* DENALI_PHY_343_DATA */ 0x00000000, /* DENALI_PHY_344_DATA */ 0x00000000, /* DENALI_PHY_345_DATA */ 0x00000000, /* DENALI_PHY_346_DATA */ 0x00000000, /* DENALI_PHY_347_DATA */ 0x00000000, /* DENALI_PHY_348_DATA */ 0x00000000, /* DENALI_PHY_349_DATA */ 0x00000000, /* DENALI_PHY_350_DATA */ 0x00000000, /* DENALI_PHY_351_DATA */ 0x00000000, /* DENALI_PHY_352_DATA */ 0x00000000, /* DENALI_PHY_353_DATA */ 0x00000000, /* DENALI_PHY_354_DATA */ 0x00000000, /* DENALI_PHY_355_DATA */ 0x00000000, /* DENALI_PHY_356_DATA */ 0x00000000, /* DENALI_PHY_357_DATA */ 0x00000000, /* DENALI_PHY_358_DATA */ 0x00000000, /* DENALI_PHY_359_DATA */ 0x00000000, /* DENALI_PHY_360_DATA */ 0x00000000, /* DENALI_PHY_361_DATA */ 0x00000000, /* DENALI_PHY_362_DATA */ 0x00000000, /* DENALI_PHY_363_DATA */ 0x00000000, /* DENALI_PHY_364_DATA */ 0x00000000, /* 
DENALI_PHY_365_DATA */ 0x00000000, /* DENALI_PHY_366_DATA */ 0x00000000, /* DENALI_PHY_367_DATA */ 0x00000000, /* DENALI_PHY_368_DATA */ 0x00000000, /* DENALI_PHY_369_DATA */ 0x00000000, /* DENALI_PHY_370_DATA */ 0x00000000, /* DENALI_PHY_371_DATA */ 0x00000000, /* DENALI_PHY_372_DATA */ 0x00000000, /* DENALI_PHY_373_DATA */ 0x00000000, /* DENALI_PHY_374_DATA */ 0x00000000, /* DENALI_PHY_375_DATA */ 0x00000000, /* DENALI_PHY_376_DATA */ 0x00000000, /* DENALI_PHY_377_DATA */ 0x00000000, /* DENALI_PHY_378_DATA */ 0x00000000, /* DENALI_PHY_379_DATA */ 0x00000000, /* DENALI_PHY_380_DATA */ 0x00000000, /* DENALI_PHY_381_DATA */ 0x00000000, /* DENALI_PHY_382_DATA */ 0x00000000, /* DENALI_PHY_383_DATA */ 0x37651240, /* DENALI_PHY_384_DATA */ 0x0004c008, /* DENALI_PHY_385_DATA */ 0x000000da, /* DENALI_PHY_386_DATA */ 0x00000000, /* DENALI_PHY_387_DATA */ 0x00000000, /* DENALI_PHY_388_DATA */ 0x00010000, /* DENALI_PHY_389_DATA */ 0x01DDDD90, /* DENALI_PHY_390_DATA */ 0x01DDDD90, /* DENALI_PHY_391_DATA */ 0x01030001, /* DENALI_PHY_392_DATA */ 0x01000000, /* DENALI_PHY_393_DATA */ 0x00c00000, /* DENALI_PHY_394_DATA */ 0x00000007, /* DENALI_PHY_395_DATA */ 0x00000000, /* DENALI_PHY_396_DATA */ 0x00000000, /* DENALI_PHY_397_DATA */ 0x04000408, /* DENALI_PHY_398_DATA */ 0x00000408, /* DENALI_PHY_399_DATA */ 0x00e4e400, /* DENALI_PHY_400_DATA */ 0x00000000, /* DENALI_PHY_401_DATA */ 0x00000000, /* DENALI_PHY_402_DATA */ 0x00000000, /* DENALI_PHY_403_DATA */ 0x00000000, /* DENALI_PHY_404_DATA */ 0x00000000, /* DENALI_PHY_405_DATA */ 0x00000000, /* DENALI_PHY_406_DATA */ 0x00000000, /* DENALI_PHY_407_DATA */ 0x00000000, /* DENALI_PHY_408_DATA */ 0x00000000, /* DENALI_PHY_409_DATA */ 0x00000000, /* DENALI_PHY_410_DATA */ 0x00000000, /* DENALI_PHY_411_DATA */ 0x00000000, /* DENALI_PHY_412_DATA */ 0x00000000, /* DENALI_PHY_413_DATA */ 0x00000000, /* DENALI_PHY_414_DATA */ 0x00000000, /* DENALI_PHY_415_DATA */ 0x00000000, /* DENALI_PHY_416_DATA */ 0x00200000, /* DENALI_PHY_417_DATA */ 
0x00000000, /* DENALI_PHY_418_DATA */ 0x00000000, /* DENALI_PHY_419_DATA */ 0x00000000, /* DENALI_PHY_420_DATA */ 0x00000000, /* DENALI_PHY_421_DATA */ 0x00000000, /* DENALI_PHY_422_DATA */ 0x00000000, /* DENALI_PHY_423_DATA */ 0x02800280, /* DENALI_PHY_424_DATA */ 0x02800280, /* DENALI_PHY_425_DATA */ 0x02800280, /* DENALI_PHY_426_DATA */ 0x02800280, /* DENALI_PHY_427_DATA */ 0x00000280, /* DENALI_PHY_428_DATA */ 0x00000000, /* DENALI_PHY_429_DATA */ 0x00000000, /* DENALI_PHY_430_DATA */ 0x00000000, /* DENALI_PHY_431_DATA */ 0x00000000, /* DENALI_PHY_432_DATA */ 0x00000000, /* DENALI_PHY_433_DATA */ 0x00800080, /* DENALI_PHY_434_DATA */ 0x00800080, /* DENALI_PHY_435_DATA */ 0x00800080, /* DENALI_PHY_436_DATA */ 0x00800080, /* DENALI_PHY_437_DATA */ 0x00800080, /* DENALI_PHY_438_DATA */ 0x00800080, /* DENALI_PHY_439_DATA */ 0x00800080, /* DENALI_PHY_440_DATA */ 0x00800080, /* DENALI_PHY_441_DATA */ 0x00800080, /* DENALI_PHY_442_DATA */ 0x000100da, /* DENALI_PHY_443_DATA */ 0x01ff0010, /* DENALI_PHY_444_DATA */ 0x00000000, /* DENALI_PHY_445_DATA */ 0x00000000, /* DENALI_PHY_446_DATA */ 0x00000002, /* DENALI_PHY_447_DATA */ 0x51313152, /* DENALI_PHY_448_DATA */ 0x80013130, /* DENALI_PHY_449_DATA */ 0x02000080, /* DENALI_PHY_450_DATA */ 0x00100001, /* DENALI_PHY_451_DATA */ 0x0c064208, /* DENALI_PHY_452_DATA */ 0x000f0c0f, /* DENALI_PHY_453_DATA */ 0x01000140, /* DENALI_PHY_454_DATA */ 0x0000000c, /* DENALI_PHY_455_DATA */ 0x00000000, /* DENALI_PHY_456_DATA */ 0x00000000, /* DENALI_PHY_457_DATA */ 0x00000000, /* DENALI_PHY_458_DATA */ 0x00000000, /* DENALI_PHY_459_DATA */ 0x00000000, /* DENALI_PHY_460_DATA */ 0x00000000, /* DENALI_PHY_461_DATA */ 0x00000000, /* DENALI_PHY_462_DATA */ 0x00000000, /* DENALI_PHY_463_DATA */ 0x00000000, /* DENALI_PHY_464_DATA */ 0x00000000, /* DENALI_PHY_465_DATA */ 0x00000000, /* DENALI_PHY_466_DATA */ 0x00000000, /* DENALI_PHY_467_DATA */ 0x00000000, /* DENALI_PHY_468_DATA */ 0x00000000, /* DENALI_PHY_469_DATA */ 0x00000000, /* 
DENALI_PHY_470_DATA */ 0x00000000, /* DENALI_PHY_471_DATA */ 0x00000000, /* DENALI_PHY_472_DATA */ 0x00000000, /* DENALI_PHY_473_DATA */ 0x00000000, /* DENALI_PHY_474_DATA */ 0x00000000, /* DENALI_PHY_475_DATA */ 0x00000000, /* DENALI_PHY_476_DATA */ 0x00000000, /* DENALI_PHY_477_DATA */ 0x00000000, /* DENALI_PHY_478_DATA */ 0x00000000, /* DENALI_PHY_479_DATA */ 0x00000000, /* DENALI_PHY_480_DATA */ 0x00000000, /* DENALI_PHY_481_DATA */ 0x00000000, /* DENALI_PHY_482_DATA */ 0x00000000, /* DENALI_PHY_483_DATA */ 0x00000000, /* DENALI_PHY_484_DATA */ 0x00000000, /* DENALI_PHY_485_DATA */ 0x00000000, /* DENALI_PHY_486_DATA */ 0x00000000, /* DENALI_PHY_487_DATA */ 0x00000000, /* DENALI_PHY_488_DATA */ 0x00000000, /* DENALI_PHY_489_DATA */ 0x00000000, /* DENALI_PHY_490_DATA */ 0x00000000, /* DENALI_PHY_491_DATA */ 0x00000000, /* DENALI_PHY_492_DATA */ 0x00000000, /* DENALI_PHY_493_DATA */ 0x00000000, /* DENALI_PHY_494_DATA */ 0x00000000, /* DENALI_PHY_495_DATA */ 0x00000000, /* DENALI_PHY_496_DATA */ 0x00000000, /* DENALI_PHY_497_DATA */ 0x00000000, /* DENALI_PHY_498_DATA */ 0x00000000, /* DENALI_PHY_499_DATA */ 0x00000000, /* DENALI_PHY_500_DATA */ 0x00000000, /* DENALI_PHY_501_DATA */ 0x00000000, /* DENALI_PHY_502_DATA */ 0x00000000, /* DENALI_PHY_503_DATA */ 0x00000000, /* DENALI_PHY_504_DATA */ 0x00000000, /* DENALI_PHY_505_DATA */ 0x00000000, /* DENALI_PHY_506_DATA */ 0x00000000, /* DENALI_PHY_507_DATA */ 0x00000000, /* DENALI_PHY_508_DATA */ 0x00000000, /* DENALI_PHY_509_DATA */ 0x00000000, /* DENALI_PHY_510_DATA */ 0x00000000, /* DENALI_PHY_511_DATA */ 0x34216750, /* DENALI_PHY_512_DATA */ 0x0004c008, /* DENALI_PHY_513_DATA */ 0x000000da, /* DENALI_PHY_514_DATA */ 0x00000000, /* DENALI_PHY_515_DATA */ 0x00000000, /* DENALI_PHY_516_DATA */ 0x00010000, /* DENALI_PHY_517_DATA */ 0x01DDDD90, /* DENALI_PHY_518_DATA */ 0x01DDDD90, /* DENALI_PHY_519_DATA */ 0x01030001, /* DENALI_PHY_520_DATA */ 0x01000000, /* DENALI_PHY_521_DATA */ 0x00c00000, /* DENALI_PHY_522_DATA */ 
0x00000007, /* DENALI_PHY_523_DATA */ 0x00000000, /* DENALI_PHY_524_DATA */ 0x00000000, /* DENALI_PHY_525_DATA */ 0x04000408, /* DENALI_PHY_526_DATA */ 0x00000408, /* DENALI_PHY_527_DATA */ 0x00e4e400, /* DENALI_PHY_528_DATA */ 0x00000000, /* DENALI_PHY_529_DATA */ 0x00000000, /* DENALI_PHY_530_DATA */ 0x00000000, /* DENALI_PHY_531_DATA */ 0x00000000, /* DENALI_PHY_532_DATA */ 0x00000000, /* DENALI_PHY_533_DATA */ 0x00000000, /* DENALI_PHY_534_DATA */ 0x00000000, /* DENALI_PHY_535_DATA */ 0x00000000, /* DENALI_PHY_536_DATA */ 0x00000000, /* DENALI_PHY_537_DATA */ 0x00000000, /* DENALI_PHY_538_DATA */ 0x00000000, /* DENALI_PHY_539_DATA */ 0x00000000, /* DENALI_PHY_540_DATA */ 0x00000000, /* DENALI_PHY_541_DATA */ 0x00000000, /* DENALI_PHY_542_DATA */ 0x00000000, /* DENALI_PHY_543_DATA */ 0x00000000, /* DENALI_PHY_544_DATA */ 0x00200000, /* DENALI_PHY_545_DATA */ 0x00000000, /* DENALI_PHY_546_DATA */ 0x00000000, /* DENALI_PHY_547_DATA */ 0x00000000, /* DENALI_PHY_548_DATA */ 0x00000000, /* DENALI_PHY_549_DATA */ 0x00000000, /* DENALI_PHY_550_DATA */ 0x00000000, /* DENALI_PHY_551_DATA */ 0x02800280, /* DENALI_PHY_552_DATA */ 0x02800280, /* DENALI_PHY_553_DATA */ 0x02800280, /* DENALI_PHY_554_DATA */ 0x02800280, /* DENALI_PHY_555_DATA */ 0x00000280, /* DENALI_PHY_556_DATA */ 0x00000000, /* DENALI_PHY_557_DATA */ 0x00000000, /* DENALI_PHY_558_DATA */ 0x00000000, /* DENALI_PHY_559_DATA */ 0x00000000, /* DENALI_PHY_560_DATA */ 0x00000000, /* DENALI_PHY_561_DATA */ 0x00800080, /* DENALI_PHY_562_DATA */ 0x00800080, /* DENALI_PHY_563_DATA */ 0x00800080, /* DENALI_PHY_564_DATA */ 0x00800080, /* DENALI_PHY_565_DATA */ 0x00800080, /* DENALI_PHY_566_DATA */ 0x00800080, /* DENALI_PHY_567_DATA */ 0x00800080, /* DENALI_PHY_568_DATA */ 0x00800080, /* DENALI_PHY_569_DATA */ 0x00800080, /* DENALI_PHY_570_DATA */ 0x000100da, /* DENALI_PHY_571_DATA */ 0x01ff0010, /* DENALI_PHY_572_DATA */ 0x00000000, /* DENALI_PHY_573_DATA */ 0x00000000, /* DENALI_PHY_574_DATA */ 0x00000002, /* 
DENALI_PHY_575_DATA */ 0x51313152, /* DENALI_PHY_576_DATA */ 0x80013130, /* DENALI_PHY_577_DATA */ 0x02000080, /* DENALI_PHY_578_DATA */ 0x00100001, /* DENALI_PHY_579_DATA */ 0x0c064208, /* DENALI_PHY_580_DATA */ 0x000f0c0f, /* DENALI_PHY_581_DATA */ 0x01000140, /* DENALI_PHY_582_DATA */ 0x0000000c, /* DENALI_PHY_583_DATA */ 0x00000000, /* DENALI_PHY_584_DATA */ 0x00000000, /* DENALI_PHY_585_DATA */ 0x00000000, /* DENALI_PHY_586_DATA */ 0x00000000, /* DENALI_PHY_587_DATA */ 0x00000000, /* DENALI_PHY_588_DATA */ 0x00000000, /* DENALI_PHY_589_DATA */ 0x00000000, /* DENALI_PHY_590_DATA */ 0x00000000, /* DENALI_PHY_591_DATA */ 0x00000000, /* DENALI_PHY_592_DATA */ 0x00000000, /* DENALI_PHY_593_DATA */ 0x00000000, /* DENALI_PHY_594_DATA */ 0x00000000, /* DENALI_PHY_595_DATA */ 0x00000000, /* DENALI_PHY_596_DATA */ 0x00000000, /* DENALI_PHY_597_DATA */ 0x00000000, /* DENALI_PHY_598_DATA */ 0x00000000, /* DENALI_PHY_599_DATA */ 0x00000000, /* DENALI_PHY_600_DATA */ 0x00000000, /* DENALI_PHY_601_DATA */ 0x00000000, /* DENALI_PHY_602_DATA */ 0x00000000, /* DENALI_PHY_603_DATA */ 0x00000000, /* DENALI_PHY_604_DATA */ 0x00000000, /* DENALI_PHY_605_DATA */ 0x00000000, /* DENALI_PHY_606_DATA */ 0x00000000, /* DENALI_PHY_607_DATA */ 0x00000000, /* DENALI_PHY_608_DATA */ 0x00000000, /* DENALI_PHY_609_DATA */ 0x00000000, /* DENALI_PHY_610_DATA */ 0x00000000, /* DENALI_PHY_611_DATA */ 0x00000000, /* DENALI_PHY_612_DATA */ 0x00000000, /* DENALI_PHY_613_DATA */ 0x00000000, /* DENALI_PHY_614_DATA */ 0x00000000, /* DENALI_PHY_615_DATA */ 0x00000000, /* DENALI_PHY_616_DATA */ 0x00000000, /* DENALI_PHY_617_DATA */ 0x00000000, /* DENALI_PHY_618_DATA */ 0x00000000, /* DENALI_PHY_619_DATA */ 0x00000000, /* DENALI_PHY_620_DATA */ 0x00000000, /* DENALI_PHY_621_DATA */ 0x00000000, /* DENALI_PHY_622_DATA */ 0x00000000, /* DENALI_PHY_623_DATA */ 0x00000000, /* DENALI_PHY_624_DATA */ 0x00000000, /* DENALI_PHY_625_DATA */ 0x00000000, /* DENALI_PHY_626_DATA */ 0x00000000, /* DENALI_PHY_627_DATA */ 
0x00000000, /* DENALI_PHY_628_DATA */ 0x00000000, /* DENALI_PHY_629_DATA */ 0x00000000, /* DENALI_PHY_630_DATA */ 0x00000000, /* DENALI_PHY_631_DATA */ 0x00000000, /* DENALI_PHY_632_DATA */ 0x00000000, /* DENALI_PHY_633_DATA */ 0x00000000, /* DENALI_PHY_634_DATA */ 0x00000000, /* DENALI_PHY_635_DATA */ 0x00000000, /* DENALI_PHY_636_DATA */ 0x00000000, /* DENALI_PHY_637_DATA */ 0x00000000, /* DENALI_PHY_638_DATA */ 0x00000000, /* DENALI_PHY_639_DATA */ 0x35176402, /* DENALI_PHY_640_DATA */ 0x0004c008, /* DENALI_PHY_641_DATA */ 0x000000da, /* DENALI_PHY_642_DATA */ 0x00000000, /* DENALI_PHY_643_DATA */ 0x00000000, /* DENALI_PHY_644_DATA */ 0x00010000, /* DENALI_PHY_645_DATA */ 0x01DDDD90, /* DENALI_PHY_646_DATA */ 0x01DDDD90, /* DENALI_PHY_647_DATA */ 0x01030001, /* DENALI_PHY_648_DATA */ 0x01000000, /* DENALI_PHY_649_DATA */ 0x00c00000, /* DENALI_PHY_650_DATA */ 0x00000007, /* DENALI_PHY_651_DATA */ 0x00000000, /* DENALI_PHY_652_DATA */ 0x00000000, /* DENALI_PHY_653_DATA */ 0x04000408, /* DENALI_PHY_654_DATA */ 0x00000408, /* DENALI_PHY_655_DATA */ 0x00e4e400, /* DENALI_PHY_656_DATA */ 0x00000000, /* DENALI_PHY_657_DATA */ 0x00000000, /* DENALI_PHY_658_DATA */ 0x00000000, /* DENALI_PHY_659_DATA */ 0x00000000, /* DENALI_PHY_660_DATA */ 0x00000000, /* DENALI_PHY_661_DATA */ 0x00000000, /* DENALI_PHY_662_DATA */ 0x00000000, /* DENALI_PHY_663_DATA */ 0x00000000, /* DENALI_PHY_664_DATA */ 0x00000000, /* DENALI_PHY_665_DATA */ 0x00000000, /* DENALI_PHY_666_DATA */ 0x00000000, /* DENALI_PHY_667_DATA */ 0x00000000, /* DENALI_PHY_668_DATA */ 0x00000000, /* DENALI_PHY_669_DATA */ 0x00000000, /* DENALI_PHY_670_DATA */ 0x00000000, /* DENALI_PHY_671_DATA */ 0x00000000, /* DENALI_PHY_672_DATA */ 0x00200000, /* DENALI_PHY_673_DATA */ 0x00000000, /* DENALI_PHY_674_DATA */ 0x00000000, /* DENALI_PHY_675_DATA */ 0x00000000, /* DENALI_PHY_676_DATA */ 0x00000000, /* DENALI_PHY_677_DATA */ 0x00000000, /* DENALI_PHY_678_DATA */ 0x00000000, /* DENALI_PHY_679_DATA */ 0x02800280, /* 
DENALI_PHY_680_DATA */ 0x02800280, /* DENALI_PHY_681_DATA */ 0x02800280, /* DENALI_PHY_682_DATA */ 0x02800280, /* DENALI_PHY_683_DATA */ 0x00000280, /* DENALI_PHY_684_DATA */ 0x00000000, /* DENALI_PHY_685_DATA */ 0x00000000, /* DENALI_PHY_686_DATA */ 0x00000000, /* DENALI_PHY_687_DATA */ 0x00000000, /* DENALI_PHY_688_DATA */ 0x00000000, /* DENALI_PHY_689_DATA */ 0x00800080, /* DENALI_PHY_690_DATA */ 0x00800080, /* DENALI_PHY_691_DATA */ 0x00800080, /* DENALI_PHY_692_DATA */ 0x00800080, /* DENALI_PHY_693_DATA */ 0x00800080, /* DENALI_PHY_694_DATA */ 0x00800080, /* DENALI_PHY_695_DATA */ 0x00800080, /* DENALI_PHY_696_DATA */ 0x00800080, /* DENALI_PHY_697_DATA */ 0x00800080, /* DENALI_PHY_698_DATA */ 0x000100da, /* DENALI_PHY_699_DATA */ 0x01ff0010, /* DENALI_PHY_700_DATA */ 0x00000000, /* DENALI_PHY_701_DATA */ 0x00000000, /* DENALI_PHY_702_DATA */ 0x00000002, /* DENALI_PHY_703_DATA */ 0x51313152, /* DENALI_PHY_704_DATA */ 0x80013130, /* DENALI_PHY_705_DATA */ 0x02000080, /* DENALI_PHY_706_DATA */ 0x00100001, /* DENALI_PHY_707_DATA */ 0x0c064208, /* DENALI_PHY_708_DATA */ 0x000f0c0f, /* DENALI_PHY_709_DATA */ 0x01000140, /* DENALI_PHY_710_DATA */ 0x0000000c, /* DENALI_PHY_711_DATA */ 0x00000000, /* DENALI_PHY_712_DATA */ 0x00000000, /* DENALI_PHY_713_DATA */ 0x00000000, /* DENALI_PHY_714_DATA */ 0x00000000, /* DENALI_PHY_715_DATA */ 0x00000000, /* DENALI_PHY_716_DATA */ 0x00000000, /* DENALI_PHY_717_DATA */ 0x00000000, /* DENALI_PHY_718_DATA */ 0x00000000, /* DENALI_PHY_719_DATA */ 0x00000000, /* DENALI_PHY_720_DATA */ 0x00000000, /* DENALI_PHY_721_DATA */ 0x00000000, /* DENALI_PHY_722_DATA */ 0x00000000, /* DENALI_PHY_723_DATA */ 0x00000000, /* DENALI_PHY_724_DATA */ 0x00000000, /* DENALI_PHY_725_DATA */ 0x00000000, /* DENALI_PHY_726_DATA */ 0x00000000, /* DENALI_PHY_727_DATA */ 0x00000000, /* DENALI_PHY_728_DATA */ 0x00000000, /* DENALI_PHY_729_DATA */ 0x00000000, /* DENALI_PHY_730_DATA */ 0x00000000, /* DENALI_PHY_731_DATA */ 0x00000000, /* DENALI_PHY_732_DATA */ 
0x00000000, /* DENALI_PHY_733_DATA */ 0x00000000, /* DENALI_PHY_734_DATA */ 0x00000000, /* DENALI_PHY_735_DATA */ 0x00000000, /* DENALI_PHY_736_DATA */ 0x00000000, /* DENALI_PHY_737_DATA */ 0x00000000, /* DENALI_PHY_738_DATA */ 0x00000000, /* DENALI_PHY_739_DATA */ 0x00000000, /* DENALI_PHY_740_DATA */ 0x00000000, /* DENALI_PHY_741_DATA */ 0x00000000, /* DENALI_PHY_742_DATA */ 0x00000000, /* DENALI_PHY_743_DATA */ 0x00000000, /* DENALI_PHY_744_DATA */ 0x00000000, /* DENALI_PHY_745_DATA */ 0x00000000, /* DENALI_PHY_746_DATA */ 0x00000000, /* DENALI_PHY_747_DATA */ 0x00000000, /* DENALI_PHY_748_DATA */ 0x00000000, /* DENALI_PHY_749_DATA */ 0x00000000, /* DENALI_PHY_750_DATA */ 0x00000000, /* DENALI_PHY_751_DATA */ 0x00000000, /* DENALI_PHY_752_DATA */ 0x00000000, /* DENALI_PHY_753_DATA */ 0x00000000, /* DENALI_PHY_754_DATA */ 0x00000000, /* DENALI_PHY_755_DATA */ 0x00000000, /* DENALI_PHY_756_DATA */ 0x00000000, /* DENALI_PHY_757_DATA */ 0x00000000, /* DENALI_PHY_758_DATA */ 0x00000000, /* DENALI_PHY_759_DATA */ 0x00000000, /* DENALI_PHY_760_DATA */ 0x00000000, /* DENALI_PHY_761_DATA */ 0x00000000, /* DENALI_PHY_762_DATA */ 0x00000000, /* DENALI_PHY_763_DATA */ 0x00000000, /* DENALI_PHY_764_DATA */ 0x00000000, /* DENALI_PHY_765_DATA */ 0x00000000, /* DENALI_PHY_766_DATA */ 0x00000000, /* DENALI_PHY_767_DATA */ 0x10526347, /* DENALI_PHY_768_DATA */ 0x0004c008, /* DENALI_PHY_769_DATA */ 0x000000da, /* DENALI_PHY_770_DATA */ 0x00000000, /* DENALI_PHY_771_DATA */ 0x00000000, /* DENALI_PHY_772_DATA */ 0x00010000, /* DENALI_PHY_773_DATA */ 0x01DDDD90, /* DENALI_PHY_774_DATA */ 0x01DDDD90, /* DENALI_PHY_775_DATA */ 0x01030001, /* DENALI_PHY_776_DATA */ 0x01000000, /* DENALI_PHY_777_DATA */ 0x00c00000, /* DENALI_PHY_778_DATA */ 0x00000007, /* DENALI_PHY_779_DATA */ 0x00000000, /* DENALI_PHY_780_DATA */ 0x00000000, /* DENALI_PHY_781_DATA */ 0x04000408, /* DENALI_PHY_782_DATA */ 0x00000408, /* DENALI_PHY_783_DATA */ 0x00e4e400, /* DENALI_PHY_784_DATA */ 0x00000000, /* 
DENALI_PHY_785_DATA */ 0x00000000, /* DENALI_PHY_786_DATA */ 0x00000000, /* DENALI_PHY_787_DATA */ 0x00000000, /* DENALI_PHY_788_DATA */ 0x00000000, /* DENALI_PHY_789_DATA */ 0x00000000, /* DENALI_PHY_790_DATA */ 0x00000000, /* DENALI_PHY_791_DATA */ 0x00000000, /* DENALI_PHY_792_DATA */ 0x00000000, /* DENALI_PHY_793_DATA */ 0x00000000, /* DENALI_PHY_794_DATA */ 0x00000000, /* DENALI_PHY_795_DATA */ 0x00000000, /* DENALI_PHY_796_DATA */ 0x00000000, /* DENALI_PHY_797_DATA */ 0x00000000, /* DENALI_PHY_798_DATA */ 0x00000000, /* DENALI_PHY_799_DATA */ 0x00000000, /* DENALI_PHY_800_DATA */ 0x00200000, /* DENALI_PHY_801_DATA */ 0x00000000, /* DENALI_PHY_802_DATA */ 0x00000000, /* DENALI_PHY_803_DATA */ 0x00000000, /* DENALI_PHY_804_DATA */ 0x00000000, /* DENALI_PHY_805_DATA */ 0x00000000, /* DENALI_PHY_806_DATA */ 0x00000000, /* DENALI_PHY_807_DATA */ 0x02800280, /* DENALI_PHY_808_DATA */ 0x02800280, /* DENALI_PHY_809_DATA */ 0x02800280, /* DENALI_PHY_810_DATA */ 0x02800280, /* DENALI_PHY_811_DATA */ 0x00000280, /* DENALI_PHY_812_DATA */ 0x00000000, /* DENALI_PHY_813_DATA */ 0x00000000, /* DENALI_PHY_814_DATA */ 0x00000000, /* DENALI_PHY_815_DATA */ 0x00000000, /* DENALI_PHY_816_DATA */ 0x00000000, /* DENALI_PHY_817_DATA */ 0x00800080, /* DENALI_PHY_818_DATA */ 0x00800080, /* DENALI_PHY_819_DATA */ 0x00800080, /* DENALI_PHY_820_DATA */ 0x00800080, /* DENALI_PHY_821_DATA */ 0x00800080, /* DENALI_PHY_822_DATA */ 0x00800080, /* DENALI_PHY_823_DATA */ 0x00800080, /* DENALI_PHY_824_DATA */ 0x00800080, /* DENALI_PHY_825_DATA */ 0x00800080, /* DENALI_PHY_826_DATA */ 0x000100da, /* DENALI_PHY_827_DATA */ 0x01ff0010, /* DENALI_PHY_828_DATA */ 0x00000000, /* DENALI_PHY_829_DATA */ 0x00000000, /* DENALI_PHY_830_DATA */ 0x00000002, /* DENALI_PHY_831_DATA */ 0x51313152, /* DENALI_PHY_832_DATA */ 0x80013130, /* DENALI_PHY_833_DATA */ 0x02000080, /* DENALI_PHY_834_DATA */ 0x00100001, /* DENALI_PHY_835_DATA */ 0x0c064208, /* DENALI_PHY_836_DATA */ 0x000f0c0f, /* DENALI_PHY_837_DATA */ 
0x01000140, /* DENALI_PHY_838_DATA */ 0x0000000c, /* DENALI_PHY_839_DATA */ 0x00000000, /* DENALI_PHY_840_DATA */ 0x00000000, /* DENALI_PHY_841_DATA */ 0x00000000, /* DENALI_PHY_842_DATA */ 0x00000000, /* DENALI_PHY_843_DATA */ 0x00000000, /* DENALI_PHY_844_DATA */ 0x00000000, /* DENALI_PHY_845_DATA */ 0x00000000, /* DENALI_PHY_846_DATA */ 0x00000000, /* DENALI_PHY_847_DATA */ 0x00000000, /* DENALI_PHY_848_DATA */ 0x00000000, /* DENALI_PHY_849_DATA */ 0x00000000, /* DENALI_PHY_850_DATA */ 0x00000000, /* DENALI_PHY_851_DATA */ 0x00000000, /* DENALI_PHY_852_DATA */ 0x00000000, /* DENALI_PHY_853_DATA */ 0x00000000, /* DENALI_PHY_854_DATA */ 0x00000000, /* DENALI_PHY_855_DATA */ 0x00000000, /* DENALI_PHY_856_DATA */ 0x00000000, /* DENALI_PHY_857_DATA */ 0x00000000, /* DENALI_PHY_858_DATA */ 0x00000000, /* DENALI_PHY_859_DATA */ 0x00000000, /* DENALI_PHY_860_DATA */ 0x00000000, /* DENALI_PHY_861_DATA */ 0x00000000, /* DENALI_PHY_862_DATA */ 0x00000000, /* DENALI_PHY_863_DATA */ 0x00000000, /* DENALI_PHY_864_DATA */ 0x00000000, /* DENALI_PHY_865_DATA */ 0x00000000, /* DENALI_PHY_866_DATA */ 0x00000000, /* DENALI_PHY_867_DATA */ 0x00000000, /* DENALI_PHY_868_DATA */ 0x00000000, /* DENALI_PHY_869_DATA */ 0x00000000, /* DENALI_PHY_870_DATA */ 0x00000000, /* DENALI_PHY_871_DATA */ 0x00000000, /* DENALI_PHY_872_DATA */ 0x00000000, /* DENALI_PHY_873_DATA */ 0x00000000, /* DENALI_PHY_874_DATA */ 0x00000000, /* DENALI_PHY_875_DATA */ 0x00000000, /* DENALI_PHY_876_DATA */ 0x00000000, /* DENALI_PHY_877_DATA */ 0x00000000, /* DENALI_PHY_878_DATA */ 0x00000000, /* DENALI_PHY_879_DATA */ 0x00000000, /* DENALI_PHY_880_DATA */ 0x00000000, /* DENALI_PHY_881_DATA */ 0x00000000, /* DENALI_PHY_882_DATA */ 0x00000000, /* DENALI_PHY_883_DATA */ 0x00000000, /* DENALI_PHY_884_DATA */ 0x00000000, /* DENALI_PHY_885_DATA */ 0x00000000, /* DENALI_PHY_886_DATA */ 0x00000000, /* DENALI_PHY_887_DATA */ 0x00000000, /* DENALI_PHY_888_DATA */ 0x00000000, /* DENALI_PHY_889_DATA */ 0x00000000, /* 
DENALI_PHY_890_DATA */ 0x00000000, /* DENALI_PHY_891_DATA */ 0x00000000, /* DENALI_PHY_892_DATA */ 0x00000000, /* DENALI_PHY_893_DATA */ 0x00000000, /* DENALI_PHY_894_DATA */ 0x00000000, /* DENALI_PHY_895_DATA */ 0x41753260, /* DENALI_PHY_896_DATA */ 0x0004c008, /* DENALI_PHY_897_DATA */ 0x000000da, /* DENALI_PHY_898_DATA */ 0x00000000, /* DENALI_PHY_899_DATA */ 0x00000000, /* DENALI_PHY_900_DATA */ 0x00010000, /* DENALI_PHY_901_DATA */ 0x01DDDD90, /* DENALI_PHY_902_DATA */ 0x01DDDD90, /* DENALI_PHY_903_DATA */ 0x01030001, /* DENALI_PHY_904_DATA */ 0x01000000, /* DENALI_PHY_905_DATA */ 0x00c00000, /* DENALI_PHY_906_DATA */ 0x00000007, /* DENALI_PHY_907_DATA */ 0x00000000, /* DENALI_PHY_908_DATA */ 0x00000000, /* DENALI_PHY_909_DATA */ 0x04000408, /* DENALI_PHY_910_DATA */ 0x00000408, /* DENALI_PHY_911_DATA */ 0x00e4e400, /* DENALI_PHY_912_DATA */ 0x00000000, /* DENALI_PHY_913_DATA */ 0x00000000, /* DENALI_PHY_914_DATA */ 0x00000000, /* DENALI_PHY_915_DATA */ 0x00000000, /* DENALI_PHY_916_DATA */ 0x00000000, /* DENALI_PHY_917_DATA */ 0x00000000, /* DENALI_PHY_918_DATA */ 0x00000000, /* DENALI_PHY_919_DATA */ 0x00000000, /* DENALI_PHY_920_DATA */ 0x00000000, /* DENALI_PHY_921_DATA */ 0x00000000, /* DENALI_PHY_922_DATA */ 0x00000000, /* DENALI_PHY_923_DATA */ 0x00000000, /* DENALI_PHY_924_DATA */ 0x00000000, /* DENALI_PHY_925_DATA */ 0x00000000, /* DENALI_PHY_926_DATA */ 0x00000000, /* DENALI_PHY_927_DATA */ 0x00000000, /* DENALI_PHY_928_DATA */ 0x00200000, /* DENALI_PHY_929_DATA */ 0x00000000, /* DENALI_PHY_930_DATA */ 0x00000000, /* DENALI_PHY_931_DATA */ 0x00000000, /* DENALI_PHY_932_DATA */ 0x00000000, /* DENALI_PHY_933_DATA */ 0x00000000, /* DENALI_PHY_934_DATA */ 0x00000000, /* DENALI_PHY_935_DATA */ 0x02800280, /* DENALI_PHY_936_DATA */ 0x02800280, /* DENALI_PHY_937_DATA */ 0x02800280, /* DENALI_PHY_938_DATA */ 0x02800280, /* DENALI_PHY_939_DATA */ 0x00000280, /* DENALI_PHY_940_DATA */ 0x00000000, /* DENALI_PHY_941_DATA */ 0x00000000, /* DENALI_PHY_942_DATA */ 
0x00000000, /* DENALI_PHY_943_DATA */ 0x00000000, /* DENALI_PHY_944_DATA */ 0x00000000, /* DENALI_PHY_945_DATA */ 0x00800080, /* DENALI_PHY_946_DATA */ 0x00800080, /* DENALI_PHY_947_DATA */ 0x00800080, /* DENALI_PHY_948_DATA */ 0x00800080, /* DENALI_PHY_949_DATA */ 0x00800080, /* DENALI_PHY_950_DATA */ 0x00800080, /* DENALI_PHY_951_DATA */ 0x00800080, /* DENALI_PHY_952_DATA */ 0x00800080, /* DENALI_PHY_953_DATA */ 0x00800080, /* DENALI_PHY_954_DATA */ 0x000100da, /* DENALI_PHY_955_DATA */ 0x01ff0010, /* DENALI_PHY_956_DATA */ 0x00000000, /* DENALI_PHY_957_DATA */ 0x00000000, /* DENALI_PHY_958_DATA */ 0x00000002, /* DENALI_PHY_959_DATA */ 0x51313152, /* DENALI_PHY_960_DATA */ 0x80013130, /* DENALI_PHY_961_DATA */ 0x02000080, /* DENALI_PHY_962_DATA */ 0x00100001, /* DENALI_PHY_963_DATA */ 0x0c064208, /* DENALI_PHY_964_DATA */ 0x000f0c0f, /* DENALI_PHY_965_DATA */ 0x01000140, /* DENALI_PHY_966_DATA */ 0x0000000c, /* DENALI_PHY_967_DATA */ 0x00000000, /* DENALI_PHY_968_DATA */ 0x00000000, /* DENALI_PHY_969_DATA */ 0x00000000, /* DENALI_PHY_970_DATA */ 0x00000000, /* DENALI_PHY_971_DATA */ 0x00000000, /* DENALI_PHY_972_DATA */ 0x00000000, /* DENALI_PHY_973_DATA */ 0x00000000, /* DENALI_PHY_974_DATA */ 0x00000000, /* DENALI_PHY_975_DATA */ 0x00000000, /* DENALI_PHY_976_DATA */ 0x00000000, /* DENALI_PHY_977_DATA */ 0x00000000, /* DENALI_PHY_978_DATA */ 0x00000000, /* DENALI_PHY_979_DATA */ 0x00000000, /* DENALI_PHY_980_DATA */ 0x00000000, /* DENALI_PHY_981_DATA */ 0x00000000, /* DENALI_PHY_982_DATA */ 0x00000000, /* DENALI_PHY_983_DATA */ 0x00000000, /* DENALI_PHY_984_DATA */ 0x00000000, /* DENALI_PHY_985_DATA */ 0x00000000, /* DENALI_PHY_986_DATA */ 0x00000000, /* DENALI_PHY_987_DATA */ 0x00000000, /* DENALI_PHY_988_DATA */ 0x00000000, /* DENALI_PHY_989_DATA */ 0x00000000, /* DENALI_PHY_990_DATA */ 0x00000000, /* DENALI_PHY_991_DATA */ 0x00000000, /* DENALI_PHY_992_DATA */ 0x00000000, /* DENALI_PHY_993_DATA */ 0x00000000, /* DENALI_PHY_994_DATA */ 0x00000000, /* 
DENALI_PHY_995_DATA */ 0x00000000, /* DENALI_PHY_996_DATA */ 0x00000000, /* DENALI_PHY_997_DATA */ 0x00000000, /* DENALI_PHY_998_DATA */ 0x00000000, /* DENALI_PHY_999_DATA */ 0x00000000, /* DENALI_PHY_1000_DATA */ 0x00000000, /* DENALI_PHY_1001_DATA */ 0x00000000, /* DENALI_PHY_1002_DATA */ 0x00000000, /* DENALI_PHY_1003_DATA */ 0x00000000, /* DENALI_PHY_1004_DATA */ 0x00000000, /* DENALI_PHY_1005_DATA */ 0x00000000, /* DENALI_PHY_1006_DATA */ 0x00000000, /* DENALI_PHY_1007_DATA */ 0x00000000, /* DENALI_PHY_1008_DATA */ 0x00000000, /* DENALI_PHY_1009_DATA */ 0x00000000, /* DENALI_PHY_1010_DATA */ 0x00000000, /* DENALI_PHY_1011_DATA */ 0x00000000, /* DENALI_PHY_1012_DATA */ 0x00000000, /* DENALI_PHY_1013_DATA */ 0x00000000, /* DENALI_PHY_1014_DATA */ 0x00000000, /* DENALI_PHY_1015_DATA */ 0x00000000, /* DENALI_PHY_1016_DATA */ 0x00000000, /* DENALI_PHY_1017_DATA */ 0x00000000, /* DENALI_PHY_1018_DATA */ 0x00000000, /* DENALI_PHY_1019_DATA */ 0x00000000, /* DENALI_PHY_1020_DATA */ 0x00000000, /* DENALI_PHY_1021_DATA */ 0x00000000, /* DENALI_PHY_1022_DATA */ 0x00000000, /* DENALI_PHY_1023_DATA */ 0x76543210, /* DENALI_PHY_1024_DATA */ 0x0004c008, /* DENALI_PHY_1025_DATA */ 0x000000da, /* DENALI_PHY_1026_DATA */ 0x00000000, /* DENALI_PHY_1027_DATA */ 0x00000000, /* DENALI_PHY_1028_DATA */ 0x00010000, /* DENALI_PHY_1029_DATA */ 0x01665555, /* DENALI_PHY_1030_DATA */ 0x01665555, /* DENALI_PHY_1031_DATA */ 0x01030001, /* DENALI_PHY_1032_DATA */ 0x01000000, /* DENALI_PHY_1033_DATA */ 0x00c00000, /* DENALI_PHY_1034_DATA */ 0x00000007, /* DENALI_PHY_1035_DATA */ 0x00000000, /* DENALI_PHY_1036_DATA */ 0x00000000, /* DENALI_PHY_1037_DATA */ 0x04000408, /* DENALI_PHY_1038_DATA */ 0x00000408, /* DENALI_PHY_1039_DATA */ 0x00e4e400, /* DENALI_PHY_1040_DATA */ 0x00000000, /* DENALI_PHY_1041_DATA */ 0x00000000, /* DENALI_PHY_1042_DATA */ 0x00000000, /* DENALI_PHY_1043_DATA */ 0x00000000, /* DENALI_PHY_1044_DATA */ 0x00000000, /* DENALI_PHY_1045_DATA */ 0x00000000, /* 
DENALI_PHY_1046_DATA */ 0x00000000, /* DENALI_PHY_1047_DATA */ 0x00000000, /* DENALI_PHY_1048_DATA */ 0x00000000, /* DENALI_PHY_1049_DATA */ 0x00000000, /* DENALI_PHY_1050_DATA */ 0x00000000, /* DENALI_PHY_1051_DATA */ 0x00000000, /* DENALI_PHY_1052_DATA */ 0x00000000, /* DENALI_PHY_1053_DATA */ 0x00000000, /* DENALI_PHY_1054_DATA */ 0x00000000, /* DENALI_PHY_1055_DATA */ 0x00000000, /* DENALI_PHY_1056_DATA */ 0x00200000, /* DENALI_PHY_1057_DATA */ 0x00000000, /* DENALI_PHY_1058_DATA */ 0x00000000, /* DENALI_PHY_1059_DATA */ 0x00000000, /* DENALI_PHY_1060_DATA */ 0x00000000, /* DENALI_PHY_1061_DATA */ 0x00000000, /* DENALI_PHY_1062_DATA */ 0x00000000, /* DENALI_PHY_1063_DATA */ 0x02800280, /* DENALI_PHY_1064_DATA */ 0x02800280, /* DENALI_PHY_1065_DATA */ 0x02800280, /* DENALI_PHY_1066_DATA */ 0x02800280, /* DENALI_PHY_1067_DATA */ 0x00000280, /* DENALI_PHY_1068_DATA */ 0x00000000, /* DENALI_PHY_1069_DATA */ 0x00000000, /* DENALI_PHY_1070_DATA */ 0x00000000, /* DENALI_PHY_1071_DATA */ 0x00000000, /* DENALI_PHY_1072_DATA */ 0x00000000, /* DENALI_PHY_1073_DATA */ 0x00800080, /* DENALI_PHY_1074_DATA */ 0x00800080, /* DENALI_PHY_1075_DATA */ 0x00800080, /* DENALI_PHY_1076_DATA */ 0x00800080, /* DENALI_PHY_1077_DATA */ 0x00800080, /* DENALI_PHY_1078_DATA */ 0x00800080, /* DENALI_PHY_1079_DATA */ 0x00800080, /* DENALI_PHY_1080_DATA */ 0x00800080, /* DENALI_PHY_1081_DATA */ 0x00800080, /* DENALI_PHY_1082_DATA */ 0x000100da, /* DENALI_PHY_1083_DATA */ 0x01ff0010, /* DENALI_PHY_1084_DATA */ 0x00000000, /* DENALI_PHY_1085_DATA */ 0x00000000, /* DENALI_PHY_1086_DATA */ 0x00000002, /* DENALI_PHY_1087_DATA */ 0x51313152, /* DENALI_PHY_1088_DATA */ 0x80013130, /* DENALI_PHY_1089_DATA */ 0x02000080, /* DENALI_PHY_1090_DATA */ 0x00100001, /* DENALI_PHY_1091_DATA */ 0x0c064208, /* DENALI_PHY_1092_DATA */ 0x000f0c0f, /* DENALI_PHY_1093_DATA */ 0x01000140, /* DENALI_PHY_1094_DATA */ 0x0000000c, /* DENALI_PHY_1095_DATA */ 0x00000000, /* DENALI_PHY_1096_DATA */ 0x00000000, /* 
DENALI_PHY_1097_DATA */ 0x00000000, /* DENALI_PHY_1098_DATA */ 0x00000000, /* DENALI_PHY_1099_DATA */ 0x00000000, /* DENALI_PHY_1100_DATA */ 0x00000000, /* DENALI_PHY_1101_DATA */ 0x00000000, /* DENALI_PHY_1102_DATA */ 0x00000000, /* DENALI_PHY_1103_DATA */ 0x00000000, /* DENALI_PHY_1104_DATA */ 0x00000000, /* DENALI_PHY_1105_DATA */ 0x00000000, /* DENALI_PHY_1106_DATA */ 0x00000000, /* DENALI_PHY_1107_DATA */ 0x00000000, /* DENALI_PHY_1108_DATA */ 0x00000000, /* DENALI_PHY_1109_DATA */ 0x00000000, /* DENALI_PHY_1110_DATA */ 0x00000000, /* DENALI_PHY_1111_DATA */ 0x00000000, /* DENALI_PHY_1112_DATA */ 0x00000000, /* DENALI_PHY_1113_DATA */ 0x00000000, /* DENALI_PHY_1114_DATA */ 0x00000000, /* DENALI_PHY_1115_DATA */ 0x00000000, /* DENALI_PHY_1116_DATA */ 0x00000000, /* DENALI_PHY_1117_DATA */ 0x00000000, /* DENALI_PHY_1118_DATA */ 0x00000000, /* DENALI_PHY_1119_DATA */ 0x00000000, /* DENALI_PHY_1120_DATA */ 0x00000000, /* DENALI_PHY_1121_DATA */ 0x00000000, /* DENALI_PHY_1122_DATA */ 0x00000000, /* DENALI_PHY_1123_DATA */ 0x00000000, /* DENALI_PHY_1124_DATA */ 0x00000000, /* DENALI_PHY_1125_DATA */ 0x00000000, /* DENALI_PHY_1126_DATA */ 0x00000000, /* DENALI_PHY_1127_DATA */ 0x00000000, /* DENALI_PHY_1128_DATA */ 0x00000000, /* DENALI_PHY_1129_DATA */ 0x00000000, /* DENALI_PHY_1130_DATA */ 0x00000000, /* DENALI_PHY_1131_DATA */ 0x00000000, /* DENALI_PHY_1132_DATA */ 0x00000000, /* DENALI_PHY_1133_DATA */ 0x00000000, /* DENALI_PHY_1134_DATA */ 0x00000000, /* DENALI_PHY_1135_DATA */ 0x00000000, /* DENALI_PHY_1136_DATA */ 0x00000000, /* DENALI_PHY_1137_DATA */ 0x00000000, /* DENALI_PHY_1138_DATA */ 0x00000000, /* DENALI_PHY_1139_DATA */ 0x00000000, /* DENALI_PHY_1140_DATA */ 0x00000000, /* DENALI_PHY_1141_DATA */ 0x00000000, /* DENALI_PHY_1142_DATA */ 0x00000000, /* DENALI_PHY_1143_DATA */ 0x00000000, /* DENALI_PHY_1144_DATA */ 0x00000000, /* DENALI_PHY_1145_DATA */ 0x00000000, /* DENALI_PHY_1146_DATA */ 0x00000000, /* DENALI_PHY_1147_DATA */ 0x00000000, /* 
DENALI_PHY_1148_DATA */ 0x00000000, /* DENALI_PHY_1149_DATA */ 0x00000000, /* DENALI_PHY_1150_DATA */ 0x00000000, /* DENALI_PHY_1151_DATA */ 0x00000000, /* DENALI_PHY_1152_DATA */ 0x00000000, /* DENALI_PHY_1153_DATA */ 0x00050000, /* DENALI_PHY_1154_DATA */ 0x00000000, /* DENALI_PHY_1155_DATA */ 0x00000000, /* DENALI_PHY_1156_DATA */ 0x00000000, /* DENALI_PHY_1157_DATA */ 0x00000100, /* DENALI_PHY_1158_DATA */ 0x00000000, /* DENALI_PHY_1159_DATA */ 0x00000000, /* DENALI_PHY_1160_DATA */ 0x00506401, /* DENALI_PHY_1161_DATA */ 0x01221102, /* DENALI_PHY_1162_DATA */ 0x00000122, /* DENALI_PHY_1163_DATA */ 0x00000000, /* DENALI_PHY_1164_DATA */ 0x000B1F00, /* DENALI_PHY_1165_DATA */ 0x0B1F0B1F, /* DENALI_PHY_1166_DATA */ 0x0B1F0B1F, /* DENALI_PHY_1167_DATA */ 0x0B1F0B1F, /* DENALI_PHY_1168_DATA */ 0x0B1F0B1F, /* DENALI_PHY_1169_DATA */ 0x00000B00, /* DENALI_PHY_1170_DATA */ 0x42080010, /* DENALI_PHY_1171_DATA */ 0x01000100, /* DENALI_PHY_1172_DATA */ 0x01000100, /* DENALI_PHY_1173_DATA */ 0x01000100, /* DENALI_PHY_1174_DATA */ 0x01000100, /* DENALI_PHY_1175_DATA */ 0x00000000, /* DENALI_PHY_1176_DATA */ 0x00000000, /* DENALI_PHY_1177_DATA */ 0x00000000, /* DENALI_PHY_1178_DATA */ 0x00000000, /* DENALI_PHY_1179_DATA */ 0x00000000, /* DENALI_PHY_1180_DATA */ 0x00000803, /* DENALI_PHY_1181_DATA */ 0x223FFF00, /* DENALI_PHY_1182_DATA */ 0x000008FF, /* DENALI_PHY_1183_DATA */ 0x0000057F, /* DENALI_PHY_1184_DATA */ 0x0000057F, /* DENALI_PHY_1185_DATA */ 0x00037FFF, /* DENALI_PHY_1186_DATA */ 0x00037FFF, /* DENALI_PHY_1187_DATA */ 0x00004410, /* DENALI_PHY_1188_DATA */ 0x00004410, /* DENALI_PHY_1189_DATA */ 0x00004410, /* DENALI_PHY_1190_DATA */ 0x00004410, /* DENALI_PHY_1191_DATA */ 0x00004410, /* DENALI_PHY_1192_DATA */ 0x00000111, /* DENALI_PHY_1193_DATA */ 0x00000111, /* DENALI_PHY_1194_DATA */ 0x00000000, /* DENALI_PHY_1195_DATA */ 0x00000000, /* DENALI_PHY_1196_DATA */ 0x00000000, /* DENALI_PHY_1197_DATA */ 0x04000000, /* DENALI_PHY_1198_DATA */ 0x00000000, /* 
DENALI_PHY_1199_DATA */ 0x00000000, /* DENALI_PHY_1200_DATA */ 0x00000108, /* DENALI_PHY_1201_DATA */ 0x00000000, /* DENALI_PHY_1202_DATA */ 0x00000000, /* DENALI_PHY_1203_DATA */ 0x00000000, /* DENALI_PHY_1204_DATA */ 0x00000001, /* DENALI_PHY_1205_DATA */ 0x00000000, /* DENALI_PHY_1206_DATA */ 0x00000000, /* DENALI_PHY_1207_DATA */ 0x00000000, /* DENALI_PHY_1208_DATA */ 0x00000000, /* DENALI_PHY_1209_DATA */ 0x00000000, /* DENALI_PHY_1210_DATA */ 0x00000000, /* DENALI_PHY_1211_DATA */ 0x00020100, /* DENALI_PHY_1212_DATA */ 0x00000000, /* DENALI_PHY_1213_DATA */ 0x00000000, /* DENALI_PHY_1214_DATA */ }; uint32_t ddr_ctl_settings[265] = { 0x00000a00, /* DENALI_CTL_00_DATA */ 0x00000000, /* DENALI_CTL_01_DATA */ 0x00000000, /* DENALI_CTL_02_DATA */ 0x00000000, /* DENALI_CTL_03_DATA */ 0x00000000, /* DENALI_CTL_04_DATA */ 0x00000000, /* DENALI_CTL_05_DATA */ 0x0000000a, /* DENALI_CTL_06_DATA */ 0x0002d362, /* DENALI_CTL_07_DATA */ 0x00071073, /* DENALI_CTL_08_DATA */ 0x0a1c0255, /* DENALI_CTL_09_DATA */ 0x1c1c0400, /* DENALI_CTL_10_DATA */ 0x0404c90b, /* DENALI_CTL_11_DATA */ 0x2b050405, /* DENALI_CTL_12_DATA */ 0x0d0c081e, /* DENALI_CTL_13_DATA */ 0x08090914, /* DENALI_CTL_14_DATA */ 0x00fde718, /* DENALI_CTL_15_DATA */ 0x00180a05, /* DENALI_CTL_16_DATA */ 0x008b130d, /* DENALI_CTL_17_DATA */ 0x01000118, /* DENALI_CTL_18_DATA */ 0x0d032001, /* DENALI_CTL_19_DATA */ 0x00000000, /* DENALI_CTL_20_DATA */ 0x00000101, /* DENALI_CTL_21_DATA */ 0x00000000, /* DENALI_CTL_22_DATA */ 0x0a000000, /* DENALI_CTL_23_DATA */ 0x00000000, /* DENALI_CTL_24_DATA */ 0x01450100, /* DENALI_CTL_25_DATA */ 0x00001c36, /* DENALI_CTL_26_DATA */ 0x00000005, /* DENALI_CTL_27_DATA */ 0x00170006, /* DENALI_CTL_28_DATA */ 0x014e0400, /* DENALI_CTL_29_DATA */ 0x03010000, /* DENALI_CTL_30_DATA */ 0x000a0e00, /* DENALI_CTL_31_DATA */ 0x04030200, /* DENALI_CTL_32_DATA */ 0x0000031f, /* DENALI_CTL_33_DATA */ 0x00070004, /* DENALI_CTL_34_DATA */ 0x00000000, /* DENALI_CTL_35_DATA */ 0x00000000, /* 
DENALI_CTL_36_DATA */ 0x00000000, /* DENALI_CTL_37_DATA */ 0x00000000, /* DENALI_CTL_38_DATA */ 0x00000000, /* DENALI_CTL_39_DATA */ 0x00000000, /* DENALI_CTL_40_DATA */ 0x00000000, /* DENALI_CTL_41_DATA */ 0x00000000, /* DENALI_CTL_42_DATA */ 0x00000000, /* DENALI_CTL_43_DATA */ 0x00000000, /* DENALI_CTL_44_DATA */ 0x00000000, /* DENALI_CTL_45_DATA */ 0x00000000, /* DENALI_CTL_46_DATA */ 0x00000000, /* DENALI_CTL_47_DATA */ 0x00000000, /* DENALI_CTL_48_DATA */ 0x00000000, /* DENALI_CTL_49_DATA */ 0x00000000, /* DENALI_CTL_50_DATA */ 0x00000000, /* DENALI_CTL_51_DATA */ 0x00000000, /* DENALI_CTL_52_DATA */ 0x00000000, /* DENALI_CTL_53_DATA */ 0x00000000, /* DENALI_CTL_54_DATA */ 0x00000000, /* DENALI_CTL_55_DATA */ 0x00000000, /* DENALI_CTL_56_DATA */ 0x00000000, /* DENALI_CTL_57_DATA */ 0x00000000, /* DENALI_CTL_58_DATA */ 0x00000000, /* DENALI_CTL_59_DATA */ 0x00000424, /* DENALI_CTL_60_DATA */ 0x00000201, /* DENALI_CTL_61_DATA */ 0x00001008, /* DENALI_CTL_62_DATA */ 0x00000000, /* DENALI_CTL_63_DATA */ 0x00000200, /* DENALI_CTL_64_DATA */ 0x00000800, /* DENALI_CTL_65_DATA */ 0x00000481, /* DENALI_CTL_66_DATA */ 0x00000400, /* DENALI_CTL_67_DATA */ 0x00000424, /* DENALI_CTL_68_DATA */ 0x00000201, /* DENALI_CTL_69_DATA */ 0x00001008, /* DENALI_CTL_70_DATA */ 0x00000000, /* DENALI_CTL_71_DATA */ 0x00000200, /* DENALI_CTL_72_DATA */ 0x00000800, /* DENALI_CTL_73_DATA */ 0x00000481, /* DENALI_CTL_74_DATA */ 0x00000400, /* DENALI_CTL_75_DATA */ 0x01010000, /* DENALI_CTL_76_DATA */ 0x00000000, /* DENALI_CTL_77_DATA */ 0x00000000, /* DENALI_CTL_78_DATA */ 0x00000000, /* DENALI_CTL_79_DATA */ 0x00000000, /* DENALI_CTL_80_DATA */ 0x00000000, /* DENALI_CTL_81_DATA */ 0x00000000, /* DENALI_CTL_82_DATA */ 0x00000000, /* DENALI_CTL_83_DATA */ 0x00000000, /* DENALI_CTL_84_DATA */ 0x00000000, /* DENALI_CTL_85_DATA */ 0x00000000, /* DENALI_CTL_86_DATA */ 0x00000000, /* DENALI_CTL_87_DATA */ 0x00000000, /* DENALI_CTL_88_DATA */ 0x00000000, /* DENALI_CTL_89_DATA */ 0x00000000, /* 
DENALI_CTL_90_DATA */ 0x00000000, /* DENALI_CTL_91_DATA */ 0x00000000, /* DENALI_CTL_92_DATA */ 0x00000000, /* DENALI_CTL_93_DATA */ 0x00000000, /* DENALI_CTL_94_DATA */ 0x00000000, /* DENALI_CTL_95_DATA */ 0x00000000, /* DENALI_CTL_96_DATA */ 0x00000000, /* DENALI_CTL_97_DATA */ 0x00000000, /* DENALI_CTL_98_DATA */ 0x00000000, /* DENALI_CTL_99_DATA */ 0x00000000 /* DENALI_CTL_100_DATA*/, 0x00000000 /* DENALI_CTL_101_DATA*/, 0x00000000 /* DENALI_CTL_102_DATA*/, 0x00000000 /* DENALI_CTL_103_DATA*/, 0x00000000 /* DENALI_CTL_104_DATA*/, 0x00000003 /* DENALI_CTL_105_DATA*/, 0x00000000 /* DENALI_CTL_106_DATA*/, 0x00000000 /* DENALI_CTL_107_DATA*/, 0x00000000 /* DENALI_CTL_108_DATA*/, 0x00000000 /* DENALI_CTL_109_DATA*/, 0x01000000 /* DENALI_CTL_110_DATA*/, 0x00040000 /* DENALI_CTL_111_DATA*/, 0x00800200 /* DENALI_CTL_112_DATA*/, 0x00000200 /* DENALI_CTL_113_DATA*/, 0x00000040 /* DENALI_CTL_114_DATA*/, 0x01000100 /* DENALI_CTL_115_DATA*/, 0x0a000002 /* DENALI_CTL_116_DATA*/, 0x0101ffff /* DENALI_CTL_117_DATA*/, 0x01010101 /* DENALI_CTL_118_DATA*/, 0x01010101 /* DENALI_CTL_119_DATA*/, 0x0000010b /* DENALI_CTL_120_DATA*/, 0x00000c03 /* DENALI_CTL_121_DATA*/, 0x00000000 /* DENALI_CTL_122_DATA*/, 0x00000000 /* DENALI_CTL_123_DATA*/, 0x00000000 /* DENALI_CTL_124_DATA*/, 0x00000000 /* DENALI_CTL_125_DATA*/, 0x00030300 /* DENALI_CTL_126_DATA*/, 0x00000000 /* DENALI_CTL_127_DATA*/, 0x00010101 /* DENALI_CTL_128_DATA*/, 0x00000000 /* DENALI_CTL_129_DATA*/, 0x00000000 /* DENALI_CTL_130_DATA*/, 0x00000000 /* DENALI_CTL_131_DATA*/, 0x00000000 /* DENALI_CTL_132_DATA*/, 0x00000000 /* DENALI_CTL_133_DATA*/, 0x00000000 /* DENALI_CTL_134_DATA*/, 0x00000000 /* DENALI_CTL_135_DATA*/, 0x00000000 /* DENALI_CTL_136_DATA*/, 0x00000000 /* DENALI_CTL_137_DATA*/, 0x00000000 /* DENALI_CTL_138_DATA*/, 0x00000000 /* DENALI_CTL_139_DATA*/, 0x00000000 /* DENALI_CTL_140_DATA*/, 0x00000000 /* DENALI_CTL_141_DATA*/, 0x00000000 /* DENALI_CTL_142_DATA*/, 0x00000000 /* DENALI_CTL_143_DATA*/, 0x00000000 /* 
DENALI_CTL_144_DATA*/, 0x00000000 /* DENALI_CTL_145_DATA*/, 0x00000000 /* DENALI_CTL_146_DATA*/, 0x00000000 /* DENALI_CTL_147_DATA*/, 0x00000000 /* DENALI_CTL_148_DATA*/, 0x00000000 /* DENALI_CTL_149_DATA*/, 0x00000000 /* DENALI_CTL_150_DATA*/, 0x00000000 /* DENALI_CTL_151_DATA*/, 0x00000000 /* DENALI_CTL_152_DATA*/, 0x00000000 /* DENALI_CTL_153_DATA*/, 0x00000000 /* DENALI_CTL_154_DATA*/, 0x00000000 /* DENALI_CTL_155_DATA*/, 0x00000000 /* DENALI_CTL_156_DATA*/, 0x00000000 /* DENALI_CTL_157_DATA*/, 0x00000000 /* DENALI_CTL_158_DATA*/, 0x00000000 /* DENALI_CTL_159_DATA*/, 0x00000000 /* DENALI_CTL_160_DATA*/, 0x02010102 /* DENALI_CTL_161_DATA*/, 0x0108070d /* DENALI_CTL_162_DATA*/, 0x05050300 /* DENALI_CTL_163_DATA*/, 0x04000503 /* DENALI_CTL_164_DATA*/, 0x00000000 /* DENALI_CTL_165_DATA*/, 0x00000000 /* DENALI_CTL_166_DATA*/, 0x00000000 /* DENALI_CTL_167_DATA*/, 0x00000000 /* DENALI_CTL_168_DATA*/, 0x280d0000 /* DENALI_CTL_169_DATA*/, 0x01000000 /* DENALI_CTL_170_DATA*/, 0x00000000 /* DENALI_CTL_171_DATA*/, 0x00030001 /* DENALI_CTL_172_DATA*/, 0x00000000 /* DENALI_CTL_173_DATA*/, 0x00000000 /* DENALI_CTL_174_DATA*/, 0x00000000 /* DENALI_CTL_175_DATA*/, 0x00000000 /* DENALI_CTL_176_DATA*/, 0x00000000 /* DENALI_CTL_177_DATA*/, 0x00000000 /* DENALI_CTL_178_DATA*/, 0x00000000 /* DENALI_CTL_179_DATA*/, 0x00000000 /* DENALI_CTL_180_DATA*/, 0x01000000 /* DENALI_CTL_181_DATA*/, 0x00000001 /* DENALI_CTL_182_DATA*/, 0x00000100 /* DENALI_CTL_183_DATA*/, 0x00010303 /* DENALI_CTL_184_DATA*/, 0x67676701 /* DENALI_CTL_185_DATA*/, 0x67676767 /* DENALI_CTL_186_DATA*/, 0x67676767 /* DENALI_CTL_187_DATA*/, 0x67676767 /* DENALI_CTL_188_DATA*/, 0x67676767 /* DENALI_CTL_189_DATA*/, 0x67676767 /* DENALI_CTL_190_DATA*/, 0x67676767 /* DENALI_CTL_191_DATA*/, 0x67676767 /* DENALI_CTL_192_DATA*/, 0x67676767 /* DENALI_CTL_193_DATA*/, 0x01000067 /* DENALI_CTL_194_DATA*/, 0x00000001 /* DENALI_CTL_195_DATA*/, 0x00000101 /* DENALI_CTL_196_DATA*/, 0x00000000 /* DENALI_CTL_197_DATA*/, 0x00000000 /* 
DENALI_CTL_198_DATA*/, 0x00000000 /* DENALI_CTL_199_DATA*/, 0x00000000 /* DENALI_CTL_200_DATA*/, 0x00000000 /* DENALI_CTL_201_DATA*/, 0x00000000 /* DENALI_CTL_202_DATA*/, 0x00000000 /* DENALI_CTL_203_DATA*/, 0x00000000 /* DENALI_CTL_204_DATA*/, 0x00000000 /* DENALI_CTL_205_DATA*/, 0x00000000 /* DENALI_CTL_206_DATA*/, 0x00000000 /* DENALI_CTL_207_DATA*/, 0x00000001 /* DENALI_CTL_208_DATA*/, 0x00000000 /* DENALI_CTL_209_DATA*/, 0x007fffff /* DENALI_CTL_210_DATA*/, 0x00000000 /* DENALI_CTL_211_DATA*/, 0x007fffff /* DENALI_CTL_212_DATA*/, 0x00000000 /* DENALI_CTL_213_DATA*/, 0x007fffff /* DENALI_CTL_214_DATA*/, 0x00000000 /* DENALI_CTL_215_DATA*/, 0x007fffff /* DENALI_CTL_216_DATA*/, 0x00000000 /* DENALI_CTL_217_DATA*/, 0x007fffff /* DENALI_CTL_218_DATA*/, 0x00000000 /* DENALI_CTL_219_DATA*/, 0x007fffff /* DENALI_CTL_220_DATA*/, 0x00000000 /* DENALI_CTL_221_DATA*/, 0x007fffff /* DENALI_CTL_222_DATA*/, 0x00000000 /* DENALI_CTL_223_DATA*/, 0x037fffff /* DENALI_CTL_224_DATA*/, 0xffffffff /* DENALI_CTL_225_DATA*/, 0x000f000f /* DENALI_CTL_226_DATA*/, 0x00ffff03 /* DENALI_CTL_227_DATA*/, 0x000fffff /* DENALI_CTL_228_DATA*/, 0x0003000f /* DENALI_CTL_229_DATA*/, 0xffffffff /* DENALI_CTL_230_DATA*/, 0x000f000f /* DENALI_CTL_231_DATA*/, 0x00ffff03 /* DENALI_CTL_232_DATA*/, 0x000fffff /* DENALI_CTL_233_DATA*/, 0x0003000f /* DENALI_CTL_234_DATA*/, 0xffffffff /* DENALI_CTL_235_DATA*/, 0x000f000f /* DENALI_CTL_236_DATA*/, 0x00ffff03 /* DENALI_CTL_237_DATA*/, 0x000fffff /* DENALI_CTL_238_DATA*/, 0x0003000f /* DENALI_CTL_239_DATA*/, 0xffffffff /* DENALI_CTL_240_DATA*/, 0x000f000f /* DENALI_CTL_241_DATA*/, 0x00ffff03 /* DENALI_CTL_242_DATA*/, 0x000fffff /* DENALI_CTL_243_DATA*/, 0x6407000f /* DENALI_CTL_244_DATA*/, 0x01640001 /* DENALI_CTL_245_DATA*/, 0x00000000 /* DENALI_CTL_246_DATA*/, 0x00000000 /* DENALI_CTL_247_DATA*/, 0x00001800 /* DENALI_CTL_248_DATA*/, 0x00386c05 /* DENALI_CTL_249_DATA*/, 0x02000200 /* DENALI_CTL_250_DATA*/, 0x02000200 /* DENALI_CTL_251_DATA*/, 0x0000386c /* 
DENALI_CTL_252_DATA*/, 0x00023438 /* DENALI_CTL_253_DATA*/, 0x02020d0f /* DENALI_CTL_254_DATA*/, 0x00140303 /* DENALI_CTL_255_DATA*/, 0x00000000 /* DENALI_CTL_256_DATA*/, 0x00000000 /* DENALI_CTL_257_DATA*/, 0x00001403 /* DENALI_CTL_258_DATA*/, 0x00000000 /* DENALI_CTL_259_DATA*/, 0x00000000 /* DENALI_CTL_260_DATA*/, 0x00000000 /* DENALI_CTL_261_DATA*/, 0x00000000 /* DENALI_CTL_262_DATA*/, 0x0c010000 /* DENALI_CTL_263_DATA*/, 0x00000008 /* DENALI_CTL_264_DATA*/, }; ```
/content/code_sandbox/drivers/memc/sifive_ddrregs.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
22,485
```c /* * */ #define DT_DRV_COMPAT nxp_imx_flexspi_is66wvq8m4 #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include "memc_mcux_flexspi.h" LOG_MODULE_REGISTER(memc_flexspi_is66wvq8m4, CONFIG_MEMC_LOG_LEVEL); /* Vendor ID for ISSI device */ #define ISSI_VENDOR_ID 0x3 enum { READ_DATA = 0, WRITE_DATA, READ_REG, WRITE_REG, READ_ID, }; struct memc_flexspi_is66wvq8m4_config { flexspi_port_t port; flexspi_device_config_t config; }; /* Device variables used in critical sections should be in this structure */ struct memc_flexspi_is66wvq8m4_data { const struct device *controller; }; /* IS66WVQ8M4 configuration register constants */ #define IS66WVQ8M4_LATENCY_MASK BIT(3) #define IS66WVQ8M4_LATENCY_FIXED BIT(3) static const uint32_t memc_flexspi_is66wvq8m4_lut[][4] = { /* Read Data (continuous burst) */ [READ_DATA] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_4PAD, 0xAA, kFLEXSPI_Command_DDR, kFLEXSPI_4PAD, 0x00), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_4PAD, 16, kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_4PAD, 16), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DUMMY_DDR, kFLEXSPI_4PAD, 28, kFLEXSPI_Command_READ_DDR, kFLEXSPI_4PAD, 0x01), }, /* Write Data (continuous burst) */ [WRITE_DATA] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_4PAD, 0x22, kFLEXSPI_Command_DDR, kFLEXSPI_4PAD, 0x00), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_4PAD, 16, kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_4PAD, 16), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DUMMY_DDR, kFLEXSPI_4PAD, 28, kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_4PAD, 0x01), }, /* Read Register */ [READ_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_4PAD, 0xCC, kFLEXSPI_Command_DDR, kFLEXSPI_4PAD, 0x00), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_4PAD, 16, kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_4PAD, 16), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DUMMY_DDR, kFLEXSPI_4PAD, 12, kFLEXSPI_Command_READ_DDR, kFLEXSPI_4PAD, 0x01), }, /* Write Register */ [WRITE_REG] = { 
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_4PAD, 0x66, kFLEXSPI_Command_DDR, kFLEXSPI_4PAD, 0x00), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_4PAD, 16, kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_4PAD, 16), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_4PAD, 0x01, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, /* Read Identification register */ [READ_ID] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_4PAD, 0xE0, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_4PAD, 16), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_4PAD, 16, kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_4PAD, 0x08), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_DDR, kFLEXSPI_4PAD, 0x01, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), } }; /* Read vendor ID from identification register */ static int memc_flexspi_is66wvq8m4_get_vendor_id(const struct device *dev, uint8_t *vendor_id) { const struct memc_flexspi_is66wvq8m4_config *config = dev->config; struct memc_flexspi_is66wvq8m4_data *data = dev->data; uint32_t buffer = 0; int ret; flexspi_transfer_t transfer = { .deviceAddress = 0x00, /* Not used by this command */ .port = config->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_ID, .data = &buffer, .dataSize = 2, }; ret = memc_flexspi_transfer(data->controller, &transfer); *vendor_id = buffer & 0x7; return ret; } /* Update configuration register */ static int memc_flexspi_is66wvq8m4_update_cfg(const struct device *dev, uint16_t mask, uint16_t set_val) { const struct memc_flexspi_is66wvq8m4_config *config = dev->config; struct memc_flexspi_is66wvq8m4_data *data = dev->data; uint32_t buffer = 0; int ret; flexspi_transfer_t transfer = { /* Results in 0x4 being written on clock 4 */ .deviceAddress = (0x4 << 9), .port = config->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_REG, .data = &buffer, .dataSize = 2, }; ret = memc_flexspi_transfer(data->controller, &transfer); if (ret < 0) { return ret; } buffer &= (~mask & GENMASK(15, 0)); buffer |= set_val; 
LOG_DBG("Setting cfg reg to 0x%0x", buffer); transfer.cmdType = kFLEXSPI_Write, transfer.seqIndex = WRITE_REG; ret = memc_flexspi_transfer(data->controller, &transfer); return ret; } static int memc_flexspi_is66wvq8m4_init(const struct device *dev) { const struct memc_flexspi_is66wvq8m4_config *config = dev->config; struct memc_flexspi_is66wvq8m4_data *data = dev->data; uint8_t vendor_id; if (!device_is_ready(data->controller)) { LOG_ERR("Controller device not ready"); return -ENODEV; } if (memc_flexspi_set_device_config(data->controller, &config->config, (const uint32_t *) memc_flexspi_is66wvq8m4_lut, sizeof(memc_flexspi_is66wvq8m4_lut) / MEMC_FLEXSPI_CMD_SIZE, config->port)) { LOG_ERR("Could not set device configuration"); return -EINVAL; } if (memc_flexspi_is66wvq8m4_get_vendor_id(dev, &vendor_id)) { LOG_ERR("Could not read vendor id"); return -EIO; } LOG_DBG("Vendor id: 0x%0x", vendor_id); if (vendor_id != ISSI_VENDOR_ID) { LOG_WRN("Vendor ID does not match expected value of 0x%0x", ISSI_VENDOR_ID); } if (memc_flexspi_is66wvq8m4_update_cfg(dev, IS66WVQ8M4_LATENCY_MASK, IS66WVQ8M4_LATENCY_FIXED)) { LOG_ERR("Could not set fixed latency mode"); return -EIO; } return 0; } #define CONCAT3(x, y, z) x ## y ## z #define CS_INTERVAL_UNIT(unit) \ CONCAT3(kFLEXSPI_CsIntervalUnit, unit, SckCycle) #define AHB_WRITE_WAIT_UNIT(unit) \ CONCAT3(kFLEXSPI_AhbWriteWaitUnit, unit, AhbCycle) #define MEMC_FLEXSPI_DEVICE_CONFIG(n) \ { \ .flexspiRootClk = DT_INST_PROP(n, spi_max_frequency), \ .isSck2Enabled = false, \ .flashSize = DT_INST_PROP(n, size) / 8 / KB(1), \ .addressShift = DT_INST_REG_ADDR(n) != 0, \ .CSIntervalUnit = \ CS_INTERVAL_UNIT( \ DT_INST_PROP(n, cs_interval_unit)), \ .CSInterval = DT_INST_PROP(n, cs_interval), \ .CSHoldTime = DT_INST_PROP(n, cs_hold_time), \ .CSSetupTime = DT_INST_PROP(n, cs_setup_time), \ .dataValidTime = DT_INST_PROP(n, data_valid_time), \ .columnspace = DT_INST_PROP(n, column_space), \ .enableWordAddress = DT_INST_PROP(n, word_addressable), \ 
.AWRSeqIndex = WRITE_DATA, \ .AWRSeqNumber = 1, \ .ARDSeqIndex = READ_DATA, \ .ARDSeqNumber = 1, \ .AHBWriteWaitUnit = \ AHB_WRITE_WAIT_UNIT( \ DT_INST_PROP(n, ahb_write_wait_unit)), \ .AHBWriteWaitInterval = \ DT_INST_PROP(n, ahb_write_wait_interval), \ .enableWriteMask = true, \ } #define MEMC_FLEXSPI_IS66WVQ8M4(n) \ static const struct memc_flexspi_is66wvq8m4_config \ memc_flexspi_is66wvq8m4_config_##n = { \ .port = DT_INST_REG_ADDR(n), \ .config = MEMC_FLEXSPI_DEVICE_CONFIG(n), \ }; \ \ static struct memc_flexspi_is66wvq8m4_data \ memc_flexspi_is66wvq8m4_data_##n = { \ .controller = DEVICE_DT_GET(DT_INST_BUS(n)), \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ memc_flexspi_is66wvq8m4_init, \ NULL, \ &memc_flexspi_is66wvq8m4_data_##n, \ &memc_flexspi_is66wvq8m4_config_##n,\ POST_KERNEL, \ CONFIG_MEMC_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI_IS66WVQ8M4) ```
/content/code_sandbox/drivers/memc/memc_mcux_flexspi_is66wvq8m4.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,617
```unknown config MEMC_NXP_S32_QSPI bool "NXP S32 Quad Serial Peripheral Interface (QSPI) controller" default y depends on DT_HAS_NXP_S32_QSPI_ENABLED select PINCTRL help Enable NXP S32 Quad Serial Peripheral Interface (QSPI) controller. ```
/content/code_sandbox/drivers/memc/Kconfig.nxp_s32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
66
```objective-c /* * */ #include <zephyr/device.h> #include <sys/types.h> #include <fsl_flexspi.h> /* Size of a command in the LUT table */ #define MEMC_FLEXSPI_CMD_SIZE 4U /* Number of commands in an instruction sequence */ #define MEMC_FLEXSPI_CMD_PER_SEQ 4U /** * @brief Wait for the FlexSPI bus to be idle * * Waits for the FlexSPI bus to be idle. Can be used when reconfiguring * the FlexSPI to make sure no flash access is occurring before changing * settings. * * @param dev: FlexSPI device */ void memc_flexspi_wait_bus_idle(const struct device *dev); /** * @brief Check if FlexSPI is being used in XIP mode. * * Checks if the FlexSPI is being used for code execution in the current * application. * * @param dev: FlexSPI device * @retval true if FlexSPI being used for XIP */ bool memc_flexspi_is_running_xip(const struct device *dev); /** * @brief Update clock selection of the FlexSPI device * * Updates clock frequency of FlexSPI to new clock speed. * * @param dev: FlexSPI device * @param device_config: External device configuration. * @param port: FlexSPI port to use for this external device * @param freq_hz: new clock frequency to apply * @return 0 on success, negative value on failure */ int memc_flexspi_update_clock(const struct device *dev, flexspi_device_config_t *device_config, flexspi_port_t port, uint32_t freq_hz); /** * @brief configure new FlexSPI device * * Configures new device on the FlexSPI bus. * @param dev: FlexSPI device * @param device_config: External device configuration. 
* @param lut_array: Lookup table of FlexSPI flash commands for device * @param lut_count: number of LUT entries (4 bytes each) in lut array * @param port: FlexSPI port to use for this external device * @return 0 on success, negative value on failure */ int memc_flexspi_set_device_config(const struct device *dev, const flexspi_device_config_t *device_config, const uint32_t *lut_array, uint8_t lut_count, flexspi_port_t port); /** * @brief Perform software reset of FlexSPI * * Software reset of FlexSPI. Does not clear configuration registers. * @param dev: FlexSPI device * @return 0 on success, negative value on failure */ int memc_flexspi_reset(const struct device *dev); /** * @brief Send blocking IP transfer * * Send blocking IP transfer using FlexSPI. * @param dev: FlexSPI device * @param transfer: FlexSPI transfer. seqIndex should be set as though the * LUT array was loaded at offset 0. * @return 0 on success, negative value on failure */ int memc_flexspi_transfer(const struct device *dev, flexspi_transfer_t *transfer); /** * @brief Get AHB address for FlexSPI port * * Get AHB address for FlexSPI port. This address is memory mapped, and can be * read from (and written to, for PSRAM) as though it were internal memory. * @param dev: FlexSPI device * @param port: FlexSPI port external device is on * @param offset: byte offset from start of device to get AHB address for * @return 0 on success, negative value on failure */ void *memc_flexspi_get_ahb_address(const struct device *dev, flexspi_port_t port, off_t offset); ```
/content/code_sandbox/drivers/memc/memc_mcux_flexspi.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
797
```c /* * */ /* */ #define DT_DRV_COMPAT nxp_imx_flexspi_aps6408l #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include "memc_mcux_flexspi.h" /* * NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, Any external functions * called while interacting with the flexspi MUST be relocated to SRAM or ITCM * at runtime, so that the chip does not access the flexspi to read program * instructions while it is being written to */ #if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_MEMC_LOG_LEVEL > 0) #warning "Enabling memc driver logging and XIP mode simultaneously can cause \ read-while-write hazards. This configuration is not recommended." #endif LOG_MODULE_REGISTER(memc_flexspi_aps6408l, CONFIG_MEMC_LOG_LEVEL); #define APM_VENDOR_ID 0xD /* APS6408L Configuration registers */ #define APS_6408L_MR_0 0x0 #define APS_6408L_MR_1 0x1 #define APS_6408L_MR_2 0x2 #define APS_6408L_MR_3 0x3 #define APS_6408L_MR_4 0x4 #define APS_6408L_MR_6 0x6 #define APS_6408L_MR_8 0x8 /* Read Latency code (MR0[4:2]) */ #define APS_6408L_RLC_MASK 0x1C #define APS_6408L_RLC_200 0x10 /* 200MHz input clock read latency */ /* Read Latency type (MR0[5]) */ #define APS_6408L_RLT_MASK 0x30 #define APS_6408L_RLT_VARIABLE 0x0 /* Variable latency */ /* Burst type/burst length mask (MR8[0:2]) */ #define APS_6408L_BURST_TYPE_MASK 0x7 #define APS_6408L_BURST_1K 0x7 /* 1K Hybrid wrap */ /* Row boundary cross enable mask (MR8[3]) */ #define APS_6408L_ROW_CROSS_MASK 0x8 #define APS_6408L_ROW_CROSS_EN 0x8 /* Enable linear burst reads to cross rows */ /* Write latency (MR4[7:5]) */ #define APS_6408L_WLC_MASK 0xE0 #define APS_6408L_WLC_200 0x20 /* 200MHz input clock write latency */ enum { READ_DATA = 0, WRITE_DATA, READ_REG, WRITE_REG, RESET, }; struct memc_flexspi_aps6408l_config { flexspi_port_t port; flexspi_device_config_t config; }; /* Device variables used in critical sections should be in this structure */ struct memc_flexspi_aps6408l_data { const struct device 
*controller; }; static const uint32_t memc_flexspi_aps6408l_lut[][4] = { /* Read Data (Sync read, linear burst) */ [READ_DATA] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x20), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x07, kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04), }, /* Write Data (Sync write, linear burst) */ [WRITE_DATA] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0xA0, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x20), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x07, kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x04), }, /* Read Register (Mode register read) */ [READ_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x40, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x20), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x07, kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04), }, /* Write Register (Mode register write) */ [WRITE_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0xC0, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x20), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x08, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, /* Reset (Global reset) */ [RESET] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0xFF, kFLEXSPI_Command_DUMMY_SDR, kFLEXSPI_8PAD, 0x03), } }; static int memc_flexspi_aps6408l_get_vendor_id(const struct device *dev, uint8_t *vendor_id) { const struct memc_flexspi_aps6408l_config *config = dev->config; struct memc_flexspi_aps6408l_data *data = dev->data; uint32_t buffer = 0; int ret; flexspi_transfer_t transfer = { .deviceAddress = APS_6408L_MR_1, .port = config->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_REG, .data = &buffer, .dataSize = 1, }; ret = memc_flexspi_transfer(data->controller, &transfer); *vendor_id = buffer & 0x1f; return ret; } static int memc_flexspi_aps6408l_update_reg(const struct device *dev, uint8_t reg, uint8_t 
mask, uint8_t set_val) { const struct memc_flexspi_aps6408l_config *config = dev->config; struct memc_flexspi_aps6408l_data *data = dev->data; uint32_t buffer = 0; int ret; flexspi_transfer_t transfer = { .deviceAddress = reg, .port = config->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_REG, .data = &buffer, .dataSize = 1, }; ret = memc_flexspi_transfer(data->controller, &transfer); if (ret < 0) { return ret; } buffer &= (~mask & 0xFF); buffer |= set_val; LOG_DBG("Setting reg 0x%0x to 0x%0x", reg, buffer); transfer.cmdType = kFLEXSPI_Write, transfer.seqIndex = WRITE_REG; ret = memc_flexspi_transfer(data->controller, &transfer); return ret; } static int memc_flexspi_aps6408l_reset(const struct device *dev) { const struct memc_flexspi_aps6408l_config *config = dev->config; struct memc_flexspi_aps6408l_data *data = dev->data; int ret; flexspi_transfer_t transfer = { .deviceAddress = 0x0, .port = config->port, .cmdType = kFLEXSPI_Command, .SeqNumber = 1, .seqIndex = RESET, .data = NULL, .dataSize = 0, }; LOG_DBG("Resetting ram"); ret = memc_flexspi_transfer(data->controller, &transfer); if (ret < 0) { return ret; } /* We need to delay 5 ms to allow APS6408L pSRAM to reinitialize */ k_msleep(5); return ret; } static int memc_flexspi_aps6408l_init(const struct device *dev) { const struct memc_flexspi_aps6408l_config *config = dev->config; struct memc_flexspi_aps6408l_data *data = dev->data; uint8_t vendor_id; if (!device_is_ready(data->controller)) { LOG_ERR("Controller device not ready"); return -ENODEV; } if (memc_flexspi_set_device_config(data->controller, &config->config, (const uint32_t *) memc_flexspi_aps6408l_lut, sizeof(memc_flexspi_aps6408l_lut) / MEMC_FLEXSPI_CMD_SIZE, config->port)) { LOG_ERR("Could not set device configuration"); return -EINVAL; } memc_flexspi_reset(data->controller); if (memc_flexspi_aps6408l_reset(dev)) { LOG_ERR("Could not reset pSRAM"); return -EIO; } if (memc_flexspi_aps6408l_get_vendor_id(dev, &vendor_id)) { 
LOG_ERR("Could not read vendor id"); return -EIO; } LOG_DBG("Vendor id: 0x%0x", vendor_id); if (vendor_id != APM_VENDOR_ID) { LOG_WRN("Vendor ID does not match expected value of 0x%0x", APM_VENDOR_ID); } /* Enable RBX, burst length set to 1K byte wrap. * this will also enable boundary crossing for burst reads */ if (memc_flexspi_aps6408l_update_reg(dev, APS_6408L_MR_8, (APS_6408L_ROW_CROSS_MASK | APS_6408L_BURST_TYPE_MASK), (APS_6408L_ROW_CROSS_EN | APS_6408L_BURST_1K))) { LOG_ERR("Could not enable RBX 1K burst length"); return -EIO; } /* Set read latency code and type for 200MHz flash clock operation */ if (memc_flexspi_aps6408l_update_reg(dev, APS_6408L_MR_0, (APS_6408L_RLC_MASK | APS_6408L_RLT_MASK), (APS_6408L_RLC_200 | APS_6408L_RLT_VARIABLE))) { LOG_ERR("Could not set 200MHz read latency code"); return -EIO; } /* Set write latency code and type for 200MHz flash clock operation */ if (memc_flexspi_aps6408l_update_reg(dev, APS_6408L_MR_4, APS_6408L_WLC_MASK, APS_6408L_WLC_200)) { LOG_ERR("Could not set 200MHz write latency code"); return -EIO; } return 0; } #define CONCAT3(x, y, z) x ## y ## z #define CS_INTERVAL_UNIT(unit) \ CONCAT3(kFLEXSPI_CsIntervalUnit, unit, SckCycle) #define AHB_WRITE_WAIT_UNIT(unit) \ CONCAT3(kFLEXSPI_AhbWriteWaitUnit, unit, AhbCycle) #define MEMC_FLEXSPI_DEVICE_CONFIG(n) \ { \ .flexspiRootClk = DT_INST_PROP(n, spi_max_frequency), \ .isSck2Enabled = false, \ .flashSize = DT_INST_PROP(n, size) / 8 / KB(1), \ .CSIntervalUnit = \ CS_INTERVAL_UNIT( \ DT_INST_PROP(n, cs_interval_unit)), \ .CSInterval = DT_INST_PROP(n, cs_interval), \ .CSHoldTime = DT_INST_PROP(n, cs_hold_time), \ .CSSetupTime = DT_INST_PROP(n, cs_setup_time), \ .dataValidTime = DT_INST_PROP(n, data_valid_time), \ .columnspace = DT_INST_PROP(n, column_space), \ .enableWordAddress = DT_INST_PROP(n, word_addressable), \ .AWRSeqIndex = WRITE_DATA, \ .AWRSeqNumber = 1, \ .ARDSeqIndex = READ_DATA, \ .ARDSeqNumber = 1, \ .AHBWriteWaitUnit = \ AHB_WRITE_WAIT_UNIT( \ DT_INST_PROP(n, 
ahb_write_wait_unit)), \ .AHBWriteWaitInterval = \ DT_INST_PROP(n, ahb_write_wait_interval), \ .enableWriteMask = true, \ } \ #define MEMC_FLEXSPI_APS6408L(n) \ static const struct memc_flexspi_aps6408l_config \ memc_flexspi_aps6408l_config_##n = { \ .port = DT_INST_REG_ADDR(n), \ .config = MEMC_FLEXSPI_DEVICE_CONFIG(n), \ }; \ \ static struct memc_flexspi_aps6408l_data \ memc_flexspi_aps6408l_data_##n = { \ .controller = DEVICE_DT_GET(DT_INST_BUS(n)), \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ memc_flexspi_aps6408l_init, \ NULL, \ &memc_flexspi_aps6408l_data_##n, \ &memc_flexspi_aps6408l_config_##n, \ POST_KERNEL, \ CONFIG_MEMC_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI_APS6408L) ```
/content/code_sandbox/drivers/memc/memc_mcux_flexspi_aps6408l.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,174
```c /* * */ #define DT_DRV_COMPAT nxp_imx_flexspi #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/pm/device.h> #include <soc.h> #include "memc_mcux_flexspi.h" /* * NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, Any external functions * called while interacting with the flexspi MUST be relocated to SRAM or ITCM * at runtime, so that the chip does not access the flexspi to read program * instructions while it is being written to */ #if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_MEMC_LOG_LEVEL > 0) #warning "Enabling memc driver logging and XIP mode simultaneously can cause \ read-while-write hazards. This configuration is not recommended." #endif #define FLEXSPI_MAX_LUT 64U LOG_MODULE_REGISTER(memc_flexspi, CONFIG_MEMC_LOG_LEVEL); struct memc_flexspi_buf_cfg { uint16_t prefetch; uint16_t priority; uint16_t master_id; uint16_t buf_size; } __packed; /* Structure tracking LUT offset and usage per each port */ struct port_lut { uint8_t lut_offset; uint8_t lut_used; }; /* flexspi device data should be stored in RAM to avoid read-while-write hazards */ struct memc_flexspi_data { FLEXSPI_Type *base; uint8_t *ahb_base; bool xip; bool ahb_bufferable; bool ahb_cacheable; bool ahb_prefetch; bool ahb_read_addr_opt; bool combination_mode; bool sck_differential_clock; flexspi_read_sample_clock_t rx_sample_clock; #if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \ FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB flexspi_read_sample_clock_t rx_sample_clock_b; #endif const struct pinctrl_dev_config *pincfg; size_t size[kFLEXSPI_PortCount]; struct port_lut port_luts[kFLEXSPI_PortCount]; struct memc_flexspi_buf_cfg *buf_cfg; uint8_t buf_cfg_cnt; const struct device *clock_dev; clock_control_subsys_t clock_subsys; }; void memc_flexspi_wait_bus_idle(const struct device *dev) { struct memc_flexspi_data *data = dev->data; while (false == 
FLEXSPI_GetBusIdleStatus(data->base)) { } } bool memc_flexspi_is_running_xip(const struct device *dev) { struct memc_flexspi_data *data = dev->data; return data->xip; } int memc_flexspi_update_clock(const struct device *dev, flexspi_device_config_t *device_config, flexspi_port_t port, uint32_t freq_hz) { struct memc_flexspi_data *data = dev->data; uint32_t rate; uint32_t key; int ret; /* To reclock the FlexSPI, we should: * - disable the module * - set the new clock * - reenable the module * - reset the module * We CANNOT XIP at any point during this process */ key = irq_lock(); memc_flexspi_wait_bus_idle(dev); ret = clock_control_set_rate(data->clock_dev, data->clock_subsys, (clock_control_subsys_rate_t)freq_hz); if (ret < 0) { irq_unlock(key); return ret; } /* * We need to update the DLL value before we call clock_control_get_rate, * because this will cause XIP (flash reads) to occur. Although the * true flash clock is not known, assume the set_rate function programmed * a value close to what we requested. 
*/ device_config->flexspiRootClk = freq_hz; FLEXSPI_UpdateDllValue(data->base, device_config, port); memc_flexspi_reset(dev); memc_flexspi_wait_bus_idle(dev); ret = clock_control_get_rate(data->clock_dev, data->clock_subsys, &rate); if (ret < 0) { irq_unlock(key); return ret; } device_config->flexspiRootClk = rate; FLEXSPI_UpdateDllValue(data->base, device_config, port); memc_flexspi_reset(dev); irq_unlock(key); return 0; } int memc_flexspi_set_device_config(const struct device *dev, const flexspi_device_config_t *device_config, const uint32_t *lut_array, uint8_t lut_count, flexspi_port_t port) { flexspi_device_config_t tmp_config; uint32_t tmp_lut[FLEXSPI_MAX_LUT]; struct memc_flexspi_data *data = dev->data; const uint32_t *lut_ptr = lut_array; uint8_t lut_used = 0U; unsigned int key = 0; if (port >= kFLEXSPI_PortCount) { LOG_ERR("Invalid port number"); return -EINVAL; } if (data->port_luts[port].lut_used < lut_count) { /* We cannot reuse the existing LUT slot, * Check if the LUT table will fit into the remaining LUT slots */ for (uint8_t i = 0; i < kFLEXSPI_PortCount; i++) { lut_used += data->port_luts[i].lut_used; } if ((lut_used + lut_count) > FLEXSPI_MAX_LUT) { return -ENOBUFS; } } data->size[port] = device_config->flashSize * KB(1); if (memc_flexspi_is_running_xip(dev)) { /* We need to avoid flash access while configuring the FlexSPI. * To do this, we will copy the LUT array into stack-allocated * temporary memory */ memcpy(tmp_lut, lut_array, lut_count * MEMC_FLEXSPI_CMD_SIZE); lut_ptr = tmp_lut; } memcpy(&tmp_config, device_config, sizeof(tmp_config)); /* Update FlexSPI AWRSEQID and ARDSEQID values based on where the LUT * array will actually be loaded. */ if (data->port_luts[port].lut_used < lut_count) { /* Update lut offset with new value */ data->port_luts[port].lut_offset = lut_used; } /* LUTs should only be installed on sequence boundaries, every * 4 entries. 
Round LUT usage up to nearest sequence */ data->port_luts[port].lut_used = ROUND_UP(lut_count, 4); tmp_config.ARDSeqIndex += data->port_luts[port].lut_offset / MEMC_FLEXSPI_CMD_PER_SEQ; tmp_config.AWRSeqIndex += data->port_luts[port].lut_offset / MEMC_FLEXSPI_CMD_PER_SEQ; /* Lock IRQs before reconfiguring FlexSPI, to prevent XIP */ key = irq_lock(); FLEXSPI_SetFlashConfig(data->base, &tmp_config, port); FLEXSPI_UpdateLUT(data->base, data->port_luts[port].lut_offset, lut_ptr, lut_count); irq_unlock(key); return 0; } int memc_flexspi_reset(const struct device *dev) { struct memc_flexspi_data *data = dev->data; FLEXSPI_SoftwareReset(data->base); return 0; } int memc_flexspi_transfer(const struct device *dev, flexspi_transfer_t *transfer) { flexspi_transfer_t tmp; struct memc_flexspi_data *data = dev->data; status_t status; uint32_t seq_off, addr_offset = 0U; int i; /* Calculate sequence offset and address offset based on port */ seq_off = data->port_luts[transfer->port].lut_offset / MEMC_FLEXSPI_CMD_PER_SEQ; for (i = 0; i < transfer->port; i++) { addr_offset += data->size[i]; } if ((seq_off != 0) || (addr_offset != 0)) { /* Adjust device address and sequence index for transfer */ memcpy(&tmp, transfer, sizeof(tmp)); tmp.seqIndex += seq_off; tmp.deviceAddress += addr_offset; status = FLEXSPI_TransferBlocking(data->base, &tmp); } else { /* Transfer does not need adjustment */ status = FLEXSPI_TransferBlocking(data->base, transfer); } if (status != kStatus_Success) { LOG_ERR("Transfer error: %d", status); return -EIO; } return 0; } void *memc_flexspi_get_ahb_address(const struct device *dev, flexspi_port_t port, off_t offset) { struct memc_flexspi_data *data = dev->data; int i; if (port >= kFLEXSPI_PortCount) { LOG_ERR("Invalid port number: %u", port); return NULL; } for (i = 0; i < port; i++) { offset += data->size[i]; } #if defined(FSL_FEATURE_FLEXSPI_SUPPORT_ADDRESS_SHIFT) && \ (FSL_FEATURE_FLEXSPI_SUPPORT_ADDRESS_SHIFT) if (data->base->FLSHCR0[port] & 
FLEXSPI_FLSHCR0_ADDRSHIFT_MASK) { /* Address shift is set, add 0x1000_0000 to AHB address */ offset += 0x10000000; } #endif return data->ahb_base + offset; } static int memc_flexspi_init(const struct device *dev) { struct memc_flexspi_data *data = dev->data; flexspi_config_t flexspi_config; uint32_t flash_sizes[kFLEXSPI_PortCount]; int ret; uint8_t i; /* we should not configure the device we are running on */ if (memc_flexspi_is_running_xip(dev)) { if (!IS_ENABLED(CONFIG_MEMC_MCUX_FLEXSPI_INIT_XIP)) { LOG_DBG("XIP active on %s, skipping init", dev->name); return 0; } } /* * SOCs such as the RT1064 and RT1024 have internal flash, and no pinmux * settings, continue if no pinctrl state found. */ ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT); if (ret < 0 && ret != -ENOENT) { return ret; } FLEXSPI_GetDefaultConfig(&flexspi_config); flexspi_config.ahbConfig.enableAHBBufferable = data->ahb_bufferable; flexspi_config.ahbConfig.enableAHBCachable = data->ahb_cacheable; flexspi_config.ahbConfig.enableAHBPrefetch = data->ahb_prefetch; flexspi_config.ahbConfig.enableReadAddressOpt = data->ahb_read_addr_opt; #if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN) && \ FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN) flexspi_config.enableCombination = data->combination_mode; #endif #if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT) && \ FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT) flexspi_config.enableSckBDiffOpt = data->sck_differential_clock; #endif flexspi_config.rxSampleClock = data->rx_sample_clock; #if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \ FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB flexspi_config.rxSampleClockPortB = data->rx_sample_clock_b; #if defined(FSL_FEATURE_FLEXSPI_SUPPORT_RXCLKSRC_DIFF) && \ FSL_FEATURE_FLEXSPI_SUPPORT_RXCLKSRC_DIFF if (flexspi_config.rxSampleClock != flexspi_config.rxSampleClockPortB) { flexspi_config.rxSampleClockDiff = true; } #endif #endif /* Configure AHB RX buffers, if any 
configuration settings are present */ __ASSERT(data->buf_cfg_cnt < FSL_FEATURE_FLEXSPI_AHB_BUFFER_COUNT, "Maximum RX buffer configuration count exceeded"); for (i = 0; i < data->buf_cfg_cnt; i++) { /* Should AHB prefetch up to buffer size? */ flexspi_config.ahbConfig.buffer[i].enablePrefetch = data->buf_cfg[i].prefetch; /* AHB access priority (used for suspending control of AHB prefetching )*/ flexspi_config.ahbConfig.buffer[i].priority = data->buf_cfg[i].priority; /* AHB master index, SOC specific */ flexspi_config.ahbConfig.buffer[i].masterIndex = data->buf_cfg[i].master_id; /* RX buffer allocation (total available buffer space is instance/SOC specific) */ flexspi_config.ahbConfig.buffer[i].bufferSize = data->buf_cfg[i].buf_size; } if (memc_flexspi_is_running_xip(dev)) { /* Save flash sizes- FlexSPI init will reset them */ for (i = 0; i < kFLEXSPI_PortCount; i++) { flash_sizes[i] = data->base->FLSHCR0[i]; } } FLEXSPI_Init(data->base, &flexspi_config); if (memc_flexspi_is_running_xip(dev)) { /* Restore flash sizes */ for (i = 0; i < kFLEXSPI_PortCount; i++) { data->base->FLSHCR0[i] = flash_sizes[i]; } /* Reenable FLEXSPI module */ data->base->MCR0 &= ~FLEXSPI_MCR0_MDIS_MASK; } return 0; } #ifdef CONFIG_PM_DEVICE static int memc_flexspi_pm_action(const struct device *dev, enum pm_device_action action) { struct memc_flexspi_data *data = dev->data; int ret; switch (action) { case PM_DEVICE_ACTION_RESUME: ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT); if (ret < 0 && ret != -ENOENT) { return ret; } break; case PM_DEVICE_ACTION_SUSPEND: ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_SLEEP); if (ret < 0 && ret != -ENOENT) { return ret; } break; default: return -ENOTSUP; } return 0; } #endif #if defined(FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB) && \ FSL_FEATURE_FLEXSPI_SUPPORT_SEPERATE_RXCLKSRC_PORTB #define MEMC_FLEXSPI_RXCLK_B(inst) .rx_sample_clock_b = DT_INST_PROP(inst, rx_clock_source_b), #else #define MEMC_FLEXSPI_RXCLK_B(inst) 
#endif #if defined(CONFIG_XIP) && defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) /* Checks if image flash base address is in the FlexSPI AHB base region */ #define MEMC_FLEXSPI_CFG_XIP(node_id) \ ((CONFIG_FLASH_BASE_ADDRESS) >= DT_REG_ADDR_BY_IDX(node_id, 1)) && \ ((CONFIG_FLASH_BASE_ADDRESS) < (DT_REG_ADDR_BY_IDX(node_id, 1) + \ DT_REG_SIZE_BY_IDX(node_id, 1))) #else #define MEMC_FLEXSPI_CFG_XIP(node_id) false #endif #define MEMC_FLEXSPI(n) \ PINCTRL_DT_INST_DEFINE(n); \ static uint16_t buf_cfg_##n[] = \ DT_INST_PROP_OR(n, rx_buffer_config, {0}); \ \ static struct memc_flexspi_data \ memc_flexspi_data_##n = { \ .base = (FLEXSPI_Type *) DT_INST_REG_ADDR(n), \ .xip = MEMC_FLEXSPI_CFG_XIP(DT_DRV_INST(n)), \ .ahb_base = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(n, 1), \ .ahb_bufferable = DT_INST_PROP(n, ahb_bufferable), \ .ahb_cacheable = DT_INST_PROP(n, ahb_cacheable), \ .ahb_prefetch = DT_INST_PROP(n, ahb_prefetch), \ .ahb_read_addr_opt = DT_INST_PROP(n, ahb_read_addr_opt),\ .combination_mode = DT_INST_PROP(n, combination_mode), \ .sck_differential_clock = DT_INST_PROP(n, sck_differential_clock), \ .rx_sample_clock = DT_INST_PROP(n, rx_clock_source), \ MEMC_FLEXSPI_RXCLK_B(n) \ .buf_cfg = (struct memc_flexspi_buf_cfg *)buf_cfg_##n, \ .buf_cfg_cnt = sizeof(buf_cfg_##n) / \ sizeof(struct memc_flexspi_buf_cfg), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t) \ DT_INST_CLOCKS_CELL(n, name), \ }; \ \ PM_DEVICE_DT_INST_DEFINE(n, memc_flexspi_pm_action); \ \ DEVICE_DT_INST_DEFINE(n, \ memc_flexspi_init, \ PM_DEVICE_DT_INST_GET(n), \ &memc_flexspi_data_##n, \ NULL, \ POST_KERNEL, \ CONFIG_MEMC_MCUX_FLEXSPI_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI) ```
/content/code_sandbox/drivers/memc/memc_mcux_flexspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,943
```unknown config MEMC_HIFIVE_UNMATCHED_DRAM bool "HiFive Unmatched DRAM Memory Controller" default y depends on DT_HAS_SIFIVE_FU740_C000_DDR_ENABLED help Enable HiFive Unmatched Memory Controller. ```
/content/code_sandbox/drivers/memc/Kconfig.sifive
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
55
```unknown menu "MSPI MEMC device driver" config MEMC_MSPI bool select MSPI help MSPI MEMC drivers are enabled. config MEMC_MSPI_APS6404L bool "MSPI AP Memory APS6404L pSRAM driver" default y depends on DT_HAS_MSPI_APS6404L_ENABLED select MEMC_MSPI select MSPI_AMBIQ_AP3 if SOC_SERIES_APOLLO3X endmenu ```
/content/code_sandbox/drivers/memc/Kconfig.mspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
104
```objective-c /* * */ #include <zephyr/device.h> #include <sys/types.h> #include <Qspi_Ip.h> /** * @brief Build a QSPI Look-up Table (LUT) sequence entry. * * @param inst instruction * @param pads pad information * @param op operand */ #define QSPI_LUT_OP(inst, pads, op) \ ((Qspi_Ip_InstrOpType)((Qspi_Ip_InstrOpType)(inst) \ | (Qspi_Ip_InstrOpType)(pads) \ | (Qspi_Ip_InstrOpType)(op))) /** * @brief Get the QSPI peripheral hardware instance number. * * @param dev device pointer */ uint8_t memc_nxp_s32_qspi_get_instance(const struct device *dev); ```
/content/code_sandbox/drivers/memc/memc_nxp_s32_qspi.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
179
```c /* * */ #define DT_DRV_COMPAT nxp_imx_flexspi_s27ks0641 #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include "memc_mcux_flexspi.h" /* * NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, Any external functions * called while interacting with the flexspi MUST be relocated to SRAM or ITCM * at runtime, so that the chip does not access the flexspi to read program * instructions while it is being written to */ #if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_MEMC_LOG_LEVEL > 0) #warning "Enabling memc driver logging and XIP mode simultaneously can cause \ read-while-write hazards. This configuration is not recommended." #endif LOG_MODULE_REGISTER(memc_flexspi_s27ks0641, CONFIG_MEMC_LOG_LEVEL); enum { READ_DATA, WRITE_DATA, READ_REG, WRITE_REG, }; struct memc_flexspi_s27ks0641_config { flexspi_port_t port; flexspi_device_config_t config; }; /* Device variables used in critical sections should be in this structure */ struct memc_flexspi_s27ks0641_data { const struct device *controller; }; static const uint32_t memc_flexspi_s27ks0641_lut[][4] = { /* Read Data */ [READ_DATA] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xA0, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10, kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x06), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, /* Write Data */ [WRITE_DATA] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10, kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x06), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, /* Read Register */ [READ_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xE0, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18), 
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10, kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x06), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, /* Write Register */ [WRITE_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x60, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10, kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x06), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, }; static int memc_flexspi_s27ks0641_get_vendor_id(const struct device *dev, uint16_t *vendor_id) { const struct memc_flexspi_s27ks0641_config *config = dev->config; struct memc_flexspi_s27ks0641_data *data = dev->data; uint32_t buffer = 0; int ret; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = config->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_REG, .data = &buffer, .dataSize = 4, }; LOG_DBG("Reading id"); ret = memc_flexspi_transfer(data->controller, &transfer); *vendor_id = buffer & 0xffff; return ret; } static int memc_flexspi_s27ks0641_init(const struct device *dev) { const struct memc_flexspi_s27ks0641_config *config = dev->config; struct memc_flexspi_s27ks0641_data *data = dev->data; uint16_t vendor_id; if (!device_is_ready(data->controller)) { LOG_ERR("Controller device not ready"); return -ENODEV; } if (memc_flexspi_set_device_config(data->controller, &config->config, (const uint32_t *) memc_flexspi_s27ks0641_lut, sizeof(memc_flexspi_s27ks0641_lut) / MEMC_FLEXSPI_CMD_SIZE, config->port)) { LOG_ERR("Could not set device configuration"); return -EINVAL; } memc_flexspi_reset(data->controller); if (memc_flexspi_s27ks0641_get_vendor_id(dev, &vendor_id)) { LOG_ERR("Could not read vendor id"); return -EIO; } LOG_DBG("Vendor id: 0x%0x", vendor_id); return 0; } #define CONCAT3(x, y, z) x ## y ## z #define CS_INTERVAL_UNIT(unit) \ 
CONCAT3(kFLEXSPI_CsIntervalUnit, unit, SckCycle) #define AHB_WRITE_WAIT_UNIT(unit) \ CONCAT3(kFLEXSPI_AhbWriteWaitUnit, unit, AhbCycle) #define MEMC_FLEXSPI_DEVICE_CONFIG(n) \ { \ .flexspiRootClk = MHZ(332), \ .isSck2Enabled = false, \ .flashSize = DT_INST_PROP(n, size) / 8 / KB(1), \ .CSIntervalUnit = \ CS_INTERVAL_UNIT( \ DT_INST_PROP(n, cs_interval_unit)), \ .CSInterval = DT_INST_PROP(n, cs_interval), \ .CSHoldTime = DT_INST_PROP(n, cs_hold_time), \ .CSSetupTime = DT_INST_PROP(n, cs_setup_time), \ .dataValidTime = DT_INST_PROP(n, data_valid_time), \ .columnspace = DT_INST_PROP(n, column_space), \ .enableWordAddress = DT_INST_PROP(n, word_addressable), \ .AWRSeqIndex = WRITE_DATA, \ .AWRSeqNumber = 1, \ .ARDSeqIndex = READ_DATA, \ .ARDSeqNumber = 1, \ .AHBWriteWaitUnit = \ AHB_WRITE_WAIT_UNIT( \ DT_INST_PROP(n, ahb_write_wait_unit)), \ .AHBWriteWaitInterval = \ DT_INST_PROP(n, ahb_write_wait_interval), \ .enableWriteMask = true, \ } \ #define MEMC_FLEXSPI_S27KS0641(n) \ static const struct memc_flexspi_s27ks0641_config \ memc_flexspi_s27ks0641_config_##n = { \ .port = DT_INST_REG_ADDR(n), \ .config = MEMC_FLEXSPI_DEVICE_CONFIG(n), \ }; \ \ static struct memc_flexspi_s27ks0641_data \ memc_flexspi_s27ks0641_data_##n = { \ .controller = DEVICE_DT_GET(DT_INST_BUS(n)), \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ memc_flexspi_s27ks0641_init, \ NULL, \ &memc_flexspi_s27ks0641_data_##n, \ &memc_flexspi_s27ks0641_config_##n, \ POST_KERNEL, \ CONFIG_MEMC_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI_S27KS0641) ```
/content/code_sandbox/drivers/memc/memc_mcux_flexspi_s27ks0641.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,064
```c /* * */ #define DT_DRV_COMPAT nxp_s32_qspi #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nxp_s32_qspi_memc, CONFIG_MEMC_LOG_LEVEL); #include <zephyr/drivers/pinctrl.h> #include <zephyr/sys/util.h> #include <soc.h> #include "memc_nxp_s32_qspi.h" /* Mapping between QSPI chip select signals and devicetree chip select identifiers */ #define QSPI_PCSFA1 0 #define QSPI_PCSFA2 1 #define QSPI_PCSFB1 2 #define QSPI_PCSFB2 3 struct memc_nxp_s32_qspi_data { uint8_t instance; }; struct memc_nxp_s32_qspi_config { QuadSPI_Type *base; const struct pinctrl_dev_config *pincfg; const Qspi_Ip_ControllerConfigType *controller_cfg; }; static inline uint8_t get_instance(QuadSPI_Type *base) { QuadSPI_Type *const base_ptrs[QuadSPI_INSTANCE_COUNT] = IP_QuadSPI_BASE_PTRS; uint8_t i; for (i = 0; i < QuadSPI_INSTANCE_COUNT; i++) { if (base_ptrs[i] == base) { break; } } __ASSERT_NO_MSG(i < QuadSPI_INSTANCE_COUNT); return i; } static int memc_nxp_s32_qspi_init(const struct device *dev) { const struct memc_nxp_s32_qspi_config *config = dev->config; struct memc_nxp_s32_qspi_data *data = dev->data; Qspi_Ip_StatusType status; data->instance = get_instance(config->base); if (pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT)) { return -EIO; } status = Qspi_Ip_ControllerInit(data->instance, config->controller_cfg); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Fail to initialize QSPI controller %d (%d)", data->instance, status); return -EIO; } return 0; } uint8_t memc_nxp_s32_qspi_get_instance(const struct device *dev) { struct memc_nxp_s32_qspi_data *data = dev->data; return data->instance; } #define QSPI_DATA_CFG(n) \ IF_ENABLED(FEATURE_QSPI_DDR, ( \ .dataRate = _CONCAT(QSPI_IP_DATA_RATE_, \ DT_INST_STRING_UPPER_TOKEN(n, data_rate)), \ .dataAlign = COND_CODE_1(DT_INST_PROP(n, hold_time_2x), \ (QSPI_IP_FLASH_DATA_ALIGN_2X_REFCLK), \ (QSPI_IP_FLASH_DATA_ALIGN_REFCLK)), \ )) #define QSPI_ADDR_CFG(n) \ IF_ENABLED(FEATURE_QSPI_ADDR_CFG, ( \ .columnAddr = DT_INST_PROP_OR(n, 
column_space, 0), \ .wordAddresable = DT_INST_PROP(n, word_addressable), \ )) #define QSPI_BYTES_SWAP_ADDR(n) \ IF_ENABLED(FEATURE_QSPI_BYTES_SWAP_ADDR, \ (.byteSwap = DT_INST_PROP(n, byte_swapping),)) #define QSPI_SAMPLE_DELAY(n) \ COND_CODE_1(DT_INST_PROP(n, sample_delay_half_cycle), \ (QSPI_IP_SAMPLE_DELAY_HALFCYCLE_EARLY_DQS), \ (QSPI_IP_SAMPLE_DELAY_SAME_DQS)) #define QSPI_SAMPLE_PHASE(n) \ COND_CODE_1(DT_INST_PROP(n, sample_phase_inverted), \ (QSPI_IP_SAMPLE_PHASE_INVERTED), \ (QSPI_IP_SAMPLE_PHASE_NON_INVERTED)) #define QSPI_AHB_BUFFERS(n) \ { \ .masters = DT_INST_PROP(n, ahb_buffers_masters), \ .sizes = DT_INST_PROP(n, ahb_buffers_sizes), \ .allMasters = (bool)DT_INST_PROP(n, ahb_buffers_all_masters), \ } #define QSPI_DLL_CFG(n, side, side_upper) \ IF_ENABLED(FEATURE_QSPI_HAS_DLL, ( \ .dllSettings##side_upper = { \ .dllMode = _CONCAT(QSPI_IP_DLL_, \ DT_INST_STRING_UPPER_TOKEN(n, side##_dll_mode)), \ .freqEnable = DT_INST_PROP(n, side##_dll_freq_enable), \ .coarseDelay = DT_INST_PROP(n, side##_dll_coarse_delay), \ .fineDelay = DT_INST_PROP(n, side##_dll_fine_delay), \ .tapSelect = DT_INST_PROP(n, side##_dll_tap_select), \ IF_ENABLED(FEATURE_QSPI_DLL_LOOPCONTROL, ( \ .referenceCounter = DT_INST_PROP(n, side##_dll_ref_counter), \ .resolution = DT_INST_PROP(n, side##_dll_resolution), \ )) \ }, \ )) #define QSPI_READ_MODE(n, side, side_upper) \ _CONCAT(QSPI_IP_READ_MODE_, DT_INST_STRING_UPPER_TOKEN(n, side##_rx_clock_source)) #define QSPI_IDLE_SIGNAL_DRIVE(n, side, side_upper) \ IF_ENABLED(FEATURE_QSPI_CONFIGURABLE_ISD, ( \ .io2IdleValue##side_upper = (uint8_t)DT_INST_PROP(n, side##_io2_idle_high),\ .io3IdleValue##side_upper = (uint8_t)DT_INST_PROP(n, side##_io3_idle_high),\ )) #define QSPI_PORT_SIZE_FN(node_id, side_upper, port) \ COND_CODE_1(IS_EQ(DT_REG_ADDR(node_id), QSPI_PCSF##side_upper##port), \ (COND_CODE_1(DT_NODE_HAS_STATUS(node_id, okay), \ (.memSize##side_upper##port = DT_PROP(node_id, size) / 8,), \ (.memSize##side_upper##port = 0,))), \ (EMPTY)) 
#define QSPI_PORT_SIZE(n, side_upper) \ DT_INST_FOREACH_CHILD_VARGS(n, QSPI_PORT_SIZE_FN, side_upper, 1) \ DT_INST_FOREACH_CHILD_VARGS(n, QSPI_PORT_SIZE_FN, side_upper, 2) #define QSPI_SIDE_CFG(n, side, side_upper) \ QSPI_IDLE_SIGNAL_DRIVE(n, side, side_upper) \ QSPI_DLL_CFG(n, side, side_upper) \ QSPI_PORT_SIZE(n, side_upper) \ .readMode##side_upper = QSPI_READ_MODE(n, side, side_upper), #define MEMC_NXP_S32_QSPI_CONTROLLER_CONFIG(n) \ BUILD_ASSERT(DT_INST_PROP_LEN(n, ahb_buffers_masters) == QSPI_IP_AHB_BUFFERS, \ "ahb-buffers-masters must be of size QSPI_IP_AHB_BUFFERS"); \ BUILD_ASSERT(DT_INST_PROP_LEN(n, ahb_buffers_sizes) == QSPI_IP_AHB_BUFFERS, \ "ahb-buffers-sizes must be of size QSPI_IP_AHB_BUFFERS"); \ BUILD_ASSERT( \ _CONCAT(FEATURE_QSPI_, DT_INST_STRING_UPPER_TOKEN(n, a_rx_clock_source)) == 1,\ "a-rx-clock-source source mode selected is not supported"); \ \ static const Qspi_Ip_ControllerConfigType \ memc_nxp_s32_qspi_controller_cfg_##n = { \ .csHoldTime = DT_INST_PROP(n, cs_hold_time), \ .csSetupTime = DT_INST_PROP(n, cs_setup_time), \ .sampleDelay = QSPI_SAMPLE_DELAY(n), \ .samplePhase = QSPI_SAMPLE_PHASE(n), \ .ahbConfig = QSPI_AHB_BUFFERS(n), \ QSPI_SIDE_CFG(n, a, A) \ QSPI_DATA_CFG(n) \ QSPI_ADDR_CFG(n) \ QSPI_BYTES_SWAP_ADDR(n) \ } #define MEMC_NXP_S32_QSPI_INIT_DEVICE(n) \ PINCTRL_DT_INST_DEFINE(n); \ MEMC_NXP_S32_QSPI_CONTROLLER_CONFIG(n); \ static struct memc_nxp_s32_qspi_data memc_nxp_s32_qspi_data_##n; \ static const struct memc_nxp_s32_qspi_config memc_nxp_s32_qspi_config_##n = { \ .base = (QuadSPI_Type *)DT_INST_REG_ADDR(n), \ .controller_cfg = &memc_nxp_s32_qspi_controller_cfg_##n, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ }; \ DEVICE_DT_INST_DEFINE(n, \ memc_nxp_s32_qspi_init, \ NULL, \ &memc_nxp_s32_qspi_data_##n, \ &memc_nxp_s32_qspi_config_##n, \ POST_KERNEL, \ CONFIG_MEMC_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(MEMC_NXP_S32_QSPI_INIT_DEVICE) ```
/content/code_sandbox/drivers/memc/memc_nxp_s32_qspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,084
```unknown # Smartbond Cryptographic Accelerator configuration options config MEMC_SMARTBOND bool "Smartbond NOR/PSRAM memory controller" depends on DT_HAS_RENESAS_SMARTBOND_NOR_PSRAM_ENABLED default y help Enable Smartbond NOR/PSRAM memory controller. ```
/content/code_sandbox/drivers/memc/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
66
```c /* * */ #include <zephyr/device.h> #include <soc.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(memc_stm32, CONFIG_MEMC_LOG_LEVEL); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_fmc) #define DT_DRV_COMPAT st_stm32_fmc #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_fmc) #define DT_DRV_COMPAT st_stm32h7_fmc #else #error "No compatible FMC devicetree node found" #endif /* This symbol takes the value 1 if one of the device instances */ /* is configured in dts with a domain clock */ #if STM32_DT_INST_DEV_DOMAIN_CLOCK_SUPPORT #define STM32_FMC_DOMAIN_CLOCK_SUPPORT 1 #else #define STM32_FMC_DOMAIN_CLOCK_SUPPORT 0 #endif struct memc_stm32_config { uint32_t fmc; const struct stm32_pclken *pclken; size_t pclk_len; const struct pinctrl_dev_config *pcfg; }; static int memc_stm32_init(const struct device *dev) { const struct memc_stm32_config *config = dev->config; int r; const struct device *clk; /* configure pinmux */ r = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (r < 0) { LOG_ERR("FMC pinctrl setup failed (%d)", r); return r; } /* enable FMC peripheral clock */ clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } r = clock_control_on(clk, (clock_control_subsys_t)&config->pclken[0]); if (r < 0) { LOG_ERR("Could not initialize FMC clock (%d)", r); return r; } if (IS_ENABLED(STM32_FMC_DOMAIN_CLOCK_SUPPORT) && (config->pclk_len > 1)) { /* Enable FMC clock source */ r = clock_control_configure(clk, (clock_control_subsys_t)&config->pclken[1], NULL); if (r < 0) { LOG_ERR("Could not select FMC clock (%d)", r); return r; } } #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_fmc) #if (DT_ENUM_IDX(DT_DRV_INST(0), st_mem_swap) == 1) /* sdram-sram */ MODIFY_REG(FMC_Bank1_R->BTCR[0], FMC_BCR1_BMAP, FMC_BCR1_BMAP_0); #elif (DT_ENUM_IDX(DT_DRV_INST(0), st_mem_swap) == 2) /* sdramb2 */ 
MODIFY_REG(FMC_Bank1_R->BTCR[0], FMC_BCR1_BMAP, FMC_BCR1_BMAP_1); #endif #endif return 0; } PINCTRL_DT_INST_DEFINE(0); static const struct stm32_pclken pclken[] = STM32_DT_INST_CLOCKS(0); static const struct memc_stm32_config config = { .fmc = DT_INST_REG_ADDR(0), .pclken = pclken, .pclk_len = DT_INST_NUM_CLOCKS(0), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; DEVICE_DT_INST_DEFINE(0, memc_stm32_init, NULL, NULL, &config, POST_KERNEL, CONFIG_MEMC_INIT_PRIORITY, NULL); ```
/content/code_sandbox/drivers/memc/memc_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
795
```c /* * * * Based on implementation of fsbl in: * path_to_url */ #define DT_DRV_COMPAT sifive_fu740_c000_ddr #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/logging/log.h> #include <soc.h> #include "sifive_ddrregs.h" LOG_MODULE_REGISTER(sifive_ddr); #define DRAM_CLASS_OFFSET 8 #define DRAM_CLASS_DDR4 0xA #define OPTIMAL_RMODW_EN BIT(0) #define DISABLE_RD_INTERLEAVE BIT(16) #define OUT_OF_RANGE BIT(1) #define MULTIPLE_OUT_OF_RANGE BIT(2) #define PORT_COMMAND_CHANNEL_ERROR BIT(7) #define MC_INIT_COMPLETE BIT(8) #define LEVELING_OPERATION_COMPLETED BIT(22) #define DFI_PHY_WRLELV_MODE BIT(24) #define DFI_PHY_RDLVL_MODE BIT(24) #define DFI_PHY_RDLVL_GATE_MODE BIT(0) #define VREF_EN BIT(24) #define PORT_ADDR_PROTECTION_EN BIT(0) #define AXI0_ADDRESS_RANGE_ENABLE BIT(8) #define AXI0_RANGE_PROT_BITS_0 (BIT(24) | BIT(25)); #define RDLVL_EN BIT(16) #define RDLVL_GATE_EN BIT(24) #define WRLVL_EN BIT(0) #define PHY_RX_CAL_DQ0_0_OFFSET 0 #define PHY_RX_CAL_DQ1_0_OFFSET 16 #define DDR_CTL_REG(d, i) (*(d->ddrctl + i)) #define DDR_PHY_REG(d, i) (*(d->ddrphy + i)) struct ddr_ctrl_data { volatile uint32_t *const ddrctl; volatile uint32_t *const ddrphy; volatile uint32_t *const ddr_physical_filter; volatile uint32_t *const ddr_start; const size_t ddr_size; }; static inline void phy_reset(struct ddr_ctrl_data *ddr_ctrl) { unsigned int i; for (i = 1152; i <= 1214; i++) { DDR_PHY_REG(ddr_ctrl, i) = ddr_phy_settings[i]; } for (i = 0; i <= 1151; i++) { DDR_PHY_REG(ddr_ctrl, i) = ddr_phy_settings[i]; } } static inline void ddr_writeregmap(struct ddr_ctrl_data *ddr_ctrl) { unsigned int i; for (i = 0; i <= 264; i++) { DDR_CTL_REG(ddr_ctrl, i) = ddr_ctl_settings[i]; } phy_reset(ddr_ctrl); } static inline uint32_t ddr_getdramclass(struct ddr_ctrl_data *ddr_ctrl) { return ((DDR_CTL_REG(ddr_ctrl, 0) >> DRAM_CLASS_OFFSET) & 0xF); } static inline void check_errata(uint32_t regbase, uint32_t updownreg) { uint64_t fails = 0; uint32_t bit, dq; for 
(bit = 0, dq = 0; bit < 2; bit++, dq++) { uint32_t phy_rx_cal_dqn_0_offset; if (bit == 0) { phy_rx_cal_dqn_0_offset = PHY_RX_CAL_DQ0_0_OFFSET; } else { phy_rx_cal_dqn_0_offset = PHY_RX_CAL_DQ1_0_OFFSET; } uint32_t down = (updownreg >> phy_rx_cal_dqn_0_offset) & 0x3F; uint32_t up = (updownreg >> (phy_rx_cal_dqn_0_offset + 6)) & 0x3F; uint8_t failc0 = ((down == 0) && (up == 0x3F)); uint8_t failc1 = ((up == 0) && (down == 0x3F)); /* print error message on failure */ if (failc0 || failc1) { if (fails == 0) { LOG_ERR("DDR error in fixing up"); } char slicelsc = '0'; char slicemsc = '0'; fails |= (1 << dq); slicelsc += (dq % 10); slicemsc += (dq / 10); LOG_ERR("S %c%c%c", slicemsc, slicelsc, failc0 ? 'U' : 'D'); } } } static inline uint64_t ddr_phy_fixup(struct ddr_ctrl_data *ddr_ctrl) { /* return bitmask of failed lanes */ uint32_t slicebase = 0; uint32_t updownreg; /* check errata condition */ for (uint32_t slice = 0; slice < 8; slice++) { uint32_t regbase = slicebase + 34; for (uint32_t reg = 0 ; reg < 4; reg++) { updownreg = DDR_PHY_REG(ddr_ctrl, (regbase + reg)); check_errata(regbase, updownreg); } slicebase += 128; } return (0); } static int ddr_init(const struct device *dev) { struct ddr_ctrl_data *ddr_ctrl = dev->data; LOG_DBG("start: 0x%lx", (uintptr_t)ddr_ctrl->ddr_start); LOG_DBG("size: 0x%lx", ddr_ctrl->ddr_size); ddr_writeregmap(ddr_ctrl); DDR_CTL_REG(ddr_ctrl, 120) |= DISABLE_RD_INTERLEAVE; DDR_CTL_REG(ddr_ctrl, 21) &= ~OPTIMAL_RMODW_EN; DDR_CTL_REG(ddr_ctrl, 170) |= WRLVL_EN | DFI_PHY_WRLELV_MODE; DDR_CTL_REG(ddr_ctrl, 181) |= DFI_PHY_RDLVL_MODE; DDR_CTL_REG(ddr_ctrl, 260) |= RDLVL_EN; DDR_CTL_REG(ddr_ctrl, 260) |= RDLVL_GATE_EN; DDR_CTL_REG(ddr_ctrl, 182) |= DFI_PHY_RDLVL_GATE_MODE; if (ddr_getdramclass(ddr_ctrl) == DRAM_CLASS_DDR4) { DDR_CTL_REG(ddr_ctrl, 184) |= VREF_EN; } DDR_CTL_REG(ddr_ctrl, 136) |= LEVELING_OPERATION_COMPLETED; DDR_CTL_REG(ddr_ctrl, 136) |= MC_INIT_COMPLETE; DDR_CTL_REG(ddr_ctrl, 136) |= OUT_OF_RANGE | MULTIPLE_OUT_OF_RANGE; /* 
Setup range protection */ size_t end_addr_16Kblocks = ((ddr_ctrl->ddr_size >> 14) & 0x7FFFFF) - 1; DDR_CTL_REG(ddr_ctrl, 209) = 0x0; DDR_CTL_REG(ddr_ctrl, 210) = ((uint32_t) end_addr_16Kblocks); DDR_CTL_REG(ddr_ctrl, 212) = 0x0; DDR_CTL_REG(ddr_ctrl, 214) = 0x0; DDR_CTL_REG(ddr_ctrl, 216) = 0x0; DDR_CTL_REG(ddr_ctrl, 224) |= AXI0_RANGE_PROT_BITS_0; DDR_CTL_REG(ddr_ctrl, 225) = 0xFFFFFFFF; DDR_CTL_REG(ddr_ctrl, 208) |= AXI0_ADDRESS_RANGE_ENABLE; DDR_CTL_REG(ddr_ctrl, 208) |= PORT_ADDR_PROTECTION_EN; /* Mask port command error interrupt */ DDR_CTL_REG(ddr_ctrl, 136) |= PORT_COMMAND_CHANNEL_ERROR; DDR_CTL_REG(ddr_ctrl, 0) |= 1; /* WAIT for initialization complete : bit 8 of INT_STATUS (DENALI_CTL_132) 0x210 */ while ((DDR_CTL_REG(ddr_ctrl, 132) & MC_INIT_COMPLETE) != 0) { ; } uint64_t ddr_end = (uint64_t)ddr_ctrl->ddr_start + ddr_ctrl->ddr_size; /* Disable the BusBlocker in front of the controller AXI slave ports */ volatile uint64_t *filterreg = (volatile uint64_t *)ddr_ctrl->ddr_physical_filter; filterreg[0] = 0x0f00000000000000UL | (ddr_end >> 2); /* ^^ RWX + TOR */ ddr_phy_fixup(ddr_ctrl); return 0; } #define DDRCTL_NODE DT_NODELABEL(dmc) static struct ddr_ctrl_data ddrctl_private_data = { .ddrctl = (uint32_t *)DT_REG_ADDR_BY_IDX(DDRCTL_NODE, 0), .ddrphy = (uint32_t *)DT_REG_ADDR_BY_IDX(DDRCTL_NODE, 1), .ddr_physical_filter = (uint32_t *)DT_REG_ADDR_BY_IDX(DDRCTL_NODE, 2), .ddr_start = (uint32_t *)DT_REG_ADDR(DT_NODELABEL(ram0)), .ddr_size = DT_REG_SIZE(DT_NODELABEL(ram0)), }; DEVICE_DT_INST_DEFINE(0, ddr_init, NULL, &ddrctl_private_data, NULL, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, NULL); ```
/content/code_sandbox/drivers/memc/sifive_ddr.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,038
```linker script /* * */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(sdram1), okay) GROUP_START(SDRAM1) SECTION_PROLOGUE(_STM32_SDRAM1_SECTION_NAME, (NOLOAD),) { *(.stm32_sdram1) *(".stm32_sdram1.*") } GROUP_LINK_IN(SDRAM1) GROUP_END(SDRAM1) #endif #if DT_NODE_HAS_STATUS(DT_NODELABEL(sdram2), okay) GROUP_START(SDRAM2) SECTION_PROLOGUE(_STM32_SDRAM2_SECTION_NAME, (NOLOAD),) { *(.stm32_sdram2) *(".stm32_sdram2.*") } GROUP_LINK_IN(SDRAM2) GROUP_END(SDRAM2) #endif ```
/content/code_sandbox/drivers/memc/memc_stm32_sdram.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
164
```c /* * */ #define DT_DRV_COMPAT st_stm32_fmc_sdram #include <zephyr/device.h> #include <zephyr/kernel.h> #include <soc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(memc_stm32_sdram, CONFIG_MEMC_LOG_LEVEL); /** SDRAM controller register offset. */ #define SDRAM_OFFSET 0x140U /** FMC SDRAM controller bank configuration fields. */ struct memc_stm32_sdram_bank_config { FMC_SDRAM_InitTypeDef init; FMC_SDRAM_TimingTypeDef timing; }; /** FMC SDRAM controller configuration fields. */ struct memc_stm32_sdram_config { FMC_SDRAM_TypeDef *sdram; uint32_t power_up_delay; uint8_t num_auto_refresh; uint16_t mode_register; uint16_t refresh_rate; const struct memc_stm32_sdram_bank_config *banks; size_t banks_len; }; static int memc_stm32_sdram_init(const struct device *dev) { const struct memc_stm32_sdram_config *config = dev->config; SDRAM_HandleTypeDef sdram = { 0 }; FMC_SDRAM_CommandTypeDef sdram_cmd = { 0 }; sdram.Instance = config->sdram; for (size_t i = 0U; i < config->banks_len; i++) { sdram.State = HAL_SDRAM_STATE_RESET; memcpy(&sdram.Init, &config->banks[i].init, sizeof(sdram.Init)); (void)HAL_SDRAM_Init( &sdram, (FMC_SDRAM_TimingTypeDef *)&config->banks[i].timing); } /* SDRAM initialization sequence */ if (config->banks_len == 2U) { sdram_cmd.CommandTarget = FMC_SDRAM_CMD_TARGET_BANK1_2; } else if (config->banks[0].init.SDBank == FMC_SDRAM_BANK1) { sdram_cmd.CommandTarget = FMC_SDRAM_CMD_TARGET_BANK1; } else { sdram_cmd.CommandTarget = FMC_SDRAM_CMD_TARGET_BANK2; } sdram_cmd.AutoRefreshNumber = config->num_auto_refresh; sdram_cmd.ModeRegisterDefinition = config->mode_register; /* enable clock */ sdram_cmd.CommandMode = FMC_SDRAM_CMD_CLK_ENABLE; (void)HAL_SDRAM_SendCommand(&sdram, &sdram_cmd, 0U); k_usleep(config->power_up_delay); /* pre-charge all */ sdram_cmd.CommandMode = FMC_SDRAM_CMD_PALL; (void)HAL_SDRAM_SendCommand(&sdram, &sdram_cmd, 0U); /* auto-refresh */ sdram_cmd.CommandMode = FMC_SDRAM_CMD_AUTOREFRESH_MODE; (void)HAL_SDRAM_SendCommand(&sdram, 
&sdram_cmd, 0U); /* load mode */ sdram_cmd.CommandMode = FMC_SDRAM_CMD_LOAD_MODE; (void)HAL_SDRAM_SendCommand(&sdram, &sdram_cmd, 0U); /* program refresh count */ (void)HAL_SDRAM_ProgramRefreshRate(&sdram, config->refresh_rate); return 0; } /** SDRAM bank/s configuration initialization macro. */ #define BANK_CONFIG(node_id) \ { .init = { \ .SDBank = DT_REG_ADDR(node_id), \ .ColumnBitsNumber = DT_PROP_BY_IDX(node_id, st_sdram_control, 0), \ .RowBitsNumber = DT_PROP_BY_IDX(node_id, st_sdram_control, 1), \ .MemoryDataWidth = DT_PROP_BY_IDX(node_id, st_sdram_control, 2), \ .InternalBankNumber = DT_PROP_BY_IDX(node_id, st_sdram_control, 3),\ .CASLatency = DT_PROP_BY_IDX(node_id, st_sdram_control, 4), \ .WriteProtection = FMC_SDRAM_WRITE_PROTECTION_DISABLE, \ .SDClockPeriod = DT_PROP_BY_IDX(node_id, st_sdram_control, 5), \ .ReadBurst = DT_PROP_BY_IDX(node_id, st_sdram_control, 6), \ .ReadPipeDelay = DT_PROP_BY_IDX(node_id, st_sdram_control, 7), \ }, \ .timing = { \ .LoadToActiveDelay = DT_PROP_BY_IDX(node_id, st_sdram_timing, 0), \ .ExitSelfRefreshDelay = \ DT_PROP_BY_IDX(node_id, st_sdram_timing, 1), \ .SelfRefreshTime = DT_PROP_BY_IDX(node_id, st_sdram_timing, 2), \ .RowCycleDelay = DT_PROP_BY_IDX(node_id, st_sdram_timing, 3), \ .WriteRecoveryTime = DT_PROP_BY_IDX(node_id, st_sdram_timing, 4), \ .RPDelay = DT_PROP_BY_IDX(node_id, st_sdram_timing, 5), \ .RCDDelay = DT_PROP_BY_IDX(node_id, st_sdram_timing, 6), \ } \ }, /** SDRAM bank/s configuration. */ static const struct memc_stm32_sdram_bank_config bank_config[] = { DT_INST_FOREACH_CHILD(0, BANK_CONFIG) }; /** SDRAM configuration. 
*/ static const struct memc_stm32_sdram_config config = { .sdram = (FMC_SDRAM_TypeDef *)(DT_REG_ADDR(DT_INST_PARENT(0)) + SDRAM_OFFSET), .power_up_delay = DT_INST_PROP(0, power_up_delay), .num_auto_refresh = DT_INST_PROP(0, num_auto_refresh), .mode_register = DT_INST_PROP(0, mode_register), .refresh_rate = DT_INST_PROP(0, refresh_rate), .banks = bank_config, .banks_len = ARRAY_SIZE(bank_config), }; DEVICE_DT_INST_DEFINE(0, memc_stm32_sdram_init, NULL, NULL, &config, POST_KERNEL, CONFIG_MEMC_INIT_PRIORITY, NULL); ```
/content/code_sandbox/drivers/memc/memc_stm32_sdram.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,295
```unknown # Memory controller configuration options menuconfig MEMC bool "Memory controller drivers [EXPERIMENTAL]" select EXPERIMENTAL help Add support for memory controllers if MEMC module = MEMC module-str = memc source "subsys/logging/Kconfig.template.log_config" config MEMC_INIT_PRIORITY int "Initialization priority" default 0 help Memory controllers initialization priority. source "drivers/memc/Kconfig.stm32" source "drivers/memc/Kconfig.mcux" source "drivers/memc/Kconfig.sam" source "drivers/memc/Kconfig.sifive" source "drivers/memc/Kconfig.nxp_s32" source "drivers/memc/Kconfig.smartbond" source "drivers/memc/Kconfig.mspi" endif ```
/content/code_sandbox/drivers/memc/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
168
```c /* * */ /* */ #define DT_DRV_COMPAT nxp_imx_flexspi_w956a8mbya #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include "memc_mcux_flexspi.h" LOG_MODULE_REGISTER(memc_flexspi_w956a8mbya, CONFIG_MEMC_LOG_LEVEL); enum { READ_DATA, WRITE_DATA, READ_REG, WRITE_REG, }; struct memc_flexspi_w956a8mbya_config { flexspi_port_t port; flexspi_device_config_t config; }; /* Device variables used in critical sections should be in this structure */ struct memc_flexspi_w956a8mbya_data { const struct device *controller; }; static const uint32_t memc_flexspi_w956a8mbya_lut[][4] = { /* Read Data */ [READ_DATA] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xA0, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10, kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x07), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, /* Write Data */ [WRITE_DATA] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10, kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x07), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, /* Read Register */ [READ_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xE0, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10, kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x07), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00), }, /* Write Register */ [WRITE_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x60, kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10, kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x04), }, }; static int 
memc_flexspi_w956a8mbya_get_vendor_id(const struct device *dev, uint16_t *vendor_id) { const struct memc_flexspi_w956a8mbya_config *config = dev->config; struct memc_flexspi_w956a8mbya_data *data = dev->data; uint32_t buffer = 0; int ret; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = config->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_REG, .data = &buffer, .dataSize = 4, }; LOG_DBG("Reading id"); ret = memc_flexspi_transfer(data->controller, &transfer); *vendor_id = buffer & 0xffff; return ret; } static int memc_flexspi_w956a8mbya_init(const struct device *dev) { const struct memc_flexspi_w956a8mbya_config *config = dev->config; struct memc_flexspi_w956a8mbya_data *data = dev->data; uint16_t vendor_id; if (!device_is_ready(data->controller)) { LOG_ERR("Controller device not ready"); return -ENODEV; } if (memc_flexspi_set_device_config(data->controller, &config->config, (const uint32_t *) memc_flexspi_w956a8mbya_lut, sizeof(memc_flexspi_w956a8mbya_lut) / MEMC_FLEXSPI_CMD_SIZE, config->port)) { LOG_ERR("Could not set device configuration"); return -EINVAL; } memc_flexspi_reset(data->controller); if (memc_flexspi_w956a8mbya_get_vendor_id(dev, &vendor_id)) { LOG_ERR("Could not read vendor id"); return -EIO; } LOG_DBG("Vendor id: 0x%0x", vendor_id); return 0; } #define CONCAT3(x, y, z) x ## y ## z #define CS_INTERVAL_UNIT(unit) \ CONCAT3(kFLEXSPI_CsIntervalUnit, unit, SckCycle) #define AHB_WRITE_WAIT_UNIT(unit) \ CONCAT3(kFLEXSPI_AhbWriteWaitUnit, unit, AhbCycle) #define MEMC_FLEXSPI_DEVICE_CONFIG(n) \ { \ .flexspiRootClk = DT_INST_PROP(n, spi_max_frequency), \ .isSck2Enabled = false, \ .flashSize = DT_INST_PROP(n, size) / 8 / KB(1), \ .CSIntervalUnit = \ CS_INTERVAL_UNIT( \ DT_INST_PROP(n, cs_interval_unit)), \ .CSInterval = DT_INST_PROP(n, cs_interval), \ .CSHoldTime = DT_INST_PROP(n, cs_hold_time), \ .CSSetupTime = DT_INST_PROP(n, cs_setup_time), \ .dataValidTime = DT_INST_PROP(n, data_valid_time), \ .columnspace = DT_INST_PROP(n, 
column_space), \ .enableWordAddress = DT_INST_PROP(n, word_addressable), \ .AWRSeqIndex = WRITE_DATA, \ .AWRSeqNumber = 1, \ .ARDSeqIndex = READ_DATA, \ .ARDSeqNumber = 1, \ .AHBWriteWaitUnit = \ AHB_WRITE_WAIT_UNIT( \ DT_INST_PROP(n, ahb_write_wait_unit)), \ .AHBWriteWaitInterval = \ DT_INST_PROP(n, ahb_write_wait_interval), \ .enableWriteMask = true, \ } \ #define MEMC_FLEXSPI_W956A8MBYA(n) \ static const struct memc_flexspi_w956a8mbya_config \ memc_flexspi_w956a8mbya_config_##n = { \ .port = DT_INST_REG_ADDR(n), \ .config = MEMC_FLEXSPI_DEVICE_CONFIG(n), \ }; \ \ static struct memc_flexspi_w956a8mbya_data \ memc_flexspi_w956a8mbya_data_##n = { \ .controller = DEVICE_DT_GET(DT_INST_BUS(n)), \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ memc_flexspi_w956a8mbya_init, \ NULL, \ &memc_flexspi_w956a8mbya_data_##n, \ &memc_flexspi_w956a8mbya_config_##n, \ POST_KERNEL, \ CONFIG_MEMC_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI_W956A8MBYA) ```
/content/code_sandbox/drivers/memc/memc_mcux_flexspi_w956a8mbya.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,938
```c /* * */ #define DT_DRV_COMPAT syscon #include <errno.h> #include <zephyr/arch/cpu.h> #include <zephyr/sys/util.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/syscon.h> #include "syscon_common.h" struct syscon_generic_config { DEVICE_MMIO_ROM; uint8_t reg_width; }; struct syscon_generic_data { DEVICE_MMIO_RAM; size_t size; }; static int syscon_generic_get_base(const struct device *dev, uintptr_t *addr) { if (!dev) { return -ENODEV; } *addr = DEVICE_MMIO_GET(dev); return 0; } static int syscon_generic_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct syscon_generic_config *config; struct syscon_generic_data *data; uintptr_t base_address; if (!dev) { return -ENODEV; } data = dev->data; config = dev->config; if (!val) { return -EINVAL; } if (syscon_sanitize_reg(&reg, data->size, config->reg_width)) { return -EINVAL; } base_address = DEVICE_MMIO_GET(dev); switch (config->reg_width) { case 1: *val = sys_read8(base_address + reg); break; case 2: *val = sys_read16(base_address + reg); break; case 4: *val = sys_read32(base_address + reg); break; default: return -EINVAL; } return 0; } static int syscon_generic_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct syscon_generic_config *config; struct syscon_generic_data *data; uintptr_t base_address; if (!dev) { return -ENODEV; } data = dev->data; config = dev->config; if (syscon_sanitize_reg(&reg, data->size, config->reg_width)) { return -EINVAL; } base_address = DEVICE_MMIO_GET(dev); switch (config->reg_width) { case 1: sys_write8(val, (base_address + reg)); break; case 2: sys_write16(val, (base_address + reg)); break; case 4: sys_write32(val, (base_address + reg)); break; default: return -EINVAL; } return 0; } static int syscon_generic_get_size(const struct device *dev, size_t *size) { struct syscon_generic_data *data = dev->data; *size = data->size; return 0; } static const struct syscon_driver_api syscon_generic_driver_api = { 
.read = syscon_generic_read_reg, .write = syscon_generic_write_reg, .get_base = syscon_generic_get_base, .get_size = syscon_generic_get_size, }; static int syscon_generic_init(const struct device *dev) { DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); return 0; } #define SYSCON_INIT(inst) \ static const struct syscon_generic_config syscon_generic_config_##inst = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(inst)), \ .reg_width = DT_INST_PROP_OR(inst, reg_io_width, 4), \ }; \ static struct syscon_generic_data syscon_generic_data_##inst = { \ .size = DT_INST_REG_SIZE(inst), \ }; \ DEVICE_DT_INST_DEFINE(inst, syscon_generic_init, NULL, &syscon_generic_data_##inst, \ &syscon_generic_config_##inst, PRE_KERNEL_1, \ CONFIG_SYSCON_INIT_PRIORITY, &syscon_generic_driver_api); DT_INST_FOREACH_STATUS_OKAY(SYSCON_INIT); ```
/content/code_sandbox/drivers/syscon/syscon.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
832
```c /* * */ #define DT_DRV_COMPAT st_stm32_fmc_nor_psram #include <zephyr/device.h> #include <soc.h> #include <errno.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(memc_stm32_nor_psram, CONFIG_MEMC_LOG_LEVEL); /** SRAM base register offset, see FMC_Bank1_R_BASE */ #define SRAM_OFFSET 0x0000UL /** SRAM extended mode register offset, see FMC_Bank1E_R_BASE */ #define SRAM_EXT_OFFSET 0x0104UL /** FMC NOR/PSRAM controller bank configuration fields. */ struct memc_stm32_nor_psram_bank_config { FMC_NORSRAM_InitTypeDef init; FMC_NORSRAM_TimingTypeDef timing; FMC_NORSRAM_TimingTypeDef timing_ext; }; /** FMC NOR/PSRAM controller configuration fields. */ struct memc_stm32_nor_psram_config { FMC_NORSRAM_TypeDef *nor_psram; FMC_NORSRAM_EXTENDED_TypeDef *extended; const struct memc_stm32_nor_psram_bank_config *banks; size_t banks_len; }; static int memc_stm32_nor_init(const struct memc_stm32_nor_psram_config *config, const struct memc_stm32_nor_psram_bank_config *bank_config) { FMC_NORSRAM_TimingTypeDef *ext_timing; NOR_HandleTypeDef hnor = { 0 }; hnor.Instance = config->nor_psram; hnor.Extended = config->extended; memcpy(&hnor.Init, &bank_config->init, sizeof(hnor.Init)); if (bank_config->init.ExtendedMode == FMC_EXTENDED_MODE_ENABLE) { ext_timing = (FMC_NORSRAM_TimingTypeDef *)&bank_config->timing_ext; } else { ext_timing = NULL; } if (HAL_NOR_Init(&hnor, (FMC_NORSRAM_TimingTypeDef *)&bank_config->timing, ext_timing) != HAL_OK) { return -ENODEV; } return 0; } static int memc_stm32_psram_init(const struct memc_stm32_nor_psram_config *config, const struct memc_stm32_nor_psram_bank_config *bank_config) { FMC_NORSRAM_TimingTypeDef *ext_timing; SRAM_HandleTypeDef hsram = { 0 }; hsram.Instance = config->nor_psram; hsram.Extended = config->extended; memcpy(&hsram.Init, &bank_config->init, sizeof(hsram.Init)); if (bank_config->init.ExtendedMode == FMC_EXTENDED_MODE_ENABLE) { ext_timing = (FMC_NORSRAM_TimingTypeDef *)&bank_config->timing_ext; } else { ext_timing = NULL; } if 
(HAL_SRAM_Init(&hsram, (FMC_NORSRAM_TimingTypeDef *)&bank_config->timing, ext_timing) != HAL_OK) { return -ENODEV; } return 0; } static int memc_stm32_nor_psram_init(const struct device *dev) { const struct memc_stm32_nor_psram_config *config = dev->config; uint32_t memory_type; size_t bank_idx; int ret = 0; for (bank_idx = 0U; bank_idx < config->banks_len; ++bank_idx) { memory_type = config->banks[bank_idx].init.MemoryType; switch (memory_type) { case FMC_MEMORY_TYPE_NOR: ret = memc_stm32_nor_init(config, &config->banks[bank_idx]); break; case FMC_MEMORY_TYPE_PSRAM: __fallthrough; case FMC_MEMORY_TYPE_SRAM: ret = memc_stm32_psram_init(config, &config->banks[bank_idx]); break; default: ret = -ENOTSUP; break; } if (ret < 0) { LOG_ERR("Unable to initialize memory type: " "0x%08X, NSBank: %d, err: %d", memory_type, config->banks[bank_idx].init.NSBank, ret); goto end; } } end: return ret; } /** SDRAM bank/s configuration initialization macro. */ #define BANK_CONFIG(node_id) \ { .init = { \ .NSBank = DT_REG_ADDR(node_id), \ .DataAddressMux = DT_PROP_BY_IDX(node_id, st_control, 0), \ .MemoryType = DT_PROP_BY_IDX(node_id, st_control, 1), \ .MemoryDataWidth = DT_PROP_BY_IDX(node_id, st_control, 2), \ .BurstAccessMode = DT_PROP_BY_IDX(node_id, st_control, 3), \ .WaitSignalPolarity = DT_PROP_BY_IDX(node_id, st_control, 4), \ .WaitSignalActive = DT_PROP_BY_IDX(node_id, st_control, 5), \ .WriteOperation = DT_PROP_BY_IDX(node_id, st_control, 6), \ .WaitSignal = DT_PROP_BY_IDX(node_id, st_control, 7), \ .ExtendedMode = DT_PROP_BY_IDX(node_id, st_control, 8), \ .AsynchronousWait = DT_PROP_BY_IDX(node_id, st_control, 9), \ .WriteBurst = DT_PROP_BY_IDX(node_id, st_control, 10), \ .ContinuousClock = DT_PROP_BY_IDX(node_id, st_control, 11), \ .WriteFifo = DT_PROP_BY_IDX(node_id, st_control, 12), \ .PageSize = DT_PROP_BY_IDX(node_id, st_control, 13) \ }, \ .timing = { \ .AddressSetupTime = DT_PROP_BY_IDX(node_id, st_timing, 0), \ .AddressHoldTime = DT_PROP_BY_IDX(node_id, st_timing, 
1), \ .DataSetupTime = DT_PROP_BY_IDX(node_id, st_timing, 2), \ .BusTurnAroundDuration = DT_PROP_BY_IDX(node_id, st_timing, 3), \ .CLKDivision = DT_PROP_BY_IDX(node_id, st_timing, 4), \ .DataLatency = DT_PROP_BY_IDX(node_id, st_timing, 5), \ .AccessMode = DT_PROP_BY_IDX(node_id, st_timing, 6), \ }, \ .timing_ext = { \ .AddressSetupTime = DT_PROP_BY_IDX(node_id, st_timing_ext, 0), \ .AddressHoldTime = DT_PROP_BY_IDX(node_id, st_timing_ext, 1), \ .DataSetupTime = DT_PROP_BY_IDX(node_id, st_timing_ext, 2), \ .BusTurnAroundDuration = DT_PROP_BY_IDX(node_id, st_timing_ext, 3), \ .AccessMode = DT_PROP_BY_IDX(node_id, st_timing_ext, 4), \ } \ }, /** SRAM bank/s configuration. */ static const struct memc_stm32_nor_psram_bank_config bank_config[] = { DT_INST_FOREACH_CHILD(0, BANK_CONFIG) }; /** SRAM configuration. */ static const struct memc_stm32_nor_psram_config config = { .nor_psram = (FMC_NORSRAM_TypeDef *)(DT_REG_ADDR(DT_INST_PARENT(0)) + SRAM_OFFSET), .extended = (FMC_NORSRAM_EXTENDED_TypeDef *)(DT_REG_ADDR(DT_INST_PARENT(0)) + SRAM_EXT_OFFSET), .banks = bank_config, .banks_len = ARRAY_SIZE(bank_config), }; DEVICE_DT_INST_DEFINE(0, memc_stm32_nor_psram_init, NULL, NULL, &config, POST_KERNEL, CONFIG_MEMC_INIT_PRIORITY, NULL); ```
/content/code_sandbox/drivers/memc/memc_stm32_nor_psram.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,711
```objective-c /* */ #ifndef DRIVERS_SYSCON_SYSCON_COMMON_H_ #define DRIVERS_SYSCON_SYSCON_COMMON_H_ #include <zephyr/sys/util.h> #ifdef __cplusplus extern "C" { #endif /** * @brief Align and check register address * * @param reg Pointer to the register address in question. * @param reg_size The size of the syscon register region. * @param reg_width The width of a single register (in bytes). * @return 0 if the register read is valid. * @return -EINVAL is the read is invalid. */ static inline int syscon_sanitize_reg(uint16_t *reg, size_t reg_size, uint8_t reg_width) { /* Avoid unaligned readings */ *reg = ROUND_DOWN(*reg, reg_width); /* Check for out-of-bounds readings */ if (*reg >= reg_size) { return -EINVAL; } return 0; } #ifdef __cplusplus } #endif #endif /* DRIVERS_SYSCON_SYSCON_COMMON_H_ */ ```
/content/code_sandbox/drivers/syscon/syscon_common.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
219
```unknown # PCIe RC/EP drivers configuration options source "drivers/pcie/host/Kconfig" source "drivers/pcie/endpoint/Kconfig" ```
/content/code_sandbox/drivers/pcie/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
31
```unknown # SYSCON configuration options # # SYSCON options # menuconfig SYSCON bool "System Controller (SYSCON) drivers" help SYSCON (System Controller) drivers. System controller node represents a register region containing a set of miscellaneous registers. The registers are not cohesive enough to represent as any specific type of device. The typical use-case is for some other node's driver, or platform-specific code, to acquire a reference to the syscon node and extract information from there. if SYSCON module = SYSCON module-str = syscon source "subsys/logging/Kconfig.template.log_config" config SYSCON_GENERIC bool "Generic SYSCON (System Controller) driver" default y depends on DT_HAS_SYSCON_ENABLED help Enable generic SYSCON (System Controller) driver config SYSCON_INIT_PRIORITY int "SYSCON (System Controller) driver init priority" default 50 help This option controls the priority of the syscon device initialization. Higher priority ensures that the device is initialized earlier in the startup cycle. If unsure, leave at default value endif # SYSCON ```
/content/code_sandbox/drivers/syscon/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
262
```unknown # iProc PCIe EP configuration options menuconfig PCIE_EP_IPROC bool "Broadcom iProc PCIe EP driver" help This option enables Broadcom iProc PCIe EP driver. if PCIE_EP_IPROC config PCIE_EP_IPROC_INIT_CFG bool "Re-initialize PCIe MSI/MSIX configurations" config PCIE_EP_IPROC_V2 bool "Version-2 of iProc PCIe EP controller" endif # PCIE_EP_IPROC ```
/content/code_sandbox/drivers/pcie/endpoint/Kconfig.iproc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
97
```objective-c /* * */ #ifndef ZEPHYR_INCLUDE_DRIVERS_PCIE_EP_IPROC_H_ #define ZEPHYR_INCLUDE_DRIVERS_PCIE_EP_IPROC_H_ #include <soc.h> #include <zephyr/sys/util.h> #include "pcie_ep_iproc_regs.h" #define PCIE_LINK_STATUS_CONTROL 0xbc #define PCIE_LINKSPEED_SHIFT 16 #define PCIE_LINKWIDTH_SHIFT 20 #define PCIE_LINKSPEED_MASK 0xf #define PCIE_LINKWIDTH_MASK 0x3f #define PCIE_RC_MODE_MASK 0x1 #define MSI_ADDR_L 0x5c #define MSI_ADDR_H 0x60 #define MSI_DATA 0x64 #define MSI_COUNT_SHIFT 12 #define MSI_COUNT_MASK 0x7000 #define MSI_COUNT_VAL 4 #define MSI_CSR_MASK 0xffffffff #define MSI_EN_MASK 0xf #define MSIX_CAP 0xa0 #define MSIX_FUNC_MASK BIT(30) #define ID_VAL4_OFFSET 0x440 #define MSIX_CONTROL 0x4c0 #define MSIX_TBL_OFF_BIR 0x4c4 #define MSIX_PBA_OFF_BIR 0x4c8 #define MSIX_TBL_B2_10000 0x10002 #define MSIX_PBA_B2_10800 0x10802 #define MSIX_TABLE_BASE 0x20010000 #define MSIX_TABLE_SIZE 16 /* we support 16 MSI-X */ #define MSIX_TBL_ENTRY_SIZE 16 #define MSIX_TBL_ADDR_OFF 0 #define MSIX_TBL_DATA_OFF 8 #define MSIX_TBL_VECTOR_CTRL_OFF 12 #define MSIX_VECTOR_MASK BIT(0) #define MSIX_VECTOR_OFF(x) (MSIX_TABLE_BASE + \ (MSIX_TBL_ENTRY_SIZE * (x))) #define PBA_TABLE_BASE 0x20010800 #define PBA_TABLE_SIZE 0x800 #define PBA_OFFSET(x) (PBA_TABLE_BASE + (4 * ((x) / 32))) #define PENDING_BIT(x) ((x) % 32) #define PAXB_OARR_VALID BIT(0) #ifdef CONFIG_PCIE_EP_IPROC_V2 #define SNOOP_VALID_INTR BIT(3) #define SNOOP_ADDR1_EN BIT(31) #define SNOOP_ADDR1_MASK 0x1fff #define SNOOP_ADDR1 MSIX_CAP #define PMON_LITE_PCIE_INTERRUPT_ENABLE (PMON_LITE_PCIE_BASE + 0xc) #define PMON_LITE_PCIE_INTERRUPT_STATUS (PMON_LITE_PCIE_BASE + 0x10) #define PMON_LITE_PCIE_INTERRUPT_CLEAR (PMON_LITE_PCIE_BASE + 0x14) #define WR_ADDR_CHK_INTR_EN 2 #define PMON_LITE_PCIE_AXI_FILTER_0_CONTROL (PMON_LITE_PCIE_BASE + 0xd4) #define AXI_FILTER_0_ENABLE (BIT(30) | BIT(2) | \ BIT(1) | BIT(0)) #define AXI_FILTER_0_ADDR_START_LOW (PMON_LITE_PCIE_BASE + 0xd8) #define AXI_FILTER_0_ADDR_START_HIGH (PMON_LITE_PCIE_BASE 
+ 0xdc) #define AXI_FILTER_0_ADDR_END_LOW (PMON_LITE_PCIE_BASE + 0xe0) #define AXI_FILTER_0_ADDR_END_HIGH (PMON_LITE_PCIE_BASE + 0xe4) #endif #define PCIE_DEV_CTRL_OFFSET 0x4d8 #define FLR_IN_PROGRESS BIT(27) #define PCIE_TL_CTRL0_OFFSET 0x800 #define AUTO_CLR_FLR_AFTER_DELAY BIT(13) /* Clears FLR after 55ms */ #define AUTO_CLR_CRS_POST_FLR BIT(14) #define PCIE0_FLR_INTR BIT(20) #define PCIE0_FLR_PERST_INTR BIT(21) enum pcie_outbound_map { PCIE_MAP_LOWMEM_IDX, PCIE_MAP_HIGHMEM_IDX, }; struct iproc_pcie_ep_config { struct iproc_pcie_reg *base; /* Base address of PAXB registers */ uint32_t reg_size; uint32_t map_low_base; /* Base addr of outbound mapping at lowmem */ uint32_t map_low_size; uint64_t map_high_base; /* Base addr of outbound mapping at highmem */ uint32_t map_high_size; unsigned int id; const struct device *pl330_dev; uint32_t pl330_tx_chan_id; /* channel used for Device to Host write */ uint32_t pl330_rx_chan_id; /* channel used for Host to Device read */ }; struct iproc_pcie_ep_ctx { struct k_spinlock ob_map_lock; struct k_spinlock raise_irq_lock; struct k_spinlock pba_lock; bool highmem_in_use; bool lowmem_in_use; /* Callback function for reset interrupt */ pcie_ep_reset_callback_t reset_cb[PCIE_RESET_MAX]; /* Callback data for reset interrupt */ void *reset_data[PCIE_RESET_MAX]; }; void iproc_pcie_msix_config(const struct device *dev); void iproc_pcie_msi_config(const struct device *dev); int iproc_pcie_generate_msi(const struct device *dev, const uint32_t msi_num); int iproc_pcie_generate_msix(const struct device *dev, const uint32_t msix_num); void iproc_pcie_func_mask_isr(void *arg); void iproc_pcie_vector_mask_isr(void *arg); #endif /* ZEPHYR_INCLUDE_DRIVERS_PCIE_EP_IPROC_H_ */ ```
/content/code_sandbox/drivers/pcie/endpoint/pcie_ep_iproc.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,221
```c /* * */ #include <zephyr/drivers/pcie/endpoint/pcie_ep.h> #include <zephyr/logging/log.h> #include "pcie_ep_iproc.h" LOG_MODULE_DECLARE(iproc_pcie, CONFIG_PCIE_EP_LOG_LEVEL); /* Helper macro to read 64-bit data using two 32-bit data read */ #define sys_read64(addr) (((uint64_t)(sys_read32(addr + 4)) << 32) | \ sys_read32(addr)) #ifdef PCIE_EP_IPROC_INIT_CFG void iproc_pcie_msix_config(const struct device *dev) { /* * Configure capability of generating 16 messages, * MSI-X Table offset 0x10000 on BAR2, * MSI-X PBA offset 0x10800 on BAR2. */ pcie_ep_conf_write(dev, MSIX_CONTROL, (MSIX_TABLE_SIZE - 1)); pcie_ep_conf_write(dev, MSIX_TBL_OFF_BIR, MSIX_TBL_B2_10000); pcie_ep_conf_write(dev, MSIX_PBA_OFF_BIR, MSIX_PBA_B2_10800); } void iproc_pcie_msi_config(const struct device *dev) { uint32_t data; /* Configure capability of generating 16 messages */ pcie_ep_conf_read(dev, ID_VAL4_OFFSET, &data); data = (data & ~(MSI_COUNT_MASK)) | (MSI_COUNT_VAL << MSI_COUNT_SHIFT); pcie_ep_conf_write(dev, ID_VAL4_OFFSET, data); } #endif int iproc_pcie_generate_msi(const struct device *dev, const uint32_t msi_num) { int ret = 0; #ifdef CONFIG_PCIE_EP_IPROC_V2 uint64_t addr; uint32_t data; pcie_ep_conf_read(dev, MSI_ADDR_H, &data); addr = ((uint64_t)data) << 32; pcie_ep_conf_read(dev, MSI_ADDR_L, &data); addr = addr | data; if (data == 0) { /* * This is mostly the case where the test is being run * from device before host driver sets up MSI. * Returning zero instead of error because of this. 
*/ LOG_WRN("MSI is not setup, skipping MSI"); return 0; } pcie_ep_conf_read(dev, MSI_DATA, &data); data |= msi_num; ret = pcie_ep_xfer_data_memcpy(dev, addr, (uintptr_t *)&data, sizeof(data), PCIE_OB_LOWMEM, DEVICE_TO_HOST); #else const struct iproc_pcie_ep_config *cfg = dev->config; pcie_write32(msi_num, &cfg->base->paxb_pcie_sys_msi_req); #endif return ret; } static int generate_msix(const struct device *dev, const uint32_t msix_num) { int ret; uint64_t addr; uint32_t data; addr = sys_read64(MSIX_VECTOR_OFF(msix_num) + MSIX_TBL_ADDR_OFF); if (addr == 0) { /* * This is mostly the case where the test is being run * from device before host driver has setup MSIX table. * Returning zero instead of error because of this. */ LOG_WRN("MSIX table is not setup, skipping MSIX\n"); ret = 0; goto out; } data = sys_read32(MSIX_VECTOR_OFF(msix_num) + MSIX_TBL_DATA_OFF); ret = pcie_ep_xfer_data_memcpy(dev, addr, (uintptr_t *)&data, sizeof(data), PCIE_OB_LOWMEM, DEVICE_TO_HOST); if (ret < 0) { goto out; } LOG_DBG("msix %d generated\n", msix_num); out: return ret; } #ifdef CONFIG_PCIE_EP_IPROC_V2 static bool is_pcie_function_mask(const struct device *dev) { uint32_t data; pcie_ep_conf_read(dev, MSIX_CAP, &data); return ((data & MSIX_FUNC_MASK) ? true : false); } static bool is_msix_vector_mask(const int msix_num) { uint32_t data; data = sys_read32(MSIX_VECTOR_OFF(msix_num) + MSIX_TBL_VECTOR_CTRL_OFF); return ((data & MSIX_VECTOR_MASK) ? true : false); } /* Below function will be called from interrupt context */ static int generate_pending_msix(const struct device *dev, const int msix_num) { int is_msix_pending; struct iproc_pcie_ep_ctx *ctx = dev->data; k_spinlock_key_t key; /* check if function mask bit got set by Host */ if (is_pcie_function_mask(dev)) { LOG_DBG("function mask set! 
%d\n", msix_num); return 0; } key = k_spin_lock(&ctx->pba_lock); is_msix_pending = sys_test_bit(PBA_OFFSET(msix_num), PENDING_BIT(msix_num)); /* check if vector mask bit is cleared for pending msix */ if (is_msix_pending && !(is_msix_vector_mask(msix_num))) { LOG_DBG("msix %d unmasked\n", msix_num); /* generate msix and clear pending bit */ generate_msix(dev, msix_num); sys_clear_bit(PBA_OFFSET(msix_num), PENDING_BIT(msix_num)); } k_spin_unlock(&ctx->pba_lock, key); return 0; } /* Below function will be called from interrupt context */ static int generate_all_pending_msix(const struct device *dev) { int i; for (i = 0; i < MSIX_TABLE_SIZE; i++) { generate_pending_msix(dev, i); } return 0; } void iproc_pcie_func_mask_isr(void *arg) { const struct device *dev = arg; const struct iproc_pcie_ep_config *cfg = dev->config; uint32_t data; data = pcie_read32(&cfg->base->paxb_pcie_cfg_intr_status); LOG_DBG("%s: %x\n", __func__, data); if (data & SNOOP_VALID_INTR) { pcie_write32(SNOOP_VALID_INTR, &cfg->base->paxb_pcie_cfg_intr_clear); if (!is_pcie_function_mask(dev)) { generate_all_pending_msix(dev); } } } void iproc_pcie_vector_mask_isr(void *arg) { const struct device *dev = arg; int msix_table_update = sys_test_bit(PMON_LITE_PCIE_INTERRUPT_STATUS, WR_ADDR_CHK_INTR_EN); LOG_DBG("%s: %x\n", __func__, sys_read32(PMON_LITE_PCIE_INTERRUPT_STATUS)); if (msix_table_update) { sys_write32(BIT(WR_ADDR_CHK_INTR_EN), PMON_LITE_PCIE_INTERRUPT_CLEAR); generate_all_pending_msix(dev); } } #endif int iproc_pcie_generate_msix(const struct device *dev, const uint32_t msix_num) { if (msix_num >= MSIX_TABLE_SIZE) { LOG_WRN("Exceeded max supported MSI-X (%d)", MSIX_TABLE_SIZE); return -ENOTSUP; } #ifdef CONFIG_PCIE_EP_IPROC_V2 struct iproc_pcie_ep_ctx *ctx = dev->data; k_spinlock_key_t key; /* * Read function mask bit/vector mask bit and update pending bit * with spin_lock - aim is not to allow interrupt context * to update PBA during this section * This will make sure of no races between mask 
bit read * and pending bit update. */ key = k_spin_lock(&ctx->pba_lock); if (is_pcie_function_mask(dev) || is_msix_vector_mask(msix_num)) { LOG_DBG("msix %d masked\n", msix_num); /* set pending bit and return */ sys_set_bit(PBA_OFFSET(msix_num), PENDING_BIT(msix_num)); k_spin_unlock(&ctx->pba_lock, key); return -EBUSY; } k_spin_unlock(&ctx->pba_lock, key); #endif return generate_msix(dev, msix_num); } ```
/content/code_sandbox/drivers/pcie/endpoint/pcie_ep_iproc_msi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,759
```objective-c /* * */ #ifndef ZEPHYR_INCLUDE_DRIVERS_PCIE_EP_IPROC_REGS_H_ #define ZEPHYR_INCLUDE_DRIVERS_PCIE_EP_IPROC_REGS_H_ struct paxb_64 { uint32_t lower; uint32_t upper; }; #ifdef CONFIG_PCIE_EP_IPROC_V2 struct paxb_imap { uint32_t lower; uint32_t upper; uint32_t axim_write_config; uint32_t axim_read_config; }; struct iproc_pcie_reg { uint32_t paxb_clk_control; uint32_t paxb_ep_perst_hold_off; uint32_t paxb_global_control; uint32_t paxb_flush_control; uint32_t paxb_flush_status; uint32_t paxb_strap_status; uint32_t paxb_reset_status; uint32_t paxb_mps_mrrs_stat; uint32_t paxb_cfg_addr; uint32_t paxb_cfg_data; uint32_t paxb_cfg_be; uint32_t paxb_config_ind_addr; uint32_t paxb_config_ind_data; uint32_t paxb_config_ind_cmpl_stat; uint32_t paxb_config_ind_be; uint32_t paxb_config_ecm_addr; uint32_t paxb_config_ecm_data; uint32_t paxb_hide_func_cfg; uint32_t paxb_0_apb_timeout; uint32_t paxb_0_apb_err_en_for_cfg_rd_cmpl; uint32_t paxb_0_apb_err_en_for_cfg_wr_cmpl; uint32_t paxb_0_ur_resp_on_apb_timeout; uint32_t paxb_0_crs_resp_on_flr_in_progress; uint32_t paxb_0_apb_read_data_on_ca; uint32_t paxb_0_apb_read_data_on_crs; uint32_t paxb_0_compare_bus_num_for_pf; uint32_t paxb_0_latch_bus_number_on_cfg_wr; uint32_t paxb_0_issue_slave_error_from_ple_on_ur; uint32_t paxb_0_ep_ple_cfg_register_0; uint32_t paxb_0_ep_ple_cfg_register_1; uint32_t paxb_0_ep_ple_cfg_register_2; uint32_t paxb_0_ep_ple_cfg_register_3; uint32_t paxb_0_ep_ple_cfg_register_4; uint32_t paxb_ordering_cfg; uint32_t paxb_master_cfg; uint32_t paxb_master_axid_seq_num_cfg; uint32_t paxb_pnpn_so_traffic_shaper_count_slow; uint32_t paxb_pnpn_so_traffic_shaper_count_fast; uint32_t paxb_pnpn_so_traffic_shaper_thrshold; uint32_t paxb_bdf_to_func_num_mapping; uint32_t paxb_atomics_cfg; uint32_t paxb_master_gic_its_address; uint32_t paxb_master_id_seq_num_avbl_status; uint32_t paxb_axim_rd_fsm_state; uint32_t paxb_atomics_status; uint32_t paxb_atomics_fail_addr_h_status; uint32_t 
paxb_atomics_fail_addr_l_status; uint32_t paxb_atomics_fail_status; uint32_t paxb_atomics_fail_pf_vf_num; uint32_t paxb_nullified_tx_pd_status; uint32_t paxb_nullified_tx_pd_addr_h_status; uint32_t paxb_nullified_tx_pd_addr_l_status; uint32_t paxb_ds_non_posted_crdt_default; uint32_t paxb_ds_posted_crdt_default; uint32_t paxb_ds_cmpl_crdt_default; uint32_t paxb_ds_cfg; uint32_t paxb_us_cfg; uint32_t paxb_axim_write_config_func0; uint32_t paxb_axim_read_config_func0; uint32_t paxb_axim_write_config_func1; uint32_t paxb_axim_read_config_func1; uint32_t paxb_axim_write_config_func2; uint32_t paxb_axim_read_config_func2; uint32_t paxb_axim_write_config_func3; uint32_t paxb_axim_read_config_func3; uint32_t paxb_axim_write_config_func4; uint32_t paxb_axim_read_config_func4; uint32_t paxb_axim_write_config_func5; uint32_t paxb_axim_read_config_func5; uint32_t paxb_axim_write_config_func6; uint32_t paxb_axim_read_config_func6; uint32_t paxb_axim_write_config_func7; uint32_t paxb_axim_read_config_func7; uint32_t paxb_axim_write_config_func8; uint32_t paxb_axim_read_config_func8; uint32_t paxb_axim_write_config_func9; uint32_t paxb_axim_read_config_func9; uint32_t paxb_axim_write_config_func10; uint32_t paxb_axim_read_config_func10; uint32_t paxb_axim_write_config_func11; uint32_t paxb_axim_read_config_func11; uint32_t paxb_axim_write_config_func12; uint32_t paxb_axim_read_config_func12; uint32_t paxb_axim_write_config_func13; uint32_t paxb_axim_read_config_func13; uint32_t paxb_axim_write_config_func14; uint32_t paxb_axim_read_config_func14; uint32_t paxb_axim_write_config_func15; uint32_t paxb_axim_read_config_func15; uint32_t paxb_default_imap_lower; uint32_t paxb_default_imap_upper; uint32_t paxb_default_imap_axim_write_config; uint32_t paxb_default_imap_axim_read_config; struct paxb_imap paxb_func0_imap0[8]; struct paxb_imap paxb_func0_imap1[8]; struct paxb_imap paxb_func0_imap2; struct paxb_imap paxb_func0_imap3[8]; struct paxb_imap paxb_func0_imap4[8]; struct paxb_64 
paxb_iarr[5]; uint32_t paxb_override_window0_cfg0; uint32_t paxb_override_window0_cfg1; uint32_t paxb_override_window0_write_cfg; uint32_t paxb_override_window0_read_cfg; uint32_t paxb_override_window1_cfg0; uint32_t paxb_override_window1_cfg1; uint32_t paxb_override_window1_write_cfg; uint32_t paxb_override_window1_read_cfg; uint32_t paxb_msi_base_addr_cfg; uint32_t paxb_msi_high_addr_cfg; uint32_t paxb_msi_window_write_cfg; uint32_t paxb_oarr_func0_msi_page; uint32_t paxb_oarr_func0_msi_page_upper; struct paxb_64 paxb_oarr[2]; struct paxb_64 paxb_omap[2]; struct paxb_64 paxb_oarr_2; struct paxb_64 paxb_omap_2; struct paxb_64 paxb_oarr_3; struct paxb_64 paxb_omap_3; struct paxb_64 paxb_oarr_4; uint32_t paxb_omap_4_upper; struct paxb_64 paxb_oarr_5; struct paxb_64 paxb_omap_5; struct paxb_64 paxb_omap_5_0; struct paxb_64 paxb_omap_5_1; struct paxb_64 paxb_omap_5_2; struct paxb_64 paxb_omap_5_3; struct paxb_64 paxb_omap_5_4; struct paxb_64 paxb_omap_5_5; struct paxb_64 paxb_omap_5_6; struct paxb_64 paxb_omap_5_7; uint32_t paxb_rc_pm_control; uint32_t paxb_rc_pm_status; uint32_t paxb_ep_pm_control; uint32_t paxb_ep_pm_status; uint32_t paxb_ep_ltr_control; uint32_t paxb_ep_ltr_status; uint32_t paxb_ep_obff_status; uint32_t paxb_pcie_error_status; uint32_t paxb_pcie_link_status; uint32_t paxb_ecam_cfg_0; uint32_t paxb_ecam_cfg_1; uint32_t paxb_ecam_cfg_rc; uint32_t paxb_ecam_crs_cfg; uint32_t paxb_ecam_cfg_rd_data; uint32_t paxb_ecam_cmpl_stat; uint32_t paxb_ecam_apb_err_cfg; uint32_t paxb_ecam_apb_ur_resp_cfg; uint32_t paxb_mem_pwr_cfg; uint32_t paxb_mem_iso_cfg; uint32_t paxb_mem_pwr_status; uint32_t paxb_free_cid_cfg; uint32_t paxb_free_cid_status; uint32_t paxb_slave_cfg; uint32_t paxb_slave_pf_vf_offset; uint32_t paxb_cmp_err_tx_cplh_status; uint32_t paxb_cmp_err_tx_cplh_addr_h_status; uint32_t paxb_cmp_err_tx_cplh_addr_l_status; uint32_t paxb_axi_slave_debug_status; uint32_t paxb_paxb_intr_status; uint32_t paxb_paxb_intr_en; uint32_t paxb_paxb_intr_clear; 
uint32_t paxb_rc_intr_clear_en; uint32_t paxb_rc_intr_status; uint32_t paxb_rc_intr_mask; uint32_t paxb_rc_intr_clear; uint32_t paxb_pcie_cfg_intr_status; uint32_t paxb_pcie_cfg_intr_mask; uint32_t paxb_pcie_cfg_intr_clear; uint32_t paxb_master_intr_status; uint32_t paxb_master_intr_mask; uint32_t paxb_master_intr_clear; uint32_t paxb_slave_intr_status; uint32_t paxb_slave_intr_mask; uint32_t paxb_slave_intr_clear; uint32_t paxb_user_if_intr_status; uint32_t paxb_user_if_intr_mask; uint32_t paxb_user_if_intr_clear; uint32_t paxb_master_underflow_status; uint32_t paxb_master_overflow_status; uint32_t paxb_master_fifo_ecc_corr_status; uint32_t paxb_master_fifo_ecc_uncorr_status; uint32_t paxb_slave_underflow_status; uint32_t paxb_slave_overflow_status; uint32_t paxb_slave_ecc_err_corrected_status; uint32_t paxb_slave_ecc_err_uncor_status; uint32_t paxb_userif_underflow_status; uint32_t paxb_userif_overflow_status; uint32_t paxb_userif_ecc_err_corrected_status; uint32_t paxb_userif_ecc_err_uncor_status; uint32_t paxb_msg_status; uint32_t paxb_msg_header[4]; uint32_t paxb_msg_data[8]; uint32_t paxb_msg_control; uint32_t paxb_msg_drop_control; uint32_t paxb_snoop_addr_cfg[4]; }; #else struct iproc_pcie_reg { uint32_t paxb_clk_control; uint32_t paxb_rc_pm_control; uint32_t paxb_rc_pm_status; uint32_t paxb_ep_pm_control; uint32_t paxb_ep_pm_status; uint32_t paxb_ep_ltr_control; uint32_t paxb_ep_ltr_status; uint32_t paxb_reserved_0[1]; uint32_t paxb_ep_obff_status; uint32_t paxb_pcie_error_status; uint32_t paxb_reserved_1[2]; uint32_t paxb_paxb_endianness; uint32_t paxb_apb_timeout_count; uint32_t paxb_paxb_tx_arbiter_priority; uint32_t paxb_reserved_2[1]; uint32_t paxb_paxb_rd_cmpl_buf_init_start; uint32_t paxb_paxb_rd_cmpl_buf_init_done; uint32_t paxb_pcie_ordering_rules_enable; uint32_t paxb_axi_slverr_en_for_mem_rd_cmpl; uint32_t paxb_reserved_3[44]; uint32_t paxb_pcie_rc_axi_config; uint32_t paxb_pcie_ep_axi_config; uint32_t paxb_pcie_paxb_rx_debug_status_0; uint32_t 
paxb_pcie_paxb_rx_debug_control_0; uint32_t paxb_reserved_4[4]; uint32_t paxb_config_ind_addr; uint32_t paxb_config_ind_data; uint32_t paxb_reserved_5[51]; uint32_t paxb_cfg_be; uint32_t paxb_cfg_addr; uint32_t paxb_cfg_data; uint32_t paxb_pcie_sys_eq_page; uint32_t paxb_pcie_sys_msi_page; uint32_t paxb_reserved_6[2]; uint32_t paxb_pcie_sys_msi_ctrl[6]; uint32_t paxb_reserved_7[10]; uint32_t paxb_pcie_sys_eq_head_0; uint32_t paxb_pcie_sys_eq_tail_0; uint32_t paxb_pcie_sys_eq_head_1; uint32_t paxb_pcie_sys_eq_tail_1; uint32_t paxb_pcie_sys_eq_head_2; uint32_t paxb_pcie_sys_eq_tail_2; uint32_t paxb_pcie_sys_eq_head_3; uint32_t paxb_pcie_sys_eq_tail_3; uint32_t paxb_pcie_sys_eq_head_4; uint32_t paxb_pcie_sys_eq_tail_4; uint32_t paxb_pcie_sys_eq_head_5; uint32_t paxb_pcie_sys_eq_tail_5; uint32_t paxb_pcie_sys_eq_tail_early[6]; uint32_t paxb_reserved_8[2]; uint32_t paxb_pcie_sys_eq_overwritten[6]; uint32_t paxb_reserved_9[2]; uint32_t paxb_pcie_sys_eq_page_upper; uint32_t paxb_pcie_sys_msi_page_upper; uint32_t paxb_reserved_10[26]; uint32_t paxb_pcie_sys_rc_intx_en; uint32_t paxb_pcie_sys_rc_intx_csr; uint32_t paxb_reserved_11[2]; uint32_t paxb_pcie_sys_msi_req; uint32_t paxb_pcie_sys_host_intr_en; uint32_t paxb_pcie_sys_host_intr_csr; uint32_t paxb_reserved_12[1]; uint32_t paxb_pcie_sys_host_intr[4]; uint32_t paxb_pcie_sys_ep_int_en0; uint32_t paxb_pcie_sys_ep_int_en1; uint32_t paxb_reserved_13[2]; uint32_t paxb_pcie_sys_ep_int_csr0; uint32_t paxb_pcie_sys_ep_int_csr1; uint32_t paxb_reserved_14[2]; uint32_t paxb_cmicd_to_pcie_intr_en; uint32_t paxb_reserved_15[543]; uint32_t paxb_func0_imap0[8]; uint32_t paxb_func1_imap0[8]; uint32_t paxb_func0_imap0_upper[8]; uint32_t paxb_func1_imap0_upper[8]; uint32_t paxb_reserved_16[16]; struct paxb_64 paxb_func0_imap2; struct paxb_64 paxb_func1_imap2; uint32_t paxb_func0_imap0_0123_regs_type; uint32_t paxb_reserved_17[11]; struct paxb_64 paxb_iarr[3]; uint32_t paxb_reserved_18[2]; struct paxb_64 paxb_oarr[2]; uint32_t 
paxb_reserved_19[1]; uint32_t paxb_oarr_func0_msi_page; uint32_t paxb_oarr_func1_msi_page; uint32_t paxb_reserved_20[1]; struct paxb_64 paxb_omap[2]; uint32_t paxb_oarr_func0_msi_page_upper; uint32_t paxb_oarr_func1_msi_page_upper; uint32_t paxb_reserved_21[1]; uint32_t paxb_func1_iarr_2_size; struct paxb_64 paxb_oarr_2; struct paxb_64 paxb_omap_2; struct paxb_64 paxb_func0_imap1[8]; struct paxb_64 paxb_func1_imap1[8]; struct paxb_64 paxb_oarr_3; struct paxb_64 paxb_omap_3; struct paxb_64 paxb_iarr_3; struct paxb_64 paxb_func0_imap3[8]; uint32_t paxb_func0_imap3_axuser[8]; struct paxb_64 paxb_iarr_4; struct paxb_64 paxb_func0_imap4[8]; uint32_t paxb_func0_imap4_axuser[8]; uint32_t paxb_default_imap_lower; uint32_t paxb_default_imap_upper; uint32_t paxb_default_imap_axuser; uint32_t paxb_default_imap_axcache; uint32_t paxb_cfg_tlp_rd_status; uint32_t paxb_reserved_22[7]; uint32_t paxb_mem_control; uint32_t paxb_mem_ecc_err_log_0; uint32_t paxb_mem_ecc_err_log_1; uint32_t paxb_pcie_link_status; uint32_t paxb_strap_status; uint32_t paxb_reset_status; uint32_t paxb_reset_enable_in_pcie_link_down; uint32_t paxb_reserved_23[1]; uint32_t paxb_paxb_tx_debug_cfg; uint32_t paxb_paxb_misc_config; uint32_t paxb_reserved_24[2]; uint32_t paxb_paxb_intr_en; uint32_t paxb_paxb_intr_clear; uint32_t paxb_paxb_intr_status; uint32_t paxb_reserved_25[1]; uint32_t paxb_apb_err_en_for_cfg_rd_cmpl; uint32_t paxb_pcie_replay_addr_buf_ecc_log; uint32_t paxb_pcie_replay_data_buf_ecc_log; uint32_t paxb_pcie_dl_to_tl_buf_ecc_log; uint32_t paxb_pcie_tl_to_dl_buf_ecc_log; uint32_t paxb_reserved_26[3]; uint32_t paxb_func0_imap0_axuser[8]; uint32_t paxb_func1_imap0_axuser[8]; uint32_t paxb_func0_imap1_axuser[8]; uint32_t paxb_func1_imap1_axuser[8]; uint32_t paxb_func0_imap2_axuser; uint32_t paxb_func1_imap2_axuser; }; #endif static inline void pcie_write32(uint32_t data, uint32_t *addr) { sys_write32(data, (mem_addr_t)addr); } static inline uint32_t pcie_read32(uint32_t *addr) { return 
sys_read32((mem_addr_t)addr); } #endif /* ZEPHYR_INCLUDE_DRIVERS_PCIE_EP_IPROC_REGS_H_ */ ```
/content/code_sandbox/drivers/pcie/endpoint/pcie_ep_iproc_regs.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,472
```c
/*
 *
 *
 */

/*
 * Common PCIe endpoint helpers: data transfer between local memory and a
 * Host PCIe address through an outbound memory window, using either memcpy
 * or the system DMA engine.
 */

#include <zephyr/drivers/pcie/endpoint/pcie_ep.h>
#include <string.h>
#include <zephyr/sys/util.h>

/*
 * During DEVICE_TO_HOST data transfer, in order to make sure that all
 * PCIe writes (posted) have reached Host, i.e. to flush PCIe writes,
 * we need to add a dummy PCIe read (non posted transaction) after each
 * DEVICE_TO_HOST data transfer.
 *
 * There are a few possible scenarios, where we need to *place*
 * a dummy PCIe read.
 * All possible scenarios are captured in the table below.
 *
 * As can be seen in the table, for 64-bit systems, we could just do sys_read8
 * on mapped Host address to generate a dummy PCIe read, before unmapping the
 * address - irrespective of low/high outbound memory usage as core is capable
 * of accessing highmem.
 * Basically, we issue single byte PCIe read with sys_read8.
 *
 * For 32-bit systems, if using low outbound memory for memcpy/DMA,
 * we could do sys_read8 on the mapped address.
 * But, for 32-bit systems using high outbound memory for DMA operation,
 * sys_read8 is not possible, as the core cannot access highmem.
 * In this case, we need to *explicitly* perform PCIe read.
 *
 * +-------------+----------------------+-------------------------------------+
 * | Core        | Data transfer with   | OB memory type | Dummy PCIe read    |
 * +-------------+----------------------+----------------+--------------------+
 * | 64-bit      |                      | highmem        | sys_read8          |
 * |             | memcpy               |----------------+--------------------+
 * |             |                      | lowmem         | sys_read8          |
 * | (e.g.       +----------------------+----------------+--------------------+
 * | Cortex-A72) |                      | highmem        | sys_read8          |
 * |             | DMA                  |----------------+--------------------+
 * |             |                      | lowmem         | sys_read8          |
 * +-------------+----------------------+----------------+--------------------+
 * | 32-bit      |                      | highmem        | NA                 |
 * |             | memcpy               |----------------+--------------------+
 * |             |                      | lowmem         | sys_read8          |
 * | (e.g.       +----------------------+----------------+--------------------+
 * | Cortex-M7)  |                      | highmem        | Explicit PCIe read |
 * |             | DMA                  |----------------+--------------------+
 * |             |                      | lowmem         | sys_read8          |
 * +-------------+----------------------+----------------+--------------------+
 *
 * Based on this explanation, the 2 common APIs below, namely
 * pcie_ep_xfer_data_memcpy and pcie_ep_xfer_data_dma
 * are implemented with dummy PCIe read, phew!
 */

/*
 * Copy @size bytes between local memory and an already-mapped outbound
 * window address, in the direction given by @dir.
 *
 * Returns 0 on success, -EINVAL if @mapped_addr cannot be dereferenced by
 * a 32-bit core (address above 4 GiB).
 */
static int pcie_ep_mapped_copy(uint64_t mapped_addr, uintptr_t local_addr,
			       const uint32_t size, const enum xfer_direction dir)
{
	/*
	 * Make sure that address can be generated by core, this condition
	 * would not hit if proper pcie_ob_mem_type is passed by core
	 */
	if ((!IS_ENABLED(CONFIG_64BIT)) && (mapped_addr >> 32)) {
		return -EINVAL;
	}

	if (dir == DEVICE_TO_HOST) {
		memcpy(UINT_TO_POINTER(mapped_addr),
		       UINT_TO_POINTER(local_addr), size);
	} else {
		memcpy(UINT_TO_POINTER(local_addr),
		       UINT_TO_POINTER(mapped_addr), size);
	}

	return 0;
}

/*
 * Helper API to achieve data transfer with memcpy operation
 * through PCIe outbound memory
 *
 * Maps @pcie_addr into an outbound window, memcpy's up to @size bytes,
 * and - if the mapping hardware could not cover the whole buffer due to
 * window-alignment constraints - remaps once and transfers the remainder.
 * Returns 0 on success or a negative errno.
 */
int pcie_ep_xfer_data_memcpy(const struct device *dev, uint64_t pcie_addr,
			     uintptr_t *local_addr, uint32_t size,
			     enum pcie_ob_mem_type ob_mem_type,
			     enum xfer_direction dir)
{
	uint64_t mapped_addr;
	int mapped_size, ret;
	uint32_t xfer_size, unmapped_size;

	/* Map pcie_addr to outbound memory */
	mapped_size = pcie_ep_map_addr(dev, pcie_addr, &mapped_addr,
				       size, ob_mem_type);

	/* Check if outbound memory mapping succeeded */
	if (mapped_size < 0) {
		return mapped_size;
	}

	ret = pcie_ep_mapped_copy(mapped_addr, (uintptr_t)local_addr,
				  mapped_size, dir);

	/* Check if mapped_copy succeeded */
	if (ret < 0) {
		goto out_unmap;
	}

	/* Flush the PCIe writes upon successful memcpy */
	if (dir == DEVICE_TO_HOST) {
		sys_read8(mapped_addr);
	}

	/* Check if we achieved data transfer for given size */
	if (mapped_size == size) {
		ret = 0;
		goto out_unmap;
	}

	/*
	 * In normal case, we are done with data transfer by now,
	 * but some PCIe address translation hardware requires us to
	 * align Host address to be mapped to the translation window size.
	 * So, even though translation window size is good enough for
	 * size of Host buffer, we may not be able to map entire Host buffer
	 * to given outbound window in one time, and we may need to map
	 * remaining size and complete remaining data transfer
	 */

	pcie_ep_unmap_addr(dev, mapped_addr); /* unmap previous Host buffer */

	xfer_size = mapped_size; /* save already transferred data size */

	unmapped_size = size - mapped_size;
	mapped_size = pcie_ep_map_addr(dev, pcie_addr + xfer_size,
				       &mapped_addr, unmapped_size,
				       ob_mem_type);

	/* Check if outbound memory mapping succeeded */
	if (mapped_size < 0) {
		return mapped_size;
	}

	/*
	 * In second attempt, we must have mapped entire size,
	 * if not just quit here before attempting memcpy
	 */
	if (mapped_size != unmapped_size) {
		ret = -EIO;
		goto out_unmap;
	}

	ret = pcie_ep_mapped_copy(mapped_addr,
				  ((uintptr_t)local_addr) + xfer_size,
				  mapped_size, dir);

	/* Flush the PCIe writes upon successful memcpy */
	if (!ret && (dir == DEVICE_TO_HOST)) {
		sys_read8(mapped_addr);
	}

out_unmap:
	pcie_ep_unmap_addr(dev, mapped_addr);
	return ret;
}

/*
 * Helper API to achieve data transfer with DMA operation through
 * PCIe outbound memory, this API is based off pcie_ep_xfer_data_memcpy,
 * here we use "system dma" instead of memcpy
 *
 * Same remap-and-retry structure as pcie_ep_xfer_data_memcpy; the extra
 * dummy_data path issues an explicit PCIe read (via a lowmem memcpy
 * transfer) to flush posted writes when a 32-bit core used a highmem
 * window it cannot dereference directly (see table above).
 */
int pcie_ep_xfer_data_dma(const struct device *dev, uint64_t pcie_addr,
			  uintptr_t *local_addr, uint32_t size,
			  enum pcie_ob_mem_type ob_mem_type,
			  enum xfer_direction dir)
{
	uint64_t mapped_addr;
	int mapped_size, ret;
	uint32_t xfer_size, unmapped_size;
	uint32_t dummy_data; /* For explicit dummy PCIe read */

	/* Map pcie_addr to outbound memory */
	mapped_size = pcie_ep_map_addr(dev, pcie_addr, &mapped_addr,
				       size, ob_mem_type);

	/* Check if outbound memory mapping succeeded */
	if (mapped_size < 0) {
		return mapped_size;
	}

	ret = pcie_ep_dma_xfer(dev, mapped_addr, (uintptr_t)local_addr,
			       mapped_size, dir);

	/* Check if dma succeeded */
	if (ret < 0) {
		goto out_unmap;
	}

	/* Flush the PCIe writes upon successful DMA */
	if (dir == DEVICE_TO_HOST) {
		if (IS_ENABLED(CONFIG_64BIT) || !(mapped_addr >> 32)) {
			sys_read8(mapped_addr);
		}
	}

	pcie_ep_unmap_addr(dev, mapped_addr);

	/*
	 * Explicit PCIe read to flush PCIe writes for 32-bit system
	 * using high outbound memory for DMA operation
	 */
	if (dir == DEVICE_TO_HOST &&
	    (!IS_ENABLED(CONFIG_64BIT)) && (mapped_addr >> 32)) {
		ret = pcie_ep_xfer_data_memcpy(dev, pcie_addr,
					       (uintptr_t *)&dummy_data,
					       sizeof(dummy_data),
					       PCIE_OB_LOWMEM, HOST_TO_DEVICE);
		if (ret < 0) {
			return ret;
		}
	}

	/* Check if we achieved data transfer for given size */
	if (mapped_size == size) {
		return 0;
	}

	/* map remaining size and complete remaining data transfer */

	xfer_size = mapped_size; /* save already transferred data size */

	unmapped_size = size - mapped_size;
	mapped_size = pcie_ep_map_addr(dev, pcie_addr + xfer_size,
				       &mapped_addr, unmapped_size,
				       ob_mem_type);

	/* Check if outbound memory mapping succeeded */
	if (mapped_size < 0) {
		return mapped_size;
	}

	/*
	 * In second attempt, we must have mapped entire size,
	 * if not just quit here before attempting dma
	 */
	if (mapped_size != unmapped_size) {
		ret = -EIO;
		goto out_unmap;
	}

	ret = pcie_ep_dma_xfer(dev, mapped_addr,
			       ((uintptr_t)local_addr) + xfer_size,
			       mapped_size, dir);

	/* Check if dma copy succeeded */
	if (ret < 0) {
		goto out_unmap;
	}

	/* Flush the PCIe writes upon successful DMA */
	if (dir == DEVICE_TO_HOST) {
		if (IS_ENABLED(CONFIG_64BIT) || !(mapped_addr >> 32)) {
			sys_read8(mapped_addr);
		}
	}

	pcie_ep_unmap_addr(dev, mapped_addr);

	/*
	 * Explicit PCIe read to flush PCIe writes for 32-bit system
	 * using high outbound memory for DMA operation
	 */
	if (dir == DEVICE_TO_HOST &&
	    (!IS_ENABLED(CONFIG_64BIT)) && (mapped_addr >> 32)) {
		ret = pcie_ep_xfer_data_memcpy(dev, pcie_addr,
					       (uintptr_t *)&dummy_data,
					       sizeof(dummy_data),
					       PCIE_OB_LOWMEM, HOST_TO_DEVICE);
	}

	return ret;

out_unmap:
	pcie_ep_unmap_addr(dev, mapped_addr);
	return ret;
}
```
/content/code_sandbox/drivers/pcie/endpoint/pcie_ep_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,186
```unknown
# PCIe Endpoint configuration options

menuconfig PCIE_ENDPOINT
	bool "PCIe Endpoint support"
	help
	  This option enables PCIe Endpoint support.

if PCIE_ENDPOINT

# Per-module log level symbols (PCIE_EP_LOG_LEVEL et al.) come from the
# shared logging Kconfig template below.
module = PCIE_EP
module-str = PCIE_EP
source "subsys/logging/Kconfig.template.log_config"

comment "PCIe Endpoint Drivers"

# Broadcom iProc PCIe endpoint driver options
source "drivers/pcie/endpoint/Kconfig.iproc"

endif # PCIE_ENDPOINT
```
/content/code_sandbox/drivers/pcie/endpoint/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
85
```c /* * */ #include <stdlib.h> #include <zephyr/shell/shell.h> #include <zephyr/drivers/pcie/pcie.h> #ifdef CONFIG_PCIE_MSI #include <zephyr/drivers/pcie/msi.h> #endif #include <zephyr/drivers/pcie/cap.h> #include <zephyr/drivers/pcie/vc.h> #include "vc.h" struct pcie_cap_id_to_str { uint32_t id; char *str; }; static struct pcie_cap_id_to_str pcie_cap_list[] = { { PCI_CAP_ID_PM, "Power Management" }, { PCI_CAP_ID_AGP, "Accelerated Graphics Port" }, { PCI_CAP_ID_VPD, "Vital Product Data" }, { PCI_CAP_ID_SLOTID, "Slot Identification" }, { PCI_CAP_ID_MSI, "Message Signalled Interrupts" }, { PCI_CAP_ID_CHSWP, "CompactPCI HotSwap" }, { PCI_CAP_ID_PCIX, "PCI-X" }, { PCI_CAP_ID_HT, "HyperTransport" }, { PCI_CAP_ID_VNDR, "Vendor-Specific" }, { PCI_CAP_ID_DBG, "Debug port" }, { PCI_CAP_ID_CCRC, "CompactPCI Central Resource Control" }, { PCI_CAP_ID_SHPC, "PCI Standard Hot-Plug Controller" }, { PCI_CAP_ID_SSVID, "Bridge subsystem vendor/device ID" }, { PCI_CAP_ID_AGP3, "AGP 8x" }, { PCI_CAP_ID_SECDEV, "Secure Device" }, { PCI_CAP_ID_EXP, "PCI Express" }, { PCI_CAP_ID_MSIX, "MSI-X" }, { PCI_CAP_ID_SATA, "Serial ATA Data/Index Configuration" }, { PCI_CAP_ID_AF, "PCI Advanced Features" }, { PCI_CAP_ID_EA, "PCI Enhanced Allocation" }, { PCI_CAP_ID_FPB, "Flattening Portal Bridge" }, { PCI_CAP_ID_NULL, NULL }, }; static struct pcie_cap_id_to_str pcie_ext_cap_list[] = { { PCIE_EXT_CAP_ID_ERR, "Advanced Error Reporting" }, { PCIE_EXT_CAP_ID_VC, "Virtual Channel when no MFVC" }, { PCIE_EXT_CAP_ID_DSN, "Device Serial Number" }, { PCIE_EXT_CAP_ID_PWR, "Power Budgeting" }, { PCIE_EXT_CAP_ID_RCLD, "Root Complex Link Declaration" }, { PCIE_EXT_CAP_ID_RCILC, "Root Complex Internal Link Control" }, { PCIE_EXT_CAP_ID_RCEC, "Root Complex Event Collector Endpoint Association" }, { PCIE_EXT_CAP_ID_MFVC, "Multi-Function VC Capability" }, { PCIE_EXT_CAP_ID_MFVC_VC, "Virtual Channel used with MFVC" }, { PCIE_EXT_CAP_ID_RCRB, "Root Complex Register Block" }, { PCIE_EXT_CAP_ID_VNDR, 
"Vendor-Specific Extended Capability" }, { PCIE_EXT_CAP_ID_CAC, "Config Access Correlation - obsolete" }, { PCIE_EXT_CAP_ID_ACS, "Access Control Services" }, { PCIE_EXT_CAP_ID_ARI, "Alternate Routing-ID Interpretation" }, { PCIE_EXT_CAP_ID_ATS, "Address Translation Services" }, { PCIE_EXT_CAP_ID_SRIOV, "Single Root I/O Virtualization" }, { PCIE_EXT_CAP_ID_MRIOV, "Multi Root I/O Virtualization" }, { PCIE_EXT_CAP_ID_MCAST, "Multicast" }, { PCIE_EXT_CAP_ID_PRI, "Page Request Interface" }, { PCIE_EXT_CAP_ID_AMD_XXX, "Reserved for AMD" }, { PCIE_EXT_CAP_ID_REBAR, "Resizable BAR" }, { PCIE_EXT_CAP_ID_DPA, "Dynamic Power Allocation" }, { PCIE_EXT_CAP_ID_TPH, "TPH Requester" }, { PCIE_EXT_CAP_ID_LTR, "Latency Tolerance Reporting" }, { PCIE_EXT_CAP_ID_SECPCI, "Secondary PCIe Capability" }, { PCIE_EXT_CAP_ID_PMUX, "Protocol Multiplexing" }, { PCIE_EXT_CAP_ID_PASID, "Process Address Space ID" }, { PCIE_EXT_CAP_ID_DPC, "Downstream Port Containment" }, { PCIE_EXT_CAP_ID_L1SS, "L1 PM Substates" }, { PCIE_EXT_CAP_ID_PTM, "Precision Time Measurement" }, { PCIE_EXT_CAP_ID_DVSEC, "Designated Vendor-Specific Extended Capability" }, { PCIE_EXT_CAP_ID_DLF, "Data Link Feature" }, { PCIE_EXT_CAP_ID_PL_16GT, "Physical Layer 16.0 GT/s" }, { PCIE_EXT_CAP_ID_LMR, "Lane Margining at the Receiver" }, { PCIE_EXT_CAP_ID_HID, "Hierarchy ID" }, { PCIE_EXT_CAP_ID_NPEM, "Native PCIe Enclosure Management" }, { PCIE_EXT_CAP_ID_PL_32GT, "Physical Layer 32.0 GT/s" }, { PCIE_EXT_CAP_ID_AP, "Alternate Protocol" }, { PCIE_EXT_CAP_ID_SFI, "System Firmware Intermediary" }, { PCIE_EXT_CAP_ID_NULL, NULL }, }; static void show_msi(const struct shell *sh, pcie_bdf_t bdf) { #ifdef CONFIG_PCIE_MSI uint32_t msi; uint32_t data; msi = pcie_get_cap(bdf, PCI_CAP_ID_MSI); if (msi) { data = pcie_conf_read(bdf, msi + PCIE_MSI_MCR); shell_fprintf(sh, SHELL_NORMAL, " MSI support%s%s\n", (data & PCIE_MSI_MCR_64) ? ", 64-bit" : "", (data & PCIE_MSI_MCR_EN) ? 
", enabled" : ", disabled"); } msi = pcie_get_cap(bdf, PCI_CAP_ID_MSIX); if (msi) { uint32_t offset, table_size; uint8_t bir; data = pcie_conf_read(bdf, msi + PCIE_MSIX_MCR); table_size = ((data & PCIE_MSIX_MCR_TSIZE) >> PCIE_MSIX_MCR_TSIZE_SHIFT) + 1; shell_fprintf(sh, SHELL_NORMAL, " MSI-X support%s table size %d\n", (data & PCIE_MSIX_MCR_EN) ? ", enabled" : ", disabled", table_size); offset = pcie_conf_read(bdf, msi + PCIE_MSIX_TR); bir = offset & PCIE_MSIX_TR_BIR; offset &= PCIE_MSIX_TR_OFFSET; shell_fprintf(sh, SHELL_NORMAL, "\tTable offset 0x%x BAR %d\n", offset, bir); offset = pcie_conf_read(bdf, msi + PCIE_MSIX_PBA); bir = offset & PCIE_MSIX_PBA_BIR; offset &= PCIE_MSIX_PBA_OFFSET; shell_fprintf(sh, SHELL_NORMAL, "\tPBA offset 0x%x BAR %d\n", offset, bir); } #endif } static void show_bars(const struct shell *sh, pcie_bdf_t bdf) { uint32_t data; int bar; for (bar = PCIE_CONF_BAR0; bar <= PCIE_CONF_BAR5; ++bar) { data = pcie_conf_read(bdf, bar); if (data == PCIE_CONF_BAR_NONE) { continue; } shell_fprintf(sh, SHELL_NORMAL, " bar %d: %s%s", bar - PCIE_CONF_BAR0, PCIE_CONF_BAR_IO(data) ? "I/O" : "MEM", PCIE_CONF_BAR_64(data) ? 
", 64-bit" : ""); shell_fprintf(sh, SHELL_NORMAL, " addr 0x"); if (PCIE_CONF_BAR_64(data)) { ++bar; shell_fprintf(sh, SHELL_NORMAL, "%08x", pcie_conf_read(bdf, bar)); } shell_fprintf(sh, SHELL_NORMAL, "%08x\n", (uint32_t)PCIE_CONF_BAR_ADDR(data)); } } static void show_capabilities(const struct shell *sh, pcie_bdf_t bdf) { struct pcie_cap_id_to_str *cap_id2str; uint32_t base; shell_fprintf(sh, SHELL_NORMAL, " PCI capabilities:\n"); cap_id2str = pcie_cap_list; while (cap_id2str->str != NULL) { base = pcie_get_cap(bdf, cap_id2str->id); if (base != 0) { shell_fprintf(sh, SHELL_NORMAL, " %s\n", cap_id2str->str); } cap_id2str++; } shell_fprintf(sh, SHELL_NORMAL, " PCIe capabilities:\n"); cap_id2str = pcie_ext_cap_list; while (cap_id2str->str != NULL) { base = pcie_get_ext_cap(bdf, cap_id2str->id); if (base != 0) { shell_fprintf(sh, SHELL_NORMAL, " %s\n", cap_id2str->str); } cap_id2str++; } } static void show_vc(const struct shell *sh, pcie_bdf_t bdf) { uint32_t base; struct pcie_vc_regs regs; struct pcie_vc_resource_regs res_regs[PCIE_VC_MAX_COUNT]; int idx; base = pcie_vc_cap_lookup(bdf, &regs); if (base == 0) { return; } shell_fprintf(sh, SHELL_NORMAL, " VC exposed : VC/LPVC count: %u/%u, " "PAT entry size 0x%x, VCA cap 0x%x, " "VCA table Offset 0x%x\n", regs.cap_reg_1.vc_count + 1, regs.cap_reg_1.lpvc_count, regs.cap_reg_1.pat_entry_size, regs.cap_reg_2.vca_cap, regs.cap_reg_2.vca_table_offset); pcie_vc_load_resources_regs(bdf, base, res_regs, regs.cap_reg_1.vc_count + 1); for (idx = 0; idx < regs.cap_reg_1.vc_count + 1; idx++) { shell_fprintf(sh, SHELL_NORMAL, " VC %d - PA Cap 0x%x, RST %u," "Max TS %u PAT offset 0x%x\n", idx, res_regs[idx].cap_reg.pa_cap, res_regs[idx].cap_reg.rst, res_regs[idx].cap_reg.max_time_slots, res_regs[idx].cap_reg.pa_table_offset); } } static void pcie_dump(const struct shell *sh, pcie_bdf_t bdf) { for (int i = 0; i < 16; i++) { uint32_t val = pcie_conf_read(bdf, i); for (int j = 0; j < 4; j++) { shell_fprintf(sh, SHELL_NORMAL, "%02x ", 
(uint8_t)val); val >>= 8; } if (((i + 1) % 4) == 0) { shell_fprintf(sh, SHELL_NORMAL, "\n"); } } } static pcie_bdf_t get_bdf(char *str) { int bus, dev, func; char *tok, *state; tok = strtok_r(str, ":", &state); if (tok == NULL) { return PCIE_BDF_NONE; } bus = strtoul(tok, NULL, 16); tok = strtok_r(NULL, ".", &state); if (tok == NULL) { return PCIE_BDF_NONE; } dev = strtoul(tok, NULL, 16); tok = strtok_r(NULL, ".", &state); if (tok == NULL) { return PCIE_BDF_NONE; } func = strtoul(tok, NULL, 16); return PCIE_BDF(bus, dev, func); } static void show(const struct shell *sh, pcie_bdf_t bdf, bool details, bool dump) { uint32_t data; unsigned int irq; data = pcie_conf_read(bdf, PCIE_CONF_ID); if (!PCIE_ID_IS_VALID(data)) { return; } shell_fprintf(sh, SHELL_NORMAL, "%d:%x.%d ID %x:%x ", PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf), PCIE_ID_TO_VEND(data), PCIE_ID_TO_DEV(data)); data = pcie_conf_read(bdf, PCIE_CONF_CLASSREV); shell_fprintf(sh, SHELL_NORMAL, "class %x subclass %x prog i/f %x rev %x", PCIE_CONF_CLASSREV_CLASS(data), PCIE_CONF_CLASSREV_SUBCLASS(data), PCIE_CONF_CLASSREV_PROGIF(data), PCIE_CONF_CLASSREV_REV(data)); data = pcie_conf_read(bdf, PCIE_CONF_TYPE); if (PCIE_CONF_TYPE_BRIDGE(data)) { shell_fprintf(sh, SHELL_NORMAL, " [bridge]\n"); } else { shell_fprintf(sh, SHELL_NORMAL, "\n"); show_bars(sh, bdf); show_msi(sh, bdf); irq = pcie_get_irq(bdf); if (irq != PCIE_CONF_INTR_IRQ_NONE) { shell_fprintf(sh, SHELL_NORMAL, " wired interrupt on IRQ %d\n", irq); } } if (details) { show_capabilities(sh, bdf); show_vc(sh, bdf); } if (dump) { pcie_dump(sh, bdf); } } struct scan_cb_data { const struct shell *sh; bool dump; }; static bool scan_cb(pcie_bdf_t bdf, pcie_id_t id, void *cb_data) { struct scan_cb_data *data = cb_data; show(data->sh, bdf, false, data->dump); return true; } static int cmd_pcie_ls(const struct shell *sh, size_t argc, char **argv) { pcie_bdf_t bdf = PCIE_BDF_NONE; struct scan_cb_data data = { .sh = sh, .dump = false, }; struct 
pcie_scan_opt scan_opt = { .cb = scan_cb, .cb_data = &data, .flags = (PCIE_SCAN_RECURSIVE | PCIE_SCAN_CB_ALL), }; for (int i = 1; i < argc; i++) { /* Check dump argument */ if (strncmp(argv[i], "dump", 4) == 0) { data.dump = true; continue; } /* Check BDF string of PCI device */ if (bdf == PCIE_BDF_NONE) { bdf = get_bdf(argv[i]); } if (bdf == PCIE_BDF_NONE) { shell_error(sh, "Unknown parameter: %s", argv[i]); return -EINVAL; } } /* Show only specified device */ if (bdf != PCIE_BDF_NONE) { show(sh, bdf, true, data.dump); return 0; } pcie_scan(&scan_opt); return 0; } SHELL_STATIC_SUBCMD_SET_CREATE(sub_pcie_cmds, SHELL_CMD_ARG(ls, NULL, "List PCIE devices\n" "Usage: ls [bus:device:function] [dump]", cmd_pcie_ls, 1, 2), SHELL_SUBCMD_SET_END /* Array terminated. */ ); SHELL_CMD_REGISTER(pcie, &sub_pcie_cmds, "PCI(e) device information", cmd_pcie_ls); ```
/content/code_sandbox/drivers/pcie/host/shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,573
```c /* * */ #include <zephyr/drivers/dma.h> #include <zephyr/drivers/pcie/endpoint/pcie_ep.h> #define LOG_LEVEL CONFIG_PCIE_EP_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(iproc_pcie); #include "pcie_ep_iproc.h" #define DT_DRV_COMPAT brcm_iproc_pcie_ep static int iproc_pcie_conf_read(const struct device *dev, uint32_t offset, uint32_t *data) { const struct iproc_pcie_ep_config *cfg = dev->config; /* Write offset to Configuration Indirect Address register */ pcie_write32(offset, &cfg->base->paxb_config_ind_addr); /* Read data from Configuration Indirect Data register */ *data = pcie_read32(&cfg->base->paxb_config_ind_data); return 0; } static void iproc_pcie_conf_write(const struct device *dev, uint32_t offset, uint32_t data) { const struct iproc_pcie_ep_config *cfg = dev->config; /* Write offset to Configuration Indirect Address register */ pcie_write32(offset, &cfg->base->paxb_config_ind_addr); /* Write data to Configuration Indirect Data register */ pcie_write32(data, &cfg->base->paxb_config_ind_data); } static int iproc_pcie_map_addr(const struct device *dev, uint64_t pcie_addr, uint64_t *mapped_addr, uint32_t size, enum pcie_ob_mem_type ob_mem_type) { const struct iproc_pcie_ep_config *cfg = dev->config; struct iproc_pcie_ep_ctx *ctx = dev->data; uint64_t pcie_ob_base, pcie_ob_size, pcie_addr_start, offset; uint32_t mapped_size; enum pcie_outbound_map idx; k_spinlock_key_t key; int ret; key = k_spin_lock(&ctx->ob_map_lock); /* We support 2 outbound windows, * one in highmem region and another in lowmem region */ if ((ob_mem_type == PCIE_OB_HIGHMEM || ob_mem_type == PCIE_OB_ANYMEM) && !ctx->highmem_in_use) { idx = PCIE_MAP_HIGHMEM_IDX; pcie_ob_base = cfg->map_high_base; pcie_ob_size = cfg->map_high_size; } else if ((ob_mem_type == PCIE_OB_LOWMEM || ob_mem_type == PCIE_OB_ANYMEM) && !ctx->lowmem_in_use) { idx = PCIE_MAP_LOWMEM_IDX; pcie_ob_base = cfg->map_low_base; pcie_ob_size = cfg->map_low_size; } else { ret = -EBUSY; 
goto out; } /* check if the selected OB window supports size we want to map */ if (size > pcie_ob_size) { ret = -ENOTSUP; goto out; } /* Host PCIe address should be aligned to outbound window size */ pcie_addr_start = pcie_addr & ~(pcie_ob_size - 1); /* Program OARR with PCIe outbound address */ pcie_write32(((pcie_ob_base & ~(pcie_ob_size - 1)) | PAXB_OARR_VALID), &cfg->base->paxb_oarr[idx].lower); pcie_write32(pcie_ob_base >> 32, &cfg->base->paxb_oarr[idx].upper); /* Program OMAP with Host PCIe address */ pcie_write32((uint32_t)pcie_addr_start, &cfg->base->paxb_omap[idx].lower); pcie_write32((uint32_t)(pcie_addr_start >> 32), &cfg->base->paxb_omap[idx].upper); /* Mark usage of outbound window */ if (idx == PCIE_MAP_HIGHMEM_IDX) { ctx->highmem_in_use = true; } else { ctx->lowmem_in_use = true; } /* offset holds extra size mapped due to alignment requirement */ offset = pcie_addr - pcie_addr_start; *mapped_addr = pcie_ob_base + offset; mapped_size = pcie_ob_size - offset; ret = ((mapped_size >= size) ? 
size : mapped_size); out: k_spin_unlock(&ctx->ob_map_lock, key); return ret; } static void iproc_pcie_unmap_addr(const struct device *dev, uint64_t mapped_addr) { struct iproc_pcie_ep_ctx *ctx = dev->data; k_spinlock_key_t key; key = k_spin_lock(&ctx->ob_map_lock); if (mapped_addr >> 32) { ctx->highmem_in_use = false; } else { ctx->lowmem_in_use = false; } k_spin_unlock(&ctx->ob_map_lock, key); } static int iproc_pcie_raise_irq(const struct device *dev, enum pci_ep_irq_type irq_type, uint32_t irq_num) { struct iproc_pcie_ep_ctx *ctx = dev->data; k_spinlock_key_t key; int ret; key = k_spin_lock(&ctx->raise_irq_lock); switch (irq_type) { case PCIE_EP_IRQ_MSI: ret = iproc_pcie_generate_msi(dev, irq_num); break; case PCIE_EP_IRQ_MSIX: ret = iproc_pcie_generate_msix(dev, irq_num); break; case PCIE_EP_IRQ_LEGACY: ret = -ENOTSUP; break; default: LOG_ERR("Unknown IRQ type\n"); ret = -EINVAL; } k_spin_unlock(&ctx->raise_irq_lock, key); return ret; } static int iproc_pcie_register_reset_cb(const struct device *dev, enum pcie_reset reset, pcie_ep_reset_callback_t cb, void *arg) { struct iproc_pcie_ep_ctx *ctx = dev->data; if (reset < PCIE_PERST || reset >= PCIE_RESET_MAX) { return -EINVAL; } LOG_DBG("Registering the callback for reset %d", reset); ctx->reset_cb[reset] = cb; ctx->reset_data[reset] = arg; return 0; } #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(dmas) static int iproc_pcie_pl330_dma_xfer(const struct device *dev, uint64_t mapped_addr, uintptr_t local_addr, uint32_t size, const enum xfer_direction dir) { const struct iproc_pcie_ep_config *cfg = dev->config; struct dma_config dma_cfg = { 0 }; struct dma_block_config dma_block_cfg = { 0 }; uint32_t chan_id; int ret = -EINVAL; if (!device_is_ready(cfg->pl330_dev)) { LOG_ERR("DMA controller is not ready\n"); ret = -ENODEV; goto out; } /* configure DMA */ dma_cfg.channel_direction = MEMORY_TO_MEMORY; dma_cfg.block_count = 1U; dma_cfg.head_block = &dma_block_cfg; dma_block_cfg.block_size = size; if (dir == DEVICE_TO_HOST) { 
dma_block_cfg.source_address = local_addr; dma_block_cfg.dest_address = mapped_addr; chan_id = cfg->pl330_tx_chan_id; } else { dma_block_cfg.source_address = mapped_addr; dma_block_cfg.dest_address = local_addr; chan_id = cfg->pl330_rx_chan_id; } ret = dma_config(cfg->pl330_dev, chan_id, &dma_cfg); if (ret) { LOG_ERR("DMA config failed\n"); goto out; } /* start DMA */ ret = dma_start(cfg->pl330_dev, chan_id); if (ret) { LOG_ERR("DMA transfer failed\n"); } out: return ret; } #endif #if DT_INST_IRQ_HAS_NAME(0, perst) static void iproc_pcie_perst(const struct device *dev) { struct iproc_pcie_ep_ctx *ctx = dev->data; void *reset_data; uint32_t data; data = sys_read32(CRMU_MCU_EXTRA_EVENT_STATUS); if (data & PCIE0_PERST_INTR) { LOG_DBG("PERST interrupt [0x%x]", data); sys_write32(PCIE0_PERST_INTR, CRMU_MCU_EXTRA_EVENT_CLEAR); if (ctx->reset_cb[PCIE_PERST] != NULL) { reset_data = ctx->reset_data[PCIE_PERST]; ctx->reset_cb[PCIE_PERST](reset_data); } } } #endif #if DT_INST_IRQ_HAS_NAME(0, perst_inband) static void iproc_pcie_hot_reset(const struct device *dev) { struct iproc_pcie_ep_ctx *ctx = dev->data; void *reset_data; uint32_t data; data = sys_read32(CRMU_MCU_EXTRA_EVENT_STATUS); if (data & PCIE0_PERST_INB_INTR) { LOG_DBG("INBAND PERST interrupt [0x%x]", data); sys_write32(PCIE0_PERST_INB_INTR, CRMU_MCU_EXTRA_EVENT_CLEAR); if (ctx->reset_cb[PCIE_PERST_INB] != NULL) { reset_data = ctx->reset_data[PCIE_PERST_INB]; ctx->reset_cb[PCIE_PERST_INB](reset_data); } } } #endif #if DT_INST_IRQ_HAS_NAME(0, flr) static void iproc_pcie_flr(const struct device *dev) { const struct iproc_pcie_ep_config *cfg = dev->config; struct iproc_pcie_ep_ctx *ctx = dev->data; void *reset_data; uint32_t data; data = pcie_read32(&cfg->base->paxb_paxb_intr_status); if (data & PCIE0_FLR_INTR) { LOG_DBG("FLR interrupt[0x%x]", data); pcie_write32(PCIE0_FLR_INTR, &cfg->base->paxb_paxb_intr_clear); if (ctx->reset_cb[PCIE_FLR] != NULL) { reset_data = ctx->reset_data[PCIE_FLR]; 
ctx->reset_cb[PCIE_FLR](reset_data); } } else { /* * Other interrupts like PAXB ECC Error interrupt * could show up at the beginning which are harmless. * So simply clearing those interrupts here */ LOG_DBG("PAXB interrupt[0x%x]", data); pcie_write32(data, &cfg->base->paxb_paxb_intr_clear); } /* Clear FLR in Progress bit */ iproc_pcie_conf_read(dev, PCIE_DEV_CTRL_OFFSET, &data); data |= FLR_IN_PROGRESS; iproc_pcie_conf_write(dev, PCIE_DEV_CTRL_OFFSET, data); } #endif static void iproc_pcie_reset_config(const struct device *dev) { __unused uint32_t data; __unused const struct iproc_pcie_ep_config *cfg = dev->config; #if DT_INST_IRQ_HAS_NAME(0, perst) /* Clear any possible prior pending PERST interrupt */ sys_write32(PCIE0_PERST_INTR, CRMU_MCU_EXTRA_EVENT_CLEAR); /* Enable PERST interrupt */ data = sys_read32(PCIE_PERSTB_INTR_CTL_STS); data |= PCIE0_PERST_FE_INTR; sys_write32(data, PCIE_PERSTB_INTR_CTL_STS); data = sys_read32(CRMU_MCU_EXTRA_EVENT_MASK); data &= ~PCIE0_PERST_INTR; sys_write32(data, CRMU_MCU_EXTRA_EVENT_MASK); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, perst, irq), DT_INST_IRQ_BY_NAME(0, perst, priority), iproc_pcie_perst, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, perst, irq)); #endif #if DT_INST_IRQ_HAS_NAME(0, perst_inband) /* Clear any possible prior pending Inband PERST interrupt */ sys_write32(PCIE0_PERST_INB_INTR, CRMU_MCU_EXTRA_EVENT_CLEAR); /* Enable Inband PERST interrupt */ data = sys_read32(PCIE_PERSTB_INTR_CTL_STS); data |= PCIE0_PERST_INB_FE_INTR; sys_write32(data, PCIE_PERSTB_INTR_CTL_STS); data = sys_read32(CRMU_MCU_EXTRA_EVENT_MASK); data &= ~PCIE0_PERST_INB_INTR; sys_write32(data, CRMU_MCU_EXTRA_EVENT_MASK); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, perst_inband, irq), DT_INST_IRQ_BY_NAME(0, perst_inband, priority), iproc_pcie_hot_reset, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, perst_inband, irq)); #endif #if DT_INST_IRQ_HAS_NAME(0, flr) /* Clear any possible prior pending FLR */ pcie_write32(PCIE0_FLR_INTR, 
&cfg->base->paxb_paxb_intr_clear); /* Set auto clear FLR and auto clear CRS post FLR */ iproc_pcie_conf_read(dev, PCIE_TL_CTRL0_OFFSET, &data); data |= (AUTO_CLR_CRS_POST_FLR | AUTO_CLR_FLR_AFTER_DELAY); iproc_pcie_conf_write(dev, PCIE_TL_CTRL0_OFFSET, data); /* Enable Function Level Reset */ data = pcie_read32(&cfg->base->paxb_paxb_intr_en); data |= PCIE0_FLR_INTR; pcie_write32(data, &cfg->base->paxb_paxb_intr_en); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, flr, irq), DT_INST_IRQ_BY_NAME(0, flr, priority), iproc_pcie_flr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, flr, irq)); #endif } #ifdef CONFIG_PCIE_EP_IPROC_V2 static void iproc_pcie_msix_pvm_config(const struct device *dev) { __unused const struct iproc_pcie_ep_config *cfg = dev->config; __unused struct iproc_pcie_reg *base = cfg->base; __unused uint32_t data; /* configure snoop irq 1 for monitoring MSIX_CAP register */ #if DT_INST_IRQ_HAS_NAME(0, snoop_irq1) data = pcie_read32(&cfg->base->paxb_snoop_addr_cfg[1]); data &= ~SNOOP_ADDR1_MASK; data |= (SNOOP_ADDR1 | SNOOP_ADDR1_EN); pcie_write32(data, &cfg->base->paxb_snoop_addr_cfg[1]); data = pcie_read32(&base->paxb_pcie_cfg_intr_mask); data &= ~SNOOP_VALID_INTR; pcie_write32(data, &base->paxb_pcie_cfg_intr_mask); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, snoop_irq1, irq), DT_INST_IRQ_BY_NAME(0, snoop_irq1, priority), iproc_pcie_func_mask_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, snoop_irq1, irq)); LOG_DBG("snoop interrupt configured\n"); #endif /* configure pmon lite interrupt for monitoring MSIX table */ #if DT_INST_IRQ_HAS_NAME(0, pcie_pmon_lite) data = sys_read32(PMON_LITE_PCIE_AXI_FILTER_0_CONTROL); data |= AXI_FILTER_0_ENABLE; sys_write32(data, PMON_LITE_PCIE_AXI_FILTER_0_CONTROL); sys_write32(MSIX_TABLE_BASE, AXI_FILTER_0_ADDR_START_LOW); /* Start of PBA is end of MSI-X table in our case */ sys_write32(PBA_TABLE_BASE, AXI_FILTER_0_ADDR_END_LOW); sys_set_bit(PMON_LITE_PCIE_INTERRUPT_ENABLE, WR_ADDR_CHK_INTR_EN); memset((void 
*)PBA_TABLE_BASE, 0, PBA_TABLE_SIZE); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, pcie_pmon_lite, irq), DT_INST_IRQ_BY_NAME(0, pcie_pmon_lite, priority), iproc_pcie_vector_mask_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, pcie_pmon_lite, irq)); LOG_DBG("pcie pmon lite interrupt configured\n"); #endif } #endif static int iproc_pcie_mode_check(const struct iproc_pcie_ep_config *cfg) { uint32_t data; data = pcie_read32(&cfg->base->paxb_strap_status); LOG_DBG("PAXB_STRAP_STATUS = 0x%08X\n", data); if (data & PCIE_RC_MODE_MASK) { return -ENOTSUP; } return 0; } static int iproc_pcie_ep_init(const struct device *dev) { const struct iproc_pcie_ep_config *cfg = dev->config; struct iproc_pcie_ep_ctx *ctx = dev->data; int ret; uint32_t data; ret = iproc_pcie_mode_check(cfg); if (ret) { LOG_ERR("ERROR: Only PCIe EP mode is supported\n"); goto err_out; } iproc_pcie_conf_read(dev, PCIE_LINK_STATUS_CONTROL, &data); LOG_INF("PCIe linkup speed 0x%x\n", ((data >> PCIE_LINKSPEED_SHIFT) & PCIE_LINKSPEED_MASK)); LOG_INF("PCIe linkup width 0x%x\n", ((data >> PCIE_LINKWIDTH_SHIFT) & PCIE_LINKWIDTH_MASK)); #ifdef PCIE_EP_IPROC_INIT_CFG iproc_pcie_msi_config(dev); iproc_pcie_msix_config(dev); #endif /* configure interrupts for MSI-X Per-Vector Masking feature */ #ifdef CONFIG_PCIE_EP_IPROC_V2 iproc_pcie_msix_pvm_config(dev); #endif iproc_pcie_reset_config(dev); ctx->highmem_in_use = false; ctx->lowmem_in_use = false; LOG_INF("PCIe initialized successfully\n"); err_out: return ret; } static struct iproc_pcie_ep_ctx iproc_pcie_ep_ctx_0; static const struct iproc_pcie_ep_config iproc_pcie_ep_config_0 = { .id = 0, .base = (struct iproc_pcie_reg *)DT_INST_REG_ADDR(0), .reg_size = DT_INST_REG_SIZE(0), .map_low_base = DT_INST_REG_ADDR_BY_NAME(0, map_lowmem), .map_low_size = DT_INST_REG_SIZE_BY_NAME(0, map_lowmem), .map_high_base = DT_INST_REG_ADDR_BY_NAME(0, map_highmem), .map_high_size = DT_INST_REG_SIZE_BY_NAME(0, map_highmem), #if DT_INST_NODE_HAS_PROP(0, dmas) .pl330_dev = 
DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_IDX(0, 0)), .pl330_tx_chan_id = DT_INST_DMAS_CELL_BY_NAME(0, txdma, channel), .pl330_rx_chan_id = DT_INST_DMAS_CELL_BY_NAME(0, rxdma, channel), #endif }; static const struct pcie_ep_driver_api iproc_pcie_ep_api = { .conf_read = iproc_pcie_conf_read, .conf_write = iproc_pcie_conf_write, .map_addr = iproc_pcie_map_addr, .unmap_addr = iproc_pcie_unmap_addr, .raise_irq = iproc_pcie_raise_irq, .register_reset_cb = iproc_pcie_register_reset_cb, #if DT_INST_NODE_HAS_PROP(0, dmas) .dma_xfer = iproc_pcie_pl330_dma_xfer, #endif }; DEVICE_DT_INST_DEFINE(0, &iproc_pcie_ep_init, NULL, &iproc_pcie_ep_ctx_0, &iproc_pcie_ep_config_0, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &iproc_pcie_ep_api); ```
/content/code_sandbox/drivers/pcie/endpoint/pcie_ep_iproc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,353
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_PCIE_HOST_VC_H_ #define ZEPHYR_DRIVERS_PCIE_HOST_VC_H_ #define PCIE_VC_CAP_REG_1_OFFSET 0x04U #define PCIE_VC_CAP_REG_2_OFFSET 0x08U #define PCIE_VC_CTRL_STATUS_REG_OFFSET 0x0CU /** Virtual Channel capability and control Registers */ struct pcie_vc_regs { union { struct { /** Virtual Channel Count */ uint32_t vc_count : 3; uint32_t _reserved1 : 1; /** Low Priority Virtual Channel Count */ uint32_t lpvc_count : 3; uint32_t _reserved2 : 1; /** Reference Clock */ uint32_t reference_clock : 2; /** Port Arbitration Table Entry Size */ uint32_t pat_entry_size : 3; uint32_t _reserved3 : 19; }; uint32_t raw; } cap_reg_1; union { struct { /** Virtual Channel Arbitration Capability */ uint32_t vca_cap : 8; uint32_t _reserved1 : 16; /** Virtual Channel Arbitration Table Offset */ uint32_t vca_table_offset : 8; }; uint32_t raw; } cap_reg_2; union { struct { /** Load Virtual Channel Arbitration Table */ uint32_t load_vca_table : 1; /** Virtual Channel Arbitration Select */ uint32_t vca_select : 3; uint32_t _reserved1 : 12; /** Virtual Channel Arbitration Table Status */ uint32_t vca_table_status : 1; uint32_t _reserved2 : 15; }; uint32_t raw; } ctrl_reg; }; #define PCIE_VC_RES_CAP_REG_OFFSET(_vc) (0x10U + _vc * 0X0CU) #define PCIE_VC_RES_CTRL_REG_OFFSET(_vc) (0x14U + _vc * 0X0CU) #define PCIE_VC_RES_STATUS_REG_OFFSET(_vc) (0x18U + _vc * 0X0CU) #define PCIE_VC_PA_RR BIT(0) #define PCIE_VC_PA_WRR BIT(1) #define PCIE_VC_PA_WRR64 BIT(2) #define PCIE_VC_PA_WRR128 BIT(3) #define PCIE_VC_PA_TMWRR128 BIT(4) #define PCIE_VC_PA_WRR256 BIT(5) /** Virtual Channel Resource Registers */ struct pcie_vc_resource_regs { union { struct { /** Port Arbitration Capability */ uint32_t pa_cap : 8; uint32_t _reserved1 : 6; uint32_t undefined : 1; /** Reject Snoop Transactions */ uint32_t rst : 1; /** Maximum Time Slots */ uint32_t max_time_slots : 7; uint32_t _reserved2 : 1; /** Port Arbitration Table Offset */ uint32_t pa_table_offset : 8; }; uint32_t 
raw; } cap_reg; union { struct { /** Traffic Class to Virtual Channel Map */ uint32_t tc_vc_map : 8; uint32_t _reserved1 : 8; /** Load Port Arbitration Table */ uint32_t load_pa_table : 1; /** Port Arbitration Select */ uint32_t pa_select : 3; uint32_t _reserved2 : 4; /** Virtual Channel ID */ uint32_t vc_id : 3; uint32_t _reserved3 : 4; /** Virtual Channel Enable */ uint32_t vc_enable : 1; }; uint32_t raw; } ctrl_reg; union { struct { uint32_t _reserved1 : 16; /** Port Arbitration Table Status */ uint32_t pa_table_status : 1; /** Virtual Channel Negociation Pending */ uint32_t vc_negocation_pending : 1; uint32_t _reserved2 : 14; }; uint32_t raw; } status_reg; }; uint32_t pcie_vc_cap_lookup(pcie_bdf_t bdf, struct pcie_vc_regs *regs); void pcie_vc_load_resources_regs(pcie_bdf_t bdf, uint32_t base, struct pcie_vc_resource_regs *regs, int nb_regs); #endif /* ZEPHYR_DRIVERS_PCIE_HOST_VC_H_ */ ```
/content/code_sandbox/drivers/pcie/host/vc.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,021
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/drivers/pcie/msi.h> #include <zephyr/drivers/pcie/cap.h> /* functions documented in include/drivers/pcie/msi.h */ static uint32_t pcie_msi_base(pcie_bdf_t bdf, bool *msi) { uint32_t base; if (msi != NULL) { *msi = true; } base = pcie_get_cap(bdf, PCI_CAP_ID_MSI); if (IS_ENABLED(CONFIG_PCIE_MSI_X)) { uint32_t base_msix; base_msix = pcie_get_cap(bdf, PCI_CAP_ID_MSIX); if (base_msix != 0U) { base = base_msix; if (msi != NULL) { *msi = false; } } } return base; } #ifdef CONFIG_PCIE_MSI_MULTI_VECTOR #include <zephyr/kernel/mm.h> __weak uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority, msi_vector_t *vectors, uint8_t n_vector) { ARG_UNUSED(priority); ARG_UNUSED(vectors); ARG_UNUSED(n_vector); return 0; } __weak bool arch_pcie_msi_vector_connect(msi_vector_t *vector, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { ARG_UNUSED(vector); ARG_UNUSED(routine); ARG_UNUSED(parameter); ARG_UNUSED(flags); return false; } #ifdef CONFIG_PCIE_MSI_X static uint32_t get_msix_table_size(pcie_bdf_t bdf, uint32_t base) { uint32_t mcr; mcr = pcie_conf_read(bdf, base + PCIE_MSIX_MCR); return ((mcr & PCIE_MSIX_MCR_TSIZE) >> PCIE_MSIX_MCR_TSIZE_SHIFT) + 1; } static bool map_msix_table_entries(pcie_bdf_t bdf, uint32_t base, msi_vector_t *vectors, uint8_t n_vector) { uint32_t table_offset; uint8_t table_bir; struct pcie_bar bar; uintptr_t mapped_table; int i; table_offset = pcie_conf_read(bdf, base + PCIE_MSIX_TR); table_bir = table_offset & PCIE_MSIX_TR_BIR; table_offset &= PCIE_MSIX_TR_OFFSET; if (!pcie_get_mbar(bdf, table_bir, &bar)) { return false; } k_mem_map_phys_bare((uint8_t **)&mapped_table, bar.phys_addr + table_offset, n_vector * PCIE_MSIR_TABLE_ENTRY_SIZE, K_MEM_PERM_RW); for (i = 0; i < n_vector; i++) { vectors[i].msix_vector = (struct msix_vector *) (mapped_table + (i * PCIE_MSIR_TABLE_ENTRY_SIZE)); } return true; } static void set_msix(msi_vector_t *vectors, uint8_t n_vector, bool 
msix) { int i; for (i = 0; i < n_vector; i++) { vectors[i].msix = msix; } } #else #define get_msix_table_size(...) 0 #define map_msix_table_entries(...) true #define set_msix(...) #endif /* CONFIG_PCIE_MSI_X */ static uint32_t get_msi_mmc(pcie_bdf_t bdf, uint32_t base) { uint32_t mcr; mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR); /* Getting MMC true count: 2^(MMC field) */ return 1 << ((mcr & PCIE_MSI_MCR_MMC) >> PCIE_MSI_MCR_MMC_SHIFT); } uint8_t pcie_msi_vectors_allocate(pcie_bdf_t bdf, unsigned int priority, msi_vector_t *vectors, uint8_t n_vector) { uint32_t req_vectors; uint32_t base; bool msi; base = pcie_msi_base(bdf, &msi); if (IS_ENABLED(CONFIG_PCIE_MSI_X)) { set_msix(vectors, n_vector, !msi); if (!msi) { req_vectors = get_msix_table_size(bdf, base); if (!map_msix_table_entries(bdf, base, vectors, n_vector)) { return 0; } } } if (msi) { req_vectors = get_msi_mmc(bdf, base); } if (n_vector > req_vectors) { n_vector = req_vectors; } for (req_vectors = 0; req_vectors < n_vector; req_vectors++) { vectors[req_vectors].bdf = bdf; } return arch_pcie_msi_vectors_allocate(priority, vectors, n_vector); } bool pcie_msi_vector_connect(pcie_bdf_t bdf, msi_vector_t *vector, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { uint32_t base; base = pcie_msi_base(bdf, NULL); if (base == 0U) { return false; } return arch_pcie_msi_vector_connect(vector, routine, parameter, flags); } #endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */ #ifdef CONFIG_PCIE_MSI_X static void enable_msix(pcie_bdf_t bdf, msi_vector_t *vectors, uint8_t n_vector, uint32_t base, unsigned int irq) { uint32_t mcr; int i; for (i = 0; i < n_vector; i++) { uint32_t map = pcie_msi_map(irq, &vectors[i], 1); uint32_t mdr = pcie_msi_mdr(irq, &vectors[i]); sys_write32(map, (mm_reg_t) &vectors[i].msix_vector->msg_addr); sys_write32(0, (mm_reg_t) &vectors[i].msix_vector->msg_up_addr); sys_write32(mdr, (mm_reg_t) &vectors[i].msix_vector->msg_data); sys_write32(0, (mm_reg_t) 
&vectors[i].msix_vector->vector_ctrl); } mcr = pcie_conf_read(bdf, base + PCIE_MSIX_MCR); mcr |= PCIE_MSIX_MCR_EN; pcie_conf_write(bdf, base + PCIE_MSIX_MCR, mcr); } #else #define enable_msix(...) #endif /* CONFIG_PCIE_MSI_X */ static void disable_msi(pcie_bdf_t bdf, uint32_t base) { uint32_t mcr; mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR); mcr &= ~PCIE_MSI_MCR_EN; pcie_conf_write(bdf, base + PCIE_MSI_MCR, mcr); } static void enable_msi(pcie_bdf_t bdf, msi_vector_t *vectors, uint8_t n_vector, uint32_t base, unsigned int irq) { uint32_t mcr; uint32_t map; uint32_t mdr; uint32_t mme; map = pcie_msi_map(irq, vectors, n_vector); pcie_conf_write(bdf, base + PCIE_MSI_MAP0, map); mdr = pcie_msi_mdr(irq, vectors); mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR); if ((mcr & PCIE_MSI_MCR_64) != 0U) { pcie_conf_write(bdf, base + PCIE_MSI_MAP1_64, 0U); pcie_conf_write(bdf, base + PCIE_MSI_MDR_64, mdr); } else { pcie_conf_write(bdf, base + PCIE_MSI_MDR_32, mdr); } /* Generating MME field (1 counts as a power of 2) */ for (mme = 0; n_vector > 1; mme++) { n_vector >>= 1; } mcr |= mme << PCIE_MSI_MCR_MME_SHIFT; mcr |= PCIE_MSI_MCR_EN; pcie_conf_write(bdf, base + PCIE_MSI_MCR, mcr); } bool pcie_msi_enable(pcie_bdf_t bdf, msi_vector_t *vectors, uint8_t n_vector, unsigned int irq) { uint32_t base; bool msi; base = pcie_msi_base(bdf, &msi); if (base == 0U) { return false; } if (!msi && IS_ENABLED(CONFIG_PCIE_MSI_X)) { disable_msi(bdf, base); enable_msix(bdf, vectors, n_vector, base, irq); } else { enable_msi(bdf, vectors, n_vector, base, irq); } pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MASTER, true); return true; } bool pcie_is_msi(pcie_bdf_t bdf) { return (pcie_msi_base(bdf, NULL) != 0); } ```
/content/code_sandbox/drivers/pcie/host/msi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,029
```c /* * */ #define DT_DRV_COMPAT pcie_controller #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(pcie, LOG_LEVEL_ERR); #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/sys/check.h> #include <stdbool.h> #include <zephyr/drivers/pcie/pcie.h> #include <zephyr/sys/iterable_sections.h> #ifdef CONFIG_ACPI #include <zephyr/acpi/acpi.h> #endif #if CONFIG_PCIE_MSI #include <zephyr/drivers/pcie/msi.h> #endif #ifdef CONFIG_PCIE_CONTROLLER #include <zephyr/drivers/pcie/controller.h> #endif #ifdef CONFIG_PCIE_PRT /* platform interrupt are hardwired or can be dynamically allocated. */ static bool prt_en; #endif /* functions documented in drivers/pcie/pcie.h */ void pcie_set_cmd(pcie_bdf_t bdf, uint32_t bits, bool on) { uint32_t cmdstat; cmdstat = pcie_conf_read(bdf, PCIE_CONF_CMDSTAT); if (on) { cmdstat |= bits; } else { cmdstat &= ~bits; } pcie_conf_write(bdf, PCIE_CONF_CMDSTAT, cmdstat); } uint32_t pcie_get_cap(pcie_bdf_t bdf, uint32_t cap_id) { uint32_t reg = 0U; uint32_t data; data = pcie_conf_read(bdf, PCIE_CONF_CMDSTAT); if ((data & PCIE_CONF_CMDSTAT_CAPS) != 0U) { data = pcie_conf_read(bdf, PCIE_CONF_CAPPTR); reg = PCIE_CONF_CAPPTR_FIRST(data); } while (reg != 0U) { data = pcie_conf_read(bdf, reg); if (PCIE_CONF_CAP_ID(data) == cap_id) { break; } reg = PCIE_CONF_CAP_NEXT(data); } return reg; } uint32_t pcie_get_ext_cap(pcie_bdf_t bdf, uint32_t cap_id) { unsigned int reg = PCIE_CONF_EXT_CAPPTR; /* Start at end of the PCI configuration space */ uint32_t data; while (reg != 0U) { data = pcie_conf_read(bdf, reg); if (!data || data == 0xffffffffU) { return 0; } if (PCIE_CONF_EXT_CAP_ID(data) == cap_id) { break; } reg = PCIE_CONF_EXT_CAP_NEXT(data) >> 2; if (reg < PCIE_CONF_EXT_CAPPTR) { return 0; } } return reg; } /** * @brief Get the BAR at a specific BAR index * * @param bdf the PCI(e) endpoint * @param bar_index 0-based BAR index * @param bar Pointer to struct pcie_bar * @param io true for I/O BARs, false otherwise * 
@return true if the BAR was found and is valid, false otherwise */ static bool pcie_get_bar(pcie_bdf_t bdf, unsigned int bar_index, struct pcie_bar *bar, bool io) { uint32_t reg = bar_index + PCIE_CONF_BAR0; uint32_t cmd_reg; bool ret = false; #ifdef CONFIG_PCIE_CONTROLLER const struct device *dev; #endif uintptr_t phys_addr; size_t size; #ifdef CONFIG_PCIE_CONTROLLER dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller)); if (!dev) { LOG_ERR("Failed to get PCIe root complex"); return false; } #endif if (reg > PCIE_CONF_BAR5) { return false; } phys_addr = pcie_conf_read(bdf, reg); #ifndef CONFIG_PCIE_CONTROLLER if ((PCIE_CONF_BAR_MEM(phys_addr) && io) || (PCIE_CONF_BAR_IO(phys_addr) && !io)) { return false; } #endif if (PCIE_CONF_BAR_INVAL_FLAGS(phys_addr)) { /* Discard on invalid flags */ return false; } cmd_reg = pcie_conf_read(bdf, PCIE_CONF_CMDSTAT); /* IO/memory decode should be disabled before sizing/update BAR. */ pcie_conf_write(bdf, PCIE_CONF_CMDSTAT, cmd_reg & (~(PCIE_CONF_CMDSTAT_IO | PCIE_CONF_CMDSTAT_MEM))); pcie_conf_write(bdf, reg, 0xFFFFFFFFU); size = pcie_conf_read(bdf, reg); pcie_conf_write(bdf, reg, (uint32_t)phys_addr); if (IS_ENABLED(CONFIG_64BIT) && PCIE_CONF_BAR_64(phys_addr)) { reg++; phys_addr |= ((uint64_t)pcie_conf_read(bdf, reg)) << 32; if ((PCIE_CONF_BAR_ADDR(phys_addr) == PCIE_CONF_BAR_INVAL64) || (PCIE_CONF_BAR_ADDR(phys_addr) == PCIE_CONF_BAR_NONE)) { /* Discard on invalid address */ goto err_exit; } pcie_conf_write(bdf, reg, 0xFFFFFFFFU); size |= ((uint64_t)pcie_conf_read(bdf, reg)) << 32; pcie_conf_write(bdf, reg, (uint32_t)((uint64_t)phys_addr >> 32)); } else if ((PCIE_CONF_BAR_ADDR(phys_addr) == PCIE_CONF_BAR_INVAL) || (PCIE_CONF_BAR_ADDR(phys_addr) == PCIE_CONF_BAR_NONE)) { /* Discard on invalid address */ goto err_exit; } if (PCIE_CONF_BAR_IO(phys_addr)) { size = PCIE_CONF_BAR_IO_ADDR(size); if (size == 0) { /* Discard on invalid size */ goto err_exit; } } else { size = PCIE_CONF_BAR_ADDR(size); if (size == 0) { /* Discard on 
invalid size */ goto err_exit; } } #ifdef CONFIG_PCIE_CONTROLLER /* Translate to physical memory address from bus address */ if (!pcie_ctrl_region_translate(dev, bdf, PCIE_CONF_BAR_MEM(phys_addr), PCIE_CONF_BAR_64(phys_addr), PCIE_CONF_BAR_MEM(phys_addr) ? PCIE_CONF_BAR_ADDR(phys_addr) : PCIE_CONF_BAR_IO_ADDR(phys_addr), &bar->phys_addr)) { goto err_exit; } #else bar->phys_addr = PCIE_CONF_BAR_ADDR(phys_addr); #endif /* CONFIG_PCIE_CONTROLLER */ bar->size = size & ~(size-1); ret = true; err_exit: pcie_conf_write(bdf, PCIE_CONF_CMDSTAT, cmd_reg); return ret; } /** * @brief Probe the nth BAR assigned to an endpoint. * * A PCI(e) endpoint has 0 or more BARs. This function * allows the caller to enumerate them by calling with index=0..n. * Value of n has to be below 6, as there is a maximum of 6 BARs. The indices * are order-preserving with respect to the endpoint BARs: e.g., index 0 * will return the lowest-numbered BAR on the endpoint. * * @param bdf the PCI(e) endpoint * @param index (0-based) index * @param bar Pointer to struct pcie_bar * @param io true for I/O BARs, false otherwise * @return true if the BAR was found and is valid, false otherwise */ static bool pcie_probe_bar(pcie_bdf_t bdf, unsigned int index, struct pcie_bar *bar, bool io) { uint32_t reg; for (reg = PCIE_CONF_BAR0; (index > 0) && (reg <= PCIE_CONF_BAR5); reg++, index--) { uintptr_t addr = pcie_conf_read(bdf, reg); if (PCIE_CONF_BAR_MEM(addr) && PCIE_CONF_BAR_64(addr)) { reg++; } } if (index != 0) { return false; } return pcie_get_bar(bdf, reg - PCIE_CONF_BAR0, bar, io); } bool pcie_get_mbar(pcie_bdf_t bdf, unsigned int bar_index, struct pcie_bar *mbar) { return pcie_get_bar(bdf, bar_index, mbar, false); } bool pcie_probe_mbar(pcie_bdf_t bdf, unsigned int index, struct pcie_bar *mbar) { return pcie_probe_bar(bdf, index, mbar, false); } bool pcie_get_iobar(pcie_bdf_t bdf, unsigned int bar_index, struct pcie_bar *iobar) { return pcie_get_bar(bdf, bar_index, iobar, true); } bool 
pcie_probe_iobar(pcie_bdf_t bdf, unsigned int index, struct pcie_bar *iobar) { return pcie_probe_bar(bdf, index, iobar, true); } #ifndef CONFIG_PCIE_CONTROLLER unsigned int pcie_alloc_irq(pcie_bdf_t bdf) { unsigned int irq; uint32_t data; data = pcie_conf_read(bdf, PCIE_CONF_INTR); irq = PCIE_CONF_INTR_IRQ(data); if ((irq == PCIE_CONF_INTR_IRQ_NONE) || (irq >= CONFIG_MAX_IRQ_LINES) || arch_irq_is_used(irq)) { /* In some platforms, PCI interrupts are hardwired to specific interrupt inputs * on the interrupt controller and are not configurable. Hence we need to retrieve * IRQ from acpi. But if it is configurable then we allocate irq dynamically. */ #ifdef CONFIG_PCIE_PRT if (prt_en) { irq = acpi_legacy_irq_get(bdf); } else { irq = arch_irq_allocate(); } #else irq = arch_irq_allocate(); #endif if (irq == UINT_MAX) { return PCIE_CONF_INTR_IRQ_NONE; } data &= ~0xffU; data |= irq; pcie_conf_write(bdf, PCIE_CONF_INTR, data); } else { arch_irq_set_used(irq); } return irq; } #endif /* CONFIG_PCIE_CONTROLLER */ unsigned int pcie_get_irq(pcie_bdf_t bdf) { uint32_t data = pcie_conf_read(bdf, PCIE_CONF_INTR); return PCIE_CONF_INTR_IRQ(data); } bool pcie_connect_dynamic_irq(pcie_bdf_t bdf, unsigned int irq, unsigned int priority, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { #if defined(CONFIG_PCIE_MSI) && defined(CONFIG_PCIE_MSI_MULTI_VECTOR) if (pcie_is_msi(bdf)) { msi_vector_t vector; if ((pcie_msi_vectors_allocate(bdf, priority, &vector, 1) == 0) || !pcie_msi_vector_connect(bdf, &vector, routine, parameter, flags)) { return false; } } else #endif /* CONFIG_PCIE_MSI && CONFIG_PCIE_MSI_MULTI_VECTOR */ { if (irq_connect_dynamic(irq, priority, routine, parameter, flags) < 0) { return false; } } return true; } void pcie_irq_enable(pcie_bdf_t bdf, unsigned int irq) { #if CONFIG_PCIE_MSI if (pcie_msi_enable(bdf, NULL, 1, irq)) { return; } #endif irq_enable(irq); } static bool scan_flag(const struct pcie_scan_opt *opt, uint32_t flag) { return 
((opt->flags & flag) != 0U); } /* Forward declaration needed since scanning a device may reveal a bridge */ static bool scan_bus(uint8_t bus, const struct pcie_scan_opt *opt); static bool scan_dev(uint8_t bus, uint8_t dev, const struct pcie_scan_opt *opt) { for (uint8_t func = 0; func <= PCIE_MAX_FUNC; func++) { pcie_bdf_t bdf = PCIE_BDF(bus, dev, func); uint32_t secondary = 0; uint32_t id, type; bool do_cb; id = pcie_conf_read(bdf, PCIE_CONF_ID); if (!PCIE_ID_IS_VALID(id)) { continue; } type = pcie_conf_read(bdf, PCIE_CONF_TYPE); switch (PCIE_CONF_TYPE_GET(type)) { case PCIE_CONF_TYPE_STANDARD: do_cb = true; break; case PCIE_CONF_TYPE_PCI_BRIDGE: if (scan_flag(opt, PCIE_SCAN_RECURSIVE)) { uint32_t num = pcie_conf_read(bdf, PCIE_BUS_NUMBER); secondary = PCIE_BUS_SECONDARY_NUMBER(num); } __fallthrough; default: do_cb = scan_flag(opt, PCIE_SCAN_CB_ALL); break; } if (do_cb && !opt->cb(bdf, id, opt->cb_data)) { return false; } if (scan_flag(opt, PCIE_SCAN_RECURSIVE) && secondary != 0) { if (!scan_bus(secondary, opt)) { return false; } } /* Only function 0 is valid for non-multifunction devices */ if (func == 0 && !PCIE_CONF_MULTIFUNCTION(type)) { break; } } return true; } static bool scan_bus(uint8_t bus, const struct pcie_scan_opt *opt) { for (uint8_t dev = 0; dev <= PCIE_MAX_DEV; dev++) { if (!scan_dev(bus, dev, opt)) { return false; } } return true; } int pcie_scan(const struct pcie_scan_opt *opt) { uint32_t type; bool multi; CHECKIF(opt->cb == NULL) { return -EINVAL; } type = pcie_conf_read(PCIE_HOST_CONTROLLER(0), PCIE_CONF_TYPE); multi = PCIE_CONF_MULTIFUNCTION(type); if (opt->bus == 0 && scan_flag(opt, PCIE_SCAN_RECURSIVE) && multi) { /* Each function on the host controller represents a portential bus */ for (uint8_t bus = 0; bus <= PCIE_MAX_FUNC; bus++) { pcie_bdf_t bdf = PCIE_HOST_CONTROLLER(bus); if (pcie_conf_read(bdf, PCIE_CONF_ID) == PCIE_ID_NONE) { continue; } if (!scan_bus(bus, opt)) { break; } } } else { /* Single PCI host controller */ 
scan_bus(opt->bus, opt); } return 0; } struct scan_data { size_t found; size_t max_dev; }; static bool pcie_dev_cb(pcie_bdf_t bdf, pcie_id_t id, void *cb_data) { struct scan_data *data = cb_data; STRUCT_SECTION_FOREACH(pcie_dev, dev) { if (dev->bdf != PCIE_BDF_NONE) { continue; } if (dev->id != id) { continue; } uint32_t class_rev = pcie_conf_read(bdf, PCIE_CONF_CLASSREV); if (dev->class_rev == (class_rev & dev->class_rev_mask)) { dev->bdf = bdf; dev->class_rev = class_rev; data->found++; break; } } /* Continue if we've not yet found all devices */ return (data->found != data->max_dev); } static int pcie_init(void) { struct scan_data data; struct pcie_scan_opt opt = { .cb = pcie_dev_cb, .cb_data = &data, .flags = PCIE_SCAN_RECURSIVE, }; #ifdef CONFIG_PCIE_PRT const char *hid, *uid = ACPI_DT_UID(DT_DRV_INST(0)); int ret; BUILD_ASSERT(ACPI_DT_HAS_HID(DT_DRV_INST(0)), "No HID property for PCIe devicetree node"); hid = ACPI_DT_HID(DT_DRV_INST(0)); ret = acpi_legacy_irq_init(hid, uid); if (!ret) { prt_en = true; } else { __ASSERT(ret == -ENOENT, "Error retrieve interrupt routing table!"); } #endif STRUCT_SECTION_COUNT(pcie_dev, &data.max_dev); /* Don't bother calling pcie_scan() if there are no devices to look for */ if (data.max_dev == 0) { return 0; } data.found = 0; pcie_scan(&opt); return 0; } /* * If a pcie controller is employed, pcie_scan() depends on it for working. * Thus, pcie must be bumped to the next level */ #ifdef CONFIG_PCIE_CONTROLLER #define PCIE_SYS_INIT_LEVEL PRE_KERNEL_2 #else #define PCIE_SYS_INIT_LEVEL PRE_KERNEL_1 #endif SYS_INIT(pcie_init, PCIE_SYS_INIT_LEVEL, CONFIG_PCIE_INIT_PRIORITY); ```
/content/code_sandbox/drivers/pcie/host/pcie.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,762
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_PCIE_HOST_PTM_H_ #define ZEPHYR_DRIVERS_PCIE_HOST_PTM_H_ #include <zephyr/drivers/pcie/pcie.h> #include <zephyr/drivers/pcie/cap.h> #define PTM_CAP_REG_OFFSET 0x04U union ptm_cap_reg { struct { uint32_t requester : 1; uint32_t responder : 1; uint32_t root : 1; uint32_t _reserved1 : 5; uint32_t local_clock_granularity : 8; uint32_t _reserved2 : 16; }; uint32_t raw; }; #define PTM_CTRL_REG_OFFSET 0x08U union ptm_ctrl_reg { struct { uint32_t ptm_enable : 1; uint32_t root_select : 1; uint32_t _reserved1 : 6; uint32_t effective_granularity : 8; uint32_t _reserved2 : 16; }; uint32_t raw; }; struct pcie_ptm_root_config { struct pcie_dev *pcie; }; #endif /* ZEPHYR_DRIVERS_PCIE_HOST_PTM_H_ */ ```
/content/code_sandbox/drivers/pcie/host/ptm.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
271
```c /* * */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(pcie_ecam, LOG_LEVEL_ERR); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/pcie/pcie.h> #include <zephyr/drivers/pcie/controller.h> #ifdef CONFIG_GIC_V3_ITS #include <zephyr/drivers/interrupt_controller/gicv3_its.h> #endif #define DT_DRV_COMPAT pci_host_ecam_generic /* * PCIe Controllers Regions * * TOFIX: * - handle prefetchable regions */ enum pcie_region_type { PCIE_REGION_IO = 0, PCIE_REGION_MEM, PCIE_REGION_MEM64, PCIE_REGION_MAX, }; struct pcie_ecam_data { uintptr_t cfg_phys_addr; mm_reg_t cfg_addr; size_t cfg_size; struct { uintptr_t phys_start; uintptr_t bus_start; size_t size; size_t allocation_offset; } regions[PCIE_REGION_MAX]; }; static int pcie_ecam_init(const struct device *dev) { const struct pcie_ctrl_config *cfg = dev->config; struct pcie_ecam_data *data = dev->data; int i; /* * Flags defined in the PCI Bus Binding to IEEE Std 1275-1994 : * Bit# 33222222 22221111 11111100 00000000 * 10987654 32109876 54321098 76543210 * * phys.hi cell: npt000ss bbbbbbbb dddddfff rrrrrrrr * phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh * phys.lo cell: llllllll llllllll llllllll llllllll * * where: * * n is 0 if the address is relocatable, 1 otherwise * p is 1 if the addressable region is "prefetchable", 0 otherwise * t is 1 if the address is aliased (for non-relocatable I/O), below 1 MB (for Memory), * or below 64 KB (for relocatable I/O). 
* ss is the space code, denoting the address space * 00 denotes Configuration Space * 01 denotes I/O Space * 10 denotes 32-bit-address Memory Space * 11 denotes 64-bit-address Memory Space * bbbbbbbb is the 8-bit Bus Number * ddddd is the 5-bit Device Number * fff is the 3-bit Function Number * rrrrrrrr is the 8-bit Register Number * hh...hh is a 32-bit unsigned number * ll...ll is a 32-bit unsigned number * for I/O Space is the 32-bit offset from the start of the region * for 32-bit-address Memory Space is the 32-bit offset from the start of the region * for 64-bit-address Memory Space is the 64-bit offset from the start of the region * * Here we only handle the p, ss, hh and ll fields. * * TOFIX: * - handle prefetchable bit */ for (i = 0 ; i < cfg->ranges_count ; ++i) { switch ((cfg->ranges[i].flags >> 24) & 0x03) { case 0x01: data->regions[PCIE_REGION_IO].bus_start = cfg->ranges[i].pcie_bus_addr; data->regions[PCIE_REGION_IO].phys_start = cfg->ranges[i].host_map_addr; data->regions[PCIE_REGION_IO].size = cfg->ranges[i].map_length; /* Linux & U-Boot avoids allocating PCI resources from address 0 */ if (data->regions[PCIE_REGION_IO].bus_start < 0x1000) { data->regions[PCIE_REGION_IO].allocation_offset = 0x1000; } break; case 0x02: data->regions[PCIE_REGION_MEM].bus_start = cfg->ranges[i].pcie_bus_addr; data->regions[PCIE_REGION_MEM].phys_start = cfg->ranges[i].host_map_addr; data->regions[PCIE_REGION_MEM].size = cfg->ranges[i].map_length; /* Linux & U-Boot avoids allocating PCI resources from address 0 */ if (data->regions[PCIE_REGION_MEM].bus_start < 0x1000) { data->regions[PCIE_REGION_MEM].allocation_offset = 0x1000; } break; case 0x03: data->regions[PCIE_REGION_MEM64].bus_start = cfg->ranges[i].pcie_bus_addr; data->regions[PCIE_REGION_MEM64].phys_start = cfg->ranges[i].host_map_addr; data->regions[PCIE_REGION_MEM64].size = cfg->ranges[i].map_length; /* Linux & U-Boot avoids allocating PCI resources from address 0 */ if 
(data->regions[PCIE_REGION_MEM64].bus_start < 0x1000) { data->regions[PCIE_REGION_MEM64].allocation_offset = 0x1000; } break; } } if (!data->regions[PCIE_REGION_IO].size && !data->regions[PCIE_REGION_MEM].size && !data->regions[PCIE_REGION_MEM64].size) { LOG_ERR("No regions defined"); return -EINVAL; } /* Get Config address space physical address & size */ data->cfg_phys_addr = cfg->cfg_addr; data->cfg_size = cfg->cfg_size; if (data->regions[PCIE_REGION_IO].size) { LOG_DBG("IO bus [0x%lx - 0x%lx, size 0x%lx]", data->regions[PCIE_REGION_IO].bus_start, (data->regions[PCIE_REGION_IO].bus_start + data->regions[PCIE_REGION_IO].size - 1), data->regions[PCIE_REGION_IO].size); LOG_DBG("IO space [0x%lx - 0x%lx, size 0x%lx]", data->regions[PCIE_REGION_IO].phys_start, (data->regions[PCIE_REGION_IO].phys_start + data->regions[PCIE_REGION_IO].size - 1), data->regions[PCIE_REGION_IO].size); } if (data->regions[PCIE_REGION_MEM].size) { LOG_DBG("MEM bus [0x%lx - 0x%lx, size 0x%lx]", data->regions[PCIE_REGION_MEM].bus_start, (data->regions[PCIE_REGION_MEM].bus_start + data->regions[PCIE_REGION_MEM].size - 1), data->regions[PCIE_REGION_MEM].size); LOG_DBG("MEM space [0x%lx - 0x%lx, size 0x%lx]", data->regions[PCIE_REGION_MEM].phys_start, (data->regions[PCIE_REGION_MEM].phys_start + data->regions[PCIE_REGION_MEM].size - 1), data->regions[PCIE_REGION_MEM].size); } if (data->regions[PCIE_REGION_MEM64].size) { LOG_DBG("MEM64 bus [0x%lx - 0x%lx, size 0x%lx]", data->regions[PCIE_REGION_MEM64].bus_start, (data->regions[PCIE_REGION_MEM64].bus_start + data->regions[PCIE_REGION_MEM64].size - 1), data->regions[PCIE_REGION_MEM64].size); LOG_DBG("MEM64 space [0x%lx - 0x%lx, size 0x%lx]", data->regions[PCIE_REGION_MEM64].phys_start, (data->regions[PCIE_REGION_MEM64].phys_start + data->regions[PCIE_REGION_MEM64].size - 1), data->regions[PCIE_REGION_MEM64].size); } /* Map config space to be used by the pcie_generic_ctrl_conf_read/write callbacks */ device_map(&data->cfg_addr, data->cfg_phys_addr, 
data->cfg_size, K_MEM_CACHE_NONE); LOG_DBG("Config space [0x%lx - 0x%lx, size 0x%lx]", data->cfg_phys_addr, (data->cfg_phys_addr + data->cfg_size - 1), data->cfg_size); LOG_DBG("Config mapped [0x%lx - 0x%lx, size 0x%lx]", data->cfg_addr, (data->cfg_addr + data->cfg_size - 1), data->cfg_size); pcie_generic_ctrl_enumerate(dev, PCIE_BDF(0, 0, 0)); return 0; } static uint32_t pcie_ecam_ctrl_conf_read(const struct device *dev, pcie_bdf_t bdf, unsigned int reg) { struct pcie_ecam_data *data = dev->data; return pcie_generic_ctrl_conf_read(data->cfg_addr, bdf, reg); } static void pcie_ecam_ctrl_conf_write(const struct device *dev, pcie_bdf_t bdf, unsigned int reg, uint32_t reg_data) { struct pcie_ecam_data *data = dev->data; pcie_generic_ctrl_conf_write(data->cfg_addr, bdf, reg, reg_data); } static bool pcie_ecam_region_allocate_type(struct pcie_ecam_data *data, pcie_bdf_t bdf, size_t bar_size, uintptr_t *bar_bus_addr, enum pcie_region_type type) { uintptr_t addr; addr = (((data->regions[type].bus_start + data->regions[type].allocation_offset) - 1) | ((bar_size) - 1)) + 1; if (addr - data->regions[type].bus_start + bar_size > data->regions[type].size) { return false; } *bar_bus_addr = addr; data->regions[type].allocation_offset = addr - data->regions[type].bus_start + bar_size; return true; } static bool pcie_ecam_region_allocate(const struct device *dev, pcie_bdf_t bdf, bool mem, bool mem64, size_t bar_size, uintptr_t *bar_bus_addr) { struct pcie_ecam_data *data = dev->data; enum pcie_region_type type; if (mem && !data->regions[PCIE_REGION_MEM64].size && !data->regions[PCIE_REGION_MEM].size) { LOG_DBG("bdf %x no mem region defined for allocation", bdf); return false; } if (!mem && !data->regions[PCIE_REGION_IO].size) { LOG_DBG("bdf %x no io region defined for allocation", bdf); return false; } /* * Allocate into mem64 region if available or is the only available * * TOFIX: * - handle allocation from/to mem/mem64 when a region is full */ if (mem && ((mem64 && 
data->regions[PCIE_REGION_MEM64].size) || (data->regions[PCIE_REGION_MEM64].size && !data->regions[PCIE_REGION_MEM].size))) { type = PCIE_REGION_MEM64; } else if (mem) { type = PCIE_REGION_MEM; } else { type = PCIE_REGION_IO; } return pcie_ecam_region_allocate_type(data, bdf, bar_size, bar_bus_addr, type); } static bool pcie_ecam_region_get_allocate_base(const struct device *dev, pcie_bdf_t bdf, bool mem, bool mem64, size_t align, uintptr_t *bar_base_addr) { struct pcie_ecam_data *data = (struct pcie_ecam_data *)dev->data; enum pcie_region_type type; if (mem && !data->regions[PCIE_REGION_MEM64].size && !data->regions[PCIE_REGION_MEM].size) { LOG_DBG("bdf %x no mem region defined for allocation", bdf); return false; } if (!mem && !data->regions[PCIE_REGION_IO].size) { LOG_DBG("bdf %x no io region defined for allocation", bdf); return false; } /* * Allocate into mem64 region if available or is the only available * * TOFIX: * - handle allocation from/to mem/mem64 when a region is full */ if (mem && ((mem64 && data->regions[PCIE_REGION_MEM64].size) || (data->regions[PCIE_REGION_MEM64].size && !data->regions[PCIE_REGION_MEM].size))) { type = PCIE_REGION_MEM64; } else if (mem) { type = PCIE_REGION_MEM; } else { type = PCIE_REGION_IO; } *bar_base_addr = (((data->regions[type].bus_start + data->regions[type].allocation_offset) - 1) | ((align) - 1)) + 1; return true; } static bool pcie_ecam_region_translate(const struct device *dev, pcie_bdf_t bdf, bool mem, bool mem64, uintptr_t bar_bus_addr, uintptr_t *bar_addr) { struct pcie_ecam_data *data = dev->data; enum pcie_region_type type; /* Means it hasn't been allocated */ if (!bar_bus_addr) { return false; } if (mem && ((mem64 && data->regions[PCIE_REGION_MEM64].size) || (data->regions[PCIE_REGION_MEM64].size && !data->regions[PCIE_REGION_MEM].size))) { type = PCIE_REGION_MEM64; } else if (mem) { type = PCIE_REGION_MEM; } else { type = PCIE_REGION_IO; } *bar_addr = data->regions[type].phys_start + (bar_bus_addr - 
data->regions[type].bus_start); return true; } #if CONFIG_PCIE_MSI static uint8_t pcie_ecam_msi_device_setup(const struct device *dev, unsigned int priority, msi_vector_t *vectors, uint8_t n_vector) { #ifdef CONFIG_GIC_V3_ITS const struct pcie_ctrl_config *cfg = (const struct pcie_ctrl_config *)dev->config; unsigned int device_id; pcie_bdf_t bdf; int ret, i; if (!n_vector) { return 0; } bdf = vectors[0].bdf; /* We do not support allocating vectors for multiple BDFs for now, * This would need tracking vectors already allocated for a BDF and * re-allocating a proper table in ITS for each BDF since we can't be * sure more vectors for each BDF will be allocated later. * Simply bail-out if it's the case here. */ for (i = 1; i < n_vector; i++) { if (vectors[i].bdf != bdf) { LOG_ERR("Multiple BDFs in a single MSI vector allocation isn't supported"); return 0; } } device_id = PCI_BDF_TO_DEVID(bdf); ret = its_setup_deviceid(cfg->msi_parent, device_id, n_vector); if (ret) { return 0; } for (i = 0; i < n_vector; i++) { vectors[i].arch.irq = its_alloc_intid(cfg->msi_parent); vectors[i].arch.address = its_get_msi_addr(cfg->msi_parent); vectors[i].arch.eventid = i; vectors[i].arch.priority = priority; ret = its_map_intid(cfg->msi_parent, device_id, vectors[i].arch.eventid, vectors[i].arch.irq); if (ret) { break; } } return i; #else return 0; #endif } #endif static const struct pcie_ctrl_driver_api pcie_ecam_api = { .conf_read = pcie_ecam_ctrl_conf_read, .conf_write = pcie_ecam_ctrl_conf_write, .region_allocate = pcie_ecam_region_allocate, .region_get_allocate_base = pcie_ecam_region_get_allocate_base, .region_translate = pcie_ecam_region_translate, #if CONFIG_PCIE_MSI .msi_device_setup = pcie_ecam_msi_device_setup, #endif }; #if CONFIG_PCIE_MSI #define DEVICE_DT_GET_MSI_PARENT(n) \ .msi_parent = DEVICE_DT_GET(DT_PHANDLE(DT_DRV_INST(n), msi_parent)), #else #define DEVICE_DT_GET_MSI_PARENT(n) #endif #define PCIE_ECAM_INIT(n) \ static struct pcie_ecam_data pcie_ecam_data##n; \ 
static const struct pcie_ctrl_config pcie_ecam_config##n = { \ DEVICE_DT_GET_MSI_PARENT(n) \ .cfg_addr = DT_INST_REG_ADDR(n), \ .cfg_size = DT_INST_REG_SIZE(n), \ .ranges_count = DT_NUM_RANGES(DT_DRV_INST(n)), \ .ranges = { \ DT_FOREACH_RANGE(DT_DRV_INST(n), PCIE_RANGE_FORMAT) \ }, \ }; \ DEVICE_DT_INST_DEFINE(n, &pcie_ecam_init, NULL, \ &pcie_ecam_data##n, \ &pcie_ecam_config##n, \ PRE_KERNEL_1, \ CONFIG_PCIE_INIT_PRIORITY, \ &pcie_ecam_api); DT_INST_FOREACH_STATUS_OKAY(PCIE_ECAM_INIT) ```
/content/code_sandbox/drivers/pcie/host/pcie_ecam.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,921
```c /* * */ #include <errno.h> #include <zephyr/drivers/pcie/vc.h> #include <zephyr/drivers/pcie/cap.h> #include "vc.h" uint32_t pcie_vc_cap_lookup(pcie_bdf_t bdf, struct pcie_vc_regs *regs) { uint32_t base; base = pcie_get_ext_cap(bdf, PCIE_EXT_CAP_ID_VC); if (base == 0) { base = pcie_get_ext_cap(bdf, PCIE_EXT_CAP_ID_MFVC_VC); if (base == 0) { return 0; } } regs->cap_reg_1.raw = pcie_conf_read(bdf, base + PCIE_VC_CAP_REG_1_OFFSET); regs->cap_reg_2.raw = pcie_conf_read(bdf, base + PCIE_VC_CAP_REG_2_OFFSET); regs->ctrl_reg.raw = pcie_conf_read(bdf, base + PCIE_VC_CTRL_STATUS_REG_OFFSET); return base; } void pcie_vc_load_resources_regs(pcie_bdf_t bdf, uint32_t base, struct pcie_vc_resource_regs *regs, int nb_regs) { int idx; for (idx = 0; idx < nb_regs; idx++) { regs->cap_reg.raw = pcie_conf_read(bdf, base + PCIE_VC_RES_CAP_REG_OFFSET(idx)); regs->ctrl_reg.raw = pcie_conf_read(bdf, base + PCIE_VC_RES_CTRL_REG_OFFSET(idx)); regs->status_reg.raw = pcie_conf_read(bdf, base + PCIE_VC_RES_STATUS_REG_OFFSET(idx)); regs++; } } static int get_vc_registers(pcie_bdf_t bdf, struct pcie_vc_regs *regs, struct pcie_vc_resource_regs *res_regs) { uint32_t base; base = pcie_vc_cap_lookup(bdf, regs); if (base == 0) { return -ENOTSUP; } if (regs->cap_reg_1.vc_count == 0) { /* Having only VC0 is like having no real VC */ return -ENOTSUP; } pcie_vc_load_resources_regs(bdf, base, res_regs, regs->cap_reg_1.vc_count + 1); return 0; } int pcie_vc_enable(pcie_bdf_t bdf) { struct pcie_vc_regs regs; struct pcie_vc_resource_regs res_regs[PCIE_VC_MAX_COUNT]; int idx; if (get_vc_registers(bdf, &regs, res_regs) != 0) { return -ENOTSUP; } /* We do not touch VC0: it is always on */ for (idx = 1; idx < regs.cap_reg_1.vc_count + 1; idx++) { if (idx > 0 && res_regs[idx].ctrl_reg.vc_enable == 1) { /* * VC has not been disabled properly, if at all? 
*/ return -EALREADY; } res_regs[idx].ctrl_reg.vc_enable = 1; } return 0; } int pcie_vc_disable(pcie_bdf_t bdf) { struct pcie_vc_regs regs; struct pcie_vc_resource_regs res_regs[PCIE_VC_MAX_COUNT]; int idx; if (get_vc_registers(bdf, &regs, res_regs) != 0) { return -ENOTSUP; } /* We do not touch VC0: it is always on */ for (idx = 1; idx < regs.cap_reg_1.vc_count + 1; idx++) { /* Let's wait for the pending negotiation to end */ while (res_regs[idx].status_reg.vc_negocation_pending == 1) { k_msleep(10); } res_regs[idx].ctrl_reg.vc_enable = 0; } return 0; } int pcie_vc_map_tc(pcie_bdf_t bdf, struct pcie_vctc_map *map) { struct pcie_vc_regs regs; struct pcie_vc_resource_regs res_regs[PCIE_VC_MAX_COUNT]; int idx; uint8_t tc_mapped = 0; if (get_vc_registers(bdf, &regs, res_regs) != 0) { return -ENOTSUP; } /* Map must relate to the actual VC count */ if (regs.cap_reg_1.vc_count != map->vc_count) { return -EINVAL; } /* Veryfying that map is sane */ for (idx = 0; idx < map->vc_count; idx++) { if (idx == 0 && !(map->vc_tc[idx] & PCIE_VC_SET_TC0)) { /* TC0 is on VC0 and cannot be unset */ return -EINVAL; } /* Each TC must appear only once in the map */ if (tc_mapped & map->vc_tc[idx]) { return -EINVAL; } tc_mapped |= map->vc_tc[idx]; } for (idx = 0; idx < regs.cap_reg_1.vc_count + 1; idx++) { /* Let's just set the VC ID to related index for now */ if (idx > 0) { res_regs[idx].ctrl_reg.vc_id = idx; } /* Currently, only HW round robin is used */ res_regs[idx].ctrl_reg.pa_select = PCIE_VC_PA_RR; res_regs[idx].ctrl_reg.tc_vc_map = map->vc_tc[idx]; } return 0; } ```
/content/code_sandbox/drivers/pcie/host/vc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,209
```unknown # PCIe/new PCI configuration options menuconfig PCIE bool "New PCI/PCIe Root Complex support" help This option enables support for new PCI(e) drivers. if PCIE module = PCIE module-str = pcie source "subsys/logging/Kconfig.template.log_config" config PCIE_INIT_PRIORITY int "PCIe initialization priority" default 41 help PCIe host drivers initialization priority. config PCIE_CONTROLLER bool "PCIe Controller management" help Add support for PCIe Controller management when not handled by a system firmware like on x86 platforms. if PCIE_CONTROLLER config PCIE_ECAM bool "Support for PCIe ECAM Controllers" help Add support for Enhanced Configuration Address Mapping configured PCIe Controllers allowing all outgoing I/O and MEM TLPs to be mapped from memory space into any 256 MB region of the PCIe configuration space. endif # PCIE_CONTROLLER config PCIE_MSI bool "Support for PCI(e) MSI" help Use Message-Signaled Interrupts where possible. With this option enabled, PCI(e) devices which support MSI will be configured (at runtime) to use them. This is typically required for PCIe devices to generate interrupts at all. if PCIE_MSI config PCIE_MSI_MULTI_VECTOR bool "MSI multi-vector support" help MSI can support up to 32 different messages. This will enable the support of such capability so each message can get a vector assigned to it. This will require for the selected architecture to provide the necessary logic to make it work. Disable this if the vectors cannot be managed by the hardware or if none of the peripheral require this. config PCIE_MSI_X bool "MSI-X support" help If one or more device support MSI-X, you'll need to enable this. If a device exposes support for both MSI-X and MSI, MSI-X will be used and MSI disabled on that device. Enable PCIE_MSI_MULTI_VECTOR if you want to support multi-vector on MSI-X as well. 
endif # PCIE_MSI config PCIE_PTM bool "Support for PCI(e) Precision Time Management (PTM)" help This will enable support both PTM root and PTM requester features. Up to the PCIe device driver to enable its PTM requester capability. config PCIE_PRT bool "Support for IRQ information retrieve via ACPI PRT (PCI Routing Table)" default y if ACPI_DSDT_SUPPORT help This will enable retrieve interrupt routing information for PCI legacy interrupt via ACPI PRT (PCI Routing Table) config PCIE_SHELL bool "PCIe/new PCI Shell" depends on SHELL help Enable commands for debugging PCI(e) using the built-in shell. endif # PCIE ```
/content/code_sandbox/drivers/pcie/host/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
606
```c /* * */ #define LOG_LEVEL CONFIG_PCIE_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(pcie); #include <errno.h> #include <zephyr/kernel.h> #include <soc.h> #include <zephyr/device.h> #include <zephyr/init.h> #define DT_DRV_COMPAT ptm_root #include <zephyr/drivers/pcie/pcie.h> #include "ptm.h" static int pcie_ptm_root_setup(const struct device *dev, uint32_t base) { const struct pcie_ptm_root_config *config = dev->config; union ptm_cap_reg cap; union ptm_ctrl_reg ctrl; cap.raw = pcie_conf_read(config->pcie->bdf, base + PTM_CAP_REG_OFFSET); if ((cap.root == 0) || ((cap.root == 1) && (cap.responder == 0))) { LOG_ERR("PTM root not supported on 0x%x", config->pcie->bdf); return -ENOTSUP; } ctrl.ptm_enable = 1; ctrl.root_select = 1; pcie_conf_write(config->pcie->bdf, base + PTM_CTRL_REG_OFFSET, ctrl.raw); LOG_DBG("PTM root 0x%x enabled", config->pcie->bdf); return 0; } static int pcie_ptm_root_init(const struct device *dev) { const struct pcie_ptm_root_config *config = dev->config; uint32_t reg; reg = pcie_get_ext_cap(config->pcie->bdf, PCIE_EXT_CAP_ID_PTM); if (reg == 0) { LOG_ERR("PTM capability not exposed on 0x%x", config->pcie->bdf); return -ENODEV; } return pcie_ptm_root_setup(dev, reg); } #define PCIE_PTM_ROOT_INIT(index) \ DEVICE_PCIE_INST_DECLARE(index); \ static const struct pcie_ptm_root_config ptm_config_##index = { \ DEVICE_PCIE_INST_INIT(index, pcie), \ }; \ DEVICE_DT_INST_DEFINE(index, &pcie_ptm_root_init, NULL, NULL, \ &ptm_config_##index, PRE_KERNEL_1, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, NULL); DT_INST_FOREACH_STATUS_OKAY(PCIE_PTM_ROOT_INIT) bool pcie_ptm_enable(pcie_bdf_t bdf) { uint32_t base; union ptm_cap_reg cap; union ptm_ctrl_reg ctrl; base = pcie_get_ext_cap(bdf, PCIE_EXT_CAP_ID_PTM); if (base == 0) { LOG_ERR("PTM capability not exposed on 0x%x", bdf); return false; } cap.raw = pcie_conf_read(bdf, base + PTM_CAP_REG_OFFSET); if (cap.requester == 0) { LOG_ERR("PTM requester not supported on 0x%x", bdf); return false; } 
ctrl.ptm_enable = 1; pcie_conf_write(bdf, base + PTM_CTRL_REG_OFFSET, ctrl.raw); LOG_DBG("PTM requester 0x%x enabled", bdf); return true; } ```
/content/code_sandbox/drivers/pcie/host/ptm.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
689
```objective-c /** @file * @brief Modem socket header file. * * Generic modem socket and packet size implementation for modem context */ /* * */ #ifndef ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_SOCKET_H_ #define ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_SOCKET_H_ #include <zephyr/kernel.h> #include <zephyr/net/net_ip.h> #include <zephyr/net/socket.h> #include "sockets_internal.h" #ifdef __cplusplus extern "C" { #endif __net_socket struct modem_socket { sa_family_t family; enum net_sock_type type; int ip_proto; struct sockaddr src; struct sockaddr dst; /** The number identifying the socket handle inside the modem */ int id; /** The file descriptor identifying the socket in the fdtable */ int sock_fd; /** packet data */ uint16_t packet_sizes[CONFIG_MODEM_SOCKET_PACKET_COUNT]; uint16_t packet_count; /** data ready semaphore */ struct k_sem sem_data_ready; /** data ready poll signal */ struct k_poll_signal sig_data_ready; /** socket state */ bool is_connected; bool is_waiting; /** temporary socket data */ void *data; }; struct modem_socket_config { struct modem_socket *sockets; size_t sockets_len; /* beginning socket id (modems can set this to 0 or 1 as needed) */ int base_socket_id; /* dynamically assign id when modem socket is allocated */ bool assign_id; struct k_sem sem_lock; const struct socket_op_vtable *vtable; }; /* return size of the first packet */ uint16_t modem_socket_next_packet_size(struct modem_socket_config *cfg, struct modem_socket *sock); int modem_socket_packet_size_update(struct modem_socket_config *cfg, struct modem_socket *sock, int new_total); int modem_socket_get(struct modem_socket_config *cfg, int family, int type, int proto); struct modem_socket *modem_socket_from_fd(struct modem_socket_config *cfg, int sock_fd); struct modem_socket *modem_socket_from_id(struct modem_socket_config *cfg, int id); struct modem_socket *modem_socket_from_newid(struct modem_socket_config *cfg); void modem_socket_put(struct modem_socket_config *cfg, int sock_fd); int 
modem_socket_poll(struct modem_socket_config *cfg, struct zsock_pollfd *fds, int nfds, int msecs); int modem_socket_poll_update(struct modem_socket *sock, struct zsock_pollfd *pfd, struct k_poll_event **pev); int modem_socket_poll_prepare(struct modem_socket_config *cfg, struct modem_socket *sock, struct zsock_pollfd *pfd, struct k_poll_event **pev, struct k_poll_event *pev_end); void modem_socket_wait_data(struct modem_socket_config *cfg, struct modem_socket *sock); void modem_socket_data_ready(struct modem_socket_config *cfg, struct modem_socket *sock); /** * @brief Initialize modem socket config struct and associated modem sockets * * @param cfg The config to initialize * @param sockets The array of sockets associated with the modem socket config * @param sockets_len The length of the array of sockets associated with the modem socket config * @param base_socket_id The lowest socket id supported by the modem * @param assign_id Dynamically assign modem socket id when allocated using modem_socket_get() * @param vtable Socket API implementation used by this config and associated sockets * * @return -EINVAL if any argument is invalid * @return 0 if successful */ int modem_socket_init(struct modem_socket_config *cfg, struct modem_socket *sockets, size_t sockets_len, int base_socket_id, bool assign_id, const struct socket_op_vtable *vtable); /** * @brief Check if modem socket has been allocated * * @details A modem socket is allocated after a successful invocation of modem_socket_get, and * released after a successful invocation of modem_socket_put. * * @note If socket id is automatically assigned, the socket id will be a value between * base_socket_id and (base_socket_id + socket_len). * Otherwise, the socket id will be assigned to (base_socket_id + socket_len) when allocated. 
* * @param cfg The modem socket config which the modem socket belongs to * @param sock The modem socket which is checked * * @return true if the socket has been allocated * @return false if the socket has not been allocated */ bool modem_socket_is_allocated(const struct modem_socket_config *cfg, const struct modem_socket *sock); /** * @brief Check if modem socket id has been assigned * * @note An assigned modem socket will have an id between base_socket_id and * (base_socket_id + socket_len). * * @param cfg The modem socket config which the modem socket belongs to * @param sock The modem socket for which the id is checked * * @return true if the socket id is been assigned * @return false if the socket has not been assigned */ bool modem_socket_id_is_assigned(const struct modem_socket_config *cfg, const struct modem_socket *sock); /** * @brief Assign id to modem socket * * @param cfg The modem socket config which the modem socket belongs to * @param sock The modem socket for which the id will be assigned * @param id The id to assign to the modem socket * * @return -EPERM if id has been assigned previously * @return -EINVAL if id is invalid * @return 0 if successful */ int modem_socket_id_assign(const struct modem_socket_config *cfg, struct modem_socket *sock, int id); #ifdef __cplusplus } #endif #endif /* ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_SOCKET_H_ */ ```
/content/code_sandbox/drivers/modem/modem_socket.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,195
```c /* * */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(pcie_core, LOG_LEVEL_INF); #include <zephyr/kernel.h> #include <zephyr/drivers/pcie/pcie.h> #include <zephyr/drivers/pcie/controller.h> #if CONFIG_PCIE_MSI #include <zephyr/drivers/pcie/msi.h> #endif /* arch agnostic PCIe API implementation */ uint32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg) { const struct device *dev; dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller)); if (!dev) { LOG_ERR("Failed to get PCIe root complex"); return 0xffffffff; } return pcie_ctrl_conf_read(dev, bdf, reg); } void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, uint32_t data) { const struct device *dev; dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller)); if (!dev) { LOG_ERR("Failed to get PCIe root complex"); return; } pcie_ctrl_conf_write(dev, bdf, reg, data); } uint32_t pcie_generic_ctrl_conf_read(mm_reg_t cfg_addr, pcie_bdf_t bdf, unsigned int reg) { volatile uint32_t *bdf_cfg_mem = (volatile uint32_t *)((uintptr_t)cfg_addr + (bdf << 4)); if (!cfg_addr) { return 0xffffffff; } return bdf_cfg_mem[reg]; } void pcie_generic_ctrl_conf_write(mm_reg_t cfg_addr, pcie_bdf_t bdf, unsigned int reg, uint32_t data) { volatile uint32_t *bdf_cfg_mem = (volatile uint32_t *)((uintptr_t)cfg_addr + (bdf << 4)); if (!cfg_addr) { return; } bdf_cfg_mem[reg] = data; } static void pcie_generic_ctrl_enumerate_bars(const struct device *ctrl_dev, pcie_bdf_t bdf, unsigned int nbars) { unsigned int bar, reg, data; uintptr_t scratch, bar_bus_addr; size_t size, bar_size; for (bar = 0, reg = PCIE_CONF_BAR0; bar < nbars && reg <= PCIE_CONF_BAR5; reg ++, bar++) { bool found_mem64 = false; bool found_mem = false; data = scratch = pcie_conf_read(bdf, reg); if (PCIE_CONF_BAR_INVAL_FLAGS(data)) { continue; } if (PCIE_CONF_BAR_MEM(data)) { found_mem = true; if (PCIE_CONF_BAR_64(data)) { found_mem64 = true; scratch |= ((uint64_t)pcie_conf_read(bdf, reg + 1)) << 32; if (PCIE_CONF_BAR_ADDR(scratch) == PCIE_CONF_BAR_INVAL64) { continue; } } 
else { if (PCIE_CONF_BAR_ADDR(scratch) == PCIE_CONF_BAR_INVAL) { continue; } } } pcie_conf_write(bdf, reg, 0xFFFFFFFF); size = pcie_conf_read(bdf, reg); pcie_conf_write(bdf, reg, scratch & 0xFFFFFFFF); if (found_mem64) { pcie_conf_write(bdf, reg + 1, 0xFFFFFFFF); size |= ((uint64_t)pcie_conf_read(bdf, reg + 1)) << 32; pcie_conf_write(bdf, reg + 1, scratch >> 32); } if (!PCIE_CONF_BAR_ADDR(size)) { if (found_mem64) { reg++; } continue; } if (found_mem) { if (found_mem64) { bar_size = (uint64_t)~PCIE_CONF_BAR_ADDR(size) + 1; } else { bar_size = (uint32_t)~PCIE_CONF_BAR_ADDR(size) + 1; } } else { bar_size = (uint32_t)~PCIE_CONF_BAR_IO_ADDR(size) + 1; } if (pcie_ctrl_region_allocate(ctrl_dev, bdf, found_mem, found_mem64, bar_size, &bar_bus_addr)) { uintptr_t bar_phys_addr; pcie_ctrl_region_translate(ctrl_dev, bdf, found_mem, found_mem64, bar_bus_addr, &bar_phys_addr); LOG_INF("[%02x:%02x.%x] BAR%d size 0x%lx " "assigned [%s 0x%lx-0x%lx -> 0x%lx-0x%lx]", PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf), bar, bar_size, found_mem ? (found_mem64 ? 
"mem64" : "mem") : "io", bar_bus_addr, bar_bus_addr + bar_size - 1, bar_phys_addr, bar_phys_addr + bar_size - 1); pcie_conf_write(bdf, reg, bar_bus_addr & 0xFFFFFFFF); if (found_mem64) { pcie_conf_write(bdf, reg + 1, bar_bus_addr >> 32); } } else { LOG_INF("[%02x:%02x.%x] BAR%d size 0x%lx Failed memory allocation.", PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf), bar, bar_size); } if (found_mem64) { reg++; } } } static bool pcie_generic_ctrl_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf, unsigned int bus_number) { uint32_t class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV); /* Handle only PCI-to-PCI bridge for now */ if (PCIE_CONF_CLASSREV_CLASS(class) == 0x06 && PCIE_CONF_CLASSREV_SUBCLASS(class) == 0x04) { uint32_t number = pcie_conf_read(bdf, PCIE_BUS_NUMBER); uintptr_t bar_base_addr; pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 2); /* Configure bus number registers */ pcie_conf_write(bdf, PCIE_BUS_NUMBER, PCIE_BUS_NUMBER_VAL(PCIE_BDF_TO_BUS(bdf), bus_number, 0xff, /* set max until we finished scanning */ PCIE_SECONDARY_LATENCY_TIMER(number))); /* I/O align on 4k boundary */ if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false, KB(4), &bar_base_addr)) { uint32_t io = pcie_conf_read(bdf, PCIE_IO_SEC_STATUS); uint32_t io_upper = pcie_conf_read(bdf, PCIE_IO_BASE_LIMIT_UPPER); pcie_conf_write(bdf, PCIE_IO_SEC_STATUS, PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io), PCIE_IO_LIMIT(io), PCIE_SEC_STATUS(io))); pcie_conf_write(bdf, PCIE_IO_BASE_LIMIT_UPPER, PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper), PCIE_IO_LIMIT_UPPER(io_upper))); pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_IO, true); } /* MEM align on 1MiB boundary */ if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false, MB(1), &bar_base_addr)) { uint32_t mem = pcie_conf_read(bdf, PCIE_MEM_BASE_LIMIT); pcie_conf_write(bdf, PCIE_MEM_BASE_LIMIT, PCIE_MEM_BASE_LIMIT_VAL((bar_base_addr & 0xfff00000) >> 16, PCIE_MEM_LIMIT(mem))); pcie_set_cmd(bdf, 
PCIE_CONF_CMDSTAT_MEM, true); } /* TODO: add support for prefetchable */ pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MASTER, true); return true; } return false; } static void pcie_generic_ctrl_post_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf, unsigned int bus_number) { uint32_t number = pcie_conf_read(bdf, PCIE_BUS_NUMBER); uintptr_t bar_base_addr; /* Configure bus subordinate */ pcie_conf_write(bdf, PCIE_BUS_NUMBER, PCIE_BUS_NUMBER_VAL(PCIE_BUS_PRIMARY_NUMBER(number), PCIE_BUS_SECONDARY_NUMBER(number), bus_number - 1, PCIE_SECONDARY_LATENCY_TIMER(number))); /* I/O align on 4k boundary */ if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false, KB(4), &bar_base_addr)) { uint32_t io = pcie_conf_read(bdf, PCIE_IO_SEC_STATUS); uint32_t io_upper = pcie_conf_read(bdf, PCIE_IO_BASE_LIMIT_UPPER); pcie_conf_write(bdf, PCIE_IO_SEC_STATUS, PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io), ((bar_base_addr - 1) & 0x0000f000) >> 16, PCIE_SEC_STATUS(io))); pcie_conf_write(bdf, PCIE_IO_BASE_LIMIT_UPPER, PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper), ((bar_base_addr - 1) & 0xffff0000) >> 16)); } /* MEM align on 1MiB boundary */ if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false, MB(1), &bar_base_addr)) { uint32_t mem = pcie_conf_read(bdf, PCIE_MEM_BASE_LIMIT); pcie_conf_write(bdf, PCIE_MEM_BASE_LIMIT, PCIE_MEM_BASE_LIMIT_VAL(PCIE_MEM_BASE(mem), (bar_base_addr - 1) >> 16)); } /* TODO: add support for prefetchable */ } static void pcie_generic_ctrl_enumerate_type0(const struct device *ctrl_dev, pcie_bdf_t bdf) { /* Setup Type0 BARs */ pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 6); } static bool pcie_generic_ctrl_enumerate_endpoint(const struct device *ctrl_dev, pcie_bdf_t bdf, unsigned int bus_number, bool *skip_next_func) { bool multifunction_device = false; bool layout_type_1 = false; uint32_t data, class, id; bool is_bridge = false; *skip_next_func = false; id = pcie_conf_read(bdf, PCIE_CONF_ID); if (id == PCIE_ID_NONE) { return false; 
} class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV); data = pcie_conf_read(bdf, PCIE_CONF_TYPE); multifunction_device = PCIE_CONF_MULTIFUNCTION(data); layout_type_1 = PCIE_CONF_TYPE_BRIDGE(data); LOG_INF("[%02x:%02x.%x] %04x:%04x class %x subclass %x progif %x " "rev %x Type%x multifunction %s", PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf), id & 0xffff, id >> 16, PCIE_CONF_CLASSREV_CLASS(class), PCIE_CONF_CLASSREV_SUBCLASS(class), PCIE_CONF_CLASSREV_PROGIF(class), PCIE_CONF_CLASSREV_REV(class), layout_type_1 ? 1 : 0, multifunction_device ? "true" : "false"); /* Do not enumerate sub-functions if not a multifunction device */ if (PCIE_BDF_TO_FUNC(bdf) == 0 && !multifunction_device) { *skip_next_func = true; } if (layout_type_1) { is_bridge = pcie_generic_ctrl_enumerate_type1(ctrl_dev, bdf, bus_number); } else { pcie_generic_ctrl_enumerate_type0(ctrl_dev, bdf); } return is_bridge; } /* Return the next BDF or PCIE_BDF_NONE without changing bus number */ static inline unsigned int pcie_bdf_bus_next(unsigned int bdf, bool skip_next_func) { if (skip_next_func) { if (PCIE_BDF_TO_DEV(bdf) == PCIE_BDF_DEV_MASK) { return PCIE_BDF_NONE; } return PCIE_BDF(PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf) + 1, 0); } if (PCIE_BDF_TO_DEV(bdf) == PCIE_BDF_DEV_MASK && PCIE_BDF_TO_FUNC(bdf) == PCIE_BDF_FUNC_MASK) { return PCIE_BDF_NONE; } return PCIE_BDF(PCIE_BDF_TO_BUS(bdf), (PCIE_BDF_TO_DEV(bdf) + ((PCIE_BDF_TO_FUNC(bdf) + 1) / (PCIE_BDF_FUNC_MASK + 1))), ((PCIE_BDF_TO_FUNC(bdf) + 1) & PCIE_BDF_FUNC_MASK)); } struct pcie_bus_state { /* Current scanned bus BDF, always valid */ unsigned int bus_bdf; /* Current bridge endpoint BDF, either valid or PCIE_BDF_NONE */ unsigned int bridge_bdf; /* Next BDF to scan on bus, either valid or PCIE_BDF_NONE when all EP scanned */ unsigned int next_bdf; }; #define MAX_TRAVERSE_STACK 256 /* Non-recursive stack based PCIe bus & bridge enumeration */ void pcie_generic_ctrl_enumerate(const struct device *ctrl_dev, pcie_bdf_t bdf_start) { 
struct pcie_bus_state stack[MAX_TRAVERSE_STACK], *state; unsigned int bus_number = PCIE_BDF_TO_BUS(bdf_start) + 1; bool skip_next_func = false; bool is_bridge = false; int stack_top = 0; /* Start with first endpoint of immediate Root Controller bus */ stack[stack_top].bus_bdf = PCIE_BDF(PCIE_BDF_TO_BUS(bdf_start), 0, 0); stack[stack_top].bridge_bdf = PCIE_BDF_NONE; stack[stack_top].next_bdf = bdf_start; while (stack_top >= 0) { /* Top of stack contains the current PCIe bus to traverse */ state = &stack[stack_top]; /* Finish current bridge configuration before scanning other endpoints */ if (state->bridge_bdf != PCIE_BDF_NONE) { pcie_generic_ctrl_post_enumerate_type1(ctrl_dev, state->bridge_bdf, bus_number); state->bridge_bdf = PCIE_BDF_NONE; } /* We still have more endpoints to scan */ if (state->next_bdf != PCIE_BDF_NONE) { while (state->next_bdf != PCIE_BDF_NONE) { is_bridge = pcie_generic_ctrl_enumerate_endpoint(ctrl_dev, state->next_bdf, bus_number, &skip_next_func); if (is_bridge) { state->bridge_bdf = state->next_bdf; state->next_bdf = pcie_bdf_bus_next(state->next_bdf, skip_next_func); /* If we can't handle more bridges, don't go further */ if (stack_top == (MAX_TRAVERSE_STACK - 1) || bus_number == PCIE_BDF_BUS_MASK) { break; } /* Push to stack to scan this bus */ stack_top++; stack[stack_top].bus_bdf = PCIE_BDF(bus_number, 0, 0); stack[stack_top].bridge_bdf = PCIE_BDF_NONE; stack[stack_top].next_bdf = PCIE_BDF(bus_number, 0, 0); /* Increase bus number */ bus_number++; break; } state->next_bdf = pcie_bdf_bus_next(state->next_bdf, skip_next_func); } } else { /* We finished scanning this bus, go back and scan next endpoints */ stack_top--; } } } #ifdef CONFIG_PCIE_MSI uint32_t pcie_msi_map(unsigned int irq, msi_vector_t *vector, uint8_t n_vector) { ARG_UNUSED(irq); return vector->arch.address; } uint16_t pcie_msi_mdr(unsigned int irq, msi_vector_t *vector) { ARG_UNUSED(irq); return vector->arch.eventid; } uint8_t arch_pcie_msi_vectors_allocate(unsigned int 
priority, msi_vector_t *vectors, uint8_t n_vector) { const struct device *dev; dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller)); if (!dev) { LOG_ERR("Failed to get PCIe root complex"); return 0; } return pcie_ctrl_msi_device_setup(dev, priority, vectors, n_vector); } bool arch_pcie_msi_vector_connect(msi_vector_t *vector, void (*routine)(const void *parameter), const void *parameter, uint32_t flags) { if (irq_connect_dynamic(vector->arch.irq, vector->arch.priority, routine, parameter, flags) != vector->arch.irq) { return false; } irq_enable(vector->arch.irq); return true; } #endif ```
/content/code_sandbox/drivers/pcie/host/controller.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,877
```objective-c /* * */ #ifndef SIMCOM_SIM7080_H #define SIMCOM_SIM7080_H #include <zephyr/kernel.h> #include <ctype.h> #include <inttypes.h> #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/modem/simcom-sim7080.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/init.h> #include <string.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_offload.h> #include <zephyr/net/socket_offload.h> #include "modem_context.h" #include "modem_cmd_handler.h" #include "modem_iface_uart.h" #include "modem_socket.h" #define MDM_UART_NODE DT_INST_BUS(0) #define MDM_UART_DEV DEVICE_DT_GET(MDM_UART_NODE) #define MDM_MAX_DATA_LENGTH 1024 #define MDM_RECV_BUF_SIZE 1024 #define MDM_MAX_SOCKETS 5 #define MDM_BASE_SOCKET_NUM 0 #define MDM_RECV_MAX_BUF 30 #define BUF_ALLOC_TIMEOUT K_SECONDS(1) #define MDM_CMD_TIMEOUT K_SECONDS(10) #define MDM_REGISTRATION_TIMEOUT K_SECONDS(180) #define MDM_CONNECT_TIMEOUT K_SECONDS(90) #define MDM_PDP_TIMEOUT K_SECONDS(120) #define MDM_DNS_TIMEOUT K_SECONDS(210) #define MDM_WAIT_FOR_RSSI_DELAY K_SECONDS(2) #define MDM_WAIT_FOR_RSSI_COUNT 30 #define MDM_MAX_AUTOBAUD 5 #define MDM_MAX_CEREG_WAITS 40 #define MDM_MAX_CGATT_WAITS 40 #define MDM_BOOT_TRIES 4 #define MDM_GNSS_PARSER_MAX_LEN 128 #define MDM_APN CONFIG_MODEM_SIMCOM_SIM7080_APN #define MDM_LTE_BANDS CONFIG_MODEM_SIMCOM_SIM7080_LTE_BANDS #define RSSI_TIMEOUT_SECS 30 /* * Default length of modem data. */ #define MDM_MANUFACTURER_LENGTH 12 #define MDM_MODEL_LENGTH 16 #define MDM_REVISION_LENGTH 64 #define MDM_IMEI_LENGTH 16 #define MDM_IMSI_LENGTH 16 #define MDM_ICCID_LENGTH 32 enum sim7080_state { SIM7080_STATE_INIT = 0, SIM7080_STATE_NETWORKING, SIM7080_STATE_GNSS, SIM7080_STATE_OFF, }; /* Possible states of the ftp connection. */ enum sim7080_ftp_connection_state { /* Not connected yet. */ SIM7080_FTP_CONNECTION_STATE_INITIAL = 0, /* Connected and still data available. */ SIM7080_FTP_CONNECTION_STATE_CONNECTED, /* All data transferred. 
*/ SIM7080_FTP_CONNECTION_STATE_FINISHED, /* Something went wrong. */ SIM7080_FTP_CONNECTION_STATE_ERROR, }; /* * Driver data. */ struct sim7080_data { /* * Network interface of the sim module. */ struct net_if *netif; uint8_t mac_addr[6]; /* * Uart interface of the modem. */ struct modem_iface_uart_data iface_data; uint8_t iface_rb_buf[MDM_MAX_DATA_LENGTH]; /* * Modem command handler. */ struct modem_cmd_handler_data cmd_handler_data; uint8_t cmd_match_buf[MDM_RECV_BUF_SIZE + 1]; /* * Modem socket data. */ struct modem_socket_config socket_config; struct modem_socket sockets[MDM_MAX_SOCKETS]; /* * Current state of the modem. */ enum sim7080_state state; /* * RSSI work */ struct k_work_delayable rssi_query_work; /* * Information over the modem. */ char mdm_manufacturer[MDM_MANUFACTURER_LENGTH]; char mdm_model[MDM_MODEL_LENGTH]; char mdm_revision[MDM_REVISION_LENGTH]; char mdm_imei[MDM_IMEI_LENGTH]; #if defined(CONFIG_MODEM_SIM_NUMBERS) char mdm_imsi[MDM_IMSI_LENGTH]; char mdm_iccid[MDM_ICCID_LENGTH]; #endif /* #if defined(CONFIG_MODEM_SIM_NUMBERS) */ int mdm_rssi; /* * Current operating socket and statistics. */ int current_sock_fd; int current_sock_written; /* * Network registration of the modem. */ uint8_t mdm_registration; /* * Whether gprs is attached or detached. */ uint8_t mdm_cgatt; /* * If the sim card is ready or not. */ bool cpin_ready; /* * Flag if the PDP context is active. */ bool pdp_active; /* SMS buffer structure provided by read. */ struct sim7080_sms_buffer *sms_buffer; /* Position in the sms buffer. */ uint8_t sms_buffer_pos; /* Ftp related variables. */ struct { /* User buffer for ftp data. */ char *read_buffer; /* Length of the read buffer/number of bytes read. */ size_t nread; /* State of the ftp connection. */ enum sim7080_ftp_connection_state state; } ftp; /* * Semaphore(s). */ struct k_sem sem_response; struct k_sem sem_tx_ready; struct k_sem sem_dns; struct k_sem sem_ftp; }; /* * Socket read callback data. 
*/ struct socket_read_data { char *recv_buf; size_t recv_buf_len; struct sockaddr *recv_addr; uint16_t recv_read_len; }; #endif /* SIMCOM_SIM7080_H */ ```
/content/code_sandbox/drivers/modem/simcom-sim7080.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,204
```unknown
# WNCM 14A2A driver options

menuconfig MODEM_WNCM14A2A
	bool "Wistron LTE-M modem driver"
	select MODEM_RECEIVER
	select NET_OFFLOAD
	imply GPIO
	help
	  Choose this setting to enable Wistron WNC-M14A2A LTE-M modem
	  driver. NOTE: Currently the pin settings only work with FRDM
	  K64F shield.

if MODEM_WNCM14A2A

config MODEM_WNCM14A2A_RX_STACK_SIZE
	int "Size of the stack for the WNC-M14A2A modem driver RX thread"
	default 1028
	help
	  This stack is used by the WNCM14A2A RX thread.

config MODEM_WNCM14A2A_RX_WORKQ_STACK_SIZE
	int "Size of the stack for the WNC-M14A2A modem driver work queue"
	default 2048
	help
	  This stack is used by the work queue to pass off net_pkt data
	  to the rest of the network stack, letting the rx thread
	  continue processing data.

config MODEM_WNCM14A2A_APN_NAME
	string "APN name for establishing network connection"
	default "m2m.com.attz"
	help
	  This setting is used in the AT%PDNSET command to set the APN
	  name for the network connection context. Normally, don't need
	  to change this value.

config MODEM_WNCM14A2A_INIT_PRIORITY
	int "WNC-M14A2A driver init priority"
	default 80
	help
	  WNC-M14A2A device driver initialization priority. Do not mess
	  with it unless you know what you are doing. Note that the
	  priority needs to be lower than the net stack so that it can
	  start before the networking sub-system.

endif # MODEM_WNCM14A2A
```
/content/code_sandbox/drivers/modem/Kconfig.wncm14a2a
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
409
```c /* * */ #define DT_DRV_COMPAT quectel_bg9x #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(modem_quectel_bg9x, CONFIG_MODEM_LOG_LEVEL); #include "quectel-bg9x.h" static struct k_thread modem_rx_thread; static struct k_work_q modem_workq; static struct modem_data mdata; static struct modem_context mctx; static const struct socket_op_vtable offload_socket_fd_op_vtable; static K_KERNEL_STACK_DEFINE(modem_rx_stack, CONFIG_MODEM_QUECTEL_BG9X_RX_STACK_SIZE); static K_KERNEL_STACK_DEFINE(modem_workq_stack, CONFIG_MODEM_QUECTEL_BG9X_RX_WORKQ_STACK_SIZE); NET_BUF_POOL_DEFINE(mdm_recv_pool, MDM_RECV_MAX_BUF, MDM_RECV_BUF_SIZE, 0, NULL); static const struct gpio_dt_spec power_gpio = GPIO_DT_SPEC_INST_GET(0, mdm_power_gpios); #if DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) static const struct gpio_dt_spec reset_gpio = GPIO_DT_SPEC_INST_GET(0, mdm_reset_gpios); #endif #if DT_INST_NODE_HAS_PROP(0, mdm_dtr_gpios) static const struct gpio_dt_spec dtr_gpio = GPIO_DT_SPEC_INST_GET(0, mdm_dtr_gpios); #endif #if DT_INST_NODE_HAS_PROP(0, mdm_wdisable_gpios) static const struct gpio_dt_spec wdisable_gpio = GPIO_DT_SPEC_INST_GET(0, mdm_wdisable_gpios); #endif static inline int digits(int n) { int count = 0; while (n != 0) { n /= 10; ++count; } return count; } static inline uint32_t hash32(char *str, int len) { #define HASH_MULTIPLIER 37 uint32_t h = 0; int i; for (i = 0; i < len; ++i) { h = (h * HASH_MULTIPLIER) + str[i]; } return h; } static inline uint8_t *modem_get_mac(const struct device *dev) { struct modem_data *data = dev->data; uint32_t hash_value; data->mac_addr[0] = 0x00; data->mac_addr[1] = 0x10; /* use IMEI for mac_addr */ hash_value = hash32(mdata.mdm_imei, strlen(mdata.mdm_imei)); UNALIGNED_PUT(hash_value, (uint32_t *)(data->mac_addr + 2)); return data->mac_addr; } /* Func: modem_atoi * Desc: Convert string to long integer, but handle errors */ static int modem_atoi(const char *s, const int err_value, const char *desc, const char *func) { int ret; char *endptr; ret = 
(int)strtol(s, &endptr, 10); if (!endptr || *endptr != '\0') { LOG_ERR("bad %s '%s' in %s", s, desc, func); return err_value; } return ret; } static inline int find_len(char *data) { char buf[10] = {0}; int i; for (i = 0; i < 10; i++) { if (data[i] == '\r') { break; } buf[i] = data[i]; } return ATOI(buf, 0, "rx_buf"); } /* Func: on_cmd_sockread_common * Desc: Function to successfully read data from the modem on a given socket. */ static int on_cmd_sockread_common(int socket_fd, struct modem_cmd_handler_data *data, uint16_t len) { struct modem_socket *sock = NULL; struct socket_read_data *sock_data; int ret, i; int socket_data_length; int bytes_to_skip; if (!len) { LOG_ERR("Invalid length, Aborting!"); return -EAGAIN; } /* Make sure we still have buf data */ if (!data->rx_buf) { LOG_ERR("Incorrect format! Ignoring data!"); return -EINVAL; } socket_data_length = find_len(data->rx_buf->data); /* No (or not enough) data available on the socket. */ bytes_to_skip = digits(socket_data_length) + 2 + 4; if (socket_data_length <= 0) { LOG_ERR("Length problem (%d). Aborting!", socket_data_length); return -EAGAIN; } /* check to make sure we have all of the data. */ if (net_buf_frags_len(data->rx_buf) < (socket_data_length + bytes_to_skip)) { LOG_DBG("Not enough data -- wait!"); return -EAGAIN; } /* Skip "len" and CRLF */ bytes_to_skip = digits(socket_data_length) + 2; for (i = 0; i < bytes_to_skip; i++) { net_buf_pull_u8(data->rx_buf); } if (!data->rx_buf->len) { data->rx_buf = net_buf_frag_del(NULL, data->rx_buf); } sock = modem_socket_from_fd(&mdata.socket_config, socket_fd); if (!sock) { LOG_ERR("Socket not found! (%d)", socket_fd); ret = -EINVAL; goto exit; } sock_data = (struct socket_read_data *)sock->data; if (!sock_data) { LOG_ERR("Socket data not found! 
Skip handling (%d)", socket_fd); ret = -EINVAL; goto exit; } ret = net_buf_linearize(sock_data->recv_buf, sock_data->recv_buf_len, data->rx_buf, 0, (uint16_t)socket_data_length); data->rx_buf = net_buf_skip(data->rx_buf, ret); sock_data->recv_read_len = ret; if (ret != socket_data_length) { LOG_ERR("Total copied data is different then received data!" " copied:%d vs. received:%d", ret, socket_data_length); ret = -EINVAL; } exit: /* remove packet from list (ignore errors) */ (void)modem_socket_packet_size_update(&mdata.socket_config, sock, -socket_data_length); /* don't give back semaphore -- OK to follow */ return ret; } /* Func: socket_close * Desc: Function to close the given socket descriptor. */ static void socket_close(struct modem_socket *sock) { char buf[sizeof("AT+QICLOSE=##")] = {0}; int ret; snprintk(buf, sizeof(buf), "AT+QICLOSE=%d", sock->id); /* Tell the modem to close the socket. */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("%s ret:%d", buf, ret); } modem_socket_put(&mdata.socket_config, sock->sock_fd); } /* Handler: OK */ MODEM_CMD_DEFINE(on_cmd_ok) { modem_cmd_handler_set_error(data, 0); k_sem_give(&mdata.sem_response); return 0; } /* Handler: ERROR */ MODEM_CMD_DEFINE(on_cmd_error) { modem_cmd_handler_set_error(data, -EIO); k_sem_give(&mdata.sem_response); return 0; } /* Handler: +CME Error: <err>[0] */ MODEM_CMD_DEFINE(on_cmd_exterror) { modem_cmd_handler_set_error(data, -EIO); k_sem_give(&mdata.sem_response); return 0; } /* Handler: +CSQ: <signal_power>[0], <qual>[1] */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_rssi_csq) { int rssi = ATOI(argv[0], 0, "signal_power"); /* Check the RSSI value. 
*/ if (rssi == 31) { mdata.mdm_rssi = -51; } else if (rssi >= 0 && rssi <= 31) { mdata.mdm_rssi = -114 + ((rssi * 2) + 1); } else { mdata.mdm_rssi = -1000; } LOG_INF("RSSI: %d", mdata.mdm_rssi); return 0; } /* Handler: +QIOPEN: <connect_id>[0], <err>[1] */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_sockopen) { int err = ATOI(argv[1], 0, "sock_err"); LOG_INF("AT+QIOPEN: %d", err); modem_cmd_handler_set_error(data, err); k_sem_give(&mdata.sem_sock_conn); return 0; } /* Handler: <manufacturer> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_manufacturer) { size_t out_len = net_buf_linearize(mdata.mdm_manufacturer, sizeof(mdata.mdm_manufacturer) - 1, data->rx_buf, 0, len); mdata.mdm_manufacturer[out_len] = '\0'; LOG_INF("Manufacturer: %s", mdata.mdm_manufacturer); return 0; } /* Handler: <model> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_model) { size_t out_len = net_buf_linearize(mdata.mdm_model, sizeof(mdata.mdm_model) - 1, data->rx_buf, 0, len); mdata.mdm_model[out_len] = '\0'; /* Log the received information. */ LOG_INF("Model: %s", mdata.mdm_model); return 0; } /* Handler: <rev> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_revision) { size_t out_len = net_buf_linearize(mdata.mdm_revision, sizeof(mdata.mdm_revision) - 1, data->rx_buf, 0, len); mdata.mdm_revision[out_len] = '\0'; /* Log the received information. */ LOG_INF("Revision: %s", mdata.mdm_revision); return 0; } /* Handler: <IMEI> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_imei) { size_t out_len = net_buf_linearize(mdata.mdm_imei, sizeof(mdata.mdm_imei) - 1, data->rx_buf, 0, len); mdata.mdm_imei[out_len] = '\0'; /* Log the received information. */ LOG_INF("IMEI: %s", mdata.mdm_imei); return 0; } #if defined(CONFIG_MODEM_SIM_NUMBERS) /* Handler: <IMSI> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_imsi) { size_t out_len = net_buf_linearize(mdata.mdm_imsi, sizeof(mdata.mdm_imsi) - 1, data->rx_buf, 0, len); mdata.mdm_imsi[out_len] = '\0'; /* Log the received information. 
*/ LOG_INF("IMSI: %s", mdata.mdm_imsi); return 0; } /* Handler: <ICCID> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_iccid) { size_t out_len; char *p; out_len = net_buf_linearize(mdata.mdm_iccid, sizeof(mdata.mdm_iccid) - 1, data->rx_buf, 0, len); mdata.mdm_iccid[out_len] = '\0'; /* Skip over the +CCID bit, which modems omit. */ if (mdata.mdm_iccid[0] == '+') { p = strchr(mdata.mdm_iccid, ' '); if (p) { out_len = strlen(p + 1); memmove(mdata.mdm_iccid, p + 1, len + 1); } } LOG_INF("ICCID: %s", mdata.mdm_iccid); return 0; } #endif /* #if defined(CONFIG_MODEM_SIM_NUMBERS) */ /* Handler: TX Ready */ MODEM_CMD_DIRECT_DEFINE(on_cmd_tx_ready) { k_sem_give(&mdata.sem_tx_ready); return len; } /* Handler: SEND OK */ MODEM_CMD_DEFINE(on_cmd_send_ok) { modem_cmd_handler_set_error(data, 0); k_sem_give(&mdata.sem_response); return 0; } /* Handler: SEND FAIL */ MODEM_CMD_DEFINE(on_cmd_send_fail) { mdata.sock_written = 0; modem_cmd_handler_set_error(data, -EIO); k_sem_give(&mdata.sem_response); return 0; } /* Handler: Read data */ MODEM_CMD_DEFINE(on_cmd_sock_readdata) { return on_cmd_sockread_common(mdata.sock_fd, data, len); } /* Handler: Data receive indication. */ MODEM_CMD_DEFINE(on_cmd_unsol_recv) { struct modem_socket *sock; int sock_fd; sock_fd = ATOI(argv[0], 0, "sock_fd"); /* Socket pointer from FD. */ sock = modem_socket_from_fd(&mdata.socket_config, sock_fd); if (!sock) { return 0; } /* Data ready indication. */ LOG_INF("Data Receive Indication for socket: %d", sock_fd); modem_socket_data_ready(&mdata.socket_config, sock); return 0; } /* Handler: Socket Close Indication. */ MODEM_CMD_DEFINE(on_cmd_unsol_close) { struct modem_socket *sock; int sock_fd; sock_fd = ATOI(argv[0], 0, "sock_fd"); sock = modem_socket_from_fd(&mdata.socket_config, sock_fd); if (!sock) { return 0; } LOG_INF("Socket Close Indication for socket: %d", sock_fd); /* Tell the modem to close the socket. 
*/ socket_close(sock); LOG_INF("Socket Closed: %d", sock_fd); return 0; } /* Handler: Modem initialization ready. */ MODEM_CMD_DEFINE(on_cmd_unsol_rdy) { k_sem_give(&mdata.sem_response); return 0; } /* Func: send_socket_data * Desc: This function will send "binary" data over the socket object. */ static ssize_t send_socket_data(struct modem_socket *sock, const struct sockaddr *dst_addr, struct modem_cmd *handler_cmds, size_t handler_cmds_len, const char *buf, size_t buf_len, k_timeout_t timeout) { int ret; char send_buf[sizeof("AT+QISEND=##,####")] = {0}; char ctrlz = 0x1A; if (buf_len > MDM_MAX_DATA_LENGTH) { buf_len = MDM_MAX_DATA_LENGTH; } /* Create a buffer with the correct params. */ mdata.sock_written = buf_len; snprintk(send_buf, sizeof(send_buf), "AT+QISEND=%d,%ld", sock->id, (long) buf_len); /* Setup the locks correctly. */ (void)k_sem_take(&mdata.cmd_handler_data.sem_tx_lock, K_FOREVER); k_sem_reset(&mdata.sem_tx_ready); /* Send the Modem command. */ ret = modem_cmd_send_nolock(&mctx.iface, &mctx.cmd_handler, NULL, 0U, send_buf, NULL, K_NO_WAIT); if (ret < 0) { goto exit; } /* set command handlers */ ret = modem_cmd_handler_update_cmds(&mdata.cmd_handler_data, handler_cmds, handler_cmds_len, true); if (ret < 0) { goto exit; } /* Wait for '>' */ ret = k_sem_take(&mdata.sem_tx_ready, K_MSEC(5000)); if (ret < 0) { /* Didn't get the data prompt - Exit. */ LOG_DBG("Timeout waiting for tx"); goto exit; } /* Write all data on the console and send CTRL+Z. 
*/ mctx.iface.write(&mctx.iface, buf, buf_len); mctx.iface.write(&mctx.iface, &ctrlz, 1); /* Wait for 'SEND OK' or 'SEND FAIL' */ k_sem_reset(&mdata.sem_response); ret = k_sem_take(&mdata.sem_response, timeout); if (ret < 0) { LOG_DBG("No send response"); goto exit; } ret = modem_cmd_handler_get_error(&mdata.cmd_handler_data); if (ret != 0) { LOG_DBG("Failed to send data"); } exit: /* unset handler commands and ignore any errors */ (void)modem_cmd_handler_update_cmds(&mdata.cmd_handler_data, NULL, 0U, false); k_sem_give(&mdata.cmd_handler_data.sem_tx_lock); if (ret < 0) { return ret; } /* Return the amount of data written on the socket. */ return mdata.sock_written; } /* Func: offload_sendto * Desc: This function will send data on the socket object. */ static ssize_t offload_sendto(void *obj, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen) { int ret; struct modem_socket *sock = (struct modem_socket *) obj; /* Here's how sending data works, * -> We firstly send the "AT+QISEND" command on the given socket and * specify the length of data to be transferred. * -> In response to "AT+QISEND" command, the modem may respond with a * data prompt (>) or not respond at all. If it doesn't respond, we * exit. If it does respond with a data prompt (>), we move forward. * -> We plainly write all data on the UART and terminate by sending a * CTRL+Z. Once the modem receives CTRL+Z, it starts processing the * data and will respond with either "SEND OK", "SEND FAIL" or "ERROR". * Here we are registering handlers for the first two responses. We * already have a handler for the "generic" error response. */ struct modem_cmd cmd[] = { MODEM_CMD_DIRECT(">", on_cmd_tx_ready), MODEM_CMD("SEND OK", on_cmd_send_ok, 0, ","), MODEM_CMD("SEND FAIL", on_cmd_send_fail, 0, ","), }; /* Ensure that valid parameters are passed. */ if (!buf || len == 0) { errno = EINVAL; return -1; } /* UDP is not supported. 
*/ if (sock->ip_proto == IPPROTO_UDP) { errno = ENOTSUP; return -1; } if (!sock->is_connected) { errno = ENOTCONN; return -1; } ret = send_socket_data(sock, to, cmd, ARRAY_SIZE(cmd), buf, len, MDM_CMD_TIMEOUT); if (ret < 0) { errno = -ret; return -1; } /* Data was written successfully. */ errno = 0; return ret; } /* Func: offload_recvfrom * Desc: This function will receive data on the socket object. */ static ssize_t offload_recvfrom(void *obj, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlen) { struct modem_socket *sock = (struct modem_socket *)obj; char sendbuf[sizeof("AT+QIRD=##,####")] = {0}; int ret; struct socket_read_data sock_data; /* Modem command to read the data. */ struct modem_cmd data_cmd[] = { MODEM_CMD("+QIRD: ", on_cmd_sock_readdata, 0U, "") }; if (!buf || len == 0) { errno = EINVAL; return -1; } if (flags & ZSOCK_MSG_PEEK) { errno = ENOTSUP; return -1; } snprintk(sendbuf, sizeof(sendbuf), "AT+QIRD=%d,%zd", sock->id, len); /* Socket read settings */ (void) memset(&sock_data, 0, sizeof(sock_data)); sock_data.recv_buf = buf; sock_data.recv_buf_len = len; sock_data.recv_addr = from; sock->data = &sock_data; mdata.sock_fd = sock->sock_fd; /* Tell the modem to give us data (AT+QIRD=id,data_len). */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, data_cmd, ARRAY_SIZE(data_cmd), sendbuf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { errno = -ret; ret = -1; goto exit; } /* HACK: use dst address as from */ if (from && fromlen) { *fromlen = sizeof(sock->dst); memcpy(from, &sock->dst, *fromlen); } /* return length of received data */ errno = 0; ret = sock_data.recv_read_len; exit: /* clear socket data */ sock->data = NULL; return ret; } /* Func: offload_read * Desc: This function reads data from the given socket object. 
*/ static ssize_t offload_read(void *obj, void *buffer, size_t count) { return offload_recvfrom(obj, buffer, count, 0, NULL, 0); } /* Func: offload_write * Desc: This function writes data to the given socket object. */ static ssize_t offload_write(void *obj, const void *buffer, size_t count) { return offload_sendto(obj, buffer, count, 0, NULL, 0); } /* Func: offload_ioctl * Desc: Function call to handle various misc requests. */ static int offload_ioctl(void *obj, unsigned int request, va_list args) { switch (request) { case ZFD_IOCTL_POLL_PREPARE: { struct zsock_pollfd *pfd; struct k_poll_event **pev; struct k_poll_event *pev_end; pfd = va_arg(args, struct zsock_pollfd *); pev = va_arg(args, struct k_poll_event **); pev_end = va_arg(args, struct k_poll_event *); return modem_socket_poll_prepare(&mdata.socket_config, obj, pfd, pev, pev_end); } case ZFD_IOCTL_POLL_UPDATE: { struct zsock_pollfd *pfd; struct k_poll_event **pev; pfd = va_arg(args, struct zsock_pollfd *); pev = va_arg(args, struct k_poll_event **); return modem_socket_poll_update(obj, pfd, pev); } default: errno = EINVAL; return -1; } } /* Func: offload_connect * Desc: This function will connect with a provided TCP. */ static int offload_connect(void *obj, const struct sockaddr *addr, socklen_t addrlen) { struct modem_socket *sock = (struct modem_socket *) obj; uint16_t dst_port = 0; char *protocol = "TCP"; struct modem_cmd cmd[] = { MODEM_CMD("+QIOPEN: ", on_cmd_atcmdinfo_sockopen, 2U, ",") }; char buf[sizeof("AT+QIOPEN=#,#,'###','###'," "####.####.####.####.####.####.####.####,######," "0,0")] = {0}; int ret; char ip_str[NET_IPV6_ADDR_LEN]; /* Verify socket has been allocated */ if (modem_socket_is_allocated(&mdata.socket_config, sock) == false) { LOG_ERR("Invalid socket_id(%d) from fd:%d", sock->id, sock->sock_fd); errno = EINVAL; return -1; } if (sock->is_connected == true) { LOG_ERR("Socket is already connected!! 
socket_id(%d), socket_fd:%d", sock->id, sock->sock_fd); errno = EISCONN; return -1; } /* Find the correct destination port. */ if (addr->sa_family == AF_INET6) { dst_port = ntohs(net_sin6(addr)->sin6_port); } else if (addr->sa_family == AF_INET) { dst_port = ntohs(net_sin(addr)->sin_port); } /* UDP is not supported. */ if (sock->ip_proto == IPPROTO_UDP) { errno = ENOTSUP; return -1; } k_sem_reset(&mdata.sem_sock_conn); ret = modem_context_sprint_ip_addr(addr, ip_str, sizeof(ip_str)); if (ret != 0) { LOG_ERR("Error formatting IP string %d", ret); LOG_ERR("Closing the socket!!!"); socket_close(sock); errno = -ret; return -1; } /* Formulate the complete string. */ snprintk(buf, sizeof(buf), "AT+QIOPEN=%d,%d,\"%s\",\"%s\",%d,0,0", 1, sock->id, protocol, ip_str, dst_port); /* Send out the command. */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buf, &mdata.sem_response, K_SECONDS(1)); if (ret < 0) { LOG_ERR("%s ret:%d", buf, ret); LOG_ERR("Closing the socket!!!"); socket_close(sock); errno = -ret; return -1; } /* set command handlers */ ret = modem_cmd_handler_update_cmds(&mdata.cmd_handler_data, cmd, ARRAY_SIZE(cmd), true); if (ret < 0) { goto exit; } /* Wait for QI+OPEN */ ret = k_sem_take(&mdata.sem_sock_conn, MDM_CMD_CONN_TIMEOUT); if (ret < 0) { LOG_ERR("Timeout waiting for socket open"); LOG_ERR("Closing the socket!!!"); socket_close(sock); goto exit; } ret = modem_cmd_handler_get_error(&mdata.cmd_handler_data); if (ret != 0) { LOG_ERR("Closing the socket!!!"); socket_close(sock); goto exit; } /* Connected successfully. */ sock->is_connected = true; errno = 0; return 0; exit: (void) modem_cmd_handler_update_cmds(&mdata.cmd_handler_data, NULL, 0U, false); errno = -ret; return -1; } /* Func: offload_close * Desc: This function closes the connection with the remote client and * frees the socket. 
*/ static int offload_close(void *obj) { struct modem_socket *sock = (struct modem_socket *) obj; /* Make sure socket is allocated */ if (modem_socket_is_allocated(&mdata.socket_config, sock) == false) { return 0; } /* Close the socket only if it is connected. */ if (sock->is_connected) { socket_close(sock); } return 0; } /* Func: offload_sendmsg * Desc: This function sends messages to the modem. */ static ssize_t offload_sendmsg(void *obj, const struct msghdr *msg, int flags) { ssize_t sent = 0; int rc; LOG_DBG("msg_iovlen:%zd flags:%d", msg->msg_iovlen, flags); for (int i = 0; i < msg->msg_iovlen; i++) { const char *buf = msg->msg_iov[i].iov_base; size_t len = msg->msg_iov[i].iov_len; while (len > 0) { rc = offload_sendto(obj, buf, len, flags, msg->msg_name, msg->msg_namelen); if (rc < 0) { if (rc == -EAGAIN) { k_sleep(MDM_SENDMSG_SLEEP); } else { sent = rc; break; } } else { sent += rc; buf += rc; len -= rc; } } } return (ssize_t) sent; } /* Func: modem_rx * Desc: Thread to process all messages received from the Modem. */ static void modem_rx(void *p1, void *p2, void *p3) { ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3); while (true) { /* Wait for incoming data */ modem_iface_uart_rx_wait(&mctx.iface, K_FOREVER); modem_cmd_handler_process(&mctx.cmd_handler, &mctx.iface); } } /* Func: modem_rssi_query_work * Desc: Routine to get Modem RSSI. */ static void modem_rssi_query_work(struct k_work *work) { struct modem_cmd cmd = MODEM_CMD("+CSQ: ", on_cmd_atcmdinfo_rssi_csq, 2U, ","); static char *send_cmd = "AT+CSQ"; int ret; /* query modem RSSI */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, &cmd, 1U, send_cmd, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT+CSQ ret:%d", ret); } /* Re-start RSSI query work */ if (work) { k_work_reschedule_for_queue(&modem_workq, &mdata.rssi_query_work, K_SECONDS(RSSI_TIMEOUT_SECS)); } } /* Func: pin_init * Desc: Boot up the Modem. 
*/ static void pin_init(void) { #if !DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) int ret = k_sem_take(&mdata.sem_pin_busy, K_SECONDS(3)); if (ret < 0) { LOG_DBG("Timeout pin_init()"); } #endif /* !DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) */ LOG_INF("Setting Modem Pins"); #if DT_INST_NODE_HAS_PROP(0, mdm_wdisable_gpios) LOG_INF("Deactivate W Disable"); gpio_pin_set_dt(&wdisable_gpio, 0); k_sleep(K_MSEC(250)); #endif /* NOTE: Per the BG95 document, the Reset pin is internally connected to the * Power key pin. */ /* MDM_POWER -> 1 for 500-1000 msec. */ gpio_pin_set_dt(&power_gpio, 1); k_sleep(K_MSEC(750)); /* MDM_POWER -> 0 and wait for ~2secs as UART remains in "inactive" state * for some time after the power signal is enabled. */ gpio_pin_set_dt(&power_gpio, 0); k_sleep(K_SECONDS(2)); LOG_INF("... Done!"); #if !DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) k_sem_give(&mdata.sem_pin_busy); #endif /* !DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) */ } MODEM_CMD_DEFINE(on_cmd_unsol_normal_power_down) { LOG_INF("Modem powering off. Re-power modem..."); pin_init(); return 0; } static const struct modem_cmd response_cmds[] = { MODEM_CMD("OK", on_cmd_ok, 0U, ""), MODEM_CMD("ERROR", on_cmd_error, 0U, ""), MODEM_CMD("+CME ERROR: ", on_cmd_exterror, 1U, ""), }; static const struct modem_cmd unsol_cmds[] = { MODEM_CMD("+QIURC: \"recv\",", on_cmd_unsol_recv, 1U, ""), MODEM_CMD("+QIURC: \"closed\",", on_cmd_unsol_close, 1U, ""), MODEM_CMD(MDM_UNSOL_RDY, on_cmd_unsol_rdy, 0U, ""), MODEM_CMD("NORMAL POWER DOWN", on_cmd_unsol_normal_power_down, 0U, ""), }; /* Commands sent to the modem to set it up at boot time. */ static const struct setup_cmd setup_cmds[] = { SETUP_CMD_NOHANDLE("ATE0"), SETUP_CMD_NOHANDLE("ATH"), SETUP_CMD_NOHANDLE("AT+CMEE=1"), /* Commands to read info from the modem (things like IMEI, Model etc). 
*/ SETUP_CMD("AT+CGMI", "", on_cmd_atcmdinfo_manufacturer, 0U, ""), SETUP_CMD("AT+CGMM", "", on_cmd_atcmdinfo_model, 0U, ""), SETUP_CMD("AT+CGMR", "", on_cmd_atcmdinfo_revision, 0U, ""), SETUP_CMD("AT+CGSN", "", on_cmd_atcmdinfo_imei, 0U, ""), #if defined(CONFIG_MODEM_SIM_NUMBERS) SETUP_CMD("AT+CIMI", "", on_cmd_atcmdinfo_imsi, 0U, ""), SETUP_CMD("AT+QCCID", "", on_cmd_atcmdinfo_iccid, 0U, ""), #endif /* #if defined(CONFIG_MODEM_SIM_NUMBERS) */ SETUP_CMD_NOHANDLE("AT+QICSGP=1,1,\"" MDM_APN "\",\"" MDM_USERNAME "\",\"" MDM_PASSWORD "\",1"), }; /* Func: modem_pdp_context_active * Desc: This helper function is called from modem_setup, and is * used to open the PDP context. If there is trouble activating the * PDP context, we try to deactivate and reactivate MDM_PDP_ACT_RETRY_COUNT times. * If it fails, we return an error. */ static int modem_pdp_context_activate(void) { int ret; int retry_count = 0; ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT+QIACT=1", &mdata.sem_response, MDM_CMD_TIMEOUT); /* If there is trouble activating the PDP context, we try to deactivate/reactive it. */ while (ret == -EIO && retry_count < MDM_PDP_ACT_RETRY_COUNT) { ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT+QIDEACT=1", &mdata.sem_response, MDM_CMD_TIMEOUT); /* If there's any error for AT+QIDEACT, restart the module. */ if (ret != 0) { return ret; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT+QIACT=1", &mdata.sem_response, MDM_CMD_TIMEOUT); retry_count++; } if (ret == -EIO && retry_count >= MDM_PDP_ACT_RETRY_COUNT) { LOG_ERR("Retried activating/deactivating too many times."); } return ret; } /* Func: modem_setup * Desc: This function is used to setup the modem from zero. The idea * is that this function will be called right after the modem is * powered on to do the stuff necessary to talk to the modem. 
*/ static int modem_setup(void) { int ret = 0, counter; int rssi_retry_count = 0, init_retry_count = 0; /* Setup the pins to ensure that Modem is enabled. */ pin_init(); restart: counter = 0; /* stop RSSI delay work */ k_work_cancel_delayable(&mdata.rssi_query_work); /* Let the modem respond. */ LOG_INF("Waiting for modem to respond"); ret = k_sem_take(&mdata.sem_response, MDM_MAX_BOOT_TIME); if (ret < 0) { LOG_ERR("Timeout waiting for RDY"); goto error; } /* Run setup commands on the modem. */ ret = modem_cmd_handler_setup_cmds(&mctx.iface, &mctx.cmd_handler, setup_cmds, ARRAY_SIZE(setup_cmds), &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); if (ret < 0) { goto error; } restart_rssi: /* query modem RSSI */ modem_rssi_query_work(NULL); k_sleep(MDM_WAIT_FOR_RSSI_DELAY); /* Keep trying to read RSSI until we get a valid value - Eventually, exit. */ while (counter++ < MDM_WAIT_FOR_RSSI_COUNT && (mdata.mdm_rssi >= 0 || mdata.mdm_rssi <= -1000)) { modem_rssi_query_work(NULL); k_sleep(MDM_WAIT_FOR_RSSI_DELAY); } /* Is the RSSI invalid ? */ if (mdata.mdm_rssi >= 0 || mdata.mdm_rssi <= -1000) { rssi_retry_count++; if (rssi_retry_count >= MDM_NETWORK_RETRY_COUNT) { LOG_ERR("Failed network init. Too many attempts!"); ret = -ENETUNREACH; goto error; } /* Try again! */ LOG_ERR("Failed network init. Restarting process."); counter = 0; goto restart_rssi; } /* Network is ready - Start RSSI work in the background. */ LOG_INF("Network is ready."); k_work_reschedule_for_queue(&modem_workq, &mdata.rssi_query_work, K_SECONDS(RSSI_TIMEOUT_SECS)); /* Once the network is ready, we try to activate the PDP context. 
*/ ret = modem_pdp_context_activate(); if (ret < 0 && init_retry_count++ < MDM_INIT_RETRY_COUNT) { LOG_ERR("Error activating modem with pdp context"); goto restart; } error: return ret; } static const struct socket_op_vtable offload_socket_fd_op_vtable = { .fd_vtable = { .read = offload_read, .write = offload_write, .close = offload_close, .ioctl = offload_ioctl, }, .bind = NULL, .connect = offload_connect, .sendto = offload_sendto, .recvfrom = offload_recvfrom, .listen = NULL, .accept = NULL, .sendmsg = offload_sendmsg, .getsockopt = NULL, .setsockopt = NULL, }; static int offload_socket(int family, int type, int proto); /* Setup the Modem NET Interface. */ static void modem_net_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct modem_data *data = dev->data; /* Direct socket offload used instead of net offload: */ net_if_set_link_addr(iface, modem_get_mac(dev), sizeof(data->mac_addr), NET_LINK_ETHERNET); data->net_iface = iface; net_if_socket_offload_set(iface, offload_socket); } static struct offloaded_if_api api_funcs = { .iface_api.init = modem_net_iface_init, }; static bool offload_is_supported(int family, int type, int proto) { if (family != AF_INET && family != AF_INET6) { return false; } if (type != SOCK_STREAM) { return false; } if (proto != IPPROTO_TCP) { return false; } return true; } static int offload_socket(int family, int type, int proto) { int ret; /* defer modem's socket create call to bind() */ ret = modem_socket_get(&mdata.socket_config, family, type, proto); if (ret < 0) { errno = -ret; return -1; } errno = 0; return ret; } static int modem_init(const struct device *dev) { int ret; ARG_UNUSED(dev); #if !DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) k_sem_init(&mdata.sem_pin_busy, 1, 1); #endif /* !DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) */ k_sem_init(&mdata.sem_response, 0, 1); k_sem_init(&mdata.sem_tx_ready, 0, 1); k_sem_init(&mdata.sem_sock_conn, 0, 1); k_work_queue_start(&modem_workq, 
modem_workq_stack, K_KERNEL_STACK_SIZEOF(modem_workq_stack), K_PRIO_COOP(7), NULL); /* socket config */ ret = modem_socket_init(&mdata.socket_config, &mdata.sockets[0], ARRAY_SIZE(mdata.sockets), MDM_BASE_SOCKET_NUM, true, &offload_socket_fd_op_vtable); if (ret < 0) { goto error; } /* cmd handler setup */ const struct modem_cmd_handler_config cmd_handler_config = { .match_buf = &mdata.cmd_match_buf[0], .match_buf_len = sizeof(mdata.cmd_match_buf), .buf_pool = &mdm_recv_pool, .alloc_timeout = BUF_ALLOC_TIMEOUT, .eol = "\r\n", .user_data = NULL, .response_cmds = response_cmds, .response_cmds_len = ARRAY_SIZE(response_cmds), .unsol_cmds = unsol_cmds, .unsol_cmds_len = ARRAY_SIZE(unsol_cmds), }; ret = modem_cmd_handler_init(&mctx.cmd_handler, &mdata.cmd_handler_data, &cmd_handler_config); if (ret < 0) { goto error; } /* modem interface */ const struct modem_iface_uart_config uart_config = { .rx_rb_buf = &mdata.iface_rb_buf[0], .rx_rb_buf_len = sizeof(mdata.iface_rb_buf), .dev = MDM_UART_DEV, .hw_flow_control = DT_PROP(MDM_UART_NODE, hw_flow_control), }; ret = modem_iface_uart_init(&mctx.iface, &mdata.iface_data, &uart_config); if (ret < 0) { goto error; } /* modem data storage */ mctx.data_manufacturer = mdata.mdm_manufacturer; mctx.data_model = mdata.mdm_model; mctx.data_revision = mdata.mdm_revision; mctx.data_imei = mdata.mdm_imei; #if defined(CONFIG_MODEM_SIM_NUMBERS) mctx.data_imsi = mdata.mdm_imsi; mctx.data_iccid = mdata.mdm_iccid; #endif /* #if defined(CONFIG_MODEM_SIM_NUMBERS) */ mctx.data_rssi = &mdata.mdm_rssi; /* pin setup */ ret = gpio_pin_configure_dt(&power_gpio, GPIO_OUTPUT_LOW); if (ret < 0) { LOG_ERR("Failed to configure %s pin", "power"); goto error; } #if DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) ret = gpio_pin_configure_dt(&reset_gpio, GPIO_OUTPUT_LOW); if (ret < 0) { LOG_ERR("Failed to configure %s pin", "reset"); goto error; } #endif #if DT_INST_NODE_HAS_PROP(0, mdm_dtr_gpios) ret = gpio_pin_configure_dt(&dtr_gpio, GPIO_OUTPUT_LOW); if (ret < 0) 
{ LOG_ERR("Failed to configure %s pin", "dtr"); goto error; } #endif #if DT_INST_NODE_HAS_PROP(0, mdm_wdisable_gpios) ret = gpio_pin_configure_dt(&wdisable_gpio, GPIO_OUTPUT_LOW); if (ret < 0) { LOG_ERR("Failed to configure %s pin", "wdisable"); goto error; } #endif /* modem context setup */ mctx.driver_data = &mdata; ret = modem_context_register(&mctx); if (ret < 0) { LOG_ERR("Error registering modem context: %d", ret); goto error; } /* start RX thread */ k_thread_create(&modem_rx_thread, modem_rx_stack, K_KERNEL_STACK_SIZEOF(modem_rx_stack), modem_rx, NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT); /* Init RSSI query */ k_work_init_delayable(&mdata.rssi_query_work, modem_rssi_query_work); return modem_setup(); error: return ret; } /* Register the device with the Networking stack. */ NET_DEVICE_DT_INST_OFFLOAD_DEFINE(0, modem_init, NULL, &mdata, NULL, CONFIG_MODEM_QUECTEL_BG9X_INIT_PRIORITY, &api_funcs, MDM_MAX_DATA_LENGTH); /* Register NET sockets. */ NET_SOCKET_OFFLOAD_REGISTER(quectel_bg9x, CONFIG_NET_SOCKETS_OFFLOAD_PRIORITY, AF_UNSPEC, offload_is_supported, offload_socket); ```
/content/code_sandbox/drivers/modem/quectel-bg9x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,721
```objective-c
/** @file
 * @brief Modem receiver header file.
 *
 * A modem receiver driver allowing application to handle all
 * aspects of received protocol data.
 */

/*
 *
 */

#ifndef ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_RECEIVER_H_
#define ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_RECEIVER_H_

#include <zephyr/kernel.h>
#include <zephyr/sys/ring_buffer.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Per-receiver state: the UART device the modem is attached to, the RX
 * ring buffer / semaphore pair used to hand received bytes to the
 * application, and pointers to caller-owned modem identity strings.
 */
struct mdm_receiver_context {
	const struct device *uart_dev;

	/* rx data */
	struct ring_buf rx_rb;  /* buffers bytes received from the UART */
	struct k_sem rx_sem;    /* signalled when RX data is available */

	/* modem data (caller-owned storage, registered elsewhere) */
	char *data_manufacturer;
	char *data_model;
	char *data_revision;
#if defined(CONFIG_MODEM_SIM_NUMBERS)
	char *data_imei;
	char *data_imsi;
#endif
	char *data_iccid;
	int *data_rssi;
};

/**
 * @brief Gets receiver context by id.
 *
 * @param id: receiver context id.
 *
 * @retval Receiver context or NULL.
 */
struct mdm_receiver_context *mdm_receiver_context_from_id(int id);

/**
 * @brief Get received data.
 *
 * @param ctx: receiver context.
 * @param buf: buffer to copy the received data to.
 * @param size: buffer size.
 * @param bytes_read: amount of received bytes
 *
 * @retval 0 if ok, < 0 if error.
 */
int mdm_receiver_recv(struct mdm_receiver_context *ctx,
		      uint8_t *buf, size_t size, size_t *bytes_read);

/**
 * @brief Sends the data over specified receiver context.
 *
 * @param ctx: receiver context.
 * @param buf: buffer with the data to send.
 * @param size: the amount of data to send.
 *
 * @retval 0 if ok, < 0 if error.
 */
int mdm_receiver_send(struct mdm_receiver_context *ctx,
		      const uint8_t *buf, size_t size);

/**
 * @brief Registers receiver context.
 *
 * @note Acquires receivers device, and prepares the context to be used.
 *
 * @param ctx: receiver context to register.
 * @param uart_dev: communication device for the receiver context.
 * @param buf: rx buffer to use for received data.
 * @param size: rx buffer size.
 *
 * @retval 0 if ok, < 0 if error.
 */
int mdm_receiver_register(struct mdm_receiver_context *ctx,
			  const struct device *uart_dev,
			  uint8_t *buf, size_t size);

/**
 * @brief Power-management hook: put the receiver into a low-power state.
 *
 * @note Exact semantics are defined by the implementation (presumably
 * suspends the underlying UART device) — confirm in modem_receiver.c.
 *
 * @param ctx: receiver context.
 *
 * @retval 0 if ok, < 0 if error.
 */
int mdm_receiver_sleep(struct mdm_receiver_context *ctx);

/**
 * @brief Power-management hook: wake the receiver from a low-power state.
 *
 * @note Counterpart of mdm_receiver_sleep(); exact semantics are defined
 * by the implementation — confirm in modem_receiver.c.
 *
 * @param ctx: receiver context.
 *
 * @retval 0 if ok, < 0 if error.
 */
int mdm_receiver_wake(struct mdm_receiver_context *ctx);

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_RECEIVER_H_ */
```
/content/code_sandbox/drivers/modem/modem_receiver.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
602
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/cellular.h> #include <zephyr/modem/chat.h> #include <zephyr/modem/cmux.h> #include <zephyr/modem/pipe.h> #include <zephyr/modem/pipelink.h> #include <zephyr/modem/ppp.h> #include <zephyr/modem/backend/uart.h> #include <zephyr/net/ppp.h> #include <zephyr/pm/device.h> #include <zephyr/sys/atomic.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(modem_cellular, CONFIG_MODEM_LOG_LEVEL); #include <string.h> #include <stdlib.h> #define MODEM_CELLULAR_PERIODIC_SCRIPT_TIMEOUT \ K_MSEC(CONFIG_MODEM_CELLULAR_PERIODIC_SCRIPT_MS) #define MODEM_CELLULAR_DATA_IMEI_LEN (16) #define MODEM_CELLULAR_DATA_MODEL_ID_LEN (65) #define MODEM_CELLULAR_DATA_IMSI_LEN (23) #define MODEM_CELLULAR_DATA_ICCID_LEN (22) #define MODEM_CELLULAR_DATA_MANUFACTURER_LEN (65) #define MODEM_CELLULAR_DATA_FW_VERSION_LEN (65) #define MODEM_CELLULAR_RESERVED_DLCIS (2) /* Magic constants */ #define CSQ_RSSI_UNKNOWN (99) #define CESQ_RSRP_UNKNOWN (255) #define CESQ_RSRQ_UNKNOWN (255) /* Magic numbers to units conversions */ #define CSQ_RSSI_TO_DB(v) (-113 + (2 * (rssi))) #define CESQ_RSRP_TO_DB(v) (-140 + (v)) #define CESQ_RSRQ_TO_DB(v) (-20 + ((v) / 2)) enum modem_cellular_state { MODEM_CELLULAR_STATE_IDLE = 0, MODEM_CELLULAR_STATE_RESET_PULSE, MODEM_CELLULAR_STATE_POWER_ON_PULSE, MODEM_CELLULAR_STATE_AWAIT_POWER_ON, MODEM_CELLULAR_STATE_RUN_INIT_SCRIPT, MODEM_CELLULAR_STATE_CONNECT_CMUX, MODEM_CELLULAR_STATE_OPEN_DLCI1, MODEM_CELLULAR_STATE_OPEN_DLCI2, MODEM_CELLULAR_STATE_RUN_DIAL_SCRIPT, MODEM_CELLULAR_STATE_AWAIT_REGISTERED, MODEM_CELLULAR_STATE_CARRIER_ON, MODEM_CELLULAR_STATE_INIT_POWER_OFF, MODEM_CELLULAR_STATE_POWER_OFF_PULSE, MODEM_CELLULAR_STATE_AWAIT_POWER_OFF, }; enum modem_cellular_event { MODEM_CELLULAR_EVENT_RESUME = 0, MODEM_CELLULAR_EVENT_SUSPEND, MODEM_CELLULAR_EVENT_SCRIPT_SUCCESS, MODEM_CELLULAR_EVENT_SCRIPT_FAILED, MODEM_CELLULAR_EVENT_CMUX_CONNECTED, 
MODEM_CELLULAR_EVENT_DLCI1_OPENED, MODEM_CELLULAR_EVENT_DLCI2_OPENED, MODEM_CELLULAR_EVENT_TIMEOUT, MODEM_CELLULAR_EVENT_REGISTERED, MODEM_CELLULAR_EVENT_DEREGISTERED, MODEM_CELLULAR_EVENT_BUS_OPENED, MODEM_CELLULAR_EVENT_BUS_CLOSED, }; struct modem_cellular_data { /* UART backend */ struct modem_pipe *uart_pipe; struct modem_backend_uart uart_backend; uint8_t uart_backend_receive_buf[CONFIG_MODEM_CELLULAR_UART_BUFFER_SIZES]; uint8_t uart_backend_transmit_buf[CONFIG_MODEM_CELLULAR_UART_BUFFER_SIZES]; /* CMUX */ struct modem_cmux cmux; uint8_t cmux_receive_buf[CONFIG_MODEM_CELLULAR_CMUX_MAX_FRAME_SIZE]; uint8_t cmux_transmit_buf[2 * CONFIG_MODEM_CELLULAR_CMUX_MAX_FRAME_SIZE]; struct modem_cmux_dlci dlci1; struct modem_cmux_dlci dlci2; struct modem_pipe *dlci1_pipe; struct modem_pipe *dlci2_pipe; uint8_t dlci1_receive_buf[CONFIG_MODEM_CELLULAR_CMUX_MAX_FRAME_SIZE]; /* DLCI 2 is only used for chat scripts. */ uint8_t dlci2_receive_buf[CONFIG_MODEM_CELLULAR_CHAT_BUFFER_SIZES]; /* Modem chat */ struct modem_chat chat; uint8_t chat_receive_buf[CONFIG_MODEM_CELLULAR_CHAT_BUFFER_SIZES]; uint8_t *chat_delimiter; uint8_t *chat_filter; uint8_t *chat_argv[32]; /* Status */ enum cellular_registration_status registration_status_gsm; enum cellular_registration_status registration_status_gprs; enum cellular_registration_status registration_status_lte; uint8_t rssi; uint8_t rsrp; uint8_t rsrq; uint8_t imei[MODEM_CELLULAR_DATA_IMEI_LEN]; uint8_t model_id[MODEM_CELLULAR_DATA_MODEL_ID_LEN]; uint8_t imsi[MODEM_CELLULAR_DATA_IMSI_LEN]; uint8_t iccid[MODEM_CELLULAR_DATA_ICCID_LEN]; uint8_t manufacturer[MODEM_CELLULAR_DATA_MANUFACTURER_LEN]; uint8_t fw_version[MODEM_CELLULAR_DATA_FW_VERSION_LEN]; /* PPP */ struct modem_ppp *ppp; enum modem_cellular_state state; const struct device *dev; struct k_work_delayable timeout_work; /* Power management */ struct k_sem suspended_sem; /* Event dispatcher */ struct k_work event_dispatch_work; uint8_t event_buf[8]; struct ring_buf event_rb; struct 
k_mutex event_rb_lock; }; struct modem_cellular_user_pipe { struct modem_cmux_dlci dlci; uint8_t dlci_address; uint8_t *dlci_receive_buf; uint16_t dlci_receive_buf_size; struct modem_pipe *pipe; struct modem_pipelink *pipelink; }; struct modem_cellular_config { const struct device *uart; struct gpio_dt_spec power_gpio; struct gpio_dt_spec reset_gpio; uint16_t power_pulse_duration_ms; uint16_t reset_pulse_duration_ms; uint16_t startup_time_ms; uint16_t shutdown_time_ms; bool autostarts; const struct modem_chat_script *init_chat_script; const struct modem_chat_script *dial_chat_script; const struct modem_chat_script *periodic_chat_script; struct modem_cellular_user_pipe *user_pipes; uint8_t user_pipes_size; }; static const char *modem_cellular_state_str(enum modem_cellular_state state) { switch (state) { case MODEM_CELLULAR_STATE_IDLE: return "idle"; case MODEM_CELLULAR_STATE_RESET_PULSE: return "reset pulse"; case MODEM_CELLULAR_STATE_POWER_ON_PULSE: return "power pulse"; case MODEM_CELLULAR_STATE_AWAIT_POWER_ON: return "await power on"; case MODEM_CELLULAR_STATE_RUN_INIT_SCRIPT: return "run init script"; case MODEM_CELLULAR_STATE_CONNECT_CMUX: return "connect cmux"; case MODEM_CELLULAR_STATE_OPEN_DLCI1: return "open dlci1"; case MODEM_CELLULAR_STATE_OPEN_DLCI2: return "open dlci2"; case MODEM_CELLULAR_STATE_AWAIT_REGISTERED: return "await registered"; case MODEM_CELLULAR_STATE_RUN_DIAL_SCRIPT: return "run dial script"; case MODEM_CELLULAR_STATE_CARRIER_ON: return "carrier on"; case MODEM_CELLULAR_STATE_INIT_POWER_OFF: return "init power off"; case MODEM_CELLULAR_STATE_POWER_OFF_PULSE: return "power off pulse"; case MODEM_CELLULAR_STATE_AWAIT_POWER_OFF: return "await power off"; } return ""; } static const char *modem_cellular_event_str(enum modem_cellular_event event) { switch (event) { case MODEM_CELLULAR_EVENT_RESUME: return "resume"; case MODEM_CELLULAR_EVENT_SUSPEND: return "suspend"; case MODEM_CELLULAR_EVENT_SCRIPT_SUCCESS: return "script success"; case 
MODEM_CELLULAR_EVENT_SCRIPT_FAILED: return "script failed"; case MODEM_CELLULAR_EVENT_CMUX_CONNECTED: return "cmux connected"; case MODEM_CELLULAR_EVENT_DLCI1_OPENED: return "dlci1 opened"; case MODEM_CELLULAR_EVENT_DLCI2_OPENED: return "dlci2 opened"; case MODEM_CELLULAR_EVENT_TIMEOUT: return "timeout"; case MODEM_CELLULAR_EVENT_REGISTERED: return "registered"; case MODEM_CELLULAR_EVENT_DEREGISTERED: return "deregistered"; case MODEM_CELLULAR_EVENT_BUS_OPENED: return "bus opened"; case MODEM_CELLULAR_EVENT_BUS_CLOSED: return "bus closed"; } return ""; } static bool modem_cellular_gpio_is_enabled(const struct gpio_dt_spec *gpio) { return gpio->port != NULL; } static void modem_cellular_notify_user_pipes_connected(struct modem_cellular_data *data) { const struct modem_cellular_config *config = (const struct modem_cellular_config *)data->dev->config; struct modem_cellular_user_pipe *user_pipe; struct modem_pipelink *pipelink; for (uint8_t i = 0; i < config->user_pipes_size; i++) { user_pipe = &config->user_pipes[i]; pipelink = user_pipe->pipelink; modem_pipelink_notify_connected(pipelink); } } static void modem_cellular_notify_user_pipes_disconnected(struct modem_cellular_data *data) { const struct modem_cellular_config *config = (const struct modem_cellular_config *)data->dev->config; struct modem_cellular_user_pipe *user_pipe; struct modem_pipelink *pipelink; for (uint8_t i = 0; i < config->user_pipes_size; i++) { user_pipe = &config->user_pipes[i]; pipelink = user_pipe->pipelink; modem_pipelink_notify_disconnected(pipelink); } } static void modem_cellular_enter_state(struct modem_cellular_data *data, enum modem_cellular_state state); static void modem_cellular_delegate_event(struct modem_cellular_data *data, enum modem_cellular_event evt); static void modem_cellular_event_handler(struct modem_cellular_data *data, enum modem_cellular_event evt); static void modem_cellular_bus_pipe_handler(struct modem_pipe *pipe, enum modem_pipe_event event, void *user_data) { 
struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; switch (event) { case MODEM_PIPE_EVENT_OPENED: modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_BUS_OPENED); break; case MODEM_PIPE_EVENT_CLOSED: modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_BUS_CLOSED); break; default: break; } } static void modem_cellular_dlci1_pipe_handler(struct modem_pipe *pipe, enum modem_pipe_event event, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; switch (event) { case MODEM_PIPE_EVENT_OPENED: modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_DLCI1_OPENED); break; default: break; } } static void modem_cellular_dlci2_pipe_handler(struct modem_pipe *pipe, enum modem_pipe_event event, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; switch (event) { case MODEM_PIPE_EVENT_OPENED: modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_DLCI2_OPENED); break; default: break; } } static void modem_cellular_chat_callback_handler(struct modem_chat *chat, enum modem_chat_script_result result, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (result == MODEM_CHAT_SCRIPT_RESULT_SUCCESS) { modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_SCRIPT_SUCCESS); } else { modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_SCRIPT_FAILED); } } static void modem_cellular_chat_on_imei(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (argc != 2) { return; } strncpy(data->imei, argv[1], sizeof(data->imei) - 1); } static void modem_cellular_chat_on_cgmm(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (argc != 2) { return; } strncpy(data->model_id, argv[1], sizeof(data->model_id) - 1); } static void 
modem_cellular_chat_on_cgmi(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (argc != 2) { return; } strncpy(data->manufacturer, argv[1], sizeof(data->manufacturer) - 1); } static void modem_cellular_chat_on_cgmr(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (argc != 2) { return; } strncpy(data->fw_version, argv[1], sizeof(data->fw_version) - 1); } static void modem_cellular_chat_on_csq(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (argc != 3) { return; } data->rssi = (uint8_t)atoi(argv[1]); } static void modem_cellular_chat_on_cesq(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (argc != 7) { return; } data->rsrq = (uint8_t)atoi(argv[5]); data->rsrp = (uint8_t)atoi(argv[6]); } static void modem_cellular_chat_on_iccid(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (argc != 2) { return; } strncpy(data->iccid, argv[1], sizeof(data->iccid) - 1); } static void modem_cellular_chat_on_imsi(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; if (argc != 2) { return; } strncpy(data->imsi, argv[1], sizeof(data->imsi) - 1); } static bool modem_cellular_is_registered(struct modem_cellular_data *data) { return (data->registration_status_gsm == CELLULAR_REGISTRATION_REGISTERED_HOME) || (data->registration_status_gsm == CELLULAR_REGISTRATION_REGISTERED_ROAMING) || (data->registration_status_gprs == CELLULAR_REGISTRATION_REGISTERED_HOME) || (data->registration_status_gprs == 
CELLULAR_REGISTRATION_REGISTERED_ROAMING) || (data->registration_status_lte == CELLULAR_REGISTRATION_REGISTERED_HOME) || (data->registration_status_lte == CELLULAR_REGISTRATION_REGISTERED_ROAMING); } static void modem_cellular_chat_on_cxreg(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { struct modem_cellular_data *data = (struct modem_cellular_data *)user_data; enum cellular_registration_status registration_status = 0; if (argc == 2) { registration_status = atoi(argv[1]); } else if (argc == 3 || argc == 6) { registration_status = atoi(argv[2]); } else { return; } if (strcmp(argv[0], "+CREG: ") == 0) { data->registration_status_gsm = registration_status; } else if (strcmp(argv[0], "+CGREG: ") == 0) { data->registration_status_gprs = registration_status; } else { data->registration_status_lte = registration_status; } if (modem_cellular_is_registered(data)) { modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_REGISTERED); } else { modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_DEREGISTERED); } } MODEM_CHAT_MATCH_DEFINE(ok_match, "OK", "", NULL); MODEM_CHAT_MATCHES_DEFINE(allow_match, MODEM_CHAT_MATCH("OK", "", NULL), MODEM_CHAT_MATCH("ERROR", "", NULL)); MODEM_CHAT_MATCH_DEFINE(imei_match, "", "", modem_cellular_chat_on_imei); MODEM_CHAT_MATCH_DEFINE(cgmm_match, "", "", modem_cellular_chat_on_cgmm); MODEM_CHAT_MATCH_DEFINE(csq_match, "+CSQ: ", ",", modem_cellular_chat_on_csq); MODEM_CHAT_MATCH_DEFINE(cesq_match, "+CESQ: ", ",", modem_cellular_chat_on_cesq); MODEM_CHAT_MATCH_DEFINE(qccid_match __maybe_unused, "+QCCID: ", "", modem_cellular_chat_on_iccid); MODEM_CHAT_MATCH_DEFINE(iccid_match __maybe_unused, "+ICCID: ", "", modem_cellular_chat_on_iccid); MODEM_CHAT_MATCH_DEFINE(cimi_match __maybe_unused, "", "", modem_cellular_chat_on_imsi); MODEM_CHAT_MATCH_DEFINE(cgmi_match __maybe_unused, "", "", modem_cellular_chat_on_cgmi); MODEM_CHAT_MATCH_DEFINE(cgmr_match __maybe_unused, "", "", modem_cellular_chat_on_cgmr); 
/* Unsolicited registration notifications handled at any time. */
MODEM_CHAT_MATCHES_DEFINE(unsol_matches,
			  MODEM_CHAT_MATCH("+CREG: ", ",", modem_cellular_chat_on_cxreg),
			  MODEM_CHAT_MATCH("+CEREG: ", ",", modem_cellular_chat_on_cxreg),
			  MODEM_CHAT_MATCH("+CGREG: ", ",", modem_cellular_chat_on_cxreg));

MODEM_CHAT_MATCHES_DEFINE(abort_matches, MODEM_CHAT_MATCH("ERROR", "", NULL));

/* Responses that abort a dial attempt. */
MODEM_CHAT_MATCHES_DEFINE(dial_abort_matches,
			  MODEM_CHAT_MATCH("ERROR", "", NULL),
			  MODEM_CHAT_MATCH("BUSY", "", NULL),
			  MODEM_CHAT_MATCH("NO ANSWER", "", NULL),
			  MODEM_CHAT_MATCH("NO CARRIER", "", NULL),
			  MODEM_CHAT_MATCH("NO DIALTONE", "", NULL));

#if DT_HAS_COMPAT_STATUS_OKAY(swir_hl7800) || DT_HAS_COMPAT_STATUS_OKAY(sqn_gm02s)
MODEM_CHAT_MATCH_DEFINE(connect_match, "CONNECT", "", NULL);
#endif

/* Log a state transition at debug level. */
static void modem_cellular_log_state_changed(enum modem_cellular_state last_state,
					     enum modem_cellular_state new_state)
{
	LOG_DBG("switch from %s to %s", modem_cellular_state_str(last_state),
		modem_cellular_state_str(new_state));
}

/* Log a dispatched event at debug level. */
static void modem_cellular_log_event(enum modem_cellular_event evt)
{
	LOG_DBG("event %s", modem_cellular_event_str(evt));
}

/* (Re)arm the state machine timeout; fires MODEM_CELLULAR_EVENT_TIMEOUT. */
static void modem_cellular_start_timer(struct modem_cellular_data *data, k_timeout_t timeout)
{
	k_work_schedule(&data->timeout_work, timeout);
}

/* Cancel a pending state machine timeout. */
static void modem_cellular_stop_timer(struct modem_cellular_data *data)
{
	k_work_cancel_delayable(&data->timeout_work);
}

/* Delayable-work handler: turn timer expiry into a TIMEOUT event. */
static void modem_cellular_timeout_handler(struct k_work *item)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(item);
	struct modem_cellular_data *data =
		CONTAINER_OF(dwork, struct modem_cellular_data, timeout_work);

	modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_TIMEOUT);
}

/*
 * Work-queue handler: drain all queued events from the ring buffer (under
 * the lock) and run them through the state machine in FIFO order.
 */
static void modem_cellular_event_dispatch_handler(struct k_work *item)
{
	struct modem_cellular_data *data =
		CONTAINER_OF(item, struct modem_cellular_data, event_dispatch_work);

	uint8_t events[sizeof(data->event_buf)];
	uint8_t events_cnt;

	k_mutex_lock(&data->event_rb_lock, K_FOREVER);

	events_cnt = (uint8_t)ring_buf_get(&data->event_rb, events, sizeof(data->event_buf));

	k_mutex_unlock(&data->event_rb_lock);

	for (uint8_t i = 0; i < events_cnt; i++) {
		modem_cellular_event_handler(data, (enum modem_cellular_event)events[i]);
	}
}

/*
 * Queue an event (one byte in the ring buffer) and schedule the dispatch
 * work item. Safe to call from any context the work/mutex APIs allow.
 */
static void modem_cellular_delegate_event(struct modem_cellular_data *data,
					  enum modem_cellular_event evt)
{
	k_mutex_lock(&data->event_rb_lock, K_FOREVER);
	ring_buf_put(&data->event_rb, (uint8_t *)&evt, 1);
	k_mutex_unlock(&data->event_rb_lock);
	k_work_submit(&data->event_dispatch_work);
}

/*
 * IDLE entry: hold the modem in reset (if wired), tear down user pipes,
 * chat, PPP and CMUX, close the UART pipe and mark the driver suspended.
 */
static int modem_cellular_on_idle_state_enter(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	if (modem_cellular_gpio_is_enabled(&config->reset_gpio)) {
		gpio_pin_set_dt(&config->reset_gpio, 1);
	}

	modem_cellular_notify_user_pipes_disconnected(data);
	modem_chat_release(&data->chat);
	modem_ppp_release(data->ppp);
	modem_cmux_release(&data->cmux);
	modem_pipe_close_async(data->uart_pipe);
	k_sem_give(&data->suspended_sem);

	return 0;
}

/*
 * IDLE: on RESUME choose the power-up path based on configuration
 * (autostart > power pulse > reset release > straight to init script).
 */
static void modem_cellular_idle_event_handler(struct modem_cellular_data *data,
					      enum modem_cellular_event evt)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	switch (evt) {
	case MODEM_CELLULAR_EVENT_RESUME:
		if (config->autostarts) {
			modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_AWAIT_POWER_ON);
			break;
		}

		if (modem_cellular_gpio_is_enabled(&config->power_gpio)) {
			modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_POWER_ON_PULSE);
			break;
		}

		if (modem_cellular_gpio_is_enabled(&config->reset_gpio)) {
			modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_AWAIT_POWER_ON);
			break;
		}

		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_RUN_INIT_SCRIPT);
		break;

	case MODEM_CELLULAR_EVENT_SUSPEND:
		k_sem_give(&data->suspended_sem);
		break;

	default:
		break;
	}
}

/* IDLE exit: consume the suspended semaphore and release reset (if wired). */
static int modem_cellular_on_idle_state_leave(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	k_sem_take(&data->suspended_sem, K_NO_WAIT);

	if (modem_cellular_gpio_is_enabled(&config->reset_gpio)) {
		gpio_pin_set_dt(&config->reset_gpio, 0);
	}

	return 0;
}

/* RESET_PULSE entry: assert reset and time the pulse width. */
static int modem_cellular_on_reset_pulse_state_enter(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	gpio_pin_set_dt(&config->reset_gpio, 1);
	modem_cellular_start_timer(data, K_MSEC(config->reset_pulse_duration_ms));
	return 0;
}

/* RESET_PULSE: pulse done -> await power on; suspend aborts to idle. */
static void modem_cellular_reset_pulse_event_handler(struct modem_cellular_data *data,
						     enum modem_cellular_event evt)
{
	switch (evt) {
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_AWAIT_POWER_ON);
		break;

	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_IDLE);
		break;

	default:
		break;
	}
}

/* RESET_PULSE exit: release reset and cancel the pulse timer. */
static int modem_cellular_on_reset_pulse_state_leave(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	gpio_pin_set_dt(&config->reset_gpio, 0);
	modem_cellular_stop_timer(data);
	return 0;
}

/* POWER_ON_PULSE entry: assert the power key and time the pulse width. */
static int modem_cellular_on_power_on_pulse_state_enter(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	gpio_pin_set_dt(&config->power_gpio, 1);
	modem_cellular_start_timer(data, K_MSEC(config->power_pulse_duration_ms));
	return 0;
}

/* POWER_ON_PULSE: pulse done -> await power on; suspend aborts to idle. */
static void modem_cellular_power_on_pulse_event_handler(struct modem_cellular_data *data,
							enum modem_cellular_event evt)
{
	switch (evt) {
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_AWAIT_POWER_ON);
		break;

	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_IDLE);
		break;

	default:
		break;
	}
}

/* POWER_ON_PULSE exit: release the power key and cancel the pulse timer. */
static int modem_cellular_on_power_on_pulse_state_leave(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	gpio_pin_set_dt(&config->power_gpio, 0);
	modem_cellular_stop_timer(data);
	return 0;
}

/* AWAIT_POWER_ON entry: give the modem its configured boot time. */
static int modem_cellular_on_await_power_on_state_enter(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	modem_cellular_start_timer(data, K_MSEC(config->startup_time_ms));
	return 0;
}

/* AWAIT_POWER_ON: boot delay elapsed -> run init script. */
static void modem_cellular_await_power_on_event_handler(struct modem_cellular_data *data,
							enum modem_cellular_event evt)
{
	switch (evt) {
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_RUN_INIT_SCRIPT);
		break;

	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_IDLE);
		break;

	default:
		break;
	}
}

/* RUN_INIT_SCRIPT entry: open the raw UART pipe (async). */
static int modem_cellular_on_run_init_script_state_enter(struct modem_cellular_data *data)
{
	modem_pipe_attach(data->uart_pipe, modem_cellular_bus_pipe_handler, data);
	return modem_pipe_open_async(data->uart_pipe);
}

/*
 * RUN_INIT_SCRIPT: once the bus opens, run the init chat script on the raw
 * UART. On success, use the parsed IMEI as the PPP link address and close
 * the bus; the resulting BUS_CLOSED advances to CONNECT_CMUX. On failure,
 * retry via power pulse or reset pulse if available, else give up to IDLE.
 */
static void modem_cellular_run_init_script_event_handler(struct modem_cellular_data *data,
							 enum modem_cellular_event evt)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	switch (evt) {
	case MODEM_CELLULAR_EVENT_BUS_OPENED:
		modem_chat_attach(&data->chat, data->uart_pipe);
		modem_chat_run_script_async(&data->chat, config->init_chat_script);
		break;

	case MODEM_CELLULAR_EVENT_SCRIPT_SUCCESS:
		net_if_set_link_addr(modem_ppp_get_iface(data->ppp), data->imei,
				     ARRAY_SIZE(data->imei), NET_LINK_UNKNOWN);

		modem_chat_release(&data->chat);
		modem_pipe_attach(data->uart_pipe, modem_cellular_bus_pipe_handler, data);
		modem_pipe_close_async(data->uart_pipe);
		break;

	case MODEM_CELLULAR_EVENT_BUS_CLOSED:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_CONNECT_CMUX);
		break;

	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_IDLE);
		break;

	case MODEM_CELLULAR_EVENT_SCRIPT_FAILED:
		if (modem_cellular_gpio_is_enabled(&config->power_gpio)) {
			modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_POWER_ON_PULSE);
			break;
		}

		if (modem_cellular_gpio_is_enabled(&config->reset_gpio)) {
			modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_RESET_PULSE);
			break;
		}

		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_IDLE);
		break;

	default:
		break;
	}
}

static int modem_cellular_on_connect_cmux_state_enter(struct modem_cellular_data *data)
{
	/*
	 * Allow modem to switch bus into CMUX mode. Some modems disable UART RX while
	 * switching, resulting in UART RX errors as bus is no longer pulled up by modem.
	 */
	modem_cellular_start_timer(data, K_MSEC(100));
	return 0;
}

/*
 * CONNECT_CMUX: after the settle delay, reopen the UART and bring up the
 * CMUX mux; once connected, announce user pipes and open DLCI1.
 */
static void modem_cellular_connect_cmux_event_handler(struct modem_cellular_data *data,
						      enum modem_cellular_event evt)
{
	switch (evt) {
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_pipe_attach(data->uart_pipe, modem_cellular_bus_pipe_handler, data);
		modem_pipe_open_async(data->uart_pipe);
		break;

	case MODEM_CELLULAR_EVENT_BUS_OPENED:
		modem_cmux_attach(&data->cmux, data->uart_pipe);
		modem_cmux_connect_async(&data->cmux);
		break;

	case MODEM_CELLULAR_EVENT_CMUX_CONNECTED:
		modem_cellular_notify_user_pipes_connected(data);
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_OPEN_DLCI1);
		break;

	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_INIT_POWER_OFF);
		break;

	default:
		break;
	}
}

/* OPEN_DLCI1 entry: open the PPP data channel (async). */
static int modem_cellular_on_open_dlci1_state_enter(struct modem_cellular_data *data)
{
	modem_pipe_attach(data->dlci1_pipe, modem_cellular_dlci1_pipe_handler, data);
	return modem_pipe_open_async(data->dlci1_pipe);
}

/* OPEN_DLCI1: channel opened -> open DLCI2; suspend -> power off path. */
static void modem_cellular_open_dlci1_event_handler(struct modem_cellular_data *data,
						    enum modem_cellular_event evt)
{
	switch (evt) {
	case MODEM_CELLULAR_EVENT_DLCI1_OPENED:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_OPEN_DLCI2);
		break;

	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_INIT_POWER_OFF);
		break;

	default:
		break;
	}
}
static int modem_cellular_on_open_dlci1_state_leave(struct modem_cellular_data *data)
{
	modem_pipe_release(data->dlci1_pipe);
	return 0;
}

/* Open DLCI channel 2 (later used for the command/chat channel). */
static int modem_cellular_on_open_dlci2_state_enter(struct modem_cellular_data *data)
{
	modem_pipe_attach(data->dlci2_pipe, modem_cellular_dlci2_pipe_handler, data);
	return modem_pipe_open_async(data->dlci2_pipe);
}

static void modem_cellular_open_dlci2_event_handler(struct modem_cellular_data *data,
						    enum modem_cellular_event evt)
{
	switch (evt) {
	case MODEM_CELLULAR_EVENT_DLCI2_OPENED:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_RUN_DIAL_SCRIPT);
		break;
	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_INIT_POWER_OFF);
		break;
	default:
		break;
	}
}

static int modem_cellular_on_open_dlci2_state_leave(struct modem_cellular_data *data)
{
	modem_pipe_release(data->dlci2_pipe);
	return 0;
}

static int modem_cellular_on_run_dial_script_state_enter(struct modem_cellular_data *data)
{
	/* Allow modem time to enter command mode before running dial script */
	modem_cellular_start_timer(data, K_MSEC(100));
	return 0;
}

/* RUN_DIAL_SCRIPT: run the dial chat script on DLCI1, then await registration. */
static void modem_cellular_run_dial_script_event_handler(struct modem_cellular_data *data,
							 enum modem_cellular_event evt)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	switch (evt) {
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_chat_attach(&data->chat, data->dlci1_pipe);
		modem_chat_run_script_async(&data->chat, config->dial_chat_script);
		break;
	case MODEM_CELLULAR_EVENT_SCRIPT_SUCCESS:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_AWAIT_REGISTERED);
		break;
	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_INIT_POWER_OFF);
		break;
	default:
		break;
	}
}

static int modem_cellular_on_run_dial_script_state_leave(struct modem_cellular_data *data)
{
	modem_chat_release(&data->chat);
	return 0;
}

/* Attach PPP to DLCI1 and the chat module to DLCI2 while waiting for registration. */
static int modem_cellular_on_await_registered_state_enter(struct modem_cellular_data *data)
{
	if (modem_ppp_attach(data->ppp, data->dlci1_pipe) < 0) {
		/* NOTE(review): the original error code from modem_ppp_attach is discarded. */
		return -EAGAIN;
	}

	modem_cellular_start_timer(data, MODEM_CELLULAR_PERIODIC_SCRIPT_TIMEOUT);
	return modem_chat_attach(&data->chat, data->dlci2_pipe);
}

/* AWAIT_REGISTERED: poll with the periodic script until the modem registers. */
static void modem_cellular_await_registered_event_handler(struct modem_cellular_data *data,
							  enum modem_cellular_event evt)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	switch (evt) {
	case MODEM_CELLULAR_EVENT_SCRIPT_SUCCESS:
	case MODEM_CELLULAR_EVENT_SCRIPT_FAILED:
		/* Re-arm the timer so the periodic script keeps running either way. */
		modem_cellular_start_timer(data, MODEM_CELLULAR_PERIODIC_SCRIPT_TIMEOUT);
		break;
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_chat_run_script_async(&data->chat, config->periodic_chat_script);
		break;
	case MODEM_CELLULAR_EVENT_REGISTERED:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_CARRIER_ON);
		break;
	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_INIT_POWER_OFF);
		break;
	default:
		break;
	}
}

static int modem_cellular_on_await_registered_state_leave(struct modem_cellular_data *data)
{
	modem_cellular_stop_timer(data);
	return 0;
}

/* Signal carrier up to the network stack and keep running the periodic script. */
static int modem_cellular_on_carrier_on_state_enter(struct modem_cellular_data *data)
{
	net_if_carrier_on(modem_ppp_get_iface(data->ppp));
	modem_cellular_start_timer(data, MODEM_CELLULAR_PERIODIC_SCRIPT_TIMEOUT);
	return 0;
}

/* CARRIER_ON: monitor registration; redial on deregistration. */
static void modem_cellular_carrier_on_event_handler(struct modem_cellular_data *data,
						    enum modem_cellular_event evt)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	switch (evt) {
	case MODEM_CELLULAR_EVENT_SCRIPT_SUCCESS:
	case MODEM_CELLULAR_EVENT_SCRIPT_FAILED:
		modem_cellular_start_timer(data, MODEM_CELLULAR_PERIODIC_SCRIPT_TIMEOUT);
		break;
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_chat_run_script_async(&data->chat, config->periodic_chat_script);
		break;
	case MODEM_CELLULAR_EVENT_DEREGISTERED:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_RUN_DIAL_SCRIPT);
		break;
	case MODEM_CELLULAR_EVENT_SUSPEND:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_INIT_POWER_OFF);
		break;
	default:
		break;
	}
}

/* Tear down carrier, chat and PPP attachments when leaving CARRIER_ON. */
static int modem_cellular_on_carrier_on_state_leave(struct modem_cellular_data *data)
{
	modem_cellular_stop_timer(data);
	net_if_carrier_off(modem_ppp_get_iface(data->ppp));
	modem_chat_release(&data->chat);
	modem_ppp_release(data->ppp);
	return 0;
}

/* Begin shutdown: close the UART bus, then allow it time to drain. */
static int modem_cellular_on_init_power_off_state_enter(struct modem_cellular_data *data)
{
	modem_pipe_close_async(data->uart_pipe);
	modem_cellular_start_timer(data, K_MSEC(2000));
	return 0;
}

/* INIT_POWER_OFF: after the drain delay, pulse the power GPIO if available. */
static void modem_cellular_init_power_off_event_handler(struct modem_cellular_data *data,
							enum modem_cellular_event evt)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	switch (evt) {
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		if (modem_cellular_gpio_is_enabled(&config->power_gpio)) {
			modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_POWER_OFF_PULSE);
			break;
		}

		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_IDLE);
		break;
	default:
		break;
	}
}

static int modem_cellular_on_init_power_off_state_leave(struct modem_cellular_data *data)
{
	modem_cellular_notify_user_pipes_disconnected(data);
	modem_chat_release(&data->chat);
	modem_ppp_release(data->ppp);
	return 0;
}

/* Assert the power GPIO for the configured power-off pulse duration. */
static int modem_cellular_on_power_off_pulse_state_enter(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	gpio_pin_set_dt(&config->power_gpio, 1);
	modem_cellular_start_timer(data, K_MSEC(config->power_pulse_duration_ms));
	return 0;
}

static void modem_cellular_power_off_pulse_event_handler(struct modem_cellular_data *data,
							 enum modem_cellular_event evt)
{
	switch (evt) {
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_AWAIT_POWER_OFF);
		break;
	default:
		break;
	}
}

/* Release the power GPIO when the pulse completes or the state is aborted. */
static int modem_cellular_on_power_off_pulse_state_leave(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	gpio_pin_set_dt(&config->power_gpio, 0);
	modem_cellular_stop_timer(data);
	return 0;
}

/* Give the modem the configured shutdown time before declaring it idle. */
static int modem_cellular_on_await_power_off_state_enter(struct modem_cellular_data *data)
{
	const struct modem_cellular_config *config =
		(const struct modem_cellular_config *)data->dev->config;

	modem_cellular_start_timer(data, K_MSEC(config->shutdown_time_ms));
	return 0;
}

static void modem_cellular_await_power_off_event_handler(struct modem_cellular_data *data,
							 enum modem_cellular_event evt)
{
	switch (evt) {
	case MODEM_CELLULAR_EVENT_TIMEOUT:
		modem_cellular_enter_state(data, MODEM_CELLULAR_STATE_IDLE);
		break;
	default:
		break;
	}
}

/* Dispatch the per-state enter hook for the state machine. */
static int modem_cellular_on_state_enter(struct modem_cellular_data *data)
{
	int ret;

	switch (data->state) {
	case MODEM_CELLULAR_STATE_IDLE:
		ret = modem_cellular_on_idle_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_RESET_PULSE:
		ret = modem_cellular_on_reset_pulse_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_POWER_ON_PULSE:
		ret = modem_cellular_on_power_on_pulse_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_AWAIT_POWER_ON:
		ret = modem_cellular_on_await_power_on_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_RUN_INIT_SCRIPT:
		ret = modem_cellular_on_run_init_script_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_CONNECT_CMUX:
		ret = modem_cellular_on_connect_cmux_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_OPEN_DLCI1:
		ret = modem_cellular_on_open_dlci1_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_OPEN_DLCI2:
		ret = modem_cellular_on_open_dlci2_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_RUN_DIAL_SCRIPT:
		ret = modem_cellular_on_run_dial_script_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_AWAIT_REGISTERED:
		ret = modem_cellular_on_await_registered_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_CARRIER_ON:
		ret = modem_cellular_on_carrier_on_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_INIT_POWER_OFF:
		ret = modem_cellular_on_init_power_off_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_POWER_OFF_PULSE:
		ret = modem_cellular_on_power_off_pulse_state_enter(data);
		break;
	case MODEM_CELLULAR_STATE_AWAIT_POWER_OFF:
		ret = modem_cellular_on_await_power_off_state_enter(data);
		break;
	default:
		/* States without an enter hook succeed trivially. */
		ret = 0;
		break;
	}

	return ret;
}

/* Dispatch the per-state leave hook for the state machine. */
static int modem_cellular_on_state_leave(struct modem_cellular_data *data)
{
	int ret;

	switch (data->state) {
	case MODEM_CELLULAR_STATE_IDLE:
		ret = modem_cellular_on_idle_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_RESET_PULSE:
		ret = modem_cellular_on_reset_pulse_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_POWER_ON_PULSE:
		ret = modem_cellular_on_power_on_pulse_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_OPEN_DLCI1:
		ret = modem_cellular_on_open_dlci1_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_OPEN_DLCI2:
		ret = modem_cellular_on_open_dlci2_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_RUN_DIAL_SCRIPT:
		ret = modem_cellular_on_run_dial_script_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_AWAIT_REGISTERED:
		ret = modem_cellular_on_await_registered_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_CARRIER_ON:
		ret = modem_cellular_on_carrier_on_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_INIT_POWER_OFF:
		ret = modem_cellular_on_init_power_off_state_leave(data);
		break;
	case MODEM_CELLULAR_STATE_POWER_OFF_PULSE:
		ret = modem_cellular_on_power_off_pulse_state_leave(data);
		break;
	default:
		/* States without a leave hook succeed trivially. */
		ret = 0;
		break;
	}

	return ret;
}

/*
 * Transition the state machine: run the current state's leave hook, update
 * data->state, then run the new state's enter hook. A failing leave hook
 * aborts the transition.
 */
static void modem_cellular_enter_state(struct modem_cellular_data *data,
				       enum modem_cellular_state state)
{
	int ret;

	ret = modem_cellular_on_state_leave(data);

	if (ret < 0) {
		LOG_WRN("failed to leave state, error: %i", ret);

		return;
	}

	data->state = state;
	ret = modem_cellular_on_state_enter(data);

	if (ret < 0) {
		LOG_WRN("failed to enter state error: %i", ret);
	}
}

/* Route an event to the handler of the current state and log state changes. */
static void modem_cellular_event_handler(struct modem_cellular_data *data,
					 enum modem_cellular_event evt)
{
	enum modem_cellular_state state;

	state = data->state;

	modem_cellular_log_event(evt);

	switch (data->state) {
	case MODEM_CELLULAR_STATE_IDLE:
		modem_cellular_idle_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_RESET_PULSE:
		modem_cellular_reset_pulse_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_POWER_ON_PULSE:
		modem_cellular_power_on_pulse_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_AWAIT_POWER_ON:
		modem_cellular_await_power_on_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_RUN_INIT_SCRIPT:
		modem_cellular_run_init_script_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_CONNECT_CMUX:
		modem_cellular_connect_cmux_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_OPEN_DLCI1:
		modem_cellular_open_dlci1_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_OPEN_DLCI2:
		modem_cellular_open_dlci2_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_RUN_DIAL_SCRIPT:
		modem_cellular_run_dial_script_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_AWAIT_REGISTERED:
		modem_cellular_await_registered_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_CARRIER_ON:
		modem_cellular_carrier_on_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_INIT_POWER_OFF:
		modem_cellular_init_power_off_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_POWER_OFF_PULSE:
		modem_cellular_power_off_pulse_event_handler(data, evt);
		break;
	case MODEM_CELLULAR_STATE_AWAIT_POWER_OFF:
		modem_cellular_await_power_off_event_handler(data, evt);
		break;
	}

	if (state != data->state) {
		modem_cellular_log_state_changed(state, data->state);
	}
}

/* CMUX callback: forward the connected event into the state machine. */
static void modem_cellular_cmux_handler(struct modem_cmux *cmux, enum modem_cmux_event event,
					void *user_data)
{
	struct modem_cellular_data *data = (struct modem_cellular_data *)user_data;

	switch (event) {
	case MODEM_CMUX_EVENT_CONNECTED:
		modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_CMUX_CONNECTED);
		break;
	default:
		break;
	}
}
MODEM_CHAT_SCRIPT_CMDS_DEFINE(get_signal_csq_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CSQ", csq_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(get_signal_csq_chat_script, get_signal_csq_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 2); static inline int modem_cellular_csq_parse_rssi(uint8_t rssi, int16_t *value) { /* AT+CSQ returns a response +CSQ: <rssi>,<ber> where: * - rssi is a integer from 0 to 31 whose values describes a signal strength * between -113 dBm for 0 and -51dbM for 31 or unknown for 99 * - ber is an integer from 0 to 7 that describes the error rate, it can also * be 99 for an unknown error rate */ if (rssi == CSQ_RSSI_UNKNOWN) { return -EINVAL; } *value = (int16_t)CSQ_RSSI_TO_DB(rssi); return 0; } MODEM_CHAT_SCRIPT_CMDS_DEFINE(get_signal_cesq_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CESQ", cesq_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(get_signal_cesq_chat_script, get_signal_cesq_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 2); /* AT+CESQ returns a response +CESQ: <rxlev>,<ber>,<rscp>,<ecn0>,<rsrq>,<rsrp> where: * - rsrq is a integer from 0 to 34 whose values describes the Reference Signal Receive * Quality between -20 dB for 0 and -3 dB for 34 (0.5 dB steps), or unknown for 255 * - rsrp is an integer from 0 to 97 that describes the Reference Signal Receive Power * between -140 dBm for 0 and -44 dBm for 97 (1 dBm steps), or unknown for 255 */ static inline int modem_cellular_cesq_parse_rsrp(uint8_t rsrp, int16_t *value) { if (rsrp == CESQ_RSRP_UNKNOWN) { return -EINVAL; } *value = (int16_t)CESQ_RSRP_TO_DB(rsrp); return 0; } static inline int modem_cellular_cesq_parse_rsrq(uint8_t rsrq, int16_t *value) { if (rsrq == CESQ_RSRQ_UNKNOWN) { return -EINVAL; } *value = (int16_t)CESQ_RSRQ_TO_DB(rsrq); return 0; } static int modem_cellular_get_signal(const struct device *dev, const enum cellular_signal_type type, int16_t *value) 
{ int ret = -ENOTSUP; struct modem_cellular_data *data = (struct modem_cellular_data *)dev->data; if ((data->state != MODEM_CELLULAR_STATE_AWAIT_REGISTERED) && (data->state != MODEM_CELLULAR_STATE_CARRIER_ON)) { return -ENODATA; } /* Run chat script */ switch (type) { case CELLULAR_SIGNAL_RSSI: ret = modem_chat_run_script(&data->chat, &get_signal_csq_chat_script); break; case CELLULAR_SIGNAL_RSRP: case CELLULAR_SIGNAL_RSRQ: ret = modem_chat_run_script(&data->chat, &get_signal_cesq_chat_script); break; default: ret = -ENOTSUP; break; } /* Verify chat script ran successfully */ if (ret < 0) { return ret; } /* Parse received value */ switch (type) { case CELLULAR_SIGNAL_RSSI: ret = modem_cellular_csq_parse_rssi(data->rssi, value); break; case CELLULAR_SIGNAL_RSRP: ret = modem_cellular_cesq_parse_rsrp(data->rsrp, value); break; case CELLULAR_SIGNAL_RSRQ: ret = modem_cellular_cesq_parse_rsrq(data->rsrq, value); break; default: ret = -ENOTSUP; break; } return ret; } static int modem_cellular_get_modem_info(const struct device *dev, enum cellular_modem_info_type type, char *info, size_t size) { int ret = 0; struct modem_cellular_data *data = (struct modem_cellular_data *)dev->data; switch (type) { case CELLULAR_MODEM_INFO_IMEI: strncpy(info, &data->imei[0], MIN(size, sizeof(data->imei))); break; case CELLULAR_MODEM_INFO_SIM_IMSI: strncpy(info, &data->imsi[0], MIN(size, sizeof(data->imsi))); break; case CELLULAR_MODEM_INFO_MANUFACTURER: strncpy(info, &data->manufacturer[0], MIN(size, sizeof(data->manufacturer))); break; case CELLULAR_MODEM_INFO_FW_VERSION: strncpy(info, &data->fw_version[0], MIN(size, sizeof(data->fw_version))); break; case CELLULAR_MODEM_INFO_MODEL_ID: strncpy(info, &data->model_id[0], MIN(size, sizeof(data->model_id))); break; case CELLULAR_MODEM_INFO_SIM_ICCID: strncpy(info, &data->iccid[0], MIN(size, sizeof(data->iccid))); break; default: ret = -ENODATA; break; } return ret; } static int modem_cellular_get_registration_status(const struct device *dev, 
enum cellular_access_technology tech, enum cellular_registration_status *status) { int ret = 0; struct modem_cellular_data *data = (struct modem_cellular_data *)dev->data; switch (tech) { case CELLULAR_ACCESS_TECHNOLOGY_GSM: *status = data->registration_status_gsm; break; case CELLULAR_ACCESS_TECHNOLOGY_GPRS: case CELLULAR_ACCESS_TECHNOLOGY_UMTS: case CELLULAR_ACCESS_TECHNOLOGY_EDGE: *status = data->registration_status_gprs; break; case CELLULAR_ACCESS_TECHNOLOGY_LTE: case CELLULAR_ACCESS_TECHNOLOGY_LTE_CAT_M1: case CELLULAR_ACCESS_TECHNOLOGY_LTE_CAT_M2: case CELLULAR_ACCESS_TECHNOLOGY_NB_IOT: *status = data->registration_status_lte; break; default: ret = -ENODATA; break; } return ret; } const static struct cellular_driver_api modem_cellular_api = { .get_signal = modem_cellular_get_signal, .get_modem_info = modem_cellular_get_modem_info, .get_registration_status = modem_cellular_get_registration_status, }; #ifdef CONFIG_PM_DEVICE static int modem_cellular_pm_action(const struct device *dev, enum pm_device_action action) { struct modem_cellular_data *data = (struct modem_cellular_data *)dev->data; int ret; switch (action) { case PM_DEVICE_ACTION_RESUME: modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_RESUME); ret = 0; break; case PM_DEVICE_ACTION_SUSPEND: modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_SUSPEND); ret = k_sem_take(&data->suspended_sem, K_SECONDS(30)); break; default: ret = -ENOTSUP; break; } return ret; } #endif /* CONFIG_PM_DEVICE */ static int modem_cellular_init(const struct device *dev) { struct modem_cellular_data *data = (struct modem_cellular_data *)dev->data; struct modem_cellular_config *config = (struct modem_cellular_config *)dev->config; data->dev = dev; k_work_init_delayable(&data->timeout_work, modem_cellular_timeout_handler); k_work_init(&data->event_dispatch_work, modem_cellular_event_dispatch_handler); ring_buf_init(&data->event_rb, sizeof(data->event_buf), data->event_buf); k_sem_init(&data->suspended_sem, 0, 1); if 
(modem_cellular_gpio_is_enabled(&config->power_gpio)) { gpio_pin_configure_dt(&config->power_gpio, GPIO_OUTPUT_INACTIVE); } if (modem_cellular_gpio_is_enabled(&config->reset_gpio)) { gpio_pin_configure_dt(&config->reset_gpio, GPIO_OUTPUT_ACTIVE); } { const struct modem_backend_uart_config uart_backend_config = { .uart = config->uart, .receive_buf = data->uart_backend_receive_buf, .receive_buf_size = ARRAY_SIZE(data->uart_backend_receive_buf), .transmit_buf = data->uart_backend_transmit_buf, .transmit_buf_size = ARRAY_SIZE(data->uart_backend_transmit_buf), }; data->uart_pipe = modem_backend_uart_init(&data->uart_backend, &uart_backend_config); } { const struct modem_cmux_config cmux_config = { .callback = modem_cellular_cmux_handler, .user_data = data, .receive_buf = data->cmux_receive_buf, .receive_buf_size = ARRAY_SIZE(data->cmux_receive_buf), .transmit_buf = data->cmux_transmit_buf, .transmit_buf_size = ARRAY_SIZE(data->cmux_transmit_buf), }; modem_cmux_init(&data->cmux, &cmux_config); } { const struct modem_cmux_dlci_config dlci1_config = { .dlci_address = 1, .receive_buf = data->dlci1_receive_buf, .receive_buf_size = ARRAY_SIZE(data->dlci1_receive_buf), }; data->dlci1_pipe = modem_cmux_dlci_init(&data->cmux, &data->dlci1, &dlci1_config); } { const struct modem_cmux_dlci_config dlci2_config = { .dlci_address = 2, .receive_buf = data->dlci2_receive_buf, .receive_buf_size = ARRAY_SIZE(data->dlci2_receive_buf), }; data->dlci2_pipe = modem_cmux_dlci_init(&data->cmux, &data->dlci2, &dlci2_config); } for (uint8_t i = 0; i < config->user_pipes_size; i++) { struct modem_cellular_user_pipe *user_pipe = &config->user_pipes[i]; const struct modem_cmux_dlci_config user_dlci_config = { .dlci_address = user_pipe->dlci_address, .receive_buf = user_pipe->dlci_receive_buf, .receive_buf_size = user_pipe->dlci_receive_buf_size, }; user_pipe->pipe = modem_cmux_dlci_init(&data->cmux, &user_pipe->dlci, &user_dlci_config); modem_pipelink_init(user_pipe->pipelink, user_pipe->pipe); } { 
const struct modem_chat_config chat_config = { .user_data = data, .receive_buf = data->chat_receive_buf, .receive_buf_size = ARRAY_SIZE(data->chat_receive_buf), .delimiter = data->chat_delimiter, .delimiter_size = strlen(data->chat_delimiter), .filter = data->chat_filter, .filter_size = data->chat_filter ? strlen(data->chat_filter) : 0, .argv = data->chat_argv, .argv_size = ARRAY_SIZE(data->chat_argv), .unsol_matches = unsol_matches, .unsol_matches_size = ARRAY_SIZE(unsol_matches), }; modem_chat_init(&data->chat, &chat_config); } #ifndef CONFIG_PM_DEVICE modem_cellular_delegate_event(data, MODEM_CELLULAR_EVENT_RESUME); #else pm_device_init_suspended(dev); #endif /* CONFIG_PM_DEVICE */ return 0; } /* * Every modem uses two custom scripts to initialize the modem and dial out. * * The first script is named <dt driver compatible>_init_chat_script, with its * script commands named <dt driver compatible>_init_chat_script_cmds. This * script is sent to the modem after it has started up, and must configure the * modem to use CMUX. * * The second script is named <dt driver compatible>_dial_chat_script, with its * script commands named <dt driver compatible>_dial_chat_script_cmds. This * script is sent on a DLCI channel in command mode, and must request the modem * dial out and put the DLCI channel into data mode. 
*/ #if DT_HAS_COMPAT_STATUS_OKAY(quectel_bg95) MODEM_CHAT_SCRIPT_CMDS_DEFINE(quectel_bg95_init_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("ATE0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMI", cgmi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMR", cgmr_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CIMI", cimi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+QCCID", qccid_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT+CMUX=0,0,5,127", 300)); MODEM_CHAT_SCRIPT_DEFINE(quectel_bg95_init_chat_script, quectel_bg95_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(quectel_bg95_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_MULT("AT+CGACT=0,1", allow_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGDCONT=1,\"IP\"," "\""CONFIG_MODEM_CELLULAR_APN"\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("ATD*99***1#", 0),); MODEM_CHAT_SCRIPT_DEFINE(quectel_bg95_dial_chat_script, quectel_bg95_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(quectel_bg95_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), 
MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(quectel_bg95_periodic_chat_script, quectel_bg95_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); #endif #if DT_HAS_COMPAT_STATUS_OKAY(quectel_eg25_g) MODEM_CHAT_SCRIPT_CMDS_DEFINE( quectel_eg25_g_init_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("ATE0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMI", cgmi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMR", cgmr_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CIMI", cimi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT+CMUX=0,0,5,127,10,3,30,10,2", 100)); MODEM_CHAT_SCRIPT_DEFINE(quectel_eg25_g_init_chat_script, quectel_eg25_g_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(quectel_eg25_g_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_MULT("AT+CGACT=0,1", allow_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGDCONT=1,\"IP\"," "\""CONFIG_MODEM_CELLULAR_APN"\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("ATD*99***1#", 0),); MODEM_CHAT_SCRIPT_DEFINE(quectel_eg25_g_dial_chat_script, quectel_eg25_g_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 10); 
MODEM_CHAT_SCRIPT_CMDS_DEFINE(quectel_eg25_g_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CSQ", csq_match)); MODEM_CHAT_SCRIPT_DEFINE(quectel_eg25_g_periodic_chat_script, quectel_eg25_g_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); #endif #if DT_HAS_COMPAT_STATUS_OKAY(simcom_sim7080) MODEM_CHAT_SCRIPT_CMDS_DEFINE(simcom_sim7080_init_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP("ATE0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT+CMUX=0,0,5,127", 300)); MODEM_CHAT_SCRIPT_DEFINE(simcom_sim7080_init_chat_script, simcom_sim7080_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(simcom_sim7080_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_MULT("AT+CGACT=0,1", allow_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGDCONT=1,\"IP\"," "\""CONFIG_MODEM_CELLULAR_APN"\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("ATD*99***1#", 0),); MODEM_CHAT_SCRIPT_DEFINE(simcom_sim7080_dial_chat_script, 
simcom_sim7080_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(simcom_sim7080_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(simcom_sim7080_periodic_chat_script, simcom_sim7080_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); #endif #if DT_HAS_COMPAT_STATUS_OKAY(u_blox_sara_r4) MODEM_CHAT_SCRIPT_CMDS_DEFINE(u_blox_sara_r4_init_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP("ATE0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMUX=0,0,5,127", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(u_blox_sara_r4_init_chat_script, u_blox_sara_r4_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(u_blox_sara_r4_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_MULT("AT+CGACT=0,1", allow_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGDCONT=1,\"IP\"," "\""CONFIG_MODEM_CELLULAR_APN"\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("ATD*99***1#", 0),); 
MODEM_CHAT_SCRIPT_DEFINE(u_blox_sara_r4_dial_chat_script, u_blox_sara_r4_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(u_blox_sara_r4_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(u_blox_sara_r4_periodic_chat_script, u_blox_sara_r4_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); #endif #if DT_HAS_COMPAT_STATUS_OKAY(u_blox_sara_r5) MODEM_CHAT_SCRIPT_CMDS_DEFINE(u_blox_sara_r5_init_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP("ATE0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMI", cgmi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMR", cgmr_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CIMI", cimi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMUX=0,0,5,127", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(u_blox_sara_r5_init_chat_script, u_blox_sara_r5_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); 
MODEM_CHAT_SCRIPT_CMDS_DEFINE(u_blox_sara_r5_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_MULT("AT+CGACT=0,1", allow_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGDCONT=1,\"IP\"," "\""CONFIG_MODEM_CELLULAR_APN"\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("ATD*99***1#", 0),); MODEM_CHAT_SCRIPT_DEFINE(u_blox_sara_r5_dial_chat_script, u_blox_sara_r5_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(u_blox_sara_r5_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(u_blox_sara_r5_periodic_chat_script, u_blox_sara_r5_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); #endif #if DT_HAS_COMPAT_STATUS_OKAY(swir_hl7800) MODEM_CHAT_SCRIPT_CMDS_DEFINE(swir_hl7800_init_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP("ATE0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_MULT("AT+CGACT=0", allow_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMI", cgmi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMR", cgmr_match), MODEM_CHAT_SCRIPT_CMD_RESP("", 
ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CIMI", cimi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMUX=0,0,5,127", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(swir_hl7800_init_chat_script, swir_hl7800_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(swir_hl7800_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGDCONT=1,\"IP\"," "\""CONFIG_MODEM_CELLULAR_APN"\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+KCNXCFG=1,\"GPRS\",\"" CONFIG_MODEM_CELLULAR_APN "\",,,\"IPV4\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+WPPP=0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("ATD*99***1#", connect_match)); MODEM_CHAT_SCRIPT_CMDS_DEFINE(swir_hl7800_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(swir_hl7800_periodic_chat_script, swir_hl7800_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); MODEM_CHAT_SCRIPT_DEFINE(swir_hl7800_dial_chat_script, swir_hl7800_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 10); #endif #if DT_HAS_COMPAT_STATUS_OKAY(telit_me910g1) MODEM_CHAT_SCRIPT_CMDS_DEFINE(telit_me910g1_init_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT", 100), MODEM_CHAT_SCRIPT_CMD_RESP("ATE0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+ICCID", iccid_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CIMI", cimi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), /* The Telit me910g1 often has an error trying * to set the PDP context. The radio must be on to set * the context, and this step must be successful. * It is moved to the init script to allow retries. 
*/ MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGDCONT=1,\"IP\"," "\"" CONFIG_MODEM_CELLULAR_APN "\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMI", cgmi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMR", cgmr_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT+CMUX=0,0,5,127,10,3,30,10,2", 300)); MODEM_CHAT_SCRIPT_DEFINE(telit_me910g1_init_chat_script, telit_me910g1_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(telit_me910g1_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("ATD*99***1#", 0)); MODEM_CHAT_SCRIPT_DEFINE(telit_me910g1_dial_chat_script, telit_me910g1_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(telit_me910g1_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(telit_me910g1_periodic_chat_script, telit_me910g1_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); #endif #if DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf91_slm) MODEM_CHAT_SCRIPT_CMDS_DEFINE(nordic_nrf91_slm_init_chat_script_cmds, 
MODEM_CHAT_SCRIPT_CMD_RESP_MULT("AT", allow_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMI", cgmi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMR", cgmr_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT#XCMUX=1", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(nordic_nrf91_slm_init_chat_script, nordic_nrf91_slm_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(nordic_nrf91_slm_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT#XCMUX=2", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(nordic_nrf91_slm_dial_chat_script, nordic_nrf91_slm_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(nordic_nrf91_slm_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(nordic_nrf91_slm_periodic_chat_script, nordic_nrf91_slm_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); #endif #if DT_HAS_COMPAT_STATUS_OKAY(sqn_gm02s) MODEM_CHAT_SCRIPT_CMDS_DEFINE(sqn_gm02s_init_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("ATE0", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CFUN=4", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMEE=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG=1", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGSN", imei_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMM", cgmm_match), 
MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMI", cgmi_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGMR", cgmr_match), MODEM_CHAT_SCRIPT_CMD_RESP("", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CMUX=0,0,5,127", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(sqn_gm02s_init_chat_script, sqn_gm02s_init_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 10); MODEM_CHAT_SCRIPT_CMDS_DEFINE(sqn_gm02s_dial_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP_MULT("AT+CGACT=0,1", allow_match), MODEM_CHAT_SCRIPT_CMD_RESP("AT+CGDCONT=1,\"IP\"," "\""CONFIG_MODEM_CELLULAR_APN"\"", ok_match), MODEM_CHAT_SCRIPT_CMD_RESP_NONE("AT+CFUN=1", 10000), MODEM_CHAT_SCRIPT_CMD_RESP("ATD*99***1#", connect_match)); MODEM_CHAT_SCRIPT_DEFINE(sqn_gm02s_dial_chat_script, sqn_gm02s_dial_chat_script_cmds, dial_abort_matches, modem_cellular_chat_callback_handler, 15); MODEM_CHAT_SCRIPT_CMDS_DEFINE(sqn_gm02s_periodic_chat_script_cmds, MODEM_CHAT_SCRIPT_CMD_RESP("AT+CEREG?", ok_match)); MODEM_CHAT_SCRIPT_DEFINE(sqn_gm02s_periodic_chat_script, sqn_gm02s_periodic_chat_script_cmds, abort_matches, modem_cellular_chat_callback_handler, 4); #endif #define MODEM_CELLULAR_INST_NAME(name, inst) \ _CONCAT_4(name, _, DT_DRV_COMPAT, inst) #define MODEM_CELLULAR_DEFINE_USER_PIPE_DATA(inst, name, size) \ MODEM_PIPELINK_DT_INST_DEFINE(inst, name); \ static uint8_t MODEM_CELLULAR_INST_NAME(name, inst)[size] \ #define MODEM_CELLULAR_INIT_USER_PIPE(_inst, _name, _dlci_address) \ { \ .dlci_address = _dlci_address, \ .dlci_receive_buf = MODEM_CELLULAR_INST_NAME(_name, _inst), \ .dlci_receive_buf_size = sizeof(MODEM_CELLULAR_INST_NAME(_name, _inst)), \ .pipelink = MODEM_PIPELINK_DT_INST_GET(_inst, _name), \ } #define MODEM_CELLULAR_DEFINE_USER_PIPES(inst, ...) 
\ static struct modem_cellular_user_pipe MODEM_CELLULAR_INST_NAME(user_pipes, inst)[] = { \ __VA_ARGS__ \ } #define MODEM_CELLULAR_GET_USER_PIPES(inst) \ MODEM_CELLULAR_INST_NAME(user_pipes, inst) /* Extract the first argument (pipe name) from a pair */ #define MODEM_CELLULAR_GET_PIPE_NAME_ARG(arg1, ...) arg1 /* Extract the second argument (DLCI address) from a pair */ #define MODEM_CELLULAR_GET_DLCI_ADDRESS_ARG(arg1, arg2, ...) arg2 /* Define user pipe data using instance and extracted pipe name */ #define MODEM_CELLULAR_DEFINE_USER_PIPE_DATA_HELPER(_args, inst) \ MODEM_CELLULAR_DEFINE_USER_PIPE_DATA(inst, \ MODEM_CELLULAR_GET_PIPE_NAME_ARG _args, \ CONFIG_MODEM_CELLULAR_USER_PIPE_BUFFER_SIZES) /* Initialize user pipe using instance, extracted pipe name, and DLCI address */ #define MODEM_CELLULAR_INIT_USER_PIPE_HELPER(_args, inst) \ MODEM_CELLULAR_INIT_USER_PIPE(inst, \ MODEM_CELLULAR_GET_PIPE_NAME_ARG _args, \ MODEM_CELLULAR_GET_DLCI_ADDRESS_ARG _args) /* * Define and initialize user pipes dynamically * Takes an instance and pairs of (pipe name, DLCI address) */ #define MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, ...) 
\ FOR_EACH_FIXED_ARG(MODEM_CELLULAR_DEFINE_USER_PIPE_DATA_HELPER, \ (;), inst, __VA_ARGS__); \ MODEM_CELLULAR_DEFINE_USER_PIPES( \ inst, \ FOR_EACH_FIXED_ARG(MODEM_CELLULAR_INIT_USER_PIPE_HELPER, \ (,), inst, __VA_ARGS__) \ ); /* Helper to define modem instance */ #define MODEM_CELLULAR_DEFINE_INSTANCE(inst, power_ms, reset_ms, startup_ms, shutdown_ms, start, \ init_script, \ dial_script, \ periodic_script) \ static const struct modem_cellular_config MODEM_CELLULAR_INST_NAME(config, inst) = { \ .uart = DEVICE_DT_GET(DT_INST_BUS(inst)), \ .power_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, mdm_power_gpios, {}), \ .reset_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, mdm_reset_gpios, {}), \ .power_pulse_duration_ms = (power_ms), \ .reset_pulse_duration_ms = (reset_ms), \ .startup_time_ms = (startup_ms), \ .shutdown_time_ms = (shutdown_ms), \ .autostarts = (start), \ .init_chat_script = (init_script), \ .dial_chat_script = (dial_script), \ .periodic_chat_script = (periodic_script), \ .user_pipes = MODEM_CELLULAR_GET_USER_PIPES(inst), \ .user_pipes_size = ARRAY_SIZE(MODEM_CELLULAR_GET_USER_PIPES(inst)), \ }; \ \ PM_DEVICE_DT_INST_DEFINE(inst, modem_cellular_pm_action); \ \ DEVICE_DT_INST_DEFINE(inst, modem_cellular_init, PM_DEVICE_DT_INST_GET(inst), \ &MODEM_CELLULAR_INST_NAME(data, inst), \ &MODEM_CELLULAR_INST_NAME(config, inst), POST_KERNEL, 99, \ &modem_cellular_api); #define MODEM_CELLULAR_DEVICE_QUECTEL_BG95(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 64); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r", \ .chat_filter = "\n", \ .ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (user_pipe_0, 3), \ (user_pipe_1, 4)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 1500, 100, 10000, 5000, false, \ &quectel_bg95_init_chat_script, \ &quectel_bg95_dial_chat_script, \ &quectel_bg95_periodic_chat_script) #define 
MODEM_CELLULAR_DEVICE_QUECTEL_EG25_G(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 64); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r", \ .chat_filter = "\n", \ .ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (user_pipe_0, 3), \ (user_pipe_1, 4)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 1500, 500, 15000, 5000, false, \ &quectel_eg25_g_init_chat_script, \ &quectel_eg25_g_dial_chat_script, \ &quectel_eg25_g_periodic_chat_script) #define MODEM_CELLULAR_DEVICE_SIMCOM_SIM7080(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 64); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r", \ .chat_filter = "\n", \ .ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (user_pipe_0, 3), \ (user_pipe_1, 4)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 1500, 100, 10000, 5000, false, \ &simcom_sim7080_init_chat_script, \ &simcom_sim7080_dial_chat_script, \ &simcom_sim7080_periodic_chat_script) #define MODEM_CELLULAR_DEVICE_U_BLOX_SARA_R4(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 64); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r", \ .chat_filter = "\n", \ .ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (gnss_pipe, 3), \ (user_pipe_0, 4)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 1500, 100, 10000, 5000, false, \ &u_blox_sara_r4_init_chat_script, \ &u_blox_sara_r4_dial_chat_script, \ &u_blox_sara_r4_periodic_chat_script) #define MODEM_CELLULAR_DEVICE_U_BLOX_SARA_R5(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 64); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r", \ .chat_filter = "\n", \ 
.ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (gnss_pipe, 4), \ (user_pipe_0, 3)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 1500, 100, 1500, 13000, true, \ &u_blox_sara_r5_init_chat_script, \ &u_blox_sara_r5_dial_chat_script, \ &u_blox_sara_r5_periodic_chat_script) #define MODEM_CELLULAR_DEVICE_SWIR_HL7800(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 64); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r", \ .chat_filter = "\n", \ .ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (user_pipe_0, 3), \ (user_pipe_1, 4)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 1500, 100, 10000, 5000, false, \ &swir_hl7800_init_chat_script, \ &swir_hl7800_dial_chat_script, \ &swir_hl7800_periodic_chat_script) #define MODEM_CELLULAR_DEVICE_TELIT_ME910G1(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 64); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r", \ .chat_filter = "\n", \ .ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (user_pipe_0, 3)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 5050, 250, 15000, 5000, false, \ &telit_me910g1_init_chat_script, \ &telit_me910g1_dial_chat_script, \ &telit_me910g1_periodic_chat_script) #define MODEM_CELLULAR_DEVICE_NORDIC_NRF91_SLM(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 1500); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r\n", \ .ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (gnss_pipe, 3)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 100, 100, 2000, 10000, false, \ &nordic_nrf91_slm_init_chat_script, \ &nordic_nrf91_slm_dial_chat_script, \ 
&nordic_nrf91_slm_periodic_chat_script) #define MODEM_CELLULAR_DEVICE_SQN_GM02S(inst) \ MODEM_PPP_DEFINE(MODEM_CELLULAR_INST_NAME(ppp, inst), NULL, 98, 1500, 64); \ \ static struct modem_cellular_data MODEM_CELLULAR_INST_NAME(data, inst) = { \ .chat_delimiter = "\r", \ .chat_filter = "\n", \ .ppp = &MODEM_CELLULAR_INST_NAME(ppp, inst), \ }; \ \ MODEM_CELLULAR_DEFINE_AND_INIT_USER_PIPES(inst, \ (user_pipe_0, 3), \ (user_pipe_1, 4)) \ \ MODEM_CELLULAR_DEFINE_INSTANCE(inst, 1500, 100, 2000, 5000, true, \ &sqn_gm02s_init_chat_script, \ &sqn_gm02s_dial_chat_script, \ &sqn_gm02s_periodic_chat_script) #define DT_DRV_COMPAT quectel_bg95 DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_QUECTEL_BG95) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT quectel_eg25_g DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_QUECTEL_EG25_G) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT simcom_sim7080 DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_SIMCOM_SIM7080) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT u_blox_sara_r4 DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_U_BLOX_SARA_R4) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT u_blox_sara_r5 DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_U_BLOX_SARA_R5) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT swir_hl7800 DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_SWIR_HL7800) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT telit_me910g1 DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_TELIT_ME910G1) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT nordic_nrf91_slm DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_NORDIC_NRF91_SLM) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT sqn_gm02s DT_INST_FOREACH_STATUS_OKAY(MODEM_CELLULAR_DEVICE_SQN_GM02S) #undef DT_DRV_COMPAT ```
/content/code_sandbox/drivers/modem/modem_cellular.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
21,648
```unknown config MODEM_AT_SHELL bool "AT command shell based on modem modules" select MODEM_MODULES select MODEM_CHAT select MODEM_PIPE select MODEM_PIPELINK depends on !MODEM_SHELL depends on !SHELL_WILDCARD depends on $(dt_alias_enabled,modem) if MODEM_AT_SHELL config MODEM_AT_SHELL_USER_PIPE int "User pipe number to use" default 0 config MODEM_AT_SHELL_RESPONSE_TIMEOUT_S int "Timeout waiting for response to AT command in seconds" default 5 config MODEM_AT_SHELL_COMMAND_MAX_SIZE int "Maximum size of AT command" default 32 config MODEM_AT_SHELL_RESPONSE_MAX_SIZE int "Maximum size of AT response" default 64 config MODEM_AT_SHELL_CHAT_RECEIVE_BUF_SIZE int "Size of modem chat receive buffer in bytes" default 128 endif # MODEM_AT_SHELL ```
/content/code_sandbox/drivers/modem/Kconfig.at_shell
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
204
```objective-c /* * */ #ifndef QUECTEL_BG9X_H #define QUECTEL_BG9X_H #include <zephyr/kernel.h> #include <ctype.h> #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/net/net_if.h> #include <zephyr/net/offloaded_netdev.h> #include <zephyr/net/net_offload.h> #include <zephyr/net/socket_offload.h> #include "modem_context.h" #include "modem_socket.h" #include "modem_cmd_handler.h" #include "modem_iface_uart.h" #define MDM_UART_NODE DT_INST_BUS(0) #define MDM_UART_DEV DEVICE_DT_GET(MDM_UART_NODE) #define MDM_CMD_TIMEOUT K_SECONDS(10) #define MDM_CMD_CONN_TIMEOUT K_SECONDS(120) #define MDM_REGISTRATION_TIMEOUT K_SECONDS(180) #define MDM_SENDMSG_SLEEP K_MSEC(1) #define MDM_MAX_DATA_LENGTH 1024 #define MDM_RECV_MAX_BUF 30 #define MDM_RECV_BUF_SIZE 1024 #define MDM_MAX_SOCKETS 5 #define MDM_BASE_SOCKET_NUM 0 #define MDM_NETWORK_RETRY_COUNT 10 #define MDM_INIT_RETRY_COUNT 10 #define MDM_PDP_ACT_RETRY_COUNT 3 #define MDM_WAIT_FOR_RSSI_COUNT 10 #define MDM_WAIT_FOR_RSSI_DELAY K_SECONDS(2) #define BUF_ALLOC_TIMEOUT K_SECONDS(1) #define MDM_MAX_BOOT_TIME K_SECONDS(50) /* Default lengths of certain things. */ #define MDM_MANUFACTURER_LENGTH 10 #define MDM_MODEL_LENGTH 16 #define MDM_REVISION_LENGTH 64 #define MDM_IMEI_LENGTH 16 #define MDM_IMSI_LENGTH 16 #define MDM_ICCID_LENGTH 32 #define MDM_APN_LENGTH 32 #define RSSI_TIMEOUT_SECS 30 #define MDM_UNSOL_RDY CONFIG_MODEM_QUECTEL_BG9X_UNSOL_RDY #define MDM_APN CONFIG_MODEM_QUECTEL_BG9X_APN #define MDM_USERNAME CONFIG_MODEM_QUECTEL_BG9X_USERNAME #define MDM_PASSWORD CONFIG_MODEM_QUECTEL_BG9X_PASSWORD /* Modem ATOI routine. 
*/ #define ATOI(s_, value_, desc_) modem_atoi(s_, value_, desc_, __func__) /* pin settings */ enum mdm_control_pins { MDM_POWER = 0, MDM_RESET, #if DT_INST_NODE_HAS_PROP(0, mdm_dtr_gpios) MDM_DTR, #endif #if DT_INST_NODE_HAS_PROP(0, mdm_wdisable_gpios) MDM_WDISABLE, #endif }; /* driver data */ struct modem_data { struct net_if *net_iface; uint8_t mac_addr[6]; /* modem interface */ struct modem_iface_uart_data iface_data; uint8_t iface_rb_buf[MDM_MAX_DATA_LENGTH]; /* modem cmds */ struct modem_cmd_handler_data cmd_handler_data; uint8_t cmd_match_buf[MDM_RECV_BUF_SIZE + 1]; /* socket data */ struct modem_socket_config socket_config; struct modem_socket sockets[MDM_MAX_SOCKETS]; /* RSSI work */ struct k_work_delayable rssi_query_work; /* modem data */ char mdm_manufacturer[MDM_MANUFACTURER_LENGTH]; char mdm_model[MDM_MODEL_LENGTH]; char mdm_revision[MDM_REVISION_LENGTH]; char mdm_imei[MDM_IMEI_LENGTH]; #if defined(CONFIG_MODEM_SIM_NUMBERS) char mdm_imsi[MDM_IMSI_LENGTH]; char mdm_iccid[MDM_ICCID_LENGTH]; #endif /* #if defined(CONFIG_MODEM_SIM_NUMBERS) */ int mdm_rssi; /* bytes written to socket in last transaction */ int sock_written; /* Socket from which we are currently reading data. */ int sock_fd; /* Semaphore(s) */ struct k_sem sem_response; struct k_sem sem_tx_ready; struct k_sem sem_sock_conn; #if !DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) struct k_sem sem_pin_busy; #endif }; /* Socket read callback data */ struct socket_read_data { char *recv_buf; size_t recv_buf_len; struct sockaddr *recv_addr; uint16_t recv_read_len; }; #endif /* QUECTEL_BG9X_H */ ```
/content/code_sandbox/drivers/modem/quectel-bg9x.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
968
```c /** @file * @brief Modem shell module * * Provide some modem shell commands that can be useful to applications. */ /* * */ #define LOG_MODULE_NAME modem_shell #include <zephyr/kernel.h> #include <stdlib.h> #include <string.h> #include <zephyr/device.h> #include <zephyr/shell/shell.h> #include <zephyr/sys/printk.h> struct modem_shell_user_data { const struct shell *sh; void *user_data; }; #if defined(CONFIG_MODEM_CONTEXT) #include "modem_context.h" #define ms_context modem_context #define ms_max_context CONFIG_MODEM_CONTEXT_MAX_NUM #define ms_send(ctx_, buf_, size_) \ (ctx_->iface.write(&ctx_->iface, buf_, size_)) #define ms_context_from_id modem_context_from_id #define UART_DEV_NAME(ctx) (ctx->iface.dev->name) #elif defined(CONFIG_MODEM_RECEIVER) #include "modem_receiver.h" #define ms_context mdm_receiver_context #define ms_max_context CONFIG_MODEM_RECEIVER_MAX_CONTEXTS #define ms_send mdm_receiver_send #define ms_context_from_id mdm_receiver_context_from_id #define UART_DEV_NAME(ctx_) (ctx_->uart_dev->name) #else #error "MODEM_CONTEXT or MODEM_RECEIVER need to be enabled" #endif static int cmd_modem_list(const struct shell *sh, size_t argc, char *argv[]) { struct ms_context *mdm_ctx; int i, count = 0; shell_fprintf(sh, SHELL_NORMAL, "Modem receivers:\n"); for (i = 0; i < ms_max_context; i++) { mdm_ctx = ms_context_from_id(i); if (mdm_ctx) { count++; shell_fprintf(sh, SHELL_NORMAL, "%d:\tIface Device: %s\n" "\tManufacturer: %s\n" "\tModel: %s\n" "\tRevision: %s\n" "\tIMEI: %s\n" #if defined(CONFIG_MODEM_SIM_NUMBERS) "\tIMSI: %s\n" "\tICCID: %s\n" #endif #if defined(CONFIG_MODEM_CELL_INFO) "\tOperator: %d\n" "\tLAC: %d\n" "\tCellId: %d\n" "\tAcT: %d\n" #endif "\tRSSI: %d\n", i, UART_DEV_NAME(mdm_ctx), mdm_ctx->data_manufacturer, mdm_ctx->data_model, mdm_ctx->data_revision, mdm_ctx->data_imei, #if defined(CONFIG_MODEM_SIM_NUMBERS) mdm_ctx->data_imsi, mdm_ctx->data_iccid, #endif #if defined(CONFIG_MODEM_CELL_INFO) mdm_ctx->data_operator, mdm_ctx->data_lac, 
mdm_ctx->data_cellid, mdm_ctx->data_act, #endif mdm_ctx->data_rssi ? *mdm_ctx->data_rssi : 0); } } if (!count) { shell_fprintf(sh, SHELL_NORMAL, "None found.\n"); } return 0; } static int cmd_modem_send(const struct shell *sh, size_t argc, char *argv[]) { struct ms_context *mdm_ctx; char *endptr; int ret, i, arg = 1; /* list */ if (!argv[arg]) { shell_fprintf(sh, SHELL_ERROR, "Please enter a modem index\n"); return -EINVAL; } /* <index> of modem receiver */ i = (int)strtol(argv[arg], &endptr, 10); if (*endptr != '\0') { shell_fprintf(sh, SHELL_ERROR, "Please enter a modem index\n"); return -EINVAL; } mdm_ctx = ms_context_from_id(i); if (!mdm_ctx) { shell_fprintf(sh, SHELL_ERROR, "Modem receiver not found!"); return 0; } for (i = arg + 1; i < argc; i++) { ret = ms_send(mdm_ctx, argv[i], strlen(argv[i])); if (ret < 0) { shell_fprintf(sh, SHELL_ERROR, "Error sending '%s': %d\n", argv[i], ret); return 0; } if (i == argc - 1) { ret = ms_send(mdm_ctx, "\r", 1); } else { ret = ms_send(mdm_ctx, " ", 1); } if (ret < 0) { shell_fprintf(sh, SHELL_ERROR, "Error sending (CRLF or space): %d\n", ret); return 0; } } return 0; } static int cmd_modem_info(const struct shell *sh, size_t argc, char *argv[]) { struct ms_context *mdm_ctx; char *endptr; int i, arg = 1; /* info */ if (!argv[arg]) { shell_fprintf(sh, SHELL_ERROR, "Please enter a modem index\n"); return -EINVAL; } /* <index> of modem receiver */ i = (int)strtol(argv[arg], &endptr, 10); if (*endptr != '\0') { shell_fprintf(sh, SHELL_ERROR, "Please enter a modem index\n"); return -EINVAL; } mdm_ctx = ms_context_from_id(i); if (!mdm_ctx) { shell_fprintf(sh, SHELL_ERROR, "Modem receiver not found!"); return 0; } shell_fprintf(sh, SHELL_NORMAL, "Modem index : %d\n" "Iface Device : %s\n" "Manufacturer : %s\n" "Model : %s\n" "Revision : %s\n" "IMEI : %s\n" "RSSI : %d\n", i, UART_DEV_NAME(mdm_ctx), mdm_ctx->data_manufacturer, mdm_ctx->data_model, mdm_ctx->data_revision, mdm_ctx->data_imei, mdm_ctx->data_rssi ? 
*mdm_ctx->data_rssi : 0); shell_fprintf(sh, SHELL_NORMAL, "GSM 07.10 muxing : disabled\n"); return 0; } SHELL_STATIC_SUBCMD_SET_CREATE(sub_modem, SHELL_CMD(info, NULL, "Show information for a modem", cmd_modem_info), SHELL_CMD(list, NULL, "List registered modems", cmd_modem_list), SHELL_CMD(send, NULL, "Send an AT <command> to a registered modem " "receiver", cmd_modem_send), SHELL_SUBCMD_SET_END /* Array terminated. */ ); SHELL_CMD_REGISTER(modem, &sub_modem, "Modem commands", NULL); ```
/content/code_sandbox/drivers/modem/modem_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,546
```c /* * */ #define DT_DRV_COMPAT simcom_sim7080 #include <zephyr/logging/log.h> #include <zephyr/net/offloaded_netdev.h> LOG_MODULE_REGISTER(modem_simcom_sim7080, CONFIG_MODEM_LOG_LEVEL); #include <zephyr/drivers/modem/simcom-sim7080.h> #include "simcom-sim7080.h" #define SMS_TP_UDHI_HEADER 0x40 static struct k_thread modem_rx_thread; static struct k_work_q modem_workq; static struct sim7080_data mdata; static struct modem_context mctx; static const struct socket_op_vtable offload_socket_fd_op_vtable; static struct zsock_addrinfo dns_result; static struct sockaddr dns_result_addr; static char dns_result_canonname[DNS_MAX_NAME_SIZE + 1]; static struct sim7080_gnss_data gnss_data; static K_KERNEL_STACK_DEFINE(modem_rx_stack, CONFIG_MODEM_SIMCOM_SIM7080_RX_STACK_SIZE); static K_KERNEL_STACK_DEFINE(modem_workq_stack, CONFIG_MODEM_SIMCOM_SIM7080_RX_WORKQ_STACK_SIZE); NET_BUF_POOL_DEFINE(mdm_recv_pool, MDM_RECV_MAX_BUF, MDM_RECV_BUF_SIZE, 0, NULL); /* pin settings */ static const struct gpio_dt_spec power_gpio = GPIO_DT_SPEC_INST_GET(0, mdm_power_gpios); static void socket_close(struct modem_socket *sock); static const struct socket_dns_offload offload_dns_ops; static inline uint32_t hash32(char *str, int len) { #define HASH_MULTIPLIER 37 uint32_t h = 0; int i; for (i = 0; i < len; ++i) { h = (h * HASH_MULTIPLIER) + str[i]; } return h; } static inline uint8_t *modem_get_mac(const struct device *dev) { struct sim7080_data *data = dev->data; uint32_t hash_value; data->mac_addr[0] = 0x00; data->mac_addr[1] = 0x10; /* use IMEI for mac_addr */ hash_value = hash32(mdata.mdm_imei, strlen(mdata.mdm_imei)); UNALIGNED_PUT(hash_value, (uint32_t *)(data->mac_addr + 2)); return data->mac_addr; } static int offload_socket(int family, int type, int proto); /* Setup the Modem NET Interface. 
*/ static void modem_net_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct sim7080_data *data = dev->data; net_if_set_link_addr(iface, modem_get_mac(dev), sizeof(data->mac_addr), NET_LINK_ETHERNET); data->netif = iface; socket_offload_dns_register(&offload_dns_ops); net_if_socket_offload_set(iface, offload_socket); } /** * Changes the operating state of the sim7080. * * @param state The new state. */ static void change_state(enum sim7080_state state) { LOG_DBG("Changing state to (%d)", state); mdata.state = state; } /** * Get the current operating state of the sim7080. * * @return The current state. */ static enum sim7080_state get_state(void) { return mdata.state; } /* * Parses the +CAOPEN command and gives back the * connect semaphore. */ MODEM_CMD_DEFINE(on_cmd_caopen) { int result = atoi(argv[1]); LOG_INF("+CAOPEN: %d", result); modem_cmd_handler_set_error(data, result); return 0; } /* * Unlock the tx ready semaphore if '> ' is received. */ MODEM_CMD_DIRECT_DEFINE(on_cmd_tx_ready) { k_sem_give(&mdata.sem_tx_ready); return len; } /* * Connects an modem socket. Protocol can either be TCP or UDP. */ static int offload_connect(void *obj, const struct sockaddr *addr, socklen_t addrlen) { struct modem_socket *sock = (struct modem_socket *)obj; uint16_t dst_port = 0; char *protocol; struct modem_cmd cmd[] = { MODEM_CMD("+CAOPEN: ", on_cmd_caopen, 2U, ",") }; char buf[sizeof("AT+CAOPEN: #,#,#####,#xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx#,####")]; char ip_str[NET_IPV6_ADDR_LEN]; int ret; /* Modem is not attached to the network. */ if (get_state() != SIM7080_STATE_NETWORKING) { return -EAGAIN; } if (modem_socket_is_allocated(&mdata.socket_config, sock) == false) { LOG_ERR("Invalid socket id %d from fd %d", sock->id, sock->sock_fd); errno = EINVAL; return -1; } if (sock->is_connected == true) { LOG_ERR("Socket is already connected! 
id: %d, fd: %d", sock->id, sock->sock_fd); errno = EISCONN; return -1; } /* get the destination port */ if (addr->sa_family == AF_INET6) { dst_port = ntohs(net_sin6(addr)->sin6_port); } else if (addr->sa_family == AF_INET) { dst_port = ntohs(net_sin(addr)->sin_port); } /* Get protocol */ protocol = (sock->type == SOCK_STREAM) ? "TCP" : "UDP"; ret = modem_context_sprint_ip_addr(addr, ip_str, sizeof(ip_str)); if (ret != 0) { LOG_ERR("Failed to format IP!"); errno = ENOMEM; return -1; } ret = snprintk(buf, sizeof(buf), "AT+CAOPEN=%d,%d,\"%s\",\"%s\",%d", 0, sock->id, protocol, ip_str, dst_port); if (ret < 0) { LOG_ERR("Failed to build connect command. ID: %d, FD: %d", sock->id, sock->sock_fd); errno = ENOMEM; return -1; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmd, ARRAY_SIZE(cmd), buf, &mdata.sem_response, MDM_CONNECT_TIMEOUT); if (ret < 0) { LOG_ERR("%s ret: %d", buf, ret); socket_close(sock); goto error; } ret = modem_cmd_handler_get_error(&mdata.cmd_handler_data); if (ret != 0) { LOG_ERR("Closing the socket!"); socket_close(sock); goto error; } sock->is_connected = true; errno = 0; return 0; error: errno = -ret; return -1; } /* * Send data over a given socket. * * First we signal the module that we want to send data over a socket. * This is done by sending AT+CASEND=<sockfd>,<nbytes>\r\n. * If The module is ready to send data it will send back * an UNTERMINATED prompt '> '. After that data can be sent to the modem. * As terminating byte a STRG+Z (0x1A) is sent. The module will * then send a OK or ERROR. */ static ssize_t offload_sendto(void *obj, const void *buf, size_t len, int flags, const struct sockaddr *dest_addr, socklen_t addrlen) { int ret; struct modem_socket *sock = (struct modem_socket *)obj; char send_buf[sizeof("AT+CASEND=#,####")] = { 0 }; char ctrlz = 0x1A; /* Modem is not attached to the network. 
*/ if (get_state() != SIM7080_STATE_NETWORKING) { LOG_ERR("Modem currently not attached to the network!"); return -EAGAIN; } /* Do some sanity checks. */ if (!buf || len == 0) { errno = EINVAL; return -1; } /* Socket has to be connected. */ if (!sock->is_connected) { errno = ENOTCONN; return -1; } /* Only send up to MTU bytes. */ if (len > MDM_MAX_DATA_LENGTH) { len = MDM_MAX_DATA_LENGTH; } ret = snprintk(send_buf, sizeof(send_buf), "AT+CASEND=%d,%ld", sock->id, (long)len); if (ret < 0) { LOG_ERR("Failed to build send command!!"); errno = ENOMEM; return -1; } /* Make sure only one send can be done at a time. */ k_sem_take(&mdata.cmd_handler_data.sem_tx_lock, K_FOREVER); k_sem_reset(&mdata.sem_tx_ready); /* Send CASEND */ mdata.current_sock_written = len; ret = modem_cmd_send_nolock(&mctx.iface, &mctx.cmd_handler, NULL, 0U, send_buf, NULL, K_NO_WAIT); if (ret < 0) { LOG_ERR("Failed to send CASEND!!"); goto exit; } /* Wait for '> ' */ ret = k_sem_take(&mdata.sem_tx_ready, K_SECONDS(2)); if (ret < 0) { LOG_ERR("Timeout while waiting for tx"); goto exit; } /* Send data */ mctx.iface.write(&mctx.iface, buf, len); mctx.iface.write(&mctx.iface, &ctrlz, 1); /* Wait for the OK */ k_sem_reset(&mdata.sem_response); ret = k_sem_take(&mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("Timeout waiting for OK"); } exit: k_sem_give(&mdata.cmd_handler_data.sem_tx_lock); /* Data was successfully sent */ if (ret < 0) { errno = -ret; return -1; } errno = 0; return mdata.current_sock_written; } /* * Read data from a given socket. * * The response has the form +CARECV: <length>,data\r\nOK\r\n */ static int sockread_common(int sockfd, struct modem_cmd_handler_data *data, int socket_data_length, uint16_t len) { struct modem_socket *sock; struct socket_read_data *sock_data; int ret, packet_size; if (!len) { LOG_ERR("Invalid length, aborting"); return -EAGAIN; } if (!data->rx_buf) { LOG_ERR("Incorrect format! 
Ignoring data!"); return -EINVAL; } if (socket_data_length <= 0) { LOG_ERR("Length error (%d)", socket_data_length); return -EAGAIN; } if (net_buf_frags_len(data->rx_buf) < socket_data_length) { LOG_DBG("Not enough data -- wait!"); return -EAGAIN; } sock = modem_socket_from_fd(&mdata.socket_config, sockfd); if (!sock) { LOG_ERR("Socket not found! (%d)", sockfd); ret = -EINVAL; goto exit; } sock_data = (struct socket_read_data *)sock->data; if (!sock_data) { LOG_ERR("Socket data not found! (%d)", sockfd); ret = -EINVAL; goto exit; } ret = net_buf_linearize(sock_data->recv_buf, sock_data->recv_buf_len, data->rx_buf, 0, (uint16_t)socket_data_length); data->rx_buf = net_buf_skip(data->rx_buf, ret); sock_data->recv_read_len = ret; if (ret != socket_data_length) { LOG_ERR("Total copied data is different then received data!" " copied:%d vs. received:%d", ret, socket_data_length); ret = -EINVAL; goto exit; } exit: /* Indication only sets length to a dummy value. */ packet_size = modem_socket_next_packet_size(&mdata.socket_config, sock); modem_socket_packet_size_update(&mdata.socket_config, sock, -packet_size); return ret; } /* * Handler for carecv response. */ MODEM_CMD_DEFINE(on_cmd_carecv) { return sockread_common(mdata.current_sock_fd, data, atoi(argv[0]), len); } /* * Read data from a given socket. */ static ssize_t offload_recvfrom(void *obj, void *buf, size_t max_len, int flags, struct sockaddr *src_addr, socklen_t *addrlen) { struct modem_socket *sock = (struct modem_socket *)obj; char sendbuf[sizeof("AT+CARECV=##,####")]; int ret, packet_size; struct socket_read_data sock_data; struct modem_cmd data_cmd[] = { MODEM_CMD("+CARECV: ", on_cmd_carecv, 1U, ",") }; /* Modem is not attached to the network. 
*/ if (get_state() != SIM7080_STATE_NETWORKING) { LOG_ERR("Modem currently not attached to the network!"); return -EAGAIN; } if (!buf || max_len == 0) { errno = EINVAL; return -1; } if (flags & ZSOCK_MSG_PEEK) { errno = ENOTSUP; return -1; } packet_size = modem_socket_next_packet_size(&mdata.socket_config, sock); if (!packet_size) { if (flags & ZSOCK_MSG_DONTWAIT) { errno = EAGAIN; return -1; } modem_socket_wait_data(&mdata.socket_config, sock); packet_size = modem_socket_next_packet_size(&mdata.socket_config, sock); } max_len = (max_len > MDM_MAX_DATA_LENGTH) ? MDM_MAX_DATA_LENGTH : max_len; snprintk(sendbuf, sizeof(sendbuf), "AT+CARECV=%d,%zd", sock->id, max_len); memset(&sock_data, 0, sizeof(sock_data)); sock_data.recv_buf = buf; sock_data.recv_buf_len = max_len; sock_data.recv_addr = src_addr; sock->data = &sock_data; mdata.current_sock_fd = sock->sock_fd; ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, data_cmd, ARRAY_SIZE(data_cmd), sendbuf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { errno = -ret; ret = -1; goto exit; } /* HACK: use dst address as src */ if (src_addr && addrlen) { *addrlen = sizeof(sock->dst); memcpy(src_addr, &sock->dst, *addrlen); } errno = 0; ret = sock_data.recv_read_len; exit: /* clear socket data */ mdata.current_sock_fd = -1; sock->data = NULL; return ret; } /* * Sends messages to the modem. */ static ssize_t offload_sendmsg(void *obj, const struct msghdr *msg, int flags) { struct modem_socket *sock = obj; ssize_t sent = 0; const char *buf; size_t len; int ret; /* Modem is not attached to the network. */ if (get_state() != SIM7080_STATE_NETWORKING) { LOG_ERR("Modem currently not attached to the network!"); return -EAGAIN; } if (sock->type == SOCK_DGRAM) { /* * Current implementation only handles single contiguous fragment at a time, so * prevent sending multiple datagrams. 
*/ if (msghdr_non_empty_iov_count(msg) > 1) { errno = EMSGSIZE; return -1; } } for (int i = 0; i < msg->msg_iovlen; i++) { buf = msg->msg_iov[i].iov_base; len = msg->msg_iov[i].iov_len; while (len > 0) { ret = offload_sendto(obj, buf, len, flags, msg->msg_name, msg->msg_namelen); if (ret < 0) { if (ret == -EAGAIN) { k_sleep(K_SECONDS(1)); } else { return ret; } } else { sent += ret; buf += ret; len -= ret; } } } return sent; } /* * Closes a given socket. */ static void socket_close(struct modem_socket *sock) { char buf[sizeof("AT+CACLOSE=##")]; int ret; snprintk(buf, sizeof(buf), "AT+CACLOSE=%d", sock->id); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("%s ret: %d", buf, ret); } modem_socket_put(&mdata.socket_config, sock->sock_fd); } /* * Offloads read by reading from a given socket. */ static ssize_t offload_read(void *obj, void *buffer, size_t count) { return offload_recvfrom(obj, buffer, count, 0, NULL, 0); } /* * Offloads write by writing to a given socket. */ static ssize_t offload_write(void *obj, const void *buffer, size_t count) { return offload_sendto(obj, buffer, count, 0, NULL, 0); } /* * Offloads close by terminating the connection and freeing the socket. */ static int offload_close(void *obj) { struct modem_socket *sock = (struct modem_socket *)obj; /* Modem is not attached to the network. */ if (get_state() != SIM7080_STATE_NETWORKING) { LOG_ERR("Modem currently not attached to the network!"); return -EAGAIN; } /* Make sure socket is allocated */ if (modem_socket_is_allocated(&mdata.socket_config, sock) == false) { return 0; } /* Close the socket only if it is connected. */ if (sock->is_connected) { socket_close(sock); } return 0; } /* * Polls a given socket. */ static int offload_poll(struct zsock_pollfd *fds, int nfds, int msecs) { int i; void *obj; /* Modem is not attached to the network. 
*/ if (get_state() != SIM7080_STATE_NETWORKING) { LOG_ERR("Modem currently not attached to the network!"); return -EAGAIN; } /* Only accept modem sockets. */ for (i = 0; i < nfds; i++) { if (fds[i].fd < 0) { continue; } /* If vtable matches, then it's modem socket. */ obj = zvfs_get_fd_obj(fds[i].fd, (const struct fd_op_vtable *)&offload_socket_fd_op_vtable, EINVAL); if (obj == NULL) { return -1; } } return modem_socket_poll(&mdata.socket_config, fds, nfds, msecs); } /* * Offloads ioctl. Only supported ioctl is poll_offload. */ static int offload_ioctl(void *obj, unsigned int request, va_list args) { switch (request) { case ZFD_IOCTL_POLL_PREPARE: return -EXDEV; case ZFD_IOCTL_POLL_UPDATE: return -EOPNOTSUPP; case ZFD_IOCTL_POLL_OFFLOAD: { /* Poll on the given socket. */ struct zsock_pollfd *fds; int nfds, timeout; fds = va_arg(args, struct zsock_pollfd *); nfds = va_arg(args, int); timeout = va_arg(args, int); return offload_poll(fds, nfds, timeout); } default: errno = EINVAL; return -1; } } static const struct socket_op_vtable offload_socket_fd_op_vtable = { .fd_vtable = { .read = offload_read, .write = offload_write, .close = offload_close, .ioctl = offload_ioctl, }, .bind = NULL, .connect = offload_connect, .sendto = offload_sendto, .recvfrom = offload_recvfrom, .listen = NULL, .accept = NULL, .sendmsg = offload_sendmsg, .getsockopt = NULL, .setsockopt = NULL, }; /* * Parses the dns response from the modem. 
* * Response on success: * +CDNSGIP: 1,<domain name>,<IPv4>[,<IPv6>] * * Response on failure: * +CDNSGIP: 0,<err> */ MODEM_CMD_DEFINE(on_cmd_cdnsgip) { int state; char ips[256]; size_t out_len; int ret = -1; state = atoi(argv[0]); if (state == 0) { LOG_ERR("DNS lookup failed with error %s", argv[1]); goto exit; } /* Offset to skip the leading " */ out_len = net_buf_linearize(ips, sizeof(ips) - 1, data->rx_buf, 1, len); ips[out_len] = '\0'; /* find trailing " */ char *ipv4 = strstr(ips, "\""); if (!ipv4) { LOG_ERR("Malformed DNS response!!"); goto exit; } *ipv4 = '\0'; net_addr_pton(dns_result.ai_family, ips, &((struct sockaddr_in *)&dns_result_addr)->sin_addr); ret = 0; exit: k_sem_give(&mdata.sem_dns); return ret; } /* * Perform a dns lookup. */ static int offload_getaddrinfo(const char *node, const char *service, const struct zsock_addrinfo *hints, struct zsock_addrinfo **res) { struct modem_cmd cmd[] = { MODEM_CMD("+CDNSGIP: ", on_cmd_cdnsgip, 2U, ",") }; char sendbuf[sizeof("AT+CDNSGIP=\"\",##,#####") + 128]; uint32_t port = 0; int ret; /* Modem is not attached to the network. */ if (get_state() != SIM7080_STATE_NETWORKING) { LOG_ERR("Modem currently not attached to the network!"); return DNS_EAI_AGAIN; } /* init result */ (void)memset(&dns_result, 0, sizeof(dns_result)); (void)memset(&dns_result_addr, 0, sizeof(dns_result_addr)); /* Currently only support IPv4. 
*/ dns_result.ai_family = AF_INET; dns_result_addr.sa_family = AF_INET; dns_result.ai_addr = &dns_result_addr; dns_result.ai_addrlen = sizeof(dns_result_addr); dns_result.ai_canonname = dns_result_canonname; dns_result_canonname[0] = '\0'; if (service) { port = atoi(service); if (port < 1 || port > USHRT_MAX) { return DNS_EAI_SERVICE; } } if (port > 0U) { if (dns_result.ai_family == AF_INET) { net_sin(&dns_result_addr)->sin_port = htons(port); } } /* Check if node is an IP address */ if (net_addr_pton(dns_result.ai_family, node, &((struct sockaddr_in *)&dns_result_addr)->sin_addr) == 0) { *res = &dns_result; return 0; } /* user flagged node as numeric host, but we failed net_addr_pton */ if (hints && hints->ai_flags & AI_NUMERICHOST) { return DNS_EAI_NONAME; } snprintk(sendbuf, sizeof(sendbuf), "AT+CDNSGIP=\"%s\",10,20000", node); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmd, ARRAY_SIZE(cmd), sendbuf, &mdata.sem_dns, MDM_DNS_TIMEOUT); if (ret < 0) { return ret; } *res = (struct zsock_addrinfo *)&dns_result; return 0; } /* * Free addrinfo structure. */ static void offload_freeaddrinfo(struct zsock_addrinfo *res) { /* No need to free static memory. */ ARG_UNUSED(res); } /* * DNS vtable. */ static const struct socket_dns_offload offload_dns_ops = { .getaddrinfo = offload_getaddrinfo, .freeaddrinfo = offload_freeaddrinfo, }; static struct offloaded_if_api api_funcs = { .iface_api.init = modem_net_iface_init, }; static bool offload_is_supported(int family, int type, int proto) { if (family != AF_INET && family != AF_INET6) { return false; } if (type != SOCK_DGRAM && type != SOCK_STREAM) { return false; } if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) { return false; } return true; } static int offload_socket(int family, int type, int proto) { int ret; ret = modem_socket_get(&mdata.socket_config, family, type, proto); if (ret < 0) { errno = -ret; return -1; } errno = 0; return ret; } /* * Process all messages received from the modem. 
*/ static void modem_rx(void *p1, void *p2, void *p3) { ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3); while (true) { /* Wait for incoming data */ modem_iface_uart_rx_wait(&mctx.iface, K_FOREVER); modem_cmd_handler_process(&mctx.cmd_handler, &mctx.iface); } } MODEM_CMD_DEFINE(on_cmd_ok) { modem_cmd_handler_set_error(data, 0); k_sem_give(&mdata.sem_response); return 0; } MODEM_CMD_DEFINE(on_cmd_error) { modem_cmd_handler_set_error(data, -EIO); k_sem_give(&mdata.sem_response); return 0; } MODEM_CMD_DEFINE(on_cmd_exterror) { modem_cmd_handler_set_error(data, -EIO); k_sem_give(&mdata.sem_response); return 0; } /* * Handles pdp context urc. * * The urc has the form +APP PDP: <index>,<state>. * State can either be ACTIVE for activation or * DEACTIVE if disabled. */ MODEM_CMD_DEFINE(on_urc_app_pdp) { mdata.pdp_active = strcmp(argv[1], "ACTIVE") == 0; LOG_INF("PDP context: %u", mdata.pdp_active); k_sem_give(&mdata.sem_response); return 0; } MODEM_CMD_DEFINE(on_urc_sms) { LOG_INF("SMS: %s", argv[0]); return 0; } /* * Handles socket data notification. * * The sim modem sends and unsolicited +CADATAIND: <cid> * if data can be read from a socket. */ MODEM_CMD_DEFINE(on_urc_cadataind) { struct modem_socket *sock; int sock_fd; sock_fd = atoi(argv[0]); sock = modem_socket_from_fd(&mdata.socket_config, sock_fd); if (!sock) { return 0; } /* Modem does not tell packet size. Set dummy for receive. */ modem_socket_packet_size_update(&mdata.socket_config, sock, 1); LOG_INF("Data available on socket: %d", sock_fd); modem_socket_data_ready(&mdata.socket_config, sock); return 0; } /* * Handles the castate response. 
* * +CASTATE: <cid>,<state> * * Cid is the connection id (socket fd) and * state can be: * 0 - Closed by remote server or error * 1 - Connected to remote server * 2 - Listening */ MODEM_CMD_DEFINE(on_urc_castate) { struct modem_socket *sock; int sockfd, state; sockfd = atoi(argv[0]); state = atoi(argv[1]); sock = modem_socket_from_fd(&mdata.socket_config, sockfd); if (!sock) { return 0; } /* Only continue if socket was closed. */ if (state != 0) { return 0; } LOG_INF("Socket close indication for socket: %d", sockfd); sock->is_connected = false; LOG_INF("Socket closed: %d", sockfd); return 0; } /** * Handles the ftpget urc. * * +FTPGET: <mode>,<error> * * Mode can be 1 for opening a session and * reporting that data is available or 2 for * reading data. This urc handler will only handle * mode 1 because 2 will not occur as urc. * * Error can be either: * - 1 for data available/opened session. * - 0 If transfer is finished. * - >0 for some error. */ MODEM_CMD_DEFINE(on_urc_ftpget) { int error = atoi(argv[0]); LOG_INF("+FTPGET: 1,%d", error); /* Transfer finished. */ if (error == 0) { mdata.ftp.state = SIM7080_FTP_CONNECTION_STATE_FINISHED; } else if (error == 1) { mdata.ftp.state = SIM7080_FTP_CONNECTION_STATE_CONNECTED; } else { mdata.ftp.state = SIM7080_FTP_CONNECTION_STATE_ERROR; } k_sem_give(&mdata.sem_ftp); return 0; } /* * Read manufacturer identification. */ MODEM_CMD_DEFINE(on_cmd_cgmi) { size_t out_len = net_buf_linearize( mdata.mdm_manufacturer, sizeof(mdata.mdm_manufacturer) - 1, data->rx_buf, 0, len); mdata.mdm_manufacturer[out_len] = '\0'; LOG_INF("Manufacturer: %s", mdata.mdm_manufacturer); return 0; } /* * Read model identification. */ MODEM_CMD_DEFINE(on_cmd_cgmm) { size_t out_len = net_buf_linearize(mdata.mdm_model, sizeof(mdata.mdm_model) - 1, data->rx_buf, 0, len); mdata.mdm_model[out_len] = '\0'; LOG_INF("Model: %s", mdata.mdm_model); return 0; } /* * Read software release. * * Response will be in format RESPONSE: <revision>. 
*/ MODEM_CMD_DEFINE(on_cmd_cgmr) { size_t out_len; char *p; out_len = net_buf_linearize(mdata.mdm_revision, sizeof(mdata.mdm_revision) - 1, data->rx_buf, 0, len); mdata.mdm_revision[out_len] = '\0'; /* The module prepends a Revision: */ p = strchr(mdata.mdm_revision, ':'); if (p) { out_len = strlen(p + 1); memmove(mdata.mdm_revision, p + 1, out_len + 1); } LOG_INF("Revision: %s", mdata.mdm_revision); return 0; } /* * Read serial number identification. */ MODEM_CMD_DEFINE(on_cmd_cgsn) { size_t out_len = net_buf_linearize(mdata.mdm_imei, sizeof(mdata.mdm_imei) - 1, data->rx_buf, 0, len); mdata.mdm_imei[out_len] = '\0'; LOG_INF("IMEI: %s", mdata.mdm_imei); return 0; } #if defined(CONFIG_MODEM_SIM_NUMBERS) /* * Read international mobile subscriber identity. */ MODEM_CMD_DEFINE(on_cmd_cimi) { size_t out_len = net_buf_linearize(mdata.mdm_imsi, sizeof(mdata.mdm_imsi) - 1, data->rx_buf, 0, len); mdata.mdm_imsi[out_len] = '\0'; /* Log the received information. */ LOG_INF("IMSI: %s", mdata.mdm_imsi); return 0; } /* * Read iccid. */ MODEM_CMD_DEFINE(on_cmd_ccid) { size_t out_len = net_buf_linearize(mdata.mdm_iccid, sizeof(mdata.mdm_iccid) - 1, data->rx_buf, 0, len); mdata.mdm_iccid[out_len] = '\0'; /* Log the received information. */ LOG_INF("ICCID: %s", mdata.mdm_iccid); return 0; } #endif /* defined(CONFIG_MODEM_SIM_NUMBERS) */ /* * Parses the non urc C(E)REG and updates registration status. */ MODEM_CMD_DEFINE(on_cmd_cereg) { mdata.mdm_registration = atoi(argv[1]); LOG_INF("CREG: %u", mdata.mdm_registration); return 0; } MODEM_CMD_DEFINE(on_cmd_cpin) { mdata.cpin_ready = strcmp(argv[0], "READY") == 0; LOG_INF("CPIN: %d", mdata.cpin_ready); return 0; } MODEM_CMD_DEFINE(on_cmd_cgatt) { mdata.mdm_cgatt = atoi(argv[0]); LOG_INF("CGATT: %d", mdata.mdm_cgatt); return 0; } /* * Handler for RSSI query. * * +CSQ: <rssi>,<ber> * rssi: 0,-115dBm; 1,-111dBm; 2...30,-110...-54dBm; 31,-52dBm or greater. * 99, ukn * ber: Not used. 
*/ MODEM_CMD_DEFINE(on_cmd_csq) { int rssi = atoi(argv[0]); if (rssi == 0) { mdata.mdm_rssi = -115; } else if (rssi == 1) { mdata.mdm_rssi = -111; } else if (rssi > 1 && rssi < 31) { mdata.mdm_rssi = -114 + 2 * rssi; } else if (rssi == 31) { mdata.mdm_rssi = -52; } else { mdata.mdm_rssi = -1000; } LOG_INF("RSSI: %d", mdata.mdm_rssi); return 0; } /* * Queries modem RSSI. * * If a work queue parameter is provided query work will * be scheduled. Otherwise rssi is queried once. */ static void modem_rssi_query_work(struct k_work *work) { struct modem_cmd cmd[] = { MODEM_CMD("+CSQ: ", on_cmd_csq, 2U, ",") }; static char *send_cmd = "AT+CSQ"; int ret; ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmd, ARRAY_SIZE(cmd), send_cmd, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT+CSQ ret:%d", ret); } if (work) { k_work_reschedule_for_queue(&modem_workq, &mdata.rssi_query_work, K_SECONDS(RSSI_TIMEOUT_SECS)); } } /* * Possible responses by the sim7080. */ static const struct modem_cmd response_cmds[] = { MODEM_CMD("OK", on_cmd_ok, 0U, ""), MODEM_CMD("ERROR", on_cmd_error, 0U, ""), MODEM_CMD("+CME ERROR: ", on_cmd_exterror, 1U, ""), MODEM_CMD_DIRECT(">", on_cmd_tx_ready), }; /* * Possible unsolicited commands. 
*/ static const struct modem_cmd unsolicited_cmds[] = { MODEM_CMD("+APP PDP: ", on_urc_app_pdp, 2U, ","), MODEM_CMD("SMS ", on_urc_sms, 1U, ""), MODEM_CMD("+CADATAIND: ", on_urc_cadataind, 1U, ""), MODEM_CMD("+CASTATE: ", on_urc_castate, 2U, ","), MODEM_CMD("+FTPGET: 1,", on_urc_ftpget, 1U, ""), }; /* * Activates the pdp context */ static int modem_pdp_activate(void) { int counter; int ret = 0; #if defined(CONFIG_MODEM_SIMCOM_SIM7080_RAT_GSM) const char *buf = "AT+CREG?"; struct modem_cmd cmds[] = { MODEM_CMD("+CREG: ", on_cmd_cereg, 2U, ",") }; #else const char *buf = "AT+CEREG?"; struct modem_cmd cmds[] = { MODEM_CMD("+CEREG: ", on_cmd_cereg, 2U, ",") }; #endif /* defined(CONFIG_MODEM_SIMCOM_SIM7080_RAT_GSM) */ struct modem_cmd cgatt_cmd[] = { MODEM_CMD("+CGATT: ", on_cmd_cgatt, 1U, "") }; counter = 0; while (counter++ < MDM_MAX_CGATT_WAITS && mdata.mdm_cgatt != 1) { ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cgatt_cmd, ARRAY_SIZE(cgatt_cmd), "AT+CGATT?", &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("Failed to query cgatt!!"); return -1; } k_sleep(K_SECONDS(1)); } if (counter >= MDM_MAX_CGATT_WAITS) { LOG_WRN("Network attach failed!!"); return -1; } if (!mdata.cpin_ready || mdata.mdm_cgatt != 1) { LOG_ERR("Fatal: Modem is not attached to GPRS network!!"); return -1; } LOG_INF("Waiting for network"); /* Wait until the module is registered to the network. * Registration will be set by urc. 
*/ counter = 0; while (counter++ < MDM_MAX_CEREG_WAITS && mdata.mdm_registration != 1 && mdata.mdm_registration != 5) { ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmds, ARRAY_SIZE(cmds), buf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("Failed to query registration!!"); return -1; } k_sleep(K_SECONDS(1)); } if (counter >= MDM_MAX_CEREG_WAITS) { LOG_WRN("Network registration failed!"); ret = -1; goto error; } /* Set dual stack mode (IPv4/IPv6) */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, "AT+CNCFG=0,0", &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("Could not configure pdp context!"); goto error; } /* * Now activate the pdp context and wait for confirmation. */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, "AT+CNACT=0,1", &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("Could not activate PDP context."); goto error; } ret = k_sem_take(&mdata.sem_response, MDM_PDP_TIMEOUT); if (ret < 0 || mdata.pdp_active == false) { LOG_ERR("Failed to activate PDP context."); ret = -1; goto error; } LOG_INF("Network active."); error: return ret; } /* * Toggles the modems power pin. */ static void modem_pwrkey(void) { /* Power pin should be high for 1.5 seconds. */ gpio_pin_set_dt(&power_gpio, 1); k_sleep(K_MSEC(1500)); gpio_pin_set_dt(&power_gpio, 0); k_sleep(K_SECONDS(5)); } /* * Commands to be sent at setup. 
*/ static const struct setup_cmd setup_cmds[] = { SETUP_CMD_NOHANDLE("ATH"), SETUP_CMD("AT+CGMI", "", on_cmd_cgmi, 0U, ""), SETUP_CMD("AT+CGMM", "", on_cmd_cgmm, 0U, ""), SETUP_CMD("AT+CGMR", "", on_cmd_cgmr, 0U, ""), SETUP_CMD("AT+CGSN", "", on_cmd_cgsn, 0U, ""), #if defined(CONFIG_MODEM_SIM_NUMBERS) SETUP_CMD("AT+CIMI", "", on_cmd_cimi, 0U, ""), SETUP_CMD("AT+CCID", "", on_cmd_ccid, 0U, ""), #endif /* defined(CONFIG_MODEM_SIM_NUMBERS) */ #if defined(CONFIG_MODEM_SIMCOM_SIM7080_RAT_NB1) SETUP_CMD_NOHANDLE("AT+CNMP=38"), SETUP_CMD_NOHANDLE("AT+CMNB=2"), SETUP_CMD_NOHANDLE("AT+CBANDCFG=\"NB-IOT\"," MDM_LTE_BANDS), #endif /* defined(CONFIG_MODEM_SIMCOM_SIM7080_RAT_NB1) */ #if defined(CONFIG_MODEM_SIMCOM_SIM7080_RAT_M1) SETUP_CMD_NOHANDLE("AT+CNMP=38"), SETUP_CMD_NOHANDLE("AT+CMNB=1"), SETUP_CMD_NOHANDLE("AT+CBANDCFG=\"CAT-M\"," MDM_LTE_BANDS), #endif /* defined(CONFIG_MODEM_SIMCOM_SIM7080_RAT_M1) */ #if defined(CONFIG_MODEM_SIMCOM_SIM7080_RAT_GSM) SETUP_CMD_NOHANDLE("AT+CNMP=13"), #endif /* defined(CONFIG_MODEM_SIMCOM_SIM7080_RAT_GSM) */ SETUP_CMD("AT+CPIN?", "+CPIN: ", on_cmd_cpin, 1U, ""), }; /** * Performs the autobaud sequence until modem answers or limit is reached. * * @return On successful boot 0 is returned. Otherwise <0 is returned. */ static int modem_autobaud(void) { int boot_tries = 0; int counter = 0; int ret; while (boot_tries++ <= MDM_BOOT_TRIES) { modem_pwrkey(); /* * The sim7080 has a autobaud function. * On startup multiple AT's are sent until * a OK is received. */ counter = 0; while (counter < MDM_MAX_AUTOBAUD) { ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT", &mdata.sem_response, K_MSEC(500)); /* OK was received. */ if (ret == 0) { /* Disable echo */ return modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "ATE0", &mdata.sem_response, K_SECONDS(2)); } counter++; } } return -1; } /** * Get the next parameter from the gnss phrase. * * @param src The source string supported on first call. 
* @param delim The delimiter of the parameter list. * @param saveptr Pointer for subsequent parses. * @return On success a pointer to the parameter. On failure * or end of string NULL is returned. * * This function is used instead of strtok because strtok would * skip empty parameters, which is not desired. The modem may * omit parameters which could lead to a incorrect parse. */ static char *gnss_get_next_param(char *src, const char *delim, char **saveptr) { char *start, *del; if (src) { start = src; } else { start = *saveptr; } /* Illegal start string. */ if (!start) { return NULL; } /* End of string reached. */ if (*start == '\0' || *start == '\r') { return NULL; } del = strstr(start, delim); if (!del) { return NULL; } *del = '\0'; *saveptr = del + 1; if (del == start) { return NULL; } return start; } static void gnss_skip_param(char **saveptr) { gnss_get_next_param(NULL, ",", saveptr); } /** * Splits float parameters of the CGNSINF response on '.' * * @param src Null terminated string containing the float. * @param f1 Resulting number part of the float. * @param f2 Resulting fraction part of the float. * @return 0 if parsing was successful. Otherwise <0 is returned. * * If the number part of the float is negative f1 and f2 will be * negative too. */ static int gnss_split_on_dot(const char *src, int32_t *f1, int32_t *f2) { char *dot = strchr(src, '.'); if (!dot) { return -1; } *dot = '\0'; *f1 = (int32_t)strtol(src, NULL, 10); *f2 = (int32_t)strtol(dot + 1, NULL, 10); if (*f1 < 0) { *f2 = -*f2; } return 0; } /** * Parses cgnsinf response into the gnss_data structure. * * @param gps_buf Null terminated buffer containing the response. * @return 0 on successful parse. Otherwise <0 is returned. 
*/ static int parse_cgnsinf(char *gps_buf) { char *saveptr; int ret; int32_t number, fraction; char *run_status = gnss_get_next_param(gps_buf, ",", &saveptr); if (run_status == NULL) { goto error; } else if (*run_status != '1') { goto error; } char *fix_status = gnss_get_next_param(NULL, ",", &saveptr); if (fix_status == NULL) { goto error; } else if (*fix_status != '1') { goto error; } char *utc = gnss_get_next_param(NULL, ",", &saveptr); if (utc == NULL) { goto error; } char *lat = gnss_get_next_param(NULL, ",", &saveptr); if (lat == NULL) { goto error; } char *lon = gnss_get_next_param(NULL, ",", &saveptr); if (lon == NULL) { goto error; } char *alt = gnss_get_next_param(NULL, ",", &saveptr); char *speed = gnss_get_next_param(NULL, ",", &saveptr); char *course = gnss_get_next_param(NULL, ",", &saveptr); /* discard fix mode and reserved*/ gnss_skip_param(&saveptr); gnss_skip_param(&saveptr); char *hdop = gnss_get_next_param(NULL, ",", &saveptr); if (hdop == NULL) { goto error; } gnss_data.run_status = 1; gnss_data.fix_status = 1; strncpy(gnss_data.utc, utc, sizeof(gnss_data.utc) - 1); ret = gnss_split_on_dot(lat, &number, &fraction); if (ret != 0) { goto error; } gnss_data.lat = number * 10000000 + fraction * 10; ret = gnss_split_on_dot(lon, &number, &fraction); if (ret != 0) { goto error; } gnss_data.lon = number * 10000000 + fraction * 10; if (alt) { ret = gnss_split_on_dot(alt, &number, &fraction); if (ret != 0) { goto error; } gnss_data.alt = number * 1000 + fraction; } else { gnss_data.alt = 0; } ret = gnss_split_on_dot(hdop, &number, &fraction); if (ret != 0) { goto error; } gnss_data.hdop = number * 100 + fraction * 10; if (course) { ret = gnss_split_on_dot(course, &number, &fraction); if (ret != 0) { goto error; } gnss_data.cog = number * 100 + fraction * 10; } else { gnss_data.cog = 0; } if (speed) { ret = gnss_split_on_dot(speed, &number, &fraction); if (ret != 0) { goto error; } gnss_data.kmh = number * 10 + fraction / 10; } else { gnss_data.kmh = 0; } 
	return 0;

error:
	/* Invalidate any partially filled fix data. */
	memset(&gnss_data, 0, sizeof(gnss_data));
	return -1;
}

/*
 * Parses the +CGNSINF Gnss response.
 *
 * The CGNSINF command has the following parameters but
 * not all parameters are set by the module:
 *
 * +CGNSINF: <GNSS run status>,<Fix status>,<UTC date & Time>,
 * <Latitude>,<Longitude>,<MSL Altitude>,<Speed Over Ground>,
 * <Course Over Ground>,<Fix Mode>,<Reserved1>,<HDOP>,<PDOP>,
 * <VDOP>,<Reserved2>,<GNSS Satellites in View>,<Reserved3>,
 * <HPA>,<VPA>
 *
 */
MODEM_CMD_DEFINE(on_cmd_cgnsinf)
{
	char gps_buf[MDM_GNSS_PARSER_MAX_LEN];
	/* Linearize at most sizeof - 1 bytes so there is room for the
	 * terminating '\0' below.
	 */
	size_t out_len =
		net_buf_linearize(gps_buf, sizeof(gps_buf) - 1, data->rx_buf, 0, len);

	gps_buf[out_len] = '\0';
	return parse_cgnsinf(gps_buf);
}

/* Query one GNSS fix; copies it to data (if non NULL) and then clears
 * the internal fix so a stale fix is never reported twice.
 * Returns -EAGAIN while no valid fix is available.
 */
int mdm_sim7080_query_gnss(struct sim7080_gnss_data *data)
{
	int ret;
	struct modem_cmd cmds[] = { MODEM_CMD("+CGNSINF: ", on_cmd_cgnsinf, 0U, NULL) };

	if (get_state() != SIM7080_STATE_GNSS) {
		LOG_ERR("GNSS functionality is not enabled!!");
		return -1;
	}

	ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmds, ARRAY_SIZE(cmds),
			     "AT+CGNSINF", &mdata.sem_response, K_SECONDS(2));
	if (ret < 0) {
		return ret;
	}

	if (!gnss_data.run_status || !gnss_data.fix_status) {
		return -EAGAIN;
	}

	if (data) {
		memcpy(data, &gnss_data, sizeof(gnss_data));
	}

	memset(&gnss_data, 0, sizeof(gnss_data));
	return ret;
}

/* Restart the modem and put it in GNSS-only operation
 * (AT+CGNSCOLD performs a cold start of the GNSS engine).
 */
int mdm_sim7080_start_gnss(void)
{
	int ret;

	change_state(SIM7080_STATE_INIT);

	k_work_cancel_delayable(&mdata.rssi_query_work);

	ret = modem_autobaud();
	if (ret < 0) {
		LOG_ERR("Failed to start modem!!");
		return -1;
	}

	ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT+CGNSCOLD",
			     &mdata.sem_response, K_SECONDS(2));
	if (ret < 0) {
		return -1;
	}

	change_state(SIM7080_STATE_GNSS);
	return 0;
}

/**
 * Parse the +FTPGET response.
 *
 * +FTPGET: <mode>,<len>
 *
 * Mode is hard set to 2.
 *
 * Length is the number of bytes following (the ftp data).
 */
MODEM_CMD_DEFINE(on_cmd_ftpget)
{
	int nbytes = atoi(argv[0]);
	int bytes_to_skip;
	size_t out_len;

	/* Zero length means no (more) data is pending. */
	if (nbytes == 0) {
		mdata.ftp.nread = 0;
		return 0;
	}

	/* Skip length parameter and trailing \r\n */
	bytes_to_skip = strlen(argv[0]) + 2;

	/* Re-parse later (-EAGAIN) until strictly more than
	 * nbytes + bytes_to_skip bytes are buffered, so the skip
	 * below cannot drain the buffer empty.
	 */
	if (net_buf_frags_len(data->rx_buf) <= nbytes + bytes_to_skip) {
		return -EAGAIN;
	}

	/* mdata.ftp.nread still holds the caller's buffer capacity here. */
	out_len = net_buf_linearize(mdata.ftp.read_buffer, mdata.ftp.nread, data->rx_buf,
				    bytes_to_skip, nbytes);
	if (out_len != nbytes) {
		LOG_WRN("FTP read size differs!");
	}

	data->rx_buf = net_buf_skip(data->rx_buf, nbytes + bytes_to_skip);
	/* Report the number of payload bytes delivered. */
	mdata.ftp.nread = nbytes;

	return 0;
}

/**
 * Read a chunk of a running FTP download.
 *
 * @param dst Destination buffer for the ftp data.
 * @param size In: capacity of dst. Out: bytes actually read.
 * @return SIM7080_FTP_RC_OK, SIM7080_FTP_RC_FINISHED or
 *         SIM7080_FTP_RC_ERROR.
 */
int mdm_sim7080_ftp_get_read(char *dst, size_t *size)
{
	int ret;
	char buffer[sizeof("AT+FTPGET=#,######")];
	struct modem_cmd cmds[] = { MODEM_CMD("+FTPGET: 2,", on_cmd_ftpget, 1U, "") };

	/* Some error occurred or no session was started. */
	if (mdata.ftp.state == SIM7080_FTP_CONNECTION_STATE_ERROR ||
	    mdata.ftp.state == SIM7080_FTP_CONNECTION_STATE_INITIAL) {
		return SIM7080_FTP_RC_ERROR;
	}

	/* Setup buffer for on_cmd_ftpget. */
	mdata.ftp.read_buffer = dst;
	mdata.ftp.nread = *size;

	/* Build the read command. */
	ret = snprintk(buffer, sizeof(buffer), "AT+FTPGET=2,%zu", *size);
	if (ret < 0) {
		*size = 0;
		return SIM7080_FTP_RC_ERROR;
	}

	/* Wait for data from the server.
	 * NOTE(review): the k_sem_take result is deliberately ignored;
	 * on timeout we fall through and query anyway - confirm intended.
	 */
	k_sem_take(&mdata.sem_ftp, K_MSEC(200));

	if (mdata.ftp.state == SIM7080_FTP_CONNECTION_STATE_FINISHED) {
		*size = 0;
		return SIM7080_FTP_RC_FINISHED;
	} else if (mdata.ftp.state == SIM7080_FTP_CONNECTION_STATE_ERROR) {
		*size = 0;
		return SIM7080_FTP_RC_ERROR;
	}

	ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmds, ARRAY_SIZE(cmds), buffer,
			     &mdata.sem_response, MDM_CMD_TIMEOUT);
	if (ret < 0) {
		*size = 0;
		return SIM7080_FTP_RC_ERROR;
	}

	/* Set read size (filled in by on_cmd_ftpget). */
	*size = mdata.ftp.nread;

	return SIM7080_FTP_RC_OK;
}

/**
 * Start an FTP get session.
 *
 * @param server FTP server address.
 * @param user Login user name.
 * @param passwd Login password.
 * @param file Name of the file to download.
 * @param path Path of the file on the server.
 * @return 0 on success. Otherwise -1 is returned.
 */
int mdm_sim7080_ftp_get_start(const char *server, const char *user, const char *passwd,
			      const char *file, const char *path)
{
	int ret;
	char buffer[256];

	/* Start network.
*/ ret = mdm_sim7080_start_network(); if (ret < 0) { LOG_ERR("Failed to start network for FTP!"); return -1; } /* Set connection id for ftp. */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT+FTPCID=0", &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("Failed to set FTP Cid!"); return -1; } /* Set ftp server. */ ret = snprintk(buffer, sizeof(buffer), "AT+FTPSERV=\"%s\"", server); if (ret < 0) { LOG_WRN("Failed to build command!"); return -1; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buffer, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("Failed to set FTP Cid!"); return -1; } /* Set ftp user. */ ret = snprintk(buffer, sizeof(buffer), "AT+FTPUN=\"%s\"", user); if (ret < 0) { LOG_WRN("Failed to build command!"); return -1; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buffer, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("Failed to set ftp user!"); return -1; } /* Set ftp password. */ ret = snprintk(buffer, sizeof(buffer), "AT+FTPPW=\"%s\"", passwd); if (ret < 0) { LOG_WRN("Failed to build command!"); return -1; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buffer, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("Failed to set ftp password!"); return -1; } /* Set ftp filename. */ ret = snprintk(buffer, sizeof(buffer), "AT+FTPGETNAME=\"%s\"", file); if (ret < 0) { LOG_WRN("Failed to build command!"); return -1; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buffer, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("Failed to set ftp filename!"); return -1; } /* Set ftp filename. 
*/ ret = snprintk(buffer, sizeof(buffer), "AT+FTPGETNAME=\"%s\"", file); if (ret < 0) { LOG_WRN("Failed to build command!"); return -1; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buffer, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("Failed to set ftp filename!"); return -1; } /* Set ftp path. */ ret = snprintk(buffer, sizeof(buffer), "AT+FTPGETPATH=\"%s\"", path); if (ret < 0) { LOG_WRN("Failed to build command!"); return -1; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buffer, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("Failed to set ftp path!"); return -1; } /* Initialize ftp variables. */ mdata.ftp.read_buffer = NULL; mdata.ftp.nread = 0; mdata.ftp.state = SIM7080_FTP_CONNECTION_STATE_INITIAL; /* Start the ftp session. */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT+FTPGET=1", &mdata.sem_ftp, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("Failed to start session!"); return -1; } if (mdata.ftp.state != SIM7080_FTP_CONNECTION_STATE_CONNECTED) { LOG_WRN("Session state is not connected!"); return -1; } return 0; } /** * Decode readable hex to "real" hex. */ static uint8_t mdm_pdu_decode_ascii(char byte) { if ((byte >= '0') && (byte <= '9')) { return byte - '0'; } else if ((byte >= 'A') && (byte <= 'F')) { return byte - 'A' + 10; } else if ((byte >= 'a') && (byte <= 'f')) { return byte - 'a' + 10; } else { return 255; } } /** * Reads "byte" from pdu. * * @param pdu pdu to read from. * @param index index of "byte". * * Sim module "encodes" one pdu byte as two human readable bytes * this functions squashes these two bytes into one. */ static uint8_t mdm_pdu_read_byte(const char *pdu, size_t index) { return (mdm_pdu_decode_ascii(pdu[index * 2]) << 4 | mdm_pdu_decode_ascii(pdu[index * 2 + 1])); } /** * Decodes time from pdu. * * @param pdu pdu to read from. * @param index index of "byte". 
*/ static uint8_t mdm_pdu_read_time(const char *pdu, size_t index) { return (mdm_pdu_decode_ascii(pdu[index * 2]) + mdm_pdu_decode_ascii(pdu[index * 2 + 1]) * 10); } /** * Decode a sms from pdu mode. */ static int mdm_decode_pdu(const char *pdu, size_t pdu_len, struct sim7080_sms *target_buf) { size_t index; /* * GSM_03.38 to Unicode conversion table */ const short enc7_basic[128] = { '@', 0xA3, '$', 0xA5, 0xE8, 0xE9, 0xF9, 0xEC, 0xF2, 0xE7, '\n', 0xD8, 0xF8, '\r', 0xC5, 0xF8, 0x0394, '_', 0x03A6, 0x0393, 0x039B, 0x03A9, 0x03A0, 0x03A8, 0x03A3, 0x0398, 0x039E, '\x1b', 0xC6, 0xE6, 0xDF, 0xC9, ' ', '!', '\"', '#', 0xA4, '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', 0xA1, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 0xC4, 0xD6, 0xD1, 0xDC, 0xA7, 0xBF, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 0xE4, 0xF6, 0xF1, 0xFC, 0xE0 }; /* two bytes in pdu are on real byte */ pdu_len = (pdu_len / 2); /* first byte of pdu is length of trailing SMSC information * skip it by setting index to SMSC length + 1. */ index = mdm_pdu_read_byte(pdu, 0) + 1; if (index >= pdu_len) { return -1; } /* read first octet */ target_buf->first_octet = mdm_pdu_read_byte(pdu, index++); if (index >= pdu_len) { return -1; } /* pdu_index now points to the address field. * first byte of addr field is the addr length -> skip it. * address type is not included in addr len -> add +1. * address is coded in semi octets * + addr_len/2 if even * + addr_len/2 + 1 if odd */ uint8_t addr_len = mdm_pdu_read_byte(pdu, index); index += ((addr_len % 2) == 0) ? 
(addr_len / 2) + 2 : (addr_len / 2) + 3; if (index >= pdu_len) { return -1; } /* read protocol identifier */ target_buf->tp_pid = mdm_pdu_read_byte(pdu, index++); if (index >= pdu_len) { return -1; } /* read coding scheme */ uint8_t tp_dcs = mdm_pdu_read_byte(pdu, index++); /* parse date and time */ if ((index + 7) >= pdu_len) { return -1; } target_buf->time.year = mdm_pdu_read_time(pdu, index++); target_buf->time.month = mdm_pdu_read_time(pdu, index++); target_buf->time.day = mdm_pdu_read_time(pdu, index++); target_buf->time.hour = mdm_pdu_read_time(pdu, index++); target_buf->time.minute = mdm_pdu_read_time(pdu, index++); target_buf->time.second = mdm_pdu_read_time(pdu, index++); target_buf->time.timezone = mdm_pdu_read_time(pdu, index++); /* Read user data length */ uint8_t tp_udl = mdm_pdu_read_byte(pdu, index++); /* Discard header */ uint8_t header_skip = 0; if (target_buf->first_octet & SMS_TP_UDHI_HEADER) { uint8_t tp_udhl = mdm_pdu_read_byte(pdu, index); index += tp_udhl + 1; header_skip = tp_udhl + 1; if (index >= pdu_len) { return -1; } } /* Read data according to type set in TP-DCS */ if (tp_dcs == 0x00) { /* 7 bit GSM coding */ uint8_t fill_level = 0; uint16_t buf = 0; if (target_buf->first_octet & SMS_TP_UDHI_HEADER) { /* Initial fill because septets are aligned to * septet boundary after header */ uint8_t fill_bits = 7 - ((header_skip * 8) % 7); if (fill_bits == 7) { fill_bits = 0; } buf = mdm_pdu_read_byte(pdu, index++); fill_level = 8 - fill_bits; } uint16_t data_index = 0; for (unsigned int idx = 0; idx < tp_udl; idx++) { if (fill_level < 7) { uint8_t octet = mdm_pdu_read_byte(pdu, index++); buf &= ((1 << fill_level) - 1); buf |= (octet << fill_level); fill_level += 8; } /* * Convert 7-bit encoded data to Unicode and * then to UTF-8 */ short letter = enc7_basic[buf & 0x007f]; if (letter < 0x0080) { target_buf->data[data_index++] = letter & 0x007f; } else if (letter < 0x0800) { target_buf->data[data_index++] = 0xc0 | ((letter & 0x07c0) >> 6); 
target_buf->data[data_index++] = 0x80 | ((letter & 0x003f) >> 0); } buf >>= 7; fill_level -= 7; } target_buf->data_len = data_index; } else if (tp_dcs == 0x04) { /* 8 bit binary coding */ for (int idx = 0; idx < tp_udl - header_skip; idx++) { target_buf->data[idx] = mdm_pdu_read_byte(pdu, index++); } target_buf->data_len = tp_udl; } else if (tp_dcs == 0x08) { /* Unicode (16 bit per character) */ for (int idx = 0; idx < tp_udl - header_skip; idx++) { target_buf->data[idx] = mdm_pdu_read_byte(pdu, index++); } target_buf->data_len = tp_udl; } else { return -1; } return 0; } /** * Check if given char sequence is crlf. * * @param c The char sequence. * @param len Total length of the fragment. * @return @c true if char sequence is crlf. * Otherwise @c false is returned. */ static bool is_crlf(uint8_t *c, uint8_t len) { /* crlf does not fit. */ if (len < 2) { return false; } return c[0] == '\r' && c[1] == '\n'; } /** * Find terminating crlf in a netbuffer. * * @param buf The netbuffer. * @param skip Bytes to skip before search. * @return Length of the returned fragment or 0 if not found. */ static size_t net_buf_find_crlf(struct net_buf *buf, size_t skip) { size_t len = 0, pos = 0; struct net_buf *frag = buf; /* Skip to the start. */ while (frag && skip >= frag->len) { skip -= frag->len; frag = frag->frags; } /* Need to wait for more data. */ if (!frag) { return 0; } pos = skip; while (frag && !is_crlf(frag->data + pos, frag->len - pos)) { if (pos + 1 >= frag->len) { len += frag->len; frag = frag->frags; pos = 0U; } else { pos++; } } if (frag && is_crlf(frag->data + pos, frag->len - pos)) { len += pos; return len - skip; } return 0; } /** * Parses list sms and add them to buffer. * Format is: * * +CMGL: <index>,<stat>,,<length><CR><LF><pdu><CR><LF> * +CMGL: <index>,<stat>,,<length><CR><LF><pdu><CR><LF> * ... 
* OK */ MODEM_CMD_DEFINE(on_cmd_cmgl) { int sms_index, sms_stat, ret; char pdu_buffer[256]; size_t out_len, sms_len, param_len; struct sim7080_sms *sms; sms_index = atoi(argv[0]); sms_stat = atoi(argv[1]); /* Get the length of the "length" parameter. * The last parameter will be stuck in the netbuffer. * It is not the actual length of the trailing pdu so * we have to search the next crlf. */ param_len = net_buf_find_crlf(data->rx_buf, 0); if (param_len == 0) { LOG_INF("No <CR><LF>"); return -EAGAIN; } /* Get actual trailing pdu len. +2 to skip crlf. */ sms_len = net_buf_find_crlf(data->rx_buf, param_len + 2); if (sms_len == 0) { return -EAGAIN; } /* Skip to start of pdu. */ data->rx_buf = net_buf_skip(data->rx_buf, param_len + 2); out_len = net_buf_linearize(pdu_buffer, sizeof(pdu_buffer) - 1, data->rx_buf, 0, sms_len); pdu_buffer[out_len] = '\0'; data->rx_buf = net_buf_skip(data->rx_buf, sms_len); /* No buffer specified. */ if (!mdata.sms_buffer) { return 0; } /* No space left in buffer. 
*/ if (mdata.sms_buffer_pos >= mdata.sms_buffer->nsms) { return 0; } sms = &mdata.sms_buffer->sms[mdata.sms_buffer_pos]; ret = mdm_decode_pdu(pdu_buffer, out_len, sms); if (ret < 0) { return 0; } sms->stat = sms_stat; sms->index = sms_index; sms->data[sms->data_len] = '\0'; mdata.sms_buffer_pos++; return 0; } int mdm_sim7080_read_sms(struct sim7080_sms_buffer *buffer) { int ret; struct modem_cmd cmds[] = { MODEM_CMD("+CMGL: ", on_cmd_cmgl, 4U, ",\r") }; mdata.sms_buffer = buffer; mdata.sms_buffer_pos = 0; ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmds, ARRAY_SIZE(cmds), "AT+CMGL=4", &mdata.sem_response, K_SECONDS(20)); if (ret < 0) { return -1; } return mdata.sms_buffer_pos; } int mdm_sim7080_delete_sms(uint16_t index) { int ret; char buf[sizeof("AT+CMGD=#####")] = { 0 }; ret = snprintk(buf, sizeof(buf), "AT+CMGD=%u", index); if (ret < 0) { return -1; } ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, buf, &mdata.sem_response, K_SECONDS(5)); if (ret < 0) { return -1; } return 0; } /* * Does the modem setup by starting it and * bringing the modem to a PDP active state. */ static int modem_setup(void) { int ret = 0; int counter = 0; k_work_cancel_delayable(&mdata.rssi_query_work); ret = modem_autobaud(); if (ret < 0) { LOG_ERR("Booting modem failed!!"); goto error; } ret = modem_cmd_handler_setup_cmds(&mctx.iface, &mctx.cmd_handler, setup_cmds, ARRAY_SIZE(setup_cmds), &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); if (ret < 0) { LOG_ERR("Failed to send init commands!"); goto error; } k_sleep(K_SECONDS(3)); /* Wait for acceptable rssi values. 
*/ modem_rssi_query_work(NULL); k_sleep(MDM_WAIT_FOR_RSSI_DELAY); counter = 0; while (counter++ < MDM_WAIT_FOR_RSSI_COUNT && (mdata.mdm_rssi >= 0 || mdata.mdm_rssi <= -1000)) { modem_rssi_query_work(NULL); k_sleep(MDM_WAIT_FOR_RSSI_DELAY); } if (mdata.mdm_rssi >= 0 || mdata.mdm_rssi <= -1000) { LOG_ERR("Network not reachable!!"); ret = -ENETUNREACH; goto error; } ret = modem_pdp_activate(); if (ret < 0) { goto error; } k_work_reschedule_for_queue(&modem_workq, &mdata.rssi_query_work, K_SECONDS(RSSI_TIMEOUT_SECS)); change_state(SIM7080_STATE_NETWORKING); error: return ret; } int mdm_sim7080_start_network(void) { change_state(SIM7080_STATE_INIT); return modem_setup(); } int mdm_sim7080_power_on(void) { return modem_autobaud(); } int mdm_sim7080_power_off(void) { int tries = 5; int autobaud_tries; int ret = 0; k_work_cancel_delayable(&mdata.rssi_query_work); /* Check if module is already off. */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT", &mdata.sem_response, K_MSEC(1000)); if (ret < 0) { change_state(SIM7080_STATE_OFF); return 0; } while (tries--) { modem_pwrkey(); autobaud_tries = 5; while (autobaud_tries--) { ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, "AT", &mdata.sem_response, K_MSEC(500)); if (ret == 0) { break; } } if (ret < 0) { change_state(SIM7080_STATE_OFF); return 0; } } return -1; } const char *mdm_sim7080_get_manufacturer(void) { return mdata.mdm_manufacturer; } const char *mdm_sim7080_get_model(void) { return mdata.mdm_model; } const char *mdm_sim7080_get_revision(void) { return mdata.mdm_revision; } const char *mdm_sim7080_get_imei(void) { return mdata.mdm_imei; } /* * Initializes modem handlers and context. * After successful init this function calls * modem_setup. 
*/ static int modem_init(const struct device *dev) { int ret; ARG_UNUSED(dev); k_sem_init(&mdata.sem_response, 0, 1); k_sem_init(&mdata.sem_tx_ready, 0, 1); k_sem_init(&mdata.sem_dns, 0, 1); k_sem_init(&mdata.sem_ftp, 0, 1); k_work_queue_start(&modem_workq, modem_workq_stack, K_KERNEL_STACK_SIZEOF(modem_workq_stack), K_PRIO_COOP(7), NULL); /* Assume the modem is not registered to the network. */ mdata.mdm_registration = 0; mdata.cpin_ready = false; mdata.pdp_active = false; mdata.sms_buffer = NULL; mdata.sms_buffer_pos = 0; /* Socket config. */ ret = modem_socket_init(&mdata.socket_config, &mdata.sockets[0], ARRAY_SIZE(mdata.sockets), MDM_BASE_SOCKET_NUM, true, &offload_socket_fd_op_vtable); if (ret < 0) { goto error; } change_state(SIM7080_STATE_INIT); /* Command handler. */ const struct modem_cmd_handler_config cmd_handler_config = { .match_buf = &mdata.cmd_match_buf[0], .match_buf_len = sizeof(mdata.cmd_match_buf), .buf_pool = &mdm_recv_pool, .alloc_timeout = BUF_ALLOC_TIMEOUT, .eol = "\r\n", .user_data = NULL, .response_cmds = response_cmds, .response_cmds_len = ARRAY_SIZE(response_cmds), .unsol_cmds = unsolicited_cmds, .unsol_cmds_len = ARRAY_SIZE(unsolicited_cmds), }; ret = modem_cmd_handler_init(&mctx.cmd_handler, &mdata.cmd_handler_data, &cmd_handler_config); if (ret < 0) { goto error; } /* Uart handler. */ const struct modem_iface_uart_config uart_config = { .rx_rb_buf = &mdata.iface_rb_buf[0], .rx_rb_buf_len = sizeof(mdata.iface_rb_buf), .dev = MDM_UART_DEV, .hw_flow_control = DT_PROP(MDM_UART_NODE, hw_flow_control), }; ret = modem_iface_uart_init(&mctx.iface, &mdata.iface_data, &uart_config); if (ret < 0) { goto error; } mdata.current_sock_fd = -1; mdata.current_sock_written = 0; mdata.ftp.read_buffer = NULL; mdata.ftp.nread = 0; mdata.ftp.state = SIM7080_FTP_CONNECTION_STATE_INITIAL; /* Modem data storage. 
*/ mctx.data_manufacturer = mdata.mdm_manufacturer; mctx.data_model = mdata.mdm_model; mctx.data_revision = mdata.mdm_revision; mctx.data_imei = mdata.mdm_imei; #if defined(CONFIG_MODEM_SIM_NUMBERS) mctx.data_imsi = mdata.mdm_imsi; mctx.data_iccid = mdata.mdm_iccid; #endif /* #if defined(CONFIG_MODEM_SIM_NUMBERS) */ mctx.data_rssi = &mdata.mdm_rssi; ret = gpio_pin_configure_dt(&power_gpio, GPIO_OUTPUT_LOW); if (ret < 0) { LOG_ERR("Failed to configure %s pin", "power"); goto error; } mctx.driver_data = &mdata; memset(&gnss_data, 0, sizeof(gnss_data)); ret = modem_context_register(&mctx); if (ret < 0) { LOG_ERR("Error registering modem context: %d", ret); goto error; } k_thread_create(&modem_rx_thread, modem_rx_stack, K_KERNEL_STACK_SIZEOF(modem_rx_stack), modem_rx, NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT); /* Init RSSI query */ k_work_init_delayable(&mdata.rssi_query_work, modem_rssi_query_work); return modem_setup(); error: return ret; } /* Register device with the networking stack. */ NET_DEVICE_DT_INST_OFFLOAD_DEFINE(0, modem_init, NULL, &mdata, NULL, CONFIG_MODEM_SIMCOM_SIM7080_INIT_PRIORITY, &api_funcs, MDM_MAX_DATA_LENGTH); NET_SOCKET_OFFLOAD_REGISTER(simcom_sim7080, CONFIG_NET_SOCKETS_OFFLOAD_PRIORITY, AF_UNSPEC, offload_is_supported, offload_socket); ```
/content/code_sandbox/drivers/modem/simcom-sim7080.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
17,865
```objective-c /** @file * @brief Modem command handler header file. * * Text-based command handler implementation for modem context driver. */ /* * */ #ifndef ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_CMD_HANDLER_H_ #define ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_CMD_HANDLER_H_ #include <zephyr/kernel.h> #include "modem_context.h" #ifdef __cplusplus extern "C" { #endif #define MODEM_CMD_DEFINE(name_) \ static int name_(struct modem_cmd_handler_data *data, uint16_t len, \ uint8_t **argv, uint16_t argc) #define MODEM_CMD(cmd_, func_cb_, acount_, adelim_) { \ .cmd = cmd_, \ .cmd_len = (uint16_t)sizeof(cmd_)-1, \ .func = func_cb_, \ .arg_count_min = acount_, \ .arg_count_max = acount_, \ .delim = adelim_, \ .direct = false, \ } #define MODEM_CMD_ARGS_MAX(cmd_, func_cb_, acount_, acountmax_, adelim_) { \ .cmd = cmd_, \ .cmd_len = (uint16_t)sizeof(cmd_)-1, \ .func = func_cb_, \ .arg_count_min = acount_, \ .arg_count_max = acountmax_, \ .delim = adelim_, \ .direct = false, \ } #define MODEM_CMD_DIRECT_DEFINE(name_) MODEM_CMD_DEFINE(name_) #define MODEM_CMD_DIRECT(cmd_, func_cb_) { \ .cmd = cmd_, \ .cmd_len = (uint16_t)sizeof(cmd_)-1, \ .func = func_cb_, \ .arg_count_min = 0, \ .arg_count_max = 0, \ .delim = "", \ .direct = true, \ } #define CMD_RESP 0 #define CMD_UNSOL 1 #define CMD_HANDLER 2 #define CMD_MAX 3 /* * Flags for modem_send_cmd_ext. 
*/ #define MODEM_NO_TX_LOCK BIT(0) #define MODEM_NO_SET_CMDS BIT(1) #define MODEM_NO_UNSET_CMDS BIT(2) struct modem_cmd_handler_data; struct modem_cmd { int (*func)(struct modem_cmd_handler_data *data, uint16_t len, uint8_t **argv, uint16_t argc); const char *cmd; const char *delim; uint16_t cmd_len; uint16_t arg_count_min; uint16_t arg_count_max; bool direct; }; #define SETUP_CMD(cmd_send_, match_cmd_, func_cb_, num_param_, delim_) { \ .send_cmd = cmd_send_, \ MODEM_CMD(match_cmd_, func_cb_, num_param_, delim_) \ } #define SETUP_CMD_NOHANDLE(send_cmd_) \ SETUP_CMD(send_cmd_, NULL, NULL, 0U, NULL) /* series of modem setup commands to run */ struct setup_cmd { const char *send_cmd; struct modem_cmd handle_cmd; }; struct modem_cmd_handler_data { const struct modem_cmd *cmds[CMD_MAX]; size_t cmds_len[CMD_MAX]; char *match_buf; size_t match_buf_len; int last_error; const char *eol; size_t eol_len; /* rx net buffer */ struct net_buf *rx_buf; /* allocation info */ struct net_buf_pool *buf_pool; k_timeout_t alloc_timeout; /* locks */ struct k_sem sem_tx_lock; struct k_sem sem_parse_lock; /* user data */ void *user_data; }; /** * @brief get the last error code * * @param data: command handler data reference * * @retval last handled error. */ int modem_cmd_handler_get_error(struct modem_cmd_handler_data *data); /** * @brief set the last error code * * @param data: command handler data reference * @param error_code: error * * @retval 0 if ok, < 0 if error. */ int modem_cmd_handler_set_error(struct modem_cmd_handler_data *data, int error_code); /** * @brief update the parser's handler commands * * @param data: handler data to use * @param handler_cmds: commands to attach * @param handler_cmds_len: size of commands array * @param reset_error_flag: reset last error code * * @retval 0 if ok, < 0 if error. 
*/ int modem_cmd_handler_update_cmds(struct modem_cmd_handler_data *data, const struct modem_cmd *handler_cmds, size_t handler_cmds_len, bool reset_error_flag); /** * @brief send AT command to interface with behavior defined by flags * * This function is similar to @ref modem_cmd_send, but it allows to choose a * specific behavior regarding acquiring tx_lock, setting and unsetting * @a handler_cmds. * * @param iface: interface to use * @param handler: command handler to use * @param handler_cmds: commands to attach * @param handler_cmds_len: size of commands array * @param buf: NULL terminated send buffer * @param sem: wait for response semaphore * @param timeout: timeout of command * @param flags: flags which influence behavior of command sending * * @retval 0 if ok, < 0 if error. */ int modem_cmd_send_ext(struct modem_iface *iface, struct modem_cmd_handler *handler, const struct modem_cmd *handler_cmds, size_t handler_cmds_len, const uint8_t *buf, struct k_sem *sem, k_timeout_t timeout, int flags); /** * @brief send AT command to interface w/o locking TX * * @param iface: interface to use * @param handler: command handler to use * @param handler_cmds: commands to attach * @param handler_cmds_len: size of commands array * @param buf: NULL terminated send buffer * @param sem: wait for response semaphore * @param timeout: timeout of command * * @retval 0 if ok, < 0 if error. 
*/ static inline int modem_cmd_send_nolock(struct modem_iface *iface, struct modem_cmd_handler *handler, const struct modem_cmd *handler_cmds, size_t handler_cmds_len, const uint8_t *buf, struct k_sem *sem, k_timeout_t timeout) { return modem_cmd_send_ext(iface, handler, handler_cmds, handler_cmds_len, buf, sem, timeout, MODEM_NO_TX_LOCK); } /** * @brief send AT command to interface w/ a TX lock * * @param iface: interface to use * @param handler: command handler to use * @param handler_cmds: commands to attach * @param handler_cmds_len: size of commands array * @param buf: NULL terminated send buffer * @param sem: wait for response semaphore * @param timeout: timeout of command * * @retval 0 if ok, < 0 if error. */ static inline int modem_cmd_send(struct modem_iface *iface, struct modem_cmd_handler *handler, const struct modem_cmd *handler_cmds, size_t handler_cmds_len, const uint8_t *buf, struct k_sem *sem, k_timeout_t timeout) { return modem_cmd_send_ext(iface, handler, handler_cmds, handler_cmds_len, buf, sem, timeout, 0); } /** * @brief send a series of AT commands w/ a TX lock * * @param iface: interface to use * @param handler: command handler to use * @param cmds: array of setup commands to send * @param cmds_len: size of the setup command array * @param sem: wait for response semaphore * @param timeout: timeout of command * * @retval 0 if ok, < 0 if error. */ int modem_cmd_handler_setup_cmds(struct modem_iface *iface, struct modem_cmd_handler *handler, const struct setup_cmd *cmds, size_t cmds_len, struct k_sem *sem, k_timeout_t timeout); /** * @brief send a series of AT commands w/o locking TX * * @param iface: interface to use * @param handler: command handler to use * @param cmds: array of setup commands to send * @param cmds_len: size of the setup command array * @param sem: wait for response semaphore * @param timeout: timeout of command * * @retval 0 if ok, < 0 if error. 
*/ int modem_cmd_handler_setup_cmds_nolock(struct modem_iface *iface, struct modem_cmd_handler *handler, const struct setup_cmd *cmds, size_t cmds_len, struct k_sem *sem, k_timeout_t timeout); /** * @brief Modem command handler configuration * * @details Contains user configuration which is used to set up * command handler data context. The struct is initialized and then passed * to modem_cmd_handler_init(). * * @retval 0 if ok, < 0 if error. * @param match_buf Buffer used for matching commands * @param match_buf_len Length of buffer used for matching commands * @param buf_pool Initialized buffer pool used to store incoming data * @param alloc_timeout Timeout for allocating data in buffer pool * @param eol End of line represented as string * @param user_data Free to use data which can be retrieved from within command handlers * @param response_cmds Array of response command handlers * @param response_cmds_len Length of response command handlers array * @param unsol_cmds Array of unsolicitet command handlers * @param unsol_cmds_len Length of unsolicitet command handlers array */ struct modem_cmd_handler_config { char *match_buf; size_t match_buf_len; struct net_buf_pool *buf_pool; k_timeout_t alloc_timeout; const char *eol; void *user_data; const struct modem_cmd *response_cmds; size_t response_cmds_len; const struct modem_cmd *unsol_cmds; size_t unsol_cmds_len; }; /** * @brief Initialize modem command handler * * @details This function is called once for each command handler, before any * incoming data is processed. * * @note All arguments passed to this function, including the referenced data * contained in the setup struct, must persist as long as the command handler itself. 
* * @param handler Command handler to initialize * @param data Command handler data to use * @param setup Command handler setup * * @return -EINVAL if any argument is invalid * @return 0 if successful */ int modem_cmd_handler_init(struct modem_cmd_handler *handler, struct modem_cmd_handler_data *data, const struct modem_cmd_handler_config *config); /** * @brief Lock the modem for sending cmds * * This is semaphore-based rather than mutex based, which means there's no * requirements of thread ownership for the user. This function is useful * when one needs to prevent threads from sending UART data to the modem for an * extended period of time (for example during modem reset). * * @param handler: command handler to lock * @param timeout: give up after timeout * * @retval 0 if ok, < 0 if error. */ int modem_cmd_handler_tx_lock(struct modem_cmd_handler *handler, k_timeout_t timeout); /** * @brief Unlock the modem for sending cmds * * @param handler: command handler to unlock */ void modem_cmd_handler_tx_unlock(struct modem_cmd_handler *handler); /** * @brief Process incoming data * * @details This function will process any data available from the interface * using the command handler. The command handler will invoke any matching modem * command which has been registered using @ref modem_cmd_handler_init_cmds or * @ref modem_cmd_handler_update_cmds. Once handled, the function will return. * * @note This function should be invoked from a dedicated thread, which only handles * commands. * * @param handler The handler wich will handle the command when processed * @param iface The interface which receives incoming data */ static inline void modem_cmd_handler_process(struct modem_cmd_handler *handler, struct modem_iface *iface) { handler->process(handler, iface); } #ifdef __cplusplus } #endif #endif /* ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_CMD_HANDLER_H_ */ ```
/content/code_sandbox/drivers/modem/modem_cmd_handler.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,627
```c /** @file * @brief Modem receiver driver * * A modem receiver driver allowing application to handle all * aspects of received protocol data. */ /* * */ #include <zephyr/kernel.h> #include <zephyr/init.h> #include <zephyr/drivers/uart.h> #include <zephyr/pm/device.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mdm_receiver, CONFIG_MODEM_LOG_LEVEL); #include "modem_receiver.h" #define MAX_MDM_CTX CONFIG_MODEM_RECEIVER_MAX_CONTEXTS #define MAX_READ_SIZE 128 static struct mdm_receiver_context *contexts[MAX_MDM_CTX]; /** * @brief Finds receiver context which manages provided device. * * @param dev: device used by the receiver context. * * @retval Receiver context or NULL. */ static struct mdm_receiver_context *context_from_dev(const struct device *dev) { int i; for (i = 0; i < MAX_MDM_CTX; i++) { if (contexts[i] && contexts[i]->uart_dev == dev) { return contexts[i]; } } return NULL; } /** * @brief Persists receiver context if there is a free place. * * @note Amount of stored receiver contexts is determined by * MAX_MDM_CTX. * * @param ctx: receiver context to persist. * * @retval 0 if ok, < 0 if error. */ static int mdm_receiver_get(struct mdm_receiver_context *ctx) { int i; for (i = 0; i < MAX_MDM_CTX; i++) { if (!contexts[i]) { contexts[i] = ctx; return 0; } } return -ENOMEM; } /** * @brief Drains UART. * * @note Discards remaining data. * * @param ctx: receiver context. * * @retval None. */ static void mdm_receiver_flush(struct mdm_receiver_context *ctx) { uint8_t c; __ASSERT(ctx, "invalid ctx"); __ASSERT(ctx->uart_dev, "invalid ctx device"); while (uart_fifo_read(ctx->uart_dev, &c, 1) > 0) { continue; } } /** * @brief Receiver UART interrupt handler. * * @note Fills contexts ring buffer with received data. * When ring buffer is full the data is discarded. * * @param uart_dev: uart device. * * @retval None. 
*/ static void mdm_receiver_isr(const struct device *uart_dev, void *user_data) { struct mdm_receiver_context *ctx; int rx, ret; static uint8_t read_buf[MAX_READ_SIZE]; ARG_UNUSED(user_data); /* lookup the device */ ctx = context_from_dev(uart_dev); if (!ctx) { return; } /* get all of the data off UART as fast as we can */ while (uart_irq_update(ctx->uart_dev) && uart_irq_rx_ready(ctx->uart_dev)) { rx = uart_fifo_read(ctx->uart_dev, read_buf, sizeof(read_buf)); if (rx > 0) { ret = ring_buf_put(&ctx->rx_rb, read_buf, rx); if (ret != rx) { LOG_ERR("Rx buffer doesn't have enough space. " "Bytes pending: %d, written: %d", rx, ret); mdm_receiver_flush(ctx); k_sem_give(&ctx->rx_sem); break; } k_sem_give(&ctx->rx_sem); } } } /** * @brief Configures receiver context and assigned device. * * @param ctx: receiver context. * * @retval None. */ static void mdm_receiver_setup(struct mdm_receiver_context *ctx) { __ASSERT(ctx, "invalid ctx"); uart_irq_rx_disable(ctx->uart_dev); uart_irq_tx_disable(ctx->uart_dev); mdm_receiver_flush(ctx); uart_irq_callback_set(ctx->uart_dev, mdm_receiver_isr); uart_irq_rx_enable(ctx->uart_dev); } struct mdm_receiver_context *mdm_receiver_context_from_id(int id) { if (id >= 0 && id < MAX_MDM_CTX) { return contexts[id]; } else { return NULL; } } int mdm_receiver_recv(struct mdm_receiver_context *ctx, uint8_t *buf, size_t size, size_t *bytes_read) { if (!ctx) { return -EINVAL; } if (size == 0) { *bytes_read = 0; return 0; } *bytes_read = ring_buf_get(&ctx->rx_rb, buf, size); return 0; } int mdm_receiver_send(struct mdm_receiver_context *ctx, const uint8_t *buf, size_t size) { if (!ctx) { return -EINVAL; } if (size == 0) { return 0; } do { uart_poll_out(ctx->uart_dev, *buf++); } while (--size); return 0; } int mdm_receiver_sleep(struct mdm_receiver_context *ctx) { uart_irq_rx_disable(ctx->uart_dev); #ifdef CONFIG_PM_DEVICE pm_device_action_run(ctx->uart_dev, PM_DEVICE_ACTION_SUSPEND); #endif return 0; } int mdm_receiver_wake(struct 
mdm_receiver_context *ctx) { #ifdef CONFIG_PM_DEVICE pm_device_action_run(ctx->uart_dev, PM_DEVICE_ACTION_RESUME); #endif uart_irq_rx_enable(ctx->uart_dev); return 0; } int mdm_receiver_register(struct mdm_receiver_context *ctx, const struct device *uart_dev, uint8_t *buf, size_t size) { int ret; if ((!ctx) || (size == 0)) { return -EINVAL; } if (!device_is_ready(uart_dev)) { LOG_ERR("Device is not ready: %s", uart_dev ? uart_dev->name : "<null>"); return -ENODEV; } ctx->uart_dev = uart_dev; ring_buf_init(&ctx->rx_rb, size, buf); k_sem_init(&ctx->rx_sem, 0, 1); ret = mdm_receiver_get(ctx); if (ret < 0) { return ret; } mdm_receiver_setup(ctx); return 0; } ```
/content/code_sandbox/drivers/modem/modem_receiver.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,358
```unknown # Quectel BG9x driver options config MODEM_QUECTEL_BG9X bool "Quectel modem driver" select MODEM_CONTEXT select MODEM_CMD_HANDLER select MODEM_IFACE_UART select MODEM_SOCKET select NET_SOCKETS_OFFLOAD help Choose this setting to enable quectel BG9x LTE-CatM1/NB-IoT modem driver. if MODEM_QUECTEL_BG9X config MODEM_QUECTEL_BG9X_RX_STACK_SIZE int "Stack size for the quectel BG9X modem driver RX thread" default 1028 help This stack is used by the quectel BG9X RX thread. config MODEM_QUECTEL_BG9X_RX_WORKQ_STACK_SIZE int "Stack size for the quectel BG9X modem driver work queue" default 2048 help This stack is used by the work queue to pass off net_pkt data to the rest of the network stack, letting the rx thread continue processing data. config MODEM_QUECTEL_BG9X_APN string "APN for establishing network connection" default "internet" help This setting is used to set the APN name for the network connection context. This value is specific to the network provider and may need to be changed. config MODEM_QUECTEL_BG9X_USERNAME string "Username for establishing network connection" default "" help This setting is used to set the User name for the network connection context. This value is specific to the network provider and may need to be changed. config MODEM_QUECTEL_BG9X_PASSWORD string "Password for establishing network connection" default "" help This setting is used to set the Password for the network connection context. This value is specific to the network provider and may need to be changed. config MODEM_QUECTEL_BG9X_UNSOL_RDY string "Unsolicited response string expected from the modem" default "RDY" help This string is used to determine when modem powered and ready to accept AT commands. If this value is not matching the modem response, the init will fail with timeout. config MODEM_QUECTEL_BG9X_INIT_PRIORITY int "quectel BG9X driver init priority" default 80 help quectel BG9X device driver initialization priority. 
Do not mess with it unless you know what you are doing. Note that the priority needs to be lower than the net stack so that it can start before the networking sub-system. endif ```
/content/code_sandbox/drivers/modem/Kconfig.quectel-bg9x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
570
```c /** @file * @brief Modem context helper driver * * A modem context driver allowing application to handle all * aspects of received protocol data. */ /* * */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(modem_context, CONFIG_MODEM_LOG_LEVEL); #include <zephyr/kernel.h> #include "modem_context.h" static struct modem_context *contexts[CONFIG_MODEM_CONTEXT_MAX_NUM]; int modem_context_sprint_ip_addr(const struct sockaddr *addr, char *buf, size_t buf_size) { static const char unknown_str[] = "unk"; if (addr->sa_family == AF_INET6) { if (buf_size < NET_IPV6_ADDR_LEN) { return -ENOMEM; } if (net_addr_ntop(AF_INET6, &net_sin6(addr)->sin6_addr, buf, buf_size) == NULL) { return -ENOMEM; } return 0; } if (addr->sa_family == AF_INET) { if (buf_size < NET_IPV4_ADDR_LEN) { return -ENOMEM; } if (net_addr_ntop(AF_INET, &net_sin(addr)->sin_addr, buf, buf_size) == NULL) { return -ENOMEM; } return 0; } LOG_ERR("Unknown IP address family:%d", addr->sa_family); if (buf_size < sizeof(unknown_str)) { return -ENOMEM; } strcpy(buf, unknown_str); return 0; } int modem_context_get_addr_port(const struct sockaddr *addr, uint16_t *port) { if (!addr || !port) { return -EINVAL; } if (addr->sa_family == AF_INET6) { *port = ntohs(net_sin6(addr)->sin6_port); return 0; } else if (addr->sa_family == AF_INET) { *port = ntohs(net_sin(addr)->sin_port); return 0; } return -EPROTONOSUPPORT; } /** * @brief Finds modem context which owns the iface device. * * @param dev: device used by the modem iface. * * @retval Modem context or NULL. */ struct modem_context *modem_context_from_iface_dev(const struct device *dev) { int i; for (i = 0; i < ARRAY_SIZE(contexts); i++) { if (contexts[i] && contexts[i]->iface.dev == dev) { return contexts[i]; } } return NULL; } /** * @brief Assign a modem context if there is free space. * * @note Amount of stored modem contexts is determined by * CONFIG_MODEM_CONTEXT_MAX_NUM. * * @param ctx: modem context to persist. * * @retval 0 if ok, < 0 if error. 
*/ static int modem_context_get(struct modem_context *ctx) { int i; for (i = 0; i < ARRAY_SIZE(contexts); i++) { if (!contexts[i]) { contexts[i] = ctx; return 0; } } return -ENOMEM; } struct modem_context *modem_context_from_id(int id) { if (id >= 0 && id < ARRAY_SIZE(contexts)) { return contexts[id]; } else { return NULL; } } int modem_context_register(struct modem_context *ctx) { if (!ctx) { return -EINVAL; } return modem_context_get(ctx); } ```
/content/code_sandbox/drivers/modem/modem_context.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
736
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/shell/shell.h> #include <zephyr/modem/chat.h> #include <zephyr/modem/pipelink.h> #include <zephyr/sys/atomic.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(modem_at_shell, CONFIG_MODEM_LOG_LEVEL); #define AT_SHELL_MODEM_NODE DT_ALIAS(modem) #define AT_SHELL_PIPELINK_NAME _CONCAT(user_pipe_, CONFIG_MODEM_AT_SHELL_USER_PIPE) #define AT_SHELL_STATE_ATTACHED_BIT 0 #define AT_SHELL_STATE_SCRIPT_RUNNING_BIT 1 MODEM_PIPELINK_DT_DECLARE(AT_SHELL_MODEM_NODE, AT_SHELL_PIPELINK_NAME); static struct modem_pipelink *at_shell_pipelink = MODEM_PIPELINK_DT_GET(AT_SHELL_MODEM_NODE, AT_SHELL_PIPELINK_NAME); static struct modem_chat at_shell_chat; static uint8_t at_shell_chat_receive_buf[CONFIG_MODEM_AT_SHELL_CHAT_RECEIVE_BUF_SIZE]; static uint8_t *at_shell_chat_argv_buf[2]; static uint8_t at_shell_request_buf[CONFIG_MODEM_AT_SHELL_COMMAND_MAX_SIZE]; static struct modem_chat_script_chat at_shell_script_chat[1]; static struct modem_chat_match at_shell_script_chat_matches[2]; static uint8_t at_shell_match_buf[CONFIG_MODEM_AT_SHELL_RESPONSE_MAX_SIZE]; static const struct shell *at_shell_active_shell; static struct k_work at_shell_open_pipe_work; static struct k_work at_shell_attach_chat_work; static struct k_work at_shell_release_chat_work; static atomic_t at_shell_state; static void at_shell_print_any_match(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { if (at_shell_active_shell == NULL) { return; } if (argc != 2) { return; } shell_print(at_shell_active_shell, "%s", argv[1]); } static void at_shell_print_match(struct modem_chat *chat, char **argv, uint16_t argc, void *user_data) { if (at_shell_active_shell == NULL) { return; } if (argc != 1) { return; } shell_print(at_shell_active_shell, "%s", argv[0]); } MODEM_CHAT_MATCHES_DEFINE( at_shell_abort_matches, MODEM_CHAT_MATCH("ERROR", "", at_shell_print_match), ); static void at_shell_script_callback(struct modem_chat *chat, enum 
modem_chat_script_result result, void *user_data) { atomic_clear_bit(&at_shell_state, AT_SHELL_STATE_SCRIPT_RUNNING_BIT); } MODEM_CHAT_SCRIPT_DEFINE( at_shell_script, at_shell_script_chat, at_shell_abort_matches, at_shell_script_callback, CONFIG_MODEM_AT_SHELL_RESPONSE_TIMEOUT_S ); static void at_shell_pipe_callback(struct modem_pipe *pipe, enum modem_pipe_event event, void *user_data) { ARG_UNUSED(user_data); switch (event) { case MODEM_PIPE_EVENT_OPENED: LOG_INF("pipe opened"); k_work_submit(&at_shell_attach_chat_work); break; default: break; } } void at_shell_pipelink_callback(struct modem_pipelink *link, enum modem_pipelink_event event, void *user_data) { ARG_UNUSED(user_data); switch (event) { case MODEM_PIPELINK_EVENT_CONNECTED: LOG_INF("pipe connected"); k_work_submit(&at_shell_open_pipe_work); break; case MODEM_PIPELINK_EVENT_DISCONNECTED: LOG_INF("pipe disconnected"); k_work_submit(&at_shell_release_chat_work); break; default: break; } } static void at_shell_open_pipe_handler(struct k_work *work) { ARG_UNUSED(work); LOG_INF("opening pipe"); modem_pipe_attach(modem_pipelink_get_pipe(at_shell_pipelink), at_shell_pipe_callback, NULL); modem_pipe_open_async(modem_pipelink_get_pipe(at_shell_pipelink)); } static void at_shell_attach_chat_handler(struct k_work *work) { ARG_UNUSED(work); modem_chat_attach(&at_shell_chat, modem_pipelink_get_pipe(at_shell_pipelink)); atomic_set_bit(&at_shell_state, AT_SHELL_STATE_ATTACHED_BIT); LOG_INF("chat attached"); } static void at_shell_release_chat_handler(struct k_work *work) { ARG_UNUSED(work); modem_chat_release(&at_shell_chat); atomic_clear_bit(&at_shell_state, AT_SHELL_STATE_ATTACHED_BIT); LOG_INF("chat released"); } static void at_shell_init_work(void) { k_work_init(&at_shell_open_pipe_work, at_shell_open_pipe_handler); k_work_init(&at_shell_attach_chat_work, at_shell_attach_chat_handler); k_work_init(&at_shell_release_chat_work, at_shell_release_chat_handler); } static void at_shell_init_chat(void) { const struct 
modem_chat_config at_shell_chat_config = { .receive_buf = at_shell_chat_receive_buf, .receive_buf_size = sizeof(at_shell_chat_receive_buf), .delimiter = "\r", .delimiter_size = sizeof("\r") - 1, .filter = "\n", .filter_size = sizeof("\n") - 1, .argv = at_shell_chat_argv_buf, .argv_size = ARRAY_SIZE(at_shell_chat_argv_buf), }; modem_chat_init(&at_shell_chat, &at_shell_chat_config); } static void at_shell_init_script_chat(void) { /* Match anything except the expected response without progressing script */ modem_chat_match_init(&at_shell_script_chat_matches[0]); modem_chat_match_set_match(&at_shell_script_chat_matches[0], ""); modem_chat_match_set_separators(&at_shell_script_chat_matches[0], ""); modem_chat_match_set_callback(&at_shell_script_chat_matches[0], at_shell_print_any_match); modem_chat_match_set_partial(&at_shell_script_chat_matches[0], true); modem_chat_match_enable_wildcards(&at_shell_script_chat_matches[0], false); /* Match the expected response and terminate script */ modem_chat_match_init(&at_shell_script_chat_matches[1]); modem_chat_match_set_match(&at_shell_script_chat_matches[1], ""); modem_chat_match_set_separators(&at_shell_script_chat_matches[1], ""); modem_chat_match_set_callback(&at_shell_script_chat_matches[1], at_shell_print_match); modem_chat_match_set_partial(&at_shell_script_chat_matches[1], false); modem_chat_match_enable_wildcards(&at_shell_script_chat_matches[1], false); modem_chat_script_chat_init(at_shell_script_chat); modem_chat_script_chat_set_response_matches(at_shell_script_chat, at_shell_script_chat_matches, ARRAY_SIZE(at_shell_script_chat_matches)); modem_chat_script_chat_set_timeout(at_shell_script_chat, CONFIG_MODEM_AT_SHELL_RESPONSE_TIMEOUT_S); } static void at_shell_init_pipelink(void) { modem_pipelink_attach(at_shell_pipelink, at_shell_pipelink_callback, NULL); } static int at_shell_init(void) { at_shell_init_work(); at_shell_init_chat(); at_shell_init_script_chat(); at_shell_init_pipelink(); return 0; } 
SYS_INIT(at_shell_init, POST_KERNEL, 99); static int at_shell_cmd_handler(const struct shell *sh, size_t argc, char **argv) { int ret; if (argc < 2) { return -EINVAL; } if (!atomic_test_bit(&at_shell_state, AT_SHELL_STATE_ATTACHED_BIT)) { shell_error(sh, "modem is not ready"); return -EPERM; } if (atomic_test_and_set_bit(&at_shell_state, AT_SHELL_STATE_SCRIPT_RUNNING_BIT)) { shell_error(sh, "script is already running"); return -EBUSY; } strncpy(at_shell_request_buf, argv[1], sizeof(at_shell_request_buf) - 1); ret = modem_chat_script_chat_set_request(at_shell_script_chat, at_shell_request_buf); if (ret < 0) { return -EINVAL; } if (argc == 3) { strncpy(at_shell_match_buf, argv[2], sizeof(at_shell_match_buf) - 1); } else { strncpy(at_shell_match_buf, "OK", sizeof(at_shell_match_buf) - 1); } ret = modem_chat_match_set_match(&at_shell_script_chat_matches[1], at_shell_match_buf); if (ret < 0) { return -EINVAL; } at_shell_active_shell = sh; ret = modem_chat_run_script_async(&at_shell_chat, &at_shell_script); if (ret < 0) { shell_error(sh, "failed to start script"); atomic_clear_bit(&at_shell_state, AT_SHELL_STATE_SCRIPT_RUNNING_BIT); } return ret; } SHELL_STATIC_SUBCMD_SET_CREATE(modem_sub_cmds, SHELL_CMD_ARG(at, NULL, "at <command> <response>", at_shell_cmd_handler, 1, 2), SHELL_SUBCMD_SET_END ); SHELL_CMD_REGISTER(modem, &modem_sub_cmds, "Modem commands", NULL); ```
/content/code_sandbox/drivers/modem/modem_at_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,933
```c /** @file * @brief interface for modem context * * UART-based modem interface implementation for modem context driver. */ /* * */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(modem_iface_uart, CONFIG_MODEM_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include "modem_context.h" #include "modem_iface_uart.h" /** * @brief Drains UART. * * @note Discards remaining data. * * @param iface: modem interface. * * @retval None. */ static void modem_iface_uart_flush(struct modem_iface *iface) { uint8_t c; while (uart_fifo_read(iface->dev, &c, 1) > 0) { continue; } } /** * @brief Modem interface interrupt handler. * * @note Fills interfaces ring buffer with received data. * When ring buffer is full the data is discarded. * * @param uart_dev: uart device. * * @retval None. */ static void modem_iface_uart_isr(const struct device *uart_dev, void *user_data) { struct modem_context *ctx; struct modem_iface_uart_data *data; int rx = 0, ret; uint8_t *dst; uint32_t partial_size = 0; uint32_t total_size = 0; ARG_UNUSED(user_data); /* lookup the modem context */ ctx = modem_context_from_iface_dev(uart_dev); if (!ctx || !ctx->iface.iface_data) { return; } data = (struct modem_iface_uart_data *)(ctx->iface.iface_data); /* get all of the data off UART as fast as we can */ while (uart_irq_update(ctx->iface.dev) && uart_irq_rx_ready(ctx->iface.dev)) { if (!partial_size) { partial_size = ring_buf_put_claim(&data->rx_rb, &dst, UINT32_MAX); } if (!partial_size) { if (data->hw_flow_control) { uart_irq_rx_disable(ctx->iface.dev); } else { LOG_ERR("Rx buffer doesn't have enough space"); modem_iface_uart_flush(&ctx->iface); } break; } rx = uart_fifo_read(ctx->iface.dev, dst, partial_size); if (rx <= 0) { continue; } dst += rx; total_size += rx; partial_size -= rx; } ret = ring_buf_put_finish(&data->rx_rb, total_size); __ASSERT_NO_MSG(ret == 0); if (total_size > 0) { k_sem_give(&data->rx_sem); } } static int modem_iface_uart_read(struct modem_iface *iface, 
uint8_t *buf, size_t size, size_t *bytes_read) { struct modem_iface_uart_data *data; if (!iface || !iface->iface_data) { return -EINVAL; } if (size == 0) { *bytes_read = 0; return 0; } data = (struct modem_iface_uart_data *)(iface->iface_data); *bytes_read = ring_buf_get(&data->rx_rb, buf, size); if (data->hw_flow_control && *bytes_read == 0) { uart_irq_rx_enable(iface->dev); } return 0; } static int modem_iface_uart_write(struct modem_iface *iface, const uint8_t *buf, size_t size) { if (!iface || !iface->iface_data) { return -EINVAL; } if (size == 0) { return 0; } do { uart_poll_out(iface->dev, *buf++); } while (--size); return 0; } int modem_iface_uart_init_dev(struct modem_iface *iface, const struct device *dev) { /* get UART device */ const struct device *prev = iface->dev; if (!device_is_ready(dev)) { return -ENODEV; } /* Check if there's already a device inited to this iface. If so, * interrupts needs to be disabled on that too before switching to avoid * race conditions with modem_iface_uart_isr. 
*/ if (prev) { uart_irq_tx_disable(prev); uart_irq_rx_disable(prev); } uart_irq_rx_disable(dev); uart_irq_tx_disable(dev); iface->dev = dev; modem_iface_uart_flush(iface); uart_irq_callback_set(iface->dev, modem_iface_uart_isr); uart_irq_rx_enable(iface->dev); if (prev) { uart_irq_rx_enable(prev); } return 0; } int modem_iface_uart_init(struct modem_iface *iface, struct modem_iface_uart_data *data, const struct modem_iface_uart_config *config) { int ret; if (iface == NULL || data == NULL || config == NULL) { return -EINVAL; } iface->iface_data = data; iface->read = modem_iface_uart_read; iface->write = modem_iface_uart_write; ring_buf_init(&data->rx_rb, config->rx_rb_buf_len, config->rx_rb_buf); k_sem_init(&data->rx_sem, 0, 1); /* Configure hardware flow control */ data->hw_flow_control = config->hw_flow_control; /* Get UART device */ ret = modem_iface_uart_init_dev(iface, config->dev); if (ret < 0) { iface->iface_data = NULL; iface->read = NULL; iface->write = NULL; return ret; } return 0; } ```
/content/code_sandbox/drivers/modem/modem_iface_uart_interrupt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,208
```unknown # Simcom sim7080 driver options config MODEM_SIM7080 bool "Sim7080 Driver" select MODEM_CONTEXT select MODEM_CMD_HANDLER select MODEM_IFACE_UART select MODEM_SOCKET select NET_OFFLOAD select NET_SOCKETS_OFFLOAD imply GPIO help Enables the driver for the Sim7080 modem. if MODEM_SIM7080 config MODEM_SIMCOM_SIM7080_RX_STACK_SIZE int "Stack size for the simcom sim7080 modem driver rx thread" default 1028 help This stack is used by the simcom SIM7080 RX thread. config MODEM_SIMCOM_SIM7080_RX_WORKQ_STACK_SIZE int "Stack size for the simcom sim7080 modem driver work queue" default 2048 help This stack is used by the work queue. config MODEM_SIMCOM_SIM7080_INIT_PRIORITY int "simcom sim7080 driver init priority" default 80 help simcom sim7080 driver initialization priority. config MODEM_SIMCOM_SIM7080_LTE_BANDS string "LTE bands the driver can use" default "8,20,28" help Comma separated list of usable lte bands. config MODEM_SIMCOM_SIM7080_APN string "APN for establishing a network connection" default "internet" help This setting is used to set the APN name for the network connection context. This value is specific to the network provider and may need to be changed. choice MODEM_SIMCOM_SIM7080_RAT bool "Radio Access Technology Mode" default MODEM_SIMCOM_SIM7080_RAT_NB1 config MODEM_SIMCOM_SIM7080_RAT_NB1 bool "NB-IoT" help Enable LTE NB-IoT mode. config MODEM_SIMCOM_SIM7080_RAT_M1 bool "Cat-M1" help Enable Cat-M1 mode. config MODEM_SIMCOM_SIM7080_RAT_GSM bool "GSM" help Enable GSM mode. endchoice endif # MODEM_SIM7080 ```
/content/code_sandbox/drivers/modem/Kconfig.simcom-sim7080
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
453
```unknown config MODEM_CELLULAR bool "Generic cellular modem support" select MODEM_MODULES select MODEM_PPP select MODEM_CMUX select MODEM_CHAT select MODEM_PIPE select MODEM_PIPELINK select MODEM_BACKEND_UART select RING_BUFFER select NET_L2_PPP_OPTION_MRU select NET_L2_PPP_PAP depends on (DT_HAS_QUECTEL_BG95_ENABLED || \ DT_HAS_SIMCOM_SIM7080_ENABLED || DT_HAS_U_BLOX_SARA_R4_ENABLED || \ DT_HAS_U_BLOX_SARA_R5_ENABLED || DT_HAS_SWIR_HL7800_ENABLED || \ DT_HAS_TELIT_ME910G1_ENABLED || DT_HAS_QUECTEL_EG25_G_ENABLED || \ DT_HAS_NORDIC_NRF91_SLM_ENABLED || DT_HAS_SQN_GM02S_ENABLED) help This driver uses the generic 3gpp AT commands, along with the standard protocols CMUX and PPP, to configure cellular modems to establish a point-to-point network connection. It is a template for tailored drivers for the ublox, quectel and other modems, which include power management and more complex device specific features. if MODEM_CELLULAR config MODEM_CELLULAR_APN string "APN" default "internet" config MODEM_CELLULAR_PERIODIC_SCRIPT_MS int "Periodic script interval in milliseconds" default 2000 config MODEM_CELLULAR_UART_BUFFER_SIZES int "The UART receive and transmit buffer sizes in bytes." default 512 config MODEM_CELLULAR_CMUX_MAX_FRAME_SIZE int "The maximum CMUX frame size in bytes." default 128 help This value affects the size of buffers used to receive and transmit CMUX frames. config MODEM_CELLULAR_CHAT_BUFFER_SIZES int "The size of the buffers used for the chat scripts in bytes." default 128 config MODEM_CELLULAR_USER_PIPE_BUFFER_SIZES int "The size of the buffers used for each user pipe in bytes." default 128 endif ```
/content/code_sandbox/drivers/modem/Kconfig.cellular
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
449
```c /* * */ #define DT_DRV_COMPAT wnc_m14a2a #define LOG_DOMAIN modem_wncm14a2a #define LOG_LEVEL CONFIG_MODEM_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_DOMAIN); #include <zephyr/types.h> #include <stddef.h> #include <stdlib.h> #include <ctype.h> #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/random/random.h> #include <zephyr/net/net_context.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_offload.h> #include <zephyr/net/offloaded_netdev.h> #include <zephyr/net/net_pkt.h> #if defined(CONFIG_NET_IPV6) #include "ipv6.h" #endif #if defined(CONFIG_NET_IPV4) #include "ipv4.h" #endif #if defined(CONFIG_NET_UDP) #include "udp_internal.h" #endif #include "modem_receiver.h" /* Uncomment the #define below to enable a hexdump of all incoming * data from the modem receiver */ /* #define ENABLE_VERBOSE_MODEM_RECV_HEXDUMP 1 */ /* pin settings */ enum mdm_control_pins { MDM_BOOT_MODE_SEL = 0, MDM_POWER, MDM_KEEP_AWAKE, MDM_RESET, SHLD_3V3_1V8_SIG_TRANS_ENA, #if DT_INST_NODE_HAS_PROP(0, mdm_send_ok_gpios) MDM_SEND_OK, #endif MAX_MDM_CONTROL_PINS, }; #define MDM_UART_DEV DEVICE_DT_GET(DT_INST_BUS(0)) #define MDM_BOOT_MODE_SPECIAL 0 #define MDM_BOOT_MODE_NORMAL 1 #define MDM_CMD_TIMEOUT (5 * MSEC_PER_SEC) #define MDM_CMD_SEND_TIMEOUT (10 * MSEC_PER_SEC) #define MDM_CMD_CONN_TIMEOUT (31 * MSEC_PER_SEC) #define MDM_MAX_DATA_LENGTH 1500 #define MDM_RECV_MAX_BUF 30 #define MDM_RECV_BUF_SIZE 128 #define MDM_MAX_SOCKETS 6 #define BUF_ALLOC_TIMEOUT K_SECONDS(1) #define CMD_HANDLER(cmd_, cb_) { \ .cmd = cmd_, \ .cmd_len = (uint16_t)sizeof(cmd_)-1, \ .func = on_cmd_ ## cb_ \ } #define MDM_MANUFACTURER_LENGTH 10 #define MDM_MODEL_LENGTH 16 #define MDM_REVISION_LENGTH 64 #define MDM_IMEI_LENGTH 16 #define RSSI_TIMEOUT_SECS 30 NET_BUF_POOL_DEFINE(mdm_recv_pool, MDM_RECV_MAX_BUF, MDM_RECV_BUF_SIZE, 0, NULL); static uint8_t mdm_recv_buf[MDM_MAX_DATA_LENGTH]; /* 
RX thread structures */ K_KERNEL_STACK_DEFINE(wncm14a2a_rx_stack, CONFIG_MODEM_WNCM14A2A_RX_STACK_SIZE); struct k_thread wncm14a2a_rx_thread; /* RX thread work queue */ K_KERNEL_STACK_DEFINE(wncm14a2a_workq_stack, CONFIG_MODEM_WNCM14A2A_RX_WORKQ_STACK_SIZE); static struct k_work_q wncm14a2a_workq; struct wncm14a2a_socket { struct net_context *context; sa_family_t family; enum net_sock_type type; enum net_ip_protocol ip_proto; struct sockaddr src; struct sockaddr dst; int socket_id; /** semaphore */ struct k_sem sock_send_sem; /** socket callbacks */ struct k_work recv_cb_work; net_context_recv_cb_t recv_cb; struct net_pkt *recv_pkt; void *recv_user_data; }; struct wncm14a2a_config { struct gpio_dt_spec gpio[MAX_MDM_CONTROL_PINS]; }; struct wncm14a2a_iface_ctx { struct net_if *iface; uint8_t mac_addr[6]; /* RX specific attributes */ struct mdm_receiver_context mdm_ctx; /* socket data */ struct wncm14a2a_socket sockets[MDM_MAX_SOCKETS]; int last_socket_id; int last_error; /* semaphores */ struct k_sem response_sem; /* RSSI work */ struct k_work_delayable rssi_query_work; /* modem data */ char mdm_manufacturer[MDM_MANUFACTURER_LENGTH]; char mdm_model[MDM_MODEL_LENGTH]; char mdm_revision[MDM_REVISION_LENGTH]; char mdm_imei[MDM_IMEI_LENGTH]; int mdm_rssi; /* modem state */ int ev_csps; int ev_rrcstate; }; struct cmd_handler { const char *cmd; uint16_t cmd_len; void (*func)(struct net_buf **buf, uint16_t len); }; const static struct wncm14a2a_config wncm14a2a_cfg = { .gpio = { GPIO_DT_SPEC_INST_GET(0, mdm_boot_mode_sel_gpios), GPIO_DT_SPEC_INST_GET(0, mdm_power_gpios), GPIO_DT_SPEC_INST_GET(0, mdm_keep_awake_gpios), GPIO_DT_SPEC_INST_GET(0, mdm_reset_gpios), GPIO_DT_SPEC_INST_GET(0, mdm_shld_trans_ena_gpios), #if DT_INST_NODE_HAS_PROP(0, mdm_send_ok_gpios) GPIO_DT_SPEC_INST_GET(0, mdm_send_ok_gpios), #endif }, }; static struct wncm14a2a_iface_ctx ictx; static void wncm14a2a_read_rx(struct net_buf **buf); /*** Verbose Debugging Functions ***/ #if 
defined(ENABLE_VERBOSE_MODEM_RECV_HEXDUMP) static inline void hexdump(const uint8_t *packet, size_t length) { char output[sizeof("xxxxyyyy xxxxyyyy")]; int n = 0, k = 0; uint8_t byte; while (length--) { if (n % 16 == 0) { printk(" %08X ", n); } byte = *packet++; printk("%02X ", byte); if (byte < 0x20 || byte > 0x7f) { output[k++] = '.'; } else { output[k++] = byte; } n++; if (n % 8 == 0) { if (n % 16 == 0) { output[k] = '\0'; printk(" [%s]\n", output); k = 0; } else { printk(" "); } } } if (n % 16) { int i; output[k] = '\0'; for (i = 0; i < (16 - (n % 16)); i++) { printk(" "); } if ((n % 16) < 8) { printk(" "); /* one extra delimiter after 8 chars */ } printk(" [%s]\n", output); } } #else #define hexdump(...) #endif static struct wncm14a2a_socket *socket_get(void) { int i; struct wncm14a2a_socket *sock = NULL; for (i = 0; i < MDM_MAX_SOCKETS; i++) { if (!ictx.sockets[i].context) { sock = &ictx.sockets[i]; break; } } return sock; } static struct wncm14a2a_socket *socket_from_id(int socket_id) { int i; struct wncm14a2a_socket *sock = NULL; if (socket_id < 1) { return NULL; } for (i = 0; i < MDM_MAX_SOCKETS; i++) { if (ictx.sockets[i].socket_id == socket_id) { sock = &ictx.sockets[i]; break; } } return sock; } static void socket_put(struct wncm14a2a_socket *sock) { if (!sock) { return; } sock->context = NULL; sock->socket_id = 0; (void)memset(&sock->src, 0, sizeof(struct sockaddr)); (void)memset(&sock->dst, 0, sizeof(struct sockaddr)); } char *wncm14a2a_sprint_ip_addr(const struct sockaddr *addr) { static char buf[NET_IPV6_ADDR_LEN]; #if defined(CONFIG_NET_IPV6) if (addr->sa_family == AF_INET6) { return net_addr_ntop(AF_INET6, &net_sin6(addr)->sin6_addr, buf, sizeof(buf)); } else #endif #if defined(CONFIG_NET_IPV4) if (addr->sa_family == AF_INET) { return net_addr_ntop(AF_INET, &net_sin(addr)->sin_addr, buf, sizeof(buf)); } else #endif { LOG_ERR("Unknown IP address family:%d", addr->sa_family); return NULL; } } /* Send an AT command with a series of response handlers 
 */
/* Write @data followed by CRLF to the modem UART, then (unless @timeout is 0)
 * block until a response handler gives the matching semaphore.
 * Returns 0 on success, the handler-reported ictx.last_error, or -ETIMEDOUT.
 * With @sock == NULL the global response_sem is used; otherwise the
 * per-socket sock_send_sem.
 */
static int send_at_cmd(struct wncm14a2a_socket *sock,
		       const uint8_t *data, int timeout)
{
	int ret;

	ictx.last_error = 0;

	LOG_DBG("OUT: [%s]", data);
	mdm_receiver_send(&ictx.mdm_ctx, data, strlen(data));
	mdm_receiver_send(&ictx.mdm_ctx, "\r\n", 2);

	if (timeout == 0) {
		/* fire-and-forget: caller handles the response elsewhere */
		return 0;
	}

	if (!sock) {
		k_sem_reset(&ictx.response_sem);
		ret = k_sem_take(&ictx.response_sem, K_MSEC(timeout));
	} else {
		k_sem_reset(&sock->sock_send_sem);
		ret = k_sem_take(&sock->sock_send_sem, K_MSEC(timeout));
	}

	if (ret == 0) {
		ret = ictx.last_error;
	} else if (ret == -EAGAIN) {
		/* semaphore timeout -> map to a conventional errno */
		ret = -ETIMEDOUT;
	}

	return ret;
}

/* Transmit the payload of @pkt over @sock via AT@SOCKWRITE (binary mode),
 * then wait for the @SOCKWRITE confirmation. Returns 0, a modem-reported
 * error from ictx.last_error, or -ETIMEDOUT.
 */
static int send_data(struct wncm14a2a_socket *sock, struct net_pkt *pkt)
{
	int ret;
	struct net_buf *frag;
	/* sized for the worst-case command string below */
	char buf[sizeof("AT@SOCKWRITE=#,####,1\r")];

	if (!sock) {
		return -EINVAL;
	}

	ictx.last_error = 0;

	frag = pkt->frags;

	/* use SOCKWRITE with binary mode formatting */
	snprintk(buf, sizeof(buf), "AT@SOCKWRITE=%d,%zu,1\r",
		 sock->socket_id, net_buf_frags_len(frag));
	mdm_receiver_send(&ictx.mdm_ctx, buf, strlen(buf));

	/* Loop through packet data and send */
	while (frag) {
		mdm_receiver_send(&ictx.mdm_ctx,
				  frag->data, frag->len);
		frag = frag->frags;
	}

	mdm_receiver_send(&ictx.mdm_ctx, "\r\n", 2);
	k_sem_reset(&sock->sock_send_sem);
	ret = k_sem_take(&sock->sock_send_sem, K_MSEC(MDM_CMD_SEND_TIMEOUT));
	if (ret == 0) {
		ret = ictx.last_error;
	} else if (ret == -EAGAIN) {
		ret = -ETIMEDOUT;
	}

	return ret;
}

/*** NET_BUF HELPERS ***/

/* True for CR or LF. */
static bool is_crlf(uint8_t c)
{
	if (c == '\n' || c == '\r') {
		return true;
	} else {
		return false;
	}
}

/* Pull leading CR/LF bytes off the fragment chain, freeing emptied
 * fragments; may set *buf to NULL when everything was CR/LF.
 */
static void net_buf_skipcrlf(struct net_buf **buf)
{
	/* chop off any /n or /r */
	while (*buf && is_crlf(*(*buf)->data)) {
		net_buf_pull_u8(*buf);
		if (!(*buf)->len) {
			*buf = net_buf_frag_del(NULL, *buf);
		}
	}
}

/* Find the first CR/LF in the fragment chain. On success returns the byte
 * count before it and sets *frag/*offset to its location; returns 0 when
 * no CR/LF exists in the chain.
 */
static uint16_t net_buf_findcrlf(struct net_buf *buf, struct net_buf **frag,
				 uint16_t *offset)
{
	uint16_t len = 0U, pos = 0U;

	while (buf && !is_crlf(*(buf->data + pos))) {
		if (pos + 1 >= buf->len) {
			/* fragment exhausted: account for it and advance */
			len += buf->len;
			buf = buf->frags;
			pos = 0U;
		} else {
			pos++;
		}
	}

	if (buf && is_crlf(*(buf->data + pos))) {
		len += pos;
		*offset = pos;
		*frag = buf;
		return len;
	}

	return 0;
}

/*** UDP / TCP Helper Function ***/

/* Setup IP header data to be used by some network applications.
 * While much is dummy data, some fields such as dst, port and family are
 * important.
 * Return the IP + protocol header length.
 */
static int pkt_setup_ip_data(struct net_pkt *pkt,
			     struct wncm14a2a_socket *sock)
{
	int hdr_len = 0;
	uint16_t src_port = 0U, dst_port = 0U;

#if defined(CONFIG_NET_IPV6)
	if (net_pkt_family(pkt) == AF_INET6) {
		if (net_ipv6_create(
			    pkt,
			    &((struct sockaddr_in6 *)&sock->dst)->sin6_addr,
			    &((struct sockaddr_in6 *)&sock->src)->sin6_addr)) {
			return -1;
		}
		src_port = ntohs(net_sin6(&sock->src)->sin6_port);
		dst_port = ntohs(net_sin6(&sock->dst)->sin6_port);
		hdr_len = sizeof(struct net_ipv6_hdr);
	} else
#endif
#if defined(CONFIG_NET_IPV4)
	if (net_pkt_family(pkt) == AF_INET) {
		if (net_ipv4_create(
			    pkt,
			    &((struct sockaddr_in *)&sock->dst)->sin_addr,
			    &((struct sockaddr_in *)&sock->src)->sin_addr)) {
			return -1;
		}
		src_port = ntohs(net_sin(&sock->src)->sin_port);
		dst_port = ntohs(net_sin(&sock->dst)->sin_port);
		hdr_len = sizeof(struct net_ipv4_hdr);
	} else
#endif
	{
		/* no error here as hdr_len is checked later for 0 value */
	}

#if defined(CONFIG_NET_UDP)
	if (sock->ip_proto == IPPROTO_UDP) {
		if (net_udp_create(pkt, dst_port, src_port)) {
			return -1;
		}
		hdr_len += NET_UDPH_LEN;
	} else
#endif
#if defined(CONFIG_NET_TCP)
	if (sock->ip_proto == IPPROTO_TCP) {
		NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
		struct net_tcp_hdr *tcp;

		tcp = (struct net_tcp_hdr *)net_pkt_get_data(pkt, &tcp_access);
		if (!tcp) {
			return -1;
		}

		(void)memset(tcp, 0, NET_TCPH_LEN);

		/* Setup TCP header */
		tcp->src_port = dst_port;
		tcp->dst_port = src_port;

		if (net_pkt_set_data(pkt, &tcp_access)) {
			return -1;
		}
		hdr_len += NET_TCPH_LEN;
	} else
#endif /* CONFIG_NET_TCP */
	{
		/* no error here as hdr_len is checked later for 0 value */
	}

	return hdr_len;
}

/*** MODEM RESPONSE HANDLERS
***/ /* Last Socket ID Handler */ static void on_cmd_atcmdecho(struct net_buf **buf, uint16_t len) { char value[2]; /* make sure only a single digit is picked up for socket_id */ value[0] = net_buf_pull_u8(*buf); ictx.last_socket_id = atoi(value); } /* Echo Handler for commands without related sockets */ static void on_cmd_atcmdecho_nosock(struct net_buf **buf, uint16_t len) { /* clear last_socket_id */ ictx.last_socket_id = 0; } static void on_cmd_atcmdinfo_manufacturer(struct net_buf **buf, uint16_t len) { size_t out_len; out_len = net_buf_linearize(ictx.mdm_manufacturer, sizeof(ictx.mdm_manufacturer) - 1, *buf, 0, len); ictx.mdm_manufacturer[out_len] = 0; LOG_INF("Manufacturer: %s", ictx.mdm_manufacturer); } static void on_cmd_atcmdinfo_model(struct net_buf **buf, uint16_t len) { size_t out_len; out_len = net_buf_linearize(ictx.mdm_model, sizeof(ictx.mdm_model) - 1, *buf, 0, len); ictx.mdm_model[out_len] = 0; LOG_INF("Model: %s", ictx.mdm_model); } static void on_cmd_atcmdinfo_revision(struct net_buf **buf, uint16_t len) { size_t out_len; out_len = net_buf_linearize(ictx.mdm_revision, sizeof(ictx.mdm_revision) - 1, *buf, 0, len); ictx.mdm_revision[out_len] = 0; LOG_INF("Revision: %s", ictx.mdm_revision); } static void on_cmd_atcmdecho_nosock_imei(struct net_buf **buf, uint16_t len) { struct net_buf *frag = NULL; uint16_t offset; size_t out_len; /* make sure IMEI data is received */ if (len < MDM_IMEI_LENGTH) { LOG_DBG("Waiting for data"); /* wait for more data */ k_sleep(K_MSEC(500)); wncm14a2a_read_rx(buf); } net_buf_skipcrlf(buf); if (!*buf) { LOG_DBG("Unable to find IMEI (net_buf_skipcrlf)"); return; } frag = NULL; len = net_buf_findcrlf(*buf, &frag, &offset); if (!frag) { LOG_DBG("Unable to find IMEI (net_buf_findcrlf)"); return; } out_len = net_buf_linearize(ictx.mdm_imei, sizeof(ictx.mdm_imei) - 1, *buf, 0, len); ictx.mdm_imei[out_len] = 0; LOG_INF("IMEI: %s", ictx.mdm_imei); } /* Handler: %MEAS: RSSI:Reported= -68, Ant0= -63, Ant1= -251 */ static void 
on_cmd_atcmdinfo_rssi(struct net_buf **buf, uint16_t len) { int start = 0, i = 0; size_t value_size; char value[64]; value_size = sizeof(value); (void)memset(value, 0, value_size); while (*buf && len > 0 && i < value_size) { value[i] = net_buf_pull_u8(*buf); if (!(*buf)->len) { *buf = net_buf_frag_del(NULL, *buf); } /* 2nd "=" marks the beginning of the RSSI value */ if (start < 2) { if (value[i] == '=') { start++; } continue; } /* "," marks the end of the RSSI value */ if (value[i] == ',') { value[i] = '\0'; break; } i++; } if (i > 0) { ictx.mdm_rssi = atoi(value); LOG_INF("RSSI: %d", ictx.mdm_rssi); } else { LOG_WRN("Bad format found for RSSI"); } } /* Handler: OK */ static void on_cmd_sockok(struct net_buf **buf, uint16_t len) { struct wncm14a2a_socket *sock = NULL; ictx.last_error = 0; sock = socket_from_id(ictx.last_socket_id); if (!sock) { k_sem_give(&ictx.response_sem); } else { k_sem_give(&sock->sock_send_sem); } } /* Handler: ERROR */ static void on_cmd_sockerror(struct net_buf **buf, uint16_t len) { struct wncm14a2a_socket *sock = NULL; ictx.last_error = -EIO; sock = socket_from_id(ictx.last_socket_id); if (!sock) { k_sem_give(&ictx.response_sem); } else { k_sem_give(&sock->sock_send_sem); } } /* Handler: @EXTERR:<exterror_id> */ static void on_cmd_sockexterror(struct net_buf **buf, uint16_t len) { char value[8]; size_t out_len; struct wncm14a2a_socket *sock = NULL; out_len = net_buf_linearize(value, sizeof(value) - 1, *buf, 0, len); value[out_len] = 0; ictx.last_error = -atoi(value); LOG_ERR("@EXTERR:%d", ictx.last_error); sock = socket_from_id(ictx.last_socket_id); if (!sock) { k_sem_give(&ictx.response_sem); } else { k_sem_give(&sock->sock_send_sem); } } /* Handler: @SOCKDIAL:<status> */ static void on_cmd_sockdial(struct net_buf **buf, uint16_t len) { char value[8]; size_t out_len; out_len = net_buf_linearize(value, sizeof(value) - 1, *buf, 0, len); value[out_len] = 0; ictx.last_error = atoi(value); k_sem_give(&ictx.response_sem); } /* Handler: 
@SOCKCREAT:<socket_id> */ static void on_cmd_sockcreat(struct net_buf **buf, uint16_t len) { char value[2]; struct wncm14a2a_socket *sock = NULL; /* look up new socket by special id */ sock = socket_from_id(MDM_MAX_SOCKETS + 1); if (sock) { /* make sure only a single digit is picked up for socket_id */ value[0] = net_buf_pull_u8(*buf); sock->socket_id = atoi(value); } /* don't give back semaphore -- OK to follow */ } /* Handler: @SOCKWRITE:<actual_length> */ static void on_cmd_sockwrite(struct net_buf **buf, uint16_t len) { char value[8]; size_t out_len; int write_len; struct wncm14a2a_socket *sock = NULL; /* TODO: check against what we wanted to send */ out_len = net_buf_linearize(value, sizeof(value) - 1, *buf, 0, len); value[out_len] = 0; write_len = atoi(value); if (write_len <= 0) { return; } sock = socket_from_id(ictx.last_socket_id); if (sock) { k_sem_give(&sock->sock_send_sem); } } static void sockreadrecv_cb_work(struct k_work *work) { struct wncm14a2a_socket *sock = NULL; struct net_pkt *pkt; sock = CONTAINER_OF(work, struct wncm14a2a_socket, recv_cb_work); /* return data */ pkt = sock->recv_pkt; sock->recv_pkt = NULL; if (sock->recv_cb) { sock->recv_cb(sock->context, pkt, NULL, NULL, 0, sock->recv_user_data); } else { net_pkt_unref(pkt); } } /* Handler: @SOCKREAD:<actual_length>,"<hex encoded binary>" */ static void on_cmd_sockread(struct net_buf **buf, uint16_t len) { struct wncm14a2a_socket *sock = NULL; uint8_t c = 0U; int i, actual_length, hdr_len = 0; size_t value_size; char value[10]; /* first comma marks the end of actual_length */ i = 0; value_size = sizeof(value); (void)memset(value, 0, value_size); while (*buf && i < value_size - 1) { value[i++] = net_buf_pull_u8(*buf); len--; if (!(*buf)->len) { *buf = net_buf_frag_del(NULL, *buf); } if (value[i-1] == ',') { i--; break; } } /* make sure we still have buf data, the last pulled character was * a comma and that the next char in the buffer is a quote. 
*/ if (!*buf || value[i] != ',' || *(*buf)->data != '\"') { LOG_ERR("Incorrect format! Ignoring data!"); return; } /* clear the comma */ value[i] = '\0'; actual_length = atoi(value); /* skip quote */ len--; net_buf_pull_u8(*buf); if (!(*buf)->len) { *buf = net_buf_frag_del(NULL, *buf); } /* check that we have enough data */ if (!*buf || len > (actual_length * 2) + 1) { LOG_ERR("Incorrect format! Ignoring data!"); return; } sock = socket_from_id(ictx.last_socket_id); if (!sock) { LOG_ERR("Socket not found! (%d)", ictx.last_socket_id); return; } /* allocate an RX pkt */ sock->recv_pkt = net_pkt_rx_alloc_with_buffer( net_context_get_iface(sock->context), actual_length, sock->family, sock->ip_proto, BUF_ALLOC_TIMEOUT); if (!sock->recv_pkt) { LOG_ERR("Failed net_pkt_get_reserve_rx!"); return; } /* set pkt data */ net_pkt_set_context(sock->recv_pkt, sock->context); /* add IP / protocol headers */ hdr_len = pkt_setup_ip_data(sock->recv_pkt, sock); /* move hex encoded data from the buffer to the recv_pkt */ for (i = 0; i < actual_length * 2; i++) { char c2 = *(*buf)->data; if (isdigit((int)c2) != 0) { c += c2 - '0'; } else if (isalpha((int)c2) != 0) { c += c2 - (isupper((int)c2) != 0 ? 'A' - 10 : 'a' - 10); } else { /* TODO: unexpected input! skip? */ } if (i % 2) { if (net_pkt_write_u8(sock->recv_pkt, c)) { LOG_ERR("Unable to add data! Aborting!"); net_pkt_unref(sock->recv_pkt); sock->recv_pkt = NULL; return; } c = 0U; } else { c = c << 4; } /* pull data from buf and advance to the next frag if needed */ net_buf_pull_u8(*buf); if (!(*buf)->len) { *buf = net_buf_frag_del(NULL, *buf); } } net_pkt_cursor_init(sock->recv_pkt); net_pkt_set_overwrite(sock->recv_pkt, true); if (hdr_len > 0) { net_pkt_skip(sock->recv_pkt, hdr_len); } /* Let's do the callback processing in a different work queue in * case the app takes a long time. 
*/ k_work_submit_to_queue(&wncm14a2a_workq, &sock->recv_cb_work); } /* Handler: @SOCKDATAIND: <socket_id>,<session_status>,<left_bytes> */ static void on_cmd_sockdataind(struct net_buf **buf, uint16_t len) { int socket_id, left_bytes; size_t out_len; char *delim1, *delim2; char value[sizeof("#,#,#####\r")]; char sendbuf[sizeof("AT@SOCKREAD=-#####,-#####\r")]; struct wncm14a2a_socket *sock = NULL; out_len = net_buf_linearize(value, sizeof(value) - 1, *buf, 0, len); value[out_len] = 0; /* First comma separator marks the end of socket_id */ delim1 = strchr(value, ','); if (!delim1) { LOG_ERR("Missing 1st comma"); return; } *delim1++ = '\0'; socket_id = atoi(value); /* Second comma separator marks the end of session_status */ /* TODO: ignore for now, but maybe this is useful? */ delim2 = strchr(delim1, ','); if (!delim2) { LOG_ERR("Missing 2nd comma"); return; } *delim2++ = '\0'; /* Third param is for left_bytes */ /* TODO: ignore for now because we ask for max data len * but maybe this is useful in the future? */ left_bytes = atoi(delim2); sock = socket_from_id(socket_id); if (!sock) { LOG_ERR("Unable to find socket_id:%d", socket_id); return; } if (left_bytes > 0) { LOG_DBG("socket_id:%d left_bytes:%d", socket_id, left_bytes); snprintk(sendbuf, sizeof(sendbuf), "AT@SOCKREAD=%d,%d", sock->socket_id, left_bytes); /* We entered this trigger due to an unsolicited modem response. * When we send the AT@SOCKREAD command it won't generate an * "OK" response directly. The modem will respond with * "@SOCKREAD ..." and the data requested and then "OK" or * "ERROR". Let's not wait here by passing in a timeout to * send_at_cmd(). Instead, when the resulting response is * received, we trigger on_cmd_sockread() to handle it. 
*/ send_at_cmd(sock, sendbuf, 0); } } static void on_cmd_socknotifyev(struct net_buf **buf, uint16_t len) { char value[40]; size_t out_len; int p1 = 0, p2 = 0; out_len = net_buf_linearize(value, sizeof(value) - 1, *buf, 0, len); value[out_len] = 0; /* walk value till 1st quote */ while (p1 < len && value[p1] != '\"') { p1++; } if (value[p1] != '\"') { /* 1st quote not found */ return; } p1++; p2 = p1; while (p2 < len && value[p2] != '\"') { p2++; } if (value[p2] != '\"') { /* 2nd quote not found */ return; } /* clear quote */ value[p2] = '\0'; p2++; /* skip comma if present */ if (value[p2] == ',') { p2++; } /* CSPS: 0: Moved to PS mode, 1: Moved to CS/PS mode */ if (!strncmp(&value[p1], "CSPS", 4)) { ictx.ev_csps = atoi(&value[p2]); /* This also signifies that RRCSTATE = 1 */ ictx.ev_rrcstate = 1; LOG_DBG("CSPS:%d", ictx.ev_csps); /* RRCSTATE: 0: RRC Idle, 1: RRC Connected, 2: RRC Unknown */ } else if (!strncmp(&value[p1], "RRCSTATE", 8)) { ictx.ev_rrcstate = atoi(&value[p2]); LOG_DBG("RRCSTATE:%d", ictx.ev_rrcstate); } else if (!strncmp(&value[p1], "LTIME", 5)) { /* local time from network */ LOG_INF("LTIME:%s", &value[p2]); } else if (!strncmp(&value[p1], "SIB1", 4)) { /* do nothing? */ LOG_DBG("SIB1"); } else { LOG_DBG("UNHANDLED: [%s:%s]", &value[p1], &value[p2]); } } static int net_buf_ncmp(struct net_buf *buf, const uint8_t *s2, size_t n) { struct net_buf *frag = buf; uint16_t offset = 0U; while ((n > 0) && (*(frag->data + offset) == *s2) && (*s2 != '\0')) { if (offset == frag->len) { if (!frag->frags) { break; } frag = frag->frags; offset = 0U; } else { offset++; } s2++; n--; } return (n == 0) ? 
0 : (*(frag->data + offset) - *s2); } static inline struct net_buf *read_rx_allocator(k_timeout_t timeout, void *user_data) { return net_buf_alloc((struct net_buf_pool *)user_data, timeout); } static void wncm14a2a_read_rx(struct net_buf **buf) { uint8_t uart_buffer[MDM_RECV_BUF_SIZE]; size_t bytes_read = 0; int ret; uint16_t rx_len; /* read all of the data from mdm_receiver */ while (true) { ret = mdm_receiver_recv(&ictx.mdm_ctx, uart_buffer, sizeof(uart_buffer), &bytes_read); if (ret < 0 || bytes_read == 0) { /* mdm_receiver buffer is empty */ break; } hexdump(uart_buffer, bytes_read); /* make sure we have storage */ if (!*buf) { *buf = net_buf_alloc(&mdm_recv_pool, BUF_ALLOC_TIMEOUT); if (!*buf) { LOG_ERR("Can't allocate RX data! " "Skipping data!"); break; } } rx_len = net_buf_append_bytes(*buf, bytes_read, uart_buffer, BUF_ALLOC_TIMEOUT, read_rx_allocator, &mdm_recv_pool); if (rx_len < bytes_read) { LOG_ERR("Data was lost! read %u of %zu!", rx_len, bytes_read); } } } /* RX thread */ static void wncm14a2a_rx(void *p1, void *p2, void *p3) { ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3); struct net_buf *rx_buf = NULL; struct net_buf *frag = NULL; int i; uint16_t offset, len; static const struct cmd_handler handlers[] = { /* NON-SOCKET COMMAND ECHOES to clear last_socket_id */ CMD_HANDLER("ATE1", atcmdecho_nosock), CMD_HANDLER("AT%PDNSET=", atcmdecho_nosock), CMD_HANDLER("ATI", atcmdecho_nosock), CMD_HANDLER("AT+CGSN", atcmdecho_nosock_imei), CMD_HANDLER("AT%MEAS=", atcmdecho_nosock), CMD_HANDLER("AT@INTERNET=", atcmdecho_nosock), CMD_HANDLER("AT@SOCKDIAL=", atcmdecho_nosock), CMD_HANDLER("AT@SOCKCREAT=", atcmdecho_nosock), /* SOCKET COMMAND ECHOES for last_socket_id processing */ CMD_HANDLER("AT@SOCKCONN=", atcmdecho), CMD_HANDLER("AT@SOCKWRITE=", atcmdecho), CMD_HANDLER("AT@SOCKREAD=", atcmdecho), CMD_HANDLER("AT@SOCKCLOSE=", atcmdecho), /* MODEM Information */ CMD_HANDLER("Manufacturer: ", atcmdinfo_manufacturer), CMD_HANDLER("Model: ", atcmdinfo_model), 
CMD_HANDLER("Revision: ", atcmdinfo_revision), CMD_HANDLER("%MEAS: RSSI:", atcmdinfo_rssi), /* SOLICITED SOCKET RESPONSES */ CMD_HANDLER("OK", sockok), CMD_HANDLER("ERROR", sockerror), CMD_HANDLER("@EXTERR:", sockexterror), CMD_HANDLER("@SOCKDIAL:", sockdial), CMD_HANDLER("@SOCKCREAT:", sockcreat), CMD_HANDLER("@OCKCREAT:", sockcreat), /* seeing this a lot */ CMD_HANDLER("@SOCKWRITE:", sockwrite), CMD_HANDLER("@SOCKREAD:", sockread), /* UNSOLICITED SOCKET RESPONSES */ CMD_HANDLER("@SOCKDATAIND:", sockdataind), CMD_HANDLER("%NOTIFYEV:", socknotifyev), }; while (true) { /* wait for incoming data */ (void)k_sem_take(&ictx.mdm_ctx.rx_sem, K_FOREVER); wncm14a2a_read_rx(&rx_buf); while (rx_buf) { net_buf_skipcrlf(&rx_buf); if (!rx_buf) { break; } frag = NULL; len = net_buf_findcrlf(rx_buf, &frag, &offset); if (!frag) { break; } /* look for matching data handlers */ i = -1; for (i = 0; i < ARRAY_SIZE(handlers); i++) { if (net_buf_ncmp(rx_buf, handlers[i].cmd, handlers[i].cmd_len) == 0) { /* found a matching handler */ LOG_DBG("MATCH %s (len:%u)", handlers[i].cmd, len); /* skip cmd_len */ rx_buf = net_buf_skip(rx_buf, handlers[i].cmd_len); /* locate next cr/lf */ frag = NULL; len = net_buf_findcrlf(rx_buf, &frag, &offset); if (!frag) { break; } /* call handler */ if (handlers[i].func) { handlers[i].func(&rx_buf, len); } frag = NULL; /* make sure buf still has data */ if (!rx_buf) { break; } /* * We've handled the current line * and need to exit the "search for * handler loop". Let's skip any * "extra" data and look for the next * CR/LF, leaving us ready for the * next handler search. Ignore the * length returned. 
*/ (void)net_buf_findcrlf(rx_buf, &frag, &offset); break; } } if (frag && rx_buf) { /* clear out processed line (buffers) */ while (frag && rx_buf != frag) { rx_buf = net_buf_frag_del(NULL, rx_buf); } net_buf_pull(rx_buf, offset); } } /* give up time if we have a solid stream of data */ k_yield(); } } static int modem_pin_init(void) { LOG_INF("Setting Modem Pins"); /* Hard reset the modem (>5 seconds required) * (doesn't go through the signal level translator) */ LOG_DBG("MDM_RESET_PIN -> ASSERTED"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[MDM_RESET], 1); k_sleep(K_SECONDS(7)); LOG_DBG("MDM_RESET_PIN -> NOT_ASSERTED"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[MDM_RESET], 0); /* disable signal level translator (necessary * for the modem to boot properly). All signals * except mdm_reset go through the level translator * and have internal pull-up/down in the module. While * the level translator is disabled, these pins will * be in the correct state. */ LOG_DBG("SIG_TRANS_ENA_PIN -> DISABLED"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[SHLD_3V3_1V8_SIG_TRANS_ENA], 0); /* While the level translator is disabled and output pins * are tristated, make sure the inputs are in the same state * as the WNC Module pins so that when the level translator is * enabled, there are no differences. */ LOG_DBG("MDM_BOOT_MODE_SEL_PIN -> NORMAL"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[MDM_BOOT_MODE_SEL], MDM_BOOT_MODE_NORMAL); LOG_DBG("MDM_POWER_PIN -> ENABLE"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[MDM_POWER], 1); LOG_DBG("MDM_KEEP_AWAKE_PIN -> ENABLED"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[MDM_KEEP_AWAKE], 1); #if DT_INST_NODE_HAS_PROP(0, mdm_send_ok_gpios) LOG_DBG("MDM_SEND_OK_PIN -> ENABLED"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[MDM_SEND_OK], 1); #endif /* wait for the WNC Module to perform its initial boot correctly */ k_sleep(K_SECONDS(1)); /* Enable the level translator. * The input pins should now be the same as how the M14A module is * driving them with internal pull ups/downs. 
* When enabled, there will be no changes in the above 4 pins... */ LOG_DBG("SIG_TRANS_ENA_PIN -> ENABLED"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[SHLD_3V3_1V8_SIG_TRANS_ENA], 1); LOG_INF("... Done!"); return 0; } static void modem_wakeup_pin_fix(void) { /* AT&T recommend toggling the KEEP_AWAKE signal to reduce missed * UART characters. */ LOG_DBG("Toggling MDM_KEEP_AWAKE_PIN to avoid missed characters"); k_sleep(K_MSEC(20)); LOG_DBG("MDM_KEEP_AWAKE_PIN -> DISABLED"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[MDM_KEEP_AWAKE], 0); k_sleep(K_SECONDS(2)); LOG_DBG("MDM_KEEP_AWAKE_PIN -> ENABLED"); gpio_pin_set_dt(&wncm14a2a_cfg.gpio[MDM_KEEP_AWAKE], 1); k_sleep(K_MSEC(20)); } static void wncm14a2a_rssi_query_work(struct k_work *work) { int ret; /* query modem RSSI */ ret = send_at_cmd(NULL, "AT%MEAS=\"23\"", MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT%%MEAS ret:%d", ret); } /* re-start RSSI query work */ k_work_reschedule_for_queue(&wncm14a2a_workq, &ictx.rssi_query_work, K_SECONDS(RSSI_TIMEOUT_SECS)); } static void wncm14a2a_modem_reset(void) { int ret = 0, retry_count = 0, counter = 0; /* bring down network interface */ net_if_carrier_off(ictx.iface); restart: /* stop RSSI delay work */ k_work_cancel_delayable(&ictx.rssi_query_work); modem_pin_init(); LOG_INF("Waiting for modem to respond"); /* Give the modem a while to start responding to simple 'AT' commands. 
* Also wait for CSPS=1 or RRCSTATE=1 notification */ ret = -1; while (counter++ < 50 && ret < 0) { k_sleep(K_SECONDS(2)); ret = send_at_cmd(NULL, "AT", MDM_CMD_TIMEOUT); if (ret < 0 && ret != -ETIMEDOUT) { break; } } if (ret < 0) { LOG_ERR("MODEM WAIT LOOP ERROR: %d", ret); goto error; } LOG_INF("Setting modem to always stay awake"); modem_wakeup_pin_fix(); ret = send_at_cmd(NULL, "ATE1", MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("ATE1 ret:%d", ret); goto error; } ret = send_at_cmd(NULL, "AT%PDNSET=1,\"" CONFIG_MODEM_WNCM14A2A_APN_NAME "\",\"IPV4V6\"", MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT%%PDNSET ret:%d", ret); goto error; } /* query modem info */ LOG_INF("Querying modem information"); ret = send_at_cmd(NULL, "ATI", MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("ATI ret:%d", ret); goto error; } /* query modem IMEI */ ret = send_at_cmd(NULL, "AT+CGSN", MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT+CGSN ret:%d", ret); goto error; } LOG_INF("Waiting for network"); /* query modem RSSI */ wncm14a2a_rssi_query_work(NULL); k_sleep(K_SECONDS(2)); counter = 0; /* wait for RSSI > -1000 and != 0 */ while (counter++ < 15 && (ictx.mdm_rssi <= -1000 || ictx.mdm_rssi == 0)) { /* stop RSSI delay work */ k_work_cancel_delayable(&ictx.rssi_query_work); wncm14a2a_rssi_query_work(NULL); k_sleep(K_SECONDS(2)); } if (ictx.mdm_rssi <= -1000 || ictx.mdm_rssi == 0) { retry_count++; if (retry_count > 3) { LOG_ERR("Failed network init. Too many attempts!"); goto error; } LOG_ERR("Failed network init. 
Restarting process."); goto restart; } LOG_INF("Network is ready."); ret = send_at_cmd(NULL, "AT@INTERNET=1", MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT@INTERNET ret:%d", ret); goto error; } ret = send_at_cmd(NULL, "AT@SOCKDIAL=1", MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("SOCKDIAL=1 CHECK ret:%d", ret); /* don't report this as an error, we retry later */ } /* Set iface up */ net_if_carrier_on(ictx.iface); error: return; } static int wncm14a2a_init(const struct device *dev) { int i, ret = 0; ARG_UNUSED(dev); (void)memset(&ictx, 0, sizeof(ictx)); for (i = 0; i < MDM_MAX_SOCKETS; i++) { k_work_init(&ictx.sockets[i].recv_cb_work, sockreadrecv_cb_work); k_sem_init(&ictx.sockets[i].sock_send_sem, 0, 1); } k_sem_init(&ictx.response_sem, 0, 1); /* initialize the work queue */ k_work_queue_start(&wncm14a2a_workq, wncm14a2a_workq_stack, K_KERNEL_STACK_SIZEOF(wncm14a2a_workq_stack), K_PRIO_COOP(7), NULL); ictx.last_socket_id = 0; /* setup port devices and pin directions */ for (i = 0; i < MAX_MDM_CONTROL_PINS; i++) { if (!gpio_is_ready_dt(&wncm14a2a_cfg.gpio[i])) { LOG_ERR("gpio port (%s) not ready!", wncm14a2a_cfg.gpio[i].port->name); return -ENODEV; } gpio_pin_configure_dt(&wncm14a2a_cfg.gpio[i], GPIO_OUTPUT); } /* Set modem data storage */ ictx.mdm_ctx.data_manufacturer = ictx.mdm_manufacturer; ictx.mdm_ctx.data_model = ictx.mdm_model; ictx.mdm_ctx.data_revision = ictx.mdm_revision; #ifdef CONFIG_MODEM_SIM_NUMBERS ictx.mdm_ctx.data_imei = ictx.mdm_imei; #endif ictx.mdm_ctx.data_rssi = &ictx.mdm_rssi; ret = mdm_receiver_register(&ictx.mdm_ctx, MDM_UART_DEV, mdm_recv_buf, sizeof(mdm_recv_buf)); if (ret < 0) { LOG_ERR("Error registering modem receiver (%d)!", ret); goto error; } /* start RX thread */ k_thread_create(&wncm14a2a_rx_thread, wncm14a2a_rx_stack, K_KERNEL_STACK_SIZEOF(wncm14a2a_rx_stack), wncm14a2a_rx, NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT); /* init RSSI query */ k_work_init_delayable(&ictx.rssi_query_work, wncm14a2a_rssi_query_work); 
wncm14a2a_modem_reset(); error: return ret; } /*** OFFLOAD FUNCTIONS ***/ static int offload_get(sa_family_t family, enum net_sock_type type, enum net_ip_protocol ip_proto, struct net_context **context) { int ret; char buf[sizeof("AT@SOCKCREAT=###,#\r")]; struct wncm14a2a_socket *sock = NULL; /* new socket */ sock = socket_get(); if (!sock) { return -ENOMEM; } (*context)->offload_context = sock; sock->family = family; sock->type = type; sock->ip_proto = ip_proto; sock->context = *context; sock->socket_id = MDM_MAX_SOCKETS + 1; /* socket # needs assigning */ snprintk(buf, sizeof(buf), "AT@SOCKCREAT=%d,%d", type, family == AF_INET ? 0 : 1); ret = send_at_cmd(NULL, buf, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT@SOCKCREAT ret:%d", ret); socket_put(sock); } return ret; } static int offload_bind(struct net_context *context, const struct sockaddr *addr, socklen_t addrlen) { struct wncm14a2a_socket *sock = NULL; if (!context) { return -EINVAL; } sock = (struct wncm14a2a_socket *)context->offload_context; if (!sock) { LOG_ERR("Can't locate socket for net_ctx:%p!", context); return -EINVAL; } /* save bind address information */ sock->src.sa_family = addr->sa_family; #if defined(CONFIG_NET_IPV6) if (addr->sa_family == AF_INET6) { net_ipaddr_copy(&net_sin6(&sock->src)->sin6_addr, &net_sin6(addr)->sin6_addr); net_sin6(&sock->src)->sin6_port = net_sin6(addr)->sin6_port; } else #endif #if defined(CONFIG_NET_IPV4) if (addr->sa_family == AF_INET) { net_ipaddr_copy(&net_sin(&sock->src)->sin_addr, &net_sin(addr)->sin_addr); net_sin(&sock->src)->sin_port = net_sin(addr)->sin_port; } else #endif { return -EPFNOSUPPORT; } return 0; } static int offload_listen(struct net_context *context, int backlog) { /* NOT IMPLEMENTED */ return -ENOTSUP; } static int offload_connect(struct net_context *context, const struct sockaddr *addr, socklen_t addrlen, net_context_connect_cb_t cb, int32_t timeout, void *user_data) { int ret, dst_port = -1; int32_t timeout_sec = -1; /* if not changed, this 
will be min timeout */ char buf[sizeof("AT@SOCKCONN=#,###.###.###.###,#####,#####\r")]; struct wncm14a2a_socket *sock; if (timeout > 0) { timeout_sec = timeout / MSEC_PER_SEC; } if (!context || !addr) { return -EINVAL; } sock = (struct wncm14a2a_socket *)context->offload_context; if (!sock) { LOG_ERR("Can't locate socket for net_ctx:%p!", context); return -EINVAL; } if (sock->socket_id < 1) { LOG_ERR("Invalid socket_id(%d) for net_ctx:%p!", sock->socket_id, context); return -EINVAL; } sock->dst.sa_family = addr->sa_family; #if defined(CONFIG_NET_IPV6) if (addr->sa_family == AF_INET6) { net_ipaddr_copy(&net_sin6(&sock->dst)->sin6_addr, &net_sin6(addr)->sin6_addr); dst_port = ntohs(net_sin6(addr)->sin6_port); net_sin6(&sock->dst)->sin6_port = dst_port; } else #endif #if defined(CONFIG_NET_IPV4) if (addr->sa_family == AF_INET) { net_ipaddr_copy(&net_sin(&sock->dst)->sin_addr, &net_sin(addr)->sin_addr); dst_port = ntohs(net_sin(addr)->sin_port); net_sin(&sock->dst)->sin_port = dst_port; } else #endif { return -EINVAL; } if (dst_port < 0) { LOG_ERR("Invalid port: %d", dst_port); return -EINVAL; } /* * AT@SOCKCONN timeout param has minimum value of 30 seconds and * maximum value of 360 seconds, otherwise an error is generated */ timeout_sec = CLAMP(timeout_sec, 30, 360); snprintk(buf, sizeof(buf), "AT@SOCKCONN=%d,\"%s\",%d,%d", sock->socket_id, wncm14a2a_sprint_ip_addr(addr), dst_port, timeout_sec); ret = send_at_cmd(sock, buf, MDM_CMD_CONN_TIMEOUT); if (!ret) { net_context_set_state(sock->context, NET_CONTEXT_CONNECTED); } else { LOG_ERR("AT@SOCKCONN ret:%d", ret); } if (cb) { cb(context, ret, user_data); } return ret; } static int offload_accept(struct net_context *context, net_tcp_accept_cb_t cb, int32_t timeout, void *user_data) { /* NOT IMPLEMENTED */ return -ENOTSUP; } static int offload_sendto(struct net_pkt *pkt, const struct sockaddr *dst_addr, socklen_t addrlen, net_context_send_cb_t cb, int32_t timeout, void *user_data) { struct net_context *context = 
net_pkt_context(pkt); struct wncm14a2a_socket *sock; int ret = 0; if (!context) { return -EINVAL; } sock = (struct wncm14a2a_socket *)context->offload_context; if (!sock) { LOG_ERR("Can't locate socket for net_ctx:%p!", context); return -EINVAL; } ret = send_data(sock, pkt); if (ret < 0) { LOG_ERR("send_data error: %d", ret); } else { net_pkt_unref(pkt); } if (cb) { cb(context, ret, user_data); } return ret; } static int offload_send(struct net_pkt *pkt, net_context_send_cb_t cb, int32_t timeout, void *user_data) { struct net_context *context = net_pkt_context(pkt); socklen_t addrlen; #if defined(CONFIG_NET_IPV6) if (net_pkt_family(pkt) == AF_INET6) { addrlen = sizeof(struct sockaddr_in6); } else #endif /* CONFIG_NET_IPV6 */ #if defined(CONFIG_NET_IPV4) if (net_pkt_family(pkt) == AF_INET) { addrlen = sizeof(struct sockaddr_in); } else #endif /* CONFIG_NET_IPV4 */ { return -EPFNOSUPPORT; } return offload_sendto(pkt, &context->remote, addrlen, cb, timeout, user_data); } static int offload_recv(struct net_context *context, net_context_recv_cb_t cb, int32_t timeout, void *user_data) { struct wncm14a2a_socket *sock; if (!context) { return -EINVAL; } sock = (struct wncm14a2a_socket *)context->offload_context; if (!sock) { LOG_ERR("Can't locate socket for net_ctx:%p!", context); return -EINVAL; } sock->recv_cb = cb; sock->recv_user_data = user_data; return 0; } static int offload_put(struct net_context *context) { struct wncm14a2a_socket *sock; char buf[sizeof("AT@SOCKCLOSE=#\r")]; int ret; if (!context) { return -EINVAL; } sock = (struct wncm14a2a_socket *)context->offload_context; if (!sock) { /* socket was already closed? Exit quietly here. 
*/ return 0; } snprintk(buf, sizeof(buf), "AT@SOCKCLOSE=%d", sock->socket_id); ret = send_at_cmd(sock, buf, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT@SOCKCLOSE ret:%d", ret); } /* clear last_socket_id */ ictx.last_socket_id = 0; socket_put(sock); net_context_unref(context); if (sock->type == SOCK_STREAM) { /* TCP contexts are referenced twice, * once for the app and once for the stack. * Since TCP stack is not used for offload, * unref a second time. */ net_context_unref(context); } return 0; } static struct net_offload offload_funcs = { .get = offload_get, .bind = offload_bind, .listen = offload_listen, /* TODO */ .connect = offload_connect, .accept = offload_accept, /* TODO */ .send = offload_send, .sendto = offload_sendto, .recv = offload_recv, .put = offload_put, }; static inline uint8_t *wncm14a2a_get_mac(const struct device *dev) { struct wncm14a2a_iface_ctx *ctx = dev->data; ctx->mac_addr[0] = 0x00; ctx->mac_addr[1] = 0x10; sys_rand_get(&ctx->mac_addr[2], 4U); return ctx->mac_addr; } static void offload_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct wncm14a2a_iface_ctx *ctx = dev->data; iface->if_dev->offload = &offload_funcs; net_if_set_link_addr(iface, wncm14a2a_get_mac(dev), sizeof(ctx->mac_addr), NET_LINK_ETHERNET); ctx->iface = iface; } static struct offloaded_if_api api_funcs = { .iface_api.init = offload_iface_init, }; NET_DEVICE_DT_INST_OFFLOAD_DEFINE(0, wncm14a2a_init, NULL, &ictx, &wncm14a2a_cfg, CONFIG_MODEM_WNCM14A2A_INIT_PRIORITY, &api_funcs, MDM_MAX_DATA_LENGTH); ```
/content/code_sandbox/drivers/modem/wncm14a2a.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
13,168
```c /** @file * @brief Modem command handler for modem context driver * * Text-based command handler implementation for modem context driver. */ /* * */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(modem_cmd_handler, CONFIG_MODEM_LOG_LEVEL); #include <zephyr/kernel.h> #include <stddef.h> #include <zephyr/net/buf.h> #include "modem_context.h" #include "modem_cmd_handler.h" /* * Parsing Functions */ static bool is_crlf(uint8_t c) { if (c == '\n' || c == '\r') { return true; } else { return false; } } static void skipcrlf(struct modem_cmd_handler_data *data) { while (data->rx_buf && data->rx_buf->len && is_crlf(*data->rx_buf->data)) { net_buf_pull_u8(data->rx_buf); if (!data->rx_buf->len) { data->rx_buf = net_buf_frag_del(NULL, data->rx_buf); } } } static uint16_t findcrlf(struct modem_cmd_handler_data *data, struct net_buf **frag, uint16_t *offset) { struct net_buf *buf = data->rx_buf; uint16_t len = 0U, pos = 0U; while (buf && buf->len && !is_crlf(*(buf->data + pos))) { if (pos + 1 >= buf->len) { len += buf->len; buf = buf->frags; pos = 0U; } else { pos++; } } if (buf && buf->len && is_crlf(*(buf->data + pos))) { len += pos; *offset = pos; *frag = buf; return len; } return 0; } static bool starts_with(struct net_buf *buf, const char *str) { int pos = 0; while (buf && buf->len && *str) { if (*(buf->data + pos) == *str) { str++; pos++; if (pos >= buf->len) { buf = buf->frags; pos = 0; } } else { return false; } } if (*str == 0) { return true; } return false; } /* * Cmd Handler Functions */ static inline struct net_buf *read_rx_allocator(k_timeout_t timeout, void *user_data) { return net_buf_alloc((struct net_buf_pool *)user_data, timeout); } /* return scanned length for params */ static int parse_params(struct modem_cmd_handler_data *data, size_t match_len, const struct modem_cmd *cmd, uint8_t **argv, size_t argv_len, uint16_t *argc) { int count = 0; size_t delim_len, begin, end, i; bool quoted = false; if (!data || !data->match_buf || !match_len || !cmd || 
!argv || !argc) { return -EINVAL; } begin = cmd->cmd_len; end = cmd->cmd_len; delim_len = strlen(cmd->delim); while (end < match_len) { /* Don't look for delimiters in the middle of a quoted parameter */ if (data->match_buf[end] == '"') { quoted = !quoted; } if (quoted) { end++; continue; } /* Look for delimiter characters */ for (i = 0; i < delim_len; i++) { if (data->match_buf[end] == cmd->delim[i]) { /* mark a parameter beginning */ argv[*argc] = &data->match_buf[begin]; /* end parameter with NUL char */ data->match_buf[end] = '\0'; /* bump begin */ begin = end + 1; count += 1; (*argc)++; break; } } if (count >= cmd->arg_count_max) { break; } if (*argc == argv_len) { break; } end++; } /* consider the ending portion a param if end > begin */ if (end > begin) { /* mark a parameter beginning */ argv[*argc] = &data->match_buf[begin]; /* end parameter with NUL char * NOTE: if this is at the end of match_len will probably * be overwriting a NUL that's already there */ data->match_buf[end] = '\0'; (*argc)++; } /* missing arguments */ if (*argc < cmd->arg_count_min) { /* Do not return -EAGAIN here as there is no way new argument * can be parsed later because match_len is computed to be * the minimum of the distance to the first CRLF and the size * of the buffer. * Therefore, waiting more data on the interface won't change * match_len value, which mean there is no point in waiting * for more arguments, this will just end in a infinite loop * parsing data and finding that some arguments are missing. */ return -EINVAL; } /* * return the beginning of the next unfinished param so we don't * "skip" any data that could be parsed later. 
*/ return begin - cmd->cmd_len; } /* process a "matched" command */ static int process_cmd(const struct modem_cmd *cmd, size_t match_len, struct modem_cmd_handler_data *data) { int parsed_len = 0, ret = 0; uint8_t *argv[CONFIG_MODEM_CMD_HANDLER_MAX_PARAM_COUNT]; uint16_t argc = 0U; /* reset params */ memset(argv, 0, sizeof(argv[0]) * ARRAY_SIZE(argv)); /* do we need to parse arguments? */ if (cmd->arg_count_max > 0U) { /* returns < 0 on error and > 0 for parsed len */ parsed_len = parse_params(data, match_len, cmd, argv, ARRAY_SIZE(argv), &argc); if (parsed_len < 0) { return parsed_len; } } /* skip cmd_len + parsed len */ data->rx_buf = net_buf_skip(data->rx_buf, cmd->cmd_len + parsed_len); /* call handler */ if (cmd->func) { ret = cmd->func(data, match_len - cmd->cmd_len - parsed_len, argv, argc); if (ret == -EAGAIN) { /* wait for more data */ net_buf_push(data->rx_buf, cmd->cmd_len + parsed_len); } } return ret; } /* * check 3 arrays of commands for a match in match_buf: * - response handlers[0] * - unsolicited handlers[1] * - current assigned handlers[2] */ static const struct modem_cmd *find_cmd_match( struct modem_cmd_handler_data *data) { int j; size_t i; for (j = 0; j < ARRAY_SIZE(data->cmds); j++) { if (!data->cmds[j] || data->cmds_len[j] == 0U) { continue; } for (i = 0; i < data->cmds_len[j]; i++) { /* match on "empty" cmd */ if (strlen(data->cmds[j][i].cmd) == 0 || strncmp(data->match_buf, data->cmds[j][i].cmd, data->cmds[j][i].cmd_len) == 0) { return &data->cmds[j][i]; } } } return NULL; } static const struct modem_cmd *find_cmd_direct_match( struct modem_cmd_handler_data *data) { size_t j, i; for (j = 0; j < ARRAY_SIZE(data->cmds); j++) { if (!data->cmds[j] || data->cmds_len[j] == 0U) { continue; } for (i = 0; i < data->cmds_len[j]; i++) { /* match start of cmd */ if (data->cmds[j][i].direct && (data->cmds[j][i].cmd[0] == '\0' || starts_with(data->rx_buf, data->cmds[j][i].cmd))) { return &data->cmds[j][i]; } } } return NULL; } static int 
cmd_handler_process_iface_data(struct modem_cmd_handler_data *data, struct modem_iface *iface) { struct net_buf *last; size_t bytes_read = 0; int ret; if (!data->rx_buf) { data->rx_buf = net_buf_alloc(data->buf_pool, data->alloc_timeout); if (!data->rx_buf) { /* there is potentially more data waiting */ return -ENOMEM; } } last = net_buf_frag_last(data->rx_buf); /* read all of the data from modem iface */ while (true) { struct net_buf *frag = last; size_t frag_room = net_buf_tailroom(frag); if (!frag_room) { frag = net_buf_alloc(data->buf_pool, data->alloc_timeout); if (!frag) { /* there is potentially more data waiting */ return -ENOMEM; } net_buf_frag_insert(last, frag); last = frag; frag_room = net_buf_tailroom(frag); } ret = iface->read(iface, net_buf_tail(frag), frag_room, &bytes_read); if (ret < 0 || bytes_read == 0) { /* modem context buffer is empty */ return 0; } net_buf_add(frag, bytes_read); } } static void cmd_handler_process_rx_buf(struct modem_cmd_handler_data *data) { const struct modem_cmd *cmd; struct net_buf *frag = NULL; size_t match_len; int ret; uint16_t offset, len; /* process all of the data in the net_buf */ while (data->rx_buf && data->rx_buf->len) { skipcrlf(data); if (!data->rx_buf || !data->rx_buf->len) { break; } cmd = find_cmd_direct_match(data); if (cmd && cmd->func) { ret = cmd->func(data, cmd->cmd_len, NULL, 0); if (ret == -EAGAIN) { /* Wait for more data */ break; } else if (ret > 0) { LOG_DBG("match direct cmd [%s] (ret:%d)", cmd->cmd, ret); data->rx_buf = net_buf_skip(data->rx_buf, ret); } continue; } frag = NULL; /* locate next CR/LF */ len = findcrlf(data, &frag, &offset); if (!frag) { /* * No CR/LF found. 
Let's exit and leave any data * for next time */ break; } /* load match_buf with content up to the next CR/LF */ /* NOTE: keep room in match_buf for ending NUL char */ match_len = net_buf_linearize(data->match_buf, data->match_buf_len - 1, data->rx_buf, 0, len); if ((data->match_buf_len - 1) < match_len) { LOG_ERR("Match buffer size (%zu) is too small for " "incoming command size: %zu! Truncating!", data->match_buf_len - 1, match_len); } #if defined(CONFIG_MODEM_CONTEXT_VERBOSE_DEBUG) LOG_HEXDUMP_DBG(data->match_buf, match_len, "RECV"); #endif k_sem_take(&data->sem_parse_lock, K_FOREVER); cmd = find_cmd_match(data); if (cmd) { LOG_DBG("match cmd [%s] (len:%zu)", cmd->cmd, match_len); ret = process_cmd(cmd, match_len, data); if (ret == -EAGAIN) { k_sem_give(&data->sem_parse_lock); break; } else if (ret < 0) { LOG_ERR("process cmd [%s] (len:%zu, ret:%d)", cmd->cmd, match_len, ret); } /* * make sure we didn't run out of data during * command processing */ if (!data->rx_buf) { /* we're out of data, exit early */ k_sem_give(&data->sem_parse_lock); break; } frag = NULL; /* * We've handled the current line. * Let's skip any "extra" data in that * line, and look for the next CR/LF. * This leaves us ready for the next * handler search. * Ignore the length returned. 
*/ (void)findcrlf(data, &frag, &offset); } k_sem_give(&data->sem_parse_lock); if (frag && data->rx_buf) { /* clear out processed line (net_buf's) */ while (frag && data->rx_buf != frag) { data->rx_buf = net_buf_frag_del(NULL, data->rx_buf); } net_buf_pull(data->rx_buf, offset); } } } static void cmd_handler_process(struct modem_cmd_handler *cmd_handler, struct modem_iface *iface) { struct modem_cmd_handler_data *data; int err; if (!cmd_handler || !cmd_handler->cmd_handler_data || !iface || !iface->read) { return; } data = (struct modem_cmd_handler_data *)(cmd_handler->cmd_handler_data); do { err = cmd_handler_process_iface_data(data, iface); cmd_handler_process_rx_buf(data); } while (err); } int modem_cmd_handler_get_error(struct modem_cmd_handler_data *data) { if (!data) { return -EINVAL; } return data->last_error; } int modem_cmd_handler_set_error(struct modem_cmd_handler_data *data, int error_code) { if (!data) { return -EINVAL; } data->last_error = error_code; return 0; } int modem_cmd_handler_update_cmds(struct modem_cmd_handler_data *data, const struct modem_cmd *handler_cmds, size_t handler_cmds_len, bool reset_error_flag) { if (!data) { return -EINVAL; } data->cmds[CMD_HANDLER] = handler_cmds; data->cmds_len[CMD_HANDLER] = handler_cmds_len; if (reset_error_flag) { data->last_error = 0; } return 0; } int modem_cmd_send_ext(struct modem_iface *iface, struct modem_cmd_handler *handler, const struct modem_cmd *handler_cmds, size_t handler_cmds_len, const uint8_t *buf, struct k_sem *sem, k_timeout_t timeout, int flags) { struct modem_cmd_handler_data *data; int ret = 0; if (!iface || !handler || !handler->cmd_handler_data || !buf) { return -EINVAL; } if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { /* semaphore is not needed if there is no timeout */ sem = NULL; } else if (!sem) { /* cannot respect timeout without semaphore */ return -EINVAL; } data = (struct modem_cmd_handler_data *)(handler->cmd_handler_data); if (!(flags & MODEM_NO_TX_LOCK)) { 
k_sem_take(&data->sem_tx_lock, K_FOREVER); } if (!(flags & MODEM_NO_SET_CMDS)) { ret = modem_cmd_handler_update_cmds(data, handler_cmds, handler_cmds_len, true); if (ret < 0) { goto unlock_tx_lock; } } #if defined(CONFIG_MODEM_CONTEXT_VERBOSE_DEBUG) LOG_HEXDUMP_DBG(buf, strlen(buf), "SENT DATA"); if (data->eol_len > 0) { if (data->eol[0] != '\r') { /* Print the EOL only if it is not \r, otherwise there * is just too much printing. */ LOG_HEXDUMP_DBG(data->eol, data->eol_len, "SENT EOL"); } } else { LOG_DBG("EOL not set!!!"); } #endif if (sem) { k_sem_reset(sem); } iface->write(iface, buf, strlen(buf)); iface->write(iface, data->eol, data->eol_len); if (sem) { ret = k_sem_take(sem, timeout); if (ret == 0) { ret = data->last_error; } else if (ret == -EAGAIN) { ret = -ETIMEDOUT; } } if (!(flags & MODEM_NO_UNSET_CMDS)) { /* unset handlers and ignore any errors */ (void)modem_cmd_handler_update_cmds(data, NULL, 0U, false); } unlock_tx_lock: if (!(flags & MODEM_NO_TX_LOCK)) { k_sem_give(&data->sem_tx_lock); } return ret; } /* run a set of AT commands */ int modem_cmd_handler_setup_cmds(struct modem_iface *iface, struct modem_cmd_handler *handler, const struct setup_cmd *cmds, size_t cmds_len, struct k_sem *sem, k_timeout_t timeout) { int ret = 0; size_t i; for (i = 0; i < cmds_len; i++) { if (cmds[i].handle_cmd.cmd && cmds[i].handle_cmd.func) { ret = modem_cmd_send(iface, handler, &cmds[i].handle_cmd, 1U, cmds[i].send_cmd, sem, timeout); } else { ret = modem_cmd_send(iface, handler, NULL, 0, cmds[i].send_cmd, sem, timeout); } k_sleep(K_MSEC(50)); if (ret < 0) { LOG_ERR("command %s ret:%d", cmds[i].send_cmd, ret); break; } } return ret; } /* run a set of AT commands, without lock */ int modem_cmd_handler_setup_cmds_nolock(struct modem_iface *iface, struct modem_cmd_handler *handler, const struct setup_cmd *cmds, size_t cmds_len, struct k_sem *sem, k_timeout_t timeout) { int ret = 0; size_t i; for (i = 0; i < cmds_len; i++) { if (cmds[i].handle_cmd.cmd && 
cmds[i].handle_cmd.func) { ret = modem_cmd_send_nolock(iface, handler, &cmds[i].handle_cmd, 1U, cmds[i].send_cmd, sem, timeout); } else { ret = modem_cmd_send_nolock(iface, handler, NULL, 0, cmds[i].send_cmd, sem, timeout); } k_sleep(K_MSEC(50)); if (ret < 0) { LOG_ERR("command %s ret:%d", cmds[i].send_cmd, ret); break; } } return ret; } int modem_cmd_handler_tx_lock(struct modem_cmd_handler *handler, k_timeout_t timeout) { struct modem_cmd_handler_data *data; data = (struct modem_cmd_handler_data *)(handler->cmd_handler_data); return k_sem_take(&data->sem_tx_lock, timeout); } void modem_cmd_handler_tx_unlock(struct modem_cmd_handler *handler) { struct modem_cmd_handler_data *data; data = (struct modem_cmd_handler_data *)(handler->cmd_handler_data); k_sem_give(&data->sem_tx_lock); } int modem_cmd_handler_init(struct modem_cmd_handler *handler, struct modem_cmd_handler_data *data, const struct modem_cmd_handler_config *config) { /* Verify arguments */ if (handler == NULL || data == NULL || config == NULL) { return -EINVAL; } /* Verify config */ if ((config->match_buf == NULL) || (config->match_buf_len == 0) || (config->buf_pool == NULL) || (NULL != config->response_cmds && 0 == config->response_cmds_len) || (NULL != config->unsol_cmds && 0 == config->unsol_cmds_len)) { return -EINVAL; } /* Assign data to command handler */ handler->cmd_handler_data = data; /* Assign command process implementation to command handler */ handler->process = cmd_handler_process; /* Store arguments */ data->match_buf = config->match_buf; data->match_buf_len = config->match_buf_len; data->buf_pool = config->buf_pool; data->alloc_timeout = config->alloc_timeout; data->eol = config->eol; data->cmds[CMD_RESP] = config->response_cmds; data->cmds_len[CMD_RESP] = config->response_cmds_len; data->cmds[CMD_UNSOL] = config->unsol_cmds; data->cmds_len[CMD_UNSOL] = config->unsol_cmds_len; /* Process end of line */ data->eol_len = data->eol == NULL ? 
0 : strlen(data->eol); /* Store optional user data */ data->user_data = config->user_data; /* Initialize command handler data members */ k_sem_init(&data->sem_tx_lock, 1, 1); k_sem_init(&data->sem_parse_lock, 1, 1); return 0; } ```
/content/code_sandbox/drivers/modem/modem_cmd_handler.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,731
```objective-c /** @file * @brief Modem interface for UART header file. * * Modem interface UART handling for modem context driver. */ /* * */ #ifndef ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_IFACE_UART_H_ #define ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_IFACE_UART_H_ #include <zephyr/kernel.h> #ifdef __cplusplus extern "C" { #endif struct modem_iface_uart_data { /* HW flow control */ bool hw_flow_control; /* ring buffer */ struct ring_buf rx_rb; /* rx semaphore */ struct k_sem rx_sem; #ifdef CONFIG_MODEM_IFACE_UART_ASYNC /* tx semaphore */ struct k_sem tx_sem; #endif /* CONFIG_MODEM_IFACE_UART_ASYNC */ }; /** * @brief Init modem interface device for UART * * @details This can be called after the init if the UART is changed. * * @param iface: modem interface to initialize. * @param dev_name: name of the UART device to use * * @retval 0 if ok, < 0 if error. */ int modem_iface_uart_init_dev(struct modem_iface *iface, const struct device *dev); /** * @brief Modem uart interface configuration * * @param rx_rb_buf Buffer used for internal ring buffer * @param rx_rb_buf_len Size of buffer used for internal ring buffer * @param dev UART device used for interface * @param hw_flow_control Set if hardware flow control is used */ struct modem_iface_uart_config { char *rx_rb_buf; size_t rx_rb_buf_len; const struct device *dev; bool hw_flow_control; }; /** * @brief Initialize modem interface for UART * * @param iface Interface structure to initialize * @param data UART data structure used by the modem interface * @param config UART configuration structure used to configure UART data structure * * @return -EINVAL if any argument is invalid * @return 0 if successful */ int modem_iface_uart_init(struct modem_iface *iface, struct modem_iface_uart_data *data, const struct modem_iface_uart_config *config); /** * @brief Wait for rx data ready from uart interface * * @param iface Interface to wait on * * @return 0 if data is ready * @return -EBUSY if returned without waiting * @return -EAGAIN if 
timeout occurred */ static inline int modem_iface_uart_rx_wait(struct modem_iface *iface, k_timeout_t timeout) { struct modem_iface_uart_data *data = (struct modem_iface_uart_data *)iface->iface_data; return k_sem_take(&data->rx_sem, timeout); } #ifdef __cplusplus } #endif #endif /* ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_IFACE_UART_H_ */ ```
/content/code_sandbox/drivers/modem/modem_iface_uart.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
575
```c /* * Organisation (CSIRO) ABN 41 687 119 230. * */ #include <zephyr/logging/log.h> #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include "modem_context.h" #include "modem_iface_uart.h" LOG_MODULE_REGISTER(modem_iface_uart_async, CONFIG_MODEM_LOG_LEVEL); #define RX_BUFFER_SIZE CONFIG_MODEM_IFACE_UART_ASYNC_RX_BUFFER_SIZE #define RX_BUFFER_NUM CONFIG_MODEM_IFACE_UART_ASYNC_RX_NUM_BUFFERS K_MEM_SLAB_DEFINE(uart_modem_async_rx_slab, RX_BUFFER_SIZE, RX_BUFFER_NUM, 1); static void iface_uart_async_callback(const struct device *dev, struct uart_event *evt, void *user_data) { struct modem_iface *iface = user_data; struct modem_iface_uart_data *data = iface->iface_data; uint32_t written; void *buf; int rc; switch (evt->type) { case UART_TX_DONE: k_sem_give(&data->tx_sem); break; case UART_RX_BUF_REQUEST: /* Allocate next RX buffer for UART driver */ rc = k_mem_slab_alloc(&uart_modem_async_rx_slab, (void **)&buf, K_NO_WAIT); if (rc < 0) { /* Major problems, UART_RX_BUF_RELEASED event is not being generated, or * CONFIG_MODEM_IFACE_UART_ASYNC_RX_NUM_BUFFERS is not large enough. 
*/ LOG_ERR("RX buffer starvation"); break; } /* Provide the buffer to the UART driver */ uart_rx_buf_rsp(dev, buf, RX_BUFFER_SIZE); break; case UART_RX_BUF_RELEASED: /* UART driver is done with memory, free it */ k_mem_slab_free(&uart_modem_async_rx_slab, (void *)evt->data.rx_buf.buf); break; case UART_RX_RDY: /* Place received data on the ring buffer */ written = ring_buf_put(&data->rx_rb, evt->data.rx.buf + evt->data.rx.offset, evt->data.rx.len); if (written != evt->data.rx.len) { LOG_WRN("Received bytes dropped from ring buf"); } /* Notify upper layer that new data has arrived */ k_sem_give(&data->rx_sem); break; case UART_RX_STOPPED: break; case UART_RX_DISABLED: /* RX stopped (likely due to line error), re-enable it */ rc = k_mem_slab_alloc(&uart_modem_async_rx_slab, (void **)&buf, K_FOREVER); if (rc < 0) { LOG_ERR("RX disabled and buffer starvation"); break; } rc = uart_rx_enable(dev, buf, RX_BUFFER_SIZE, CONFIG_MODEM_IFACE_UART_ASYNC_RX_TIMEOUT_US); if (rc < 0) { LOG_ERR("Failed to re-enable UART"); } break; default: break; } } static int modem_iface_uart_async_read(struct modem_iface *iface, uint8_t *buf, size_t size, size_t *bytes_read) { struct modem_iface_uart_data *data; if (!iface || !iface->iface_data) { return -EINVAL; } if (size == 0) { *bytes_read = 0; return 0; } /* Pull data off the ring buffer */ data = iface->iface_data; *bytes_read = ring_buf_get(&data->rx_rb, buf, size); return 0; } static int modem_iface_uart_async_write(struct modem_iface *iface, const uint8_t *buf, size_t size) { struct modem_iface_uart_data *data; int rc; if (!iface || !iface->iface_data) { return -EINVAL; } if (size == 0) { return 0; } /* Start the transmission */ rc = uart_tx(iface->dev, buf, size, SYS_FOREVER_MS); if (rc >= 0) { /* Wait until the transmission completes */ data = iface->iface_data; k_sem_take(&data->tx_sem, K_FOREVER); } return rc; } int modem_iface_uart_init_dev(struct modem_iface *iface, const struct device *dev) { struct modem_iface_uart_data *data; 
void *buf; int rc; if (!device_is_ready(dev)) { return -ENODEV; } /* Check if there's already a device inited to this iface. If so, * interrupts needs to be disabled on that too before switching to avoid * race conditions with modem_iface_uart_isr. */ if (iface->dev) { LOG_WRN("Device %s already inited", iface->dev->name); uart_rx_disable(iface->dev); } iface->dev = dev; data = iface->iface_data; /* Configure async UART callback */ rc = uart_callback_set(dev, iface_uart_async_callback, iface); if (rc < 0) { LOG_ERR("Failed to set UART callback"); return rc; } /* Enable reception permanently on the interface */ k_mem_slab_alloc(&uart_modem_async_rx_slab, (void **)&buf, K_FOREVER); rc = uart_rx_enable(dev, buf, RX_BUFFER_SIZE, CONFIG_MODEM_IFACE_UART_ASYNC_RX_TIMEOUT_US); if (rc < 0) { LOG_ERR("Failed to enable UART RX"); } return rc; } int modem_iface_uart_init(struct modem_iface *iface, struct modem_iface_uart_data *data, const struct modem_iface_uart_config *config) { int ret; if (iface == NULL || data == NULL || config == NULL) { return -EINVAL; } iface->iface_data = data; iface->read = modem_iface_uart_async_read; iface->write = modem_iface_uart_async_write; ring_buf_init(&data->rx_rb, config->rx_rb_buf_len, config->rx_rb_buf); k_sem_init(&data->rx_sem, 0, 1); k_sem_init(&data->tx_sem, 0, 1); /* Configure hardware flow control */ data->hw_flow_control = config->hw_flow_control; /* Get UART device */ ret = modem_iface_uart_init_dev(iface, config->dev); if (ret < 0) { iface->iface_data = NULL; iface->read = NULL; iface->write = NULL; return ret; } return 0; } ```
/content/code_sandbox/drivers/modem/modem_iface_uart_async.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,396
```c /** @file * @brief Modem socket / packet size handler * * Generic modem socket and packet size implementation for modem context */ /* * */ #include <zephyr/kernel.h> #include <zephyr/sys/fdtable.h> #include "modem_socket.h" /* * Packet Size Support Functions */ uint16_t modem_socket_next_packet_size(struct modem_socket_config *cfg, struct modem_socket *sock) { uint16_t total = 0U; k_sem_take(&cfg->sem_lock, K_FOREVER); if (!sock || !sock->packet_count) { goto exit; } total = sock->packet_sizes[0]; exit: k_sem_give(&cfg->sem_lock); return total; } static uint16_t modem_socket_packet_get_total(struct modem_socket *sock) { int i; uint16_t total = 0U; if (!sock || !sock->packet_count) { return 0U; } for (i = 0; i < sock->packet_count; i++) { total += sock->packet_sizes[i]; } return total; } static int modem_socket_packet_drop_first(struct modem_socket *sock) { int i; if (!sock || !sock->packet_count) { return -EINVAL; } sock->packet_count--; for (i = 0; i < sock->packet_count; i++) { sock->packet_sizes[i] = sock->packet_sizes[i + 1]; } sock->packet_sizes[sock->packet_count] = 0U; return 0; } int modem_socket_packet_size_update(struct modem_socket_config *cfg, struct modem_socket *sock, int new_total) { uint16_t old_total = 0U; if (!sock) { return -EINVAL; } k_sem_take(&cfg->sem_lock, K_FOREVER); if (new_total < 0) { new_total += modem_socket_packet_get_total(sock); } if (new_total <= 0) { /* reset outstanding value here */ sock->packet_count = 0U; sock->packet_sizes[0] = 0U; k_poll_signal_reset(&sock->sig_data_ready); k_sem_give(&cfg->sem_lock); return 0; } old_total = modem_socket_packet_get_total(sock); if (new_total == old_total) { goto data_ready; } /* remove sent packets */ if (new_total < old_total) { /* remove packets that are not included in new_size */ while (old_total > new_total && sock->packet_count > 0) { /* handle partial read */ if (old_total - new_total < sock->packet_sizes[0]) { sock->packet_sizes[0] -= old_total - new_total; break; } old_total -= 
sock->packet_sizes[0]; modem_socket_packet_drop_first(sock); } goto data_ready; } /* new packet to add */ if (sock->packet_count >= CONFIG_MODEM_SOCKET_PACKET_COUNT) { k_sem_give(&cfg->sem_lock); return -ENOMEM; } if (new_total - old_total > 0) { sock->packet_sizes[sock->packet_count] = new_total - old_total; sock->packet_count++; } else { k_sem_give(&cfg->sem_lock); return -EINVAL; } data_ready: if (sock->packet_sizes[0]) { k_poll_signal_raise(&sock->sig_data_ready, 0); } else { k_poll_signal_reset(&sock->sig_data_ready); } k_sem_give(&cfg->sem_lock); return new_total; } /* * Socket Support Functions */ /* * This function reserves a file descriptor from the fdtable, make sure to update the * POSIX_FDS_MAX Kconfig option to support at minimum the required amount of sockets */ int modem_socket_get(struct modem_socket_config *cfg, int family, int type, int proto) { int i; k_sem_take(&cfg->sem_lock, K_FOREVER); for (i = 0; i < cfg->sockets_len; i++) { if (cfg->sockets[i].id < cfg->base_socket_id) { break; } } if (i >= cfg->sockets_len) { k_sem_give(&cfg->sem_lock); return -ENOMEM; } cfg->sockets[i].sock_fd = zvfs_reserve_fd(); if (cfg->sockets[i].sock_fd < 0) { k_sem_give(&cfg->sem_lock); return -errno; } cfg->sockets[i].family = family; cfg->sockets[i].type = type; cfg->sockets[i].ip_proto = proto; cfg->sockets[i].id = (cfg->assign_id) ? 
(i + cfg->base_socket_id) : (cfg->base_socket_id + cfg->sockets_len); zvfs_finalize_typed_fd(cfg->sockets[i].sock_fd, &cfg->sockets[i], (const struct fd_op_vtable *)cfg->vtable, ZVFS_MODE_IFSOCK); k_sem_give(&cfg->sem_lock); return cfg->sockets[i].sock_fd; } struct modem_socket *modem_socket_from_fd(struct modem_socket_config *cfg, int sock_fd) { int i; k_sem_take(&cfg->sem_lock, K_FOREVER); for (i = 0; i < cfg->sockets_len; i++) { if (cfg->sockets[i].sock_fd == sock_fd) { k_sem_give(&cfg->sem_lock); return &cfg->sockets[i]; } } k_sem_give(&cfg->sem_lock); return NULL; } struct modem_socket *modem_socket_from_id(struct modem_socket_config *cfg, int id) { int i; if (id < cfg->base_socket_id) { return NULL; } k_sem_take(&cfg->sem_lock, K_FOREVER); for (i = 0; i < cfg->sockets_len; i++) { if (cfg->sockets[i].id == id) { k_sem_give(&cfg->sem_lock); return &cfg->sockets[i]; } } k_sem_give(&cfg->sem_lock); return NULL; } struct modem_socket *modem_socket_from_newid(struct modem_socket_config *cfg) { return modem_socket_from_id(cfg, cfg->base_socket_id + cfg->sockets_len); } void modem_socket_put(struct modem_socket_config *cfg, int sock_fd) { struct modem_socket *sock = modem_socket_from_fd(cfg, sock_fd); if (!sock) { return; } k_sem_take(&cfg->sem_lock, K_FOREVER); sock->id = cfg->base_socket_id - 1; sock->sock_fd = -1; sock->is_waiting = false; sock->is_connected = false; (void)memset(&sock->src, 0, sizeof(struct sockaddr)); (void)memset(&sock->dst, 0, sizeof(struct sockaddr)); memset(&sock->packet_sizes, 0, sizeof(sock->packet_sizes)); sock->packet_count = 0; k_sem_reset(&sock->sem_data_ready); k_poll_signal_reset(&sock->sig_data_ready); k_sem_give(&cfg->sem_lock); } /* * Generic Poll Function */ /* * FIXME: The design here makes the poll function non-reentrant for same sockets. * If two different threads poll on two identical sockets we'll end up with unexpected * behavior - the higher priority thread will be unblocked, regardless on which * socket it polled. 
I think we could live with such limitation though in the
 * initial implementation, but this should be improved in the future. */

/**
 * @brief poll() implementation for offloaded modem sockets.
 *
 * Sockets are considered always writable (ZSOCK_POLLOUT is granted
 * unconditionally); ZSOCK_POLLIN is reported when the socket already has a
 * queued packet (packet_sizes[0] > 0).  If no event is immediately pending,
 * blocks on the sockets' data-ready poll signals via k_poll().
 *
 * @param cfg   Socket configuration holding the socket table.
 * @param fds   Array of poll descriptors to check.
 * @param nfds  Number of entries in @p fds.
 * @param msecs Timeout in milliseconds; negative means wait forever.
 *
 * @retval Number of descriptors with events set, or -1 with errno on error.
 *
 * NOTE(review): a negative @p nfds is not rejected here, which would make the
 * VLA below have negative size (UB) — presumably callers guarantee nfds >= 0;
 * verify against the socket offload layer.
 */
int modem_socket_poll(struct modem_socket_config *cfg, struct zsock_pollfd *fds, int nfds,
		      int msecs)
{
	struct modem_socket *sock;
	int ret, i;
	uint8_t found_count = 0;

	if (!cfg || nfds > CONFIG_NET_SOCKETS_POLL_MAX) {
		return -EINVAL;
	}

	/* One k_poll event slot per polled descriptor (at most nfds used) */
	struct k_poll_event events[nfds];
	int eventcount = 0;

	/* First pass: register data-ready signals and detect already-pending events */
	for (i = 0; i < nfds; i++) {
		sock = modem_socket_from_fd(cfg, fds[i].fd);
		if (sock) {
			/*
			 * Handle user check for POLLOUT events:
			 * we consider the socket to always be writable.
			 */
			if (fds[i].events & ZSOCK_POLLOUT) {
				found_count++;
				/* An event is already available: no need to scan further */
				break;
			} else if (fds[i].events & ZSOCK_POLLIN) {
				k_poll_event_init(&events[eventcount++], K_POLL_TYPE_SIGNAL,
						  K_POLL_MODE_NOTIFY_ONLY, &sock->sig_data_ready);
				/* Data already queued on this socket: event pending */
				if (sock->packet_sizes[0] > 0U) {
					found_count++;
					break;
				}
			}
		}
	}

	/* Avoid waiting on semaphore if we have already found an event */
	ret = 0;
	if (!found_count) {
		k_timeout_t timeout = K_FOREVER;

		if (msecs >= 0) {
			timeout = K_MSEC(msecs);
		}
		ret = k_poll(events, eventcount, timeout);
	}

	/* Reset counter as we reiterate on all polled sockets */
	found_count = 0;

	/* Second pass: fill in revents for every descriptor with a pending event */
	for (i = 0; i < nfds; i++) {
		sock = modem_socket_from_fd(cfg, fds[i].fd);
		if (!sock) {
			continue;
		}

		/*
		 * Handle user check for ZSOCK_POLLOUT events:
		 * we consider the socket to always be writable.
		 */
		if (fds[i].events & ZSOCK_POLLOUT) {
			fds[i].revents |= ZSOCK_POLLOUT;
			found_count++;
		} else if ((fds[i].events & ZSOCK_POLLIN) && (sock->packet_sizes[0] > 0U)) {
			fds[i].revents |= ZSOCK_POLLIN;
			found_count++;
		}
	}

	/* EBUSY, EAGAIN and ETIMEDOUT aren't true errors */
	if (ret < 0 && ret != -EBUSY && ret != -EAGAIN && ret != -ETIMEDOUT) {
		/* NOTE(review): ret is a negative kernel error code here, so errno is
		 * assigned a negative value; POSIX errno values are positive — confirm
		 * whether callers expect -errno or errno convention.
		 */
		errno = ret;
		return -1;
	}

	errno = 0;
	return found_count;
}

/**
 * @brief Prepare a k_poll event for one descriptor (zsock poll offload hook).
 *
 * Registers the socket's data-ready signal for ZSOCK_POLLIN.  ZSOCK_POLLOUT
 * preparation is not implemented and fails with ENOTSUP.
 *
 * @retval 0 on success, -1 with errno (ENOMEM if @p pev is exhausted,
 *         ENOTSUP for POLLOUT).
 */
int modem_socket_poll_prepare(struct modem_socket_config *cfg, struct modem_socket *sock,
			      struct zsock_pollfd *pfd, struct k_poll_event **pev,
			      struct k_poll_event *pev_end)
{
	if (pfd->events & ZSOCK_POLLIN) {
		if (*pev == pev_end) {
			errno = ENOMEM;
			return -1;
		}

		k_poll_event_init(*pev, K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY,
				  &sock->sig_data_ready);
		(*pev)++;
	}

	if (pfd->events & ZSOCK_POLLOUT) {
		if (*pev == pev_end) {
			errno = ENOMEM;
			return -1;
		}
		/* Not Implemented */
		errno = ENOTSUP;
		return -1;
	}

	return 0;
}

/**
 * @brief Translate k_poll results back into revents (zsock poll offload hook).
 *
 * Sets ZSOCK_POLLIN when the corresponding signal fired; ZSOCK_POLLOUT is
 * always granted since the modem socket is considered always writable.
 * Advances *pev past each event slot consumed in the prepare step.
 */
int modem_socket_poll_update(struct modem_socket *sock, struct zsock_pollfd *pfd,
			     struct k_poll_event **pev)
{
	ARG_UNUSED(sock);

	if (pfd->events & ZSOCK_POLLIN) {
		if ((*pev)->state != K_POLL_STATE_NOT_READY) {
			pfd->revents |= ZSOCK_POLLIN;
		}
		(*pev)++;
	}

	if (pfd->events & ZSOCK_POLLOUT) {
		/* Not implemented, but the modem socket is always ready to transmit,
		 * so set the revents
		 */
		pfd->revents |= ZSOCK_POLLOUT;
		(*pev)++;
	}

	return 0;
}

/**
 * @brief Block the caller until data is signalled on @p sock.
 *
 * Marks the socket as waiting (under cfg->sem_lock) and then pends forever on
 * the socket's data-ready semaphore, which modem_socket_data_ready() gives.
 */
void modem_socket_wait_data(struct modem_socket_config *cfg, struct modem_socket *sock)
{
	k_sem_take(&cfg->sem_lock, K_FOREVER);
	sock->is_waiting = true;
	k_sem_give(&cfg->sem_lock);
	k_sem_take(&sock->sem_data_ready, K_FOREVER);
}

/**
 * @brief Signal that data arrived on @p sock.
 *
 * Only gives the data-ready semaphore when a reader is actually blocked in
 * modem_socket_wait_data() (is_waiting), so the semaphore count cannot drift.
 */
void modem_socket_data_ready(struct modem_socket_config *cfg, struct modem_socket *sock)
{
	k_sem_take(&cfg->sem_lock, K_FOREVER);
	if (sock->is_waiting) {
		/* unblock sockets waiting on recv() */
		sock->is_waiting = false;
		k_sem_give(&sock->sem_data_ready);
	}
	k_sem_give(&cfg->sem_lock);
}

/**
 * @brief Initialize a modem socket configuration and its socket table.
 *
 * @param cfg            Configuration to initialize.
 * @param sockets        Backing array of sockets.
 * @param sockets_len    Number of entries in @p sockets (must be >= 1).
 * @param base_socket_id First valid socket id for this modem.
 * @param assign_id      True if ids are assigned dynamically on creation.
 * @param vtable         Socket operation vtable (must be non-NULL).
 *
 * @retval 0 on success, -EINVAL on bad arguments.
 */
int modem_socket_init(struct modem_socket_config *cfg, struct modem_socket *sockets,
		      size_t sockets_len, int base_socket_id,
		      bool assign_id, const struct socket_op_vtable *vtable)
{
	/* Verify arguments */
	if (cfg == NULL || sockets == NULL || sockets_len < 1 || vtable == NULL) {
		return -EINVAL;
	}

	/* Initialize config */
	cfg->sockets = sockets;
	cfg->sockets_len = sockets_len;
	cfg->base_socket_id = base_socket_id;
	cfg->assign_id = assign_id;
	k_sem_init(&cfg->sem_lock, 1, 1);
	cfg->vtable = vtable;

	/* Initialize associated sockets */
	for (int i = 0; i < cfg->sockets_len; i++) {
		/* Clear entire socket structure */
		memset(&cfg->sockets[i], 0, sizeof(cfg->sockets[i]));

		/* Initialize socket members */
		k_sem_init(&cfg->sockets[i].sem_data_ready, 0, 1);
		k_poll_signal_init(&cfg->sockets[i].sig_data_ready);
		/* -1 marks the id as unassigned */
		cfg->sockets[i].id = -1;
	}

	return 0;
}

/**
 * @brief Check whether @p sock is currently allocated.
 *
 * With static id assignment (assign_id == false), a reserved sentinel id of
 * (base_socket_id + sockets_len) also counts as allocated; otherwise a socket
 * is allocated iff its id lies in the valid assigned range.
 */
bool modem_socket_is_allocated(const struct modem_socket_config *cfg,
			       const struct modem_socket *sock)
{
	/* Socket is allocated with a reserved id value if id is not dynamically assigned */
	if (cfg->assign_id == false && sock->id == (cfg->base_socket_id + cfg->sockets_len)) {
		return true;
	}

	/* Socket must have been allocated if id is assigned */
	return modem_socket_id_is_assigned(cfg, sock);
}

/**
 * @brief Check whether @p sock holds an id in the valid range
 *        [base_socket_id, base_socket_id + sockets_len).
 */
bool modem_socket_id_is_assigned(const struct modem_socket_config *cfg,
				 const struct modem_socket *sock)
{
	/* Verify socket is assigned to a valid value */
	if ((cfg->base_socket_id <= sock->id) &&
	    (sock->id < (cfg->base_socket_id + cfg->sockets_len))) {
		return true;
	}
	return false;
}

/**
 * @brief Statically assign id @p id to @p sock.
 *
 * Only legal when dynamic id assignment is disabled and the socket does not
 * already hold a valid id.
 *
 * @retval 0 on success, -EPERM if assignment is not allowed, -EINVAL if @p id
 *         is outside the valid range.
 */
int modem_socket_id_assign(const struct modem_socket_config *cfg, struct modem_socket *sock,
			   int id)
{
	/* Verify dynamically assigning id is disabled */
	if (cfg->assign_id) {
		return -EPERM;
	}

	/* Verify id is currently not assigned */
	if (modem_socket_id_is_assigned(cfg, sock)) {
		return -EPERM;
	}

	/* Verify id is valid */
	if (id < cfg->base_socket_id || (cfg->base_socket_id + cfg->sockets_len) <= id) {
		return -EINVAL;
	}

	/* Assign id */
	sock->id = id;
	return 0;
}
```
/content/code_sandbox/drivers/modem/modem_socket.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,260
```unknown # u-blox SARA R4 driver options config MODEM_UBLOX_SARA bool "U-blox SARA modem driver" select MODEM_CONTEXT select MODEM_CMD_HANDLER select MODEM_IFACE_UART select MODEM_SOCKET select NET_OFFLOAD select NET_SOCKETS_OFFLOAD help Choose this setting to enable u-blox SARA-R4 LTE-CatM1/NB-IoT modem driver. if MODEM_UBLOX_SARA choice MODEM_UBLOX_SARA_VARIANT bool "u-blox SARA variant selection" default MODEM_UBLOX_SARA_R4 config MODEM_UBLOX_SARA_R4 bool "u-blox SARA-R4" help Enable support for SARA-R4 modem config MODEM_UBLOX_SARA_U2 bool "u-blox SARA-U2" help Enable support for SARA-U2 modem config MODEM_UBLOX_SARA_AUTODETECT_VARIANT bool "detect automatically" help Enable automatic detection of modem variant (SARA-R4 or SARA-U2) endchoice config MODEM_UBLOX_SARA_R4_RX_STACK_SIZE int "Stack size for the u-blox SARA-R4 modem driver RX thread" default 1028 help This stack is used by the u-blox SARA-R4 RX thread. config MODEM_UBLOX_SARA_R4_RX_WORKQ_STACK_SIZE int "Stack size for the u-blox SARA-R4 modem driver work queue" default 2048 help This stack is used by the work queue to pass off net_pkt data to the rest of the network stack, letting the rx thread continue processing data. config MODEM_UBLOX_SARA_R4_APN string "APN for establishing network connection" default "hologram" help This setting is used in the AT+CGDCONT command to set the APN name for the network connection context. This value is specific to the network provider and may need to be changed. config MODEM_UBLOX_SARA_AUTODETECT_APN bool "detect APN automatically" help Enable automatic detection of the APN, based on the IMSI If the detection fails, the configured APN will be used config MODEM_UBLOX_SARA_AUTODETECT_APN_PROFILES string "list of profiles to search when autodetecting APN" default "hologram=23450, wm=20601 29505 29509 23450 53703 90143" help Set a comma separated list of profiles, each containing of: <apn>=<IMSI_1> ... 
<IMSI_n> config MODEM_UBLOX_SARA_R4_MANUAL_MCCMNO string "MCC/MNO for establishing network connection" help This setting is used in the AT+COPS command to set the MCC/MNO for the network connection context. This value is specific to the network provider and may need to be changed if auto is not selected. config MODEM_UBLOX_SARA_R4_INIT_PRIORITY int "u-blox SARA-R4 driver init priority" default 80 help u-blox SARA-R4 device driver initialization priority. Do not mess with it unless you know what you are doing. Note that the priority needs to be lower than the net stack so that it can start before the networking sub-system. config MODEM_UBLOX_SARA_R4_NET_STATUS bool "Support for network status indication" help Choose this setting to use a modem GPIO pin as network indication. if MODEM_UBLOX_SARA_R4_NET_STATUS config MODEM_UBLOX_SARA_R4_NET_STATUS_PIN int "Network status indication GPIO ID" default 16 help This setting is used to configure one of the modem's GPIO pins as a network status indication. See the manual for the gpio ids and how they map to pin numbers. endif # MODEM_UBLOX_SARA_R4_NET_STATUS config MODEM_UBLOX_SARA_RSSI_WORK bool "RSSI polling work" default y help u-blox SARA-R4 device is configured to poll for RSSI config MODEM_UBLOX_SARA_RSSI_WORK_PERIOD int "Configure RSSI WORK polling frequency" depends on MODEM_UBLOX_SARA_RSSI_WORK default 30 help This setting is used to configure the period of RSSI polling endif # MODEM_UBLOX_SARA ```
/content/code_sandbox/drivers/modem/Kconfig.ublox-sara-r4
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,022
```objective-c
/** @file
 * @brief Modem context header file.
 *
 * A modem context driver allowing application to handle all
 * aspects of received protocol data.
 */

/*
 *
 */

#ifndef ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_CONTEXT_H_
#define ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_CONTEXT_H_

#include <zephyr/kernel.h>
#include <zephyr/net/buf.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/drivers/gpio.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Hardware interface abstraction for a modem (e.g. a UART). */
struct modem_iface {
	/* Underlying device used by this interface */
	const struct device *dev;

	/* Read up to @a size bytes into @a buf; actual count stored in @a bytes_read */
	int (*read)(struct modem_iface *iface, uint8_t *buf, size_t size, size_t *bytes_read);
	/* Write @a size bytes from @a buf to the modem */
	int (*write)(struct modem_iface *iface, const uint8_t *buf, size_t size);

	/* implementation data */
	void *iface_data;
};

/** Command handler abstraction that consumes data from a modem interface. */
struct modem_cmd_handler {
	/* Process incoming data available on @a iface */
	void (*process)(struct modem_cmd_handler *cmd_handler, struct modem_iface *iface);

	/* implementation data */
	void *cmd_handler_data;
};

/** Per-modem context: identity data, RF status, interface and cmd handler. */
struct modem_context {
	/* modem data */
	char *data_manufacturer;
	char *data_model;
	char *data_revision;
	char *data_imei;
#if defined(CONFIG_MODEM_SIM_NUMBERS)
	char *data_imsi;
	char *data_iccid;
#endif
#if defined(CONFIG_MODEM_CELL_INFO)
	int data_operator;
	int data_lac;
	int data_cellid;
	int data_act;
#endif
	int *data_rssi;
	bool is_automatic_oper;

	/* interface config */
	struct modem_iface iface;

	/* command handler config */
	struct modem_cmd_handler cmd_handler;

	/* driver data */
	void *driver_data;
};

/**
 * @brief IP address to string
 *
 * @param addr: sockaddr to be converted
 * @param buf: Buffer to store IP in string form
 * @param buf_size: buffer size
 *
 * @retval 0 if ok, < 0 if error.
 */
int modem_context_sprint_ip_addr(const struct sockaddr *addr, char *buf, size_t buf_size);

/**
 * @brief Get port from IP address
 *
 * @param addr: sockaddr
 * @param port: store port
 *
 * @retval 0 if ok, < 0 if error.
 */
int modem_context_get_addr_port(const struct sockaddr *addr, uint16_t *port);

/**
 * @brief Gets modem context by id.
 *
 * @param id: modem context id.
 *
 * @retval modem context or NULL.
 */
struct modem_context *modem_context_from_id(int id);

/**
 * @brief Finds modem context which owns the iface device.
 *
 * @param dev: device used by the modem iface.
 *
 * @retval Modem context or NULL.
 */
struct modem_context *modem_context_from_iface_dev(const struct device *dev);

/**
 * @brief Registers modem context.
 *
 * @note Prepares modem context to be used.
 *
 * @param ctx: modem context to register.
 *
 * @retval 0 if ok, < 0 if error.
 */
int modem_context_register(struct modem_context *ctx);

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_DRIVERS_MODEM_MODEM_CONTEXT_H_ */
```
/content/code_sandbox/drivers/modem/modem_context.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
698
```c /* * */ #define DT_DRV_COMPAT u_blox_sara_r4 #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(modem_ublox_sara_r4, CONFIG_MODEM_LOG_LEVEL); #include <zephyr/kernel.h> #include <ctype.h> #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/posix/fcntl.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_offload.h> #include <zephyr/net/offloaded_netdev.h> #include <zephyr/net/socket_offload.h> #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) #include <stdio.h> #endif #include "modem_context.h" #include "modem_socket.h" #include "modem_cmd_handler.h" #include "modem_iface_uart.h" #if !defined(CONFIG_MODEM_UBLOX_SARA_R4_MANUAL_MCCMNO) #define CONFIG_MODEM_UBLOX_SARA_R4_MANUAL_MCCMNO "" #endif #if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS) #include "tls_internal.h" #include <zephyr/net/tls_credentials.h> #endif /* pin settings */ static const struct gpio_dt_spec power_gpio = GPIO_DT_SPEC_INST_GET(0, mdm_power_gpios); #if DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) static const struct gpio_dt_spec reset_gpio = GPIO_DT_SPEC_INST_GET(0, mdm_reset_gpios); #endif #if DT_INST_NODE_HAS_PROP(0, mdm_vint_gpios) static const struct gpio_dt_spec vint_gpio = GPIO_DT_SPEC_INST_GET(0, mdm_vint_gpios); #endif #define MDM_UART_NODE DT_INST_BUS(0) #define MDM_UART_DEV DEVICE_DT_GET(MDM_UART_NODE) #define MDM_RESET_NOT_ASSERTED 1 #define MDM_RESET_ASSERTED 0 #define MDM_CMD_TIMEOUT K_SECONDS(10) #define MDM_DNS_TIMEOUT K_SECONDS(70) #define MDM_CMD_CONN_TIMEOUT K_SECONDS(120) #define MDM_REGISTRATION_TIMEOUT K_SECONDS(180) #define MDM_PROMPT_CMD_DELAY K_MSEC(50) #define MDM_MAX_DATA_LENGTH 1024 #define MDM_RECV_MAX_BUF 30 #define MDM_RECV_BUF_SIZE 128 #define MDM_MAX_SOCKETS 6 #define MDM_BASE_SOCKET_NUM 0 #define MDM_NETWORK_RETRY_COUNT 3 #define MDM_WAIT_FOR_RSSI_COUNT 10 #define MDM_WAIT_FOR_RSSI_DELAY K_SECONDS(2) #define MDM_MANUFACTURER_LENGTH 10 #define MDM_MODEL_LENGTH 16 #define MDM_REVISION_LENGTH 
64 #define MDM_IMEI_LENGTH 16 #define MDM_IMSI_LENGTH 16 #define MDM_APN_LENGTH 32 #define MDM_MAX_CERT_LENGTH 8192 #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_VARIANT) #define MDM_VARIANT_UBLOX_R4 4 #define MDM_VARIANT_UBLOX_U2 2 #endif NET_BUF_POOL_DEFINE(mdm_recv_pool, MDM_RECV_MAX_BUF, MDM_RECV_BUF_SIZE, 0, NULL); /* RX thread structures */ K_KERNEL_STACK_DEFINE(modem_rx_stack, CONFIG_MODEM_UBLOX_SARA_R4_RX_STACK_SIZE); struct k_thread modem_rx_thread; #if defined(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK) /* RX thread work queue */ K_KERNEL_STACK_DEFINE(modem_workq_stack, CONFIG_MODEM_UBLOX_SARA_R4_RX_WORKQ_STACK_SIZE); static struct k_work_q modem_workq; #endif /* socket read callback data */ struct socket_read_data { char *recv_buf; size_t recv_buf_len; struct sockaddr *recv_addr; uint16_t recv_read_len; }; /* driver data */ struct modem_data { struct net_if *net_iface; uint8_t mac_addr[6]; /* modem interface */ struct modem_iface_uart_data iface_data; uint8_t iface_rb_buf[MDM_MAX_DATA_LENGTH]; /* modem cmds */ struct modem_cmd_handler_data cmd_handler_data; uint8_t cmd_match_buf[MDM_RECV_BUF_SIZE + 1]; /* socket data */ struct modem_socket_config socket_config; struct modem_socket sockets[MDM_MAX_SOCKETS]; #if defined(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK) /* RSSI work */ struct k_work_delayable rssi_query_work; #endif /* modem data */ char mdm_manufacturer[MDM_MANUFACTURER_LENGTH]; char mdm_model[MDM_MODEL_LENGTH]; char mdm_revision[MDM_REVISION_LENGTH]; char mdm_imei[MDM_IMEI_LENGTH]; char mdm_imsi[MDM_IMSI_LENGTH]; int mdm_rssi; #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_VARIANT) /* modem variant */ int mdm_variant; #endif #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) /* APN */ char mdm_apn[MDM_APN_LENGTH]; #endif /* modem state */ int ev_creg; /* bytes written to socket in last transaction */ int sock_written; /* response semaphore */ struct k_sem sem_response; /* prompt semaphore */ struct k_sem sem_prompt; }; static struct modem_data mdata; static 
struct modem_context mctx; #if defined(CONFIG_DNS_RESOLVER) static struct zsock_addrinfo result; static struct sockaddr result_addr; static char result_canonname[DNS_MAX_NAME_SIZE + 1]; #endif /* helper macro to keep readability */ #define ATOI(s_, value_, desc_) modem_atoi(s_, value_, desc_, __func__) /** * @brief Convert string to long integer, but handle errors * * @param s: string with representation of integer number * @param err_value: on error return this value instead * @param desc: name the string being converted * @param func: function where this is called (typically __func__) * * @retval return integer conversion on success, or err_value on error */ static int modem_atoi(const char *s, const int err_value, const char *desc, const char *func) { int ret; char *endptr; ret = (int)strtol(s, &endptr, 10); if (!endptr || *endptr != '\0') { LOG_ERR("bad %s '%s' in %s", s, desc, func); return err_value; } return ret; } #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) /* the list of SIM profiles. 
Global scope, so the app can change it */ const char *modem_sim_profiles = CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN_PROFILES; int find_apn(char *apn, int apnlen, const char *profiles, const char *imsi) { int rc = -1; /* try to find a match */ char *s = strstr(profiles, imsi); if (s) { char *eos; /* find the assignment operator preceding the match */ while (s >= profiles && !strchr("=", *s)) { s--; } /* find the apn preceding the assignment operator */ while (s >= profiles && strchr(" =", *s)) { s--; } /* mark end of apn string */ eos = s+1; /* find first character of the apn */ while (s >= profiles && !strchr(" ,", *s)) { s--; } s++; /* copy the key */ if (s >= profiles) { int len = eos - s; if (len < apnlen) { memcpy(apn, s, len); apn[len] = '\0'; rc = 0; } else { LOG_ERR("buffer overflow"); } } } return rc; } /* try to detect APN automatically, based on IMSI */ int modem_detect_apn(const char *imsi) { int rc = -1; if (imsi != NULL && strlen(imsi) >= 5) { /* extract MMC and MNC from IMSI */ char mmcmnc[6]; *mmcmnc = 0; strncat(mmcmnc, imsi, sizeof(mmcmnc)-1); /* try to find a matching IMSI, and assign the APN */ rc = find_apn(mdata.mdm_apn, sizeof(mdata.mdm_apn), modem_sim_profiles, mmcmnc); if (rc < 0) { rc = find_apn(mdata.mdm_apn, sizeof(mdata.mdm_apn), modem_sim_profiles, "*"); } } if (rc == 0) { LOG_INF("Assign APN: \"%s\"", mdata.mdm_apn); } return rc; } #endif /* Forward declaration */ MODEM_CMD_DEFINE(on_cmd_sockwrite); /* send binary data via the +USO[ST/WR] commands */ static ssize_t send_socket_data(void *obj, const struct msghdr *msg, k_timeout_t timeout) { int ret; char send_buf[sizeof("AT+USO**=###," "!####.####.####.####.####.####.####.####!," "#####,#########\r\n")]; uint16_t dst_port = 0U; struct modem_socket *sock = (struct modem_socket *)obj; const struct modem_cmd handler_cmds[] = { MODEM_CMD("+USOST: ", on_cmd_sockwrite, 2U, ","), MODEM_CMD("+USOWR: ", on_cmd_sockwrite, 2U, ","), }; struct sockaddr *dst_addr = msg->msg_name; size_t buf_len = 0; 
if (!sock) { return -EINVAL; } for (int i = 0; i < msg->msg_iovlen; i++) { if (!msg->msg_iov[i].iov_base || msg->msg_iov[i].iov_len == 0) { errno = EINVAL; return -1; } buf_len += msg->msg_iov[i].iov_len; } if (!sock->is_connected && sock->ip_proto != IPPROTO_UDP) { errno = ENOTCONN; return -1; } if (!dst_addr && sock->ip_proto == IPPROTO_UDP) { dst_addr = &sock->dst; } /* * Binary and ASCII mode allows sending MDM_MAX_DATA_LENGTH bytes to * the socket in one command */ if (buf_len > MDM_MAX_DATA_LENGTH) { if (sock->type == SOCK_DGRAM) { errno = EMSGSIZE; return -1; } buf_len = MDM_MAX_DATA_LENGTH; } /* The number of bytes written will be reported by the modem */ mdata.sock_written = 0; if (sock->ip_proto == IPPROTO_UDP) { char ip_str[NET_IPV6_ADDR_LEN]; ret = modem_context_sprint_ip_addr(dst_addr, ip_str, sizeof(ip_str)); if (ret != 0) { LOG_ERR("Error formatting IP string %d", ret); goto exit; } ret = modem_context_get_addr_port(dst_addr, &dst_port); if (ret != 0) { LOG_ERR("Error getting port from IP address %d", ret); goto exit; } snprintk(send_buf, sizeof(send_buf), "AT+USOST=%d,\"%s\",%u,%zu", sock->id, ip_str, dst_port, buf_len); } else { snprintk(send_buf, sizeof(send_buf), "AT+USOWR=%d,%zu", sock->id, buf_len); } k_sem_take(&mdata.cmd_handler_data.sem_tx_lock, K_FOREVER); /* Reset prompt '@' semaphore */ k_sem_reset(&mdata.sem_prompt); ret = modem_cmd_send_nolock(&mctx.iface, &mctx.cmd_handler, NULL, 0U, send_buf, NULL, K_NO_WAIT); if (ret < 0) { goto exit; } /* set command handlers */ ret = modem_cmd_handler_update_cmds(&mdata.cmd_handler_data, handler_cmds, ARRAY_SIZE(handler_cmds), true); if (ret < 0) { goto exit; } /* Wait for prompt '@' */ ret = k_sem_take(&mdata.sem_prompt, K_SECONDS(1)); if (ret != 0) { ret = -ETIMEDOUT; LOG_ERR("No @ prompt received"); goto exit; } /* * The AT commands manual requires a 50 ms wait * after '@' prompt if using AT+USOWR, but not * if using AT+USOST. This if condition is matched with * the command selection above. 
*/ if (sock->ip_proto != IPPROTO_UDP) { k_sleep(MDM_PROMPT_CMD_DELAY); } /* Reset response semaphore before sending data * So that we are sure that we won't use a previously pending one * And we won't miss the one that is going to be freed */ k_sem_reset(&mdata.sem_response); /* Send data directly on modem iface */ for (int i = 0; i < msg->msg_iovlen; i++) { int len = MIN(buf_len, msg->msg_iov[i].iov_len); if (len == 0) { break; } mctx.iface.write(&mctx.iface, msg->msg_iov[i].iov_base, len); buf_len -= len; } if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { ret = 0; goto exit; } ret = k_sem_take(&mdata.sem_response, timeout); if (ret == 0) { ret = modem_cmd_handler_get_error(&mdata.cmd_handler_data); } else if (ret == -EAGAIN) { ret = -ETIMEDOUT; } exit: /* unset handler commands and ignore any errors */ (void)modem_cmd_handler_update_cmds(&mdata.cmd_handler_data, NULL, 0U, false); k_sem_give(&mdata.cmd_handler_data.sem_tx_lock); if (ret < 0) { return ret; } return mdata.sock_written; } #if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS) /* send binary data via the +USO[ST/WR] commands */ static ssize_t send_cert(struct modem_socket *sock, struct modem_cmd *handler_cmds, size_t handler_cmds_len, const char *cert_data, size_t cert_len, int cert_type) { int ret; char *filename = "ca"; char send_buf[sizeof("AT+USECMNG=#,#,!####!,####\r\n")]; /* TODO support other cert types as well */ if (cert_type != 0) { return -EINVAL; } if (!sock) { return -EINVAL; } __ASSERT_NO_MSG(cert_len <= MDM_MAX_CERT_LENGTH); snprintk(send_buf, sizeof(send_buf), "AT+USECMNG=0,%d,\"%s\",%d", cert_type, filename, cert_len); k_sem_take(&mdata.cmd_handler_data.sem_tx_lock, K_FOREVER); ret = modem_cmd_send_nolock(&mctx.iface, &mctx.cmd_handler, NULL, 0U, send_buf, NULL, K_NO_WAIT); if (ret < 0) { goto exit; } /* set command handlers */ ret = modem_cmd_handler_update_cmds(&mdata.cmd_handler_data, handler_cmds, handler_cmds_len, true); if (ret < 0) { goto exit; } /* slight pause per spec so that @ prompt is 
received */ k_sleep(MDM_PROMPT_CMD_DELAY); mctx.iface.write(&mctx.iface, cert_data, cert_len); k_sem_reset(&mdata.sem_response); ret = k_sem_take(&mdata.sem_response, K_MSEC(1000)); if (ret == 0) { ret = modem_cmd_handler_get_error(&mdata.cmd_handler_data); } else if (ret == -EAGAIN) { ret = -ETIMEDOUT; } exit: /* unset handler commands and ignore any errors */ (void)modem_cmd_handler_update_cmds(&mdata.cmd_handler_data, NULL, 0U, false); k_sem_give(&mdata.cmd_handler_data.sem_tx_lock); return ret; } #endif /* * Modem Response Command Handlers */ /* Handler: OK */ MODEM_CMD_DEFINE(on_cmd_ok) { modem_cmd_handler_set_error(data, 0); k_sem_give(&mdata.sem_response); return 0; } /* Handler: @ */ MODEM_CMD_DEFINE(on_prompt) { k_sem_give(&mdata.sem_prompt); /* A direct cmd should return the number of byte processed. * Therefore, here we always return 1 */ return 1; } /* Handler: ERROR */ MODEM_CMD_DEFINE(on_cmd_error) { modem_cmd_handler_set_error(data, -EIO); k_sem_give(&mdata.sem_response); return 0; } /* Handler: +CME Error: <err>[0] */ MODEM_CMD_DEFINE(on_cmd_exterror) { /* TODO: map extended error codes to values */ modem_cmd_handler_set_error(data, -EIO); k_sem_give(&mdata.sem_response); return 0; } /* * Modem Info Command Handlers */ /* Handler: <manufacturer> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_manufacturer) { size_t out_len; out_len = net_buf_linearize(mdata.mdm_manufacturer, sizeof(mdata.mdm_manufacturer) - 1, data->rx_buf, 0, len); mdata.mdm_manufacturer[out_len] = '\0'; LOG_INF("Manufacturer: %s", mdata.mdm_manufacturer); return 0; } /* Handler: <model> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_model) { size_t out_len; out_len = net_buf_linearize(mdata.mdm_model, sizeof(mdata.mdm_model) - 1, data->rx_buf, 0, len); mdata.mdm_model[out_len] = '\0'; LOG_INF("Model: %s", mdata.mdm_model); #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_VARIANT) /* Set modem type */ if (strstr(mdata.mdm_model, "R4")) { mdata.mdm_variant = MDM_VARIANT_UBLOX_R4; } else { if 
(strstr(mdata.mdm_model, "U2")) { mdata.mdm_variant = MDM_VARIANT_UBLOX_U2; } } LOG_INF("Variant: %d", mdata.mdm_variant); #endif return 0; } /* Handler: <rev> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_revision) { size_t out_len; out_len = net_buf_linearize(mdata.mdm_revision, sizeof(mdata.mdm_revision) - 1, data->rx_buf, 0, len); mdata.mdm_revision[out_len] = '\0'; LOG_INF("Revision: %s", mdata.mdm_revision); return 0; } /* Handler: <IMEI> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_imei) { size_t out_len; out_len = net_buf_linearize(mdata.mdm_imei, sizeof(mdata.mdm_imei) - 1, data->rx_buf, 0, len); mdata.mdm_imei[out_len] = '\0'; LOG_INF("IMEI: %s", mdata.mdm_imei); return 0; } /* Handler: <IMSI> */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_imsi) { size_t out_len; out_len = net_buf_linearize(mdata.mdm_imsi, sizeof(mdata.mdm_imsi) - 1, data->rx_buf, 0, len); mdata.mdm_imsi[out_len] = '\0'; LOG_INF("IMSI: %s", mdata.mdm_imsi); #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) /* set the APN automatically */ modem_detect_apn(mdata.mdm_imsi); #endif return 0; } #if !defined(CONFIG_MODEM_UBLOX_SARA_U2) /* * Handler: +CESQ: <rxlev>[0],<ber>[1],<rscp>[2],<ecn0>[3],<rsrq>[4],<rsrp>[5] */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_rssi_cesq) { int rsrp, rxlev; rsrp = ATOI(argv[5], 0, "rsrp"); rxlev = ATOI(argv[0], 0, "rxlev"); if (rsrp >= 0 && rsrp <= 97) { mdata.mdm_rssi = -140 + (rsrp - 1); LOG_INF("RSRP: %d", mdata.mdm_rssi); } else if (rxlev >= 0 && rxlev <= 63) { mdata.mdm_rssi = -110 + (rxlev - 1); LOG_INF("RSSI: %d", mdata.mdm_rssi); } else { mdata.mdm_rssi = -1000; LOG_INF("RSRP/RSSI not known"); } return 0; } #endif #if defined(CONFIG_MODEM_UBLOX_SARA_U2) \ || defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_VARIANT) /* Handler: +CSQ: <signal_power>[0],<qual>[1] */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_rssi_csq) { int rssi; rssi = ATOI(argv[0], 0, "signal_power"); if (rssi == 31) { mdata.mdm_rssi = -46; } else if (rssi >= 0 && rssi <= 31) { /* FIXME: This value depends on the RAT */ mdata.mdm_rssi = 
-110 + ((rssi * 2) + 1); } else { mdata.mdm_rssi = -1000; } LOG_INF("RSSI: %d", mdata.mdm_rssi); return 0; } #endif #if defined(CONFIG_MODEM_CELL_INFO) static int unquoted_atoi(const char *s, int base) { if (*s == '"') { s++; } return strtol(s, NULL, base); } /* * Handler: +COPS: <mode>[0],<format>[1],<oper>[2] */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_cops) { if (argc >= 3) { mctx.data_operator = unquoted_atoi(argv[2], 10); LOG_INF("operator: %u", mctx.data_operator); } return 0; } /* * Handler: +CEREG: <n>[0],<stat>[1],<tac>[2],<ci>[3],<AcT>[4] */ MODEM_CMD_DEFINE(on_cmd_atcmdinfo_cereg) { if (argc >= 4) { mctx.data_lac = unquoted_atoi(argv[2], 16); mctx.data_cellid = unquoted_atoi(argv[3], 16); LOG_INF("lac: %u, cellid: %u", mctx.data_lac, mctx.data_cellid); } return 0; } static const struct setup_cmd query_cellinfo_cmds[] = { SETUP_CMD_NOHANDLE("AT+CEREG=2"), SETUP_CMD("AT+CEREG?", "", on_cmd_atcmdinfo_cereg, 5U, ","), SETUP_CMD_NOHANDLE("AT+COPS=3,2"), SETUP_CMD("AT+COPS?", "", on_cmd_atcmdinfo_cops, 3U, ","), }; #endif /* CONFIG_MODEM_CELL_INFO */ /* * Modem Socket Command Handlers */ /* Handler: +USOCR: <socket_id>[0] */ MODEM_CMD_DEFINE(on_cmd_sockcreate) { struct modem_socket *sock = NULL; int id; /* look up new socket by special id */ sock = modem_socket_from_newid(&mdata.socket_config); if (sock) { id = ATOI(argv[0], -1, "socket_id"); /* on error give up modem socket */ if (modem_socket_id_assign(&mdata.socket_config, sock, id) < 0) { modem_socket_put(&mdata.socket_config, sock->sock_fd); } } /* don't give back semaphore -- OK to follow */ return 0; } /* Handler: +USO[WR|ST]: <socket_id>[0],<length>[1] */ MODEM_CMD_DEFINE(on_cmd_sockwrite) { mdata.sock_written = ATOI(argv[1], 0, "length"); LOG_DBG("bytes written: %d", mdata.sock_written); return 0; } #if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS) /* Handler: +USECMNG: 0,<type>[0],<internal_name>[1],<md5_string>[2] */ MODEM_CMD_DEFINE(on_cmd_cert_write) { LOG_DBG("cert md5: %s", argv[2]); return 0; } #endif /* 
Common code for +USOR[D|F]: "<data>" */ static int on_cmd_sockread_common(int socket_id, struct modem_cmd_handler_data *data, int socket_data_length, uint16_t len) { struct modem_socket *sock = NULL; struct socket_read_data *sock_data; int ret; if (!len) { LOG_ERR("Short +USOR[D|F] value. Aborting!"); return -EAGAIN; } /* * make sure we still have buf data and next char in the buffer is a * quote. */ if (!data->rx_buf || *data->rx_buf->data != '\"') { LOG_ERR("Incorrect format! Ignoring data!"); return -EINVAL; } /* zero length */ if (socket_data_length <= 0) { LOG_ERR("Length problem (%d). Aborting!", socket_data_length); return -EAGAIN; } /* check to make sure we have all of the data (minus quotes) */ if ((net_buf_frags_len(data->rx_buf) - 2) < socket_data_length) { LOG_DBG("Not enough data -- wait!"); return -EAGAIN; } /* skip quote */ len--; net_buf_pull_u8(data->rx_buf); if (!data->rx_buf->len) { data->rx_buf = net_buf_frag_del(NULL, data->rx_buf); } sock = modem_socket_from_id(&mdata.socket_config, socket_id); if (!sock) { LOG_ERR("Socket not found! (%d)", socket_id); ret = -EINVAL; goto exit; } sock_data = (struct socket_read_data *)sock->data; if (!sock_data) { LOG_ERR("Socket data not found! Skip handling (%d)", socket_id); ret = -EINVAL; goto exit; } ret = net_buf_linearize(sock_data->recv_buf, sock_data->recv_buf_len, data->rx_buf, 0, (uint16_t)socket_data_length); data->rx_buf = net_buf_skip(data->rx_buf, ret); sock_data->recv_read_len = ret; if (ret != socket_data_length) { LOG_ERR("Total copied data is different then received data!" " copied:%d vs. 
received:%d", ret, socket_data_length); ret = -EINVAL; } exit: /* remove packet from list (ignore errors) */ (void)modem_socket_packet_size_update(&mdata.socket_config, sock, -socket_data_length); /* don't give back semaphore -- OK to follow */ return ret; } /* * Handler: +USORF: <socket_id>[0],<remote_ip_addr>[1],<remote_port>[2], * <length>[3],"<data>" */ MODEM_CMD_DEFINE(on_cmd_sockreadfrom) { /* TODO: handle remote_ip_addr */ return on_cmd_sockread_common(ATOI(argv[0], 0, "socket_id"), data, ATOI(argv[3], 0, "length"), len); } /* Handler: +USORD: <socket_id>[0],<length>[1],"<data>" */ MODEM_CMD_DEFINE(on_cmd_sockread) { return on_cmd_sockread_common(ATOI(argv[0], 0, "socket_id"), data, ATOI(argv[1], 0, "length"), len); } #if defined(CONFIG_DNS_RESOLVER) /* Handler: +UDNSRN: "<resolved_ip_address>"[0], "<resolved_ip_address>"[1] */ MODEM_CMD_DEFINE(on_cmd_dns) { /* chop off end quote */ argv[0][strlen(argv[0]) - 1] = '\0'; /* FIXME: Hard-code DNS on SARA-R4 to return IPv4 */ result_addr.sa_family = AF_INET; /* skip beginning quote when parsing */ (void)net_addr_pton(result.ai_family, &argv[0][1], &((struct sockaddr_in *)&result_addr)->sin_addr); return 0; } #endif /* * MODEM UNSOLICITED NOTIFICATION HANDLERS */ /* Handler: +UUSOCL: <socket_id>[0] */ MODEM_CMD_DEFINE(on_cmd_socknotifyclose) { struct modem_socket *sock; sock = modem_socket_from_id(&mdata.socket_config, ATOI(argv[0], 0, "socket_id")); if (sock) { sock->is_connected = false; } return 0; } /* Handler: +UUSOR[D|F]: <socket_id>[0],<length>[1] */ MODEM_CMD_DEFINE(on_cmd_socknotifydata) { int ret, socket_id, new_total; struct modem_socket *sock; socket_id = ATOI(argv[0], 0, "socket_id"); new_total = ATOI(argv[1], 0, "length"); sock = modem_socket_from_id(&mdata.socket_config, socket_id); if (!sock) { return 0; } ret = modem_socket_packet_size_update(&mdata.socket_config, sock, new_total); if (ret < 0) { LOG_ERR("socket_id:%d left_bytes:%d err: %d", socket_id, new_total, ret); } if (new_total > 0) { 
modem_socket_data_ready(&mdata.socket_config, sock); } return 0; } /* Handler: +CREG: <stat>[0] */ MODEM_CMD_DEFINE(on_cmd_socknotifycreg) { mdata.ev_creg = ATOI(argv[0], 0, "stat"); LOG_DBG("CREG:%d", mdata.ev_creg); return 0; } /* RX thread */ static void modem_rx(void *p1, void *p2, void *p3) { ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3); while (true) { /* wait for incoming data */ modem_iface_uart_rx_wait(&mctx.iface, K_FOREVER); modem_cmd_handler_process(&mctx.cmd_handler, &mctx.iface); /* give up time if we have a solid stream of data */ k_yield(); } } static int pin_init(void) { LOG_INF("Setting Modem Pins"); #if DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) LOG_DBG("MDM_RESET_PIN -> NOT_ASSERTED"); gpio_pin_set_dt(&reset_gpio, MDM_RESET_NOT_ASSERTED); #endif LOG_DBG("MDM_POWER_PIN -> ENABLE"); gpio_pin_set_dt(&power_gpio, 1); k_sleep(K_SECONDS(4)); LOG_DBG("MDM_POWER_PIN -> DISABLE"); gpio_pin_set_dt(&power_gpio, 0); #if defined(CONFIG_MODEM_UBLOX_SARA_U2) k_sleep(K_SECONDS(1)); #else k_sleep(K_SECONDS(4)); #endif LOG_DBG("MDM_POWER_PIN -> ENABLE"); gpio_pin_set_dt(&power_gpio, 1); k_sleep(K_SECONDS(1)); /* make sure module is powered off */ #if DT_INST_NODE_HAS_PROP(0, mdm_vint_gpios) LOG_DBG("Waiting for MDM_VINT_PIN = 0"); while (gpio_pin_get_dt(&vint_gpio) > 0) { #if defined(CONFIG_MODEM_UBLOX_SARA_U2) /* try to power off again */ LOG_DBG("MDM_POWER_PIN -> DISABLE"); gpio_pin_set_dt(&power_gpio, 0); k_sleep(K_SECONDS(1)); LOG_DBG("MDM_POWER_PIN -> ENABLE"); gpio_pin_set_dt(&power_gpio, 1); #endif k_sleep(K_MSEC(100)); } #else k_sleep(K_SECONDS(8)); #endif LOG_DBG("MDM_POWER_PIN -> DISABLE"); unsigned int irq_lock_key = irq_lock(); gpio_pin_set_dt(&power_gpio, 0); #if defined(CONFIG_MODEM_UBLOX_SARA_U2) k_usleep(50); /* 50-80 microseconds */ #else k_sleep(K_SECONDS(1)); #endif gpio_pin_set_dt(&power_gpio, 1); irq_unlock(irq_lock_key); LOG_DBG("MDM_POWER_PIN -> ENABLE"); #if DT_INST_NODE_HAS_PROP(0, mdm_vint_gpios) LOG_DBG("Waiting for MDM_VINT_PIN = 1"); 
do { k_sleep(K_MSEC(100)); } while (gpio_pin_get_dt(&vint_gpio) == 0); #else k_sleep(K_SECONDS(10)); #endif gpio_pin_configure_dt(&power_gpio, GPIO_INPUT); LOG_INF("... Done!"); return 0; } #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_VARIANT) static void modem_rssi_query_work(struct k_work *work) { static const struct modem_cmd cmds[] = { MODEM_CMD("+CSQ: ", on_cmd_atcmdinfo_rssi_csq, 2U, ","), MODEM_CMD("+CESQ: ", on_cmd_atcmdinfo_rssi_cesq, 6U, ","), }; const char *send_cmd_u2 = "AT+CSQ"; const char *send_cmd_r4 = "AT+CESQ"; int ret; /* choose cmd according to variant */ const char *send_cmd = send_cmd_r4; if (mdata.mdm_variant == MDM_VARIANT_UBLOX_U2) { send_cmd = send_cmd_u2; } /* query modem RSSI */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmds, ARRAY_SIZE(cmds), send_cmd, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT+C[E]SQ ret:%d", ret); } #if defined(CONFIG_MODEM_CELL_INFO) /* query cell info */ ret = modem_cmd_handler_setup_cmds_nolock(&mctx.iface, &mctx.cmd_handler, query_cellinfo_cmds, ARRAY_SIZE(query_cellinfo_cmds), &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("modem query for cell info returned %d", ret); } #endif #if defined(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK) /* re-start RSSI query work */ if (work) { k_work_reschedule_for_queue( &modem_workq, &mdata.rssi_query_work, K_SECONDS(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK_PERIOD)); } #endif } #else static void modem_rssi_query_work(struct k_work *work) { static const struct modem_cmd cmd = #if defined(CONFIG_MODEM_UBLOX_SARA_U2) MODEM_CMD("+CSQ: ", on_cmd_atcmdinfo_rssi_csq, 2U, ","); static char *send_cmd = "AT+CSQ"; #else MODEM_CMD("+CESQ: ", on_cmd_atcmdinfo_rssi_cesq, 6U, ","); static char *send_cmd = "AT+CESQ"; #endif int ret; /* query modem RSSI */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, &cmd, 1U, send_cmd, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("AT+C[E]SQ ret:%d", ret); } #if defined(CONFIG_MODEM_CELL_INFO) /* query 
cell info */ ret = modem_cmd_handler_setup_cmds_nolock(&mctx.iface, &mctx.cmd_handler, query_cellinfo_cmds, ARRAY_SIZE(query_cellinfo_cmds), &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_WRN("modem query for cell info returned %d", ret); } #endif #if defined(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK) /* re-start RSSI query work */ if (work) { k_work_reschedule_for_queue( &modem_workq, &mdata.rssi_query_work, K_SECONDS(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK_PERIOD)); } #endif } #endif static void modem_reset(void) { int ret = 0, retry_count = 0, counter = 0; static const struct setup_cmd setup_cmds[] = { /* turn off echo */ SETUP_CMD_NOHANDLE("ATE0"), /* stop functionality */ SETUP_CMD_NOHANDLE("AT+CFUN=0"), /* extended error numbers */ SETUP_CMD_NOHANDLE("AT+CMEE=1"), #if defined(CONFIG_BOARD_PARTICLE_BORON) /* use external SIM */ SETUP_CMD_NOHANDLE("AT+UGPIOC=23,0,0"), #endif #if defined(CONFIG_MODEM_UBLOX_SARA_R4_NET_STATUS_PIN) /* enable the network status indication */ SETUP_CMD_NOHANDLE("AT+UGPIOC=" STRINGIFY(CONFIG_MODEM_UBLOX_SARA_R4_NET_STATUS_PIN) ",2"), #endif /* UNC messages for registration */ SETUP_CMD_NOHANDLE("AT+CREG=1"), /* query modem info */ SETUP_CMD("AT+CGMI", "", on_cmd_atcmdinfo_manufacturer, 0U, ""), SETUP_CMD("AT+CGMM", "", on_cmd_atcmdinfo_model, 0U, ""), SETUP_CMD("AT+CGMR", "", on_cmd_atcmdinfo_revision, 0U, ""), SETUP_CMD("AT+CGSN", "", on_cmd_atcmdinfo_imei, 0U, ""), SETUP_CMD("AT+CIMI", "", on_cmd_atcmdinfo_imsi, 0U, ""), #if !defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) /* setup PDP context definition */ SETUP_CMD_NOHANDLE("AT+CGDCONT=1,\"IP\",\"" CONFIG_MODEM_UBLOX_SARA_R4_APN "\""), /* start functionality */ SETUP_CMD_NOHANDLE("AT+CFUN=1"), #endif }; #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_VARIANT) static const struct setup_cmd post_setup_cmds_u2[] = { #if !defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) /* set the APN */ SETUP_CMD_NOHANDLE("AT+UPSD=0,1,\"" CONFIG_MODEM_UBLOX_SARA_R4_APN "\""), #endif /* set dynamic IP */ 
SETUP_CMD_NOHANDLE("AT+UPSD=0,7,\"0.0.0.0\""), /* activate the GPRS connection */ SETUP_CMD_NOHANDLE("AT+UPSDA=0,3"), }; #endif static const struct setup_cmd post_setup_cmds[] = { #if defined(CONFIG_MODEM_UBLOX_SARA_U2) /* set the APN */ SETUP_CMD_NOHANDLE("AT+UPSD=0,1,\"" CONFIG_MODEM_UBLOX_SARA_R4_APN "\""), /* set dynamic IP */ SETUP_CMD_NOHANDLE("AT+UPSD=0,7,\"0.0.0.0\""), /* activate the GPRS connection */ SETUP_CMD_NOHANDLE("AT+UPSDA=0,3"), #else /* activate the PDP context */ SETUP_CMD_NOHANDLE("AT+CGACT=1,1"), #endif }; restart: #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) mdata.mdm_apn[0] = '\0'; strncat(mdata.mdm_apn, CONFIG_MODEM_UBLOX_SARA_R4_APN, sizeof(mdata.mdm_apn)-1); #endif #if defined(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK) /* stop RSSI delay work */ k_work_cancel_delayable(&mdata.rssi_query_work); #endif pin_init(); LOG_INF("Waiting for modem to respond"); /* Give the modem a while to start responding to simple 'AT' commands. * Also wait for CSPS=1 or RRCSTATE=1 notification */ ret = -1; while (counter++ < 50 && ret < 0) { k_sleep(K_SECONDS(2)); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, "AT", &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0 && ret != -ETIMEDOUT) { break; } } if (ret < 0) { LOG_ERR("MODEM WAIT LOOP ERROR: %d", ret); goto error; } ret = modem_cmd_handler_setup_cmds(&mctx.iface, &mctx.cmd_handler, setup_cmds, ARRAY_SIZE(setup_cmds), &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); if (ret < 0) { goto error; } #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) /* autodetect APN from IMSI */ char cmd[sizeof("AT+CGDCONT=1,\"IP\",\"\"")+MDM_APN_LENGTH]; snprintk(cmd, sizeof(cmd), "AT+CGDCONT=1,\"IP\",\"%s\"", mdata.mdm_apn); /* setup PDP context definition */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, (const char *)cmd, &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, "AT+CFUN=1", &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); 
#endif if (strlen(CONFIG_MODEM_UBLOX_SARA_R4_MANUAL_MCCMNO) > 0) { /* use manual MCC/MNO entry */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, "AT+COPS=1,2,\"" CONFIG_MODEM_UBLOX_SARA_R4_MANUAL_MCCMNO "\"", &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); } else { /* register operator automatically */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, "AT+COPS=0,0", &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); } if (ret < 0) { LOG_ERR("AT+COPS ret:%d", ret); goto error; } LOG_INF("Waiting for network"); /* * TODO: A lot of this should be setup as a 3GPP module to handle * basic connection to the network commands / polling */ /* wait for +CREG: 1(normal) or 5(roaming) */ counter = 0; while (counter++ < 40 && mdata.ev_creg != 1 && mdata.ev_creg != 5) { if (counter == 20) { LOG_WRN("Force restart of RF functionality"); /* Disable RF temporarily */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, "AT+CFUN=0", &mdata.sem_response, MDM_CMD_TIMEOUT); k_sleep(K_SECONDS(1)); /* Enable RF */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, "AT+CFUN=1", &mdata.sem_response, MDM_CMD_TIMEOUT); } k_sleep(K_SECONDS(1)); } /* query modem RSSI */ modem_rssi_query_work(NULL); k_sleep(MDM_WAIT_FOR_RSSI_DELAY); counter = 0; /* wait for RSSI < 0 and > -1000 */ while (counter++ < MDM_WAIT_FOR_RSSI_COUNT && (mdata.mdm_rssi >= 0 || mdata.mdm_rssi <= -1000)) { modem_rssi_query_work(NULL); k_sleep(MDM_WAIT_FOR_RSSI_DELAY); } if (mdata.mdm_rssi >= 0 || mdata.mdm_rssi <= -1000) { retry_count++; if (retry_count >= MDM_NETWORK_RETRY_COUNT) { LOG_ERR("Failed network init. Too many attempts!"); ret = -ENETUNREACH; goto error; } LOG_ERR("Failed network init. 
Restarting process."); goto restart; } #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_VARIANT) if (mdata.mdm_variant == MDM_VARIANT_UBLOX_U2) { #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_APN) /* setup PDP context definition */ char cmd[sizeof("AT+UPSD=0,1,\"%s\"")+MDM_APN_LENGTH]; snprintk(cmd, sizeof(cmd), "AT+UPSD=0,1,\"%s\"", mdata.mdm_apn); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0, (const char *)cmd, &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); #endif ret = modem_cmd_handler_setup_cmds(&mctx.iface, &mctx.cmd_handler, post_setup_cmds_u2, ARRAY_SIZE(post_setup_cmds_u2), &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); } else { #endif ret = modem_cmd_handler_setup_cmds(&mctx.iface, &mctx.cmd_handler, post_setup_cmds, ARRAY_SIZE(post_setup_cmds), &mdata.sem_response, MDM_REGISTRATION_TIMEOUT); #if defined(CONFIG_MODEM_UBLOX_SARA_AUTODETECT_VARIANT) } #endif if (ret < 0) { goto error; } LOG_INF("Network is ready."); #if defined(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK) /* start RSSI query */ k_work_reschedule_for_queue( &modem_workq, &mdata.rssi_query_work, K_SECONDS(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK_PERIOD)); #endif error: return; } /* * generic socket creation function * which can be called in bind() or connect() */ static int create_socket(struct modem_socket *sock, const struct sockaddr *addr) { int ret; static const struct modem_cmd cmd = MODEM_CMD("+USOCR: ", on_cmd_sockcreate, 1U, ""); char buf[sizeof("AT+USOCR=#,#####\r")]; uint16_t local_port = 0U, proto = 6U; if (addr) { if (addr->sa_family == AF_INET6) { local_port = ntohs(net_sin6(addr)->sin6_port); } else if (addr->sa_family == AF_INET) { local_port = ntohs(net_sin(addr)->sin_port); } } if (sock->ip_proto == IPPROTO_UDP) { proto = 17U; } if (local_port > 0U) { snprintk(buf, sizeof(buf), "AT+USOCR=%d,%u", proto, local_port); } else { snprintk(buf, sizeof(buf), "AT+USOCR=%d", proto); } /* create socket */ ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, &cmd, 1U, buf, 
&mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { goto error; } if (sock->ip_proto == IPPROTO_TLS_1_2) { char atbuf[sizeof("AT+USECPRF=#,#,#######\r")]; /* Enable socket security */ snprintk(atbuf, sizeof(atbuf), "AT+USOSEC=%d,1,%d", sock->id, sock->id); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, atbuf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { goto error; } /* Reset the security profile */ snprintk(atbuf, sizeof(atbuf), "AT+USECPRF=%d", sock->id); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, atbuf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { goto error; } /* Validate server cert against the CA. */ snprintk(atbuf, sizeof(atbuf), "AT+USECPRF=%d,0,1", sock->id); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, atbuf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { goto error; } /* Use TLSv1.2 only */ snprintk(atbuf, sizeof(atbuf), "AT+USECPRF=%d,1,3", sock->id); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, atbuf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { goto error; } /* Set root CA filename */ snprintk(atbuf, sizeof(atbuf), "AT+USECPRF=%d,3,\"ca\"", sock->id); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, atbuf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { goto error; } } errno = 0; return 0; error: LOG_ERR("%s ret:%d", buf, ret); modem_socket_put(&mdata.socket_config, sock->sock_fd); errno = -ret; return -1; } /* * Socket Offload OPS */ static const struct socket_op_vtable offload_socket_fd_op_vtable; static int offload_socket(int family, int type, int proto) { int ret; /* defer modem's socket create call to bind() */ ret = modem_socket_get(&mdata.socket_config, family, type, proto); if (ret < 0) { errno = -ret; return -1; } errno = 0; return ret; } static int offload_close(void *obj) { struct modem_socket *sock = (struct modem_socket *)obj; char buf[sizeof("AT+USOCL=#\r")]; int ret; /* make sure socket is allocated and 
assigned an id */ if (modem_socket_id_is_assigned(&mdata.socket_config, sock) == false) { return 0; } if (sock->is_connected || sock->ip_proto == IPPROTO_UDP) { snprintk(buf, sizeof(buf), "AT+USOCL=%d", sock->id); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { LOG_ERR("%s ret:%d", buf, ret); } } modem_socket_put(&mdata.socket_config, sock->sock_fd); return 0; } static int offload_bind(void *obj, const struct sockaddr *addr, socklen_t addrlen) { struct modem_socket *sock = (struct modem_socket *)obj; /* save bind address information */ memcpy(&sock->src, addr, sizeof(*addr)); /* make sure we've created the socket */ if (modem_socket_is_allocated(&mdata.socket_config, sock) == true) { if (create_socket(sock, addr) < 0) { return -1; } } return 0; } static int offload_connect(void *obj, const struct sockaddr *addr, socklen_t addrlen) { struct modem_socket *sock = (struct modem_socket *)obj; int ret; char buf[sizeof("AT+USOCO=###,!####.####.####.####.####.####.####.####!,#####,#\r")]; uint16_t dst_port = 0U; char ip_str[NET_IPV6_ADDR_LEN]; if (!addr) { errno = EINVAL; return -1; } /* make sure socket has been allocated */ if (modem_socket_is_allocated(&mdata.socket_config, sock) == false) { LOG_ERR("Invalid socket_id(%d) from fd:%d", sock->id, sock->sock_fd); errno = EINVAL; return -1; } /* make sure we've created the socket */ if (modem_socket_id_is_assigned(&mdata.socket_config, sock) == false) { if (create_socket(sock, NULL) < 0) { return -1; } } memcpy(&sock->dst, addr, sizeof(*addr)); if (addr->sa_family == AF_INET6) { dst_port = ntohs(net_sin6(addr)->sin6_port); } else if (addr->sa_family == AF_INET) { dst_port = ntohs(net_sin(addr)->sin_port); } else { errno = EAFNOSUPPORT; return -1; } /* skip socket connect if UDP */ if (sock->ip_proto == IPPROTO_UDP) { errno = 0; return 0; } ret = modem_context_sprint_ip_addr(addr, ip_str, sizeof(ip_str)); if (ret != 0) { errno = -ret; LOG_ERR("Error 
formatting IP string %d", ret); return -1; } snprintk(buf, sizeof(buf), "AT+USOCO=%d,\"%s\",%d", sock->id, ip_str, dst_port); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, NULL, 0U, buf, &mdata.sem_response, MDM_CMD_CONN_TIMEOUT); if (ret < 0) { LOG_ERR("%s ret:%d", buf, ret); errno = -ret; return -1; } sock->is_connected = true; errno = 0; return 0; } static ssize_t offload_recvfrom(void *obj, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlen) { struct modem_socket *sock = (struct modem_socket *)obj; int ret, next_packet_size; static const struct modem_cmd cmd[] = { MODEM_CMD("+USORF: ", on_cmd_sockreadfrom, 4U, ","), MODEM_CMD("+USORD: ", on_cmd_sockread, 2U, ","), }; char sendbuf[sizeof("AT+USORF=#,#####\r")]; struct socket_read_data sock_data; if (!buf || len == 0) { errno = EINVAL; return -1; } if (flags & ZSOCK_MSG_PEEK) { errno = ENOTSUP; return -1; } next_packet_size = modem_socket_next_packet_size(&mdata.socket_config, sock); if (!next_packet_size) { if (flags & ZSOCK_MSG_DONTWAIT) { errno = EAGAIN; return -1; } if (!sock->is_connected && sock->ip_proto != IPPROTO_UDP) { errno = 0; return 0; } modem_socket_wait_data(&mdata.socket_config, sock); next_packet_size = modem_socket_next_packet_size( &mdata.socket_config, sock); } /* * Binary and ASCII mode allows sending MDM_MAX_DATA_LENGTH bytes to * the socket in one command */ if (next_packet_size > MDM_MAX_DATA_LENGTH) { next_packet_size = MDM_MAX_DATA_LENGTH; } snprintk(sendbuf, sizeof(sendbuf), "AT+USO%s=%d,%zd", sock->ip_proto == IPPROTO_UDP ? "RF" : "RD", sock->id, len < next_packet_size ? 
len : next_packet_size); /* socket read settings */ (void)memset(&sock_data, 0, sizeof(sock_data)); sock_data.recv_buf = buf; sock_data.recv_buf_len = len; sock_data.recv_addr = from; sock->data = &sock_data; ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, cmd, ARRAY_SIZE(cmd), sendbuf, &mdata.sem_response, MDM_CMD_TIMEOUT); if (ret < 0) { errno = -ret; ret = -1; goto exit; } /* HACK: use dst address as from */ if (from && fromlen) { *fromlen = sizeof(sock->dst); memcpy(from, &sock->dst, *fromlen); } /* return length of received data */ errno = 0; ret = sock_data.recv_read_len; exit: /* clear socket data */ sock->data = NULL; return ret; } static ssize_t offload_sendto(void *obj, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen) { struct iovec msg_iov = { .iov_base = (void *)buf, .iov_len = len, }; struct msghdr msg = { .msg_iovlen = 1, .msg_name = (struct sockaddr *)to, .msg_namelen = tolen, .msg_iov = &msg_iov, }; int ret = send_socket_data(obj, &msg, MDM_CMD_TIMEOUT); if (ret < 0) { errno = -ret; return -1; } errno = 0; return ret; } static int offload_ioctl(void *obj, unsigned int request, va_list args) { switch (request) { case ZFD_IOCTL_POLL_PREPARE: { struct zsock_pollfd *pfd; struct k_poll_event **pev; struct k_poll_event *pev_end; pfd = va_arg(args, struct zsock_pollfd *); pev = va_arg(args, struct k_poll_event **); pev_end = va_arg(args, struct k_poll_event *); return modem_socket_poll_prepare(&mdata.socket_config, obj, pfd, pev, pev_end); } case ZFD_IOCTL_POLL_UPDATE: { struct zsock_pollfd *pfd; struct k_poll_event **pev; pfd = va_arg(args, struct zsock_pollfd *); pev = va_arg(args, struct k_poll_event **); return modem_socket_poll_update(obj, pfd, pev); } case F_GETFL: return 0; default: errno = EINVAL; return -1; } } static ssize_t offload_read(void *obj, void *buffer, size_t count) { return offload_recvfrom(obj, buffer, count, 0, NULL, 0); } static ssize_t offload_write(void *obj, const void *buffer, size_t 
count) { return offload_sendto(obj, buffer, count, 0, NULL, 0); } static ssize_t offload_sendmsg(void *obj, const struct msghdr *msg, int flags) { ssize_t sent = 0; int bkp_iovec_idx; struct iovec bkp_iovec = {0}; struct msghdr crafted_msg = { .msg_name = msg->msg_name, .msg_namelen = msg->msg_namelen, }; size_t full_len = 0; int ret; /* Compute the full length to be send and check for invalid values */ for (int i = 0; i < msg->msg_iovlen; i++) { if (!msg->msg_iov[i].iov_base || msg->msg_iov[i].iov_len == 0) { errno = EINVAL; return -1; } full_len += msg->msg_iov[i].iov_len; } LOG_DBG("msg_iovlen:%zd flags:%d, full_len:%zd", msg->msg_iovlen, flags, full_len); while (full_len > sent) { int removed = 0; int i = 0; crafted_msg.msg_iovlen = msg->msg_iovlen; crafted_msg.msg_iov = &msg->msg_iov[0]; bkp_iovec_idx = -1; /* Iterate on iovec to remove the bytes already sent */ while (removed < sent) { int to_removed = sent - removed; if (to_removed >= msg->msg_iov[i].iov_len) { crafted_msg.msg_iovlen -= 1; crafted_msg.msg_iov = &msg->msg_iov[i + 1]; removed += msg->msg_iov[i].iov_len; } else { /* Backup msg->msg_iov[i] before "removing" * starting bytes already send. */ bkp_iovec_idx = i; bkp_iovec.iov_len = msg->msg_iov[i].iov_len; bkp_iovec.iov_base = msg->msg_iov[i].iov_base; /* Update msg->msg_iov[i] to "remove" * starting bytes already send. 
*/ msg->msg_iov[i].iov_len -= to_removed; msg->msg_iov[i].iov_base = &(((uint8_t *)msg->msg_iov[i].iov_base)[to_removed]); removed += to_removed; } i++; } ret = send_socket_data(obj, &crafted_msg, MDM_CMD_TIMEOUT); /* Restore backup iovec when necessary */ if (bkp_iovec_idx != -1) { msg->msg_iov[bkp_iovec_idx].iov_len = bkp_iovec.iov_len; msg->msg_iov[bkp_iovec_idx].iov_base = bkp_iovec.iov_base; } /* Handle send_socket_data() returned value */ if (ret < 0) { errno = -ret; return -1; } sent += ret; } return (ssize_t)sent; } #if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS) static int map_credentials(struct modem_socket *sock, const void *optval, socklen_t optlen) { sec_tag_t *sec_tags = (sec_tag_t *)optval; int ret = 0; int tags_len; sec_tag_t tag; int id; int i; struct tls_credential *cert; if ((optlen % sizeof(sec_tag_t)) != 0 || (optlen == 0)) { return -EINVAL; } tags_len = optlen / sizeof(sec_tag_t); /* For each tag, retrieve the credentials value and type: */ for (i = 0; i < tags_len; i++) { tag = sec_tags[i]; cert = credential_next_get(tag, NULL); while (cert != NULL) { switch (cert->type) { case TLS_CREDENTIAL_CA_CERTIFICATE: id = 0; break; case TLS_CREDENTIAL_NONE: case TLS_CREDENTIAL_PSK: case TLS_CREDENTIAL_PSK_ID: default: /* Not handled */ return -EINVAL; } struct modem_cmd cmd[] = { MODEM_CMD("+USECMNG: ", on_cmd_cert_write, 3U, ","), }; ret = send_cert(sock, cmd, 1, cert->buf, cert->len, id); if (ret < 0) { return ret; } cert = credential_next_get(tag, cert); } } return 0; } #else static int map_credentials(struct modem_socket *sock, const void *optval, socklen_t optlen) { return -EINVAL; } #endif static int offload_setsockopt(void *obj, int level, int optname, const void *optval, socklen_t optlen) { struct modem_socket *sock = (struct modem_socket *)obj; int ret; if (IS_ENABLED(CONFIG_NET_SOCKETS_SOCKOPT_TLS) && level == SOL_TLS) { switch (optname) { case TLS_SEC_TAG_LIST: ret = map_credentials(sock, optval, optlen); break; case TLS_HOSTNAME: 
LOG_WRN("TLS_HOSTNAME option is not supported"); return -EINVAL; case TLS_PEER_VERIFY: if (*(uint32_t *)optval != TLS_PEER_VERIFY_REQUIRED) { LOG_WRN("Disabling peer verification is not supported"); return -EINVAL; } ret = 0; break; default: return -EINVAL; } } else { return -EINVAL; } return ret; } static const struct socket_op_vtable offload_socket_fd_op_vtable = { .fd_vtable = { .read = offload_read, .write = offload_write, .close = offload_close, .ioctl = offload_ioctl, }, .bind = offload_bind, .connect = offload_connect, .sendto = offload_sendto, .recvfrom = offload_recvfrom, .listen = NULL, .accept = NULL, .sendmsg = offload_sendmsg, .getsockopt = NULL, .setsockopt = offload_setsockopt, }; static bool offload_is_supported(int family, int type, int proto) { if (family != AF_INET && family != AF_INET6) { return false; } if (type != SOCK_DGRAM && type != SOCK_STREAM) { return false; } if (proto != IPPROTO_TCP && proto != IPPROTO_UDP && proto != IPPROTO_TLS_1_2) { return false; } return true; } NET_SOCKET_OFFLOAD_REGISTER(ublox_sara_r4, CONFIG_NET_SOCKETS_OFFLOAD_PRIORITY, AF_UNSPEC, offload_is_supported, offload_socket); #if defined(CONFIG_DNS_RESOLVER) /* TODO: This is a bare-bones implementation of DNS handling * We ignore most of the hints like ai_family, ai_protocol and ai_socktype. * Later, we can add additional handling if it makes sense. 
*/ static int offload_getaddrinfo(const char *node, const char *service, const struct zsock_addrinfo *hints, struct zsock_addrinfo **res) { static const struct modem_cmd cmd = MODEM_CMD("+UDNSRN: ", on_cmd_dns, 1U, ","); uint32_t port = 0U; int ret; /* DNS command + 128 bytes for domain name parameter */ char sendbuf[sizeof("AT+UDNSRN=#,'[]'\r") + 128]; /* init result */ (void)memset(&result, 0, sizeof(result)); (void)memset(&result_addr, 0, sizeof(result_addr)); /* FIXME: Hard-code DNS to return only IPv4 */ result.ai_family = AF_INET; result_addr.sa_family = AF_INET; result.ai_addr = &result_addr; result.ai_addrlen = sizeof(result_addr); result.ai_canonname = result_canonname; result_canonname[0] = '\0'; if (service) { port = ATOI(service, 0U, "port"); if (port < 1 || port > USHRT_MAX) { return DNS_EAI_SERVICE; } } if (port > 0U) { /* FIXME: DNS is hard-coded to return only IPv4 */ if (result.ai_family == AF_INET) { net_sin(&result_addr)->sin_port = htons(port); } } /* check to see if node is an IP address */ if (net_addr_pton(result.ai_family, node, &((struct sockaddr_in *)&result_addr)->sin_addr) == 0) { *res = &result; return 0; } /* user flagged node as numeric host, but we failed net_addr_pton */ if (hints && hints->ai_flags & AI_NUMERICHOST) { return DNS_EAI_NONAME; } snprintk(sendbuf, sizeof(sendbuf), "AT+UDNSRN=0,\"%s\"", node); ret = modem_cmd_send(&mctx.iface, &mctx.cmd_handler, &cmd, 1U, sendbuf, &mdata.sem_response, MDM_DNS_TIMEOUT); if (ret < 0) { return ret; } LOG_DBG("DNS RESULT: %s", net_addr_ntop(result.ai_family, &net_sin(&result_addr)->sin_addr, sendbuf, NET_IPV4_ADDR_LEN)); *res = (struct zsock_addrinfo *)&result; return 0; } static void offload_freeaddrinfo(struct zsock_addrinfo *res) { /* using static result from offload_getaddrinfo() -- no need to free */ ARG_UNUSED(res); } static const struct socket_dns_offload offload_dns_ops = { .getaddrinfo = offload_getaddrinfo, .freeaddrinfo = offload_freeaddrinfo, }; #endif static int 
net_offload_dummy_get(sa_family_t family, enum net_sock_type type, enum net_ip_protocol ip_proto, struct net_context **context) { LOG_ERR("CONFIG_NET_SOCKETS_OFFLOAD must be enabled for this driver"); return -ENOTSUP; } /* placeholders, until Zephyr IP stack updated to handle a NULL net_offload */ static struct net_offload modem_net_offload = { .get = net_offload_dummy_get, }; #define HASH_MULTIPLIER 37 static uint32_t hash32(char *str, int len) { uint32_t h = 0; int i; for (i = 0; i < len; ++i) { h = (h * HASH_MULTIPLIER) + str[i]; } return h; } static inline uint8_t *modem_get_mac(const struct device *dev) { struct modem_data *data = dev->data; uint32_t hash_value; data->mac_addr[0] = 0x00; data->mac_addr[1] = 0x10; /* use IMEI for mac_addr */ hash_value = hash32(mdata.mdm_imei, strlen(mdata.mdm_imei)); UNALIGNED_PUT(hash_value, (uint32_t *)(data->mac_addr + 2)); return data->mac_addr; } static int offload_socket(int family, int type, int proto); static void modem_net_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct modem_data *data = dev->data; /* Direct socket offload used instead of net offload: */ iface->if_dev->offload = &modem_net_offload; net_if_set_link_addr(iface, modem_get_mac(dev), sizeof(data->mac_addr), NET_LINK_ETHERNET); data->net_iface = iface; #ifdef CONFIG_DNS_RESOLVER socket_offload_dns_register(&offload_dns_ops); #endif net_if_socket_offload_set(iface, offload_socket); } static struct offloaded_if_api api_funcs = { .iface_api.init = modem_net_iface_init, }; static const struct modem_cmd response_cmds[] = { MODEM_CMD("OK", on_cmd_ok, 0U, ""), /* 3GPP */ MODEM_CMD("ERROR", on_cmd_error, 0U, ""), /* 3GPP */ MODEM_CMD("+CME ERROR: ", on_cmd_exterror, 1U, ""), MODEM_CMD_DIRECT("@", on_prompt), }; static const struct modem_cmd unsol_cmds[] = { MODEM_CMD("+UUSOCL: ", on_cmd_socknotifyclose, 1U, ""), MODEM_CMD("+UUSORD: ", on_cmd_socknotifydata, 2U, ","), MODEM_CMD("+UUSORF: ", on_cmd_socknotifydata, 2U, 
","), MODEM_CMD("+CREG: ", on_cmd_socknotifycreg, 1U, ""), }; static int modem_init(const struct device *dev) { int ret = 0; ARG_UNUSED(dev); k_sem_init(&mdata.sem_response, 0, 1); k_sem_init(&mdata.sem_prompt, 0, 1); #if defined(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK) /* initialize the work queue */ k_work_queue_start(&modem_workq, modem_workq_stack, K_KERNEL_STACK_SIZEOF(modem_workq_stack), K_PRIO_COOP(7), NULL); #endif /* socket config */ ret = modem_socket_init(&mdata.socket_config, &mdata.sockets[0], ARRAY_SIZE(mdata.sockets), MDM_BASE_SOCKET_NUM, false, &offload_socket_fd_op_vtable); if (ret < 0) { goto error; } /* cmd handler */ const struct modem_cmd_handler_config cmd_handler_config = { .match_buf = &mdata.cmd_match_buf[0], .match_buf_len = sizeof(mdata.cmd_match_buf), .buf_pool = &mdm_recv_pool, .alloc_timeout = K_NO_WAIT, .eol = "\r", .user_data = NULL, .response_cmds = response_cmds, .response_cmds_len = ARRAY_SIZE(response_cmds), .unsol_cmds = unsol_cmds, .unsol_cmds_len = ARRAY_SIZE(unsol_cmds), }; ret = modem_cmd_handler_init(&mctx.cmd_handler, &mdata.cmd_handler_data, &cmd_handler_config); if (ret < 0) { goto error; } /* modem interface */ const struct modem_iface_uart_config uart_config = { .rx_rb_buf = &mdata.iface_rb_buf[0], .rx_rb_buf_len = sizeof(mdata.iface_rb_buf), .dev = MDM_UART_DEV, .hw_flow_control = DT_PROP(MDM_UART_NODE, hw_flow_control), }; ret = modem_iface_uart_init(&mctx.iface, &mdata.iface_data, &uart_config); if (ret < 0) { goto error; } /* modem data storage */ mctx.data_manufacturer = mdata.mdm_manufacturer; mctx.data_model = mdata.mdm_model; mctx.data_revision = mdata.mdm_revision; mctx.data_imei = mdata.mdm_imei; mctx.data_rssi = &mdata.mdm_rssi; /* pin setup */ ret = gpio_pin_configure_dt(&power_gpio, GPIO_OUTPUT); if (ret < 0) { LOG_ERR("Failed to configure %s pin", "power"); goto error; } #if DT_INST_NODE_HAS_PROP(0, mdm_reset_gpios) ret = gpio_pin_configure_dt(&reset_gpio, GPIO_OUTPUT); if (ret < 0) { LOG_ERR("Failed to 
configure %s pin", "reset"); goto error; } #endif #if DT_INST_NODE_HAS_PROP(0, mdm_vint_gpios) ret = gpio_pin_configure_dt(&vint_gpio, GPIO_INPUT); if (ret < 0) { LOG_ERR("Failed to configure %s pin", "vint"); goto error; } #endif mctx.driver_data = &mdata; ret = modem_context_register(&mctx); if (ret < 0) { LOG_ERR("Error registering modem context: %d", ret); goto error; } /* start RX thread */ k_thread_create(&modem_rx_thread, modem_rx_stack, K_KERNEL_STACK_SIZEOF(modem_rx_stack), modem_rx, NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT); #if defined(CONFIG_MODEM_UBLOX_SARA_RSSI_WORK) /* init RSSI query */ k_work_init_delayable(&mdata.rssi_query_work, modem_rssi_query_work); #endif modem_reset(); error: return ret; } NET_DEVICE_DT_INST_OFFLOAD_DEFINE(0, modem_init, NULL, &mdata, NULL, CONFIG_MODEM_UBLOX_SARA_R4_INIT_PRIORITY, &api_funcs, MDM_MAX_DATA_LENGTH); ```
/content/code_sandbox/drivers/modem/ublox-sara-r4.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
17,086
```unknown # Modem configuration options menuconfig MODEM bool "Modem drivers" help Enable config options for modem drivers. if MODEM module = MODEM module-str = modem source "subsys/logging/Kconfig.template.log_config" config MODEM_RECEIVER bool "Modem receiver helper driver" depends on SERIAL_SUPPORT_INTERRUPT select UART_INTERRUPT_DRIVEN select RING_BUFFER help This driver allows modem drivers to communicate over UART with custom defined protocols. Driver doesn't inspect received data and all aspects of received protocol data are handled by application via work method provided. This driver differs from the pipe UART driver in that callbacks are executed in a different work queue and data is passed around in k_pipe structures. config MODEM_RECEIVER_MAX_CONTEXTS int "Maximum number of modem receiver contexts" depends on MODEM_RECEIVER range 1 10 default 1 help Maximum number of modem receiver contexts to handle. For most purposes this should stay at 1. config MODEM_CONTEXT bool "Modem context helper driver [EXPERIMENTAL]" select EXPERIMENTAL help This driver allows modem drivers to communicate with an interface using custom defined protocols. Driver doesn't inspect received data and all aspects of received protocol data are handled by application work method provided. This driver combines abstractions for: modem interface, command handler, pin config and socket handling each of which will need to be configured. if MODEM_CONTEXT config MODEM_CONTEXT_MAX_NUM int "Maximum number of modem contexts" default 1 help Maximum number of modem contexts to handle. For most purposes this should stay at 1. config MODEM_CONTEXT_VERBOSE_DEBUG bool "Verbose debug output in the modem context" help Enabling this setting will turn on VERY heavy debugging from the modem context helper. Do NOT leave on for production. 
config MODEM_IFACE_UART bool "UART-based modem interface" select RING_BUFFER help To configure this layer for use, create a modem_iface_uart_data object and pass its reference to modem_iface_uart_init() along with the modem_iface reference from your modem_context object and the UART device name. if MODEM_IFACE_UART choice MODEM_IFACE_UART_BACKEND prompt "UART backend to use for modem interface" default MODEM_IFACE_UART_INTERRUPT config MODEM_IFACE_UART_INTERRUPT bool "UART-based modem interface using interrupt API" depends on SERIAL_SUPPORT_INTERRUPT select UART_INTERRUPT_DRIVEN config MODEM_IFACE_UART_ASYNC bool "UART-based modem interface using async API" depends on SERIAL_SUPPORT_ASYNC select UART_ASYNC_API endchoice if MODEM_IFACE_UART_ASYNC config MODEM_IFACE_UART_ASYNC_RX_BUFFER_SIZE int "Size in bytes of the RX buffers provided to UART driver" default 64 help Increasing this value decreases the number of UART interrupts needed to receive large packets. config MODEM_IFACE_UART_ASYNC_RX_NUM_BUFFERS int "Number of RX buffers available to the UART driver" default 2 help This value needs to be twice the number of UART modems using the driver to avoid buffer starvation. config MODEM_IFACE_UART_ASYNC_RX_TIMEOUT_US int "Timeout for flushing RX buffers after receiving no additional data" default 278 help Decreasing this value can help increase data throughput when high baudrates are used. 278us is 4 bytes at 115200 baud. Decreasing this value too much can result in spurious interrupts. Leaving it too high can reduce data throughput. 
endif # MODEM_IFACE_UART_ASYNC endif # MODEM_IFACE_UART config MODEM_CMD_HANDLER bool "Generic modem command handler" select NET_BUF help This generic command handler uses a modem interface to process incoming data and hand it back to the modem driver via callbacks defined for: - modem responses - unsolicited messages - specified handlers for current operation To configure this layer for use, create a modem_cmd_handler_data object and pass its reference to modem_cmd_handler_init() along with the modem_cmd_handler reference from your modem_context object. config MODEM_CMD_HANDLER_MAX_PARAM_COUNT int "Maximum number of params parsed per command" depends on MODEM_CMD_HANDLER default 6 help This option sets the maximum number of parameters which may be parsed by the command handler. This is also limited by the length of the match_buf (match_buf_len) field as it needs to be large enough to hold a single line of data (ending with /r). endif # MODEM_CONTEXT config MODEM_SOCKET bool "Generic modem socket support layer" help This layer provides much of the groundwork for keeping track of modem "sockets" throughout their lifecycle (from the initial offload API calls through the command handler call back layers). To configure this layer for use, create a modem_socket_config object with your socket data and pass its reference to modem_socket_init(). Note that the modem socket uses runtime allocated file descriptors reserved from the fdtable, for which the max count is set using the Kconfig option ZVFS_OPEN_MAX. Make sure to update this value as both the modem sockets and the POSIX_API, if used, share them. config MODEM_SOCKET_PACKET_COUNT int "Maximum number of stored packet sizes per socket" depends on MODEM_SOCKET default 6 help As the modem indicates more data is available to be received, these values are organized into "packets". This setting limits the maximum number of packet sizes the socket can keep track of. 
config MODEM_SHELL bool "Modem shell utilities" select SHELL help Activate shell module that provides modem utilities like sending a command to the modem UART. config MODEM_SIM_NUMBERS bool "Query the SIM for IMSI and ICCID" default y help Query the SIM card for the IMSI and ICCID identifiers. This can be disabled if the application does not use a SIM. config MODEM_CELL_INFO bool "Query for operator and cell info" help Query for numerical operator id, location area code and cell id. source "drivers/modem/Kconfig.ublox-sara-r4" source "drivers/modem/Kconfig.quectel-bg9x" source "drivers/modem/Kconfig.wncm14a2a" source "drivers/modem/Kconfig.cellular" source "drivers/modem/Kconfig.at_shell" source "drivers/modem/Kconfig.hl7800" source "drivers/modem/Kconfig.simcom-sim7080" endif # MODEM ```
/content/code_sandbox/drivers/modem/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,450
```unknown # Sierra Wireless HL7800 modem driver options menuconfig MODEM_HL7800 bool "Sierra Wireless HL7800 modem driver" select MODEM_RECEIVER select NET_OFFLOAD imply GPIO help Choose this setting to enable Sierra Wireless HL7800 LTE-M/NB-IoT modem driver. if MODEM_HL7800 config MODEM_HL7800_FW_UPDATE bool "Firmware update" select FILE_SYSTEM help Enable the ability to update the HL7800 via XMODEM by providing an update file to the update API. config MODEM_HL7800_RECV_BUF_CNT int "The number of allocated network buffers" default 30 config MODEM_HL7800_RECV_BUF_SIZE int "The size of the network buffers in bytes" default 128 config MODEM_HL7800_SET_APN_NAME_ON_STARTUP bool "Set APN name during driver init" help If APN doesn't match MODEM_HL7800_APN_NAME on startup, then it will be set. if MODEM_HL7800_SET_APN_NAME_ON_STARTUP config MODEM_HL7800_APN_NAME string "APN name for establishing network connection" help This setting is used in the AT+CGDCONT command to set the APN name for the PDP context. endif # MODEM_HL7800_SET_APN_NAME_ON_STARTUP choice MODEM_HL7800_RAT bool "Radio Access Technology Mode" default MODEM_HL7800_RAT_NO_CHANGE config MODEM_HL7800_RAT_NO_CHANGE bool "Don't change the RAT" help Leave the HL7800 RAT unchanged during modem init. config MODEM_HL7800_RAT_M1 bool "LTE-M1" help Enable LTE Cat-M1 mode during modem init. config MODEM_HL7800_RAT_NB1 bool "NB-IoT" help Enable LTE Cat-NB1 mode during modem init. endchoice menuconfig MODEM_HL7800_CONFIGURE_BANDS bool "Configure modem bands" default "y" help Choose this setting to configure which LTE bands the HL7800 modem should use at boot time. 
if MODEM_HL7800_CONFIGURE_BANDS config MODEM_HL7800_BAND_1 bool "Band 1 (2000MHz)" default "y" help Enable Band 1 (2000MHz) config MODEM_HL7800_BAND_2 bool "Band 2 (1900MHz)" default "y" help Enable Band 2 (1900MHz) config MODEM_HL7800_BAND_3 bool "Band 3 (1800MHz)" default "y" help Enable Band 3 (1800MHz) config MODEM_HL7800_BAND_4 bool "Band 4 (1700MHz)" default "y" help Enable Band 4 (1700MHz) config MODEM_HL7800_BAND_5 bool "Band 5 (850MHz)" default "y" help Enable Band 5 (850MHz) config MODEM_HL7800_BAND_8 bool "Band 8 (900MHz)" default "y" help Enable Band 8 (900MHz) config MODEM_HL7800_BAND_9 bool "Band 9 (1900MHz)" help Enable Band 9 (1900MHz) config MODEM_HL7800_BAND_10 bool "Band 10 (2100MHz)" help Enable Band 10 (2100MHz) config MODEM_HL7800_BAND_12 bool "Band 12 (700MHz)" default "y" help Enable Band 12 (700MHz) config MODEM_HL7800_BAND_13 bool "Band 13 (700MHz)" default "y" help Enable Band 13 (700MHz) config MODEM_HL7800_BAND_14 bool "Band 14 (700MHz)" help Enable Band 14 (700MHz) config MODEM_HL7800_BAND_17 bool "Band 17 (700MHz)" help Enable Band 17 (700MHz) config MODEM_HL7800_BAND_18 bool "Band 18 (800MHz)" help Enable Band 18 (800MHz) config MODEM_HL7800_BAND_19 bool "Band 19 (800MHz)" help Enable Band 19 (800MHz) config MODEM_HL7800_BAND_20 bool "Band 20 (800MHz)" default "y" help Enable Band 20 (800MHz) config MODEM_HL7800_BAND_25 bool "Band 25 (1900MHz)" help Enable Band 25 (1900MHz) config MODEM_HL7800_BAND_26 bool "Band 26 (800MHz)" help Enable Band 26 (800MHz) config MODEM_HL7800_BAND_27 bool "Band 27 (800MHz)" help Enable Band 27 (800MHz) config MODEM_HL7800_BAND_28 bool "Band 28 (700MHz)" default "y" help Enable Band 28 (700MHz) config MODEM_HL7800_BAND_66 bool "Band 66 (1800MHz)" help Enable Band 66 (1800MHz) endif # MODEM_HL7800_CONFIGURE_BAND menuconfig MODEM_HL7800_LOW_POWER_MODE bool "Low power modes" help Choose this setting to enable a low power mode for the HL7800 modem if MODEM_HL7800_LOW_POWER_MODE choice prompt "Low Power Mode" 
default MODEM_HL7800_EDRX config MODEM_HL7800_EDRX bool "eDRX" help Enable LTE eDRX config MODEM_HL7800_PSM bool "PSM" help Enable Power Save Mode (PSM) endchoice if MODEM_HL7800_EDRX config MODEM_HL7800_EDRX_VALUE string "Requested eDRX timer" default "0101" help Half a byte in a 4-bit format. The eDRX value refers to bit 4 to 1 of octet 3 of the Extended DRX parameters information element. Default value is 81.92 seconds. endif # MODEM_HL7800_EDRX if MODEM_HL7800_PSM config MODEM_HL7800_PSM_PERIODIC_TAU string "Requested extended periodic TAU timer" default "10000010" help Requested extended periodic TAU (tracking area update) value (T3412) to be allocated to the UE in E-UTRAN. One byte in an 8-bit format. Default value is 1 minute. config MODEM_HL7800_PSM_ACTIVE_TIME string "Requested active time" default "00001111" help Requested Active Time value (T3324) to be allocated to the UE. One byte in an 8-bit format. Default value is 30 seconds. endif # MODEM_HL7800_PSM choice prompt "Default Sleep Level" default MODEM_HL7800_SLEEP_LEVEL_HIBERNATE help The application can override this setting config MODEM_HL7800_SLEEP_LEVEL_HIBERNATE bool "Hibernate" help Lowest power consumption IO state not retained Application subsystem OFF config MODEM_HL7800_SLEEP_LEVEL_LITE_HIBERNATE bool "Lite Hibernate" help IO state retained Application subsystem OFF config MODEM_HL7800_SLEEP_LEVEL_SLEEP bool "Sleep" help Highest power consumption of modem sleep states IO state retained Application subsystem ON Allows sockets to remain open endchoice config MODEM_HL7800_SLEEP_DELAY_AFTER_REBOOT int "Delay in seconds before sleep after reboot" default 10 endif # MODEM_HL7800_LOW_POWER_MODE config MODEM_HL7800_RX_STACK_SIZE int "Size of the stack for the HL7800 modem driver RX thread" default 1536 if MODEM_HL7800_POLTE default 1280 help This stack is used by the HL7800 RX thread. 
config MODEM_HL7800_RX_WORKQ_STACK_SIZE int "Size of the stack for the HL7800 modem driver work queue" default 2048 help This stack is used by the work queue to pass off net_pkt data to the rest of the network stack, letting the rx thread continue processing data. config MODEM_HL7800_INIT_PRIORITY int "HL7800 driver init priority" default 80 help HL7800 device driver initialization priority. Do not mess with it unless you know what you are doing. Note that the priority needs to be lower than the net stack so that it can start before the networking sub-system. config MODEM_HL7800_GET_IP_ADDR_INFO_ATTEMPTS int "Number of attempts to get IP address info during init" default 4 choice prompt "Modem Boot Type" default MODEM_HL7800_BOOT_NORMAL config MODEM_HL7800_BOOT_NORMAL bool "No Delay" config MODEM_HL7800_BOOT_DELAY bool "Delay secondary initialization of driver and network attach" help When true the application must call mdm_hl7800_reset. This mode delays the validity of modem info. config MODEM_HL7800_BOOT_IN_AIRPLANE_MODE bool "Enter airplane mode during init" help mdm_hl7800_set_functionality must be used by app to exit airplane mode. endchoice config MODEM_HL7800_GPS bool "GPS command and handlers" config MODEM_HL7800_USE_GLONASS bool "Use GLONASS in addition to GPS" depends on MODEM_HL7800_GPS config MODEM_HL7800_POLTE bool "PoLTE commands and handlers" choice prompt "IP Address family" default MODEM_HL7800_ADDRESS_FAMILY_IPV4V6 help The address family for IP connections. config MODEM_HL7800_ADDRESS_FAMILY_IPV4 bool "IPv4" config MODEM_HL7800_ADDRESS_FAMILY_IPV6 bool "IPv6" config MODEM_HL7800_ADDRESS_FAMILY_IPV4V6 bool "IPv4v6" endchoice config MODEM_HL7800_ALLOW_SLEEP_DELAY_MS int "Milliseconds to delay before allowing modem to sleep" default 5000 range 5000 3600000 help This value should be set larger than the network latency. Otherwise the modem can go to sleep before having a chance to receive socket data. 
config MODEM_HL7800_RSSI_RATE_SECONDS int "Rate to automatically query RSSI" default 0 if MODEM_HL7800_LOW_POWER_MODE default 30 config MODEM_HL7800_CTS_FILTER_US int "CTS signal filter time (microseconds)" default 20 help This value is used to filter the CTS signal from the modem. CTS pulses shorter than this value will be ignored. endif # MODEM_HL7800 ```
/content/code_sandbox/drivers/modem/Kconfig.hl7800
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,464
```c /* * */ #include <errno.h> #include <zephyr/drivers/sensor.h> #include <zephyr/dsp/types.h> #include <zephyr/logging/log.h> #include <zephyr/rtio/work.h> LOG_MODULE_REGISTER(sensor_compat, CONFIG_SENSOR_LOG_LEVEL); /* * Ensure that the size of the generic header aligns with the sensor channel specifier . If it * doesn't, then cores that require aligned memory access will fail to read channel[0]. */ BUILD_ASSERT((sizeof(struct sensor_data_generic_header) % sizeof(struct sensor_chan_spec)) == 0); static void sensor_submit_fallback(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe); static void sensor_iodev_submit(struct rtio_iodev_sqe *iodev_sqe) { const struct sensor_read_config *cfg = iodev_sqe->sqe.iodev->data; const struct device *dev = cfg->sensor; const struct sensor_driver_api *api = dev->api; if (api->submit != NULL) { api->submit(dev, iodev_sqe); } else if (!cfg->is_streaming) { sensor_submit_fallback(dev, iodev_sqe); } else { rtio_iodev_sqe_err(iodev_sqe, -ENOTSUP); } } const struct rtio_iodev_api __sensor_iodev_api = { .submit = sensor_iodev_submit, }; /** * @brief Compute the number of samples needed for the given channels * * @param[in] channels Array of channels requested * @param[in] num_channels Number of channels on the @p channels array * @return The number of samples required to read the given channels */ static inline int compute_num_samples(const struct sensor_chan_spec *const channels, size_t num_channels) { int num_samples = 0; for (size_t i = 0; i < num_channels; ++i) { num_samples += SENSOR_CHANNEL_3_AXIS(channels[i].chan_type) ? 3 : 1; } return num_samples; } /** * @brief Compute the required header size * * This function takes into account alignment of the q31 values that will follow the header. 
* * @param[in] num_output_samples The number of samples to represent * @return The number of bytes needed for this sample frame's header */ static inline uint32_t compute_header_size(int num_output_samples) { uint32_t size = sizeof(struct sensor_data_generic_header) + (num_output_samples * sizeof(struct sensor_chan_spec)); return (size + 3) & ~0x3; } /** * @brief Compute the minimum number of bytes needed * * @param[in] num_output_samples The number of samples to represent * @return The number of bytes needed for this sample frame */ static inline uint32_t compute_min_buf_len(int num_output_samples) { return compute_header_size(num_output_samples) + (num_output_samples * sizeof(q31_t)); } /** * @brief Checks if the header already contains a given channel * * @param[in] header The header to scan * @param[in] channel The channel to search for * @param[in] num_channels The number of valid channels in the header so far * @return Index of the @p channel if found or negative if not found */ static inline int check_header_contains_channel(const struct sensor_data_generic_header *header, struct sensor_chan_spec chan_spec, int num_channels) { __ASSERT_NO_MSG(!SENSOR_CHANNEL_3_AXIS(chan_spec.chan_type)); for (int i = 0; i < num_channels; ++i) { if (sensor_chan_spec_eq(header->channels[i], chan_spec)) { return i; } } return -1; } /** * @brief Fallback function for retrofiting old drivers to rtio (sync) * * @param[in] iodev_sqe The read submission queue event */ static void sensor_submit_fallback_sync(struct rtio_iodev_sqe *iodev_sqe) { const struct sensor_read_config *cfg = iodev_sqe->sqe.iodev->data; const struct device *dev = cfg->sensor; const struct sensor_chan_spec *const channels = cfg->channels; const int num_output_samples = compute_num_samples(channels, cfg->count); uint32_t min_buf_len = compute_min_buf_len(num_output_samples); uint64_t timestamp_ns = k_ticks_to_ns_floor64(k_uptime_ticks()); int rc = sensor_sample_fetch(dev); uint8_t *buf; uint32_t buf_len; /* Check 
that the fetch succeeded */ if (rc != 0) { LOG_WRN("Failed to fetch samples"); rtio_iodev_sqe_err(iodev_sqe, rc); return; } /* Get the buffer for the frame, it may be allocated dynamically by the rtio context */ rc = rtio_sqe_rx_buf(iodev_sqe, min_buf_len, min_buf_len, &buf, &buf_len); if (rc != 0) { LOG_WRN("Failed to get a read buffer of size %u bytes", min_buf_len); rtio_iodev_sqe_err(iodev_sqe, rc); return; } /* Set the timestamp and num_channels */ struct sensor_data_generic_header *header = (struct sensor_data_generic_header *)buf; header->timestamp_ns = timestamp_ns; header->num_channels = num_output_samples; header->shift = 0; q31_t *q = (q31_t *)(buf + compute_header_size(num_output_samples)); /* Populate values, update shift, and set channels */ for (size_t i = 0, sample_idx = 0; i < cfg->count; ++i) { struct sensor_value value[3]; const int num_samples = SENSOR_CHANNEL_3_AXIS(channels[i].chan_type) ? 3 : 1; /* Get the current channel requested by the user */ rc = sensor_channel_get(dev, channels[i].chan_type, value); if (num_samples == 3) { header->channels[sample_idx++] = (struct sensor_chan_spec) { rc == 0 ? channels[i].chan_type - 3 : SENSOR_CHAN_MAX, 0 }; header->channels[sample_idx++] = (struct sensor_chan_spec) { rc == 0 ? channels[i].chan_type - 2 : SENSOR_CHAN_MAX, 0 }; header->channels[sample_idx++] = (struct sensor_chan_spec) { rc == 0 ? channels[i].chan_type - 1 : SENSOR_CHAN_MAX, 0 }; } else { header->channels[sample_idx++] = (struct sensor_chan_spec) { rc == 0 ? channels[i].chan_type : SENSOR_CHAN_MAX, 0 }; } if (rc != 0) { LOG_DBG("Failed to get channel (type: %d, index %d), skipping", channels[i].chan_type, channels[i].chan_idx); continue; } /* Get the largest absolute value reading to set the scale for the channel */ uint32_t header_scale = 0; for (int sample = 0; sample < num_samples; ++sample) { /* * The scale is the ceil(abs(sample)). 
* Since we are using fractional values, it's easier to assume that .val2 * is non 0 and convert this to abs(sample.val1) + 1 (removing a branch). * Since it's possible that val1 (int32_t) is saturated (INT32_MAX) we need * to upcast it to 64 bit int first, then take the abs() of that 64 bit * int before we '+ 1'. Once that's done, we can safely cast back down * to uint32_t because the min value is 0 and max is INT32_MAX + 1 which * is less than UINT32_MAX. */ uint32_t scale = (uint32_t)llabs((int64_t)value[sample].val1) + 1; header_scale = MAX(header_scale, scale); } int8_t new_shift = ilog2(header_scale - 1) + 1; /* Reset sample_idx */ sample_idx -= num_samples; if (header->shift < new_shift) { /* * Shift was updated, need to convert all the existing q values. This could * be optimized by calling zdsp_scale_q31() but that would force a * dependency between sensors and the zDSP subsystem. */ for (int q_idx = 0; q_idx < sample_idx; ++q_idx) { q[q_idx] = q[q_idx] >> (new_shift - header->shift); } header->shift = new_shift; } /* * Spread the q31 values. This is needed because some channels are 3D. If * the user specified one of those then num_samples will be 3; and we need to * produce 3 separate readings. 
*/ for (int sample = 0; sample < num_samples; ++sample) { /* Check if the channel is already in the buffer */ int prev_computed_value_idx = check_header_contains_channel( header, header->channels[sample_idx + sample], sample_idx + sample); if (prev_computed_value_idx >= 0 && prev_computed_value_idx != sample_idx + sample) { LOG_DBG("value[%d] previously computed at q[%d]@%p", sample, prev_computed_value_idx, (void *)&q[prev_computed_value_idx]); q[sample_idx + sample] = q[prev_computed_value_idx]; continue; } /* Convert the value to micro-units */ int64_t value_u = sensor_value_to_micro(&value[sample]); /* Convert to q31 using the shift */ q[sample_idx + sample] = ((value_u * ((INT64_C(1) << 31) - 1)) / 1000000) >> header->shift; LOG_DBG("value[%d]=%s%d.%06d, q[%d]@%p=%d, shift: %d", sample, value_u < 0 ? "-" : "", abs((int)value[sample].val1), abs((int)value[sample].val2), (int)(sample_idx + sample), (void *)&q[sample_idx + sample], q[sample_idx + sample], header->shift); } sample_idx += num_samples; } LOG_DBG("Total channels in header: %" PRIu32, header->num_channels); rtio_iodev_sqe_ok(iodev_sqe, 0); } /** * @brief Fallback function for retrofiting old drivers to rtio * * @param[in] dev The sensor device to read * @param[in] iodev_sqe The read submission queue event */ static void sensor_submit_fallback(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe) { struct rtio_work_req *req = rtio_work_req_alloc(); __ASSERT_NO_MSG(req); rtio_work_req_submit(req, iodev_sqe, sensor_submit_fallback_sync); } void sensor_processing_with_callback(struct rtio *ctx, sensor_processing_callback_t cb) { void *userdata = NULL; uint8_t *buf = NULL; uint32_t buf_len = 0; int rc; /* Wait for a CQE */ struct rtio_cqe *cqe = rtio_cqe_consume_block(ctx); /* Cache the data from the CQE */ rc = cqe->result; userdata = cqe->userdata; rtio_cqe_get_mempool_buffer(ctx, cqe, &buf, &buf_len); /* Release the CQE */ rtio_cqe_release(ctx, cqe); /* Call the callback */ cb(rc, buf, buf_len, 
userdata); /* Release the memory */ rtio_release_buffer(ctx, buf, buf_len); } /** * @brief Default decoder get frame count * * Default reader can only ever service a single frame at a time. * * @param[in] buffer The data buffer to parse * @param[in] channel The channel to get the count for * @param[in] channel_idx The index of the channel * @param[out] frame_count The number of frames in the buffer (always 1) * @return 0 in all cases */ static int get_frame_count(const uint8_t *buffer, struct sensor_chan_spec channel, uint16_t *frame_count) { struct sensor_data_generic_header *header = (struct sensor_data_generic_header *)buffer; switch (channel.chan_type) { case SENSOR_CHAN_ACCEL_XYZ: channel.chan_type = SENSOR_CHAN_ACCEL_X; break; case SENSOR_CHAN_GYRO_XYZ: channel.chan_type = SENSOR_CHAN_GYRO_X; break; case SENSOR_CHAN_MAGN_XYZ: channel.chan_type = SENSOR_CHAN_MAGN_X; break; case SENSOR_CHAN_POS_DXYZ: channel.chan_type = SENSOR_CHAN_POS_DX; break; default: break; } for (size_t i = 0; i < header->num_channels; ++i) { if (sensor_chan_spec_eq(header->channels[i], channel)) { *frame_count = 1; return 0; } } return -ENOTSUP; } int sensor_natively_supported_channel_size_info(struct sensor_chan_spec channel, size_t *base_size, size_t *frame_size) { __ASSERT_NO_MSG(base_size != NULL); __ASSERT_NO_MSG(frame_size != NULL); if (((int)channel.chan_type < 0) || channel.chan_type >= (SENSOR_CHAN_ALL)) { return -ENOTSUP; } switch (channel.chan_type) { case SENSOR_CHAN_ACCEL_X: case SENSOR_CHAN_ACCEL_Y: case SENSOR_CHAN_ACCEL_Z: case SENSOR_CHAN_ACCEL_XYZ: case SENSOR_CHAN_GYRO_X: case SENSOR_CHAN_GYRO_Y: case SENSOR_CHAN_GYRO_Z: case SENSOR_CHAN_GYRO_XYZ: case SENSOR_CHAN_MAGN_X: case SENSOR_CHAN_MAGN_Y: case SENSOR_CHAN_MAGN_Z: case SENSOR_CHAN_MAGN_XYZ: case SENSOR_CHAN_POS_DX: case SENSOR_CHAN_POS_DY: case SENSOR_CHAN_POS_DZ: case SENSOR_CHAN_POS_DXYZ: *base_size = sizeof(struct sensor_three_axis_data); *frame_size = sizeof(struct sensor_three_axis_sample_data); return 0; 
case SENSOR_CHAN_PROX: *base_size = sizeof(struct sensor_byte_data); *frame_size = sizeof(struct sensor_byte_sample_data); return 0; case SENSOR_CHAN_GAUGE_CYCLE_COUNT: *base_size = sizeof(struct sensor_uint64_data); *frame_size = sizeof(struct sensor_uint64_sample_data); return 0; default: *base_size = sizeof(struct sensor_q31_data); *frame_size = sizeof(struct sensor_q31_sample_data); return 0; } } static int get_q31_value(const struct sensor_data_generic_header *header, const q31_t *values, struct sensor_chan_spec chan_spec, q31_t *out) { for (size_t i = 0; i < header->num_channels; ++i) { if (sensor_chan_spec_eq(chan_spec, header->channels[i])) { *out = values[i]; return 0; } } return -EINVAL; } static int decode_three_axis(const struct sensor_data_generic_header *header, const q31_t *values, struct sensor_three_axis_data *data_out, enum sensor_channel x, enum sensor_channel y, enum sensor_channel z, size_t channel_idx) { int rc; data_out->header.base_timestamp_ns = header->timestamp_ns; data_out->header.reading_count = 1; data_out->shift = header->shift; data_out->readings[0].timestamp_delta = 0; rc = get_q31_value(header, values, (struct sensor_chan_spec){x, channel_idx}, &data_out->readings[0].values[0]); if (rc < 0) { return rc; } rc = get_q31_value(header, values, (struct sensor_chan_spec){y, channel_idx}, &data_out->readings[0].values[1]); if (rc < 0) { return rc; } rc = get_q31_value(header, values, (struct sensor_chan_spec){z, channel_idx}, &data_out->readings[0].values[2]); if (rc < 0) { return rc; } return 1; } static int decode_q31(const struct sensor_data_generic_header *header, const q31_t *values, struct sensor_q31_data *data_out, struct sensor_chan_spec chan_spec) { int rc; data_out->header.base_timestamp_ns = header->timestamp_ns; data_out->header.reading_count = 1; data_out->shift = header->shift; data_out->readings[0].timestamp_delta = 0; rc = get_q31_value(header, values, chan_spec, &data_out->readings[0].value); if (rc < 0) { return rc; } 
return 1; } /** * @brief Decode up to N samples from the buffer * * This function will never wrap frames. If 1 channel is available in the current frame and * @p max_count is 2, only 1 channel will be decoded and the frame iterator will be modified * so that the next call to decode will begin at the next frame. * * @param[in] buffer The buffer provided on the :c:struct:`rtio` context * @param[in] channel The channel to decode * @param[in] channel_idx The index of the channel * @param[in,out] fit The current frame iterator * @param[in] max_count The maximum number of channels to decode. * @param[out] data_out The decoded data * @return 0 no more samples to decode * @return >0 the number of decoded frames * @return <0 on error */ static int decode(const uint8_t *buffer, struct sensor_chan_spec chan_spec, uint32_t *fit, uint16_t max_count, void *data_out) { const struct sensor_data_generic_header *header = (const struct sensor_data_generic_header *)buffer; const q31_t *q = (const q31_t *)(buffer + compute_header_size(header->num_channels)); int count = 0; if (*fit != 0 || max_count < 1) { return -EINVAL; } if (((int)chan_spec.chan_type < 0) || chan_spec.chan_type >= (SENSOR_CHAN_ALL)) { return 0; } /* Check for 3d channel mappings */ switch (chan_spec.chan_type) { case SENSOR_CHAN_ACCEL_X: case SENSOR_CHAN_ACCEL_Y: case SENSOR_CHAN_ACCEL_Z: case SENSOR_CHAN_ACCEL_XYZ: count = decode_three_axis(header, q, data_out, SENSOR_CHAN_ACCEL_X, SENSOR_CHAN_ACCEL_Y, SENSOR_CHAN_ACCEL_Z, chan_spec.chan_idx); break; case SENSOR_CHAN_GYRO_X: case SENSOR_CHAN_GYRO_Y: case SENSOR_CHAN_GYRO_Z: case SENSOR_CHAN_GYRO_XYZ: count = decode_three_axis(header, q, data_out, SENSOR_CHAN_GYRO_X, SENSOR_CHAN_GYRO_Y, SENSOR_CHAN_GYRO_Z, chan_spec.chan_idx); break; case SENSOR_CHAN_MAGN_X: case SENSOR_CHAN_MAGN_Y: case SENSOR_CHAN_MAGN_Z: case SENSOR_CHAN_MAGN_XYZ: count = decode_three_axis(header, q, data_out, SENSOR_CHAN_MAGN_X, SENSOR_CHAN_MAGN_Y, SENSOR_CHAN_MAGN_Z, chan_spec.chan_idx); break; 
case SENSOR_CHAN_POS_DX: case SENSOR_CHAN_POS_DY: case SENSOR_CHAN_POS_DZ: case SENSOR_CHAN_POS_DXYZ: count = decode_three_axis(header, q, data_out, SENSOR_CHAN_POS_DX, SENSOR_CHAN_POS_DY, SENSOR_CHAN_POS_DZ, chan_spec.chan_idx); break; default: count = decode_q31(header, q, data_out, chan_spec); break; } if (count > 0) { *fit = 1; } return count; } const struct sensor_decoder_api __sensor_default_decoder = { .get_frame_count = get_frame_count, .get_size_info = sensor_natively_supported_channel_size_info, .decode = decode, }; ```
/content/code_sandbox/drivers/sensor/default_rtio_sensor.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,495
```c /* * */ #include <ctype.h> #include <stdlib.h> #include <string.h> #include <zephyr/device.h> #include <zephyr/drivers/sensor.h> #include <zephyr/kernel.h> #include <zephyr/rtio/rtio.h> #include <zephyr/shell/shell.h> #include <zephyr/sys/iterable_sections.h> #include <zephyr/sys/util.h> #include "sensor_shell.h" LOG_MODULE_REGISTER(sensor_shell, CONFIG_SENSOR_LOG_LEVEL); #define SENSOR_GET_HELP \ "Get sensor data. Channel names are optional. All channels are read " \ "when no channels are provided. Syntax:\n" \ "<device_name> <channel name 0> .. <channel name N>" #define SENSOR_STREAM_HELP \ "Start/stop streaming sensor data. Data ready trigger will be used if no triggers " \ "are provided. Syntax:\n" \ "<device_name> on|off <trigger name> incl|drop|nop" #define SENSOR_ATTR_GET_HELP \ "Get the sensor's channel attribute. Syntax:\n" \ "<device_name> [<channel_name 0> <attribute_name 0> .. " \ "<channel_name N> <attribute_name N>]" #define SENSOR_ATTR_SET_HELP \ "Set the sensor's channel attribute.\n" \ "<device_name> <channel_name> <attribute_name> <value>" #define SENSOR_INFO_HELP "Get sensor info, such as vendor and model name, for all sensors." #define SENSOR_TRIG_HELP \ "Get or set the trigger type on a sensor. 
Currently only supports `data_ready`.\n" \ "<device_name> <on/off> <trigger_name>" static const char *sensor_channel_name[SENSOR_CHAN_COMMON_COUNT] = { [SENSOR_CHAN_ACCEL_X] = "accel_x", [SENSOR_CHAN_ACCEL_Y] = "accel_y", [SENSOR_CHAN_ACCEL_Z] = "accel_z", [SENSOR_CHAN_ACCEL_XYZ] = "accel_xyz", [SENSOR_CHAN_GYRO_X] = "gyro_x", [SENSOR_CHAN_GYRO_Y] = "gyro_y", [SENSOR_CHAN_GYRO_Z] = "gyro_z", [SENSOR_CHAN_GYRO_XYZ] = "gyro_xyz", [SENSOR_CHAN_MAGN_X] = "magn_x", [SENSOR_CHAN_MAGN_Y] = "magn_y", [SENSOR_CHAN_MAGN_Z] = "magn_z", [SENSOR_CHAN_MAGN_XYZ] = "magn_xyz", [SENSOR_CHAN_DIE_TEMP] = "die_temp", [SENSOR_CHAN_AMBIENT_TEMP] = "ambient_temp", [SENSOR_CHAN_PRESS] = "press", [SENSOR_CHAN_PROX] = "prox", [SENSOR_CHAN_HUMIDITY] = "humidity", [SENSOR_CHAN_LIGHT] = "light", [SENSOR_CHAN_IR] = "ir", [SENSOR_CHAN_RED] = "red", [SENSOR_CHAN_GREEN] = "green", [SENSOR_CHAN_BLUE] = "blue", [SENSOR_CHAN_ALTITUDE] = "altitude", [SENSOR_CHAN_PM_1_0] = "pm_1_0", [SENSOR_CHAN_PM_2_5] = "pm_2_5", [SENSOR_CHAN_PM_10] = "pm_10", [SENSOR_CHAN_DISTANCE] = "distance", [SENSOR_CHAN_CO2] = "co2", [SENSOR_CHAN_O2] = "o2", [SENSOR_CHAN_VOC] = "voc", [SENSOR_CHAN_GAS_RES] = "gas_resistance", [SENSOR_CHAN_VOLTAGE] = "voltage", [SENSOR_CHAN_VSHUNT] = "vshunt", [SENSOR_CHAN_CURRENT] = "current", [SENSOR_CHAN_POWER] = "power", [SENSOR_CHAN_RESISTANCE] = "resistance", [SENSOR_CHAN_ROTATION] = "rotation", [SENSOR_CHAN_POS_DX] = "pos_dx", [SENSOR_CHAN_POS_DY] = "pos_dy", [SENSOR_CHAN_POS_DZ] = "pos_dz", [SENSOR_CHAN_POS_DXYZ] = "pos_dxyz", [SENSOR_CHAN_RPM] = "rpm", [SENSOR_CHAN_GAUGE_VOLTAGE] = "gauge_voltage", [SENSOR_CHAN_GAUGE_AVG_CURRENT] = "gauge_avg_current", [SENSOR_CHAN_GAUGE_STDBY_CURRENT] = "gauge_stdby_current", [SENSOR_CHAN_GAUGE_MAX_LOAD_CURRENT] = "gauge_max_load_current", [SENSOR_CHAN_GAUGE_TEMP] = "gauge_temp", [SENSOR_CHAN_GAUGE_STATE_OF_CHARGE] = "gauge_state_of_charge", [SENSOR_CHAN_GAUGE_FULL_CHARGE_CAPACITY] = "gauge_full_cap", [SENSOR_CHAN_GAUGE_REMAINING_CHARGE_CAPACITY] = 
"gauge_remaining_cap", [SENSOR_CHAN_GAUGE_NOM_AVAIL_CAPACITY] = "gauge_nominal_cap", [SENSOR_CHAN_GAUGE_FULL_AVAIL_CAPACITY] = "gauge_full_avail_cap", [SENSOR_CHAN_GAUGE_AVG_POWER] = "gauge_avg_power", [SENSOR_CHAN_GAUGE_STATE_OF_HEALTH] = "gauge_state_of_health", [SENSOR_CHAN_GAUGE_TIME_TO_EMPTY] = "gauge_time_to_empty", [SENSOR_CHAN_GAUGE_TIME_TO_FULL] = "gauge_time_to_full", [SENSOR_CHAN_GAUGE_CYCLE_COUNT] = "gauge_cycle_count", [SENSOR_CHAN_GAUGE_DESIGN_VOLTAGE] = "gauge_design_voltage", [SENSOR_CHAN_GAUGE_DESIRED_VOLTAGE] = "gauge_desired_voltage", [SENSOR_CHAN_GAUGE_DESIRED_CHARGING_CURRENT] = "gauge_desired_charging_current", [SENSOR_CHAN_ALL] = "all", }; static const char *sensor_attribute_name[SENSOR_ATTR_COMMON_COUNT] = { [SENSOR_ATTR_SAMPLING_FREQUENCY] = "sampling_frequency", [SENSOR_ATTR_LOWER_THRESH] = "lower_thresh", [SENSOR_ATTR_UPPER_THRESH] = "upper_thresh", [SENSOR_ATTR_SLOPE_TH] = "slope_th", [SENSOR_ATTR_SLOPE_DUR] = "slope_dur", [SENSOR_ATTR_HYSTERESIS] = "hysteresis", [SENSOR_ATTR_OVERSAMPLING] = "oversampling", [SENSOR_ATTR_FULL_SCALE] = "full_scale", [SENSOR_ATTR_OFFSET] = "offset", [SENSOR_ATTR_CALIB_TARGET] = "calib_target", [SENSOR_ATTR_CONFIGURATION] = "configuration", [SENSOR_ATTR_CALIBRATION] = "calibration", [SENSOR_ATTR_FEATURE_MASK] = "feature_mask", [SENSOR_ATTR_ALERT] = "alert", [SENSOR_ATTR_FF_DUR] = "ff_dur", [SENSOR_ATTR_BATCH_DURATION] = "batch_dur", }; enum sample_stats_state { SAMPLE_STATS_STATE_UNINITIALIZED = 0, SAMPLE_STATS_STATE_ENABLED, SAMPLE_STATS_STATE_DISABLED, }; struct sample_stats { int64_t accumulator; uint64_t sample_window_start; uint32_t count; enum sample_stats_state state; }; static struct sample_stats sensor_stats[CONFIG_SENSOR_SHELL_MAX_TRIGGER_DEVICES][SENSOR_CHAN_ALL]; static const struct device *sensor_trigger_devices[CONFIG_SENSOR_SHELL_MAX_TRIGGER_DEVICES]; static bool device_is_sensor(const struct device *dev) { #ifdef CONFIG_SENSOR_INFO STRUCT_SECTION_FOREACH(sensor_info, sensor) { if (sensor->dev 
== dev) { return true; } } return false; #else return true; #endif /* CONFIG_SENSOR_INFO */ } static int find_sensor_trigger_device(const struct device *sensor) { for (int i = 0; i < CONFIG_SENSOR_SHELL_MAX_TRIGGER_DEVICES; i++) { if (sensor_trigger_devices[i] == sensor) { return i; } } return -1; } /* Forward declaration */ static void data_ready_trigger_handler(const struct device *sensor, const struct sensor_trigger *trigger); #define TRIGGER_DATA_ENTRY(trig_enum, str_name, handler_func) \ [(trig_enum)] = {.name = #str_name, \ .handler = (handler_func), \ .trigger = {.chan = SENSOR_CHAN_ALL, .type = (trig_enum)}} /** * @brief This table stores a mapping of string trigger names along with the sensor_trigger struct * that gets passed to the driver to enable that trigger, plus a function pointer to a handler. If * that pointer is NULL, this indicates there is not currently support for that trigger type in the * sensor shell. */ static const struct { const char *name; sensor_trigger_handler_t handler; struct sensor_trigger trigger; } sensor_trigger_table[SENSOR_TRIG_COMMON_COUNT] = { TRIGGER_DATA_ENTRY(SENSOR_TRIG_TIMER, timer, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_DATA_READY, data_ready, data_ready_trigger_handler), TRIGGER_DATA_ENTRY(SENSOR_TRIG_DELTA, delta, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_NEAR_FAR, near_far, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_THRESHOLD, threshold, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_TAP, tap, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_DOUBLE_TAP, double_tap, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_FREEFALL, freefall, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_MOTION, motion, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_STATIONARY, stationary, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_FIFO_WATERMARK, fifo_wm, NULL), TRIGGER_DATA_ENTRY(SENSOR_TRIG_FIFO_FULL, fifo_full, NULL), }; /** * Lookup the sensor trigger data by name * * @param name The name of the trigger * @return < 0 on error * @return >= 0 if found */ static int sensor_trigger_name_lookup(const char 
*name) { for (int i = 0; i < ARRAY_SIZE(sensor_trigger_table); ++i) { if (strcmp(name, sensor_trigger_table[i].name) == 0) { return i; } } return -1; } enum dynamic_command_context { NONE, CTX_GET, CTX_ATTR_GET_SET, CTX_STREAM_ON_OFF, }; static enum dynamic_command_context current_cmd_ctx = NONE; /* Mutex for accessing shared RTIO/IODEV data structures */ K_MUTEX_DEFINE(cmd_get_mutex); /* Crate a single common config for one-shot reading */ static struct sensor_chan_spec iodev_sensor_shell_channels[SENSOR_CHAN_ALL]; static struct sensor_read_config iodev_sensor_shell_read_config = { .sensor = NULL, .is_streaming = false, .channels = iodev_sensor_shell_channels, .count = 0, .max = ARRAY_SIZE(iodev_sensor_shell_channels), }; RTIO_IODEV_DEFINE(iodev_sensor_shell_read, &__sensor_iodev_api, &iodev_sensor_shell_read_config); /* Create the RTIO context to service the reading */ RTIO_DEFINE_WITH_MEMPOOL(sensor_read_rtio, 8, 8, 32, 64, 4); static int parse_named_int(const char *name, const char *heystack[], size_t count) { char *endptr; int i; /* Attempt to parse channel name as a number first */ i = strtoul(name, &endptr, 0); if (*endptr == '\0') { return i; } /* Channel name is not a number, look it up */ for (i = 0; i < count; i++) { if (strcmp(name, heystack[i]) == 0) { return i; } } return -ENOTSUP; } static int parse_sensor_value(const char *val_str, struct sensor_value *out) { const bool is_negative = val_str[0] == '-'; const char *decimal_pos = strchr(val_str, '.'); long value; char *endptr; /* Parse int portion */ value = strtol(val_str, &endptr, 0); if (*endptr != '\0' && *endptr != '.') { return -EINVAL; } if (value > INT32_MAX || value < INT32_MIN) { return -EINVAL; } out->val1 = (int32_t)value; if (decimal_pos == NULL) { return 0; } /* Parse the decimal portion */ value = strtoul(decimal_pos + 1, &endptr, 0); if (*endptr != '\0') { return -EINVAL; } while (value < 100000) { value *= 10; } if (value > INT32_C(999999)) { return -EINVAL; } out->val2 = 
(int32_t)value; if (is_negative) { out->val2 *= -1; } return 0; } void sensor_shell_processing_callback(int result, uint8_t *buf, uint32_t buf_len, void *userdata) { struct sensor_shell_processing_context *ctx = userdata; const struct sensor_decoder_api *decoder; uint8_t decoded_buffer[128]; struct { uint64_t base_timestamp_ns; int count; uint64_t timestamp_delta; int64_t values[3]; int8_t shift; } accumulator_buffer; int rc; ARG_UNUSED(buf_len); if (result < 0) { shell_error(ctx->sh, "Read failed"); return; } rc = sensor_get_decoder(ctx->dev, &decoder); if (rc != 0) { shell_error(ctx->sh, "Failed to get decoder for '%s'", ctx->dev->name); return; } for (int trigger = 0; decoder->has_trigger != NULL && trigger < SENSOR_TRIG_COMMON_COUNT; ++trigger) { if (!decoder->has_trigger(buf, trigger)) { continue; } shell_info(ctx->sh, "Trigger (%d / %s) detected", trigger, (sensor_trigger_table[trigger].name == NULL ? "UNKNOWN" : sensor_trigger_table[trigger].name)); } for (struct sensor_chan_spec ch = {0, 0}; ch.chan_type < SENSOR_CHAN_ALL; ch.chan_type++) { uint32_t fit = 0; size_t base_size; size_t frame_size; uint16_t frame_count; /* Channels with multi-axis equivalents are skipped */ switch (ch.chan_type) { case SENSOR_CHAN_ACCEL_X: case SENSOR_CHAN_ACCEL_Y: case SENSOR_CHAN_ACCEL_Z: case SENSOR_CHAN_GYRO_X: case SENSOR_CHAN_GYRO_Y: case SENSOR_CHAN_GYRO_Z: case SENSOR_CHAN_MAGN_X: case SENSOR_CHAN_MAGN_Y: case SENSOR_CHAN_MAGN_Z: case SENSOR_CHAN_POS_DX: case SENSOR_CHAN_POS_DY: case SENSOR_CHAN_POS_DZ: continue; } rc = decoder->get_size_info(ch, &base_size, &frame_size); if (rc != 0) { LOG_DBG("skipping unsupported channel %s:%d", sensor_channel_name[ch.chan_type], ch.chan_idx); /* Channel not supported, skipping */ continue; } if (base_size > ARRAY_SIZE(decoded_buffer)) { shell_error(ctx->sh, "Channel (type %d, idx %d) requires %zu bytes to decode, but " "only %zu are available", ch.chan_type, ch.chan_idx, base_size, ARRAY_SIZE(decoded_buffer)); continue; } while 
(decoder->get_frame_count(buf, ch, &frame_count) == 0) { LOG_DBG("decoding %d frames from channel %s:%d", frame_count, sensor_channel_name[ch.chan_type], ch.chan_idx); fit = 0; memset(&accumulator_buffer, 0, sizeof(accumulator_buffer)); while (decoder->decode(buf, ch, &fit, 1, decoded_buffer) > 0) { switch (ch.chan_type) { case SENSOR_CHAN_ACCEL_XYZ: case SENSOR_CHAN_GYRO_XYZ: case SENSOR_CHAN_MAGN_XYZ: case SENSOR_CHAN_POS_DXYZ: { struct sensor_three_axis_data *data = (struct sensor_three_axis_data *)decoded_buffer; if (accumulator_buffer.count == 0) { accumulator_buffer.base_timestamp_ns = data->header.base_timestamp_ns; } accumulator_buffer.count++; accumulator_buffer.shift = data->shift; accumulator_buffer.timestamp_delta += data->readings[0].timestamp_delta; accumulator_buffer.values[0] += data->readings[0].values[0]; accumulator_buffer.values[1] += data->readings[0].values[1]; accumulator_buffer.values[2] += data->readings[0].values[2]; break; } case SENSOR_CHAN_PROX: { struct sensor_byte_data *data = (struct sensor_byte_data *)decoded_buffer; if (accumulator_buffer.count == 0) { accumulator_buffer.base_timestamp_ns = data->header.base_timestamp_ns; } accumulator_buffer.count++; accumulator_buffer.timestamp_delta += data->readings[0].timestamp_delta; accumulator_buffer.values[0] += data->readings[0].is_near; break; } default: { struct sensor_q31_data *data = (struct sensor_q31_data *)decoded_buffer; if (accumulator_buffer.count == 0) { accumulator_buffer.base_timestamp_ns = data->header.base_timestamp_ns; } accumulator_buffer.count++; accumulator_buffer.shift = data->shift; accumulator_buffer.timestamp_delta += data->readings[0].timestamp_delta; accumulator_buffer.values[0] += data->readings[0].value; break; } } } /* Print the accumulated value average */ switch (ch.chan_type) { case SENSOR_CHAN_ACCEL_XYZ: case SENSOR_CHAN_GYRO_XYZ: case SENSOR_CHAN_MAGN_XYZ: case SENSOR_CHAN_POS_DXYZ: { struct sensor_three_axis_data *data = (struct sensor_three_axis_data 
*)decoded_buffer; data->header.base_timestamp_ns = accumulator_buffer.base_timestamp_ns; data->header.reading_count = 1; data->shift = accumulator_buffer.shift; data->readings[0].timestamp_delta = (uint32_t)(accumulator_buffer.timestamp_delta / accumulator_buffer.count); data->readings[0].values[0] = (q31_t)(accumulator_buffer.values[0] / accumulator_buffer.count); data->readings[0].values[1] = (q31_t)(accumulator_buffer.values[1] / accumulator_buffer.count); data->readings[0].values[2] = (q31_t)(accumulator_buffer.values[2] / accumulator_buffer.count); shell_info(ctx->sh, "channel type=%d(%s) index=%d shift=%d num_samples=%d " "value=%" PRIsensor_three_axis_data, ch.chan_type, sensor_channel_name[ch.chan_type], ch.chan_idx, data->shift, accumulator_buffer.count, PRIsensor_three_axis_data_arg(*data, 0)); break; } case SENSOR_CHAN_PROX: { struct sensor_byte_data *data = (struct sensor_byte_data *)decoded_buffer; data->header.base_timestamp_ns = accumulator_buffer.base_timestamp_ns; data->header.reading_count = 1; data->readings[0].timestamp_delta = (uint32_t)(accumulator_buffer.timestamp_delta / accumulator_buffer.count); data->readings[0].is_near = accumulator_buffer.values[0] / accumulator_buffer.count; shell_info(ctx->sh, "channel type=%d(%s) index=%d num_samples=%d " "value=%" PRIsensor_byte_data(is_near), ch.chan_type, sensor_channel_name[ch.chan_type], ch.chan_idx, accumulator_buffer.count, PRIsensor_byte_data_arg(*data, 0, is_near)); break; } default: { struct sensor_q31_data *data = (struct sensor_q31_data *)decoded_buffer; data->header.base_timestamp_ns = accumulator_buffer.base_timestamp_ns; data->header.reading_count = 1; data->shift = accumulator_buffer.shift; data->readings[0].timestamp_delta = (uint32_t)(accumulator_buffer.timestamp_delta / accumulator_buffer.count); data->readings[0].value = (q31_t)(accumulator_buffer.values[0] / accumulator_buffer.count); shell_info(ctx->sh, "channel type=%d(%s) index=%d shift=%d num_samples=%d " "value=%" 
PRIsensor_q31_data, ch.chan_type, (ch.chan_type >= ARRAY_SIZE(sensor_channel_name)) ? "" : sensor_channel_name[ch.chan_type], ch.chan_idx, data->shift, accumulator_buffer.count, PRIsensor_q31_data_arg(*data, 0)); } } ++ch.chan_idx; } ch.chan_idx = 0; } } static int cmd_get_sensor(const struct shell *sh, size_t argc, char *argv[]) { static struct sensor_shell_processing_context ctx; const struct device *dev; int count = 0; int err; err = k_mutex_lock(&cmd_get_mutex, K_NO_WAIT); if (err < 0) { shell_error(sh, "Another sensor reading in progress"); return err; } dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Device unknown (%s)", argv[1]); k_mutex_unlock(&cmd_get_mutex); return -ENODEV; } if (!device_is_sensor(dev)) { shell_error(sh, "Device is not a sensor (%s)", argv[1]); k_mutex_unlock(&cmd_get_mutex); return -ENODEV; } if (argc == 2) { /* read all channel types */ for (int i = 0; i < ARRAY_SIZE(iodev_sensor_shell_channels); ++i) { if (SENSOR_CHANNEL_3_AXIS(i)) { continue; } iodev_sensor_shell_channels[count++] = (struct sensor_chan_spec){i, 0}; } } else { /* read specific channels */ for (int i = 2; i < argc; ++i) { int chan = parse_named_int(argv[i], sensor_channel_name, ARRAY_SIZE(sensor_channel_name)); if (chan < 0) { shell_error(sh, "Failed to read channel (%s)", argv[i]); continue; } iodev_sensor_shell_channels[count++] = (struct sensor_chan_spec){chan, 0}; } } if (count == 0) { shell_error(sh, "No channels to read, bailing"); k_mutex_unlock(&cmd_get_mutex); return -EINVAL; } iodev_sensor_shell_read_config.sensor = dev; iodev_sensor_shell_read_config.count = count; ctx.dev = dev; ctx.sh = sh; err = sensor_read_async_mempool(&iodev_sensor_shell_read, &sensor_read_rtio, &ctx); if (err < 0) { shell_error(sh, "Failed to read sensor: %d", err); } if (!IS_ENABLED(CONFIG_SENSOR_SHELL_STREAM)) { /* * Streaming enables a thread that polls the RTIO context, so if it's enabled, we * don't need a blocking read here. 
*/ sensor_processing_with_callback(&sensor_read_rtio, sensor_shell_processing_callback); } k_mutex_unlock(&cmd_get_mutex); return 0; } static int cmd_sensor_attr_set(const struct shell *shell_ptr, size_t argc, char *argv[]) { const struct device *dev; int rc; dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(shell_ptr, "Device unknown (%s)", argv[1]); return -ENODEV; } if (!device_is_sensor(dev)) { shell_error(shell_ptr, "Device is not a sensor (%s)", argv[1]); k_mutex_unlock(&cmd_get_mutex); return -ENODEV; } for (size_t i = 2; i < argc; i += 3) { int channel = parse_named_int(argv[i], sensor_channel_name, ARRAY_SIZE(sensor_channel_name)); int attr = parse_named_int(argv[i + 1], sensor_attribute_name, ARRAY_SIZE(sensor_attribute_name)); struct sensor_value value = {0}; if (channel < 0) { shell_error(shell_ptr, "Channel '%s' unknown", argv[i]); return -EINVAL; } if (attr < 0) { shell_error(shell_ptr, "Attribute '%s' unknown", argv[i + 1]); return -EINVAL; } if (parse_sensor_value(argv[i + 2], &value)) { shell_error(shell_ptr, "Sensor value '%s' invalid", argv[i + 2]); return -EINVAL; } rc = sensor_attr_set(dev, channel, attr, &value); if (rc) { shell_error(shell_ptr, "Failed to set channel(%s) attribute(%s): %d", sensor_channel_name[channel], sensor_attribute_name[attr], rc); continue; } shell_info(shell_ptr, "%s channel=%s, attr=%s set to value=%s", dev->name, sensor_channel_name[channel], sensor_attribute_name[attr], argv[i + 2]); } return 0; } static void cmd_sensor_attr_get_handler(const struct shell *shell_ptr, const struct device *dev, const char *channel_name, const char *attr_name, bool print_missing_attribute) { int channel = parse_named_int(channel_name, sensor_channel_name, ARRAY_SIZE(sensor_channel_name)); int attr = parse_named_int(attr_name, sensor_attribute_name, ARRAY_SIZE(sensor_attribute_name)); struct sensor_value value = {0}; int rc; if (channel < 0) { shell_error(shell_ptr, "Channel '%s' unknown", channel_name); return; } if 
(attr < 0) { shell_error(shell_ptr, "Attribute '%s' unknown", attr_name); return; } rc = sensor_attr_get(dev, channel, attr, &value); if (rc != 0) { if (rc == -EINVAL && !print_missing_attribute) { return; } shell_error(shell_ptr, "Failed to get channel(%s) attribute(%s): %d", sensor_channel_name[channel], sensor_attribute_name[attr], rc); return; } shell_info(shell_ptr, "%s(channel=%s, attr=%s) value=%.6f", dev->name, sensor_channel_name[channel], sensor_attribute_name[attr], sensor_value_to_double(&value)); } static int cmd_sensor_attr_get(const struct shell *shell_ptr, size_t argc, char *argv[]) { const struct device *dev; dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(shell_ptr, "Device unknown (%s)", argv[1]); return -ENODEV; } if (!device_is_sensor(dev)) { shell_error(shell_ptr, "Device is not a sensor (%s)", argv[1]); k_mutex_unlock(&cmd_get_mutex); return -ENODEV; } if (argc > 2) { for (size_t i = 2; i < argc; i += 2) { cmd_sensor_attr_get_handler(shell_ptr, dev, argv[i], argv[i + 1], /*print_missing_attribute=*/true); } } else { for (size_t channel_idx = 0; channel_idx < ARRAY_SIZE(sensor_channel_name); ++channel_idx) { for (size_t attr_idx = 0; attr_idx < ARRAY_SIZE(sensor_attribute_name); ++attr_idx) { cmd_sensor_attr_get_handler(shell_ptr, dev, sensor_channel_name[channel_idx], sensor_attribute_name[attr_idx], /*print_missing_attribute=*/false); } } } return 0; } static void channel_name_get(size_t idx, struct shell_static_entry *entry); SHELL_DYNAMIC_CMD_CREATE(dsub_channel_name, channel_name_get); static void attribute_name_get(size_t idx, struct shell_static_entry *entry); SHELL_DYNAMIC_CMD_CREATE(dsub_attribute_name, attribute_name_get); static void channel_name_get(size_t idx, struct shell_static_entry *entry) { int cnt = 0; entry->syntax = NULL; entry->handler = NULL; entry->help = NULL; if (current_cmd_ctx == CTX_GET) { entry->subcmd = &dsub_channel_name; } else if (current_cmd_ctx == CTX_ATTR_GET_SET) { entry->subcmd = 
&dsub_attribute_name; } else { entry->subcmd = NULL; } for (int i = 0; i < ARRAY_SIZE(sensor_channel_name); i++) { if (sensor_channel_name[i] != NULL) { if (cnt == idx) { entry->syntax = sensor_channel_name[i]; break; } cnt++; } } } static void attribute_name_get(size_t idx, struct shell_static_entry *entry) { int cnt = 0; entry->syntax = NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_channel_name; for (int i = 0; i < ARRAY_SIZE(sensor_attribute_name); i++) { if (sensor_attribute_name[i] != NULL) { if (cnt == idx) { entry->syntax = sensor_attribute_name[i]; break; } cnt++; } } } static void trigger_opt_get_for_stream(size_t idx, struct shell_static_entry *entry); SHELL_DYNAMIC_CMD_CREATE(dsub_trigger_opt_get_for_stream, trigger_opt_get_for_stream); static void trigger_opt_get_for_stream(size_t idx, struct shell_static_entry *entry) { entry->syntax = NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; switch (idx) { case SENSOR_STREAM_DATA_INCLUDE: entry->syntax = "incl"; break; case SENSOR_STREAM_DATA_DROP: entry->syntax = "drop"; break; case SENSOR_STREAM_DATA_NOP: entry->syntax = "nop"; break; } } static void trigger_name_get_for_stream(size_t idx, struct shell_static_entry *entry); SHELL_DYNAMIC_CMD_CREATE(dsub_trigger_name_for_stream, trigger_name_get_for_stream); static void trigger_name_get_for_stream(size_t idx, struct shell_static_entry *entry) { int cnt = 0; entry->syntax = NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_trigger_opt_get_for_stream; for (int i = 0; i < ARRAY_SIZE(sensor_trigger_table); i++) { if (sensor_trigger_table[i].name != NULL) { if (cnt == idx) { entry->syntax = sensor_trigger_table[i].name; break; } cnt++; } } } static void stream_on_off(size_t idx, struct shell_static_entry *entry) { entry->syntax = NULL; entry->handler = NULL; entry->help = NULL; if (idx == 0) { entry->syntax = "on"; entry->subcmd = &dsub_trigger_name_for_stream; } else if (idx == 1) { entry->syntax 
= "off"; entry->subcmd = NULL; } } SHELL_DYNAMIC_CMD_CREATE(dsub_stream_on_off, stream_on_off); static void device_name_get(size_t idx, struct shell_static_entry *entry); SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get); static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); current_cmd_ctx = CTX_GET; entry->syntax = (dev != NULL) ? dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_channel_name; } static void device_name_get_for_attr(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); current_cmd_ctx = CTX_ATTR_GET_SET; entry->syntax = (dev != NULL) ? dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_channel_name; } SHELL_DYNAMIC_CMD_CREATE(dsub_device_name_for_attr, device_name_get_for_attr); static void trigger_name_get(size_t idx, struct shell_static_entry *entry) { int cnt = 0; entry->syntax = NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; for (int i = 0; i < ARRAY_SIZE(sensor_trigger_table); i++) { if (sensor_trigger_table[i].name != NULL) { if (cnt == idx) { entry->syntax = sensor_trigger_table[i].name; break; } cnt++; } } } SHELL_DYNAMIC_CMD_CREATE(dsub_trigger_name, trigger_name_get); static void trigger_on_off_get(size_t idx, struct shell_static_entry *entry) { entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_trigger_name; switch (idx) { case 0: entry->syntax = "on"; break; case 1: entry->syntax = "off"; break; default: entry->syntax = NULL; break; } } SHELL_DYNAMIC_CMD_CREATE(dsub_trigger_onoff, trigger_on_off_get); static void device_name_get_for_trigger(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? 
dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_trigger_onoff; } SHELL_DYNAMIC_CMD_CREATE(dsub_trigger, device_name_get_for_trigger); static void device_name_get_for_stream(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); current_cmd_ctx = CTX_STREAM_ON_OFF; entry->syntax = (dev != NULL) ? dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_stream_on_off; } SHELL_DYNAMIC_CMD_CREATE(dsub_device_name_for_stream, device_name_get_for_stream); static int cmd_get_sensor_info(const struct shell *sh, size_t argc, char **argv) { ARG_UNUSED(argc); ARG_UNUSED(argv); #ifdef CONFIG_SENSOR_INFO const char *null_str = "(null)"; STRUCT_SECTION_FOREACH(sensor_info, sensor) { shell_print(sh, "device name: %s, vendor: %s, model: %s, " "friendly name: %s", sensor->dev->name, sensor->vendor ? sensor->vendor : null_str, sensor->model ? sensor->model : null_str, sensor->friendly_name ? sensor->friendly_name : null_str); } return 0; #else return -EINVAL; #endif } static void data_ready_trigger_handler(const struct device *sensor, const struct sensor_trigger *trigger) { const int64_t now = k_uptime_get(); struct sensor_value value; int sensor_idx = find_sensor_trigger_device(sensor); struct sample_stats *stats; int sensor_name_len_before_at; const char *sensor_name; if (sensor_idx < 0) { LOG_ERR("Unable to find sensor trigger device"); return; } stats = sensor_stats[sensor_idx]; sensor_name = sensor_trigger_devices[sensor_idx]->name; if (sensor_name) { sensor_name_len_before_at = strchr(sensor_name, '@') - sensor_name; } else { sensor_name_len_before_at = 0; } if (sensor_sample_fetch(sensor)) { LOG_ERR("Failed to fetch samples on data ready handler"); } for (int i = 0; i < SENSOR_CHAN_ALL; ++i) { int rc; /* Skip disabled channels */ if (stats[i].state == SAMPLE_STATS_STATE_DISABLED) { continue; } /* Skip 3 axis channels */ if (SENSOR_CHANNEL_3_AXIS(i)) { continue; } 
rc = sensor_channel_get(sensor, i, &value); if (stats[i].state == SAMPLE_STATS_STATE_UNINITIALIZED) { if (rc == -ENOTSUP) { /* * Stop reading this channel if the driver told us * it's not supported. */ stats[i].state = SAMPLE_STATS_STATE_DISABLED; } else if (rc == 0) { stats[i].state = SAMPLE_STATS_STATE_ENABLED; } } if (rc != 0) { /* Skip on any error. */ continue; } /* Do something with the data */ stats[i].accumulator += value.val1 * INT64_C(1000000) + value.val2; if (stats[i].count++ == 0) { stats[i].sample_window_start = now; } else if (now > stats[i].sample_window_start + CONFIG_SENSOR_SHELL_TRIG_PRINT_TIMEOUT_MS) { int64_t micro_value = stats[i].accumulator / stats[i].count; value.val1 = micro_value / 1000000; value.val2 = (int32_t)llabs(micro_value - (value.val1 * 1000000)); LOG_INF("sensor=%.*s, chan=%s, num_samples=%u, data=%d.%06d", sensor_name_len_before_at, sensor_name, sensor_channel_name[i], stats[i].count, value.val1, value.val2); stats[i].accumulator = 0; stats[i].count = 0; } } } static int cmd_trig_sensor(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int trigger; bool trigger_enabled = false; int err; if (argc < 4) { shell_error(sh, "Wrong number of args"); return -EINVAL; } /* Parse device name */ dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Device unknown (%s)", argv[1]); return -ENODEV; } /* Map the trigger string to an enum value */ trigger = sensor_trigger_name_lookup(argv[3]); if (trigger < 0 || sensor_trigger_table[trigger].handler == NULL) { shell_error(sh, "Unsupported trigger type (%s)", argv[3]); return -ENOTSUP; } /* Parse on/off */ if (strcmp(argv[2], "on") == 0) { /* find a free entry in sensor_trigger_devices[] */ int sensor_idx = find_sensor_trigger_device(NULL); if (sensor_idx < 0) { shell_error(sh, "Unable to support more simultaneous sensor trigger" " devices"); err = -ENOTSUP; } else { struct sample_stats *stats = sensor_stats[sensor_idx]; 
sensor_trigger_devices[sensor_idx] = dev; /* reset stats state to UNINITIALIZED */ for (unsigned int ch = 0; ch < SENSOR_CHAN_ALL; ch++) { stats[ch].state = SAMPLE_STATS_STATE_UNINITIALIZED; } err = sensor_trigger_set(dev, &sensor_trigger_table[trigger].trigger, sensor_trigger_table[trigger].handler); trigger_enabled = true; } } else if (strcmp(argv[2], "off") == 0) { /* Clear the handler for the given trigger on this device */ err = sensor_trigger_set(dev, &sensor_trigger_table[trigger].trigger, NULL); if (!err) { /* find entry in sensor_trigger_devices[] and free it */ int sensor_idx = find_sensor_trigger_device(dev); if (sensor_idx < 0) { shell_error(sh, "Unable to find sensor device in trigger array"); } else { sensor_trigger_devices[sensor_idx] = NULL; } } } else { shell_error(sh, "Pass 'on' or 'off' to enable/disable trigger"); return -EINVAL; } if (err) { shell_error(sh, "Error while setting trigger %d on device %s (%d)", trigger, argv[1], err); } else { shell_info(sh, "%s trigger idx=%d %s on device %s", trigger_enabled ? "Enabled" : "Disabled", trigger, sensor_trigger_table[trigger].name, argv[1]); } return err; } /* clang-format off */ SHELL_STATIC_SUBCMD_SET_CREATE(sub_sensor, SHELL_CMD_ARG(get, &dsub_device_name, SENSOR_GET_HELP, cmd_get_sensor, 2, 255), SHELL_CMD_ARG(attr_set, &dsub_device_name_for_attr, SENSOR_ATTR_SET_HELP, cmd_sensor_attr_set, 2, 255), SHELL_CMD_ARG(attr_get, &dsub_device_name_for_attr, SENSOR_ATTR_GET_HELP, cmd_sensor_attr_get, 2, 255), SHELL_COND_CMD(CONFIG_SENSOR_SHELL_STREAM, stream, &dsub_device_name_for_stream, SENSOR_STREAM_HELP, cmd_sensor_stream), SHELL_COND_CMD(CONFIG_SENSOR_INFO, info, NULL, SENSOR_INFO_HELP, cmd_get_sensor_info), SHELL_CMD_ARG(trig, &dsub_trigger, SENSOR_TRIG_HELP, cmd_trig_sensor, 2, 255), SHELL_SUBCMD_SET_END ); /* clang-format on */ SHELL_CMD_REGISTER(sensor, &sub_sensor, "Sensor commands", NULL); ```
/content/code_sandbox/drivers/sensor/sensor_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,157
```c /* * */ #define DT_DRV_COMPAT swir_hl7800 #include <zephyr/logging/log.h> #include <zephyr/logging/log_ctrl.h> #define LOG_MODULE_NAME modem_hl7800 LOG_MODULE_REGISTER(LOG_MODULE_NAME, CONFIG_MODEM_LOG_LEVEL); #include <zephyr/types.h> #include <stddef.h> #include <stdlib.h> #include <ctype.h> #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/pm/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/sys/util.h> #include <zephyr/net/net_context.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_offload.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/dns_resolve.h> #include <zephyr/net/offloaded_netdev.h> #if defined(CONFIG_NET_IPV6) #include "ipv6.h" #endif #if defined(CONFIG_NET_IPV4) #include "ipv4.h" #endif #if defined(CONFIG_NET_UDP) #include "udp_internal.h" #endif #ifdef CONFIG_MODEM_HL7800_FW_UPDATE #include <zephyr/fs/fs.h> #endif #include "modem_receiver.h" #include <zephyr/drivers/modem/hl7800.h> #define PREFIXED_SWITCH_CASE_RETURN_STRING(prefix, val) \ case prefix##_##val: { \ return #val; \ } /* Uncomment the #define below to enable a hexdump of all incoming * data from the modem receiver */ /* #define HL7800_ENABLE_VERBOSE_MODEM_RECV_HEXDUMP 1 */ #define HL7800_LOG_UNHANDLED_RX_MSGS 1 /* Uncomment the #define(s) below to enable extra debugging */ /* #define HL7800_RX_LOCK_LOG 1 */ /* #define HL7800_TX_LOCK_LOG 1 */ /* #define HL7800_IO_LOG 1 */ #define HL7800_RX_LOCK_DBG_LOG(fmt, ...) \ do { \ if (IS_ENABLED(HL7800_RX_LOCK_LOG)) { \ LOG_DBG(fmt, ##__VA_ARGS__); \ } \ } while (false) #define HL7800_TX_LOCK_DBG_LOG(fmt, ...) \ do { \ if (IS_ENABLED(HL7800_TX_LOCK_LOG)) { \ LOG_DBG(fmt, ##__VA_ARGS__); \ } \ } while (false) #define HL7800_IO_DBG_LOG(fmt, ...) 
\ do { \ if (IS_ENABLED(HL7800_IO_LOG)) { \ LOG_WRN(fmt, ##__VA_ARGS__); \ } \ } while (false) #if ((LOG_LEVEL == LOG_LEVEL_DBG) && \ defined(CONFIG_MODEM_HL7800_LOW_POWER_MODE)) #define PRINT_AWAKE_MSG LOG_WRN("awake") #define PRINT_NOT_AWAKE_MSG LOG_WRN("NOT awake") #else #define PRINT_AWAKE_MSG #define PRINT_NOT_AWAKE_MSG #endif enum tcp_notif { HL7800_TCP_NET_ERR, HL7800_TCP_NO_SOCKS, HL7800_TCP_MEM, HL7800_TCP_DNS, HL7800_TCP_DISCON, HL7800_TCP_CONN, HL7800_TCP_ERR, HL7800_TCP_CLIENT_REQ, HL7800_TCP_DATA_SND, HL7800_TCP_ID, HL7800_TCP_RUNNING, HL7800_TCP_ALL_USED, HL7800_TCP_TIMEOUT, HL7800_TCP_SSL_CONN, HL7800_TCP_SSL_INIT }; enum udp_notif { HL7800_UDP_NET_ERR = 0, HL7800_UDP_NO_SOCKS = 1, HL7800_UDP_MEM = 2, HL7800_UDP_DNS = 3, HL7800_UDP_CONN = 5, HL7800_UDP_ERR = 6, HL7800_UDP_DATA_SND = 8, /* this matches TCP_DATA_SND */ HL7800_UDP_ID = 9, HL7800_UDP_RUNNING = 10, HL7800_UDP_ALL_USED = 11 }; enum socket_state { SOCK_IDLE, SOCK_RX, SOCK_TX, SOCK_CONNECTED, }; enum hl7800_lpm { HL7800_LPM_NONE, HL7800_LPM_EDRX, HL7800_LPM_PSM, }; /* pin settings */ enum mdm_control_pins { MDM_RESET = 0, MDM_WAKE, MDM_PWR_ON, MDM_FAST_SHUTD, MDM_VGPIO, MDM_UART_DSR, MDM_UART_CTS, MDM_GPIO6, MAX_MDM_CONTROL_PINS, }; enum net_operator_status { NO_OPERATOR, REGISTERED }; enum device_service_indications { WDSI_PKG_DOWNLOADED = 3, }; #ifdef CONFIG_MODEM_HL7800_FW_UPDATE enum XMODEM_CONTROL_CHARACTERS { XM_SOH = 0x01, XM_SOH_1K = 0x02, XM_EOT = 0x04, XM_ACK = 0x06, /* 'R' */ XM_NACK = 0x15, /* 'N' */ XM_ETB = 0x17, XM_CAN = 0x18, XM_C = 0x43 }; #define XMODEM_DATA_SIZE 1024 #define XMODEM_PACKET_SIZE (XMODEM_DATA_SIZE + 4) #define XMODEM_PAD_VALUE 26 struct xmodem_packet { uint8_t preamble; uint8_t id; uint8_t id_complement; uint8_t data[XMODEM_DATA_SIZE]; uint8_t crc; }; #endif #define MDM_UART_DEV DEVICE_DT_GET(DT_INST_BUS(0)) #define MDM_SEND_OK_ENABLED 0 #define MDM_SEND_OK_DISABLED 1 #define MDM_CMD_SEND_TIMEOUT K_SECONDS(6) #define MDM_IP_SEND_RX_TIMEOUT K_SECONDS(62) 
#define MDM_SOCK_NOTIF_DELAY K_MSEC(150) #define MDM_CMD_CONN_TIMEOUT K_SECONDS(31) #define MDM_MAX_DATA_LENGTH 1500 #define MDM_MTU 1500 #define MDM_MAX_RESP_SIZE 128 #define MDM_IP_INFO_RESP_SIZE 256 #define MDM_EID_LENGTH 33 #define MDM_CCID_RESP_MAX_SIZE (MDM_HL7800_ICCID_MAX_SIZE + MDM_EID_LENGTH) #define MDM_HANDLER_MATCH_MAX_LEN 100 #define MDM_MAX_SOCKETS 6 /* Special value used to indicate that a socket is being created * and that its actual ID hasn't been assigned yet. */ #define MDM_CREATE_SOCKET_ID (MDM_MAX_SOCKETS + 1) #define MDM_INVALID_SOCKET_ID -1 #define BUF_ALLOC_TIMEOUT K_SECONDS(1) #define SIZE_OF_NUL 1 #define SIZE_WITHOUT_NUL(v) (sizeof(v) - SIZE_OF_NUL) #define CMD_HANDLER(cmd_, cb_) \ { \ .cmd = cmd_, .cmd_len = (uint16_t)sizeof(cmd_) - 1, \ .func = on_cmd_##cb_ \ } #define MDM_MANUFACTURER_LENGTH 16 #define MDM_MODEL_LENGTH 7 #define MDM_SN_RESPONSE_LENGTH (MDM_HL7800_SERIAL_NUMBER_SIZE + 7) #define MDM_NETWORK_STATUS_LENGTH 45 #define MDM_TOP_BAND_SIZE 4 #define MDM_MIDDLE_BAND_SIZE 8 #define MDM_BOTTOM_BAND_SIZE 8 #define MDM_TOP_BAND_START_POSITION 2 #define MDM_MIDDLE_BAND_START_POSITION 6 #define MDM_BOTTOM_BAND_START_POSITION 14 #define MDM_BAND_BITMAP_STR_LENGTH_MAX \ (MDM_TOP_BAND_SIZE + MDM_MIDDLE_BAND_SIZE + MDM_BOTTOM_BAND_SIZE) #define MDM_BAND_BITMAP_STR_LENGTH_MIN 1 #define MDM_DEFAULT_AT_CMD_RETRIES 3 #define MDM_WAKEUP_TIME K_SECONDS(12) #define MDM_BOOT_TIME K_SECONDS(12) #define MDM_WAKE_TO_CHECK_CTS_DELAY_MS K_MSEC(20) #define MDM_WAIT_FOR_DATA_TIME K_MSEC(50) #define MDM_RESET_LOW_TIME K_MSEC(50) #define MDM_RESET_HIGH_TIME K_MSEC(10) #define MDM_WAIT_FOR_DATA_RETRIES 3 #define RSSI_UNKNOWN -999 #define DNS_WORK_DELAY_SECS 1 #define IFACE_WORK_DELAY K_MSEC(500) #define SOCKET_CLEANUP_WORK_DELAY K_MSEC(100) #define WAIT_FOR_KSUP_RETRIES 5 #define CGCONTRDP_RESPONSE_NUM_DELIMS 7 #define COPS_RESPONSE_NUM_DELIMS 2 #define KCELLMEAS_RESPONSE_NUM_DELIMS 4 #define PROFILE_LINE_1 \ "E1 Q0 V1 X4 &C1 &D1 &R1 &S0 +IFC=2,2 &K3 
+IPR=115200 +FCLASS0\r\n" #define PROFILE_LINE_2 \ "S00:255 S01:255 S03:255 S04:255 S05:255 S07:255 S08:255 S10:255\r\n" #define ADDRESS_FAMILY_IP "IP" #define ADDRESS_FAMILY_IPV4 "IPV4" #if defined(CONFIG_MODEM_HL7800_ADDRESS_FAMILY_IPV4V6) #define MODEM_HL7800_ADDRESS_FAMILY "IPV4V6" #elif defined(CONFIG_MODEM_HL7800_ADDRESS_FAMILY_IPV4) #define MODEM_HL7800_ADDRESS_FAMILY ADDRESS_FAMILY_IPV4 #else #define MODEM_HL7800_ADDRESS_FAMILY "IPV6" #endif #define MDM_HL7800_SOCKET_AF_IPV4 0 #define MDM_HL7800_SOCKET_AF_IPV6 1 #define SET_RAT_M1_CMD_LEGACY "AT+KSRAT=0" #define SET_RAT_NB1_CMD_LEGACY "AT+KSRAT=1" #define SET_RAT_M1_CMD "AT+KSRAT=0,1" #define SET_RAT_NB1_CMD "AT+KSRAT=1,1" #define NEW_RAT_CMD_MIN_VERSION "HL7800.4.5.4.0" #define HL7800_VERSION_FORMAT "HL7800.%zu.%zu.%zu.%zu" #define MAX_PROFILE_LINE_LENGTH \ MAX(sizeof(PROFILE_LINE_1), sizeof(PROFILE_LINE_2)) #define IPV6_ADDR_FORMAT "####:####:####:####:####:####:####:####" #define HL7800_IPV6_ADDR_LEN \ sizeof("a01.a02.a03.a04.a05.a06.a07.a08.a09.a10.a11.a12.a13.a14.a15.a16") #define MDM_ADDR_FAM_MAX_LEN sizeof("IPV4V6") /* The ? 
can be a + or - */ static const char TIME_STRING_FORMAT[] = "\"yy/MM/dd,hh:mm:ss?zz\""; #define TIME_STRING_DIGIT_STRLEN 2 #define TIME_STRING_SEPARATOR_STRLEN 1 #define TIME_STRING_PLUS_MINUS_INDEX (6 * 3) #define TIME_STRING_FIRST_SEPARATOR_INDEX 0 #define TIME_STRING_FIRST_DIGIT_INDEX 1 #define TIME_STRING_TO_TM_STRUCT_YEAR_OFFSET (2000 - 1900) /* Time structure min, max */ #define TM_YEAR_RANGE 0, 99 #define TM_MONTH_RANGE_PLUS_1 1, 12 #define TM_DAY_RANGE 1, 31 #define TM_HOUR_RANGE 0, 23 #define TM_MIN_RANGE 0, 59 #define TM_SEC_RANGE 0, 60 /* leap second */ #define QUARTER_HOUR_RANGE 0, 96 #define SECONDS_PER_QUARTER_HOUR (15 * 60) #define SEND_AT_CMD_ONCE_EXPECT_OK(c) \ do { \ ret = send_at_cmd(NULL, (c), MDM_CMD_SEND_TIMEOUT, 0, false); \ if (ret < 0) { \ LOG_ERR("%s result:%d", (c), ret); \ goto error; \ } \ } while (false) #define SEND_AT_CMD_IGNORE_ERROR(c) \ do { \ ret = send_at_cmd(NULL, (c), MDM_CMD_SEND_TIMEOUT, 0, false); \ if (ret < 0) { \ LOG_ERR("%s result:%d", (c), ret); \ } \ } while (false) #define SEND_AT_CMD_EXPECT_OK(c) \ do { \ ret = send_at_cmd(NULL, (c), MDM_CMD_SEND_TIMEOUT, \ MDM_DEFAULT_AT_CMD_RETRIES, false); \ if (ret < 0) { \ LOG_ERR("%s result:%d", (c), ret); \ goto error; \ } \ } while (false) /* Complex has "no_id_resp" set to true because the sending command * is the command used to process the response */ #define SEND_COMPLEX_AT_CMD(c) \ do { \ ret = send_at_cmd(NULL, (c), MDM_CMD_SEND_TIMEOUT, \ MDM_DEFAULT_AT_CMD_RETRIES, true); \ if (ret < 0) { \ LOG_ERR("%s result:%d", (c), ret); \ goto error; \ } \ } while (false) NET_BUF_POOL_DEFINE(mdm_recv_pool, CONFIG_MODEM_HL7800_RECV_BUF_CNT, CONFIG_MODEM_HL7800_RECV_BUF_SIZE, 0, NULL); static uint8_t mdm_recv_buf[MDM_MAX_DATA_LENGTH]; static K_SEM_DEFINE(hl7800_RX_lock_sem, 1, 1); static K_SEM_DEFINE(hl7800_TX_lock_sem, 1, 1); static K_SEM_DEFINE(cb_lock, 1, 1); /* RX thread structures */ K_THREAD_STACK_DEFINE(hl7800_rx_stack, CONFIG_MODEM_HL7800_RX_STACK_SIZE); struct k_thread 
hl7800_rx_thread; #define RX_THREAD_PRIORITY K_PRIO_COOP(7) /* RX thread work queue */ K_THREAD_STACK_DEFINE(hl7800_workq_stack, CONFIG_MODEM_HL7800_RX_WORKQ_STACK_SIZE); static struct k_work_q hl7800_workq; #define WORKQ_PRIORITY K_PRIO_COOP(7) static const char EOF_PATTERN[] = "--EOF--Pattern--"; static const char CONNECT_STRING[] = "CONNECT"; static const char OK_STRING[] = "OK"; struct hl7800_socket { struct net_context *context; sa_family_t family; enum net_sock_type type; enum net_ip_protocol ip_proto; struct sockaddr src; struct sockaddr dst; bool created; bool reconfig; int socket_id; int rx_size; int error; enum socket_state state; /** semaphore */ struct k_sem sock_send_sem; /** socket callbacks */ struct k_work recv_cb_work; struct k_work rx_data_work; struct k_work_delayable notif_work; net_context_recv_cb_t recv_cb; struct net_pkt *recv_pkt; void *recv_user_data; }; struct stale_socket { int reserved; /* first word of queue data item reserved for the kernel */ enum net_sock_type type; uint8_t id; bool allocated; }; #define NO_ID_RESP_CMD_MAX_LENGTH 32 struct hl7800_config { struct gpio_dt_spec gpio[MAX_MDM_CONTROL_PINS]; }; struct hl7800_iface_ctx { struct net_if *iface; uint8_t mac_addr[6]; struct in_addr ipv4Addr, subnet, gateway, dns_v4; #ifdef CONFIG_NET_IPV6 struct in6_addr ipv6Addr, dns_v6; char dns_v6_string[HL7800_IPV6_ADDR_LEN]; #endif bool restarting; bool initialized; bool wait_for_KSUP; uint32_t wait_for_KSUP_tries; bool reconfig_IP_connection; char dns_v4_string[NET_IPV4_ADDR_LEN]; char no_id_resp_cmd[NO_ID_RESP_CMD_MAX_LENGTH]; bool search_no_id_resp; /* GPIO PORT devices */ struct gpio_callback mdm_vgpio_cb; struct gpio_callback mdm_uart_dsr_cb; struct gpio_callback mdm_gpio6_cb; struct gpio_callback mdm_uart_cts_cb; int vgpio_state; int dsr_state; int gpio6_state; int cts_state; int last_cts_state; int last_cts_time; /* RX specific attributes */ struct mdm_receiver_context mdm_ctx; /* socket data */ struct hl7800_socket 
sockets[MDM_MAX_SOCKETS]; int last_socket_id; int last_error; struct stale_socket stale_sockets[MDM_MAX_SOCKETS]; struct k_queue stale_socket_queue; /* semaphores */ struct k_sem response_sem; struct k_sem mdm_awake; /* work */ struct k_work_delayable rssi_query_work; struct k_work_delayable iface_status_work; struct k_work_delayable dns_work; struct k_work mdm_vgpio_work; struct k_work_delayable mdm_reset_work; struct k_work_delayable allow_sleep_work; struct k_work_delayable delete_untracked_socket_work; struct k_work mdm_pwr_off_work; #ifdef CONFIG_MODEM_HL7800_FW_UPDATE /* firmware update */ enum mdm_hl7800_fota_state fw_update_state; struct fs_file_t fw_update_file; struct xmodem_packet fw_packet; uint32_t fw_packet_count; int file_pos; struct k_work finish_fw_update_work; bool fw_updated; #endif /* modem info */ /* NOTE: make sure length is +1 for null char */ char mdm_manufacturer[MDM_MANUFACTURER_LENGTH]; char mdm_model[MDM_MODEL_LENGTH]; char mdm_revision[MDM_HL7800_REVISION_MAX_SIZE]; char mdm_imei[MDM_HL7800_IMEI_SIZE]; char mdm_sn[MDM_HL7800_SERIAL_NUMBER_SIZE]; char mdm_network_status[MDM_NETWORK_STATUS_LENGTH]; char mdm_iccid[MDM_HL7800_ICCID_MAX_SIZE]; enum mdm_hl7800_startup_state mdm_startup_state; enum mdm_hl7800_radio_mode mdm_rat; char mdm_active_bands_string[MDM_HL7800_LTE_BAND_STR_SIZE]; char mdm_bands_string[MDM_HL7800_LTE_BAND_STR_SIZE]; char mdm_imsi[MDM_HL7800_IMSI_MAX_STR_SIZE]; int mdm_rssi; uint16_t mdm_bands_top; uint32_t mdm_bands_middle; uint32_t mdm_bands_bottom; int32_t mdm_sinr; bool mdm_echo_is_on; struct mdm_hl7800_apn mdm_apn; bool mdm_startup_reporting_on; int device_services_ind; bool new_rat_cmd_support; uint8_t operator_index; enum mdm_hl7800_functionality functionality; char mdm_pdp_addr_fam[MDM_ADDR_FAM_MAX_LEN]; /* modem state */ bool busy; bool socket_cmd; bool allow_sleep; enum mdm_hl7800_sleep desired_sleep_level; enum mdm_hl7800_sleep sleep_state; enum hl7800_lpm low_power_mode; enum mdm_hl7800_network_state 
/* One entry in the modem response dispatch table: maps a response/URC
 * prefix string to the parser invoked when that prefix is matched.
 */
struct cmd_handler {
	const char *cmd;  /* response prefix to match against incoming data */
	uint16_t cmd_len; /* presumably strlen(cmd), precomputed — confirm at table init */
	/* parser; buf/len cover the data following the matched prefix */
	bool (*func)(struct net_buf **buf, uint16_t len);
};
static void mark_sockets_for_reconfig(void); #endif static void hl7800_build_mac(struct hl7800_iface_ctx *ictx); static void rssi_query(void); #ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE static void initialize_sleep_level(void); static int set_sleep_level(void); #endif #ifdef CONFIG_MODEM_HL7800_FW_UPDATE static char *get_fota_state_string(enum mdm_hl7800_fota_state state); static void set_fota_state(enum mdm_hl7800_fota_state state); static void generate_fota_state_event(void); static void generate_fota_count_event(void); #endif static struct stale_socket *alloc_stale_socket(void) { struct stale_socket *sock = NULL; for (int i = 0; i < MDM_MAX_SOCKETS; i++) { if (!iface_ctx.stale_sockets[i].allocated) { sock = &iface_ctx.stale_sockets[i]; sock->allocated = true; break; } } return sock; } static void free_stale_socket(struct stale_socket *sock) { if (sock != NULL) { sock->allocated = false; } } static int queue_stale_socket(enum net_sock_type type, uint8_t id) { int ret = 0; struct stale_socket *sock = NULL; sock = alloc_stale_socket(); if (sock != NULL) { LOG_DBG("Queueing stale socket %d", id); sock->type = type; sock->id = id; k_queue_append(&iface_ctx.stale_socket_queue, (void *)sock); } else { LOG_ERR("Could not alloc stale socket"); ret = -ENOMEM; } return ret; } static struct stale_socket *dequeue_stale_socket(void) { struct stale_socket *sock = NULL; sock = (struct stale_socket *)k_queue_get(&iface_ctx.stale_socket_queue, K_NO_WAIT); return sock; } static bool convert_time_string_to_struct(struct tm *tm, int32_t *offset, char *time_string); static int modem_reset_and_configure(void); static int read_pin(int default_state, const struct gpio_dt_spec *spec) { int state = gpio_pin_get_raw(spec->port, spec->pin); if (state < 0) { LOG_ERR("Unable to read port: %s pin: %d status: %d", spec->port->name, spec->pin, state); state = default_state; } return state; } #ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE static bool is_cmd_ready(void) { iface_ctx.vgpio_state = 
/**
 * The definition of awake is that the HL7800
 * is ready to receive AT commands successfully.
 *
 * Reconciles the driver's sleep-state bookkeeping with the hardware pins
 * (via is_cmd_ready()). Transitions to AWAKE when the modem signals command
 * readiness while we expect it awake; transitions toward the desired sleep
 * level when readiness is lost while sleep is allowed. No-op unless
 * CONFIG_MODEM_HL7800_LOW_POWER_MODE is enabled.
 */
static void check_hl7800_awake(void)
{
#ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE
	bool is_cmd_rdy = is_cmd_ready();

	if (is_cmd_rdy && (iface_ctx.sleep_state != HL7800_SLEEP_AWAKE) &&
	    !iface_ctx.allow_sleep && !iface_ctx.wait_for_KSUP) {
		/* Modem became ready while we are holding it awake: record the
		 * transition and release anyone blocked in wakeup_hl7800().
		 */
		PRINT_AWAKE_MSG;
		set_sleep_state(HL7800_SLEEP_AWAKE);
		k_sem_give(&iface_ctx.mdm_awake);
	} else if (!is_cmd_rdy &&
		   iface_ctx.sleep_state == HL7800_SLEEP_AWAKE &&
		   iface_ctx.allow_sleep) {
		PRINT_NOT_AWAKE_MSG;

		if (iface_ctx.desired_sleep_level == HL7800_SLEEP_HIBERNATE ||
		    iface_ctx.desired_sleep_level == HL7800_SLEEP_LITE_HIBERNATE) {
			/* If the device is sleeping (not ready to receive commands)
			 * then the device may send +KSUP when waking up.
			 * We should wait for it.
			 */
			iface_ctx.wait_for_KSUP = true;
			iface_ctx.wait_for_KSUP_tries = 0;

			set_sleep_state(iface_ctx.desired_sleep_level);

		} else if (iface_ctx.desired_sleep_level == HL7800_SLEEP_SLEEP) {
			set_sleep_state(HL7800_SLEEP_SLEEP);
		}
	}
#endif
}
MDM_MAX_SOCKETS; i++) { if (iface_ctx.sockets[i].socket_id == socket_id) { sock = &iface_ctx.sockets[i]; break; } } return sock; } static inline void set_busy(bool busy) { iface_ctx.busy = busy; } static void socket_put(struct hl7800_socket *sock) { if (!sock) { return; } sock->context = NULL; sock->socket_id = MDM_INVALID_SOCKET_ID; sock->created = false; sock->reconfig = false; sock->error = 0; sock->rx_size = 0; sock->state = SOCK_IDLE; (void)memset(&sock->src, 0, sizeof(struct sockaddr)); (void)memset(&sock->dst, 0, sizeof(struct sockaddr)); } char *hl7800_sprint_ip_addr(const struct sockaddr *addr) { static char buf[NET_IPV6_ADDR_LEN]; #if defined(CONFIG_NET_IPV6) if (addr->sa_family == AF_INET6) { return net_addr_ntop(AF_INET6, &net_sin6(addr)->sin6_addr, buf, sizeof(buf)); } else #endif #if defined(CONFIG_NET_IPV4) if (addr->sa_family == AF_INET) { return net_addr_ntop(AF_INET, &net_sin(addr)->sin_addr, buf, sizeof(buf)); } else #endif { LOG_ERR("Unknown IP address family:%d", addr->sa_family); return NULL; } } void mdm_hl7800_register_wake_test_point_callback(void (*func)(int state)) { iface_ctx.wake_up_callback = func; } void mdm_hl7800_register_gpio6_callback(void (*func)(int state)) { iface_ctx.gpio6_callback = func; } void mdm_hl7800_register_cts_callback(void (*func)(int state)) { iface_ctx.cts_callback = func; } static void modem_assert_reset(bool assert) { if (assert) { HL7800_IO_DBG_LOG("MDM_RESET -> ASSERTED"); gpio_pin_set_dt(&hl7800_cfg.gpio[MDM_RESET], 1); } else { HL7800_IO_DBG_LOG("MDM_RESET -> NOT_ASSERTED"); gpio_pin_set_dt(&hl7800_cfg.gpio[MDM_RESET], 0); } } static void modem_assert_wake(bool assert) { int state; if (assert) { HL7800_IO_DBG_LOG("MDM_WAKE_PIN -> ASSERTED"); state = 1; } else { HL7800_IO_DBG_LOG("MDM_WAKE_PIN -> NOT_ASSERTED"); state = 0; } gpio_pin_set_dt(&hl7800_cfg.gpio[MDM_WAKE], state); if (iface_ctx.wake_up_callback != NULL) { iface_ctx.wake_up_callback(state); } } static void modem_assert_pwr_on(bool assert) { if 
/* Enable or disable permission for the modem to enter low power mode.
 *
 * allow == true: schedule allow_sleep_work after
 * CONFIG_MODEM_HL7800_ALLOW_SLEEP_DELAY_MS (unless the driver is restarting
 * or busy, in which case any pending grant is cancelled instead).
 * allow == false: cancel any pending grant, clear the allow_sleep flag and
 * assert the wake pin immediately.
 * No-op unless CONFIG_MODEM_HL7800_LOW_POWER_MODE is enabled.
 */
static void allow_sleep(bool allow)
{
#ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE
	if (allow) {
		if (!iface_ctx.restarting && !iface_ctx.busy) {
			/* Delay the grant so back-to-back commands don't
			 * bounce the modem in and out of sleep.
			 */
			k_work_reschedule_for_queue(
				&hl7800_workq, &iface_ctx.allow_sleep_work,
				K_MSEC(CONFIG_MODEM_HL7800_ALLOW_SLEEP_DELAY_MS));
		} else {
			k_work_cancel_delayable(&iface_ctx.allow_sleep_work);
		}
	} else {
		LOG_DBG("Keep awake");
		k_work_cancel_delayable(&iface_ctx.allow_sleep_work);
		iface_ctx.allow_sleep = false;
		modem_assert_wake(true);
	}
#endif
}
/* Send an AT command with a series of response handlers.
 *
 * @param sock       socket the command relates to, or NULL for a global
 *                   (non-socket) command; selects which semaphore/error
 *                   fields are used to wait for and report the result.
 * @param data       NUL-terminated command string (sent followed by '\r').
 * @param timeout    how long to wait for the response; K_NO_WAIT sends
 *                   without waiting and returns immediately.
 * @param retries    additional attempts while the result is non-zero.
 * @param no_id_resp true when the response does not echo a command/session
 *                   id; the command text itself is stored so the RX parser
 *                   can match the response (search_no_id_resp).
 * @return 0 on success, negative errno (-ETIMEDOUT on semaphore timeout)
 *         or the error reported by the response handlers.
 */
static int send_at_cmd(struct hl7800_socket *sock, const uint8_t *data,
		       k_timeout_t timeout, int retries, bool no_id_resp)
{
	int ret = 0;

	iface_ctx.last_error = 0;

	do {
		/* Reset the wait/error state that belongs to this command's
		 * scope (global vs. per-socket) before transmitting.
		 */
		if (!sock) {
			k_sem_reset(&iface_ctx.response_sem);
			iface_ctx.last_socket_id = 0;
			iface_ctx.socket_cmd = false;
		} else {
			sock->error = 0;
			iface_ctx.socket_cmd = true;
			k_sem_reset(&sock->sock_send_sem);
			iface_ctx.last_socket_id = sock->socket_id;
		}
		if (no_id_resp) {
			/* Remember the command so the parser can recognize the
			 * id-less response. no_id_resp_cmd is sized with room
			 * for the terminator (size - 1 copy limit).
			 */
			strncpy(iface_ctx.no_id_resp_cmd, data,
				sizeof(iface_ctx.no_id_resp_cmd) - 1);
			iface_ctx.search_no_id_resp = true;
		}

		LOG_DBG("OUT: [%s]", (char *)data);
		mdm_receiver_send(&iface_ctx.mdm_ctx, data, strlen(data));
		mdm_receiver_send(&iface_ctx.mdm_ctx, "\r", 1);

		if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			goto done;
		}

		if (!sock) {
			ret = k_sem_take(&iface_ctx.response_sem, timeout);
		} else {
			ret = k_sem_take(&sock->sock_send_sem, timeout);
		}

		if (ret == 0) {
			/* Semaphore was given: the handlers recorded the
			 * actual command result.
			 */
			if (sock) {
				ret = sock->error;
			} else {
				ret = iface_ctx.last_error;
			}
		} else if (ret == -EAGAIN) {
			ret = -ETIMEDOUT;
		}

		/* Decrement after each attempt; loop repeats while the result
		 * is non-zero and retries remain.
		 */
		retries--;
		if (retries < 0) {
			retries = 0;
		}
	} while (ret != 0 && retries > 0);
done:
	iface_ctx.search_no_id_resp = false;
	return ret;
}
*/ if (iface_ctx.desired_sleep_level == HL7800_SLEEP_SLEEP) { k_sleep(MDM_WAKE_TO_CHECK_CTS_DELAY_MS); } if (!is_cmd_ready()) { LOG_DBG("Waiting to wakeup"); ret = k_sem_take(&iface_ctx.mdm_awake, MDM_WAKEUP_TIME); if (ret) { LOG_DBG("Err waiting for wakeup: %d", ret); } } #endif return 0; } int32_t mdm_hl7800_send_at_cmd(const uint8_t *data) { int ret; if (!data) { return -EINVAL; } hl7800_lock(); wakeup_hl7800(); iface_ctx.last_socket_id = 0; ret = send_at_cmd(NULL, data, MDM_CMD_SEND_TIMEOUT, 0, false); set_busy(false); allow_sleep(true); hl7800_unlock(); return ret; } /* The access point name (and username and password) are stored in the modem's * non-volatile memory. */ int32_t mdm_hl7800_update_apn(char *access_point_name) { int ret = -EINVAL; hl7800_lock(); wakeup_hl7800(); iface_ctx.last_socket_id = 0; ret = write_apn(access_point_name); set_busy(false); allow_sleep(true); hl7800_unlock(); if (ret >= 0) { /* After a reset the APN will be re-read from the modem * and an event will be generated. */ k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.mdm_reset_work, K_NO_WAIT); } return ret; } bool mdm_hl7800_valid_rat(uint8_t value) { if ((value == MDM_RAT_CAT_M1) || (value == MDM_RAT_CAT_NB1)) { return true; } return false; } int32_t mdm_hl7800_update_rat(enum mdm_hl7800_radio_mode value) { int ret = -EINVAL; if (value == iface_ctx.mdm_rat) { /* The set command will fail (in the modem) * if the RAT isn't different. 
*/ return 0; } else if (!mdm_hl7800_valid_rat(value)) { return ret; } hl7800_lock(); wakeup_hl7800(); iface_ctx.last_socket_id = 0; if (value == MDM_RAT_CAT_M1) { if (iface_ctx.new_rat_cmd_support) { SEND_AT_CMD_ONCE_EXPECT_OK(SET_RAT_M1_CMD); } else { SEND_AT_CMD_ONCE_EXPECT_OK(SET_RAT_M1_CMD_LEGACY); } } else { /* MDM_RAT_CAT_NB1 */ if (iface_ctx.new_rat_cmd_support) { SEND_AT_CMD_ONCE_EXPECT_OK(SET_RAT_NB1_CMD); } else { SEND_AT_CMD_ONCE_EXPECT_OK(SET_RAT_NB1_CMD_LEGACY); } } error: set_busy(false); allow_sleep(true); hl7800_unlock(); /* Changing the RAT causes the modem to reset. * A reset and reconfigure ensures the modem configuration and * state are valid. */ if (ret >= 0) { k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.mdm_reset_work, K_NO_WAIT); } return ret; } int32_t mdm_hl7800_get_local_time(struct tm *tm, int32_t *offset) { int ret; iface_ctx.local_time_valid = false; hl7800_lock(); wakeup_hl7800(); iface_ctx.last_socket_id = 0; ret = send_at_cmd(NULL, "AT+CCLK?", MDM_CMD_SEND_TIMEOUT, 0, false); set_busy(false); allow_sleep(true); if (iface_ctx.local_time_valid) { memcpy(tm, &iface_ctx.local_time, sizeof(struct tm)); memcpy(offset, &iface_ctx.local_time_offset, sizeof(*offset)); } else { ret = -EIO; } hl7800_unlock(); return ret; } int32_t mdm_hl7800_get_operator_index(void) { int ret; hl7800_lock(); wakeup_hl7800(); iface_ctx.last_socket_id = 0; ret = send_at_cmd(NULL, "AT+KCARRIERCFG?", MDM_CMD_SEND_TIMEOUT, 0, false); set_busy(false); allow_sleep(true); hl7800_unlock(); if (ret < 0) { return ret; } else { return iface_ctx.operator_index; } } int32_t mdm_hl7800_get_functionality(void) { int ret; hl7800_lock(); wakeup_hl7800(); iface_ctx.last_socket_id = 0; ret = send_at_cmd(NULL, "AT+CFUN?", MDM_CMD_SEND_TIMEOUT, 0, false); set_busy(false); allow_sleep(true); hl7800_unlock(); if (ret < 0) { return ret; } else { return iface_ctx.functionality; } } int32_t mdm_hl7800_set_functionality(enum mdm_hl7800_functionality mode) { int ret; char 
buf[sizeof("AT+CFUN=###,0")] = { 0 }; hl7800_lock(); wakeup_hl7800(); snprintk(buf, sizeof(buf), "AT+CFUN=%u,0", mode); iface_ctx.last_socket_id = 0; ret = send_at_cmd(NULL, buf, MDM_CMD_SEND_TIMEOUT, MDM_DEFAULT_AT_CMD_RETRIES, false); set_busy(false); allow_sleep(true); hl7800_unlock(); return ret; } #ifdef CONFIG_MODEM_HL7800_GPS int32_t mdm_hl7800_set_gps_rate(uint32_t rate) { int ret = -1; hl7800_lock(); wakeup_hl7800(); iface_ctx.gps_query_location_rate_seconds = rate; /* Stopping first allows changing the rate between two non-zero values. * Ignore error if GNSS isn't running. */ SEND_AT_CMD_IGNORE_ERROR("AT+GNSSSTOP"); if (rate == 0) { SEND_AT_CMD_EXPECT_OK("AT+CFUN=1,0"); } else { /* Navigation doesn't work when LTE is on. */ SEND_AT_CMD_EXPECT_OK("AT+CFUN=4,0"); SEND_AT_CMD_EXPECT_OK("AT+GNSSCONF=1,1"); if (IS_ENABLED(CONFIG_MODEM_HL7800_USE_GLONASS)) { SEND_AT_CMD_EXPECT_OK("AT+GNSSCONF=10,1"); } /* Enable all NMEA sentences */ SEND_AT_CMD_EXPECT_OK("AT+GNSSNMEA=0,1000,0,1FF"); /* Enable GPS */ SEND_AT_CMD_EXPECT_OK("AT+GNSSSTART=0"); } error: if (rate && ret == 0) { k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.gps_work, K_SECONDS(iface_ctx.gps_query_location_rate_seconds)); } else { k_work_cancel_delayable(&iface_ctx.gps_work); } LOG_DBG("GPS status: %d rate: %u", ret, rate); set_busy(false); allow_sleep(true); hl7800_unlock(); return ret; } #endif /* CONFIG_MODEM_HL7800_GPS */ #ifdef CONFIG_MODEM_HL7800_POLTE int32_t mdm_hl7800_polte_register(void) { int ret = -1; hl7800_lock(); wakeup_hl7800(); /* register for events */ SEND_AT_CMD_EXPECT_OK("AT%POLTEEV=\"REGISTER\",1"); SEND_AT_CMD_EXPECT_OK("AT%POLTEEV=\"LOCATION\",1"); /* register with polte.io */ SEND_AT_CMD_EXPECT_OK("AT%POLTECMD=\"REGISTER\""); error: LOG_DBG("PoLTE register status: %d", ret); set_busy(false); allow_sleep(true); hl7800_unlock(); return ret; } int32_t mdm_hl7800_polte_enable(char *user, char *password) { int ret = -1; char 
buf[sizeof(MDM_HL7800_SET_POLTE_USER_AND_PASSWORD_FMT_STR) + MDM_HL7800_MAX_POLTE_USER_ID_SIZE + MDM_HL7800_MAX_POLTE_PASSWORD_SIZE] = { 0 }; hl7800_lock(); wakeup_hl7800(); /* register for events */ SEND_AT_CMD_EXPECT_OK("AT%POLTEEV=\"REGISTER\",1"); SEND_AT_CMD_EXPECT_OK("AT%POLTEEV=\"LOCATION\",1"); /* restore user and password (not saved in NV by modem) */ snprintk(buf, sizeof(buf), MDM_HL7800_SET_POLTE_USER_AND_PASSWORD_FMT_STR, user, password); ret = send_at_cmd(NULL, buf, MDM_CMD_SEND_TIMEOUT, MDM_DEFAULT_AT_CMD_RETRIES, false); error: LOG_DBG("PoLTE register status: %d", ret); set_busy(false); allow_sleep(true); hl7800_unlock(); return ret; } int32_t mdm_hl7800_polte_locate(void) { int ret = -1; hl7800_lock(); wakeup_hl7800(); SEND_AT_CMD_EXPECT_OK("AT%POLTECMD=\"LOCATE\",2,1"); error: LOG_DBG("PoLTE locate status: %d", ret); set_busy(false); allow_sleep(true); hl7800_unlock(); return ret; } #endif /* CONFIG_MODEM_HL7800_POLTE */ /** * @brief Perform a site survey. * */ int32_t mdm_hl7800_perform_site_survey(void) { int ret; hl7800_lock(); wakeup_hl7800(); ret = send_at_cmd(NULL, "at%meas=\"97\"", MDM_CMD_SEND_TIMEOUT, 0, false); set_busy(false); allow_sleep(true); hl7800_unlock(); return ret; } void mdm_hl7800_generate_status_events(void) { hl7800_lock(); generate_startup_state_event(); generate_network_state_event(); generate_sleep_state_event(); #ifdef CONFIG_MODEM_HL7800_FW_UPDATE generate_fota_state_event(); #endif event_handler(HL7800_EVENT_RSSI, &iface_ctx.mdm_rssi); event_handler(HL7800_EVENT_SINR, &iface_ctx.mdm_sinr); event_handler(HL7800_EVENT_APN_UPDATE, &iface_ctx.mdm_apn); event_handler(HL7800_EVENT_RAT, &iface_ctx.mdm_rat); event_handler(HL7800_EVENT_BANDS, iface_ctx.mdm_bands_string); event_handler(HL7800_EVENT_ACTIVE_BANDS, iface_ctx.mdm_active_bands_string); event_handler(HL7800_EVENT_REVISION, iface_ctx.mdm_revision); hl7800_unlock(); } uint32_t mdm_hl7800_log_filter_set(uint32_t level) { uint32_t new_log_level = 0; #ifdef 
/* Transmit the payload of a net_pkt over an established modem socket.
 *
 * Protocol: issue AT+KTCPSND (TCP) or AT+KUDPSND (UDP), wait for the modem's
 * CONNECT prompt, stream the packet fragments as raw data, then send the EOF
 * pattern and wait for the final status.
 *
 * @param sock socket to send on; must be non-NULL.
 * @param pkt  packet whose fragment chain holds the payload.
 * @return 0 on success, negative errno on failure (-ETIMEDOUT when the modem
 *         does not acknowledge in time).
 */
static int send_data(struct hl7800_socket *sock, struct net_pkt *pkt)
{
	int ret;
	struct net_buf *frag;
	char dst_addr[NET_IPV6_ADDR_LEN];
	char buf[sizeof("AT+KUDPSND=##,\"" IPV6_ADDR_FORMAT "\",#####,####")];
	size_t send_len, actual_send_len;

	send_len = 0, actual_send_len = 0;

	if (!sock) {
		return -EINVAL;
	}

	sock->error = 0;
	sock->state = SOCK_TX;

	frag = pkt->frags;
	send_len = net_buf_frags_len(frag);
	/* start sending data */
	k_sem_reset(&sock->sock_send_sem);
	if (sock->type == SOCK_STREAM) {
		snprintk(buf, sizeof(buf), "AT+KTCPSND=%d,%zu", sock->socket_id,
			 send_len);
	} else {
		/* UDP needs an explicit destination in the command */
		if (!net_addr_ntop(sock->family, &net_sin(&sock->dst)->sin_addr,
				   dst_addr, sizeof(dst_addr))) {
			LOG_ERR("Invalid dst addr");
			return -EINVAL;
		}
		snprintk(buf, sizeof(buf), "AT+KUDPSND=%d,\"%s\",%u,%zu",
			 sock->socket_id, dst_addr,
			 net_sin(&sock->dst)->sin_port, send_len);
	}
	/* K_NO_WAIT: the CONNECT/error wait is handled explicitly below */
	send_at_cmd(sock, buf, K_NO_WAIT, 0, false);

	/* wait for CONNECT or error */
	ret = k_sem_take(&sock->sock_send_sem, MDM_IP_SEND_RX_TIMEOUT);
	if (ret) {
		LOG_ERR("Err waiting for CONNECT (%d)", ret);
		goto done;
	}
	/* check for error */
	if (sock->error != 0) {
		ret = sock->error;
		LOG_ERR("AT+K**PSND (%d)", ret);
		goto done;
	}

	/* Loop through packet data and send */
	while (frag) {
		actual_send_len += frag->len;
		mdm_receiver_send(&iface_ctx.mdm_ctx, frag->data, frag->len);
		frag = frag->frags;
	}
	if (actual_send_len != send_len) {
		LOG_WRN("AT+K**PSND act: %zd exp: %zd", actual_send_len,
			send_len);
	}
	LOG_DBG("Sent %zu bytes", actual_send_len);

	/* Send EOF pattern to terminate data */
	k_sem_reset(&sock->sock_send_sem);
	mdm_receiver_send(&iface_ctx.mdm_ctx, EOF_PATTERN, strlen(EOF_PATTERN));
	ret = k_sem_take(&sock->sock_send_sem, MDM_IP_SEND_RX_TIMEOUT);
	if (ret == 0) {
		ret = sock->error;
	} else if (ret == -EAGAIN) {
		ret = -ETIMEDOUT;
	}
done:
	/* TCP sockets stay connected after a successful send; UDP sockets
	 * return to idle either way.
	 */
	if (sock->type == SOCK_STREAM) {
		if (sock->error == 0) {
			sock->state = SOCK_CONNECTED;
		}
	} else {
		sock->state = SOCK_IDLE;
	}

	return ret;
}
/* Setup IP header data to be used by some network applications.
 * While much is dummy data, some fields such as dst, port and family are
 * important.
 *
 * @param pkt  packet being constructed for delivery to the stack.
 * @param sock socket providing the addresses/ports and protocol.
 * @return the IP + protocol header length on success, -1 on failure.
 */
static int pkt_setup_ip_data(struct net_pkt *pkt, struct hl7800_socket *sock)
{
	int hdr_len = 0;
	uint16_t src_port = 0U, dst_port = 0U;
#if defined(CONFIG_NET_TCP)
	struct net_tcp_hdr *tcp;
#endif

#if defined(CONFIG_NET_IPV6)
	if (net_pkt_family(pkt) == AF_INET6) {
		/* Addresses are swapped: the packet appears to come FROM the
		 * socket's remote peer (dst) TO the local address (src).
		 */
		if (net_ipv6_create(
			    pkt, &((struct sockaddr_in6 *)&sock->dst)->sin6_addr,
			    &((struct sockaddr_in6 *)&sock->src)->sin6_addr)) {
			return -1;
		}
		net_pkt_set_remote_address(pkt, &sock->dst, sizeof(struct sockaddr_in6));
		pkt->remote.sa_family = AF_INET6;
		src_port = ntohs(net_sin6(&sock->src)->sin6_port);
		dst_port = ntohs(net_sin6(&sock->dst)->sin6_port);

		hdr_len = sizeof(struct net_ipv6_hdr);
	}
#endif
#if defined(CONFIG_NET_IPV4)
	if (net_pkt_family(pkt) == AF_INET) {
		/* Same swap as IPv6 above: remote peer is the sender */
		if (net_ipv4_create(
			    pkt, &((struct sockaddr_in *)&sock->dst)->sin_addr,
			    &((struct sockaddr_in *)&sock->src)->sin_addr)) {
			return -1;
		}
		net_pkt_set_remote_address(pkt, &sock->dst, sizeof(struct sockaddr_in));
		pkt->remote.sa_family = AF_INET;
		src_port = ntohs(net_sin(&sock->src)->sin_port);
		dst_port = ntohs(net_sin(&sock->dst)->sin_port);

		hdr_len = sizeof(struct net_ipv4_hdr);
	}
#endif

#if defined(CONFIG_NET_UDP)
	if (sock->ip_proto == IPPROTO_UDP) {
		if (net_udp_create(pkt, dst_port, src_port)) {
			return -1;
		}
		hdr_len += NET_UDPH_LEN;
	}
#endif
#if defined(CONFIG_NET_TCP)
	if (sock->ip_proto == IPPROTO_TCP) {
		NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);

		tcp = (struct net_tcp_hdr *)net_pkt_get_data(pkt, &tcp_access);
		if (!tcp) {
			return -1;
		}
		(void)memset(tcp, 0, NET_TCPH_LEN);

		/* Setup TCP header: ports mirrored to match the inbound
		 * direction (only ports are filled in; rest stays zero).
		 */
		tcp->src_port = dst_port;
		tcp->dst_port = src_port;

		if (net_pkt_set_data(pkt, &tcp_access)) {
			return -1;
		}
		hdr_len += NET_TCPH_LEN;
	}
#endif /* CONFIG_NET_TCP */

	return hdr_len;
}
/* Handler: AT+CGMI
 *
 * Copies the manufacturer string (up to the CRLF terminator) into
 * iface_ctx.mdm_manufacturer. Always returns true (response consumed).
 */
static bool on_cmd_atcmdinfo_manufacturer(struct net_buf **buf, uint16_t len)
{
	struct net_buf *frag = NULL;
	size_t out_len;
	int len_no_null = MDM_MANUFACTURER_LENGTH - 1;

	/* make sure manufacturer data is received
	 * waiting for: Sierra Wireless\r\n
	 */
	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf),
					MDM_MANUFACTURER_LENGTH);

	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	if (!frag) {
		LOG_ERR("Unable to find mfg end");
		goto done;
	}
	if (len < len_no_null) {
		LOG_WRN("mfg too short (len:%d)", len);
	} else if (len > len_no_null) {
		LOG_WRN("mfg too long (len:%d)", len);
		len = MDM_MANUFACTURER_LENGTH;
	}

	/* net_buf_linearize caps the copy at sizeof - 1, so the terminator
	 * below is always in bounds.
	 */
	out_len = net_buf_linearize(iface_ctx.mdm_manufacturer,
				    sizeof(iface_ctx.mdm_manufacturer) - 1,
				    *buf, 0, len);
	iface_ctx.mdm_manufacturer[out_len] = 0;
	LOG_INF("Manufacturer: %s", iface_ctx.mdm_manufacturer);
done:
	return true;
}
/* Handler: AT+CGSN
 *
 * Copies the IMEI digits (up to the CRLF terminator) into
 * iface_ctx.mdm_imei, truncating to the expected IMEI length.
 * Always returns true (response consumed).
 */
static bool on_cmd_atcmdinfo_imei(struct net_buf **buf, uint16_t len)
{
	struct net_buf *frag = NULL;
	size_t out_len;

	/* make sure IMEI data is received
	 * waiting for: ###############\r\n
	 */
	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf),
					MDM_HL7800_IMEI_SIZE);

	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	if (!frag) {
		LOG_ERR("Unable to find IMEI end");
		goto done;
	}
	if (len < MDM_HL7800_IMEI_STRLEN) {
		LOG_WRN("IMEI too short (len:%d)", len);
	} else if (len > MDM_HL7800_IMEI_STRLEN) {
		LOG_WRN("IMEI too long (len:%d)", len);
		len = MDM_HL7800_IMEI_STRLEN;
	}

	/* copy is capped at sizeof - 1, leaving room for the terminator */
	out_len = net_buf_linearize(iface_ctx.mdm_imei,
				    sizeof(iface_ctx.mdm_imei) - 1, *buf, 0,
				    len);
	iface_ctx.mdm_imei[out_len] = 0;

	LOG_INF("IMEI: %s", iface_ctx.mdm_imei);
done:
	return true;
}
/* Handler for the IMSI query response.
 *
 * Copies the IMSI digits (up to the CRLF terminator) into
 * iface_ctx.mdm_imsi; if the modem answered with an error, the cached IMSI
 * is cleared. Always returns true (response consumed).
 */
static bool on_cmd_atcmdinfo_imsi(struct net_buf **buf, uint16_t len)
{
	struct net_buf *frag = NULL;
	size_t out_len;

	/* The handler for the IMSI is based on the command.
	 * waiting for: <IMSI>\r\n
	 */
	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf),
					MDM_HL7800_IMSI_MIN_STR_SIZE);

	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	if (!frag) {
		LOG_ERR("Unable to find IMSI end");
		goto done;
	}
	if (len > MDM_HL7800_IMSI_MAX_STRLEN) {
		LOG_WRN("IMSI too long (len:%d)", len);
		len = MDM_HL7800_IMSI_MAX_STRLEN;
	}

	out_len = net_buf_linearize(iface_ctx.mdm_imsi,
				    MDM_HL7800_IMSI_MAX_STR_SIZE, *buf, 0, len);
	iface_ctx.mdm_imsi[out_len] = 0;

	/* The modem may return "ERROR" instead of digits (e.g. no SIM);
	 * treat that as no IMSI available.
	 */
	if (strstr(iface_ctx.mdm_imsi, "ERROR") != NULL) {
		LOG_ERR("Unable to read IMSI");
		memset(iface_ctx.mdm_imsi, 0, sizeof(iface_ctx.mdm_imsi));
	}

	LOG_INF("IMSI: %s", iface_ctx.mdm_imsi);
done:
	return true;
}
net_ipaddr_parse(iface_ctx.dns_v6_string, strlen(iface_ctx.dns_v6_string), &temp_addr); if (!valid_address && IS_ENABLED(CONFIG_NET_IPV4)) { /* IPv6 DNS string is not valid, replace it with IPv4 address and recheck */ strncpy(iface_ctx.dns_v6_string, iface_ctx.dns_v4_string, sizeof(iface_ctx.dns_v6_string) - 1); valid_address = net_ipaddr_parse(iface_ctx.dns_v6_string, strlen(iface_ctx.dns_v6_string), &temp_addr); } #else valid_address = net_ipaddr_parse(iface_ctx.dns_v4_string, strlen(iface_ctx.dns_v4_string), &temp_addr); #endif if (!valid_address) { LOG_WRN("No valid DNS address!"); } else if (iface_ctx.iface && net_if_is_up(iface_ctx.iface) && !iface_ctx.dns_ready) { /* set new DNS addr in DNS resolver */ LOG_DBG("Refresh DNS resolver"); dnsCtx = dns_resolve_get_default(); if (dnsCtx->state == DNS_RESOLVE_CONTEXT_INACTIVE) { LOG_DBG("Initializing DNS resolver"); ret = dns_resolve_init(dnsCtx, (const char **)dns_servers_str, NULL); if (ret < 0) { LOG_ERR("dns_resolve_init fail (%d)", ret); retry = true; } } else { LOG_DBG("Reconfiguring DNS resolver"); ret = dns_resolve_reconfigure(dnsCtx, (const char **)dns_servers_str, NULL); if (ret < 0) { LOG_ERR("dns_resolve_reconfigure fail (%d)", ret); retry = true; } } if (!retry) { LOG_DBG("DNS ready"); iface_ctx.dns_ready = true; } else { LOG_DBG("DNS not ready, schedule a retry"); k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.dns_work, K_SECONDS(DNS_WORK_DELAY_SECS * 2)); } } #endif } char *mdm_hl7800_get_iccid(void) { return iface_ctx.mdm_iccid; } char *mdm_hl7800_get_sn(void) { return iface_ctx.mdm_sn; } char *mdm_hl7800_get_imei(void) { return iface_ctx.mdm_imei; } char *mdm_hl7800_get_fw_version(void) { return iface_ctx.mdm_revision; } char *mdm_hl7800_get_imsi(void) { return iface_ctx.mdm_imsi; } /* Convert HL7800 IPv6 address string in format * a01.a02.a03.a04.a05.a06.a07.a08.a09.a10.a11.a12.a13.a14.a15.a16 to * an IPv6 address. 
 */
static int hl7800_net_addr6_pton(const char *src, struct in6_addr *dst)
{
	int num_sections = 8;
	int i, len;
	uint16_t ipv6_section;

	/* Only decimal digits and '.' separators are allowed */
	len = strlen(src);
	for (i = 0; i < len; i++) {
		if (!(src[i] >= '0' && src[i] <= '9') && src[i] != '.') {
			return -EINVAL;
		}
	}

	/* Each 16-bit section is built from two consecutive decimal bytes */
	for (i = 0; i < num_sections; i++) {
		if (!src || *src == '\0') {
			return -EINVAL;
		}

		ipv6_section = (uint16_t)strtol(src, NULL, 10);
		src = strchr(src, '.');
		if (!src) {
			return -EINVAL;
		}
		src++;
		if (*src == '\0') {
			return -EINVAL;
		}
		ipv6_section = (ipv6_section << 8) | (uint16_t)strtol(src, NULL, 10);
		UNALIGNED_PUT(htons(ipv6_section), &dst->s6_addr16[i]);

		src = strchr(src, '.');
		if (src) {
			src++;
		} else {
			/* no trailing '.' is only legal after the last section */
			if (i < num_sections - 1) {
				return -EINVAL;
			}
		}
	}

	return 0;
}

/* Handler: +CGCONTRDP: <cid>,<bearer_id>,<apn>,<local_addr and subnet_mask>,
 * <gw_addr>,<DNS_prim_addr>,<DNS_sec_addr>
 *
 * Parses the PDP context read response, stores IP/subnet/gateway/DNS in
 * iface_ctx, applies them to the Zephyr net_if, and kicks the DNS work item.
 */
static bool on_cmd_atcmdinfo_ipaddr(struct net_buf **buf, uint16_t len)
{
	int ret;
	int num_delims = CGCONTRDP_RESPONSE_NUM_DELIMS;
	char *delims[CGCONTRDP_RESPONSE_NUM_DELIMS];
	size_t out_len;
	char value[MDM_IP_INFO_RESP_SIZE];
	char *search_start, *addr_start, *sm_start;
	struct in_addr new_ipv4_addr;
	struct in6_addr new_ipv6_addr;
	bool is_ipv4;
	int addr_len;
	char temp_addr_str[HL7800_IPV6_ADDR_LEN];
	k_timeout_t delay;

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;
	search_start = value;
	LOG_DBG("IP info: %s", value);

	/* find all delimiters (,) */
	for (int i = 0; i < num_delims; i++) {
		delims[i] = strchr(search_start, ',');
		if (!delims[i]) {
			LOG_ERR("Could not find delim %d, val: %s", i, value);
			return true;
		}
		/* Start next search after current delim location */
		search_start = delims[i] + 1;
	}

	/* determine if IPv4 or IPv6 by checking length of ip address plus
	 * gateway string.
	 */
	is_ipv4 = false;
	addr_len = delims[3] - delims[2];
	LOG_DBG("IP string len: %d", addr_len);
	if (addr_len <= (NET_IPV4_ADDR_LEN * 2)) {
		is_ipv4 = true;
	}

	/* Find start of subnet mask */
	addr_start = delims[2] + 1;
	if (is_ipv4) {
		num_delims = 4;
	} else {
		num_delims = 16;
	}
	search_start = addr_start;
	sm_start = addr_start;
	for (int i = 0; i < num_delims; i++) {
		sm_start = strchr(search_start, '.');
		if (!sm_start) {
			LOG_ERR("Could not find submask start");
			return true;
		}
		/* Start next search after current delim location */
		search_start = sm_start + 1;
	}

	/* get new IP addr
	 * NOTE(review): addr_len is not bounds-checked against
	 * sizeof(temp_addr_str); assumes the modem response fits — confirm
	 */
	addr_len = sm_start - addr_start;
	strncpy(temp_addr_str, addr_start, addr_len);
	temp_addr_str[addr_len] = 0;
	LOG_DBG("IP addr: %s", temp_addr_str);
	if (is_ipv4) {
		ret = net_addr_pton(AF_INET, temp_addr_str, &new_ipv4_addr);
	} else {
		ret = hl7800_net_addr6_pton(temp_addr_str, &new_ipv6_addr);
	}
	if (ret < 0) {
		LOG_ERR("Invalid IP addr");
		return true;
	}

	if (is_ipv4) {
		/* move past the '.' */
		sm_start += 1;
		/* store new subnet mask */
		addr_len = delims[3] - sm_start;
		strncpy(temp_addr_str, sm_start, addr_len);
		temp_addr_str[addr_len] = 0;
		ret = net_addr_pton(AF_INET, temp_addr_str, &iface_ctx.subnet);
		if (ret < 0) {
			LOG_ERR("Invalid subnet");
			return true;
		}

		/* store new gateway */
		addr_start = delims[3] + 1;
		addr_len = delims[4] - addr_start;
		strncpy(temp_addr_str, addr_start, addr_len);
		temp_addr_str[addr_len] = 0;
		ret = net_addr_pton(AF_INET, temp_addr_str, &iface_ctx.gateway);
		if (ret < 0) {
			LOG_ERR("Invalid gateway");
			return true;
		}
	}

	/* store new dns */
	addr_start = delims[4] + 1;
	addr_len = delims[5] - addr_start;
	strncpy(temp_addr_str, addr_start, addr_len);
	temp_addr_str[addr_len] = 0;
	if (is_ipv4) {
		/* a changed DNS string forces a resolver refresh */
		ret = strncmp(temp_addr_str, iface_ctx.dns_v4_string, addr_len);
		if (ret != 0) {
			iface_ctx.dns_ready = false;
		}
		strncpy(iface_ctx.dns_v4_string, addr_start, addr_len);
		iface_ctx.dns_v4_string[addr_len] = 0;
		ret = net_addr_pton(AF_INET, iface_ctx.dns_v4_string, &iface_ctx.dns_v4);
		LOG_DBG("IPv4 DNS addr: %s", iface_ctx.dns_v4_string);
	}
#ifdef CONFIG_NET_IPV6
	else {
		ret = strncmp(temp_addr_str, iface_ctx.dns_v6_string, addr_len);
		if (ret != 0) {
			iface_ctx.dns_ready = false;
		}
		/* store HL7800 formatted IPv6 DNS string temporarily */
		strncpy(iface_ctx.dns_v6_string, addr_start, addr_len);

		ret = hl7800_net_addr6_pton(iface_ctx.dns_v6_string, &iface_ctx.dns_v6);
		net_addr_ntop(AF_INET6, &iface_ctx.dns_v6, iface_ctx.dns_v6_string,
			      sizeof(iface_ctx.dns_v6_string));
		LOG_DBG("IPv6 DNS addr: %s", iface_ctx.dns_v6_string);
	}
#endif
	if (ret < 0) {
		LOG_ERR("Invalid dns");
		return true;
	}

	if (iface_ctx.iface) {
		if (is_ipv4) {
#ifdef CONFIG_NET_IPV4
			/* remove the current IPv4 addr before adding a new one.
			 * We dont care if it is successful or not.
			 */
			net_if_ipv4_addr_rm(iface_ctx.iface, &iface_ctx.ipv4Addr);

			if (!net_if_ipv4_addr_add(iface_ctx.iface, &new_ipv4_addr,
						  NET_ADDR_DHCP, 0)) {
				LOG_ERR("Cannot set iface IPv4 addr");
			}

			net_if_ipv4_set_netmask_by_addr(iface_ctx.iface, &new_ipv4_addr,
							&iface_ctx.subnet);
			net_if_ipv4_set_gw(iface_ctx.iface, &iface_ctx.gateway);
#endif
			/* store the new IP addr */
			net_ipaddr_copy(&iface_ctx.ipv4Addr, &new_ipv4_addr);
		} else {
#if CONFIG_NET_IPV6
			net_if_ipv6_addr_rm(iface_ctx.iface, &iface_ctx.ipv6Addr);
			if (!net_if_ipv6_addr_add(iface_ctx.iface, &new_ipv6_addr,
						  NET_ADDR_AUTOCONF, 0)) {
				LOG_ERR("Cannot set iface IPv6 addr");
			}
#endif
		}

		/* start DNS update work */
		delay = K_NO_WAIT;
		if (!iface_ctx.initialized) {
			/* Delay this in case the network
			 * stack is still starting up
			 */
			delay = K_SECONDS(DNS_WORK_DELAY_SECS);
		}
		k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.dns_work, delay);
	} else {
		LOG_ERR("iface NULL");
	}

	return true;
}

/* Handler1: +COPS: <mode>[,<format>,<oper>[,<AcT>]]
 *
 * Handler2:
 * +COPS: [list of supported (<stat>, long alphanumeric <oper>, short
 * alphanumeric <oper>, numeric <oper>[,< AcT>])s][,,
 * (list of supported <mode>s),(list of supported <format>s)]
 */
static bool on_cmd_atcmdinfo_operator_status(struct
					     net_buf **buf, uint16_t len)
{
	size_t out_len;
	char value[MDM_MAX_RESP_SIZE];
	int num_delims = COPS_RESPONSE_NUM_DELIMS;
	char *delims[COPS_RESPONSE_NUM_DELIMS];
	char *search_start;
	int i;

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;

	/* For AT+COPS=?, result is most likely longer than size of log string */
	if (strchr(value, '(') != NULL) {
		LOG_HEXDUMP_DBG(value, out_len, "Operator: ");
		goto done;
	} else {
		LOG_INF("Operator: %s", value);
	}

	/* Process AT+COPS? */
	if (len == 1) {
		/* only mode was returned, there is no operator info */
		iface_ctx.operator_status = NO_OPERATOR;
		goto done;
	}

	search_start = value;

	/* find all delimiters (,) */
	for (i = 0; i < num_delims; i++) {
		delims[i] = strchr(search_start, ',');
		if (!delims[i]) {
			LOG_ERR("Could not find delim %d, val: %s", i, value);
			goto done;
		}
		/* Start next search after current delim location */
		search_start = delims[i] + 1;
	}

	/* we found both delimiters, that means we have an operator */
	iface_ctx.operator_status = REGISTERED;
done:
	return true;
}

/* Handler: +KGSN: T5640400011101 */
static bool on_cmd_atcmdinfo_serial_number(struct net_buf **buf, uint16_t len)
{
	struct net_buf *frag = NULL;
	char value[MDM_SN_RESPONSE_LENGTH];
	size_t out_len;
	int sn_len;
	char *val_start;

	/* make sure SN# data is received.
	 * we are waiting for: +KGSN: ##############\r\n
	 */
	wait_for_modem_data(buf, net_buf_frags_len(*buf), MDM_SN_RESPONSE_LENGTH);

	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	if (!frag) {
		LOG_ERR("Unable to find sn end");
		goto done;
	}

	/* get msg data */
	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;

	/* find ':' */
	val_start = strchr(value, ':');
	if (!val_start) {
		LOG_ERR("Unable to find sn ':'");
		goto done;
	}
	/* Remove ": " chars */
	val_start += 2;

	sn_len = len - (val_start - value);
	if (sn_len < MDM_HL7800_SERIAL_NUMBER_STRLEN) {
		LOG_WRN("sn too short (len:%d)", sn_len);
	} else if (sn_len > MDM_HL7800_SERIAL_NUMBER_STRLEN) {
		LOG_WRN("sn too long (len:%d)", sn_len);
		sn_len = MDM_HL7800_SERIAL_NUMBER_STRLEN;
	}

	/* sn_len is clamped above; explicit NUL termination follows */
	strncpy(iface_ctx.mdm_sn, val_start, sn_len);
	iface_ctx.mdm_sn[sn_len] = 0;
	LOG_INF("Serial #: %s", iface_ctx.mdm_sn);
done:
	return true;
}

/* Handler: +KSRAT: # */
static bool on_cmd_radio_tech_status(struct net_buf **buf, uint16_t len)
{
	size_t out_len;
	char value[MDM_MAX_RESP_SIZE];

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;
	iface_ctx.mdm_rat = strtol(value, NULL, 10);
	LOG_INF("+KSRAT: %d", iface_ctx.mdm_rat);
	event_handler(HL7800_EVENT_RAT, &iface_ctx.mdm_rat);

	return true;
}

/* Handler: +KBNDCFG: #,#######################
 * Parses the configured band bitmask into top/middle/bottom hex words.
 */
static bool on_cmd_radio_band_configuration(struct net_buf **buf, uint16_t len)
{
	size_t out_len;
	char value[MDM_MAX_RESP_SIZE];
	char n_tmp[sizeof("#########")];

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;

	/* first char is the RAT index; it must match the current RAT */
	if (value[0] != (iface_ctx.mdm_rat == MDM_RAT_CAT_M1 ? '0' : '1')) {
		/* Invalid RAT */
		return true;
	} else if (strlen(value) < sizeof("#,###################")) {
		/* String size too short */
		return true;
	}

	memcpy(iface_ctx.mdm_bands_string, &value[MDM_TOP_BAND_START_POSITION],
	       MDM_HL7800_LTE_BAND_STRLEN);

	memcpy(n_tmp, &value[MDM_TOP_BAND_START_POSITION], MDM_TOP_BAND_SIZE);
	n_tmp[MDM_TOP_BAND_SIZE] = 0;
	iface_ctx.mdm_bands_top = strtoul(n_tmp, NULL, 16);

	memcpy(n_tmp, &value[MDM_MIDDLE_BAND_START_POSITION], MDM_MIDDLE_BAND_SIZE);
	n_tmp[MDM_MIDDLE_BAND_SIZE] = 0;
	iface_ctx.mdm_bands_middle = strtoul(n_tmp, NULL, 16);

	memcpy(n_tmp, &value[MDM_BOTTOM_BAND_START_POSITION], MDM_BOTTOM_BAND_SIZE);
	n_tmp[MDM_BOTTOM_BAND_SIZE] = 0;
	iface_ctx.mdm_bands_bottom = strtoul(n_tmp, NULL, 16);

	LOG_INF("Current band configuration: %04x %08x %08x", iface_ctx.mdm_bands_top,
		iface_ctx.mdm_bands_middle, iface_ctx.mdm_bands_bottom);

	event_handler(HL7800_EVENT_BANDS, iface_ctx.mdm_bands_string);

	return true;
}

/* Handler: +KBND: #,####################### */
static bool on_cmd_radio_active_bands(struct net_buf **buf, uint16_t len)
{
	size_t out_len;
	char value[MDM_MAX_RESP_SIZE];

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;

	if (strlen(value) < sizeof("#,###################")) {
		/* String size too short */
		return true;
	}

	memcpy(iface_ctx.mdm_active_bands_string, &value[MDM_TOP_BAND_START_POSITION],
	       MDM_HL7800_LTE_BAND_STRLEN);

	event_handler(HL7800_EVENT_ACTIVE_BANDS, iface_ctx.mdm_active_bands_string);

	return true;
}

/* Map a startup state enum value to its printable name */
static char *get_startup_state_string(enum mdm_hl7800_startup_state state)
{
	/* clang-format off */
	switch (state) {
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_STARTUP_STATE, READY);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_STARTUP_STATE, WAITING_FOR_ACCESS_CODE);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_STARTUP_STATE, SIM_NOT_PRESENT);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_STARTUP_STATE, SIMLOCK);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_STARTUP_STATE, UNRECOVERABLE_ERROR);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_STARTUP_STATE, UNKNOWN);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_STARTUP_STATE, INACTIVE_SIM);
	default:
		return "UNKNOWN";
	}
	/* clang-format on */
}

/* Record the new startup state and notify registered event listeners */
static void set_startup_state(enum mdm_hl7800_startup_state state)
{
	iface_ctx.mdm_startup_state = state;
	generate_startup_state_event();
}

static void generate_startup_state_event(void)
{
	struct mdm_hl7800_compound_event event;

	event.code = iface_ctx.mdm_startup_state;
	event.string = get_startup_state_string(iface_ctx.mdm_startup_state);
	LOG_INF("Startup State: %s", event.string);
	event_handler(HL7800_EVENT_STARTUP_STATE_CHANGE, &event);
}

/* Public API: select the sleep level and push it to the modem.
 * Returns 0 on success, -EINVAL for an unknown level, -EPERM when
 * low-power mode support is not compiled in.
 */
int mdm_hl7800_set_desired_sleep_level(enum mdm_hl7800_sleep level)
{
	int r = -EPERM;

#if CONFIG_MODEM_HL7800_LOW_POWER_MODE
	switch (level) {
	case HL7800_SLEEP_AWAKE:
	case HL7800_SLEEP_HIBERNATE:
	case HL7800_SLEEP_LITE_HIBERNATE:
	case HL7800_SLEEP_SLEEP:
		iface_ctx.desired_sleep_level = level;
		r = 0;
		break;
	default:
		r = -EINVAL;
	}

	if (r == 0) {
		hl7800_lock();
		wakeup_hl7800();
		r = set_sleep_level();
		set_busy(false);
		allow_sleep(true);
		hl7800_unlock();
	}
#endif

	return r;
}

/* Pick the default sleep level from Kconfig on first use */
static void initialize_sleep_level(void)
{
	if (iface_ctx.desired_sleep_level == HL7800_SLEEP_UNINITIALIZED) {
		if (IS_ENABLED(CONFIG_MODEM_HL7800_SLEEP_LEVEL_HIBERNATE)) {
			iface_ctx.desired_sleep_level = HL7800_SLEEP_HIBERNATE;
		} else if (IS_ENABLED(CONFIG_MODEM_HL7800_SLEEP_LEVEL_LITE_HIBERNATE)) {
			iface_ctx.desired_sleep_level = HL7800_SLEEP_LITE_HIBERNATE;
		} else if (IS_ENABLED(CONFIG_MODEM_HL7800_SLEEP_LEVEL_SLEEP)) {
			iface_ctx.desired_sleep_level = HL7800_SLEEP_SLEEP;
		} else {
			iface_ctx.desired_sleep_level = HL7800_SLEEP_AWAKE;
		}
	}
}

#ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE
/* Send AT+KSLEEP matching the desired sleep level */
static int set_sleep_level(void)
{
	char cmd[sizeof("AT+KSLEEP=#,#,##")];
	static const char SLEEP_CMD_FMT[] = "AT+KSLEEP=%d,%d,%d";
	int delay = CONFIG_MODEM_HL7800_SLEEP_DELAY_AFTER_REBOOT;
	int ret = 0;

	/* AT+KSLEEP= <management>[,<level>[,<delay to sleep after reboot>]]
	 * management 1 means the
	 * HL7800 decides when it enters sleep mode
	 */
	switch (iface_ctx.desired_sleep_level) {
	case HL7800_SLEEP_HIBERNATE:
		snprintk(cmd, sizeof(cmd), SLEEP_CMD_FMT, 1, 2, delay);
		break;
	case HL7800_SLEEP_LITE_HIBERNATE:
		snprintk(cmd, sizeof(cmd), SLEEP_CMD_FMT, 1, 1, delay);
		break;
	case HL7800_SLEEP_SLEEP:
		snprintk(cmd, sizeof(cmd), SLEEP_CMD_FMT, 1, 0, delay);
		break;
	default:
		/* don't sleep */
		snprintk(cmd, sizeof(cmd), SLEEP_CMD_FMT, 2, 0, delay);
		break;
	}

	/* macro jumps to the error label below on failure */
	SEND_AT_CMD_EXPECT_OK(cmd);
error:
	return ret;
}
#endif /* CONFIG_MODEM_HL7800_LOW_POWER_MODE */

/* Map a sleep state enum value to its printable name */
static char *get_sleep_state_string(enum mdm_hl7800_sleep state)
{
	/* clang-format off */
	switch (state) {
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_SLEEP, UNINITIALIZED);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_SLEEP, HIBERNATE);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_SLEEP, LITE_HIBERNATE);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_SLEEP, SLEEP);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_SLEEP, AWAKE);
	default:
		return "UNKNOWN";
	}
	/* clang-format on */
}

/* Record the new sleep state and notify listeners; any non-awake state
 * resets the "modem awake" semaphore so waiters block until wake-up.
 */
static void set_sleep_state(enum mdm_hl7800_sleep state)
{
	iface_ctx.sleep_state = state;
	if (iface_ctx.sleep_state != HL7800_SLEEP_AWAKE) {
		k_sem_reset(&iface_ctx.mdm_awake);
	}
	generate_sleep_state_event();
}

static void generate_sleep_state_event(void)
{
	struct mdm_hl7800_compound_event event;

	event.code = iface_ctx.sleep_state;
	event.string = get_sleep_state_string(iface_ctx.sleep_state);
	LOG_INF("Sleep State: %s", event.string);
	event_handler(HL7800_EVENT_SLEEP_STATE_CHANGE, &event);
}

#ifdef CONFIG_MODEM_HL7800_FW_UPDATE
/* Map a FOTA state enum value to its printable name */
static char *get_fota_state_string(enum mdm_hl7800_fota_state state)
{
	/* clang-format off */
	switch (state) {
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, IDLE);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, START);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, WIP);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, PAD);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, SEND_EOT);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, FILE_ERROR);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, INSTALL);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, REBOOT_AND_RECONFIGURE);
		PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800_FOTA, COMPLETE);
	default:
		return "UNKNOWN";
	}
	/* clang-format on */
}

/* Record a FOTA state transition and notify listeners */
static void set_fota_state(enum mdm_hl7800_fota_state state)
{
	LOG_INF("FOTA state: %s->%s", get_fota_state_string(iface_ctx.fw_update_state),
		get_fota_state_string(state));
	iface_ctx.fw_update_state = state;
	generate_fota_state_event();
}

static void generate_fota_state_event(void)
{
	struct mdm_hl7800_compound_event event;

	event.code = iface_ctx.fw_update_state;
	event.string = get_fota_state_string(iface_ctx.fw_update_state);
	event_handler(HL7800_EVENT_FOTA_STATE, &event);
}

/* Report FOTA progress as bytes transferred (XMODEM packets * block size) */
static void generate_fota_count_event(void)
{
	uint32_t count = iface_ctx.fw_packet_count * XMODEM_DATA_SIZE;

	event_handler(HL7800_EVENT_FOTA_COUNT, &count);
}
#endif

/* Handler: +KSUP: # */
static bool on_cmd_startup_report(struct net_buf **buf, uint16_t len)
{
	size_t out_len;
	char value[MDM_MAX_RESP_SIZE];

	memset(value, 0, sizeof(value));
	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	if (out_len > 0) {
		set_startup_state(strtol(value, NULL, 10));
	} else {
		set_startup_state(HL7800_STARTUP_STATE_UNKNOWN);
	}

#ifdef CONFIG_MODEM_HL7800_FW_UPDATE
	if (iface_ctx.fw_updated) {
		iface_ctx.fw_updated = false;
		set_fota_state(HL7800_FOTA_REBOOT_AND_RECONFIGURE);
		/* issue reset after a firmware update to reconfigure modem state */
		k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.mdm_reset_work,
					    K_NO_WAIT);
	} else
#endif
	{
		PRINT_AWAKE_MSG;
		iface_ctx.wait_for_KSUP = false;
		iface_ctx.mdm_startup_reporting_on = true;
		iface_ctx.reconfig_IP_connection = true;
#ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE
		mark_sockets_for_reconfig();
#endif
		set_sleep_state(HL7800_SLEEP_AWAKE);
		k_sem_give(&iface_ctx.mdm_awake);
	}

	return true;
}

/* Parse an AT&V-style profile dump; when active_profile is true, the echo
 * setting (E0/E1) from the first line is latched into iface_ctx.
 * Returns false so the command handler framework keeps its default flow.
 */
static bool profile_handler(struct net_buf **buf, uint16_t len, bool active_profile)
{
	uint32_t size;
	int echo_state = -1;
	struct net_buf *frag = NULL;
	uint16_t line_length;
	char line[MAX_PROFILE_LINE_LENGTH];
	size_t output_length;

	/* Prepare net buffer for this parser. */
	net_buf_remove(buf, len);
	net_buf_skipcrlf(buf);

	size = wait_for_modem_data(buf, net_buf_frags_len(*buf), sizeof(PROFILE_LINE_1));
	net_buf_skipcrlf(buf); /* remove any \r\n that are in the front */

	/* Parse configuration data to determine if echo is on/off. */
	line_length = net_buf_findcrlf(*buf, &frag);
	if (line_length) {
		memset(line, 0, sizeof(line));
		output_length = net_buf_linearize(line, SIZE_WITHOUT_NUL(line), *buf, 0,
						  line_length);
		LOG_DBG("length: %u: %s", line_length, line);

		/* Echo on off is the first thing on the line: E0, E1 */
		if (output_length >= SIZE_WITHOUT_NUL("E?")) {
			echo_state = (line[1] == '1') ? 1 : 0;
		}
	}
	LOG_DBG("echo: %d", echo_state);
	net_buf_remove(buf, line_length);
	net_buf_skipcrlf(buf);

	if (active_profile) {
		iface_ctx.mdm_echo_is_on = (echo_state != 0);
	}

	/* Discard next line. This waits for the longest possible response even
	 * though most registers won't have the value 0xFF.
	 */
	size = wait_for_modem_data(buf, net_buf_frags_len(*buf), sizeof(PROFILE_LINE_2));
	net_buf_skipcrlf(buf);
	len = net_buf_findcrlf(*buf, &frag);
	net_buf_remove(buf, len);
	net_buf_skipcrlf(buf);

	return false;
}

static bool on_cmd_atcmdinfo_active_profile(struct net_buf **buf, uint16_t len)
{
	return profile_handler(buf, len, true);
}

static bool on_cmd_atcmdinfo_stored_profile0(struct net_buf **buf, uint16_t len)
{
	return profile_handler(buf, len, false);
}

static bool on_cmd_atcmdinfo_stored_profile1(struct net_buf **buf, uint16_t len)
{
	return profile_handler(buf, len, false);
}

/* +WPPP: 1,1,"username","password" */
static bool on_cmd_atcmdinfo_pdp_authentication_cfg(struct net_buf **buf, uint16_t len)
{
	struct net_buf *frag = NULL;
	uint16_t line_length;
	char line[MDM_HL7800_APN_CMD_MAX_SIZE];
	size_t output_length;
	size_t i;
	char *p;

	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf),
					MDM_HL7800_APN_CMD_MAX_SIZE);

	line_length = net_buf_findcrlf(*buf, &frag);
	if (line_length) {
		memset(line, 0, sizeof(line));
		output_length = net_buf_linearize(line, SIZE_WITHOUT_NUL(line), *buf, 0,
						  line_length);
		LOG_DBG("length: %u: %s", line_length, line);
		if (output_length > 0) {
			memset(iface_ctx.mdm_apn.username, 0,
			       sizeof(iface_ctx.mdm_apn.username));
			memset(iface_ctx.mdm_apn.password, 0,
			       sizeof(iface_ctx.mdm_apn.password));

			/* username is the first double-quoted field */
			i = 0;
			p = strchr(line, '"');
			if (p != NULL) {
				p += 1;
				i = 0;
				while ((p != NULL) && (*p != '"') &&
				       (i < MDM_HL7800_APN_USERNAME_MAX_STRLEN)) {
					iface_ctx.mdm_apn.username[i++] = *p++;
				}
			} else {
				LOG_WRN("Issue parsing APN username");
				goto done;
			}
			LOG_INF("APN Username: %s", iface_ctx.mdm_apn.username);

			/* password is the next double-quoted field */
			p = strchr(p + 1, '"');
			if (p != NULL) {
				p += 1;
				i = 0;
				while ((p != NULL) && (*p != '"') &&
				       (i < MDM_HL7800_APN_PASSWORD_MAX_STRLEN)) {
					iface_ctx.mdm_apn.password[i++] = *p++;
				}
			}
			LOG_INF("APN Password: %s", iface_ctx.mdm_apn.password);
		}
	}
done:
	net_buf_remove(buf, line_length);
	net_buf_skipcrlf(buf);

	return false;
}

/* Only context 1 is used.
Other contexts are unhandled. * * +CGDCONT: 1,"IP","access point name",,0,0,0,0,0,,0,,,,, */ static bool on_cmd_atcmdinfo_pdp_context(struct net_buf **buf, uint16_t len) { struct net_buf *frag = NULL; uint16_t line_length; char line[MDM_HL7800_APN_CMD_MAX_SIZE]; size_t output_length; char *p; size_t i; wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), MDM_HL7800_APN_CMD_MAX_SIZE); line_length = net_buf_findcrlf(*buf, &frag); if (line_length) { memset(line, 0, sizeof(line)); output_length = net_buf_linearize(line, SIZE_WITHOUT_NUL(line), *buf, 0, line_length); LOG_DBG("length: %u: %s", line_length, line); if (output_length > 0) { memset(iface_ctx.mdm_apn.value, 0, sizeof(iface_ctx.mdm_apn.value)); memset(iface_ctx.mdm_pdp_addr_fam, 0, MDM_ADDR_FAM_MAX_LEN); /* Address family after first , */ p = strchr(line, ','); if (p == NULL) { LOG_WRN("Issue parsing APN response"); goto done; } p += 2; i = 0; while ((p != NULL) && (*p != '"') && (i < MDM_ADDR_FAM_MAX_LEN)) { iface_ctx.mdm_pdp_addr_fam[i++] = *p++; } if (strcmp(iface_ctx.mdm_pdp_addr_fam, ADDRESS_FAMILY_IP) == 0) { snprintk(iface_ctx.mdm_pdp_addr_fam, sizeof(iface_ctx.mdm_pdp_addr_fam), "%s", ADDRESS_FAMILY_IPV4); } LOG_DBG("PDP address family: %s", iface_ctx.mdm_pdp_addr_fam); /* APN after second , " */ p = strchr(p, ','); if (p == NULL) { LOG_WRN("Issue parsing APN response"); goto done; } p++; if (*p == ',') { /* APN is blank */ goto done; } if (*p == '"') { p++; i = 0; while ((p != NULL) && (*p != '"') && (i < MDM_HL7800_APN_MAX_STRLEN)) { iface_ctx.mdm_apn.value[i++] = *p++; } } LOG_INF("APN: %s", iface_ctx.mdm_apn.value); } } done: net_buf_remove(buf, line_length); net_buf_skipcrlf(buf); return false; } static int hl7800_query_rssi(void) { int ret; ret = send_at_cmd(NULL, "AT+KCELLMEAS=0", MDM_CMD_SEND_TIMEOUT, 1, false); if (ret < 0) { LOG_ERR("AT+KCELLMEAS ret:%d", ret); } return ret; } static void hl7800_start_rssi_work(void) { /* Rate is not checked here to allow one reading * when going 
	 * from network down->up
	 */
	k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.rssi_query_work, K_NO_WAIT);
}

/* Cancel any pending RSSI query work */
static void hl7800_stop_rssi_work(void)
{
	int rc;

	rc = k_work_cancel_delayable(&iface_ctx.rssi_query_work);
	if (rc != 0) {
		LOG_ERR("Could not cancel RSSI work [%d]", rc);
	}
}

/* Wake the modem, issue one RSSI query, then allow it to sleep again */
static void rssi_query(void)
{
	hl7800_lock();
	wakeup_hl7800();
	hl7800_query_rssi();
	set_busy(false);
	allow_sleep(true);
	hl7800_unlock();
}

/* Periodic RSSI work item: query once, then re-arm if a rate is configured */
static void hl7800_rssi_query_work(struct k_work *work)
{
	rssi_query();

	/* re-start RSSI query work */
	if (CONFIG_MODEM_HL7800_RSSI_RATE_SECONDS > 0) {
		k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.rssi_query_work,
					    K_SECONDS(CONFIG_MODEM_HL7800_RSSI_RATE_SECONDS));
	}
}

#ifdef CONFIG_MODEM_HL7800_GPS
/* Unsolicited notification
 * Handler: +GNSSEV: <eventType>,<eventStatus>
 */
static bool on_cmd_gps_event(struct net_buf **buf, uint16_t len)
{
	size_t out_len;
	char value[MDM_MAX_RESP_SIZE];
	char *start = NULL;
	char *end = NULL;
	int8_t event = -1;
	int8_t status = -1;

	memset(value, 0, sizeof(value));
	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	if (out_len > 0) {
		start = value;
		event = strtol(start, &end, 10);
		/* Only accept the status if strtol stopped exactly on the comma */
		if (end == strchr(value, ',')) {
			start = end + 1;
			status = strtol(start, &end, 10);
		}
	}
	LOG_INF("GPS event: %d status: %d", event, status);

	if (event == HL7800_GNSS_EVENT_POSITION) {
		event_handler(HL7800_EVENT_GPS_POSITION_STATUS, &status);
	}

	return true;
}

/* Periodic GPS work item: wake the modem, request a location fix, and
 * re-arm if a query rate is configured.
 */
static void gps_work_callback(struct k_work *work)
{
	ARG_UNUSED(work);
	int r;

	hl7800_lock();
	wakeup_hl7800();
	r = send_at_cmd(NULL, "AT+GNSSLOC?", MDM_CMD_SEND_TIMEOUT, 1, false);
	set_busy(false);
	allow_sleep(true);
	hl7800_unlock();

	LOG_DBG("GPS location request status: %d", r);

	if (iface_ctx.gps_query_location_rate_seconds) {
		k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.gps_work,
					    K_SECONDS(iface_ctx.gps_query_location_rate_seconds));
	}
}

/* The AT+GNSSLOC?
 * command returns 1 of 2 things:
 *
 * +GNSSLOC:
 * Latitude: "49 Deg 10 Min 21.49 Sec N"
 * Longitude: "123 Deg 4 Min 14.76 Sec W"
 * GpsTime: "yyyy mm dd hh:mm:ss"
 * FixType: "2D" or "3D"
 * HEPE: "8.485 m" (Horizontal Estimated Position Error)
 * Altitude: "-1 m"
 * AltUnc: "3.0 m"
 * Direction: "0.0 deg"
 * HorSpeed: "0.0 m/s"
 * VerSpeed: "0.0 m/s"
 * OK
 *
 * OR
 *
 * +GNSSLOC:
 * FIX NOT AVAILABLE
 * OK
 *
 * Since each response is on its own line, the command handler is used
 * to handle each one as an individual response.
 */
/* Common handler for one GNSSLOC line: copy the (truncated if needed)
 * string out of the net_buf and deliver it as an HL7800_EVENT_GPS event
 * tagged with str_type.
 */
static bool gps_handler(struct net_buf **buf, uint16_t len, enum mdm_hl7800_gps_string_types str_type)
{
	struct mdm_hl7800_compound_event event;
	char gps_str[MDM_HL7800_MAX_GPS_STR_SIZE];
	size_t gps_len = sizeof(gps_str) - 1;
	struct net_buf *frag = NULL;
	size_t out_len;

	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), sizeof(gps_str));

	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	if (!frag) {
		LOG_ERR("Unable to find end");
		goto done;
	}

	/* Truncate to the buffer size, keeping room for the terminator */
	if (len > gps_len) {
		LOG_WRN("GPS string too long (len:%d)", len);
		len = gps_len;
	}

	out_len = net_buf_linearize(gps_str, gps_len, *buf, 0, len);
	gps_str[out_len] = 0;

	event.code = str_type;
	event.string = gps_str;
	event_handler(HL7800_EVENT_GPS, &event);
done:
	return true;
}

static bool on_cmd_latitude(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_LATITUDE);
}

static bool on_cmd_longitude(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_LONGITUDE);
}

static bool on_cmd_gps_time(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_GPS_TIME);
}

static bool on_cmd_fix_type(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_FIX_TYPE);
}

static bool on_cmd_hepe(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_HEPE);
}

static bool on_cmd_altitude(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_ALTITUDE);
}

static bool on_cmd_alt_unc(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_ALT_UNC);
}

static bool on_cmd_direction(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_DIRECTION);
}

static bool on_cmd_hor_speed(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_HOR_SPEED);
}

static bool on_cmd_ver_speed(struct net_buf **buf, uint16_t len)
{
	return gps_handler(buf, len, HL7800_GPS_STR_VER_SPEED);
}
#endif /* CONFIG_MODEM_HL7800_GPS */

#ifdef CONFIG_MODEM_HL7800_POLTE
/* Handler: %POLTEEVU: "REGISTER",0, <mqttAuthUser>, <mqttAuthPassword> */
/* Walk the quote-delimited user and password fields in place (the quotes
 * are overwritten with NUL terminators so data.user/data.password can point
 * directly into rsp) and deliver the result as a registration event.
 */
static bool on_cmd_polte_registration(struct net_buf **buf, uint16_t len)
{
	char rsp[MDM_MAX_RESP_SIZE] = { 0 };
	size_t rsp_len = sizeof(rsp) - 1;
	char *rsp_end = rsp + rsp_len;
	struct mdm_hl7800_polte_registration_event_data data;
	struct net_buf *frag = NULL;
	size_t out_len;
	char *location;
	bool parsed;

	memset(&data, 0, sizeof(data));

	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), sizeof(rsp));

	location = rsp;
	parsed = false;
	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	/* do/while(false) allows 'break' to be used as a structured bail-out */
	do {
		if (!frag) {
			LOG_ERR("Unable to find end");
			break;
		}
		if (len > rsp_len) {
			LOG_WRN("string too long (len:%d)", len);
			len = rsp_len;
		}
		out_len = net_buf_linearize(rsp, rsp_len, *buf, 0, len);
		rsp[out_len] = 0;

		/* Command handler looks for string up to the user field */
		location = strstr(location, "\"");
		if (location != NULL && location < rsp_end) {
			location += 1;
			if (location >= rsp_end) {
				break;
			}
			data.user = location;
		} else {
			break;
		}
		/* Find end of user field and null terminate string */
		location = strstr(location, "\"");
		if (location != NULL && location < rsp_end) {
			*location = 0;
			location += 1;
			if (location >= rsp_end) {
				break;
			}
		} else {
			break;
		}

		/* Password field starts after the next ," pair */
		location = strstr(location, ",\"");
		if (location != NULL && location < rsp_end) {
			location += 2;
			if (location >= rsp_end) {
				break;
			}
			data.password = location;
		} else {
			break;
		}

		location = strstr(location,
				  "\"");
		/* Terminate the password field at its closing quote */
		if (location != NULL && location < rsp_end) {
			*location = 0;
		} else {
			break;
		}
		parsed = true;
	} while (false);

	if (parsed && data.user && data.password) {
		data.status = 0;
	} else {
		data.status = -1;
		LOG_ERR("Unable to parse PoLTE registration");
	}

	event_handler(HL7800_EVENT_POLTE_REGISTRATION, &data);

	return true;
}

/* Handler: %POLTECMD: "LOCATE",<res> */
/* Parse the numeric result of a LOCATE command and report it as a
 * locate-status event; status stays -1 if the line cannot be read.
 */
static bool on_cmd_polte_locate_cmd_rsp(struct net_buf **buf, uint16_t len)
{
	char rsp[sizeof("99")] = { 0 };
	size_t rsp_len = sizeof(rsp) - 1;
	size_t out_len;
	struct net_buf *frag = NULL;
	struct mdm_hl7800_polte_location_data data;

	memset(&data, 0, sizeof(data));

	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), sizeof(rsp));

	data.status = -1;
	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	do {
		if (!frag) {
			LOG_ERR("Unable to find end");
			break;
		}
		if (len > rsp_len) {
			LOG_WRN("string too long (len:%d)", len);
			len = rsp_len;
		}
		out_len = net_buf_linearize(rsp, rsp_len, *buf, 0, len);
		rsp[out_len] = 0;

		data.status = (uint32_t)strtoul(rsp, NULL, 10);
	} while (false);

	event_handler(HL7800_EVENT_POLTE_LOCATE_STATUS, &data);

	return true;
}

/* Handler:
 * %POLTEEVU: "LOCATION",<stat>[,<latitude>,<longitude>,<time>,<confidence>]
 */
static bool on_cmd_polte_location(struct net_buf **buf, uint16_t len)
{
	char rsp[MDM_MAX_RESP_SIZE] = { 0 };
	size_t rsp_len = sizeof(rsp) - 1;
	char *rsp_end = rsp + rsp_len;
	struct net_buf *frag = NULL;
	size_t out_len = 0;
	char *start;
	char *end;
	bool parsed;
	struct mdm_hl7800_polte_location_data data;
	static const char POLTE_LOC_DELIMITER[] = "\",\"";

	memset(&data, 0, sizeof(data));

	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), sizeof(rsp));

	parsed = false;
	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	do {
		if (!frag) {
			LOG_ERR("Unable to find end");
			break;
		}
		if (len > rsp_len) {
			LOG_WRN("string too long (len:%d)", len);
			len = rsp_len;
		}
		out_len = net_buf_linearize(rsp, rsp_len, *buf, 0, len);
		rsp[out_len] = 0;

		data.status = -1;
		start = rsp;
		end = "";
		/* Comma isn't present when there is an error. */
		start = strstr(start, ",");
		if (start != NULL && start < rsp_end) {
			/* Blank out the comma so strtoul stops at the status */
			*start = ' ';
			start += 1;
		}
		data.status = (uint32_t)strtoul(rsp, &end, 10);
		if (data.status != 0) {
			LOG_WRN("Response not received from PoLTE server: %d", data.status);
			data.status = MDM_HL7800_POLTE_SERVER_ERROR;
			parsed = true;
			break;
		} else if (start >= rsp_end) {
			break;
		}

		/* Latitude: first quoted field */
		start = strstr(start, "\"") + 1;
		end = strstr(start, POLTE_LOC_DELIMITER);
		if (start > rsp && start < rsp_end && end < rsp_end && end > start) {
			memcpy(data.latitude, start, MIN(end - start, sizeof(data.latitude) - 1));
		} else {
			break;
		}

		/* Longitude: next "," delimited field */
		start = end + strlen(POLTE_LOC_DELIMITER);
		end = strstr(start, POLTE_LOC_DELIMITER);
		if (start > rsp && start < rsp_end && end < rsp_end && end > start) {
			memcpy(data.longitude, start, MIN(end - start, sizeof(data.longitude) - 1));
		} else {
			break;
		}

		/* Timestamp: numeric field */
		start = end + strlen(POLTE_LOC_DELIMITER);
		end = strstr(start, POLTE_LOC_DELIMITER);
		if (start > rsp && start < rsp_end && end < rsp_end && end > start) {
			data.timestamp = (uint32_t)strtoul(start, NULL, 10);
		} else {
			break;
		}

		/* Confidence: final field, terminated by a lone quote */
		start = end + strlen(POLTE_LOC_DELIMITER);
		end = strstr(start, "\"");
		if (start > rsp && start < rsp_end && end < rsp_end && end > start) {
			memcpy(data.confidence_in_meters, start,
			       MIN(end - start, sizeof(data.confidence_in_meters) - 1));
		} else {
			break;
		}

		parsed = true;
	} while (false);

	if (!parsed) {
		LOG_HEXDUMP_ERR(rsp, out_len, "Unable to parse PoLTE location");
	} else {
		LOG_HEXDUMP_DBG(rsp, out_len, "PoLTE Location");
	}

	event_handler(HL7800_EVENT_POLTE, &data);

	return true;
}
#endif /* CONFIG_MODEM_HL7800_POLTE */

/* Signal every open TCP socket's receive callback with a NULL packet so
 * upper layers see the connection as closed.
 */
static void notify_all_tcp_sockets_closed(void)
{
	int i;
	struct hl7800_socket *sock = NULL;

	for (i = 0; i < MDM_MAX_SOCKETS; i++) {
		sock = &iface_ctx.sockets[i];
		if ((sock->context != NULL) && (sock->type == SOCK_STREAM)) {
			LOG_DBG("Sock %d closed", sock->socket_id);
			/* signal RX callback with null packet */
			if (sock->recv_cb) {
				sock->recv_cb(sock->context, sock->recv_pkt, NULL, NULL, 0,
					      sock->recv_user_data);
			}
		}
	}
}

/* Work item that reconciles the Zephyr net_if carrier state with the
 * modem's reported network registration state. May reschedule itself when
 * the driver is not yet ready (init in progress, +KSUP not yet received,
 * or a network drop that must be reported before re-checking).
 */
static void iface_status_work_cb(struct k_work *work)
{
	int ret;
	hl7800_lock();
	enum mdm_hl7800_network_state state;

	if (iface_ctx.off) {
		goto done;
	} else if (!iface_ctx.initialized && iface_ctx.restarting) {
		LOG_DBG("Wait for driver init, process network state later");
		/* we are not ready to process this yet, try again later */
		k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.iface_status_work,
					    IFACE_WORK_DELAY);
		goto done;
	} else if (iface_ctx.wait_for_KSUP &&
		   iface_ctx.wait_for_KSUP_tries < WAIT_FOR_KSUP_RETRIES) {
		LOG_DBG("Wait for +KSUP before updating network state");
		iface_ctx.wait_for_KSUP_tries++;
		/* we have not received +KSUP yet, lets wait more time to receive +KSUP */
		k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.iface_status_work,
					    IFACE_WORK_DELAY);
		goto done;
	} else if (iface_ctx.wait_for_KSUP &&
		   iface_ctx.wait_for_KSUP_tries >= WAIT_FOR_KSUP_RETRIES) {
		/* give up waiting for KSUP */
		LOG_DBG("Give up waiting for");
		iface_ctx.wait_for_KSUP = false;
		check_hl7800_awake();
	}

	wakeup_hl7800();

	LOG_DBG("Updating network state...");

	state = iface_ctx.network_state;
	/* Ensure we bring the network interface down and then re-check the current state */
	if (iface_ctx.network_dropped) {
		iface_ctx.network_dropped = false;
		state = HL7800_OUT_OF_COVERAGE;
		k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.iface_status_work,
					    IFACE_WORK_DELAY);
	}

	/* Query operator selection */
	ret = send_at_cmd(NULL, "AT+COPS?", MDM_CMD_SEND_TIMEOUT, 0, false);
	if (ret < 0) {
		LOG_ERR("AT+COPS ret:%d", ret);
	}

	/* bring iface up/down */
	switch (state) {
	case HL7800_HOME_NETWORK:
	case HL7800_ROAMING:
		if (iface_ctx.iface) {
			LOG_DBG("HL7800 iface UP");
			net_if_carrier_on(iface_ctx.iface);
		}
		break;
	case HL7800_OUT_OF_COVERAGE:
	default:
		if (iface_ctx.iface && (iface_ctx.low_power_mode != HL7800_LPM_PSM)) {
			LOG_DBG("HL7800 iface DOWN");
			iface_ctx.dns_ready = false;
			net_if_carrier_off(iface_ctx.iface);
		}
		break;
	}

	if
((iface_ctx.iface && !net_if_is_up(iface_ctx.iface)) || (iface_ctx.low_power_mode == HL7800_LPM_PSM && state == HL7800_OUT_OF_COVERAGE)) { hl7800_stop_rssi_work(); notify_all_tcp_sockets_closed(); } else if (iface_ctx.iface && net_if_is_up(iface_ctx.iface)) { hl7800_start_rssi_work(); /* get IP address info */ (void)send_at_cmd(NULL, "AT+CGCONTRDP=1", MDM_CMD_SEND_TIMEOUT, CONFIG_MODEM_HL7800_GET_IP_ADDR_INFO_ATTEMPTS, false); /* get active bands */ SEND_AT_CMD_IGNORE_ERROR("AT+KBND?"); } LOG_DBG("Network state updated"); set_busy(false); allow_sleep(true); done: hl7800_unlock(); } static char *get_network_state_string(enum mdm_hl7800_network_state state) { switch (state) { PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800, NOT_REGISTERED); PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800, HOME_NETWORK); PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800, SEARCHING); PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800, REGISTRATION_DENIED); PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800, OUT_OF_COVERAGE); PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800, ROAMING); PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800, EMERGENCY); PREFIXED_SWITCH_CASE_RETURN_STRING(HL7800, UNABLE_TO_CONFIGURE); default: return "UNKNOWN"; } } static void set_network_state(enum mdm_hl7800_network_state state) { iface_ctx.network_state = state; generate_network_state_event(); } static void generate_network_state_event(void) { struct mdm_hl7800_compound_event event; event.code = iface_ctx.network_state; event.string = get_network_state_string(iface_ctx.network_state); LOG_INF("Network State: %d %s", iface_ctx.network_state, event.string); event_handler(HL7800_EVENT_NETWORK_STATE_CHANGE, &event); } /* Handler: +CEREG: <n>,<stat>[,[<lac>],[<ci>],[<AcT>] * [,[<cause_type>],[<reject_cause>] [,[<Active-Time>],[<Periodic-TAU>]]]] */ static bool on_cmd_network_report_query(struct net_buf **buf, uint16_t len) { size_t out_len; char value[MDM_MAX_RESP_SIZE]; char *pos; int l; char val[MDM_MAX_RESP_SIZE]; out_len = net_buf_linearize(value, 
sizeof(value), *buf, 0, len); pos = strchr(value, ','); if (pos) { l = (value + out_len) - pos; strncpy(val, pos + 1, l); val[l] = 0; set_network_state(strtol(val, NULL, 0)); /* start work to adjust iface */ k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.iface_status_work, IFACE_WORK_DELAY); } return true; } static bool on_cmd_operator_index_query(struct net_buf **buf, uint16_t len) { struct net_buf *frag = NULL; char carrier[MDM_HL7800_OPERATOR_INDEX_SIZE]; size_t out_len; wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), MDM_HL7800_OPERATOR_INDEX_SIZE); frag = NULL; len = net_buf_findcrlf(*buf, &frag); if (!frag) { LOG_ERR("Unable to find end of operator index response"); goto done; } out_len = net_buf_linearize(carrier, MDM_HL7800_OPERATOR_INDEX_STRLEN, *buf, 0, len); carrier[out_len] = 0; iface_ctx.operator_index = (uint8_t)strtol(carrier, NULL, 10); LOG_INF("Operator Index: %u", iface_ctx.operator_index); done: return true; } static bool on_cmd_modem_functionality(struct net_buf **buf, uint16_t len) { struct net_buf *frag; size_t out_len; char rsp[MDM_HL7800_MODEM_FUNCTIONALITY_SIZE]; wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), MDM_HL7800_MODEM_FUNCTIONALITY_SIZE); frag = NULL; len = net_buf_findcrlf(*buf, &frag); if (!frag) { LOG_ERR("Unable to find end of response"); goto done; } out_len = net_buf_linearize(rsp, MDM_HL7800_MODEM_FUNCTIONALITY_STRLEN, *buf, 0, len); rsp[out_len] = 0; iface_ctx.functionality = strtol(rsp, NULL, 10); LOG_INF("Modem Functionality: %u", iface_ctx.functionality); done: return true; } /* There can be multiple responses from a single command. 
 * %MEAS: EARFCN=5826, CellID=420, RSRP=-99, RSRQ=-15
 * %MEAS: EARFCN=6400, CellID=201, RSRP=-93, RSRQ=-21
 */
/* Parse one %MEAS site-survey line (EARFCN/CellID/RSRP/RSRQ) and deliver
 * it as an HL7800_EVENT_SITE_SURVEY event. All four fields must be present
 * or the line is ignored.
 */
static bool on_cmd_survey_status(struct net_buf **buf, uint16_t len)
{
	struct net_buf *frag = NULL;
	char response[sizeof("EARFCN=XXXXXXXXXXX, CellID=XXXXXXXXXXX, RSRP=-XXX, RSRQ=-XXX")];
	char *key;
	size_t out_len;
	char *value;
	struct mdm_hl7800_site_survey site_survey;

	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), sizeof(response));

	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	if (!frag) {
		LOG_ERR("Unable to find end");
		goto done;
	}

	/* NOTE(review): response is not explicitly NUL-terminated before the
	 * strstr() calls below; a line that fills the buffer exactly would be
	 * scanned unterminated - confirm the size estimate covers all firmware.
	 */
	out_len = net_buf_linearize(response, sizeof(response), *buf, 0, len);
	LOG_HEXDUMP_DBG(response, out_len, "Site Survey");

	key = "EARFCN=";
	value = strstr(response, key);
	if (value == NULL) {
		goto done;
	} else {
		value += strlen(key);
		site_survey.earfcn = strtoul(value, NULL, 10);
	}

	key = "CellID=";
	value = strstr(response, key);
	if (value == NULL) {
		goto done;
	} else {
		value += strlen(key);
		site_survey.cell_id = strtoul(value, NULL, 10);
	}

	key = "RSRP=";
	value = strstr(response, key);
	if (value == NULL) {
		goto done;
	} else {
		value += strlen(key);
		site_survey.rsrp = strtol(value, NULL, 10);
	}

	key = "RSRQ=";
	value = strstr(response, key);
	if (value == NULL) {
		goto done;
	} else {
		value += strlen(key);
		site_survey.rsrq = strtol(value, NULL, 10);
	}

	event_handler(HL7800_EVENT_SITE_SURVEY, &site_survey);
done:
	return true;
}

/* Handler: +CCLK: "yy/MM/dd,hh:mm:sszz" */
/* Read the modem RTC string; local_time_valid is set only if the string
 * converts cleanly via convert_time_string_to_struct().
 */
static bool on_cmd_rtc_query(struct net_buf **buf, uint16_t len)
{
	struct net_buf *frag = NULL;
	size_t str_len = sizeof(TIME_STRING_FORMAT) - 1;
	char rtc_string[sizeof(TIME_STRING_FORMAT)];

	memset(rtc_string, 0, sizeof(rtc_string));
	iface_ctx.local_time_valid = false;

	wait_for_modem_data_and_newline(buf, net_buf_frags_len(*buf), sizeof(TIME_STRING_FORMAT));

	frag = NULL;
	len = net_buf_findcrlf(*buf, &frag);
	if (!frag) {
		goto done;
	}
	/* The RTC string must match the template length exactly */
	if (len != str_len) {
		LOG_WRN("Unexpected length for RTC string %d (expected:%zu)", len, str_len);
	} else {
		net_buf_linearize(rtc_string, str_len, *buf, 0, str_len);
		LOG_INF("RTC string: '%s'", rtc_string);
		iface_ctx.local_time_valid = convert_time_string_to_struct(
			&iface_ctx.local_time, &iface_ctx.local_time_offset, rtc_string);
	}
done:
	return true;
}

/* Verify the time string matches the TIME_STRING_FORMAT template: all
 * separators in place and a +/- UTC offset followed by the closing quote.
 */
static bool valid_time_string(const char *time_string)
{
	size_t offset, i;

	/* Ensure the all the expected delimiters are present */
	offset = TIME_STRING_DIGIT_STRLEN + TIME_STRING_SEPARATOR_STRLEN;
	i = TIME_STRING_FIRST_SEPARATOR_INDEX;

	for (; i < TIME_STRING_PLUS_MINUS_INDEX; i += offset) {
		if (time_string[i] != TIME_STRING_FORMAT[i]) {
			return false;
		}
	}

	/* The last character is the offset from UTC and can be either
	 * positive or negative. The last " is also handled here.
	 */
	if ((time_string[i] == '+' || time_string[i] == '-') &&
	    (time_string[i + offset] == '"')) {
		return true;
	}

	return false;
}

/* Consume one two-digit field from *pp (advancing past the separator) and
 * clamp it to [min, max]; out-of-range input increments *failure_cnt.
 */
int get_next_time_string_digit(int *failure_cnt, char **pp, int min, int max)
{
	char digits[TIME_STRING_DIGIT_STRLEN + SIZE_OF_NUL];
	int result;

	memset(digits, 0, sizeof(digits));
	memcpy(digits, *pp, TIME_STRING_DIGIT_STRLEN);
	*pp += TIME_STRING_DIGIT_STRLEN + TIME_STRING_SEPARATOR_STRLEN;
	result = strtol(digits, NULL, 10);
	if (result > max) {
		*failure_cnt += 1;
		return max;
	} else if (result < min) {
		*failure_cnt += 1;
		return min;
	} else {
		return result;
	}
}

/* Convert the validated RTC string into a struct tm plus a UTC offset in
 * seconds. Returns true only if every field parsed within range.
 */
static bool convert_time_string_to_struct(struct tm *tm, int32_t *offset, char *time_string)
{
	int fc = 0;
	char *ptr = time_string;

	if (!valid_time_string(ptr)) {
		return false;
	}
	ptr = &ptr[TIME_STRING_FIRST_DIGIT_INDEX];
	tm->tm_year = TIME_STRING_TO_TM_STRUCT_YEAR_OFFSET +
		      get_next_time_string_digit(&fc, &ptr, TM_YEAR_RANGE);
	tm->tm_mon = get_next_time_string_digit(&fc, &ptr, TM_MONTH_RANGE_PLUS_1) - 1;
	tm->tm_mday = get_next_time_string_digit(&fc, &ptr, TM_DAY_RANGE);
	tm->tm_hour = get_next_time_string_digit(&fc, &ptr, TM_HOUR_RANGE);
	tm->tm_min = get_next_time_string_digit(&fc, &ptr, TM_MIN_RANGE);
	tm->tm_sec = get_next_time_string_digit(&fc, &ptr, TM_SEC_RANGE);
tm->tm_isdst = 0; *offset = (int32_t)get_next_time_string_digit(&fc, &ptr, QUARTER_HOUR_RANGE) * SECONDS_PER_QUARTER_HOUR; if (time_string[TIME_STRING_PLUS_MINUS_INDEX] == '-') { *offset *= -1; } return (fc == 0); } /* Handler: +CEREG: <stat>[,[<lac>],[<ci>],[<AcT>] * [,[<cause_type>],[<reject_cause>] [,[<Active-Time>],[<Periodic-TAU>]]]] */ static bool on_cmd_network_report(struct net_buf **buf, uint16_t len) { size_t out_len; char *pos; int l; char val[MDM_MAX_RESP_SIZE]; out_len = net_buf_linearize(iface_ctx.mdm_network_status, sizeof(iface_ctx.mdm_network_status) - 1, *buf, 0, len); iface_ctx.mdm_network_status[out_len] = 0; LOG_DBG("Network status: %s", iface_ctx.mdm_network_status); pos = strchr(iface_ctx.mdm_network_status, ','); if (pos) { l = pos - iface_ctx.mdm_network_status; strncpy(val, iface_ctx.mdm_network_status, l); val[l] = 0; set_network_state(strtol(val, NULL, 0)); } else { set_network_state(strtol(iface_ctx.mdm_network_status, NULL, 0)); } /* keep HL7800 awake because we want to process the network state soon */ set_busy(true); allow_sleep(false); /* start work to adjust iface */ k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.iface_status_work, IFACE_WORK_DELAY); return true; } /* Handler: +KCELLMEAS: <RSRP>,<Downlink Path Loss>,<PUSCH Tx Power>, * <PUCCH Tx Power>,<SiNR> */ static bool on_cmd_atcmdinfo_rssi(struct net_buf **buf, uint16_t len) { /* number of ',' delimiters in this response */ int num_delims = KCELLMEAS_RESPONSE_NUM_DELIMS; char *delims[KCELLMEAS_RESPONSE_NUM_DELIMS]; size_t out_len; char value[MDM_MAX_RESP_SIZE]; char *search_start; int i; out_len = net_buf_linearize(value, len, *buf, 0, len); value[out_len] = 0; search_start = value; /* find all delimiters */ for (i = 0; i < num_delims; i++) { delims[i] = strchr(search_start, ','); if (!delims[i]) { LOG_ERR("Could not find delim %d, val: %s", i, value); goto done; } /* Start next search after current delim location */ search_start = delims[i] + 1; } /* the first value 
	 * in the message is the RSRP */
	iface_ctx.mdm_rssi = strtol(value, NULL, 10);
	/* the 4th ',' (last in the msg) is the start of the SINR */
	iface_ctx.mdm_sinr = strtol(delims[3] + 1, NULL, 10);
	if ((delims[1] - delims[0]) == 1) {
		/* there is no value between the first and second
		 * delimiter, signal is unknown
		 */
		LOG_INF("RSSI (RSRP): UNKNOWN");
	} else {
		LOG_INF("RSSI (RSRP): %d SINR: %d", iface_ctx.mdm_rssi, iface_ctx.mdm_sinr);
		event_handler(HL7800_EVENT_RSSI, &iface_ctx.mdm_rssi);
		event_handler(HL7800_EVENT_SINR, &iface_ctx.mdm_sinr);
	}
done:
	return true;
}

/* Handle the "OK" response from an AT command or a socket call */
/* Route the OK to either the generic response semaphore or, if a socket
 * command is pending, that socket's send semaphore.
 */
static bool on_cmd_sockok(struct net_buf **buf, uint16_t len)
{
	struct hl7800_socket *sock = NULL;

	sock = socket_from_id(iface_ctx.last_socket_id);
	if (!sock || !iface_ctx.socket_cmd) {
		iface_ctx.last_error = 0;
		k_sem_give(&iface_ctx.response_sem);
	} else {
		sock->error = 0;
		k_sem_give(&sock->sock_send_sem);
	}
	return true;
}

/* Handler: +KTCP_IND/+KUDP_IND */
/* Parse "<id>,..." from the indication, look up the socket by id, and
 * release its send semaphore with no error.
 */
static bool on_cmd_sock_ind(struct net_buf **buf, uint16_t len, const char *const type)
{
	struct hl7800_socket *sock = NULL;
	char *delim;
	char value[MDM_MAX_RESP_SIZE];
	size_t out_len;
	int id;

	iface_ctx.last_error = 0;

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;

	/* find ',' because this is the format we expect */
	delim = strchr(value, ',');
	if (!delim) {
		LOG_ERR("%s could not find ','", type);
		goto done;
	}

	id = strtol(value, NULL, 10);
	LOG_DBG("%s ID: %d", type, id);
	sock = socket_from_id(id);
	if (sock) {
		sock->error = 0;
		k_sem_give(&sock->sock_send_sem);
	}

done:
	return true;
}

static bool on_cmd_ktcp_ind(struct net_buf **buf, uint16_t len)
{
	return on_cmd_sock_ind(buf, len, "+KTCP_IND");
}

static bool on_cmd_kudp_ind(struct net_buf **buf, uint16_t len)
{
	return on_cmd_sock_ind(buf, len, "+KUDP_IND");
}

/* Handler: ERROR */
static bool on_cmd_sockerror(struct net_buf **buf, uint16_t len)
{
	struct hl7800_socket *sock = NULL;
	char
		string[MDM_MAX_RESP_SIZE];

	if (len > 0) {
		memset(string, 0, sizeof(string));
		net_buf_linearize(string, sizeof(string), *buf, 0, len);
		LOG_ERR("'%s'", string);
	}

	/* Report -EIO either to the pending AT command or the socket op */
	sock = socket_from_id(iface_ctx.last_socket_id);
	if (!sock) {
		iface_ctx.last_error = -EIO;
		k_sem_give(&iface_ctx.response_sem);
	} else {
		sock->error = -EIO;
		k_sem_give(&sock->sock_send_sem);
	}
	return true;
}

/* Handler: CME/CMS Error */
/* Log the numeric error code, then fail the pending AT command or socket
 * operation with -EIO.
 */
static bool on_cmd_sock_error_code(struct net_buf **buf, uint16_t len)
{
	struct hl7800_socket *sock = NULL;
	char value[MDM_MAX_RESP_SIZE];
	size_t out_len;

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;

	LOG_ERR("Error code: %s", value);

	sock = socket_from_id(iface_ctx.last_socket_id);
	if (!sock) {
		iface_ctx.last_error = -EIO;
		k_sem_give(&iface_ctx.response_sem);
	} else {
		sock->error = -EIO;
		k_sem_give(&sock->sock_send_sem);
	}
	return true;
}

/* Deferred socket notification: deliver a NULL packet to a stream socket's
 * receive callback once any in-progress RX has finished (requeues itself
 * while recv_pkt is still in use).
 */
static void sock_notif_cb_work(struct k_work *work)
{
	struct hl7800_socket *sock = NULL;
	struct k_work_delayable *dwork;

	dwork = k_work_delayable_from_work(work);
	sock = CONTAINER_OF(dwork, struct hl7800_socket, notif_work);

	hl7800_lock();
	/* send null packet */
	if (sock->recv_pkt != NULL) {
		/* we are in the middle of RX,
		 * requeue this and try again
		 */
		k_work_reschedule_for_queue(&hl7800_workq, &sock->notif_work,
					    MDM_SOCK_NOTIF_DELAY);
	} else {
		if (sock->type == SOCK_STREAM) {
			LOG_DBG("Sock %d trigger NULL packet", sock->socket_id);
			k_work_submit_to_queue(&hl7800_workq, &sock->recv_cb_work);
		}
	}
	hl7800_unlock();
}

/* Handler: +KTCP_NOTIF/+KUDP_NOTIF */
/* Parse "<id>,<notif>" and translate the notification value into a socket
 * error (disconnect, send result, or a generic failure that marks the
 * network as dropped).
 */
static bool on_cmd_sock_notif(struct net_buf **buf, uint16_t len)
{
	struct hl7800_socket *sock = NULL;
	char *delim;
	char value[MDM_MAX_RESP_SIZE];
	size_t out_len;
	uint8_t notif_val;
	bool err = false;
	bool trigger_sem = true;
	int id;

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;

	/* find ',' because this is the format we expect */
	delim = strchr(value, ',');
	if (!delim) {
		LOG_ERR("+K**P_NOTIF could not find ','");
		goto done;
	}

	id = strtol(value, NULL, 10);
	notif_val = strtol(delim + 1, NULL, 10);
	/* Disconnects are routine; anything else is worth a warning */
	if (notif_val == HL7800_TCP_DISCON) {
		LOG_DBG("+K**P_NOTIF: %d,%d", id, notif_val);
	} else {
		LOG_WRN("+K**P_NOTIF: %d,%d", id, notif_val);
	}
	sock = socket_from_id(id);
	if (!sock) {
		goto done;
	}

	switch (notif_val) {
	case HL7800_TCP_DATA_SND:
		err = false;
		sock->error = 0;
		break;
	case HL7800_TCP_DISCON:
		trigger_sem = false;
		err = true;
		sock->error = -ENOTCONN;
		break;
	default:
		iface_ctx.network_dropped = true;
		err = true;
		sock->error = -EIO;
		break;
	}

	if (err) {
		/* Send NULL packet to callback to notify upper stack layers
		 * that the peer closed the connection or there was an error.
		 * This is so an app will not get stuck in recv() forever.
		 * Let's do the callback processing in a different work queue
		 * so RX is not delayed.
		 */
		k_work_reschedule_for_queue(&hl7800_workq, &sock->notif_work,
					    MDM_SOCK_NOTIF_DELAY);
		if (trigger_sem) {
			k_sem_give(&sock->sock_send_sem);
		}

		if (iface_ctx.network_dropped) {
			k_work_reschedule_for_queue(&hl7800_workq, &iface_ctx.iface_status_work,
						    IFACE_WORK_DELAY);
		}
	}
done:
	return true;
}

/* Issue the modem command that tears down socket <id>: KTCPDEL for TCP,
 * KUDPCLOSE for UDP.
 */
static int delete_socket(struct hl7800_socket *sock, enum net_sock_type type, uint8_t id)
{
	char cmd[sizeof("AT+KUDPCLOSE=###")];

	if (type == SOCK_STREAM) {
		snprintk(cmd, sizeof(cmd), "AT+KTCPDEL=%d", id);
	} else if (type == SOCK_DGRAM) {
		snprintk(cmd, sizeof(cmd), "AT+KUDPCLOSE=%d", id);
	}

	return send_at_cmd(sock, cmd, MDM_CMD_SEND_TIMEOUT, 0, false);
}

/* Work item that drains the stale-socket queue, deleting each modem-side
 * socket the driver no longer tracks.
 */
static void delete_untracked_socket_work_cb(struct k_work *item)
{
	struct stale_socket *sock = NULL;

	hl7800_lock();
	wakeup_hl7800();
	do {
		sock = dequeue_stale_socket();
		if (sock != NULL) {
			LOG_DBG("Delete untracked socket [%d]", sock->id);
			delete_socket(NULL, sock->type, sock->id);
			free_stale_socket(sock);
		}
	} while (sock != NULL);

	set_busy(false);
	allow_sleep(true);
	hl7800_unlock();
}

/* Common +KTCPCFG/+KUDPCFG handler: record the modem-assigned session id,
 * matching it to the driver socket that requested creation; ids nobody
 * claims are queued for deferred deletion.
 */
static bool on_cmd_sockcreate(enum net_sock_type type, struct net_buf **buf, uint16_t len)
{
	size_t out_len;
	char value[MDM_MAX_RESP_SIZE];
	struct
		hl7800_socket *sock = NULL;

	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	value[out_len] = 0;
	iface_ctx.last_socket_id = strtol(value, NULL, 10);
	if (type == SOCK_STREAM) {
		LOG_DBG("+KTCPCFG: %d", iface_ctx.last_socket_id);
	} else if (type == SOCK_DGRAM) {
		LOG_DBG("+KUDPCFG: %d", iface_ctx.last_socket_id);
	}

	/* check if the socket has been created already */
	sock = socket_from_id(iface_ctx.last_socket_id);
	if (!sock) {
		LOG_DBG("look up new socket by creation id");
		sock = socket_from_id(MDM_CREATE_SOCKET_ID);
		if (!sock || sock->type != type) {
			/* No driver socket is waiting for this id: schedule the
			 * modem-side socket for deletion.
			 */
			if (queue_stale_socket(type, iface_ctx.last_socket_id) == 0) {
				/* delay some time before socket cleanup in case there
				 * are multiple sockets to cleanup
				 */
				k_work_reschedule_for_queue(
					&hl7800_workq, &iface_ctx.delete_untracked_socket_work,
					SOCKET_CLEANUP_WORK_DELAY);
			}
			goto done;
		}
	}

	sock->socket_id = iface_ctx.last_socket_id;
	sock->created = true;
	sock->reconfig = false;
	/* don't give back semaphore -- OK to follow */
done:
	return true;
}

/* Handler: +KTCPCFG: <session_id> */
static bool on_cmd_sock_tcp_create(struct net_buf **buf, uint16_t len)
{
	return on_cmd_sockcreate(SOCK_STREAM, buf, len);
}

/* Handler: +KUDPCFG: <session_id> */
static bool on_cmd_sock_udp_create(struct net_buf **buf, uint16_t len)
{
	return on_cmd_sockcreate(SOCK_DGRAM, buf, len);
}

/* Work item that hands the received packet (or NULL on close/error) to the
 * socket's receive callback; the packet is unreferenced if no callback is
 * registered.
 */
static void sockreadrecv_cb_work(struct k_work *work)
{
	struct hl7800_socket *sock = NULL;
	struct net_pkt *pkt;

	sock = CONTAINER_OF(work, struct hl7800_socket, recv_cb_work);

	LOG_DBG("Sock %d RX CB (size: %zd)", sock->socket_id,
		(sock->recv_pkt != NULL) ?
			net_pkt_get_len(sock->recv_pkt) : 0);

	/* return data */
	pkt = sock->recv_pkt;
	sock->recv_pkt = NULL;
	if (sock->recv_cb) {
		sock->recv_cb(sock->context, pkt, NULL, NULL, 0, sock->recv_user_data);
	} else {
		net_pkt_unref(pkt);
	}
}

/* Read one socket payload out of the UART buffer after a CONNECT response:
 * strip the CONNECT terminator, copy sock->rx_size bytes into a freshly
 * allocated net_pkt (waiting for more UART data as needed), then consume
 * the trailing EOF pattern and OK. The packet is handed off via
 * recv_cb_work. Statement order here mirrors the modem's wire format
 * exactly - do not reorder.
 */
static void sock_read(struct net_buf **buf)
{
	struct hl7800_socket *sock = NULL;
	struct net_buf *frag;
	uint8_t c = 0U;
	int i, hdr_len;
	char ok_resp[sizeof(OK_STRING)];
	char eof[sizeof(EOF_PATTERN)];
	size_t out_len;

	sock = socket_from_id(iface_ctx.last_socket_id);
	if (!sock) {
		LOG_ERR("Socket not found! (%d)", iface_ctx.last_socket_id);
		goto exit;
	}

	if (sock->error != 0) {
		/* cancel notif work and restart */
		k_work_reschedule_for_queue(&hl7800_workq, &sock->notif_work,
					    MDM_SOCK_NOTIF_DELAY);
	}

	LOG_DBG("Socket %d RX %u bytes", sock->socket_id, sock->rx_size);

	/* remove ending \r\n from last CONNECT */
	if (net_buf_frags_len(*buf) < 2) {
		/* wait for \n to be RXd. \r was already RXd. */
		wait_for_modem_data(buf, 0, 1);
	}
	/* remove \r\n */
	net_buf_remove(buf, 2);
	if (!*buf) {
		wait_for_modem_data(buf, 0, sock->rx_size);
	}

	LOG_DBG("Processing RX, buf len: %zd", net_buf_frags_len(*buf));

	/* allocate an RX pkt */
	sock->recv_pkt = net_pkt_rx_alloc_with_buffer(net_context_get_iface(sock->context),
						      sock->rx_size, sock->family,
						      sock->ip_proto, BUF_ALLOC_TIMEOUT);
	if (!sock->recv_pkt) {
		LOG_ERR("Failed net_pkt_get_reserve_rx!");
		goto done;
	}

	/* set pkt data */
	net_pkt_set_context(sock->recv_pkt, sock->context);

	/* add IP / protocol headers */
	hdr_len = pkt_setup_ip_data(sock->recv_pkt, sock);

	/* receive data */
	for (i = 0; i < sock->rx_size; i++) {
		/* pull data from buf and advance to the next frag if needed */
		c = net_buf_get_u8(buf);
		/* write data to packet */
		if (net_pkt_write_u8(sock->recv_pkt, c)) {
			LOG_ERR("Unable to add data! Aborting! Bytes RXd:%d", i);
			goto rx_err;
		}

		if (!*buf && i < sock->rx_size) {
			LOG_DBG("RX more data, bytes RXd:%d", i + 1);
			/* wait for at least one more byte */
			wait_for_modem_data(buf, 0, 1);
			if (!*buf) {
				LOG_ERR("No data in buf!");
				break;
			}
		}
	}

	LOG_DBG("Got all data, get EOF and OK (buf len:%zd)", net_buf_frags_len(*buf));

	if (!*buf || (net_buf_frags_len(*buf) < strlen(EOF_PATTERN))) {
		wait_for_modem_data(buf, net_buf_frags_len(*buf), strlen(EOF_PATTERN));
		if (!*buf) {
			LOG_WRN("No EOF present");
			goto all_rx_data;
		}
	}

	out_len = net_buf_linearize(eof, sizeof(eof), *buf, 0, strlen(EOF_PATTERN));
	eof[out_len] = 0;
	/* remove EOF pattern from buffer */
	net_buf_remove(buf, strlen(EOF_PATTERN));
	if (strcmp(eof, EOF_PATTERN)) {
		LOG_WRN("Could not find EOF [%s]", eof);
	}

	/* Make sure we have \r\nOK\r\n length in the buffer */
	if (!*buf || (net_buf_frags_len(*buf) < strlen(OK_STRING) + 4)) {
		wait_for_modem_data(buf, net_buf_frags_len(*buf), strlen(OK_STRING) + 4);
		if (!*buf) {
			LOG_WRN("No OK present");
			goto all_rx_data;
		}
	}

	frag = NULL;
	(void)net_buf_findcrlf(*buf, &frag);
	if (!frag) {
		LOG_WRN("Unable to find OK start");
		goto all_rx_data;
	}
	/* remove \r\n before OK */
	net_buf_skipcrlf(buf);

	out_len = net_buf_linearize(ok_resp, sizeof(ok_resp), *buf, 0, strlen(OK_STRING));
	ok_resp[out_len] = 0;
	/* remove the message from the buffer */
	net_buf_remove(buf, strlen(OK_STRING));
	if (strcmp(ok_resp, OK_STRING)) {
		LOG_WRN("Could not find OK [%s]", ok_resp);
	}

	/* remove \r\n after OK */
	net_buf_skipcrlf(buf);

all_rx_data:
	net_pkt_cursor_init(sock->recv_pkt);
	net_pkt_set_overwrite(sock->recv_pkt, true);

	if (hdr_len > 0) {
		net_pkt_skip(sock->recv_pkt, hdr_len);
	}

	/* Let's do the callback processing in a different work queue in
	 * case the app takes a long time.
	 */
	k_work_submit_to_queue(&hl7800_workq, &sock->recv_cb_work);
	LOG_DBG("Sock %d RX done", sock->socket_id);
	goto done;
rx_err:
	net_pkt_unref(sock->recv_pkt);
	sock->recv_pkt = NULL;
done:
	if (sock->type == SOCK_STREAM) {
		if (sock->error == 0) {
			sock->state = SOCK_CONNECTED;
		}
	} else {
		sock->state = SOCK_IDLE;
	}
exit:
	set_busy(false);
	allow_sleep(true);
	hl7800_TX_unlock();
}

/* Handler for CONNECT: either begin the deferred payload read (SOCK_RX
 * state, in which case the data stays in the buffer for sock_read) or
 * release the waiting sender.
 */
static bool on_cmd_connect(struct net_buf **buf, uint16_t len)
{
	bool remove_data_from_buffer = true;
	struct hl7800_socket *sock = NULL;

	sock = socket_from_id(iface_ctx.last_socket_id);
	if (!sock) {
		LOG_ERR("Sock (%d) not found", iface_ctx.last_socket_id);
		goto done;
	}

	if (sock->state == SOCK_RX) {
		remove_data_from_buffer = false;
		sock_read(buf);
	} else {
		k_sem_give(&sock->sock_send_sem);
	}

done:
	return remove_data_from_buffer;
}

/* Kick off a socket receive by sending AT+KTCPRCV/AT+KUDPRCV; the payload
 * arrives later via the CONNECT handler. rx_size is clamped to the MTU
 * minus the protocol header length.
 */
static int start_socket_rx(struct hl7800_socket *sock, uint16_t rx_size)
{
	char sendbuf[sizeof("AT+KTCPRCV=+#########,#####")];

	if ((sock->socket_id <= 0) || (sock->rx_size <= 0)) {
		LOG_WRN("Cannot start socket RX, ID: %d rx size: %d", sock->socket_id,
			sock->rx_size);
		return -1;
	}

	LOG_DBG("Start socket RX ID:%d size:%d", sock->socket_id, rx_size);
	sock->state = SOCK_RX;
	if (sock->type == SOCK_DGRAM) {
#if defined(CONFIG_NET_IPV4)
		if (rx_size > (net_if_get_mtu(iface_ctx.iface) - NET_IPV4UDPH_LEN)) {
			sock->rx_size = net_if_get_mtu(iface_ctx.iface) - NET_IPV4UDPH_LEN;
		}
#endif
#if defined(CONFIG_NET_IPV6)
		if (rx_size > (net_if_get_mtu(iface_ctx.iface) - NET_IPV6UDPH_LEN)) {
			sock->rx_size = net_if_get_mtu(iface_ctx.iface) - NET_IPV6UDPH_LEN;
		}
#endif
		/* NOTE(review): UDP sends the unclamped rx_size parameter here
		 * while the TCP branch below sends the clamped sock->rx_size -
		 * confirm this asymmetry is intentional.
		 */
		snprintk(sendbuf, sizeof(sendbuf), "AT+KUDPRCV=%d,%u", sock->socket_id, rx_size);
	} else {
#if defined(CONFIG_NET_IPV4)
		if (rx_size > (net_if_get_mtu(iface_ctx.iface) - NET_IPV4TCPH_LEN)) {
			sock->rx_size = net_if_get_mtu(iface_ctx.iface) - NET_IPV4TCPH_LEN;
		}
#endif
#if defined(CONFIG_NET_IPV6)
		if (rx_size > (net_if_get_mtu(iface_ctx.iface) - NET_IPV6TCPH_LEN)) {
			sock->rx_size = net_if_get_mtu(iface_ctx.iface) -
/* Deferred socket-RX starter.
 *
 * Queued (from on_cmd_sockdataind) when a +KTCP_DATA/+KUDP_DATA indication
 * arrives while the TX channel is already locked; re-issues the receive
 * command once the work queue runs it.
 *
 * Locking: takes the full driver lock here; only the RX half is released
 * below because the TX half must stay held until sock_read() finishes and
 * unlocks it (see comment inline).
 */
static void sock_rx_data_cb_work(struct k_work *work)
{
	struct hl7800_socket *sock = NULL;
	int rc;

	sock = CONTAINER_OF(work, struct hl7800_socket, rx_data_work);

	hl7800_lock();
	wakeup_hl7800();

	/* start RX */
	rc = start_socket_rx(sock, sock->rx_size);

	/* Only unlock the RX because we just locked it above.
	 *  At the end of socket RX, the TX will be unlocked.
	 */
	hl7800_RX_unlock();
	if (rc < 0) {
		/* we didn't start socket RX so unlock TX now. */
		hl7800_TX_unlock();
	}
}
/* Handler: +WDSI: ##
 *
 * Unsolicited device-services indication. Parses the numeric indicator into
 * iface_ctx.device_services_ind; when firmware update support is enabled and
 * the indicator reports a downloaded package, kicks off the install step via
 * finish_fw_update_work.
 *
 * @param buf RX buffer positioned just past the "+WDSI: " prefix.
 * @param len Length of the remainder of the line.
 * @return true so the RX thread removes the line from the buffer.
 */
static bool on_cmd_device_service_ind(struct net_buf **buf, uint16_t len)
{
	char value[MDM_MAX_RESP_SIZE];
	size_t out_len;

	memset(value, 0, sizeof(value));
	out_len = net_buf_linearize(value, sizeof(value), *buf, 0, len);
	if (out_len > 0) {
		iface_ctx.device_services_ind = strtol(value, NULL, 10);
	}
	LOG_INF("+WDSI: %d", iface_ctx.device_services_ind);
#ifdef CONFIG_MODEM_HL7800_FW_UPDATE
	if (iface_ctx.device_services_ind == WDSI_PKG_DOWNLOADED) {
		/* the downloaded package must now be installed (AT+WDSR) */
		k_work_submit_to_queue(&hl7800_workq,
				       &iface_ctx.finish_fw_update_work);
	}
#endif

	return true;
}
read %u of %zu!", rx_len, bytes_read); } total_read += bytes_read; } return total_read; } #ifdef CONFIG_MODEM_HL7800_FW_UPDATE static void finish_fw_update_work_callback(struct k_work *item) { ARG_UNUSED(item); send_at_cmd(NULL, "AT+WDSR=4", MDM_CMD_SEND_TIMEOUT, 0, false); iface_ctx.fw_updated = true; set_fota_state(HL7800_FOTA_INSTALL); hl7800_unlock(); } static uint8_t calc_fw_update_crc(uint8_t *ptr, int count) { uint8_t crc = 0; unsigned char l; uint16_t i = 0; while (i < count) { l = *ptr; crc += l; ++ptr; ++i; } return crc; } static int send_fw_update_packet(struct xmodem_packet *pkt) { generate_fota_count_event(); LOG_DBG("Send FW update packet %d,%d", pkt->id, iface_ctx.fw_packet_count); return mdm_receiver_send(&iface_ctx.mdm_ctx, (const uint8_t *)pkt, XMODEM_PACKET_SIZE); } static int prepare_and_send_fw_packet(void) { int ret = 0; int read_res; iface_ctx.fw_packet.id_complement = 0xFF - iface_ctx.fw_packet.id; ret = fs_seek(&iface_ctx.fw_update_file, iface_ctx.file_pos, FS_SEEK_SET); if (ret < 0) { set_fota_state(HL7800_FOTA_FILE_ERROR); LOG_ERR("Could not seek to offset %d of file", iface_ctx.file_pos); return ret; } read_res = fs_read(&iface_ctx.fw_update_file, iface_ctx.fw_packet.data, XMODEM_DATA_SIZE); if (read_res < 0) { set_fota_state(HL7800_FOTA_FILE_ERROR); LOG_ERR("Failed to read fw update file [%d]", read_res); return ret; } else if (read_res < XMODEM_DATA_SIZE) { set_fota_state(HL7800_FOTA_PAD); fs_close(&iface_ctx.fw_update_file); /* pad rest of data */ for (int i = read_res; i < XMODEM_DATA_SIZE; i++) { iface_ctx.fw_packet.data[i] = XMODEM_PAD_VALUE; } } iface_ctx.fw_packet.crc = calc_fw_update_crc(iface_ctx.fw_packet.data, XMODEM_DATA_SIZE); send_fw_update_packet(&iface_ctx.fw_packet); iface_ctx.file_pos += read_res; iface_ctx.fw_packet_count++; iface_ctx.fw_packet.id++; return ret; } static void process_fw_update_rx(struct net_buf **rx_buf) { static uint8_t xm_msg; uint8_t eot = XM_EOT; xm_msg = net_buf_get_u8(rx_buf); if (xm_msg == 
XM_NACK) { if (iface_ctx.fw_update_state == HL7800_FOTA_START) { /* send first FW update packet */ set_fota_state(HL7800_FOTA_WIP); iface_ctx.file_pos = 0; iface_ctx.fw_packet_count = 1; iface_ctx.fw_packet.id = 1; iface_ctx.fw_packet.preamble = XM_SOH_1K; prepare_and_send_fw_packet(); } else if (iface_ctx.fw_update_state == HL7800_FOTA_WIP) { LOG_DBG("RX FW update NACK"); /* resend last packet */ send_fw_update_packet(&iface_ctx.fw_packet); } } else if (xm_msg == XM_ACK) { LOG_DBG("RX FW update ACK"); if (iface_ctx.fw_update_state == HL7800_FOTA_WIP) { /* send next FW update packet */ prepare_and_send_fw_packet(); } else if (iface_ctx.fw_update_state == HL7800_FOTA_PAD) { set_fota_state(HL7800_FOTA_SEND_EOT); mdm_receiver_send(&iface_ctx.mdm_ctx, &eot, sizeof(eot)); } } else { LOG_WRN("RX unhandled FW update value: %02x", xm_msg); } } #endif /* CONFIG_MODEM_HL7800_FW_UPDATE */ /* RX thread */ static void hl7800_rx(void *p1, void *p2, void *p3) { ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3); struct net_buf *rx_buf = NULL; struct net_buf *frag = NULL; int i, cmp_res; uint16_t len; size_t out_len; bool cmd_handled = false; static char rx_msg[MDM_HANDLER_MATCH_MAX_LEN]; bool unlock = false; bool remove_line_from_buf = true; #ifdef HL7800_LOG_UNHANDLED_RX_MSGS char msg[MDM_MAX_RESP_SIZE]; #endif static const struct cmd_handler handlers[] = { /* MODEM Information */ CMD_HANDLER("AT+CGMI", atcmdinfo_manufacturer), CMD_HANDLER("AT+CGMM", atcmdinfo_model), CMD_HANDLER("AT+CGMR", atcmdinfo_revision), CMD_HANDLER("AT+CGSN", atcmdinfo_imei), CMD_HANDLER("AT+KGSN=3", atcmdinfo_serial_number), CMD_HANDLER("+KCELLMEAS: ", atcmdinfo_rssi), CMD_HANDLER("+CGCONTRDP: ", atcmdinfo_ipaddr), CMD_HANDLER("+COPS: ", atcmdinfo_operator_status), CMD_HANDLER("+KSRAT: ", radio_tech_status), CMD_HANDLER("+KBNDCFG: ", radio_band_configuration), CMD_HANDLER("+KBND: ", radio_active_bands), CMD_HANDLER("+CCID: ", atcmdinfo_iccid), CMD_HANDLER("ACTIVE PROFILE:", atcmdinfo_active_profile), 
CMD_HANDLER("STORED PROFILE 0:", atcmdinfo_stored_profile0), CMD_HANDLER("STORED PROFILE 1:", atcmdinfo_stored_profile1), CMD_HANDLER("+WPPP: 1,1,", atcmdinfo_pdp_authentication_cfg), CMD_HANDLER("+CGDCONT: 1", atcmdinfo_pdp_context), CMD_HANDLER("AT+CEREG?", network_report_query), CMD_HANDLER("+KCARRIERCFG: ", operator_index_query), CMD_HANDLER("AT+CIMI", atcmdinfo_imsi), CMD_HANDLER("+CFUN: ", modem_functionality), CMD_HANDLER("%MEAS: ", survey_status), CMD_HANDLER("+CCLK: ", rtc_query), /* UNSOLICITED modem information */ /* mobile startup report */ CMD_HANDLER("+KSUP: ", startup_report), /* network status */ CMD_HANDLER("+CEREG: ", network_report), /* SOLICITED CMD AND SOCKET RESPONSES */ CMD_HANDLER("OK", sockok), CMD_HANDLER("ERROR", sockerror), /* SOLICITED SOCKET RESPONSES */ CMD_HANDLER("+CME ERROR: ", sock_error_code), CMD_HANDLER("+CMS ERROR: ", sock_error_code), CMD_HANDLER("+CEER: ", sockerror), CMD_HANDLER("+KTCPCFG: ", sock_tcp_create), CMD_HANDLER("+KUDPCFG: ", sock_udp_create), CMD_HANDLER(CONNECT_STRING, connect), CMD_HANDLER("NO CARRIER", sockerror), /* UNSOLICITED SOCKET RESPONSES */ CMD_HANDLER("+KTCP_IND: ", ktcp_ind), CMD_HANDLER("+KUDP_IND: ", kudp_ind), CMD_HANDLER("+KTCP_NOTIF: ", sock_notif), CMD_HANDLER("+KUDP_NOTIF: ", sock_notif), CMD_HANDLER("+KTCP_DATA: ", sockdataind), CMD_HANDLER("+KUDP_DATA: ", sockdataind), /* FIRMWARE UPDATE RESPONSES */ CMD_HANDLER("+WDSI: ", device_service_ind), #ifdef CONFIG_MODEM_HL7800_GPS CMD_HANDLER("+GNSSEV: ", gps_event), CMD_HANDLER("Latitude: ", latitude), CMD_HANDLER("Longitude: ", longitude), CMD_HANDLER("GpsTime: ", gps_time), CMD_HANDLER("FixType: ", fix_type), CMD_HANDLER("HEPE: ", hepe), CMD_HANDLER("Altitude: ", altitude), CMD_HANDLER("AltUnc: ", alt_unc), CMD_HANDLER("Direction: ", direction), CMD_HANDLER("HorSpeed: ", hor_speed), CMD_HANDLER("VerSpeed: ", ver_speed), #endif #ifdef CONFIG_MODEM_HL7800_POLTE CMD_HANDLER("%POLTEEVU: \"REGISTER\",0,", polte_registration), CMD_HANDLER("%POLTECMD: 
\"LOCATE\",", polte_locate_cmd_rsp), CMD_HANDLER("%POLTEEVU: \"LOCATION\",", polte_location), #endif }; while (true) { /* wait for incoming data */ (void)k_sem_take(&iface_ctx.mdm_ctx.rx_sem, K_FOREVER); hl7800_read_rx(&rx_buf); /* If an external module hasn't locked the command processor, * then do so now. */ if (!hl7800_RX_locked()) { hl7800_RX_lock(); unlock = true; } else { unlock = false; } while (rx_buf) { remove_line_from_buf = true; cmd_handled = false; #ifdef CONFIG_MODEM_HL7800_FW_UPDATE if ((iface_ctx.fw_update_state == HL7800_FOTA_START) || (iface_ctx.fw_update_state == HL7800_FOTA_WIP) || (iface_ctx.fw_update_state == HL7800_FOTA_PAD)) { process_fw_update_rx(&rx_buf); if (!rx_buf) { break; } } #endif net_buf_skipcrlf(&rx_buf); if (!rx_buf) { break; } frag = NULL; len = net_buf_findcrlf(rx_buf, &frag); if (!frag) { break; } out_len = net_buf_linearize(rx_msg, sizeof(rx_msg), rx_buf, 0, len); /* look for matching data handlers */ i = -1; for (i = 0; i < ARRAY_SIZE(handlers); i++) { if (iface_ctx.search_no_id_resp) { cmp_res = strncmp(iface_ctx.no_id_resp_cmd, handlers[i].cmd, handlers[i].cmd_len); } else { cmp_res = strncmp(rx_msg, handlers[i].cmd, handlers[i].cmd_len); } if (cmp_res == 0) { /* found a matching handler */ /* skip cmd_len */ if (!iface_ctx.search_no_id_resp) { rx_buf = net_buf_skip( rx_buf, handlers[i].cmd_len); } /* locate next cr/lf */ frag = NULL; len = net_buf_findcrlf(rx_buf, &frag); if (!frag) { break; } LOG_DBG("HANDLE %s (len:%u)", handlers[i].cmd, len); /* call handler */ if (handlers[i].func) { remove_line_from_buf = handlers[i].func( &rx_buf, len); } cmd_handled = true; iface_ctx.search_no_id_resp = false; frag = NULL; /* make sure buf still has data */ if (!rx_buf) { break; } /* We've handled the current line * and need to exit the "search for * handler loop". Let's skip any * "extra" data and look for the next * CR/LF, leaving us ready for the * next handler search. 
/* Suspend the modem UART (and its RX IRQ) to save power.
 *
 * No-op unless CONFIG_PM_DEVICE is enabled. If the suspend action fails,
 * RX interrupts are re-enabled so the driver keeps receiving data.
 *
 * Fix: `state` was read even when pm_device_state_get() failed, i.e. a read
 * of an uninitialized variable (undefined behavior). It is now initialized
 * to PM_DEVICE_STATE_ACTIVE so that, on a failed query, we still attempt the
 * suspend (which is this function's purpose and fails gracefully if the
 * device is already suspended).
 */
static void shutdown_uart(void)
{
#ifdef CONFIG_PM_DEVICE
	int rc;
	/* default: assume active so a failed state query still attempts suspend */
	enum pm_device_state state = PM_DEVICE_STATE_ACTIVE;

	rc = pm_device_state_get(iface_ctx.mdm_ctx.uart_dev, &state);
	if (rc) {
		LOG_ERR("Error getting UART power state (%d)", rc);
	}

	if (state != PM_DEVICE_STATE_SUSPENDED) {
		HL7800_IO_DBG_LOG("Power OFF the UART");
		/* stop RX before the peripheral goes down */
		uart_irq_rx_disable(iface_ctx.mdm_ctx.uart_dev);

		rc = pm_device_action_run(iface_ctx.mdm_ctx.uart_dev,
					  PM_DEVICE_ACTION_SUSPEND);
		if (rc) {
			LOG_ERR("Error disabling UART peripheral (%d)", rc);
			/* suspend failed: restore RX so data is not lost */
			uart_irq_rx_enable(iface_ctx.mdm_ctx.uart_dev);
		}
	}
#endif
}
/* Work item queued by mdm_vgpio_callback_isr().
 *
 * Runs the VGPIO-low handling that cannot be done in ISR context (taking the
 * driver lock and the network-state semaphore may block): records the
 * hibernate sleep level once the modem has actually powered its IO rail down,
 * and drops the network carrier unless PSM keeps the registration alive.
 */
static void mdm_vgpio_work_cb(struct k_work *item)
{
	ARG_UNUSED(item);

	hl7800_lock();
	if (!iface_ctx.vgpio_state) {
		/* VGPIO low: the modem entered (lite-)hibernate */
		if (iface_ctx.desired_sleep_level == HL7800_SLEEP_HIBERNATE ||
		    iface_ctx.desired_sleep_level == HL7800_SLEEP_LITE_HIBERNATE) {
			if (iface_ctx.sleep_state != iface_ctx.desired_sleep_level) {
				set_sleep_state(iface_ctx.desired_sleep_level);
			}
		}
		/* in PSM the registration survives sleep, so keep carrier up */
		if (iface_ctx.iface && iface_ctx.initialized &&
		    iface_ctx.low_power_mode != HL7800_LPM_PSM) {
			net_if_carrier_off(iface_ctx.iface);
		}
	}
	hl7800_unlock();
}
*/ k_work_submit_to_queue(&hl7800_workq, &iface_ctx.mdm_vgpio_work); } void mdm_uart_dsr_callback_isr(const struct device *port, struct gpio_callback *cb, uint32_t pins) { ARG_UNUSED(port); ARG_UNUSED(cb); ARG_UNUSED(pins); iface_ctx.dsr_state = read_pin(1, &hl7800_cfg.gpio[MDM_UART_DSR]); HL7800_IO_DBG_LOG("MDM_UART_DSR:%d", iface_ctx.dsr_state); } #ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE static void mark_sockets_for_reconfig(void) { int i; struct hl7800_socket *sock = NULL; for (i = 0; i < MDM_MAX_SOCKETS; i++) { sock = &iface_ctx.sockets[i]; if ((sock->context != NULL) && (sock->created)) { /* mark socket as possibly needing re-configuration */ sock->reconfig = true; } } } #endif void mdm_gpio6_callback_isr(const struct device *port, struct gpio_callback *cb, uint32_t pins) { ARG_UNUSED(port); ARG_UNUSED(cb); ARG_UNUSED(pins); iface_ctx.gpio6_state = read_pin(1, &hl7800_cfg.gpio[MDM_GPIO6]); HL7800_IO_DBG_LOG("MDM_GPIO6:%d", iface_ctx.gpio6_state); #ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE if (!iface_ctx.gpio6_state) { /* HL7800 is not awake, shut down UART to save power */ shutdown_uart(); iface_ctx.wait_for_KSUP = true; iface_ctx.wait_for_KSUP_tries = 0; iface_ctx.reconfig_IP_connection = true; mark_sockets_for_reconfig(); } else { if (iface_ctx.off) { return; } else if (iface_ctx.vgpio_state) { power_on_uart(); /* Keep the modem awake to see if it has anything to send to us. */ allow_sleep(false); /* Allow the modem to go back to sleep if it was the one who * sourced the transition. 
/* GPIO ISR for the modem's UART CTS line.
 *
 * CTS doubles as a sleep indication on the HL7800, so the edge is debounced
 * in two stages: transitions arriving within CONFIG_MODEM_HL7800_CTS_FILTER_US
 * of the previous one are ignored, and the pin is re-sampled after a busy-wait
 * of the same filter period to reject glitches. Duplicate (unchanged) states
 * are also dropped. On a stable transition, low-power builds suspend or resume
 * the UART accordingly, and a registered user callback is notified when the
 * configured sleep level is HL7800_SLEEP_SLEEP.
 */
void mdm_uart_cts_callback_isr(const struct device *port, struct gpio_callback *cb, uint32_t pins)
{
	ARG_UNUSED(port);
	ARG_UNUSED(cb);
	ARG_UNUSED(pins);
	uint64_t now;
	uint64_t elapsed;
	int resample_state;

	iface_ctx.cts_state = read_pin(0, &hl7800_cfg.gpio[MDM_UART_CTS]);
	/* Debounce the CTS signal */
	now = k_ticks_to_us_floor64(k_uptime_ticks());
	elapsed = now - iface_ctx.last_cts_time;
	if (iface_ctx.last_cts_time <= 0) {
		/* This is the first transition we have seen, continue */
	} else if (elapsed <= CONFIG_MODEM_HL7800_CTS_FILTER_US) {
		/* CTS changed too quickly, ignore this transition */
		iface_ctx.last_cts_time = now;
		return;
	}
	iface_ctx.last_cts_time = now;
	/* re-sample after the filter period to reject glitches */
	k_busy_wait(CONFIG_MODEM_HL7800_CTS_FILTER_US);
	resample_state = read_pin(0, &hl7800_cfg.gpio[MDM_UART_CTS]);
	if (iface_ctx.cts_state != resample_state) {
		/* CTS changed while we were debouncing, ignore it */
		iface_ctx.cts_state = resample_state;
		return;
	}
	iface_ctx.cts_state = resample_state;
	/* only act on an actual state change */
	if (iface_ctx.cts_state != iface_ctx.last_cts_state) {
		iface_ctx.last_cts_state = iface_ctx.cts_state;
	} else {
		return;
	}
	HL7800_IO_DBG_LOG("MDM_UART_CTS:%d(%llu)", iface_ctx.cts_state, elapsed);
#ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE
	if (iface_ctx.cts_state && iface_ctx.allow_sleep) {
		/* HL7800 cannot receive UART data, shut down UART to save power.
		 * This is critical for proper low power operation. If the UART is disabled
		 * after VGPIO is low, the UART will not suspend properly.
		 */
		shutdown_uart();
	} else {
		if (iface_ctx.off) {
			return;
		}
		/* only power the UART when the modem IO rails are up */
		if (iface_ctx.vgpio_state && iface_ctx.gpio6_state) {
			power_on_uart();
			/* Wake up the modem to see if it has anything to send to us. */
			allow_sleep(false);
			/* Allow the modem to go back to sleep if it was the one who
			 * sourced the CTS transition.
			 */
			allow_sleep(true);
		}
	}
#endif
	if ((iface_ctx.cts_callback != NULL) &&
	    (iface_ctx.desired_sleep_level == HL7800_SLEEP_SLEEP)) {
		iface_ctx.cts_callback(iface_ctx.cts_state);
	}
	check_hl7800_awake();
}
/**
 * @brief compares two version strings with any delimiter
 *
 * Numeric components are compared level by level (so "4.10" > "4.9");
 * on a tie the shorter string is considered smaller.
 *
 * @param v1: version string 1
 * @param v2: version string 2
 *
 * @retval 0 if equal, < 0 if v1 < v2, > 0 if v1 > v2.
 */
static int compare_versions(char *v1, const char *v2)
{
	char *end1;
	char *end2;
	unsigned long num1;
	unsigned long num2;

	for (;;) {
		/* extract the leading number of the current level */
		num1 = strtoul(v1, &end1, 10);
		num2 = strtoul(v2, &end2, 10);

		if (num1 != num2) {
			return (num1 < num2) ? -1 : 1;
		}

		/* equal at this level: step past the parsed numbers */
		v1 = end1;
		v2 = end2;

		if (*v1 == '\0' && *v2 == '\0') {
			/* both strings exhausted: identical versions */
			return 0;
		}
		if (*v1 == '\0') {
			/* v1 ran out first: it is the smaller version */
			return -1;
		}
		if (*v2 == '\0') {
			/* v2 ran out first: v1 is the larger version */
			return 1;
		}

		/* skip one delimiter character on each side and continue */
		v1++;
		v2++;
	}
}
0; } hl7800_lock(); ret = set_bands(temp_bands, true); hl7800_unlock(); return ret; } static int modem_reset_and_configure(void) { int ret = 0; bool sleep = false; bool config_apn = false; char *apn; #ifdef CONFIG_MODEM_HL7800_EDRX int edrx_act_type; char set_edrx_msg[sizeof("AT+CEDRXS=2,4,\"0000\"")]; #endif #if CONFIG_MODEM_HL7800_CONFIGURE_BANDS uint16_t bands_top = 0; uint32_t bands_middle = 0, bands_bottom = 0; char new_bands[MDM_BAND_BITMAP_STR_LENGTH_MAX + 1]; #endif #if CONFIG_MODEM_HL7800_PSM const char TURN_ON_PSM[] = "AT+CPSMS=1,,,\"" CONFIG_MODEM_HL7800_PSM_PERIODIC_TAU "\",\"" CONFIG_MODEM_HL7800_PSM_ACTIVE_TIME "\""; #endif set_busy(true); iface_ctx.restarting = true; iface_ctx.dns_ready = false; if (iface_ctx.iface) { net_if_carrier_off(iface_ctx.iface); } hl7800_stop_rssi_work(); initialize_sleep_level(); reboot: modem_reset(); modem_run(); ret = modem_boot_handler("Initialization"); if (!iface_ctx.mdm_startup_reporting_on) { /* Turn on mobile start-up reporting for next reset. * It will indicate if SIM is present. * Its value is saved in non-volatile memory on the HL7800. */ SEND_AT_CMD_EXPECT_OK("AT+KSREP=1"); goto reboot; } else if (ret < 0) { goto error; } /* turn on numeric error codes */ SEND_AT_CMD_EXPECT_OK("AT+CMEE=1"); /* modem revision */ SEND_COMPLEX_AT_CMD("AT+CGMR"); /* determine RAT command support */ ret = compare_versions(iface_ctx.mdm_revision, NEW_RAT_CMD_MIN_VERSION); if (ret < 0) { iface_ctx.new_rat_cmd_support = false; } else { iface_ctx.new_rat_cmd_support = true; } /* Query current Radio Access Technology (RAT) */ SEND_AT_CMD_EXPECT_OK("AT+KSRAT?"); /* If CONFIG_MODEM_HL7800_RAT_M1 or CONFIG_MODEM_HL7800_RAT_NB1, then * set the radio mode. This is only done here if the driver has not been * initialized (!iface_ctx.configured) yet because the public API also * allows the RAT to be changed (and will reset the modem). 
*/ #ifndef CONFIG_MODEM_HL7800_RAT_NO_CHANGE if (!iface_ctx.configured) { #if CONFIG_MODEM_HL7800_RAT_M1 if (iface_ctx.mdm_rat != MDM_RAT_CAT_M1) { if (iface_ctx.new_rat_cmd_support) { SEND_AT_CMD_ONCE_EXPECT_OK(SET_RAT_M1_CMD); } else { SEND_AT_CMD_ONCE_EXPECT_OK( SET_RAT_M1_CMD_LEGACY); } if (ret >= 0) { goto reboot; } } #elif CONFIG_MODEM_HL7800_RAT_NB1 if (iface_ctx.mdm_rat != MDM_RAT_CAT_NB1) { if (iface_ctx.new_rat_cmd_support) { SEND_AT_CMD_ONCE_EXPECT_OK(SET_RAT_NB1_CMD); } else { SEND_AT_CMD_ONCE_EXPECT_OK( SET_RAT_NB1_CMD_LEGACY); } if (ret >= 0) { goto reboot; } } #endif } #endif SEND_AT_CMD_EXPECT_OK("AT+KBNDCFG?"); /* Configure LTE bands */ #if CONFIG_MODEM_HL7800_CONFIGURE_BANDS #if CONFIG_MODEM_HL7800_BAND_1 bands_bottom |= 1 << 0; #endif #if CONFIG_MODEM_HL7800_BAND_2 bands_bottom |= 1 << 1; #endif #if CONFIG_MODEM_HL7800_BAND_3 bands_bottom |= 1 << 2; #endif #if CONFIG_MODEM_HL7800_BAND_4 bands_bottom |= 1 << 3; #endif #if CONFIG_MODEM_HL7800_BAND_5 bands_bottom |= 1 << 4; #endif #if CONFIG_MODEM_HL7800_BAND_8 bands_bottom |= 1 << 7; #endif #if CONFIG_MODEM_HL7800_BAND_9 bands_bottom |= 1 << 8; #endif #if CONFIG_MODEM_HL7800_BAND_10 bands_bottom |= 1 << 9; #endif #if CONFIG_MODEM_HL7800_BAND_12 bands_bottom |= 1 << 11; #endif #if CONFIG_MODEM_HL7800_BAND_13 bands_bottom |= 1 << 12; #endif #if CONFIG_MODEM_HL7800_BAND_14 bands_bottom |= 1 << 13; #endif #if CONFIG_MODEM_HL7800_BAND_17 bands_bottom |= 1 << 16; #endif #if CONFIG_MODEM_HL7800_BAND_18 bands_bottom |= 1 << 17; #endif #if CONFIG_MODEM_HL7800_BAND_19 bands_bottom |= 1 << 18; #endif #if CONFIG_MODEM_HL7800_BAND_20 bands_bottom |= 1 << 19; #endif #if CONFIG_MODEM_HL7800_BAND_25 bands_bottom |= 1 << 24; #endif #if CONFIG_MODEM_HL7800_BAND_26 bands_bottom |= 1 << 25; #endif #if CONFIG_MODEM_HL7800_BAND_27 bands_bottom |= 1 << 26; #endif #if CONFIG_MODEM_HL7800_BAND_28 bands_bottom |= 1 << 27; #endif #if CONFIG_MODEM_HL7800_BAND_66 bands_top |= 1 << 1; #endif /* Check if bands are configured 
correctly */ if (iface_ctx.mdm_bands_top != bands_top || iface_ctx.mdm_bands_middle != bands_middle || iface_ctx.mdm_bands_bottom != bands_bottom) { if (iface_ctx.mdm_bands_top != bands_top) { LOG_INF("Top band mismatch, want %04x got %04x", bands_top, iface_ctx.mdm_bands_top); } if (iface_ctx.mdm_bands_middle != bands_middle) { LOG_INF("Middle band mismatch, want %08x got %08x", bands_middle, iface_ctx.mdm_bands_middle); } if (iface_ctx.mdm_bands_bottom != bands_bottom) { LOG_INF("Bottom band mismatch, want %08x got %08x", bands_bottom, iface_ctx.mdm_bands_bottom); } snprintk(new_bands, sizeof(new_bands), "%0" STRINGIFY(MDM_TOP_BAND_SIZE) "x%0" STRINGIFY( MDM_MIDDLE_BAND_SIZE) "x%0" STRINGIFY(MDM_BOTTOM_BAND_SIZE) "x", bands_top, bands_middle, bands_bottom); ret = set_bands(new_bands, false); if (ret < 0) { goto error; } } #endif /** * Disable the radio until all config is done. * This ensures all settings are applied during this session instead of on the next reboot. */ SEND_AT_CMD_EXPECT_OK("AT+CFUN=4,0"); iface_ctx.low_power_mode = HL7800_LPM_NONE; #ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE /* enable GPIO6 low power monitoring */ SEND_AT_CMD_EXPECT_OK("AT+KHWIOCFG=3,1,6"); ret = set_sleep_level(); if (ret < 0) { goto error; } #if CONFIG_MODEM_HL7800_PSM iface_ctx.low_power_mode = HL7800_LPM_PSM; /* Turn off eDRX */ SEND_AT_CMD_EXPECT_OK("AT+CEDRXS=0"); SEND_AT_CMD_EXPECT_OK(TURN_ON_PSM); #elif CONFIG_MODEM_HL7800_EDRX iface_ctx.low_power_mode = HL7800_LPM_EDRX; /* Turn off PSM */ SEND_AT_CMD_EXPECT_OK("AT+CPSMS=0"); /* turn on eDRX */ if (iface_ctx.mdm_rat == MDM_RAT_CAT_NB1) { edrx_act_type = 5; } else { edrx_act_type = 4; } snprintk(set_edrx_msg, sizeof(set_edrx_msg), "AT+CEDRXS=1,%d,\"%s\"", edrx_act_type, CONFIG_MODEM_HL7800_EDRX_VALUE); SEND_AT_CMD_EXPECT_OK(set_edrx_msg); #endif sleep = true; #else /* Turn off sleep mode */ SEND_AT_CMD_EXPECT_OK("AT+KSLEEP=2"); /* Turn off PSM */ SEND_AT_CMD_EXPECT_OK("AT+CPSMS=0"); /* Turn off eDRX */ 
SEND_AT_CMD_EXPECT_OK("AT+CEDRXS=0"); #endif /* modem manufacturer */ SEND_COMPLEX_AT_CMD("AT+CGMI"); /* modem model */ SEND_COMPLEX_AT_CMD("AT+CGMM"); /* query modem IMEI */ SEND_COMPLEX_AT_CMD("AT+CGSN"); /* query modem serial number */ SEND_COMPLEX_AT_CMD("AT+KGSN=3"); if (iface_ctx.mdm_startup_state != HL7800_STARTUP_STATE_SIM_NOT_PRESENT) { /* query SIM ICCID */ SEND_AT_CMD_IGNORE_ERROR("AT+CCID?"); /* query SIM IMSI */ (void)send_at_cmd(NULL, "AT+CIMI", MDM_CMD_SEND_TIMEOUT, MDM_DEFAULT_AT_CMD_RETRIES, true); } /* Query PDP context to get APN */ SEND_AT_CMD_EXPECT_OK("AT+CGDCONT?"); apn = iface_ctx.mdm_apn.value; if (strcmp(iface_ctx.mdm_pdp_addr_fam, MODEM_HL7800_ADDRESS_FAMILY)) { config_apn = true; } /* Query PDP authentication context to get APN username/password. * Temporary Workaround - Ignore error * On some modules this is returning an error and the response data. */ SEND_AT_CMD_IGNORE_ERROR("AT+WPPP?"); #if CONFIG_MODEM_HL7800_SET_APN_NAME_ON_STARTUP if (!iface_ctx.configured) { if (strncmp(iface_ctx.mdm_apn.value, CONFIG_MODEM_HL7800_APN_NAME, MDM_HL7800_APN_MAX_STRLEN) != 0) { apn = CONFIG_MODEM_HL7800_APN_NAME; config_apn = true; } } #endif if (config_apn) { /* set PDP context address family along with current APN */ ret = write_apn(apn); if (ret < 0) { goto error; } SEND_AT_CMD_EXPECT_OK("AT+CGDCONT?"); } ret = setup_gprs_connection(iface_ctx.mdm_apn.value); if (ret < 0) { goto error; } /* query the network status in case we already registered */ SEND_COMPLEX_AT_CMD("AT+CEREG?"); /* Turn on EPS network registration status reporting */ SEND_AT_CMD_EXPECT_OK("AT+CEREG=5"); /* query all socket configs to cleanup any sockets that are not * tracked by the driver */ SEND_AT_CMD_EXPECT_OK("AT+KTCPCFG?"); SEND_AT_CMD_EXPECT_OK("AT+KUDPCFG?"); /* Enabled the LTE radio */ #if !defined(CONFIG_MODEM_HL7800_BOOT_IN_AIRPLANE_MODE) SEND_AT_CMD_EXPECT_OK("AT+CFUN=1,0"); #endif /* The modem has been initialized and now the network interface can be * started in 
the CEREG message handler. */ LOG_INF("Modem ready!"); iface_ctx.restarting = false; iface_ctx.configured = true; set_busy(false); allow_sleep(sleep); /* trigger APN update event */ event_handler(HL7800_EVENT_APN_UPDATE, &iface_ctx.mdm_apn); #ifdef CONFIG_MODEM_HL7800_BOOT_DELAY if (!iface_ctx.initialized) { if (iface_ctx.iface != NULL) { hl7800_build_mac(&iface_ctx); net_if_set_link_addr(iface_ctx.iface, iface_ctx.mac_addr, sizeof(iface_ctx.mac_addr), NET_LINK_ETHERNET); iface_ctx.initialized = true; } } #endif return 0; error: LOG_ERR("Unable to configure modem"); iface_ctx.configured = false; set_network_state(HL7800_UNABLE_TO_CONFIGURE); /* Kernel will fault with non-zero return value. * Allow other parts of application to run when modem cannot be configured. */ return 0; } static int write_apn(char *access_point_name) { char cmd_string[MDM_HL7800_APN_CMD_MAX_SIZE]; /* PDP Context */ memset(cmd_string, 0, MDM_HL7800_APN_CMD_MAX_SIZE); if (strcmp(MODEM_HL7800_ADDRESS_FAMILY, ADDRESS_FAMILY_IPV4)) { strncat(cmd_string, "AT+CGDCONT=1,\"" MODEM_HL7800_ADDRESS_FAMILY "\",\"", MDM_HL7800_APN_CMD_MAX_STRLEN); } else { strncat(cmd_string, "AT+CGDCONT=1,\"" ADDRESS_FAMILY_IP "\",\"", MDM_HL7800_APN_CMD_MAX_STRLEN); } strncat(cmd_string, access_point_name, MDM_HL7800_APN_CMD_MAX_STRLEN); strncat(cmd_string, "\"", MDM_HL7800_APN_CMD_MAX_STRLEN); return send_at_cmd(NULL, cmd_string, MDM_CMD_SEND_TIMEOUT, 0, false); } static void mdm_reset_work_callback(struct k_work *item) { ARG_UNUSED(item); mdm_hl7800_reset(); } int32_t mdm_hl7800_reset(void) { int ret; hl7800_lock(); ret = modem_reset_and_configure(); hl7800_unlock(); return ret; } static void mdm_power_off_work_callback(struct k_work *item) { ARG_UNUSED(item); int ret; #if defined(CONFIG_DNS_RESOLVER) struct dns_resolve_context *dns_ctx; LOG_DBG("Shutdown DNS resolver"); dns_ctx = dns_resolve_get_default(); (void)dns_resolve_close(dns_ctx); #endif hl7800_lock(); notify_all_tcp_sockets_closed(); ret = send_at_cmd(NULL, 
"AT+CPOF", MDM_CMD_SEND_TIMEOUT, 1, false); if (ret) { LOG_ERR("AT+CPOF ret:%d", ret); return; } prepare_io_for_reset(); iface_ctx.dns_ready = false; iface_ctx.configured = false; iface_ctx.off = true; set_busy(false); /* bring the iface down */ if (iface_ctx.iface) { net_if_carrier_off(iface_ctx.iface); } LOG_INF("Modem powered off"); hl7800_unlock(); } static int hl7800_power_off(void) { LOG_INF("Powering off modem"); wakeup_hl7800(); hl7800_stop_rssi_work(); k_work_cancel_delayable(&iface_ctx.iface_status_work); k_work_cancel_delayable(&iface_ctx.dns_work); k_work_cancel_delayable(&iface_ctx.mdm_reset_work); k_work_cancel_delayable(&iface_ctx.allow_sleep_work); k_work_cancel_delayable(&iface_ctx.delete_untracked_socket_work); (void)k_work_submit_to_queue(&hl7800_workq, &iface_ctx.mdm_pwr_off_work); return 0; } int32_t mdm_hl7800_power_off(void) { int rc; hl7800_lock(); rc = hl7800_power_off(); hl7800_unlock(); return rc; } int mdm_hl7800_register_event_callback(struct mdm_hl7800_callback_agent *agent) { int ret; ret = k_sem_take(&cb_lock, K_NO_WAIT); if (ret < 0) { return ret; } if (!agent->event_callback) { LOG_WRN("event_callback is NULL"); } sys_slist_append(&hl7800_event_callback_list, &agent->node); k_sem_give(&cb_lock); return ret; } int mdm_hl7800_unregister_event_callback(struct mdm_hl7800_callback_agent *agent) { int ret; ret = k_sem_take(&cb_lock, K_NO_WAIT); if (ret < 0) { return ret; } ret = (int)sys_slist_find_and_remove(&hl7800_event_callback_list, &agent->node); if (ret) { ret = 0; } else { ret = -ENOENT; } k_sem_give(&cb_lock); return ret; } /*** OFFLOAD FUNCTIONS ***/ static int connect_TCP_socket(struct hl7800_socket *sock) { int ret; char cmd_con[sizeof("AT+KTCPCNX=##")]; snprintk(cmd_con, sizeof(cmd_con), "AT+KTCPCNX=%d", sock->socket_id); ret = send_at_cmd(sock, cmd_con, MDM_CMD_SEND_TIMEOUT, 0, false); if (ret < 0) { LOG_ERR("AT+KTCPCNX ret:%d", ret); ret = -EIO; goto done; } /* Now wait for +KTCP_IND or +KTCP_NOTIF to ensure * the 
connection succeeded or failed. */ ret = k_sem_take(&sock->sock_send_sem, MDM_CMD_CONN_TIMEOUT); if (ret == 0) { ret = sock->error; } else if (ret == -EAGAIN) { ret = -ETIMEDOUT; } if (ret < 0) { LOG_ERR("+KTCP_IND/NOTIF ret:%d", ret); goto done; } else { sock->state = SOCK_CONNECTED; net_context_set_state(sock->context, NET_CONTEXT_CONNECTED); } done: return ret; } static int configure_TCP_socket(struct hl7800_socket *sock) { int ret; char cmd_cfg[sizeof("AT+KTCPCFG=#,#,\"" IPV6_ADDR_FORMAT "\",#####,,,,#,,#")]; int dst_port = -1; int af; bool restore_on_boot = false; #ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE restore_on_boot = true; #endif if (sock->dst.sa_family == AF_INET6) { af = MDM_HL7800_SOCKET_AF_IPV6; dst_port = net_sin6(&sock->dst)->sin6_port; } else if (sock->dst.sa_family == AF_INET) { af = MDM_HL7800_SOCKET_AF_IPV4; dst_port = net_sin(&sock->dst)->sin_port; } else { return -EINVAL; } sock->socket_id = MDM_CREATE_SOCKET_ID; snprintk(cmd_cfg, sizeof(cmd_cfg), "AT+KTCPCFG=%d,%d,\"%s\",%u,,,,%d,,%d", 1, 0, hl7800_sprint_ip_addr(&sock->dst), dst_port, af, restore_on_boot); ret = send_at_cmd(sock, cmd_cfg, MDM_CMD_SEND_TIMEOUT, 0, false); if (ret < 0) { LOG_ERR("AT+KTCPCFG ret:%d", ret); ret = -EIO; goto done; } done: return ret; } static int configure_UDP_socket(struct hl7800_socket *sock) { int ret = 0; char cmd[sizeof("AT+KUDPCFG=1,0,,,,,0,#")]; int af; bool restore_on_boot = false; #ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE restore_on_boot = true; #endif sock->socket_id = MDM_CREATE_SOCKET_ID; if (sock->family == AF_INET) { af = MDM_HL7800_SOCKET_AF_IPV4; } else if (sock->family == AF_INET6) { af = MDM_HL7800_SOCKET_AF_IPV6; } else { return -EINVAL; } snprintk(cmd, sizeof(cmd), "AT+KUDPCFG=1,0,,,,,%d,%d", af, restore_on_boot); ret = send_at_cmd(sock, cmd, MDM_CMD_SEND_TIMEOUT, 0, false); if (ret < 0) { LOG_ERR("AT+KUDPCFG ret:%d", ret); goto done; } /* Now wait for +KUDP_IND or +KUDP_NOTIF to ensure * the socket was created. 
*/ ret = k_sem_take(&sock->sock_send_sem, MDM_CMD_CONN_TIMEOUT); if (ret == 0) { ret = sock->error; } else if (ret == -EAGAIN) { ret = -ETIMEDOUT; } if (ret < 0) { LOG_ERR("+KUDP_IND/NOTIF ret:%d", ret); goto done; } done: return ret; } static int reconfigure_IP_connection(void) { int ret = 0; if (iface_ctx.reconfig_IP_connection) { iface_ctx.reconfig_IP_connection = false; /* reconfigure GPRS connection so sockets can be used */ ret = setup_gprs_connection(iface_ctx.mdm_apn.value); if (ret < 0) { LOG_ERR("AT+KCNXCFG= ret:%d", ret); goto done; } /* query all TCP socket configs */ ret = send_at_cmd(NULL, "AT+KTCPCFG?", MDM_CMD_SEND_TIMEOUT, 0, false); /* query all UDP socket configs */ ret = send_at_cmd(NULL, "AT+KUDPCFG?", MDM_CMD_SEND_TIMEOUT, 0, false); /* TODO: to make this better, wait for +KUDP_IND or timeout */ k_sleep(K_SECONDS(1)); } done: return ret; } static int offload_get(sa_family_t family, enum net_sock_type type, enum net_ip_protocol ip_proto, struct net_context **context) { int ret = 0; struct hl7800_socket *sock = NULL; hl7800_lock(); /* new socket */ sock = socket_get(); if (!sock) { ret = -ENOMEM; goto done; } (*context)->offload_context = sock; /* set the context iface index to our iface */ (*context)->iface = net_if_get_by_iface(iface_ctx.iface); sock->family = family; sock->type = type; sock->ip_proto = ip_proto; sock->context = *context; sock->reconfig = false; sock->created = false; sock->socket_id = MDM_CREATE_SOCKET_ID; /* If UDP, create UDP socket now. * TCP socket needs to be created later once the * connection IP address is known. 
*/ if (type == SOCK_DGRAM) { wakeup_hl7800(); /* reconfig IP connection if necessary */ (void)reconfigure_IP_connection(); if (!sock->created) { ret = configure_UDP_socket(sock); if (ret < 0) { socket_put(sock); goto done; } } } done: set_busy(false); allow_sleep(true); hl7800_unlock(); return ret; } static int offload_bind(struct net_context *context, const struct sockaddr *addr, socklen_t addr_len) { struct hl7800_socket *sock = NULL; if (!context) { return -EINVAL; } sock = (struct hl7800_socket *)context->offload_context; if (!sock) { LOG_ERR("Can't locate socket for net_ctx:%p!", context); return -EINVAL; } /* save bind address information */ sock->src.sa_family = addr->sa_family; #if defined(CONFIG_NET_IPV6) if (addr->sa_family == AF_INET6) { net_ipaddr_copy(&net_sin6(&sock->src)->sin6_addr, &net_sin6(addr)->sin6_addr); net_sin6(&sock->src)->sin6_port = net_sin6(addr)->sin6_port; } else #endif #if defined(CONFIG_NET_IPV4) if (addr->sa_family == AF_INET) { net_ipaddr_copy(&net_sin(&sock->src)->sin_addr, &net_sin(addr)->sin_addr); net_sin(&sock->src)->sin_port = net_sin(addr)->sin_port; } else #endif { return -EPFNOSUPPORT; } return 0; } static int offload_listen(struct net_context *context, int backlog) { /* NOT IMPLEMENTED */ return -ENOTSUP; } static int offload_connect(struct net_context *context, const struct sockaddr *addr, socklen_t addr_len, net_context_connect_cb_t cb, int32_t timeout, void *user_data) { int ret = 0; int dst_port = -1; struct hl7800_socket *sock; if (!context || !addr) { return -EINVAL; } sock = (struct hl7800_socket *)context->offload_context; if (!sock) { LOG_ERR("Can't locate socket for net_ctx:%p!", context); return -EINVAL; } if (sock->socket_id < 1) { LOG_ERR("Invalid socket_id(%d) for net_ctx:%p!", sock->socket_id, context); return -EINVAL; } sock->dst.sa_family = addr->sa_family; #if defined(CONFIG_NET_IPV6) if (addr->sa_family == AF_INET6) { net_ipaddr_copy(&net_sin6(&sock->dst)->sin6_addr, &net_sin6(addr)->sin6_addr); 
dst_port = ntohs(net_sin6(addr)->sin6_port); net_sin6(&sock->dst)->sin6_port = dst_port; } else #endif #if defined(CONFIG_NET_IPV4) if (addr->sa_family == AF_INET) { net_ipaddr_copy(&net_sin(&sock->dst)->sin_addr, &net_sin(addr)->sin_addr); dst_port = ntohs(net_sin(addr)->sin_port); net_sin(&sock->dst)->sin_port = dst_port; } else #endif { return -EINVAL; } if (dst_port < 0) { LOG_ERR("Invalid port: %d", dst_port); return -EINVAL; } hl7800_lock(); if (sock->type == SOCK_STREAM) { wakeup_hl7800(); reconfigure_IP_connection(); /* Configure/create TCP connection */ if (!sock->created) { ret = configure_TCP_socket(sock); if (ret < 0) { goto done; } } /* Connect to TCP */ ret = connect_TCP_socket(sock); if (ret < 0) { goto done; } } done: set_busy(false); allow_sleep(true); hl7800_unlock(); if (cb) { cb(context, ret, user_data); } return ret; } static int offload_accept(struct net_context *context, net_tcp_accept_cb_t cb, int32_t timeout, void *user_data) { /* NOT IMPLEMENTED */ return -ENOTSUP; } static int offload_sendto(struct net_pkt *pkt, const struct sockaddr *dst_addr, socklen_t addr_len, net_context_send_cb_t cb, int32_t timeout, void *user_data) { struct net_context *context = net_pkt_context(pkt); struct hl7800_socket *sock; int ret, dst_port = 0; if (!context) { return -EINVAL; } sock = (struct hl7800_socket *)context->offload_context; if (!sock) { LOG_ERR("Can't locate socket for net_ctx:%p!", context); return -EINVAL; } #if defined(CONFIG_NET_IPV6) if (dst_addr->sa_family == AF_INET6) { net_ipaddr_copy(&net_sin6(&sock->dst)->sin6_addr, &net_sin6(dst_addr)->sin6_addr); dst_port = ntohs(net_sin6(dst_addr)->sin6_port); net_sin6(&sock->dst)->sin6_port = dst_port; } else #endif #if defined(CONFIG_NET_IPV4) if (dst_addr->sa_family == AF_INET) { net_ipaddr_copy(&net_sin(&sock->dst)->sin_addr, &net_sin(dst_addr)->sin_addr); dst_port = ntohs(net_sin(dst_addr)->sin_port); net_sin(&sock->dst)->sin_port = dst_port; } else #endif { return -EINVAL; } hl7800_lock(); 
wakeup_hl7800(); reconfigure_IP_connection(); ret = send_data(sock, pkt); set_busy(false); allow_sleep(true); hl7800_unlock(); if (ret >= 0) { net_pkt_unref(pkt); } if (cb) { cb(context, ret, user_data); } return ret; } static int offload_send(struct net_pkt *pkt, net_context_send_cb_t cb, int32_t timeout, void *user_data) { struct net_context *context = net_pkt_context(pkt); socklen_t addr_len; addr_len = 0; #if defined(CONFIG_NET_IPV6) if (net_pkt_family(pkt) == AF_INET6) { addr_len = sizeof(struct sockaddr_in6); } else #endif /* CONFIG_NET_IPV6 */ #if defined(CONFIG_NET_IPV4) if (net_pkt_family(pkt) == AF_INET) { addr_len = sizeof(struct sockaddr_in); } else #endif /* CONFIG_NET_IPV4 */ { return -EPFNOSUPPORT; } return offload_sendto(pkt, &context->remote, addr_len, cb, timeout, user_data); } static int offload_recv(struct net_context *context, net_context_recv_cb_t cb, int32_t timeout, void *user_data) { struct hl7800_socket *sock; if (!context) { return -EINVAL; } sock = (struct hl7800_socket *)context->offload_context; if (!sock) { LOG_ERR("Can't locate socket for net_ctx:%p!", context); return -EINVAL; } sock->recv_cb = cb; sock->recv_user_data = user_data; return 0; } static int offload_put(struct net_context *context) { struct hl7800_socket *sock; char cmd[sizeof("AT+KTCPCLOSE=##")]; if (!context) { return -EINVAL; } sock = (struct hl7800_socket *)context->offload_context; if (!sock) { /* socket was already closed? Exit quietly here. 
*/ return 0; } /* cancel notif work if queued */ k_work_cancel_delayable(&sock->notif_work); hl7800_lock(); /* close connection */ if (sock->type == SOCK_STREAM) { snprintk(cmd, sizeof(cmd), "AT+KTCPCLOSE=%d", sock->socket_id); } else { snprintk(cmd, sizeof(cmd), "AT+KUDPCLOSE=%d", sock->socket_id); } wakeup_hl7800(); if ((sock->type == SOCK_DGRAM) || (sock->error != -ENOTCONN)) { send_at_cmd(sock, cmd, MDM_CMD_SEND_TIMEOUT, 0, false); } if (sock->type == SOCK_STREAM) { /* delete session */ delete_socket(sock, sock->type, sock->socket_id); } set_busy(false); allow_sleep(true); socket_put(sock); net_context_unref(context); if (sock->type == SOCK_STREAM) { /* TCP contexts are referenced twice, * once for the app and once for the stack. * Since TCP stack is not used for offload, * unref a second time. */ net_context_unref(context); } hl7800_unlock(); return 0; } static struct net_offload offload_funcs = { .get = offload_get, .bind = offload_bind, .listen = offload_listen, .connect = offload_connect, .accept = offload_accept, .send = offload_send, .sendto = offload_sendto, .recv = offload_recv, .put = offload_put, }; /* Use the last 6 digits of the IMEI as the mac address */ static void hl7800_build_mac(struct hl7800_iface_ctx *ictx) { ictx->mac_addr[0] = ictx->mdm_imei[MDM_HL7800_IMEI_STRLEN - 6]; ictx->mac_addr[1] = ictx->mdm_imei[MDM_HL7800_IMEI_STRLEN - 5]; ictx->mac_addr[2] = ictx->mdm_imei[MDM_HL7800_IMEI_STRLEN - 4]; ictx->mac_addr[3] = ictx->mdm_imei[MDM_HL7800_IMEI_STRLEN - 3]; ictx->mac_addr[4] = ictx->mdm_imei[MDM_HL7800_IMEI_STRLEN - 2]; ictx->mac_addr[5] = ictx->mdm_imei[MDM_HL7800_IMEI_STRLEN - 1]; } #ifdef CONFIG_MODEM_HL7800_FW_UPDATE int32_t mdm_hl7800_update_fw(char *file_path) { int ret = 0; struct fs_dirent file_info; char cmd1[sizeof("AT+WDSD=24643584")]; /* get file info */ ret = fs_stat(file_path, &file_info); if (ret >= 0) { LOG_DBG("file '%s' size %zu", file_info.name, file_info.size); } else { LOG_ERR("Failed to get file [%s] info: %d", 
file_path, ret); goto err; } ret = fs_open(&iface_ctx.fw_update_file, file_path, FS_O_READ); if (ret < 0) { LOG_ERR("%s open err: %d", file_path, ret); goto err; } /* turn on device service indications */ ret = send_at_cmd(NULL, "AT+WDSI=2", MDM_CMD_SEND_TIMEOUT, 0, false); if (ret < 0) { goto err; } notify_all_tcp_sockets_closed(); hl7800_stop_rssi_work(); k_work_cancel_delayable(&iface_ctx.iface_status_work); k_work_cancel_delayable(&iface_ctx.dns_work); k_work_cancel_delayable(&iface_ctx.mdm_reset_work); k_work_cancel_delayable(&iface_ctx.allow_sleep_work); k_work_cancel_delayable(&iface_ctx.delete_untracked_socket_work); iface_ctx.dns_ready = false; if (iface_ctx.iface) { LOG_DBG("HL7800 iface DOWN"); net_if_carrier_off(iface_ctx.iface); } /* HL7800 will stay locked for the duration of the FW update */ hl7800_lock(); /* start firmware update process */ LOG_INF("Initiate FW update, total packets: %zd", ((file_info.size / XMODEM_DATA_SIZE) + 1)); set_fota_state(HL7800_FOTA_START); (void)snprintk(cmd1, sizeof(cmd1), "AT+WDSD=%zd", file_info.size); (void)send_at_cmd(NULL, cmd1, K_NO_WAIT, 0, false); err: return ret; } #endif static int hl7800_init(const struct device *dev) { int i, ret = 0; struct k_work_queue_config cfg = { .name = "hl7800_workq", }; ARG_UNUSED(dev); LOG_DBG("HL7800 Init"); /* The UART starts in the on state and CTS is set low by the HL7800 */ iface_ctx.cts_state = iface_ctx.last_cts_state = 0; /* Prevent the network interface from starting until * the modem has been initialized * because the modem may not have a valid SIM card. 
*/ iface_ctx.iface = net_if_get_default(); if (iface_ctx.iface == NULL) { return -EIO; } net_if_carrier_off(iface_ctx.iface); /* init sockets */ for (i = 0; i < MDM_MAX_SOCKETS; i++) { iface_ctx.sockets[i].socket_id = MDM_INVALID_SOCKET_ID; k_work_init(&iface_ctx.sockets[i].recv_cb_work, sockreadrecv_cb_work); k_work_init(&iface_ctx.sockets[i].rx_data_work, sock_rx_data_cb_work); k_work_init_delayable(&iface_ctx.sockets[i].notif_work, sock_notif_cb_work); k_sem_init(&iface_ctx.sockets[i].sock_send_sem, 0, 1); } iface_ctx.last_socket_id = 0; k_sem_init(&iface_ctx.response_sem, 0, 1); k_sem_init(&iface_ctx.mdm_awake, 0, 1); /* initialize the work queue */ k_work_queue_start(&hl7800_workq, hl7800_workq_stack, K_THREAD_STACK_SIZEOF(hl7800_workq_stack), WORKQ_PRIORITY, &cfg); /* init work tasks */ k_work_init_delayable(&iface_ctx.rssi_query_work, hl7800_rssi_query_work); k_work_init_delayable(&iface_ctx.iface_status_work, iface_status_work_cb); k_work_init_delayable(&iface_ctx.dns_work, dns_work_cb); k_work_init(&iface_ctx.mdm_vgpio_work, mdm_vgpio_work_cb); k_work_init_delayable(&iface_ctx.mdm_reset_work, mdm_reset_work_callback); k_work_init_delayable(&iface_ctx.allow_sleep_work, allow_sleep_work_callback); k_work_init_delayable(&iface_ctx.delete_untracked_socket_work, delete_untracked_socket_work_cb); k_work_init(&iface_ctx.mdm_pwr_off_work, mdm_power_off_work_callback); #ifdef CONFIG_MODEM_HL7800_GPS k_work_init_delayable(&iface_ctx.gps_work, gps_work_callback); #endif #ifdef CONFIG_MODEM_HL7800_FW_UPDATE k_work_init(&iface_ctx.finish_fw_update_work, finish_fw_update_work_callback); iface_ctx.fw_updated = false; #endif /* setup port devices and pin directions */ for (i = 0; i < MAX_MDM_CONTROL_PINS; i++) { if (!gpio_is_ready_dt(&hl7800_cfg.gpio[i])) { LOG_ERR("gpio port (%s) not ready!", hl7800_cfg.gpio[i].port->name); return -ENODEV; } } ret = gpio_pin_configure_dt(&hl7800_cfg.gpio[MDM_RESET], GPIO_OUTPUT); if (ret) { LOG_ERR("Error configuring IO MDM_RESET %d err: 
%d!", hl7800_cfg.gpio[MDM_RESET].pin, ret); return ret; } ret = gpio_pin_configure_dt(&hl7800_cfg.gpio[MDM_WAKE], GPIO_OUTPUT); if (ret) { LOG_ERR("Error configuring IO MDM_WAKE %d err: %d!", hl7800_cfg.gpio[MDM_WAKE].pin, ret); return ret; } ret = gpio_pin_configure_dt(&hl7800_cfg.gpio[MDM_PWR_ON], GPIO_OUTPUT); if (ret) { LOG_ERR("Error configuring IO MDM_PWR_ON %d err: %d!", hl7800_cfg.gpio[MDM_PWR_ON].pin, ret); return ret; } ret = gpio_pin_configure_dt(&hl7800_cfg.gpio[MDM_FAST_SHUTD], GPIO_OUTPUT); if (ret) { LOG_ERR("Error configuring IO MDM_FAST_SHUTD %d err: %d!", hl7800_cfg.gpio[MDM_FAST_SHUTD].pin, ret); return ret; } ret = gpio_pin_configure_dt(&hl7800_cfg.gpio[MDM_VGPIO], GPIO_INPUT); if (ret) { LOG_ERR("Error configuring IO MDM_VGPIO %d err: %d!", hl7800_cfg.gpio[MDM_VGPIO].pin, ret); return ret; } ret = gpio_pin_configure_dt(&hl7800_cfg.gpio[MDM_UART_DSR], GPIO_INPUT); if (ret) { LOG_ERR("Error configuring IO MDM_UART_DSR %d err: %d!", hl7800_cfg.gpio[MDM_UART_DSR].pin, ret); return ret; } ret = gpio_pin_configure_dt(&hl7800_cfg.gpio[MDM_UART_CTS], GPIO_INPUT); if (ret) { LOG_ERR("Error configuring IO MDM_UART_CTS %d err: %d!", hl7800_cfg.gpio[MDM_UART_CTS].pin, ret); return ret; } ret = gpio_pin_configure_dt(&hl7800_cfg.gpio[MDM_GPIO6], GPIO_INPUT); if (ret) { LOG_ERR("Error configuring IO MDM_GPIO6 %d err: %d!", hl7800_cfg.gpio[MDM_GPIO6].pin, ret); return ret; } modem_assert_wake(false); modem_assert_pwr_on(false); modem_assert_fast_shutd(false); /* Allow modem to run so we are in a known state. * This allows HL7800 VGPIO to be high, which is good because the UART * IO are already configured. */ modem_run(); /* setup input pin callbacks */ /* VGPIO */ gpio_init_callback(&iface_ctx.mdm_vgpio_cb, mdm_vgpio_callback_isr, BIT(hl7800_cfg.gpio[MDM_VGPIO].pin)); ret = gpio_add_callback(hl7800_cfg.gpio[MDM_VGPIO].port, &iface_ctx.mdm_vgpio_cb); if (ret) { LOG_ERR("Cannot setup vgpio callback! 
(%d)", ret); return ret; } ret = gpio_pin_interrupt_configure_dt(&hl7800_cfg.gpio[MDM_VGPIO], GPIO_INT_EDGE_BOTH); if (ret) { LOG_ERR("Error config vgpio interrupt! (%d)", ret); return ret; } /* UART DSR */ gpio_init_callback(&iface_ctx.mdm_uart_dsr_cb, mdm_uart_dsr_callback_isr, BIT(hl7800_cfg.gpio[MDM_UART_DSR].pin)); ret = gpio_add_callback(hl7800_cfg.gpio[MDM_UART_DSR].port, &iface_ctx.mdm_uart_dsr_cb); if (ret) { LOG_ERR("Cannot setup uart dsr callback! (%d)", ret); return ret; } ret = gpio_pin_interrupt_configure_dt(&hl7800_cfg.gpio[MDM_UART_DSR], GPIO_INT_EDGE_BOTH); if (ret) { LOG_ERR("Error config uart dsr interrupt! (%d)", ret); return ret; } /* GPIO6 */ gpio_init_callback(&iface_ctx.mdm_gpio6_cb, mdm_gpio6_callback_isr, BIT(hl7800_cfg.gpio[MDM_GPIO6].pin)); ret = gpio_add_callback(hl7800_cfg.gpio[MDM_GPIO6].port, &iface_ctx.mdm_gpio6_cb); if (ret) { LOG_ERR("Cannot setup gpio6 callback! (%d)", ret); return ret; } ret = gpio_pin_interrupt_configure_dt(&hl7800_cfg.gpio[MDM_GPIO6], GPIO_INT_EDGE_BOTH); if (ret) { LOG_ERR("Error config gpio6 interrupt! (%d)", ret); return ret; } /* UART CTS */ gpio_init_callback(&iface_ctx.mdm_uart_cts_cb, mdm_uart_cts_callback_isr, BIT(hl7800_cfg.gpio[MDM_UART_CTS].pin)); ret = gpio_add_callback(hl7800_cfg.gpio[MDM_UART_CTS].port, &iface_ctx.mdm_uart_cts_cb); if (ret) { LOG_ERR("Cannot setup uart cts callback! (%d)", ret); return ret; } ret = gpio_pin_interrupt_configure_dt(&hl7800_cfg.gpio[MDM_UART_CTS], GPIO_INT_EDGE_BOTH); if (ret) { LOG_ERR("Error config uart cts interrupt! 
(%d)", ret); return ret; } /* Set modem data storage */ iface_ctx.mdm_ctx.data_manufacturer = iface_ctx.mdm_manufacturer; iface_ctx.mdm_ctx.data_model = iface_ctx.mdm_model; iface_ctx.mdm_ctx.data_revision = iface_ctx.mdm_revision; #ifdef CONFIG_MODEM_SIM_NUMBERS iface_ctx.mdm_ctx.data_imei = iface_ctx.mdm_imei; #endif iface_ctx.mdm_ctx.data_rssi = &iface_ctx.mdm_rssi; ret = mdm_receiver_register(&iface_ctx.mdm_ctx, MDM_UART_DEV, mdm_recv_buf, sizeof(mdm_recv_buf)); if (ret < 0) { LOG_ERR("Error registering modem receiver (%d)!", ret); return ret; } k_queue_init(&iface_ctx.stale_socket_queue); /* start RX thread */ k_thread_name_set( k_thread_create(&hl7800_rx_thread, hl7800_rx_stack, K_THREAD_STACK_SIZEOF(hl7800_rx_stack), hl7800_rx, NULL, NULL, NULL, RX_THREAD_PRIORITY, 0, K_NO_WAIT), "hl7800 rx"); #ifdef CONFIG_MODEM_HL7800_BOOT_DELAY modem_reset(); #else ret = modem_reset_and_configure(); #endif return ret; } static void offload_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct hl7800_iface_ctx *ctx = dev->data; iface->if_dev->offload = &offload_funcs; ctx->iface = iface; if (!IS_ENABLED(CONFIG_MODEM_HL7800_BOOT_DELAY)) { hl7800_build_mac(&iface_ctx); net_if_set_link_addr(iface, iface_ctx.mac_addr, sizeof(iface_ctx.mac_addr), NET_LINK_ETHERNET); iface_ctx.initialized = true; } } static struct offloaded_if_api api_funcs = { .iface_api.init = offload_iface_init, }; NET_DEVICE_DT_INST_OFFLOAD_DEFINE(0, hl7800_init, NULL, &iface_ctx, &hl7800_cfg, CONFIG_MODEM_HL7800_INIT_PRIORITY, &api_funcs, MDM_MTU); ```
/content/code_sandbox/drivers/modem/hl7800.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
51,085
```c /* * */ #include <zephyr/shell/shell.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <zephyr/device.h> #include <zephyr/drivers/sensor.h> /** * @brief Collect the values for several channels * * @param dev Sensor device to read from * @param ... any number of pairs of arguments: * first is the sensor channel to read (-1 to terminate the list) * second is a pointer to the struct sensor_value to put it in * @return 0 on success * @return negative error code from sensor API on failure */ static int get_channels(const struct device *dev, ...) { va_list ptr; int i; va_start(ptr, dev); for (i = 0;; i++) { int chan; struct sensor_value *val; int err; chan = va_arg(ptr, int); if (chan == -1) { break; } val = va_arg(ptr, struct sensor_value *); err = sensor_channel_get(dev, chan, val); if (err < 0) { va_end(ptr); return err; } } va_end(ptr); return 0; } /* battery */ static int cmd_battery(const struct shell *sh, size_t argc, char **argv) { struct sensor_value temp, volt, current, i_desired, charge_remain; struct sensor_value charge, v_desired, v_design, cap, nom_cap; struct sensor_value full, empty; const struct device *const dev = DEVICE_DT_GET(DT_ALIAS(battery)); bool allowed; int err; if (!device_is_ready(dev)) { shell_error(sh, "Device not ready (%s)", argv[1]); return -ENODEV; } err = sensor_sample_fetch(dev); if (err < 0) { shell_error(sh, "Failed to read sensor: %d", err); } err = get_channels(dev, SENSOR_CHAN_GAUGE_TEMP, &temp, SENSOR_CHAN_GAUGE_VOLTAGE, &volt, SENSOR_CHAN_GAUGE_AVG_CURRENT, &current, SENSOR_CHAN_GAUGE_DESIRED_VOLTAGE, &v_desired, SENSOR_CHAN_GAUGE_DESIRED_CHARGING_CURRENT, &i_desired, SENSOR_CHAN_GAUGE_STATE_OF_CHARGE, &charge, SENSOR_CHAN_GAUGE_DESIGN_VOLTAGE, &v_design, SENSOR_CHAN_GAUGE_REMAINING_CHARGE_CAPACITY, &charge_remain, SENSOR_CHAN_GAUGE_FULL_CHARGE_CAPACITY, &cap, SENSOR_CHAN_GAUGE_NOM_AVAIL_CAPACITY, &nom_cap, SENSOR_CHAN_GAUGE_TIME_TO_FULL, &full, SENSOR_CHAN_GAUGE_TIME_TO_EMPTY, &empty, -1); if (err < 0) 
{ return err; } shell_fprintf(sh, SHELL_NORMAL, "Temp: %.1d.%02d C\n", temp.val1, temp.val2 / 10000); shell_fprintf(sh, SHELL_NORMAL, "V: %5d.%02d V\n", volt.val1, volt.val2 / 10000); shell_fprintf(sh, SHELL_NORMAL, "V-desired: %d.%02d V\n", v_desired.val1, v_desired.val2 / 10000); shell_fprintf(sh, SHELL_NORMAL, "I: %d mA", current.val1); if (current.val1 > 0) { shell_fprintf(sh, SHELL_NORMAL, " (CHG)"); } else if (current.val1 < 0) { shell_fprintf(sh, SHELL_NORMAL, " (DISCHG)"); } shell_fprintf(sh, SHELL_NORMAL, "\n"); shell_fprintf(sh, SHELL_NORMAL, "I-desired: %5d mA\n", i_desired.val1); allowed = i_desired.val1 && v_desired.val2 && charge.val1 < 100; shell_fprintf(sh, SHELL_NORMAL, "Charging: %sAllowed\n", allowed ? "" : "Not "); shell_fprintf(sh, SHELL_NORMAL, "Charge: %d %%\n", charge.val1); shell_fprintf(sh, SHELL_NORMAL, "V-design: %d.%02d V\n", v_design.val1, v_design.val2 / 10000); shell_fprintf(sh, SHELL_NORMAL, "Remaining: %d mA\n", charge_remain.val1); shell_fprintf(sh, SHELL_NORMAL, "Cap-full: %d mA\n", cap.val1); shell_fprintf(sh, SHELL_NORMAL, "Design: %d mA\n", nom_cap.val1); shell_fprintf(sh, SHELL_NORMAL, "Time full: %dh:%02d\n", full.val1 / 60, full.val1 % 60); shell_fprintf(sh, SHELL_NORMAL, "Time empty: %dh:%02d\n", empty.val1 / 60, empty.val1 % 60); return 0; } SHELL_CMD_REGISTER(battery, NULL, "Battery status", cmd_battery); ```
/content/code_sandbox/drivers/sensor/shell_battery.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,118