text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_flash_controller #define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash) #define FLASH_WRITE_BLK_SZ DT_PROP(SOC_NV_FLASH_NODE, write_block_size) #define FLASH_ERASE_BLK_SZ DT_PROP(SOC_NV_FLASH_NODE, erase_block_size) /* * HAL includes go first to * avoid BIT macro redefinition */ #include <esp_flash.h> #include <spi_flash_mmap.h> #include <soc/spi_struct.h> #include <esp_flash_encrypt.h> #include <esp_flash_internal.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <stddef.h> #include <string.h> #include <errno.h> #include <zephyr/drivers/flash.h> #include <soc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flash_esp32, CONFIG_FLASH_LOG_LEVEL); #define FLASH_SEM_TIMEOUT (k_is_in_isr() ? K_NO_WAIT : K_FOREVER) struct flash_esp32_dev_config { spi_dev_t *controller; }; struct flash_esp32_dev_data { #ifdef CONFIG_MULTITHREADING struct k_sem sem; #endif }; static const struct flash_parameters flash_esp32_parameters = { .write_block_size = FLASH_WRITE_BLK_SZ, .erase_value = 0xff, }; #ifdef CONFIG_MULTITHREADING static inline void flash_esp32_sem_take(const struct device *dev) { struct flash_esp32_dev_data *data = dev->data; k_sem_take(&data->sem, FLASH_SEM_TIMEOUT); } static inline void flash_esp32_sem_give(const struct device *dev) { struct flash_esp32_dev_data *data = dev->data; k_sem_give(&data->sem); } #else #define flash_esp32_sem_take(dev) do {} while (0) #define flash_esp32_sem_give(dev) do {} while (0) #endif /* CONFIG_MULTITHREADING */ static int flash_esp32_read(const struct device *dev, off_t address, void *buffer, size_t length) { int ret = 0; flash_esp32_sem_take(dev); if (!esp_flash_encryption_enabled()) { ret = esp_flash_read(NULL, buffer, address, length); } else { ret = esp_flash_read_encrypted(NULL, address, buffer, length); } flash_esp32_sem_give(dev); if (ret != 0) { LOG_ERR("esp_flash_read failed %d", ret); return -EIO; } return 0; } static int flash_esp32_write(const struct device 
*dev, off_t address, const void *buffer, size_t length) { int ret = 0; flash_esp32_sem_take(dev); if (!esp_flash_encryption_enabled()) { ret = esp_flash_write(NULL, buffer, address, length); } else { ret = esp_flash_write_encrypted(NULL, address, buffer, length); } flash_esp32_sem_give(dev); if (ret != 0) { LOG_ERR("esp_flash_write failed %d", ret); return -EIO; } return 0; } static int flash_esp32_erase(const struct device *dev, off_t start, size_t len) { flash_esp32_sem_take(dev); int ret = esp_flash_erase_region(NULL, start, len); flash_esp32_sem_give(dev); if (ret != 0) { LOG_ERR("esp_flash_erase_region failed %d", ret); return -EIO; } return 0; } #if CONFIG_FLASH_PAGE_LAYOUT static const struct flash_pages_layout flash_esp32_pages_layout = { .pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) / FLASH_ERASE_BLK_SZ, .pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size), }; void flash_esp32_page_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { *layout = &flash_esp32_pages_layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static const struct flash_parameters * flash_esp32_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_esp32_parameters; } static int flash_esp32_init(const struct device *dev) { struct flash_esp32_dev_data *const dev_data = dev->data; uint32_t ret = 0; #ifdef CONFIG_MULTITHREADING k_sem_init(&dev_data->sem, 1, 1); #endif /* CONFIG_MULTITHREADING */ ret = esp_flash_init_default_chip(); if (ret != 0) { LOG_ERR("esp_flash_init_default_chip failed %d", ret); return 0; } return 0; } static const struct flash_driver_api flash_esp32_driver_api = { .read = flash_esp32_read, .write = flash_esp32_write, .erase = flash_esp32_erase, .get_parameters = flash_esp32_get_parameters, #ifdef CONFIG_FLASH_PAGE_LAYOUT .page_layout = flash_esp32_page_layout, #endif }; static struct flash_esp32_dev_data flash_esp32_data; static const struct flash_esp32_dev_config flash_esp32_config = 
{ .controller = (spi_dev_t *) DT_INST_REG_ADDR(0), }; DEVICE_DT_INST_DEFINE(0, flash_esp32_init, NULL, &flash_esp32_data, &flash_esp32_config, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_esp32_driver_api); ```
/content/code_sandbox/drivers/flash/flash_esp32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,178
```unknown
menu "MSPI flash device driver"

# Common support symbol; selected by the concrete MSPI flash drivers below.
config FLASH_MSPI
	bool
	select FLASH_HAS_DRIVER_ENABLED
	select MSPI
	help
	  MSPI flash drivers are enabled.

# Emulated MSPI flash device, enabled when the matching DT node is present.
config FLASH_MSPI_EMUL_DEVICE
	bool "MSPI flash device emulator"
	default y
	depends on DT_HAS_ZEPHYR_MSPI_EMUL_FLASH_ENABLED
	select FLASH_MSPI
	select FLASH_HAS_PAGE_LAYOUT
	select FLASH_HAS_EXPLICIT_ERASE

# Adesto ATXP032 flash on an MSPI bus; JESD216 is used for parameter discovery.
config FLASH_MSPI_ATXP032
	bool "MSPI ATXP032 driver"
	default y
	depends on DT_HAS_MSPI_ATXP032_ENABLED
	select FLASH_MSPI
	select FLASH_HAS_PAGE_LAYOUT
	select FLASH_HAS_EXPLICIT_ERASE
	select FLASH_JESD216
	select MSPI_AMBIQ_AP3 if SOC_SERIES_APOLLO3X

endmenu
```
/content/code_sandbox/drivers/flash/Kconfig.mspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
170
```unknown
# FLASH configuration options

# ITE IT8XXX2 internal flash controller; enabled when the matching DT node exists.
config SOC_FLASH_ITE_IT8XXX2
	bool "ITE IT8XXX2 flash driver"
	default y
	depends on DT_HAS_ITE_IT8XXX2_FLASH_CONTROLLER_ENABLED
	select SOC_IT8XXX2_USE_ILM
	select FLASH_HAS_PAGE_LAYOUT
	select FLASH_HAS_DRIVER_ENABLED
	select FLASH_HAS_EXPLICIT_ERASE
	select HAS_FLASH_LOAD_OFFSET
	help
	  The flash driver includes support for read, write and
	  erase flash operations. It also supports protection.
	  The it8xxx2 flash size is 1M byte.
```
/content/code_sandbox/drivers/flash/Kconfig.it8xxx2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
118
```objective-c
/*
 *
 */

#ifndef DRIVERS_FLASH_FLASH_SIMULATOR_NATIVE_H
#define DRIVERS_FLASH_FLASH_SIMULATOR_NATIVE_H

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Set up the mock flash backing store for the native flash simulator.
 *
 * @param flash_in_ram       back the flash with RAM instead of a file
 *                           (presumably; confirm against the .c implementation)
 * @param mock_flash         out: pointer to the mock flash buffer
 * @param size               size of the mock flash in bytes
 * @param flash_fd           out: file descriptor of the backing file
 * @param flash_file_path    path of the backing file
 * @param erase_value        byte value representing erased flash
 * @param flash_erase_at_start  erase (fill) the flash content on startup
 *
 * @return 0 on success, negative on error
 *         (NOTE(review): error convention inferred from int return — verify)
 */
int flash_mock_init_native(bool flash_in_ram, uint8_t **mock_flash, unsigned int size,
			   int *flash_fd, const char *flash_file_path,
			   unsigned int erase_value, bool flash_erase_at_start);

/*
 * Release the resources acquired by flash_mock_init_native().
 *
 * @param flash_in_ram     whether the flash was backed by RAM
 * @param flash_fd         file descriptor of the backing file
 * @param mock_flash       mock flash buffer to release
 * @param size             size of the mock flash in bytes
 * @param flash_file_path  path of the backing file
 * @param flash_rm_at_exit remove the backing file on exit
 */
void flash_mock_cleanup_native(bool flash_in_ram, int flash_fd, uint8_t *mock_flash,
			       unsigned int size, const char *flash_file_path,
			       bool flash_rm_at_exit);

#ifdef __cplusplus
}
#endif

#endif /* DRIVERS_FLASH_FLASH_SIMULATOR_NATIVE_H */
```
/content/code_sandbox/drivers/flash/flash_simulator_native.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
139
```c
/*
 *
 */

#define DT_DRV_COMPAT nuvoton_npcx_fiu_qspi

#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/flash/npcx_flash_api_ex.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/dt-bindings/flash_controller/npcx_fiu_qspi.h>
#include <soc.h>

#include "flash_npcx_fiu_qspi.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(npcx_fiu_qspi, LOG_LEVEL_ERR);

/* Driver convenience defines */
#define HAL_INSTANCE(dev) \
	((struct fiu_reg *)((const struct npcx_qspi_fiu_config *)(dev)->config)->base)

/* Device config */
struct npcx_qspi_fiu_config {
	/* Flash interface unit base address */
	uintptr_t base;
	/* Clock configuration */
	struct npcx_clk_cfg clk_cfg;
	/* Enable 2 external SPI devices for direct read on QSPI bus */
	bool en_direct_access_2dev;
};

/* Device data */
struct npcx_qspi_fiu_data {
	/* mutex of qspi bus controller */
	struct k_sem lock_sem;
	/* Current device configuration on QSPI bus */
	const struct npcx_qspi_cfg *cur_cfg;
	/* Current Software controlled Chip-Select number */
	int sw_cs;
	/* Current QSPI bus operation */
	uint32_t operation;
};

/* NPCX SPI User Mode Access (UMA) functions */

/* Drive the software-controlled chip select for UMA transactions. */
static inline void qspi_npcx_uma_cs_level(const struct device *dev, uint8_t sw_cs, bool level)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	/* Set chip select to high/low level */
	if (level) {
		inst->UMA_ECTS |= BIT(sw_cs);
	} else {
		inst->UMA_ECTS &= ~BIT(sw_cs);
	}
}

/* Write one byte over UMA; busy-waits until the transaction completes. */
static inline void qspi_npcx_uma_write_byte(const struct device *dev, uint8_t data)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	/* Set data to UMA_CODE and trigger UMA */
	inst->UMA_CODE = data;
	inst->UMA_CTS = UMA_CODE_CMD_WR_ONLY;

	/* EXEC_DONE will be zero automatically if a UMA transaction is completed. */
	while (IS_BIT_SET(inst->UMA_CTS, NPCX_UMA_CTS_EXEC_DONE)) {
		continue;
	}
}

/* Read one byte over UMA; busy-waits until the transaction completes. */
static inline void qspi_npcx_uma_read_byte(const struct device *dev, uint8_t *data)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	/* Trigger UMA and Get data from DB0 later */
	inst->UMA_CTS = UMA_CODE_RD_BYTE(1);
	while (IS_BIT_SET(inst->UMA_CTS, NPCX_UMA_CTS_EXEC_DONE)) {
		continue;
	}

	*data = inst->UMA_DB0;
}

/* NPCX SPI Direct Read Access (DRA)/User Mode Access (UMA) configuration functions */

/* Route UMA accesses to the secondary chip select when the config asks for it. */
static inline void qspi_npcx_config_uma_mode(const struct device *dev,
					     const struct npcx_qspi_cfg *qspi_cfg)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	if ((qspi_cfg->flags & NPCX_QSPI_SEC_FLASH_SL) != 0) {
		inst->UMA_ECTS |= BIT(NPCX_UMA_ECTS_SEC_CS);
	} else {
		inst->UMA_ECTS &= ~BIT(NPCX_UMA_ECTS_SEC_CS);
	}
}

/*
 * Configure 4-byte addressing for Direct Read Access. The register layout
 * differs between DRA v1 and v2 controller generations, hence the two paths.
 */
static inline void qspi_npcx_config_dra_4byte_mode(const struct device *dev,
						   const struct npcx_qspi_cfg *qspi_cfg)
{
#if defined(CONFIG_FLASH_NPCX_FIU_SUPP_DRA_4B_ADDR)
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

#if defined(CONFIG_FLASH_NPCX_FIU_DRA_V1)
	if (qspi_cfg->enter_4ba != 0) {
		if ((qspi_cfg->flags & NPCX_QSPI_SEC_FLASH_SL) != 0) {
			inst->SPI1_DEV |= BIT(NPCX_SPI1_DEV_FOUR_BADDR_CS11);
		} else {
			inst->SPI1_DEV |= BIT(NPCX_SPI1_DEV_FOUR_BADDR_CS10);
		}
	} else {
		inst->SPI1_DEV &= ~(BIT(NPCX_SPI1_DEV_FOUR_BADDR_CS11) |
				    BIT(NPCX_SPI1_DEV_FOUR_BADDR_CS10));
	}
#elif defined(CONFIG_FLASH_NPCX_FIU_DRA_V2)
	if (qspi_cfg->enter_4ba != 0) {
		SET_FIELD(inst->SPI_DEV, NPCX_SPI_DEV_NADDRB, NPCX_DEV_NUM_ADDR_4BYTE);
	}
#endif
#endif /* CONFIG_FLASH_NPCX_FIU_SUPP_DRA_4B_ADDR */
}

/* Apply all Direct Read Access settings for the given device configuration. */
static inline void qspi_npcx_config_dra_mode(const struct device *dev,
					     const struct npcx_qspi_cfg *qspi_cfg)
{
	struct fiu_reg *const inst = HAL_INSTANCE(dev);

	/* Select SPI device number for DRA mode in npcx4 series */
	if (IS_ENABLED(CONFIG_FLASH_NPCX_FIU_DRA_V2)) {
		int spi_dev_num = (qspi_cfg->flags & NPCX_QSPI_SEC_FLASH_SL) != 0 ? 1 : 0;

		SET_FIELD(inst->BURST_CFG, NPCX_BURST_CFG_SPI_DEV_SEL, spi_dev_num);
	}

	/* Enable quad mode of Direct Read Mode if needed */
	if (qspi_cfg->qer_type != JESD216_DW15_QER_NONE) {
		inst->RESP_CFG |= BIT(NPCX_RESP_CFG_QUAD_EN);
	} else {
		inst->RESP_CFG &= ~BIT(NPCX_RESP_CFG_QUAD_EN);
	}

	/* Selects the SPI read access type of Direct Read Access mode */
	SET_FIELD(inst->SPI_FL_CFG, NPCX_SPI_FL_CFG_RD_MODE, qspi_cfg->rd_mode);

	/* Enable/Disable 4 byte address mode for Direct Read Access (DRA) */
	qspi_npcx_config_dra_4byte_mode(dev, qspi_cfg);
}

/* Apply bus-operation side effects (currently only internal flash WP). */
static inline void qspi_npcx_fiu_set_operation(const struct device *dev, uint32_t operation)
{
	if ((operation & NPCX_EX_OP_INT_FLASH_WP) != 0) {
		npcx_pinctrl_flash_write_protect_set();
	}
}

/* NPCX specific QSPI-FIU controller functions */

/*
 * Execute one UMA transaction: opcode, then optional address, write data
 * and read data phases as selected by @p flags. Chip select is asserted
 * for the whole transaction. Returns 0 on success, -EPERM when UMA is
 * locked, -EINVAL on a missing tx/rx buffer.
 *
 * NOTE(review): the -EINVAL early returns leave chip select asserted;
 * callers appear responsible for recovery — confirm before relying on it.
 */
int qspi_npcx_fiu_uma_transceive(const struct device *dev, struct npcx_uma_cfg *cfg,
				 uint32_t flags)
{
	struct npcx_qspi_fiu_data *const data = dev->data;

	/* UMA transaction is permitted? */
	if ((data->operation & NPCX_EX_OP_LOCK_UMA) != 0) {
		return -EPERM;
	}

	/* Assert chip select */
	qspi_npcx_uma_cs_level(dev, data->sw_cs, false);

	/* Transmit op-code first */
	qspi_npcx_uma_write_byte(dev, cfg->opcode);

	if ((flags & NPCX_UMA_ACCESS_ADDR) != 0) {
		/* 3-byte or 4-byte address? */
		const int addr_start = (data->cur_cfg->enter_4ba != 0) ? 0 : 1;

		for (size_t i = addr_start; i < 4; i++) {
			LOG_DBG("addr %d, %02x", i, cfg->addr.u8[i]);
			qspi_npcx_uma_write_byte(dev, cfg->addr.u8[i]);
		}
	}

	if ((flags & NPCX_UMA_ACCESS_WRITE) != 0) {
		if (cfg->tx_buf == NULL) {
			return -EINVAL;
		}
		for (size_t i = 0; i < cfg->tx_count; i++) {
			qspi_npcx_uma_write_byte(dev, cfg->tx_buf[i]);
		}
	}

	if ((flags & NPCX_UMA_ACCESS_READ) != 0) {
		if (cfg->rx_buf == NULL) {
			return -EINVAL;
		}
		for (size_t i = 0; i < cfg->rx_count; i++) {
			qspi_npcx_uma_read_byte(dev, cfg->rx_buf + i);
		}
	}

	/* De-assert chip select */
	qspi_npcx_uma_cs_level(dev, data->sw_cs, true);

	return 0;
}

/*
 * Take the QSPI bus lock and (re)configure the controller for the given
 * device config/operation. Must be paired with qspi_npcx_fiu_mutex_unlock().
 */
void qspi_npcx_fiu_mutex_lock_configure(const struct device *dev,
					const struct npcx_qspi_cfg *cfg,
					const uint32_t operation)
{
	struct npcx_qspi_fiu_data *const data = dev->data;

	k_sem_take(&data->lock_sem, K_FOREVER);

	/* If the current device is different from previous one, configure it */
	if (data->cur_cfg != cfg) {
		data->cur_cfg = cfg;

		/* Apply pin-muxing and tri-state */
		pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);

		/* Configure User Mode Access (UMA) settings */
		qspi_npcx_config_uma_mode(dev, cfg);

		/* Configure for Direct Read Access (DRA) settings */
		qspi_npcx_config_dra_mode(dev, cfg);

		/* Save SW CS bit used in UMA mode */
		data->sw_cs = find_lsb_set(cfg->flags & NPCX_QSPI_SW_CS_MASK) - 1;
	}

	/* Set QSPI bus operation */
	if (data->operation != operation) {
		qspi_npcx_fiu_set_operation(dev, operation);
		data->operation = operation;
	}
}

/* Release the QSPI bus lock taken by qspi_npcx_fiu_mutex_lock_configure(). */
void qspi_npcx_fiu_mutex_unlock(const struct device *dev)
{
	struct npcx_qspi_fiu_data *const data = dev->data;

	k_sem_give(&data->lock_sem);
}

/*
 * Driver init: turn on the FIU clock, create the bus lock, and optionally
 * enable direct access for two external SPI devices.
 */
static int qspi_npcx_fiu_init(const struct device *dev)
{
	const struct npcx_qspi_fiu_config *const config = dev->config;
	struct npcx_qspi_fiu_data *const data = dev->data;
	const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	int ret;

	if (!device_is_ready(clk_dev)) {
		LOG_ERR("%s device not ready", clk_dev->name);
		return -ENODEV;
	}

	/* Turn on device clock first and get source clock freq. */
	ret = clock_control_on(clk_dev,
			       (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on FIU clock fail %d", ret);
		return ret;
	}

	/* initialize mutex for qspi controller */
	k_sem_init(&data->lock_sem, 1, 1);

	/* Enable direct access for 2 external SPI devices */
	if (config->en_direct_access_2dev) {
#if defined(CONFIG_FLASH_NPCX_FIU_SUPP_DRA_2_DEV)
		struct fiu_reg *const inst = HAL_INSTANCE(dev);

		inst->FIU_EXT_CFG |= BIT(NPCX_FIU_EXT_CFG_SPI1_2DEV);
#endif
	}

	return 0;
}

#define NPCX_SPI_FIU_INIT(n)							\
static const struct npcx_qspi_fiu_config npcx_qspi_fiu_config_##n = {		\
	.base = DT_INST_REG_ADDR(n),						\
	.clk_cfg = NPCX_DT_CLK_CFG_ITEM(n),					\
	.en_direct_access_2dev = DT_INST_PROP(n, en_direct_access_2dev),	\
};										\
static struct npcx_qspi_fiu_data npcx_qspi_fiu_data_##n;			\
DEVICE_DT_INST_DEFINE(n, qspi_npcx_fiu_init, NULL,				\
		      &npcx_qspi_fiu_data_##n, &npcx_qspi_fiu_config_##n,	\
		      PRE_KERNEL_1, CONFIG_FLASH_INIT_PRIORITY, NULL);

DT_INST_FOREACH_STATUS_OKAY(NPCX_SPI_FIU_INIT)
```
/content/code_sandbox/drivers/flash/flash_npcx_fiu_qspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,579
```c /* * */ #define DT_DRV_COMPAT st_stm32_qspi_nor #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/toolchain.h> #include <zephyr/arch/common/ffs.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/util.h> #include <soc.h> #include <string.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/flash.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/dma/dma_stm32.h> #include <zephyr/drivers/gpio.h> #if DT_INST_NODE_HAS_PROP(0, spi_bus_width) && \ DT_INST_PROP(0, spi_bus_width) == 4 #define STM32_QSPI_USE_QUAD_IO 1 #else #define STM32_QSPI_USE_QUAD_IO 0 #endif /* Get the base address of the flash from the DTS node */ #define STM32_QSPI_BASE_ADDRESS DT_INST_REG_ADDR(0) #define STM32_QSPI_RESET_GPIO DT_INST_NODE_HAS_PROP(0, reset_gpios) #define STM32_QSPI_RESET_CMD DT_INST_PROP(0, reset_cmd) #include <stm32_ll_dma.h> #include "spi_nor.h" #include "jesd216.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(flash_stm32_qspi, CONFIG_FLASH_LOG_LEVEL); #define STM32_QSPI_FIFO_THRESHOLD 8 #define STM32_QSPI_CLOCK_PRESCALER_MAX 255 #define STM32_QSPI_UNKNOWN_MODE (0xFF) #define STM32_QSPI_USE_DMA DT_NODE_HAS_PROP(DT_INST_PARENT(0), dmas) #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_qspi_nor) /* In dual-flash mode, total size is twice the size of one flash component */ #define STM32_QSPI_DOUBLE_FLASH DT_PROP(DT_NODELABEL(quadspi), dual_flash) #if STM32_QSPI_USE_DMA static const uint32_t table_m_size[] = { LL_DMA_MDATAALIGN_BYTE, LL_DMA_MDATAALIGN_HALFWORD, LL_DMA_MDATAALIGN_WORD, }; static const uint32_t table_p_size[] = { LL_DMA_PDATAALIGN_BYTE, LL_DMA_PDATAALIGN_HALFWORD, LL_DMA_PDATAALIGN_WORD, }; /* Lookup table to set dma priority from the DTS */ static const uint32_t table_priority[] = { DMA_PRIORITY_LOW, DMA_PRIORITY_MEDIUM, DMA_PRIORITY_HIGH, DMA_PRIORITY_VERY_HIGH, }; #endif /* STM32_QSPI_USE_DMA */ 
typedef void (*irq_config_func_t)(const struct device *dev); struct stream { DMA_TypeDef *reg; const struct device *dev; uint32_t channel; struct dma_config cfg; }; struct flash_stm32_qspi_config { QUADSPI_TypeDef *regs; struct stm32_pclken pclken; irq_config_func_t irq_config; size_t flash_size; uint32_t max_frequency; const struct pinctrl_dev_config *pcfg; #if STM32_QSPI_RESET_GPIO const struct gpio_dt_spec reset; #endif #if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_qspi_nor), jedec_id) uint8_t jedec_id[DT_INST_PROP_LEN(0, jedec_id)]; #endif /* jedec_id */ }; struct flash_stm32_qspi_data { QSPI_HandleTypeDef hqspi; struct k_sem sem; struct k_sem sync; #if defined(CONFIG_FLASH_PAGE_LAYOUT) struct flash_pages_layout layout; #endif struct jesd216_erase_type erase_types[JESD216_NUM_ERASE_TYPES]; /* Number of bytes per page */ uint16_t page_size; enum jesd216_dw15_qer_type qer_type; enum jesd216_mode_type mode; int cmd_status; struct stream dma; uint8_t qspi_write_cmd; uint8_t qspi_read_cmd; uint8_t qspi_read_cmd_latency; /* * If set addressed operations should use 32-bit rather than * 24-bit addresses. 
*/ bool flag_access_32bit: 1; }; static const QSPI_CommandTypeDef cmd_write_en = { .Instruction = SPI_NOR_CMD_WREN, .InstructionMode = QSPI_INSTRUCTION_1_LINE }; static inline void qspi_lock_thread(const struct device *dev) { struct flash_stm32_qspi_data *dev_data = dev->data; k_sem_take(&dev_data->sem, K_FOREVER); } static inline void qspi_unlock_thread(const struct device *dev) { struct flash_stm32_qspi_data *dev_data = dev->data; k_sem_give(&dev_data->sem); } static inline void qspi_set_address_size(const struct device *dev, QSPI_CommandTypeDef *cmd) { struct flash_stm32_qspi_data *dev_data = dev->data; if (dev_data->flag_access_32bit) { cmd->AddressSize = QSPI_ADDRESS_32_BITS; return; } cmd->AddressSize = QSPI_ADDRESS_24_BITS; } static inline int qspi_prepare_quad_read(const struct device *dev, QSPI_CommandTypeDef *cmd) { struct flash_stm32_qspi_data *dev_data = dev->data; __ASSERT_NO_MSG(dev_data->mode == JESD216_MODE_114 || dev_data->mode == JESD216_MODE_144); cmd->Instruction = dev_data->qspi_read_cmd; cmd->AddressMode = ((dev_data->mode == JESD216_MODE_114) ? QSPI_ADDRESS_1_LINE : QSPI_ADDRESS_4_LINES); cmd->DataMode = QSPI_DATA_4_LINES; cmd->DummyCycles = dev_data->qspi_read_cmd_latency; return 0; } static inline int qspi_prepare_quad_program(const struct device *dev, QSPI_CommandTypeDef *cmd) { struct flash_stm32_qspi_data *dev_data = dev->data; __ASSERT_NO_MSG(dev_data->qspi_write_cmd == SPI_NOR_CMD_PP_1_1_4 || dev_data->qspi_write_cmd == SPI_NOR_CMD_PP_1_4_4); cmd->Instruction = dev_data->qspi_write_cmd; cmd->AddressMode = ((cmd->Instruction == SPI_NOR_CMD_PP_1_1_4) ? QSPI_ADDRESS_1_LINE : QSPI_ADDRESS_4_LINES); cmd->DataMode = QSPI_DATA_4_LINES; cmd->DummyCycles = 0; return 0; } /* * Send a command over QSPI bus. 
*/ static int qspi_send_cmd(const struct device *dev, const QSPI_CommandTypeDef *cmd) { const struct flash_stm32_qspi_config *dev_cfg = dev->config; struct flash_stm32_qspi_data *dev_data = dev->data; HAL_StatusTypeDef hal_ret; ARG_UNUSED(dev_cfg); LOG_DBG("Instruction 0x%x", cmd->Instruction); dev_data->cmd_status = 0; hal_ret = HAL_QSPI_Command_IT(&dev_data->hqspi, (QSPI_CommandTypeDef *)cmd); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send QSPI instruction", hal_ret); return -EIO; } LOG_DBG("CCR 0x%x", dev_cfg->regs->CCR); k_sem_take(&dev_data->sync, K_FOREVER); return dev_data->cmd_status; } /* * Perform a read access over QSPI bus. */ static int qspi_read_access(const struct device *dev, QSPI_CommandTypeDef *cmd, uint8_t *data, size_t size) { const struct flash_stm32_qspi_config *dev_cfg = dev->config; struct flash_stm32_qspi_data *dev_data = dev->data; HAL_StatusTypeDef hal_ret; ARG_UNUSED(dev_cfg); cmd->NbData = size; dev_data->cmd_status = 0; hal_ret = HAL_QSPI_Command_IT(&dev_data->hqspi, cmd); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send QSPI instruction", hal_ret); return -EIO; } #if STM32_QSPI_USE_DMA hal_ret = HAL_QSPI_Receive_DMA(&dev_data->hqspi, data); #else hal_ret = HAL_QSPI_Receive_IT(&dev_data->hqspi, data); #endif if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to read data", hal_ret); return -EIO; } k_sem_take(&dev_data->sync, K_FOREVER); return dev_data->cmd_status; } /* * Perform a write access over QSPI bus. 
*/ static int qspi_write_access(const struct device *dev, QSPI_CommandTypeDef *cmd, const uint8_t *data, size_t size) { const struct flash_stm32_qspi_config *dev_cfg = dev->config; struct flash_stm32_qspi_data *dev_data = dev->data; HAL_StatusTypeDef hal_ret; ARG_UNUSED(dev_cfg); LOG_DBG("Instruction 0x%x", cmd->Instruction); cmd->NbData = size; dev_data->cmd_status = 0; hal_ret = HAL_QSPI_Command_IT(&dev_data->hqspi, cmd); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send QSPI instruction", hal_ret); return -EIO; } #if STM32_QSPI_USE_DMA hal_ret = HAL_QSPI_Transmit_DMA(&dev_data->hqspi, (uint8_t *)data); #else hal_ret = HAL_QSPI_Transmit_IT(&dev_data->hqspi, (uint8_t *)data); #endif if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to read data", hal_ret); return -EIO; } LOG_DBG("CCR 0x%x", dev_cfg->regs->CCR); k_sem_take(&dev_data->sync, K_FOREVER); return dev_data->cmd_status; } #if defined(CONFIG_FLASH_JESD216_API) /* * Read Serial Flash ID : * perform a read access over SPI bus for read Identification (DataMode is already set) * and compare to the jedec-id from the DTYS table exists */ static int qspi_read_jedec_id(const struct device *dev, uint8_t *id) { struct flash_stm32_qspi_data *dev_data = dev->data; uint8_t data[JESD216_READ_ID_LEN]; QSPI_CommandTypeDef cmd = { .Instruction = JESD216_CMD_READ_ID, .AddressSize = QSPI_ADDRESS_NONE, .DummyCycles = 8, .InstructionMode = QSPI_INSTRUCTION_1_LINE, .AddressMode = QSPI_ADDRESS_1_LINE, .DataMode = QSPI_DATA_1_LINE, .NbData = JESD216_READ_ID_LEN, }; HAL_StatusTypeDef hal_ret; hal_ret = HAL_QSPI_Command_IT(&dev_data->hqspi, &cmd); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send OSPI instruction", hal_ret); return -EIO; } hal_ret = HAL_QSPI_Receive(&dev_data->hqspi, data, HAL_QSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to read data", hal_ret); return -EIO; } LOG_DBG("Read JESD216-ID"); dev_data->cmd_status = 0; memcpy(id, data, JESD216_READ_ID_LEN); return 0; } #endif /* 
CONFIG_FLASH_JESD216_API */ /* * Read Serial Flash Discovery Parameter */ static int qspi_read_sfdp(const struct device *dev, off_t addr, void *data, size_t size) { struct flash_stm32_qspi_data *dev_data = dev->data; HAL_StatusTypeDef hal_ret; __ASSERT(data != NULL, "null destination"); LOG_INF("Reading SFDP"); QSPI_CommandTypeDef cmd = { .Instruction = JESD216_CMD_READ_SFDP, .Address = addr, .AddressSize = QSPI_ADDRESS_24_BITS, .DummyCycles = 8, .InstructionMode = QSPI_INSTRUCTION_1_LINE, .AddressMode = QSPI_ADDRESS_1_LINE, .DataMode = QSPI_DATA_1_LINE, .NbData = size, }; hal_ret = HAL_QSPI_Command(&dev_data->hqspi, &cmd, HAL_QSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send SFDP instruction", hal_ret); return -EIO; } hal_ret = HAL_QSPI_Receive(&dev_data->hqspi, (uint8_t *)data, HAL_QSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to read SFDP", hal_ret); return -EIO; } dev_data->cmd_status = 0; return 0; } static bool qspi_address_is_valid(const struct device *dev, off_t addr, size_t size) { const struct flash_stm32_qspi_config *dev_cfg = dev->config; size_t flash_size = dev_cfg->flash_size; return (addr >= 0) && ((uint64_t)addr + (uint64_t)size <= flash_size); } #ifdef CONFIG_STM32_MEMMAP /* Must be called inside qspi_lock_thread(). 
*/ static int stm32_qspi_set_memory_mapped(const struct device *dev) { int ret; HAL_StatusTypeDef hal_ret; struct flash_stm32_qspi_data *dev_data = dev->data; QSPI_CommandTypeDef cmd = { .Instruction = SPI_NOR_CMD_READ, .Address = 0, .InstructionMode = QSPI_INSTRUCTION_1_LINE, .AddressMode = QSPI_ADDRESS_1_LINE, .DataMode = QSPI_DATA_1_LINE, }; qspi_set_address_size(dev, &cmd); if (IS_ENABLED(STM32_QSPI_USE_QUAD_IO)) { ret = qspi_prepare_quad_read(dev, &cmd); if (ret < 0) { return ret; } } QSPI_MemoryMappedTypeDef mem_mapped = { .TimeOutActivation = QSPI_TIMEOUT_COUNTER_DISABLE, }; hal_ret = HAL_QSPI_MemoryMapped(&dev_data->hqspi, &cmd, &mem_mapped); if (hal_ret != 0) { LOG_ERR("%d: Failed to enable memory mapped", hal_ret); return -EIO; } LOG_DBG("MemoryMap mode enabled"); return 0; } static bool stm32_qspi_is_memory_mapped(const struct device *dev) { struct flash_stm32_qspi_data *dev_data = dev->data; return READ_BIT(dev_data->hqspi.Instance->CCR, QUADSPI_CCR_FMODE) == QUADSPI_CCR_FMODE; } static int stm32_qspi_abort(const struct device *dev) { struct flash_stm32_qspi_data *dev_data = dev->data; HAL_StatusTypeDef hal_ret; hal_ret = HAL_QSPI_Abort(&dev_data->hqspi); if (hal_ret != HAL_OK) { LOG_ERR("%d: QSPI abort failed", hal_ret); return -EIO; } return 0; } #endif static int flash_stm32_qspi_read(const struct device *dev, off_t addr, void *data, size_t size) { int ret; if (!qspi_address_is_valid(dev, addr, size)) { LOG_DBG("Error: address or size exceeds expected values: " "addr 0x%lx, size %zu", (long)addr, size); return -EINVAL; } /* read non-zero size */ if (size == 0) { return 0; } #ifdef CONFIG_STM32_MEMMAP qspi_lock_thread(dev); /* Do reads through memory-mapping instead of indirect */ if (!stm32_qspi_is_memory_mapped(dev)) { ret = stm32_qspi_set_memory_mapped(dev); if (ret != 0) { LOG_ERR("READ: failed to set memory mapped"); goto end; } } __ASSERT_NO_MSG(stm32_qspi_is_memory_mapped(dev)); uintptr_t mmap_addr = STM32_QSPI_BASE_ADDRESS + addr; 
LOG_DBG("Memory-mapped read from 0x%08lx, len %zu", mmap_addr, size); memcpy(data, (void *)mmap_addr, size); ret = 0; goto end; #else QSPI_CommandTypeDef cmd = { .Instruction = SPI_NOR_CMD_READ, .Address = addr, .InstructionMode = QSPI_INSTRUCTION_1_LINE, .AddressMode = QSPI_ADDRESS_1_LINE, .DataMode = QSPI_DATA_1_LINE, }; qspi_set_address_size(dev, &cmd); if (IS_ENABLED(STM32_QSPI_USE_QUAD_IO)) { ret = qspi_prepare_quad_read(dev, &cmd); if (ret < 0) { return ret; } } qspi_lock_thread(dev); ret = qspi_read_access(dev, &cmd, data, size); goto end; #endif end: qspi_unlock_thread(dev); return ret; } static int qspi_wait_until_ready(const struct device *dev) { uint8_t reg; int ret; QSPI_CommandTypeDef cmd = { .Instruction = SPI_NOR_CMD_RDSR, .InstructionMode = QSPI_INSTRUCTION_1_LINE, .DataMode = QSPI_DATA_1_LINE, }; do { ret = qspi_read_access(dev, &cmd, &reg, sizeof(reg)); } while (!ret && (reg & SPI_NOR_WIP_BIT)); return ret; } static int flash_stm32_qspi_write(const struct device *dev, off_t addr, const void *data, size_t size) { int ret = 0; if (!qspi_address_is_valid(dev, addr, size)) { LOG_DBG("Error: address or size exceeds expected values: " "addr 0x%lx, size %zu", (long)addr, size); return -EINVAL; } /* write non-zero size */ if (size == 0) { return 0; } QSPI_CommandTypeDef cmd_pp = { .Instruction = SPI_NOR_CMD_PP, .InstructionMode = QSPI_INSTRUCTION_1_LINE, .AddressMode = QSPI_ADDRESS_1_LINE, .DataMode = QSPI_DATA_1_LINE, }; qspi_set_address_size(dev, &cmd_pp); if (IS_ENABLED(STM32_QSPI_USE_QUAD_IO)) { ret = qspi_prepare_quad_program(dev, &cmd_pp); if (ret < 0) { return ret; } } qspi_lock_thread(dev); #ifdef CONFIG_STM32_MEMMAP if (stm32_qspi_is_memory_mapped(dev)) { /* Abort ongoing transfer to force CS high/BUSY deasserted */ ret = stm32_qspi_abort(dev); if (ret != 0) { LOG_ERR("Failed to abort memory-mapped access before write"); goto end; } } #endif while (size > 0) { size_t to_write = size; /* Don't write more than a page. 
*/ if (to_write >= SPI_NOR_PAGE_SIZE) { to_write = SPI_NOR_PAGE_SIZE; } /* Don't write across a page boundary */ if (((addr + to_write - 1U) / SPI_NOR_PAGE_SIZE) != (addr / SPI_NOR_PAGE_SIZE)) { to_write = SPI_NOR_PAGE_SIZE - (addr % SPI_NOR_PAGE_SIZE); } ret = qspi_send_cmd(dev, &cmd_write_en); if (ret != 0) { break; } cmd_pp.Address = addr; ret = qspi_write_access(dev, &cmd_pp, data, to_write); if (ret != 0) { break; } size -= to_write; data = (const uint8_t *)data + to_write; addr += to_write; ret = qspi_wait_until_ready(dev); if (ret != 0) { break; } } goto end; end: qspi_unlock_thread(dev); return ret; } static int flash_stm32_qspi_erase(const struct device *dev, off_t addr, size_t size) { const struct flash_stm32_qspi_config *dev_cfg = dev->config; struct flash_stm32_qspi_data *dev_data = dev->data; int ret = 0; if (!qspi_address_is_valid(dev, addr, size)) { LOG_DBG("Error: address or size exceeds expected values: " "addr 0x%lx, size %zu", (long)addr, size); return -EINVAL; } /* erase non-zero size */ if (size == 0) { return 0; } QSPI_CommandTypeDef cmd_erase = { .Instruction = 0, .InstructionMode = QSPI_INSTRUCTION_1_LINE, .AddressMode = QSPI_ADDRESS_1_LINE, }; qspi_set_address_size(dev, &cmd_erase); qspi_lock_thread(dev); #ifdef CONFIG_STM32_MEMMAP if (stm32_qspi_is_memory_mapped(dev)) { /* Abort ongoing transfer to force CS high/BUSY deasserted */ ret = stm32_qspi_abort(dev); if (ret != 0) { LOG_ERR("Failed to abort memory-mapped access before erase"); goto end; } } #endif while ((size > 0) && (ret == 0)) { cmd_erase.Address = addr; qspi_send_cmd(dev, &cmd_write_en); if (size == dev_cfg->flash_size) { /* chip erase */ cmd_erase.Instruction = SPI_NOR_CMD_CE; cmd_erase.AddressMode = QSPI_ADDRESS_NONE; qspi_send_cmd(dev, &cmd_erase); size -= dev_cfg->flash_size; } else { const struct jesd216_erase_type *erase_types = dev_data->erase_types; const struct jesd216_erase_type *bet = NULL; for (uint8_t ei = 0; ei < JESD216_NUM_ERASE_TYPES; ++ei) { const struct 
jesd216_erase_type *etp = &erase_types[ei]; if ((etp->exp != 0) && SPI_NOR_IS_ALIGNED(addr, etp->exp) && SPI_NOR_IS_ALIGNED(size, etp->exp) && ((bet == NULL) || (etp->exp > bet->exp))) { bet = etp; cmd_erase.Instruction = bet->cmd; } } if (bet != NULL) { qspi_send_cmd(dev, &cmd_erase); addr += BIT(bet->exp); size -= BIT(bet->exp); } else { LOG_ERR("Can't erase %zu at 0x%lx", size, (long)addr); ret = -EINVAL; } } qspi_wait_until_ready(dev); } goto end; end: qspi_unlock_thread(dev); return ret; } static const struct flash_parameters flash_stm32_qspi_parameters = { .write_block_size = 1, .erase_value = 0xff }; static const struct flash_parameters * flash_stm32_qspi_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_stm32_qspi_parameters; } static void flash_stm32_qspi_isr(const struct device *dev) { struct flash_stm32_qspi_data *dev_data = dev->data; HAL_QSPI_IRQHandler(&dev_data->hqspi); } /* This function is executed in the interrupt context */ #if STM32_QSPI_USE_DMA static void qspi_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status) { DMA_HandleTypeDef *hdma = arg; ARG_UNUSED(dev); if (status < 0) { LOG_ERR("DMA callback error with channel %d.", channel); } HAL_DMA_IRQHandler(hdma); } #endif __weak HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma) { return HAL_OK; } __weak HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma) { return HAL_OK; } /* * Transfer Error callback. */ void HAL_QSPI_ErrorCallback(QSPI_HandleTypeDef *hqspi) { struct flash_stm32_qspi_data *dev_data = CONTAINER_OF(hqspi, struct flash_stm32_qspi_data, hqspi); LOG_DBG("Enter"); dev_data->cmd_status = -EIO; k_sem_give(&dev_data->sync); } /* * Command completed callback. */ void HAL_QSPI_CmdCpltCallback(QSPI_HandleTypeDef *hqspi) { struct flash_stm32_qspi_data *dev_data = CONTAINER_OF(hqspi, struct flash_stm32_qspi_data, hqspi); k_sem_give(&dev_data->sync); } /* * Rx Transfer completed callback. 
*/ void HAL_QSPI_RxCpltCallback(QSPI_HandleTypeDef *hqspi) { struct flash_stm32_qspi_data *dev_data = CONTAINER_OF(hqspi, struct flash_stm32_qspi_data, hqspi); k_sem_give(&dev_data->sync); } /* * Tx Transfer completed callback. */ void HAL_QSPI_TxCpltCallback(QSPI_HandleTypeDef *hqspi) { struct flash_stm32_qspi_data *dev_data = CONTAINER_OF(hqspi, struct flash_stm32_qspi_data, hqspi); k_sem_give(&dev_data->sync); } /* * Status Match callback. */ void HAL_QSPI_StatusMatchCallback(QSPI_HandleTypeDef *hqspi) { struct flash_stm32_qspi_data *dev_data = CONTAINER_OF(hqspi, struct flash_stm32_qspi_data, hqspi); k_sem_give(&dev_data->sync); } /* * Timeout callback. */ void HAL_QSPI_TimeOutCallback(QSPI_HandleTypeDef *hqspi) { struct flash_stm32_qspi_data *dev_data = CONTAINER_OF(hqspi, struct flash_stm32_qspi_data, hqspi); LOG_DBG("Enter"); dev_data->cmd_status = -EIO; k_sem_give(&dev_data->sync); } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static void flash_stm32_qspi_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { struct flash_stm32_qspi_data *dev_data = dev->data; *layout = &dev_data->layout; *layout_size = 1; } #endif static const struct flash_driver_api flash_stm32_qspi_driver_api = { .read = flash_stm32_qspi_read, .write = flash_stm32_qspi_write, .erase = flash_stm32_qspi_erase, .get_parameters = flash_stm32_qspi_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_stm32_qspi_pages_layout, #endif #if defined(CONFIG_FLASH_JESD216_API) .sfdp_read = qspi_read_sfdp, .read_jedec_id = qspi_read_jedec_id, #endif /* CONFIG_FLASH_JESD216_API */ }; #if defined(CONFIG_FLASH_PAGE_LAYOUT) static int setup_pages_layout(const struct device *dev) { const struct flash_stm32_qspi_config *dev_cfg = dev->config; struct flash_stm32_qspi_data *data = dev->data; const size_t flash_size = dev_cfg->flash_size; uint32_t layout_page_size = data->page_size; uint8_t exp = 0; int rv = 0; /* Find the smallest erase size. 
*/ for (size_t i = 0; i < ARRAY_SIZE(data->erase_types); ++i) { const struct jesd216_erase_type *etp = &data->erase_types[i]; if ((etp->cmd != 0) && ((exp == 0) || (etp->exp < exp))) { exp = etp->exp; } } if (exp == 0) { return -ENOTSUP; } uint32_t erase_size = BIT(exp) << STM32_QSPI_DOUBLE_FLASH; /* We need layout page size to be compatible with erase size */ if ((layout_page_size % erase_size) != 0) { LOG_DBG("layout page %u not compatible with erase size %u", layout_page_size, erase_size); LOG_DBG("erase size will be used as layout page size"); layout_page_size = erase_size; } /* Warn but accept layout page sizes that leave inaccessible * space. */ if ((flash_size % layout_page_size) != 0) { LOG_INF("layout page %u wastes space with device size %zu", layout_page_size, flash_size); } data->layout.pages_size = layout_page_size; data->layout.pages_count = flash_size / layout_page_size; LOG_DBG("layout %u x %u By pages", data->layout.pages_count, data->layout.pages_size); return rv; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static int qspi_program_addr_4b(const struct device *dev, bool write_enable) { int ret; /* Send write enable command, if required */ if (write_enable) { ret = qspi_send_cmd(dev, &cmd_write_en); if (ret != 0) { return ret; } } /* Program the flash memory to use 4 bytes addressing */ QSPI_CommandTypeDef cmd = { .Instruction = SPI_NOR_CMD_4BA, .InstructionMode = QSPI_INSTRUCTION_1_LINE, }; /* * No need to Read control register afterwards to verify if 4byte addressing mode * is enabled as the effect of the command is immediate * and the SPI_NOR_CMD_RDCR is vendor-specific : * SPI_NOR_4BYTE_BIT is BIT 5 for Macronix and 0 for Micron or Windbond * Moreover bit value meaning is also vendor-specific */ return qspi_send_cmd(dev, &cmd); } static int qspi_read_status_register(const struct device *dev, uint8_t reg_num, uint8_t *reg) { QSPI_CommandTypeDef cmd = { .InstructionMode = QSPI_INSTRUCTION_1_LINE, .DataMode = QSPI_DATA_1_LINE, }; switch (reg_num) { 
case 1U: cmd.Instruction = SPI_NOR_CMD_RDSR; break; case 2U: cmd.Instruction = SPI_NOR_CMD_RDSR2; break; case 3U: cmd.Instruction = SPI_NOR_CMD_RDSR3; break; default: return -EINVAL; } return qspi_read_access(dev, &cmd, reg, sizeof(*reg)); } static int qspi_write_status_register(const struct device *dev, uint8_t reg_num, uint8_t reg) { struct flash_stm32_qspi_data *dev_data = dev->data; size_t size; uint8_t regs[4] = { 0 }; uint8_t *regs_p; int ret; QSPI_CommandTypeDef cmd = { .Instruction = SPI_NOR_CMD_WRSR, .InstructionMode = QSPI_INSTRUCTION_1_LINE, .DataMode = QSPI_DATA_1_LINE, }; if (reg_num == 1) { size = 1U; regs[0] = reg; regs_p = &regs[0]; /* 1 byte write clears SR2, write SR2 as well */ if (dev_data->qer_type == JESD216_DW15_QER_S2B1v1) { ret = qspi_read_status_register(dev, 2, &regs[1]); if (ret < 0) { return ret; } size = 2U; } } else if (reg_num == 2) { cmd.Instruction = SPI_NOR_CMD_WRSR2; size = 1U; regs[1] = reg; regs_p = &regs[1]; /* if SR2 write needs SR1 */ if ((dev_data->qer_type == JESD216_DW15_QER_VAL_S2B1v1) || (dev_data->qer_type == JESD216_DW15_QER_VAL_S2B1v4) || (dev_data->qer_type == JESD216_DW15_QER_VAL_S2B1v5)) { ret = qspi_read_status_register(dev, 1, &regs[0]); if (ret < 0) { return ret; } cmd.Instruction = SPI_NOR_CMD_WRSR; size = 2U; regs_p = &regs[0]; } } else if (reg_num == 3) { cmd.Instruction = SPI_NOR_CMD_WRSR3; size = 1U; regs[2] = reg; regs_p = &regs[2]; } else { return -EINVAL; } return qspi_write_access(dev, &cmd, regs_p, size); } static int qspi_write_enable(const struct device *dev) { uint8_t reg; int ret; ret = qspi_send_cmd(dev, &cmd_write_en); if (ret) { return ret; } do { ret = qspi_read_status_register(dev, 1U, &reg); } while (!ret && !(reg & SPI_NOR_WEL_BIT)); return ret; } static int qspi_program_quad_io(const struct device *dev) { struct flash_stm32_qspi_data *data = dev->data; uint8_t qe_reg_num; uint8_t qe_bit; uint8_t reg; int ret; switch (data->qer_type) { case JESD216_DW15_QER_NONE: /* no QE bit, device 
detects reads based on opcode */ return 0; case JESD216_DW15_QER_S1B6: qe_reg_num = 1U; qe_bit = BIT(6U); break; case JESD216_DW15_QER_S2B7: qe_reg_num = 2U; qe_bit = BIT(7U); break; case JESD216_DW15_QER_S2B1v1: __fallthrough; case JESD216_DW15_QER_S2B1v4: __fallthrough; case JESD216_DW15_QER_S2B1v5: __fallthrough; case JESD216_DW15_QER_S2B1v6: qe_reg_num = 2U; qe_bit = BIT(1U); break; default: return -ENOTSUP; } ret = qspi_read_status_register(dev, qe_reg_num, &reg); if (ret < 0) { return ret; } /* exit early if QE bit is already set */ if ((reg & qe_bit) != 0U) { return 0; } reg |= qe_bit; ret = qspi_write_enable(dev); if (ret < 0) { return ret; } ret = qspi_write_status_register(dev, qe_reg_num, reg); if (ret < 0) { return ret; } ret = qspi_wait_until_ready(dev); if (ret < 0) { return ret; } /* validate that QE bit is set */ ret = qspi_read_status_register(dev, qe_reg_num, &reg); if (ret < 0) { return ret; } if ((reg & qe_bit) == 0U) { LOG_ERR("Status Register %u [0x%02x] not set", qe_reg_num, reg); return -EIO; } return ret; } static int spi_nor_process_bfp(const struct device *dev, const struct jesd216_param_header *php, const struct jesd216_bfp *bfp) { const struct flash_stm32_qspi_config *dev_cfg = dev->config; struct flash_stm32_qspi_data *data = dev->data; struct jesd216_erase_type *etp = data->erase_types; uint8_t addr_mode; const size_t flash_size = (jesd216_bfp_density(bfp) / 8U) << STM32_QSPI_DOUBLE_FLASH; int rc; if (flash_size != dev_cfg->flash_size) { LOG_ERR("Unexpected flash size: %u", flash_size); } LOG_INF("%s: %u MiBy flash", dev->name, (uint32_t)(flash_size >> 20)); /* Copy over the erase types, preserving their order. (The * Sector Map Parameter table references them by index.) 
*/ memset(data->erase_types, 0, sizeof(data->erase_types)); for (uint8_t ti = 1; ti <= ARRAY_SIZE(data->erase_types); ++ti) { if (jesd216_bfp_erase(bfp, ti, etp) == 0) { LOG_DBG("Erase %u with %02x", (uint32_t)BIT(etp->exp), etp->cmd); } ++etp; } data->page_size = jesd216_bfp_page_size(php, bfp); LOG_DBG("Page size %u bytes", data->page_size); LOG_DBG("Flash size %u bytes", flash_size); addr_mode = jesd216_bfp_addrbytes(bfp); if (addr_mode == JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_3B4B) { struct jesd216_bfp_dw16 dw16; if (jesd216_bfp_decode_dw16(php, bfp, &dw16) == 0) { /* * According to JESD216, the bit0 of dw16.enter_4ba * portion of flash description register 16 indicates * if it is enough to use 0xB7 instruction without * write enable to switch to 4 bytes addressing mode. * If bit 1 is set, a write enable is needed. */ if (dw16.enter_4ba & 0x3) { rc = qspi_program_addr_4b(dev, dw16.enter_4ba & 2); if (rc == 0) { data->flag_access_32bit = true; LOG_INF("Flash - address mode: 4B"); } else { LOG_ERR("Unable to enter 4B mode: %d\n", rc); return rc; } } } } if (addr_mode == JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_4B) { data->flag_access_32bit = true; LOG_INF("Flash - address mode: 4B"); } /* * Only check if the 1-4-4 (i.e. 4READ) or 1-1-4 (QREAD) * is supported - other modes are not. 
*/ if (IS_ENABLED(STM32_QSPI_USE_QUAD_IO)) { const enum jesd216_mode_type supported_modes[] = { JESD216_MODE_114, JESD216_MODE_144 }; struct jesd216_bfp_dw15 dw15; struct jesd216_instr res; /* reset active mode */ data->mode = STM32_QSPI_UNKNOWN_MODE; /* query supported read modes, begin from the slowest */ for (size_t i = 0; i < ARRAY_SIZE(supported_modes); ++i) { rc = jesd216_bfp_read_support(php, bfp, supported_modes[i], &res); if (rc >= 0) { LOG_INF("Quad read mode %d instr [0x%x] supported", supported_modes[i], res.instr); data->mode = supported_modes[i]; data->qspi_read_cmd = res.instr; data->qspi_read_cmd_latency = res.wait_states; if (res.mode_clocks) { data->qspi_read_cmd_latency += res.mode_clocks; } } } /* don't continue when there is no supported mode */ if (data->mode == STM32_QSPI_UNKNOWN_MODE) { LOG_ERR("No supported flash read mode found"); return -ENOTSUP; } LOG_INF("Quad read mode %d instr [0x%x] will be used", data->mode, res.instr); /* try to decode QE requirement type */ rc = jesd216_bfp_decode_dw15(php, bfp, &dw15); if (rc < 0) { /* will use QER from DTS or default (refer to device data) */ LOG_WRN("Unable to decode QE requirement [DW15]: %d", rc); } else { /* bypass DTS QER value */ data->qer_type = dw15.qer; } LOG_INF("QE requirement mode: %x", data->qer_type); /* enable QE */ rc = qspi_program_quad_io(dev); if (rc < 0) { LOG_ERR("Failed to enable Quad mode: %d", rc); return rc; } LOG_INF("Quad mode enabled"); } return 0; } #if STM32_QSPI_RESET_GPIO static void flash_stm32_qspi_gpio_reset(const struct device *dev) { const struct flash_stm32_qspi_config *dev_cfg = dev->config; /* Generate RESETn pulse for the flash memory */ gpio_pin_configure_dt(&dev_cfg->reset, GPIO_OUTPUT_ACTIVE); k_msleep(DT_INST_PROP(0, reset_gpios_duration)); gpio_pin_set_dt(&dev_cfg->reset, 0); } #endif #if STM32_QSPI_RESET_CMD static int flash_stm32_qspi_send_reset(const struct device *dev) { QSPI_CommandTypeDef cmd = { .Instruction = SPI_NOR_CMD_RESET_EN, 
.InstructionMode = QSPI_INSTRUCTION_1_LINE,
	};
	int ret;

	ret = qspi_send_cmd(dev, &cmd);
	if (ret != 0) {
		LOG_ERR("%d: Failed to send RESET_EN", ret);
		return ret;
	}

	cmd.Instruction = SPI_NOR_CMD_RESET_MEM;
	ret = qspi_send_cmd(dev, &cmd);
	if (ret != 0) {
		LOG_ERR("%d: Failed to send RESET_MEM", ret);
		return ret;
	}

	LOG_DBG("Send Reset command");

	return 0;
}
#endif

/* Driver init: pinctrl, optional flash reset, optional DMA setup, clock
 * enable, QSPI HAL init, SFDP/BFP discovery, then (optionally)
 * memory-mapped mode.
 */
static int flash_stm32_qspi_init(const struct device *dev)
{
	const struct flash_stm32_qspi_config *dev_cfg = dev->config;
	struct flash_stm32_qspi_data *dev_data = dev->data;
	uint32_t ahb_clock_freq;
	uint32_t prescaler = 0;
	int ret;

	/* Signals configuration */
	ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("QSPI pinctrl setup failed (%d)", ret);
		return ret;
	}

#if STM32_QSPI_RESET_GPIO
	flash_stm32_qspi_gpio_reset(dev);
#endif
#if STM32_QSPI_USE_DMA
	/*
	 * DMA configuration
	 * Due to use of QSPI HAL API in current driver,
	 * both HAL and Zephyr DMA drivers should be configured.
	 * The required configuration for Zephyr DMA driver should only provide
	 * the minimum information to inform the DMA slot will be in used and
	 * how to route callbacks.
	 */
	struct dma_config dma_cfg = dev_data->dma.cfg;
	static DMA_HandleTypeDef hdma;

	if (!device_is_ready(dev_data->dma.dev)) {
		LOG_ERR("%s device not ready", dev_data->dma.dev->name);
		return -ENODEV;
	}

	/* Proceed to the minimum Zephyr DMA driver init */
	dma_cfg.user_data = &hdma;
	/* HACK: This field is used to inform driver that it is overridden */
	dma_cfg.linked_channel = STM32_DMA_HAL_OVERRIDE;
	ret = dma_config(dev_data->dma.dev, dev_data->dma.channel, &dma_cfg);
	if (ret != 0) {
		return ret;
	}

	/* Proceed to the HAL DMA driver init */
	if (dma_cfg.source_data_size != dma_cfg.dest_data_size) {
		LOG_ERR("Source and destination data sizes not aligned");
		return -EINVAL;
	}

	/* Map data size (bytes) to the HAL alignment lookup tables */
	int index = find_lsb_set(dma_cfg.source_data_size) - 1;

	hdma.Init.PeriphDataAlignment = table_p_size[index];
	hdma.Init.MemDataAlignment = table_m_size[index];
	hdma.Init.PeriphInc = DMA_PINC_DISABLE;
	hdma.Init.MemInc = DMA_MINC_ENABLE;
	hdma.Init.Mode = DMA_NORMAL;
	hdma.Init.Priority = table_priority[dma_cfg.channel_priority];
#ifdef CONFIG_DMA_STM32_V1
	/* TODO: Not tested in this configuration */
	hdma.Init.Channel = dma_cfg.dma_slot;
	hdma.Instance = __LL_DMA_GET_STREAM_INSTANCE(dev_data->dma.reg,
						     dev_data->dma.channel);
#else
	hdma.Init.Request = dma_cfg.dma_slot;
#ifdef CONFIG_DMAMUX_STM32
	/* HAL expects a valid DMA channel (not a DMAMUX channel) */
	hdma.Instance = __LL_DMA_GET_CHANNEL_INSTANCE(dev_data->dma.reg,
						      dev_data->dma.channel);
#else
	hdma.Instance = __LL_DMA_GET_CHANNEL_INSTANCE(dev_data->dma.reg,
						      dev_data->dma.channel-1);
#endif
#endif /* CONFIG_DMA_STM32_V1 */

	/* Initialize DMA HAL */
	__HAL_LINKDMA(&dev_data->hqspi, hdma, hdma);
	HAL_DMA_Init(&hdma);

#endif /* STM32_QSPI_USE_DMA */

	/* Clock configuration */
	if (clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
			     (clock_control_subsys_t) &dev_cfg->pclken) != 0) {
		LOG_DBG("Could not enable QSPI clock");
		return -EIO;
	}
	if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
				   (clock_control_subsys_t) &dev_cfg->pclken,
				   &ahb_clock_freq)
< 0) {
		LOG_DBG("Failed to get AHB clock frequency");
		return -EIO;
	}

	/* Pick the smallest prescaler that keeps the kernel clock at or
	 * below the DTS-configured maximum frequency.
	 */
	for (; prescaler <= STM32_QSPI_CLOCK_PRESCALER_MAX; prescaler++) {
		uint32_t clk = ahb_clock_freq / (prescaler + 1);

		if (clk <= dev_cfg->max_frequency) {
			break;
		}
	}
	__ASSERT_NO_MSG(prescaler <= STM32_QSPI_CLOCK_PRESCALER_MAX);

	/* Initialize QSPI HAL */
	dev_data->hqspi.Init.ClockPrescaler = prescaler;
	/* Give a bit position from 0 to 31 to the HAL init minus 1 for the DCR1 reg */
	dev_data->hqspi.Init.FlashSize = find_lsb_set(dev_cfg->flash_size) - 2;
#if DT_PROP(DT_NODELABEL(quadspi), dual_flash) && defined(QUADSPI_CR_DFM)
	/*
	 * When the DTS has <dual-flash>, it means Dual Flash Mode
	 * Even in DUAL flash config, the SDFP is read from one single quad-NOR
	 * else the magic nb is wrong (0x46465353)
	 * That means that the Dual Flash config is set after the SFDP sequence
	 */
	dev_data->hqspi.Init.SampleShifting = QSPI_SAMPLE_SHIFTING_HALFCYCLE;
	dev_data->hqspi.Init.ChipSelectHighTime = QSPI_CS_HIGH_TIME_3_CYCLE;
	dev_data->hqspi.Init.DualFlash = QSPI_DUALFLASH_DISABLE;
	/* Set Dual Flash Mode only on MemoryMapped */
	dev_data->hqspi.Init.FlashID = QSPI_FLASH_ID_1;
#endif /* dual_flash */

	HAL_QSPI_Init(&dev_data->hqspi);

#if DT_NODE_HAS_PROP(DT_NODELABEL(quadspi), flash_id) && \
	defined(QUADSPI_CR_FSEL)
	/*
	 * Some stm32 mcu with quadspi (like stm32l47x or stm32l48x)
	 * does not support Dual-Flash Mode
	 */
	uint8_t qspi_flash_id = DT_PROP(DT_NODELABEL(quadspi), flash_id);

	HAL_QSPI_SetFlashID(&dev_data->hqspi,
			    (qspi_flash_id - 1) << QUADSPI_CR_FSEL_Pos);
#endif
	/* Initialize semaphores */
	k_sem_init(&dev_data->sem, 1, 1);
	k_sem_init(&dev_data->sync, 0, 1);

	/* Run IRQ init */
	dev_cfg->irq_config(dev);

#if STM32_QSPI_RESET_CMD
	flash_stm32_qspi_send_reset(dev);
	k_busy_wait(DT_INST_PROP(0, reset_cmd_wait));
#endif

	/* Run NOR init */
	const uint8_t decl_nph = 2;
	union {
		/* We only process BFP so use one parameter block */
		uint8_t raw[JESD216_SFDP_SIZE(decl_nph)];
		struct jesd216_sfdp_header sfdp;
	} u;
	const struct jesd216_sfdp_header *hp = &u.sfdp;

	ret = qspi_read_sfdp(dev, 0, u.raw, sizeof(u.raw));
	if (ret != 0) {
		LOG_ERR("SFDP read failed: %d", ret);
		return ret;
	}

	uint32_t magic = jesd216_sfdp_magic(hp);

	if (magic != JESD216_SFDP_MAGIC) {
		LOG_ERR("SFDP magic %08x invalid", magic);
		return -EINVAL;
	}

	LOG_INF("%s: SFDP v %u.%u AP %x with %u PH", dev->name,
		hp->rev_major, hp->rev_minor, hp->access, 1 + hp->nph);

	/* Walk the parameter headers; only the BFP table is processed */
	const struct jesd216_param_header *php = hp->phdr;
	const struct jesd216_param_header *phpe = php + MIN(decl_nph, 1 + hp->nph);

	while (php != phpe) {
		uint16_t id = jesd216_param_id(php);

		LOG_INF("PH%u: %04x rev %u.%u: %u DW @ %x",
			(php - hp->phdr), id, php->rev_major, php->rev_minor,
			php->len_dw, jesd216_param_addr(php));

		if (id == JESD216_SFDP_PARAM_ID_BFP) {
			union {
				uint32_t dw[20];
				struct jesd216_bfp bfp;
			} u2;
			const struct jesd216_bfp *bfp = &u2.bfp;

			ret = qspi_read_sfdp(dev, jesd216_param_addr(php),
					     (uint8_t *)u2.dw,
					     MIN(sizeof(uint32_t) * php->len_dw, sizeof(u2.dw)));
			if (ret == 0) {
				ret = spi_nor_process_bfp(dev, php, bfp);
			}

			if (ret != 0) {
				LOG_ERR("SFDP BFP failed: %d", ret);
				break;
			}
		}
		++php;
	}

#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	ret = setup_pages_layout(dev);
	if (ret != 0) {
		LOG_ERR("layout setup failed: %d", ret);
		return -ENODEV;
	}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

#ifdef CONFIG_STM32_MEMMAP
#if DT_PROP(DT_NODELABEL(quadspi), dual_flash) && defined(QUADSPI_CR_DFM)
	/*
	 * When the DTS has dual_flash, it means Dual Flash Mode for Memory MAPPED
	 * Force Dual Flash mode now, after the SFDP sequence which is reading
	 * one quad-NOR only
	 */
	MODIFY_REG(dev_data->hqspi.Instance->CR, (QUADSPI_CR_DFM), QSPI_DUALFLASH_ENABLE);
	LOG_DBG("Dual Flash Mode");
#endif /* dual_flash */

	ret = stm32_qspi_set_memory_mapped(dev);
	if (ret != 0) {
		LOG_ERR("Failed to enable memory-mapped mode: %d", ret);
		return ret;
	}
	LOG_INF("Memory-mapped NOR quad-flash at 0x%lx (0x%x bytes)",
		(long)(STM32_QSPI_BASE_ADDRESS), dev_cfg->flash_size);
#else
	LOG_INF("NOR quad-flash at 0x%lx (0x%x 
bytes)", (long)(STM32_QSPI_BASE_ADDRESS), dev_cfg->flash_size);
#endif

	return 0;
}

/* Devicetree helpers describing the (optional) DMA channel */
#define DMA_CHANNEL_CONFIG(node, dir)					\
	DT_DMAS_CELL_BY_NAME(node, dir, channel_config)

#define QSPI_DMA_CHANNEL_INIT(node, dir)				\
	.dev = DEVICE_DT_GET(DT_DMAS_CTLR(node)),			\
	.channel = DT_DMAS_CELL_BY_NAME(node, dir, channel),		\
	.reg = (DMA_TypeDef *)DT_REG_ADDR(				\
			DT_PHANDLE_BY_NAME(node, dmas, dir)),		\
	.cfg = {							\
		.dma_slot = DT_DMAS_CELL_BY_NAME(node, dir, slot),	\
		.source_data_size = STM32_DMA_CONFIG_PERIPHERAL_DATA_SIZE( \
					DMA_CHANNEL_CONFIG(node, dir)),	\
		.dest_data_size = STM32_DMA_CONFIG_MEMORY_DATA_SIZE(	\
					DMA_CHANNEL_CONFIG(node, dir)),	\
		.channel_priority = STM32_DMA_CONFIG_PRIORITY(		\
					DMA_CHANNEL_CONFIG(node, dir)),	\
		.dma_callback = qspi_dma_callback,			\
	},								\

#define QSPI_DMA_CHANNEL(node, dir)					\
	.dma = {							\
		COND_CODE_1(DT_DMAS_HAS_NAME(node, dir),		\
			(QSPI_DMA_CHANNEL_INIT(node, dir)),		\
			(NULL))						\
	},

#define QSPI_FLASH_MODULE(drv_id, flash_id)				\
	(DT_DRV_INST(drv_id), qspi_nor_flash_##flash_id)

static void flash_stm32_qspi_irq_config_func(const struct device *dev);

/* Resolve write opcode / QER type from DTS, with fallbacks */
#define DT_WRITEOC_PROP_OR(inst, default_value)					\
	COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, writeoc),			\
		    (_CONCAT(SPI_NOR_CMD_, DT_STRING_TOKEN(DT_DRV_INST(inst), writeoc))), \
		    ((default_value)))

#define DT_QER_PROP_OR(inst, default_value)					\
	COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, quad_enable_requirements),	\
		    (_CONCAT(JESD216_DW15_QER_VAL_,				\
			     DT_STRING_TOKEN(DT_DRV_INST(inst), quad_enable_requirements))), \
		    ((default_value)))

#define STM32_QSPI_NODE DT_INST_PARENT(0)

PINCTRL_DT_DEFINE(STM32_QSPI_NODE);

static const struct flash_stm32_qspi_config flash_stm32_qspi_cfg = {
	.regs = (QUADSPI_TypeDef *)DT_REG_ADDR(STM32_QSPI_NODE),
	.pclken = {
		.enr = DT_CLOCKS_CELL(STM32_QSPI_NODE, bits),
		.bus = DT_CLOCKS_CELL(STM32_QSPI_NODE, bus)
	},
	.irq_config = flash_stm32_qspi_irq_config_func,
	.flash_size = DT_INST_REG_ADDR_BY_IDX(0, 1) << STM32_QSPI_DOUBLE_FLASH,
	.max_frequency = DT_INST_PROP(0, qspi_max_frequency),
	.pcfg = PINCTRL_DT_DEV_CONFIG_GET(STM32_QSPI_NODE),
#if STM32_QSPI_RESET_GPIO
	.reset = GPIO_DT_SPEC_INST_GET(0, reset_gpios),
#endif
#if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_qspi_nor), jedec_id)
	.jedec_id = DT_INST_PROP(0, jedec_id),
#endif /* jedec_id */
};

static struct flash_stm32_qspi_data flash_stm32_qspi_dev_data = {
	.hqspi = {
		.Instance = (QUADSPI_TypeDef *)DT_REG_ADDR(STM32_QSPI_NODE),
		.Init = {
			.FifoThreshold = STM32_QSPI_FIFO_THRESHOLD,
			.SampleShifting = QSPI_SAMPLE_SHIFTING_NONE,
			.ChipSelectHighTime = QSPI_CS_HIGH_TIME_1_CYCLE,
			.ClockMode = QSPI_CLOCK_MODE_0,
		},
	},
	.qer_type = DT_QER_PROP_OR(0, JESD216_DW15_QER_VAL_S1B6),
	.qspi_write_cmd = DT_WRITEOC_PROP_OR(0, SPI_NOR_CMD_PP_1_4_4),
	QSPI_DMA_CHANNEL(STM32_QSPI_NODE, tx_rx)
};

DEVICE_DT_INST_DEFINE(0, &flash_stm32_qspi_init, NULL,
		      &flash_stm32_qspi_dev_data, &flash_stm32_qspi_cfg,
		      POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY,
		      &flash_stm32_qspi_driver_api);

/* Connect and enable the QSPI IRQ for instance 0 */
static void flash_stm32_qspi_irq_config_func(const struct device *dev)
{
	IRQ_CONNECT(DT_IRQN(STM32_QSPI_NODE), DT_IRQ(STM32_QSPI_NODE, priority),
		    flash_stm32_qspi_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_IRQN(STM32_QSPI_NODE));
}
#endif
```
/content/code_sandbox/drivers/flash/flash_stm32_qspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,638
```c
/*
 *
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(flash_stm32generic, CONFIG_FLASH_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <zephyr/sys/barrier.h>
#include <soc.h>

#include "flash_stm32.h"

/* Width of one flash program unit, selected by the SoC write block size */
#if FLASH_STM32_WRITE_BLOCK_SIZE == 8
typedef uint64_t flash_prg_t;
#elif FLASH_STM32_WRITE_BLOCK_SIZE == 4
typedef uint32_t flash_prg_t;
#elif FLASH_STM32_WRITE_BLOCK_SIZE == 2
typedef uint16_t flash_prg_t;
#elif FLASH_STM32_WRITE_BLOCK_SIZE == 1
typedef uint8_t flash_prg_t;
#else
#error Unknown write block size
#endif

/* Erased-cell value differs between the CR_PER and PECR flash families */
#if defined(FLASH_CR_PER)
#define FLASH_ERASED_VALUE ((flash_prg_t)-1)
#elif defined(FLASH_PECR_ERASE)
#define FLASH_ERASED_VALUE 0
#else
#error Unknown erase value
#endif

/* Page index containing the given byte offset */
static unsigned int get_page(off_t offset)
{
	return offset / FLASH_PAGE_SIZE;
}

#if defined(FLASH_CR_PER)
static int is_flash_locked(FLASH_TypeDef *regs)
{
	return !!(regs->CR & FLASH_CR_LOCK);
}

static void write_enable(FLASH_TypeDef *regs)
{
	regs->CR |= FLASH_CR_PG;
}

static void write_disable(FLASH_TypeDef *regs)
{
	regs->CR &= (~FLASH_CR_PG);
}

static void erase_page_begin(FLASH_TypeDef *regs, unsigned int page)
{
	/* Set the PER bit and select the page you wish to erase */
	regs->CR |= FLASH_CR_PER;
	regs->AR = FLASH_STM32_BASE_ADDRESS + page * FLASH_PAGE_SIZE;

	barrier_dsync_fence_full();

	/* Set the STRT bit */
	regs->CR |= FLASH_CR_STRT;
}

static void erase_page_end(FLASH_TypeDef *regs)
{
	regs->CR &= ~FLASH_CR_PER;
}

#else

static int is_flash_locked(FLASH_TypeDef *regs)
{
	return !!(regs->PECR & FLASH_PECR_PRGLOCK);
}

static void write_enable(FLASH_TypeDef *regs)
{
	/* Only used for half-page programming on L1x */
#if !defined(CONFIG_SOC_SERIES_STM32L1X)
	regs->PECR |= FLASH_PECR_PROG;
#endif
}

static void write_disable(FLASH_TypeDef *regs)
{
	/* Clear the PG bit */
	regs->PECR &= ~FLASH_PECR_PROG;
}

static void erase_page_begin(FLASH_TypeDef *regs, unsigned int page)
{
	volatile flash_prg_t *page_base = (flash_prg_t *)(
		FLASH_STM32_BASE_ADDRESS + page * FLASH_PAGE_SIZE);
	/* Enable programming in erase mode. An erase is triggered by
	 * writing 0 to the first word of a page.
	 */
	regs->PECR |= FLASH_PECR_ERASE;
	regs->PECR |= FLASH_PECR_PROG;

	barrier_dsync_fence_full();

	*page_base = 0;
}

static void erase_page_end(FLASH_TypeDef *regs)
{
	/* Disable programming */
	regs->PECR &= ~FLASH_PECR_PROG;
	regs->PECR &= ~FLASH_PECR_ERASE;
}

#endif

/* Program one flash_prg_t unit at the given offset.
 * The target location must already be erased.
 */
static int write_value(const struct device *dev, off_t offset,
		       flash_prg_t val)
{
	volatile flash_prg_t *flash = (flash_prg_t *)(
		offset + FLASH_STM32_BASE_ADDRESS);
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc;

	/* if the control register is locked, do not fail silently */
	if (is_flash_locked(regs)) {
		LOG_ERR("Flash is locked");
		return -EIO;
	}

	/* Check that no Flash main memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Check if this half word is erased */
	if (*flash != FLASH_ERASED_VALUE) {
		LOG_ERR("Flash location not erased");
		return -EIO;
	}

	/* Enable writing */
	write_enable(regs);

	/* Make sure the register write has taken effect */
	barrier_dsync_fence_full();

	/* Perform the data write operation at the desired memory address */
	*flash = val;

	/* Wait until the BSY bit is cleared */
	rc = flash_stm32_wait_flash_idle(dev);

	/* Disable writing */
	write_disable(regs);

	return rc;
}

/* Erase all pages touched by [offset, offset+len). */
int flash_stm32_block_erase_loop(const struct device *dev,
				 unsigned int offset,
				 unsigned int len)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int i, rc = 0;

	/* if the control register is locked, do not fail silently */
	if (is_flash_locked(regs)) {
		LOG_ERR("Flash is locked");
		return -EIO;
	}

	/* Check that no Flash memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	for (i = get_page(offset); i <= get_page(offset + len - 1); ++i) {
		erase_page_begin(regs, i);
		barrier_dsync_fence_full();
		rc = flash_stm32_wait_flash_idle(dev);
		erase_page_end(regs);

		if (rc < 0) {
			break;
		}
	}

	return rc;
}

/* Program a buffer one flash_prg_t unit at a time.
 * NOTE(review): a tail of len not a multiple of sizeof(flash_prg_t) is
 * not written — presumably the flash_stm32 layer guarantees aligned
 * lengths; confirm against the callers.
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	int i, rc = 0;
	flash_prg_t value;

	for (i = 0; i < len / sizeof(flash_prg_t); i++) {
		/* memcpy avoids unaligned access into the source buffer */
		memcpy(&value,
		       (const uint8_t *)data + i * sizeof(flash_prg_t),
		       sizeof(flash_prg_t));
		rc = write_value(dev, offset + i * sizeof(flash_prg_t), value);
		if (rc < 0) {
			return rc;
		}
	}

	return rc;
}

/* Report the uniform page layout; computed once and cached in a static. */
void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	static struct flash_pages_layout flash_layout = {
		.pages_count = 0,
		.pages_size = 0,
	};

	ARG_UNUSED(dev);

	if (flash_layout.pages_count == 0) {
#if defined(CONFIG_SOC_SERIES_STM32F3X)
		flash_layout.pages_count =
			DT_REG_SIZE(DT_INST(0, soc_nv_flash)) / FLASH_PAGE_SIZE;
#else
		flash_layout.pages_count = (CONFIG_FLASH_SIZE * 1024) /
			FLASH_PAGE_SIZE;
#endif
		flash_layout.pages_size = FLASH_PAGE_SIZE;
	}

	*layout = &flash_layout;
	*layout_size = 1;
}
```
/content/code_sandbox/drivers/flash/flash_stm32f1x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,482
```objective-c
/*
 *
 */

#ifndef ZEPHYR_DRIVERS_FLASH_XSPI_STM32_H_
#define ZEPHYR_DRIVERS_FLASH_XSPI_STM32_H_

/* Macro to check if any xspi device has a domain clock or more */
#define STM32_XSPI_DOMAIN_CLOCK_INST_SUPPORT(inst) \
	DT_CLOCKS_HAS_IDX(DT_INST_PARENT(inst), 1) ||

#define STM32_XSPI_INST_DEV_DOMAIN_CLOCK_SUPPORT \
	(DT_INST_FOREACH_STATUS_OKAY(STM32_XSPI_DOMAIN_CLOCK_INST_SUPPORT) 0)

/* This symbol takes the value 1 if device instance has a domain clock in its dts */
#if STM32_XSPI_INST_DEV_DOMAIN_CLOCK_SUPPORT
#define STM32_XSPI_DOMAIN_CLOCK_SUPPORT 1
#else
#define STM32_XSPI_DOMAIN_CLOCK_SUPPORT 0
#endif

#define STM32_XSPI_FIFO_THRESHOLD 4U

/* Valid range is [0, 255] */
#define STM32_XSPI_CLOCK_PRESCALER_MIN 0U
#define STM32_XSPI_CLOCK_PRESCALER_MAX 255U
#define STM32_XSPI_CLOCK_COMPUTE(bus_freq, prescaler) ((bus_freq) / ((prescaler) + 1U))

/* Max Time value during reset or erase operation */
#define STM32_XSPI_RESET_MAX_TIME 100U
#define STM32_XSPI_BULK_ERASE_MAX_TIME 460000U
#define STM32_XSPI_SECTOR_ERASE_MAX_TIME 1000U
#define STM32_XSPI_SUBSECTOR_4K_ERASE_MAX_TIME 400U
#define STM32_XSPI_WRITE_REG_MAX_TIME 40U

/* used as default value for DTS writeoc */
#define SPI_NOR_WRITEOC_NONE 0xFF

#if STM32_XSPI_USE_DMA
/* Lookup table to set dma priority from the DTS */
static const uint32_t table_priority[] = {
	DMA_LOW_PRIORITY_LOW_WEIGHT,
	DMA_LOW_PRIORITY_MID_WEIGHT,
	DMA_LOW_PRIORITY_HIGH_WEIGHT,
	DMA_HIGH_PRIORITY,
};

/* Lookup table to set dma channel direction from the DTS */
static const uint32_t table_direction[] = {
	DMA_MEMORY_TO_MEMORY,
	DMA_MEMORY_TO_PERIPH,
	DMA_PERIPH_TO_MEMORY,
};

/* Per-direction DMA channel description (controller, channel, config) */
struct stream {
	DMA_TypeDef *reg;
	const struct device *dev;
	uint32_t channel;
	struct dma_config cfg;
	uint8_t priority;
	bool src_addr_increment;
	bool dst_addr_increment;
};
#endif /* STM32_XSPI_USE_DMA */

typedef void (*irq_config_func_t)(const struct device *dev);

/* Immutable per-instance configuration, filled from devicetree */
struct flash_stm32_xspi_config {
	const struct stm32_pclken *pclken;
	size_t pclk_len;
	irq_config_func_t irq_config;
	size_t flash_size;
	uint32_t max_frequency;
	int data_mode; /* SPI or QSPI or OSPI */
	int data_rate; /* DTR or STR */
	const struct pinctrl_dev_config *pcfg;
#if STM32_XSPI_RESET_GPIO
	const struct gpio_dt_spec reset;
#endif /* STM32_XSPI_RESET_GPIO */
};

/* Mutable per-instance state */
struct flash_stm32_xspi_data {
	/* XSPI handle is modifiable ; so part of data struct */
	XSPI_HandleTypeDef hxspi;
	/* Mutual exclusion for transfers */
	struct k_sem sem;
	/* Signalled from HAL completion callbacks */
	struct k_sem sync;
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	struct flash_pages_layout layout;
#endif
	struct jesd216_erase_type erase_types[JESD216_NUM_ERASE_TYPES];
	/* Number of bytes per page */
	uint16_t page_size;
	/* Address width in bytes */
	uint8_t address_width;
	/* Read operation dummy cycles */
	uint8_t read_dummy;
	uint32_t read_opcode;
	uint32_t write_opcode;
	enum jesd216_mode_type read_mode;
	enum jesd216_dw15_qer_type qer_type;
#if defined(CONFIG_FLASH_JESD216_API)
	/* Table to hold the jedec Read ID given by the octoFlash const */
	uint8_t jedec_id[JESD216_READ_ID_LEN];
#endif /* CONFIG_FLASH_JESD216_API */
	/* Result code recorded by HAL callbacks */
	int cmd_status;
#if STM32_XSPI_USE_DMA
	struct stream dma_tx;
	struct stream dma_rx;
#endif /* STM32_XSPI_USE_DMA */
};

#endif /* ZEPHYR_DRIVERS_FLASH_XSPI_STM32_H_ */
```
/content/code_sandbox/drivers/flash/flash_stm32_xspi.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
880
```unknown # # # menuconfig SOC_FLASH_NRF_RRAM bool "Nordic Semiconductor flash driver for nRF RRAM" default y depends on DT_HAS_NORDIC_RRAM_CONTROLLER_ENABLED select NRFX_RRAMC if !BUILD_WITH_TFM select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_NO_EXPLICIT_ERASE select FLASH_NRF_FORCE_ALT select MPU_ALLOW_FLASH_WRITE if ARM_MPU help Enables Nordic Semiconductor flash driver for nRF RRAM controllers. The entire contiguous block of RRAM gets logically divided into pages, but partial erase is always supported. if SOC_FLASH_NRF_RRAM config NRF_RRAM_WRITE_BUFFER_SIZE int "Internal write-buffer size" default 32 if !SOC_FLASH_NRF_RADIO_SYNC_NONE default 0 range 0 32 help Number of 128-bit words. Maximum buffer size can be configured to the value of 32 (128-bit words). Set 0 to disable write-buffer. config NRF_RRAM_READYNEXT_TIMEOUT_VALUE int "Preload timeout value for waiting for a next write in RRAMC clock cycles" default 128 range 0 4095 help Set 0 to disable ready next timeout counter. choice SOC_FLASH_NRF_RADIO_SYNC_CHOICE prompt "Nordic nRFx flash driver synchronization" default SOC_FLASH_NRF_RADIO_SYNC_TICKER if BT_LL_SW_SPLIT default SOC_FLASH_NRF_RADIO_SYNC_NONE help synchronization between flash memory driver and radio. config SOC_FLASH_NRF_RADIO_SYNC_TICKER bool "Nordic nRFx flash driver synchronized with radio" depends on BT_LL_SW_SPLIT help Enable synchronization between flash memory driver and radio using BLE LL controller ticker API. config SOC_FLASH_NRF_RADIO_SYNC_NONE bool "none" help disable synchronization between flash memory driver and radio. endchoice config SOC_FLASH_NRF_TIMEOUT_MULTIPLIER int "Multiplier for flash operation timeouts [x0.1]" depends on !SOC_FLASH_NRF_RADIO_SYNC_NONE default 15 help This is a multiplier that will be divided by 10 that is applied to the flash erase and write operations timeout. The base for the multiplication would allow erasing all nRF flash pages in blocking mode. 
config NRF_RRAM_REGION_ADDRESS_RESOLUTION hex default 0x400 help RRAMC's region protection address resolution. Applies to region with configurable start address. config NRF_RRAM_REGION_SIZE_UNIT hex default 0x400 help Base unit for the size of RRAMC's region protection. endif # SOC_FLASH_NRF_RRAM ```
/content/code_sandbox/drivers/flash/Kconfig.nrf_rram
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
573
```unknown
# Kconfig options for the NXP LPC IAP flash shim driver.

config SOC_FLASH_LPC
	bool "LPC flash shim driver"
	default y
	depends on DT_HAS_NXP_IAP_FMC11_ENABLED || \
		   DT_HAS_NXP_IAP_FMC54_ENABLED
	select FLASH_HAS_PAGE_LAYOUT
	select FLASH_HAS_DRIVER_ENABLED
	select FLASH_HAS_EXPLICIT_ERASE
	help
	  Enables the LPC IAP flash shim driver.
	  WARNING: This driver will disable the system interrupts for
	  the duration of the flash erase/write operations. This will
	  have an impact on the overall system performance - whether
	  this is acceptable or not will depend on the use case.
```
/content/code_sandbox/drivers/flash/Kconfig.lpc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
129
```c /* * Emulate a memory device on MSPI emulator bus */ #define DT_DRV_COMPAT zephyr_mspi_emul_flash #include <zephyr/logging/log.h> #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/emul.h> #include <zephyr/drivers/mspi.h> #include <zephyr/drivers/mspi_emul.h> #include <zephyr/drivers/flash.h> #include "spi_nor.h" LOG_MODULE_REGISTER(zephyr_mspi_emul_flash, CONFIG_FLASH_LOG_LEVEL); /* add else if for other SoC platforms */ #if defined(CONFIG_SOC_POSIX) typedef struct mspi_timing_cfg mspi_timing_cfg; typedef enum mspi_timing_param mspi_timing_param; #endif struct flash_mspi_emul_device_config { uint32_t size; struct flash_parameters flash_param; struct flash_pages_layout page_layout; struct mspi_dev_id dev_id; struct mspi_dev_cfg tar_dev_cfg; struct mspi_xip_cfg tar_xip_cfg; struct mspi_scramble_cfg tar_scramble_cfg; bool sw_multi_periph; }; struct flash_mspi_emul_device_data { const struct device *bus; struct mspi_dev_cfg dev_cfg; struct mspi_xip_cfg xip_cfg; struct mspi_scramble_cfg scramble_cfg; mspi_timing_cfg timing_cfg; struct mspi_xfer xfer; struct mspi_xfer_packet packet; struct k_sem lock; uint8_t *mem; }; /** * Acquire the device lock. * * @param flash MSPI emulation flash device. */ static void acquire(const struct device *flash) { const struct flash_mspi_emul_device_config *cfg = flash->config; struct flash_mspi_emul_device_data *data = flash->data; k_sem_take(&data->lock, K_FOREVER); if (cfg->sw_multi_periph) { while (mspi_dev_config(data->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_ALL, &data->dev_cfg)) { ; } } else { while (mspi_dev_config(data->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_NONE, NULL)) { ; } } } /** * Release the device lock. * * @param flash MSPI emulation flash device. 
*/ static void release(const struct device *flash) { struct flash_mspi_emul_device_data *data = flash->data; while (mspi_get_channel_status(data->bus, 0)) { } k_sem_give(&data->lock); } /** * API implementation of emul_mspi_dev_api_transceive transceive. * * @param target Pointer to MSPI device emulator. * @param dev_id Pointer to the device ID structure from a device. * @param xfer Pointer to the MSPI transfer started by dev_id. * * @retval 0 if successful. * @retval -ESTALE device ID don't match, need to call mspi_dev_config first. * @retval -Error transfer failed. */ static int emul_mspi_device_transceive(const struct emul *target, const struct mspi_xfer_packet *packets, uint32_t num_packet, bool async, uint32_t timeout) { ARG_UNUSED(timeout); const struct flash_mspi_emul_device_config *cfg = target->dev->config; struct flash_mspi_emul_device_data *data = target->dev->data; struct emul_mspi_driver_api *api = (struct emul_mspi_driver_api *)data->bus->api; __ASSERT_NO_MSG(api); __ASSERT_NO_MSG(api->trigger_event); for (uint32_t count = 0; count < num_packet; ++count) { const struct mspi_xfer_packet *packet = &packets[count]; if (packet->address > cfg->size || packet->address + packet->num_bytes > cfg->size) { return -ENOMEM; } if (packet->dir == MSPI_RX) { memcpy(packet->data_buf, data->mem + packet->address, packet->num_bytes); } else if (packet->dir == MSPI_TX) { memcpy(data->mem + packet->address, packet->data_buf, packet->num_bytes); } if (async) { if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) { api->trigger_event(data->bus, MSPI_BUS_XFER_COMPLETE); } } } return 0; } static int flash_mspi_emul_erase(const struct device *flash, off_t offset, size_t size) { const struct flash_mspi_emul_device_config *cfg = flash->config; struct flash_mspi_emul_device_data *data = flash->data; const size_t num_sectors = size / SPI_NOR_SECTOR_SIZE; const size_t num_blocks = size / SPI_NOR_BLOCK_SIZE; int i; acquire(flash); if (offset % SPI_NOR_SECTOR_SIZE) { LOG_ERR("Invalid 
offset"); return -EINVAL; } if (size % SPI_NOR_SECTOR_SIZE) { LOG_ERR("Invalid size"); return -EINVAL; } if ((offset == 0) && (size == cfg->size)) { memset(data->mem, cfg->flash_param.erase_value, size); } else if ((0 == (offset % SPI_NOR_BLOCK_SIZE)) && (0 == (size % SPI_NOR_BLOCK_SIZE))) { for (i = 0; i < num_blocks; i++) { memset(data->mem + offset, cfg->flash_param.erase_value, SPI_NOR_BLOCK_SIZE); offset += SPI_NOR_BLOCK_SIZE; } } else { for (i = 0; i < num_sectors; i++) { memset(data->mem + offset, cfg->flash_param.erase_value, SPI_NOR_SECTOR_SIZE); offset += SPI_NOR_SECTOR_SIZE; } } release(flash); return 0; } /** * API implementation of flash write. * * @param flash Pointer to MSPI flash device. * @param offset Flash device address. * @param wdata Pointer to the write data buffer. * @param len Number of bytes to write. * * @retval 0 if successful. * @retval -Error flash read fail. */ static int flash_mspi_emul_write(const struct device *flash, off_t offset, const void *wdata, size_t len) { const struct flash_mspi_emul_device_config *cfg = flash->config; struct flash_mspi_emul_device_data *data = flash->data; int ret; uint8_t *src = (uint8_t *)wdata; int i; acquire(flash); data->xfer.async = false; data->xfer.xfer_mode = MSPI_DMA; data->xfer.tx_dummy = data->dev_cfg.tx_dummy; data->xfer.cmd_length = data->dev_cfg.cmd_length; data->xfer.addr_length = data->dev_cfg.addr_length; data->xfer.hold_ce = false; data->xfer.priority = 1; data->xfer.packets = &data->packet; data->xfer.num_packet = 1; data->xfer.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE; while (len) { /* If the offset isn't a multiple of the NOR page size, we first need * to write the remaining part that fits, otherwise the write could * be wrapped around within the same page */ i = MIN(SPI_NOR_PAGE_SIZE - (offset % SPI_NOR_PAGE_SIZE), len); data->packet.dir = MSPI_TX; data->packet.cmd = data->dev_cfg.write_cmd; data->packet.address = offset; data->packet.data_buf = src; data->packet.num_bytes 
= i; LOG_DBG("Write %d bytes to 0x%08zx", i, (ssize_t)offset); ret = mspi_transceive(data->bus, &cfg->dev_id, (const struct mspi_xfer *)&data->xfer); if (ret) { LOG_ERR("%u, MSPI write transaction failed with code: %d", __LINE__, ret); return -EIO; } /* emulate flash write busy wait */ k_busy_wait(100); src += i; offset += i; len -= i; } release(flash); return ret; } /** * API implementation of flash read. * * @param flash Pointer to MSPI flash device. * @param offset Flash device address. * @param rdata Pointer to the read data buffer. * @param len Number of bytes to read. * * @retval 0 if successful. * @retval -Error flash read fail. */ static int flash_mspi_emul_read(const struct device *flash, off_t offset, void *rdata, size_t len) { const struct flash_mspi_emul_device_config *cfg = flash->config; struct flash_mspi_emul_device_data *data = flash->data; int ret; acquire(flash); data->packet.dir = MSPI_RX; data->packet.cmd = data->dev_cfg.read_cmd; data->packet.address = offset; data->packet.data_buf = rdata; data->packet.num_bytes = len; data->xfer.async = false; data->xfer.xfer_mode = MSPI_DMA; data->xfer.rx_dummy = data->dev_cfg.rx_dummy; data->xfer.cmd_length = data->dev_cfg.cmd_length; data->xfer.addr_length = data->dev_cfg.addr_length; data->xfer.hold_ce = false; data->xfer.priority = 1; data->xfer.packets = &data->packet; data->xfer.num_packet = 1; data->xfer.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE; LOG_DBG("Read %d bytes from 0x%08zx", len, (ssize_t)offset); ret = mspi_transceive(data->bus, &cfg->dev_id, (const struct mspi_xfer *)&data->xfer); if (ret) { LOG_ERR("%u, MSPI read transaction failed with code: %d", __LINE__, ret); return -EIO; } release(flash); return ret; } /** * API implementation of flash get_parameters. * * @param flash Pointer to MSPI flash device. * * @retval @ref flash_parameters. 
*/ static const struct flash_parameters *flash_mspi_emul_get_parameters(const struct device *flash) { const struct flash_mspi_emul_device_config *cfg = flash->config; return &cfg->flash_param; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) /** * API implementation of flash pages_layout. * * @param flash Pointer to MSPI flash device. * @param layout @ref flash_pages_layout. * @param layout_size */ static void flash_mspi_emul_pages_layout(const struct device *flash, const struct flash_pages_layout **layout, size_t *layout_size) { const struct flash_mspi_emul_device_config *cfg = flash->config; *layout = &cfg->page_layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static const struct flash_driver_api flash_mspi_emul_device_api = { .erase = flash_mspi_emul_erase, .write = flash_mspi_emul_write, .read = flash_mspi_emul_read, .get_parameters = flash_mspi_emul_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_mspi_emul_pages_layout, #endif }; static const struct emul_mspi_device_api emul_mspi_dev_api = { .transceive = emul_mspi_device_transceive, }; /** * Set up a new MSPI device emulator * * @param emul The MSPI device emulator instance itself * @param bus The MSPI bus emulator instance * @return 0 If successful */ static int emul_mspi_device_init(const struct emul *emul_flash, const struct device *bus) { const struct flash_mspi_emul_device_config *cfg = emul_flash->dev->config; struct flash_mspi_emul_device_data *data = emul_flash->dev->data; data->bus = bus; if (mspi_dev_config(data->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_ALL, &cfg->tar_dev_cfg)) { LOG_ERR("%u, Failed to config mspi controller", __LINE__); return -EIO; } data->dev_cfg = cfg->tar_dev_cfg; #if CONFIG_MSPI_XIP if (cfg->tar_xip_cfg.enable) { if (mspi_xip_config(data->bus, &cfg->dev_id, &cfg->tar_xip_cfg)) { LOG_ERR("%u, Failed to enable XIP.", __LINE__); return -EIO; } data->xip_cfg = cfg->tar_xip_cfg; } #endif #if CONFIG_MSPI_SCRAMBLE if (cfg->tar_scramble_cfg.enable) { if 
(mspi_scramble_config(data->bus, &cfg->dev_id, &cfg->tar_scramble_cfg)) { LOG_ERR("%u, Failed to enable scrambling.", __LINE__); return -EIO; } data->scramble_cfg = cfg->tar_scramble_cfg; } #endif #if CONFIG_MSPI_TIMING if (mspi_timing_config(data->bus, &cfg->dev_id, MSPI_TIMING_PARAM_DUMMY, &data->timing_cfg)) { LOG_ERR("%u, Failed to configure timing.", __LINE__); return -EIO; } #endif release(emul_flash->dev); return 0; } static int flash_mspi_emul_device_init_stub(const struct device *dev) { ARG_UNUSED(dev); return 0; } #define FLASH_MSPI_EMUL_DEVICE(n) \ static uint8_t flash_mspi_emul_device_mem##n[DT_INST_PROP(n, size) / 8]; \ static const struct flash_mspi_emul_device_config flash_mspi_emul_device_config_##n = { \ .size = DT_INST_PROP(n, size) / 8, \ .flash_param = \ { \ .write_block_size = 1, \ .erase_value = 0xff, \ }, \ .page_layout = \ { \ .pages_count = DT_INST_PROP(n, size) / 8 / SPI_NOR_PAGE_SIZE,\ .pages_size = SPI_NOR_PAGE_SIZE, \ }, \ .dev_id = MSPI_DEVICE_ID_DT_INST(n), \ .tar_dev_cfg = MSPI_DEVICE_CONFIG_DT_INST(n), \ .tar_xip_cfg = MSPI_XIP_CONFIG_DT_INST(n), \ .tar_scramble_cfg = MSPI_SCRAMBLE_CONFIG_DT_INST(n), \ .sw_multi_periph = DT_PROP(DT_INST_BUS(n), software_multiperipheral) \ }; \ static struct flash_mspi_emul_device_data flash_mspi_emul_device_data_##n = { \ .lock = Z_SEM_INITIALIZER(flash_mspi_emul_device_data_##n.lock, 0, 1), \ .mem = (uint8_t *)flash_mspi_emul_device_mem##n, \ }; \ DEVICE_DT_INST_DEFINE(n, \ flash_mspi_emul_device_init_stub, \ NULL, \ &flash_mspi_emul_device_data_##n, \ &flash_mspi_emul_device_config_##n, \ POST_KERNEL, \ CONFIG_FLASH_INIT_PRIORITY, \ &flash_mspi_emul_device_api); #define EMUL_TEST(n) \ EMUL_DT_INST_DEFINE(n, \ emul_mspi_device_init, \ NULL, \ NULL, \ &emul_mspi_dev_api, \ NULL); DT_INST_FOREACH_STATUS_OKAY(EMUL_TEST); DT_INST_FOREACH_STATUS_OKAY(FLASH_MSPI_EMUL_DEVICE); ```
/content/code_sandbox/drivers/flash/flash_mspi_emul_device.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,543
```c /* * */ #define DT_DRV_COMPAT andestech_qspi_nor #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/flash.h> #include <zephyr/init.h> #include <string.h> #include <zephyr/logging/log.h> #include "flash_andes_qspi.h" #include "spi_nor.h" #include "jesd216.h" #include "flash_priv.h" LOG_MODULE_REGISTER(flash_andes, CONFIG_FLASH_LOG_LEVEL); /* Indicates that an access command includes bytes for the address. * If not provided the opcode is not followed by address bytes. */ #define ANDES_ACCESS_ADDRESSED BIT(0) /* Indicates that an access command is performing a write. If not * provided access is a read. */ #define ANDES_ACCESS_WRITE BIT(7) #define flash_andes_qspi_cmd_read(dev, opcode, dest, length) \ flash_andes_qspi_access(dev, opcode, 0, 0, dest, length) #define flash_andes_qspi_cmd_addr_read(dev, opcode, addr, dest, length) \ flash_andes_qspi_access(dev, opcode, ANDES_ACCESS_ADDRESSED, addr, \ dest, length) #define flash_andes_qspi_cmd_write(dev, opcode) \ flash_andes_qspi_access(dev, opcode, ANDES_ACCESS_WRITE, 0, NULL, 0) #define flash_andes_qspi_cmd_addr_write(dev, opcode, addr, src, length) \ flash_andes_qspi_access(dev, opcode, \ ANDES_ACCESS_WRITE | ANDES_ACCESS_ADDRESSED, \ addr, (void *)src, length) typedef void (*flash_andes_qspi_config_func_t)(void); struct flash_andes_qspi_config { flash_andes_qspi_config_func_t cfg_func; uint32_t base; uint32_t irq_num; struct flash_parameters parameters; bool xip; #if defined(CONFIG_FLASH_ANDES_QSPI_SFDP_DEVICETREE) uint8_t jedec_id[SPI_NOR_MAX_ID_LEN]; uint32_t flash_size; uint8_t bfp_len; const struct jesd216_bfp *bfp; #ifdef CONFIG_FLASH_PAGE_LAYOUT struct flash_pages_layout layout; #endif /* CONFIG_FLASH_PAGE_LAYOUT */ #endif /* CONFIG_FLASH_ANDES_QSPI_SFDP_DEVICETREE */ }; struct flash_andes_qspi_data { struct k_sem sem; struct k_sem device_sync_sem; uint32_t tx_fifo_size; uint32_t rx_fifo_size; uint8_t *tx_buf; uint8_t *rx_buf; uint32_t tx_len; uint32_t rx_len; 
uint32_t tx_ptr; /* write pointer */ uint32_t rx_ptr; /* read pointer */ struct jesd216_erase_type erase_types[JESD216_NUM_ERASE_TYPES]; uint16_t page_size; #ifdef CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME uint32_t flash_size; #ifdef CONFIG_FLASH_PAGE_LAYOUT struct flash_pages_layout layout; #endif /* CONFIG_FLASH_PAGE_LAYOUT */ #endif }; static int flash_andes_qspi_write_protection_set(const struct device *dev, bool write_protect); /* Get pointer to array of supported erase types. */ static inline const struct jesd216_erase_type * dev_erase_types(const struct device *dev) { const struct flash_andes_qspi_data *dev_data = dev->data; return dev_data->erase_types; } /* Get the size of the flash device. */ static inline uint32_t dev_flash_size(const struct device *dev) { #ifdef CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME const struct flash_andes_qspi_data *dev_data = dev->data; return dev_data->flash_size; #else /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ const struct flash_andes_qspi_config *config = dev->config; return config->flash_size; #endif /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ } /* Get the flash device page size. */ static inline uint16_t dev_page_size(const struct device *dev) { const struct flash_andes_qspi_data *dev_data = dev->data; return dev_data->page_size; } /* * @brief Send an SPI command * * @param dev Device struct * @param opcode The command to send * @param access flags that determine how the command is constructed. 
* @param addr The address to send * @param data The buffer to store or read the value * @param length The size of the buffer * @return 0 on success */ static int flash_andes_qspi_access(const struct device *const dev, uint8_t opcode, uint8_t access, off_t addr, void *data, size_t length) { struct flash_andes_qspi_data *dev_data = dev->data; const struct flash_andes_qspi_config *config = dev->config; uint32_t base = config->base; bool is_addressed = (access & ANDES_ACCESS_ADDRESSED) != 0U; bool is_write = (access & ANDES_ACCESS_WRITE) != 0U; int ret = 0; uint32_t tctrl, int_msk; /* Command phase enable */ tctrl = TCTRL_CMD_EN_MSK; if (is_addressed) { /* Enable and set ADDR len */ sys_write32((sys_read32(QSPI_TFMAT(base)) | (0x2 << TFMAT_ADDR_LEN_OFFSET)), QSPI_TFMAT(base)); sys_write32(addr, QSPI_ADDR(base)); /* Address phase enable */ tctrl |= TCTRL_ADDR_EN_MSK; } if (length == 0) { if ((opcode == FLASH_ANDES_CMD_4PP) || (opcode == FLASH_ANDES_CMD_4READ)) { goto exit; } tctrl |= TRNS_MODE_NONE_DATA; int_msk = IEN_END_MSK; } else if (is_write) { dev_data->tx_ptr = 0; dev_data->tx_buf = (uint8_t *)data; dev_data->tx_len = length; tctrl |= (TRNS_MODE_WRITE_ONLY | ((length - 1) << TCTRL_WR_TCNT_OFFSET)); int_msk = IEN_TX_FIFO_MSK | IEN_END_MSK; } else { dev_data->rx_ptr = 0; dev_data->rx_buf = (uint8_t *)data; tctrl |= (TRNS_MODE_READ_ONLY | ((length - 1) << TCTRL_RD_TCNT_OFFSET)); int_msk = IEN_RX_FIFO_MSK | IEN_END_MSK; } switch (opcode) { case FLASH_ANDES_CMD_4PP: tctrl = ((tctrl & ~TCTRL_TRNS_MODE_MSK) | DUAL_IO_MODE | TCTRL_ADDR_FMT_MSK | TCTRL_ADDR_EN_MSK | TRNS_MODE_WRITE_ONLY); break; case FLASH_ANDES_CMD_4READ: tctrl = ((tctrl & ~TCTRL_TRNS_MODE_MSK) | DUAL_IO_MODE | TCTRL_ADDR_FMT_MSK | TCTRL_ADDR_EN_MSK | TRNS_MODE_DUMMY_READ | DUMMY_CNT_3); break; case JESD216_CMD_READ_SFDP: tctrl = ((tctrl & ~TCTRL_TRNS_MODE_MSK) | TCTRL_ADDR_EN_MSK | TRNS_MODE_DUMMY_READ); break; default: break; } sys_write32(tctrl, QSPI_TCTRL(base)); /* Enable TX/RX FIFO interrupts */ 
sys_write32(int_msk, QSPI_INTEN(base)); /* write CMD register to send command*/ sys_write32(opcode, QSPI_CMD(base)); k_sem_take(&dev_data->device_sync_sem, K_FOREVER); exit: return ret; } /* Everything necessary to acquire owning access to the device. */ static void acquire_device(const struct device *dev) { struct flash_andes_qspi_data *dev_data = dev->data; k_sem_take(&dev_data->sem, K_FOREVER); } /* Everything necessary to release access to the device. */ static void release_device(const struct device *dev) { struct flash_andes_qspi_data *dev_data = dev->data; k_sem_give(&dev_data->sem); } /** * @brief Wait until the flash is ready * * @param dev The device structure * @return 0 on success, negative errno code otherwise */ static int flash_andes_qspi_wait_until_ready(const struct device *dev) { int ret; uint8_t reg; do { ret = flash_andes_qspi_cmd_read(dev, FLASH_ANDES_CMD_RDSR, &reg, 1); } while (!ret && (reg & FLASH_ANDES_WIP_BIT)); return ret; } #if defined(CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME) || \ defined(CONFIG_FLASH_JESD216_API) /* * @brief Read content from the SFDP hierarchy * * @note The device must be externally acquired before invoking this * function. * * @param dev Device struct * @param addr The address to send * @param data The buffer to store or read the value * @param length The size of the buffer * @return 0 on success, negative errno code otherwise */ static int read_sfdp(const struct device *const dev, off_t addr, void *data, size_t length) { /* READ_SFDP requires a 24-bit address followed by a single * byte for a wait state. This is effected by using 32-bit * address by shifting the 24-bit address up 8 bits. */ return flash_andes_qspi_access(dev, JESD216_CMD_READ_SFDP, ANDES_ACCESS_ADDRESSED, addr, data, length); } #endif /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ /** * @brief Write the status register. * * @note The device must be externally acquired before invoking this * function. 
* * @param dev Device struct * @param sr The new value of the status register * * @return 0 on success or a negative error code. */ static int flash_andes_qspi_wrsr(const struct device *dev, uint8_t sr) { int ret = flash_andes_qspi_cmd_write(dev, FLASH_ANDES_CMD_WREN); if (ret == 0) { ret = flash_andes_qspi_access(dev, FLASH_ANDES_CMD_WRSR, ANDES_ACCESS_WRITE, 0, &sr, sizeof(sr)); flash_andes_qspi_wait_until_ready(dev); } return ret; } static int flash_andes_qspi_read(const struct device *dev, off_t addr, void *dest, size_t size) { const size_t flash_size = dev_flash_size(dev); int ret; /* should be between 0 and flash size */ if ((addr < 0 || addr >= flash_size || ((flash_size - addr) < size))) { return -EINVAL; } if (size == 0) { return 0; } acquire_device(dev); ret = flash_andes_qspi_cmd_addr_read(dev, FLASH_ANDES_CMD_4READ, addr, dest, size); release_device(dev); return ret; } static int flash_andes_qspi_write(const struct device *dev, off_t addr, const void *src, size_t size) { const size_t flash_size = dev_flash_size(dev); const uint16_t page_size = dev_page_size(dev); size_t to_write = size; int ret = 0; /* should be between 0 and flash size */ if ((addr < 0 || addr >= flash_size || ((flash_size - addr) < size))) { return -EINVAL; } if (size == 0) { return 0; } acquire_device(dev); ret = flash_andes_qspi_write_protection_set(dev, false); if (ret != 0) { goto out; } do { /* Get the adequate size to send*/ to_write = MIN(page_size - (addr % page_size), size); ret = flash_andes_qspi_cmd_addr_write(dev, FLASH_ANDES_CMD_4PP, addr, src, to_write); if (ret != 0) { break; } size -= to_write; src = (const uint8_t *)src + to_write; addr += to_write; flash_andes_qspi_wait_until_ready(dev); } while (size > 0); int ret2 = flash_andes_qspi_write_protection_set(dev, true); if (!ret) { ret = ret2; } out: release_device(dev); return ret; } static int flash_andes_qspi_erase(const struct device *dev, off_t addr, size_t size) { const size_t flash_size = dev_flash_size(dev); int 
ret = 0; /* erase area must be subregion of device */ if ((addr < 0 || addr >= flash_size || ((flash_size - addr) < size))) { return -EINVAL; } if (size == 0) { return 0; } /* address must be sector-aligned */ if (!SPI_NOR_IS_SECTOR_ALIGNED(addr)) { return -EINVAL; } /* size must be a multiple of sectors */ if ((size % SPI_NOR_SECTOR_SIZE) != 0) { return -EINVAL; } acquire_device(dev); ret = flash_andes_qspi_write_protection_set(dev, false); if (ret != 0) { goto out; } if (size == flash_size) { /* chip erase */ flash_andes_qspi_cmd_write(dev, FLASH_ANDES_CMD_CE); size -= flash_size; flash_andes_qspi_wait_until_ready(dev); } while (size > 0) { const struct jesd216_erase_type *erase_types = dev_erase_types(dev); const struct jesd216_erase_type *bet = NULL; for (uint8_t ei = 0; ei < JESD216_NUM_ERASE_TYPES; ++ei) { const struct jesd216_erase_type *etp = &erase_types[ei]; if ((etp->exp != 0) && SPI_NOR_IS_ALIGNED(addr, etp->exp) && SPI_NOR_IS_ALIGNED(size, etp->exp) && ((bet == NULL) || (etp->exp > bet->exp))) { bet = etp; } } if (bet != NULL) { flash_andes_qspi_cmd_addr_write(dev, bet->cmd, addr, NULL, 0); addr += BIT(bet->exp); size -= BIT(bet->exp); } else { LOG_DBG("Can't erase %zu at 0x%lx", size, (long)addr); ret = -EINVAL; break; } flash_andes_qspi_wait_until_ready(dev); } int ret2 = flash_andes_qspi_write_protection_set(dev, true); if (!ret) { ret = ret2; } out: release_device(dev); return ret; } static int flash_andes_qspi_write_protection_set(const struct device *dev, bool write_protect) { return flash_andes_qspi_cmd_write(dev, (write_protect) ? 
FLASH_ANDES_CMD_WRDI : FLASH_ANDES_CMD_WREN); } #if defined(CONFIG_FLASH_JESD216_API) static int flash_andes_qspi_sfdp_read(const struct device *dev, off_t addr, void *dest, size_t size) { acquire_device(dev); int ret = read_sfdp(dev, addr, dest, size); release_device(dev); return ret; } #endif /* CONFIG_FLASH_JESD216_API */ static int flash_andes_qspi_read_jedec_id(const struct device *dev, uint8_t *id) { if (id == NULL) { return -EINVAL; } acquire_device(dev); int ret = flash_andes_qspi_cmd_read(dev, FLASH_ANDES_CMD_RDID, id, 3); release_device(dev); return ret; } static int spi_nor_process_bfp(const struct device *dev, const struct jesd216_param_header *php, const struct jesd216_bfp *bfp) { struct flash_andes_qspi_data *dev_data = dev->data; struct jesd216_erase_type *etp = dev_data->erase_types; const size_t flash_size = jesd216_bfp_density(bfp) / 8U; LOG_DBG("%s: %u MiBy flash", dev->name, (uint32_t)(flash_size >> 20)); /* Copy over the erase types, preserving their order. (The * Sector Map Parameter table references them by index.) 
*/ memset(dev_data->erase_types, 0, sizeof(dev_data->erase_types)); for (uint8_t ti = 1; ti <= ARRAY_SIZE(dev_data->erase_types); ++ti) { if (jesd216_bfp_erase(bfp, ti, etp) == 0) { LOG_DBG("Erase %u with %02x", (uint32_t)BIT(etp->exp), etp->cmd); } ++etp; } dev_data->page_size = jesd216_bfp_page_size(php, bfp); #ifdef CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME dev_data->flash_size = flash_size; #else /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ if (flash_size != dev_flash_size(dev)) { LOG_ERR("BFP flash size mismatch with devicetree"); return -EINVAL; } #endif /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ return 0; } static int spi_nor_process_sfdp(const struct device *dev) { int ret; #if defined(CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME) const uint8_t decl_nph = 2; union { /* We only process BFP so use one parameter block */ uint8_t raw[JESD216_SFDP_SIZE(decl_nph)]; struct jesd216_sfdp_header sfdp; } u_header; const struct jesd216_sfdp_header *hp = &u_header.sfdp; ret = read_sfdp(dev, 0, u_header.raw, sizeof(u_header.raw)); if (ret != 0) { LOG_ERR("SFDP read failed: %d", ret); return ret; } uint32_t magic = jesd216_sfdp_magic(hp); if (magic != JESD216_SFDP_MAGIC) { LOG_ERR("SFDP magic %08x invalid", magic); return -EINVAL; } LOG_DBG("%s: SFDP v %u.%u AP %x with %u PH", dev->name, hp->rev_major, hp->rev_minor, hp->access, 1 + hp->nph); const struct jesd216_param_header *php = hp->phdr; const struct jesd216_param_header *phpe = php + MIN(decl_nph, 1 + hp->nph); while (php != phpe) { uint16_t id = jesd216_param_id(php); LOG_DBG("PH%zu: %04x rev %u.%u: %u DW @ %x", (php - hp->phdr), id, php->rev_major, php->rev_minor, php->len_dw, jesd216_param_addr(php)); if (id == JESD216_SFDP_PARAM_ID_BFP) { union { uint32_t dw[MIN(php->len_dw, 20)]; struct jesd216_bfp bfp; } u_param; const struct jesd216_bfp *bfp = &u_param.bfp; ret = read_sfdp(dev, jesd216_param_addr(php), u_param.dw, sizeof(u_param.dw)); if (ret != 0) { break; } ret = spi_nor_process_bfp(dev, php, bfp); if (ret != 0) { break; 
} } ++php; } #elif defined(CONFIG_FLASH_ANDES_QSPI_SFDP_DEVICETREE) /* For devicetree we need to synthesize a parameter header and * process the stored BFP data as if we had read it. */ const struct flash_andes_qspi_config *config = dev->config; struct jesd216_param_header bfp_hdr = { .len_dw = config->bfp_len, }; ret = spi_nor_process_bfp(dev, &bfp_hdr, config->bfp); #else #error Unhandled SFDP choice #endif return ret; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static int setup_pages_layout(const struct device *dev) { int ret = 0; #if defined(CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME) struct flash_andes_qspi_data *dev_data = dev->data; const size_t flash_size = dev_flash_size(dev); const uint32_t layout_page_size = CONFIG_FLASH_ANDES_QSPI_LAYOUT_PAGE_SIZE; uint8_t exponent = 0; /* Find the smallest erase size. */ for (size_t i = 0; i < ARRAY_SIZE(dev_data->erase_types); ++i) { const struct jesd216_erase_type *etp = &dev_data->erase_types[i]; if ((etp->cmd != 0) && ((exponent == 0) || (etp->exp < exponent))) { exponent = etp->exp; } } if (exponent == 0) { return -ENOTSUP; } uint32_t erase_size = BIT(exponent); /* Error if layout page size is not a multiple of smallest * erase size. */ if ((layout_page_size % erase_size) != 0) { LOG_ERR("layout page %u not compatible with erase size %u", layout_page_size, erase_size); return -EINVAL; } /* Warn but accept layout page sizes that leave inaccessible * space. 
*/ if ((flash_size % layout_page_size) != 0) { LOG_WRN("layout page %u wastes space with device size %zu", layout_page_size, flash_size); } dev_data->layout.pages_size = layout_page_size; dev_data->layout.pages_count = flash_size / layout_page_size; LOG_DBG("layout %zu x %zu By pages", dev_data->layout.pages_count, dev_data->layout.pages_size); #elif defined(CONFIG_FLASH_ANDES_QSPI_SFDP_DEVICETREE) const struct flash_andes_qspi_config *config = dev->config; const struct flash_pages_layout *layout = &config->layout; const size_t flash_size = dev_flash_size(dev); size_t layout_size = layout->pages_size * layout->pages_count; if (!SPI_NOR_IS_SECTOR_ALIGNED(layout->pages_size)) { LOG_ERR("ANDES_QSPI_FLASH_LAYOUT_PAGE_SIZE must be " "multiple of 4096"); return -EINVAL; } if (flash_size != layout_size) { LOG_ERR("device size %zu mismatch %zu * %zu By pages", flash_size, layout->pages_count, layout->pages_size); return -EINVAL; } #else /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ #error Unhandled SFDP choice #endif /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ return ret; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static int qspi_andes_configure(const struct device *dev) { const struct flash_andes_qspi_config *config = dev->config; uint32_t base = config->base; /* Setting the divisor value to 0xff indicates the SCLK * frequency should be the same as the spi_clock frequency. 
*/ sys_set_bits(QSPI_TIMIN(base), TIMIN_SCLK_DIV_MSK); /* Set Master mode */ sys_clear_bits(QSPI_TFMAT(base), TFMAT_SLVMODE_MSK); /* Disable data merge mode */ sys_clear_bits(QSPI_TFMAT(base), TFMAT_DATA_MERGE_MSK); /* Set data length */ sys_clear_bits(QSPI_TFMAT(base), TFMAT_DATA_LEN_MSK); sys_set_bits(QSPI_TFMAT(base), (7 << TFMAT_DATA_LEN_OFFSET)); /* Set TX/RX FIFO threshold */ sys_clear_bits(QSPI_CTRL(base), CTRL_TX_THRES_MSK); sys_clear_bits(QSPI_CTRL(base), CTRL_RX_THRES_MSK); sys_set_bits(QSPI_CTRL(base), TX_FIFO_THRESHOLD); sys_set_bits(QSPI_CTRL(base), RX_FIFO_THRESHOLD); return 0; } static void qspi_andes_irq_handler(const struct device *dev) { struct flash_andes_qspi_data *data = dev->data; const struct flash_andes_qspi_config *config = dev->config; uint32_t base = config->base; uint32_t i, intr_status, spi_status; uint32_t rx_data, cur_tx_fifo_num, cur_rx_fifo_num; uint32_t tx_num = 0, tx_data = 0; intr_status = sys_read32(QSPI_INTST(base)); if ((intr_status & INTST_TX_FIFO_INT_MSK) && !(intr_status & INTST_END_INT_MSK)) { spi_status = sys_read32(QSPI_STAT(base)); cur_tx_fifo_num = GET_TX_NUM(base); tx_num = data->tx_fifo_size - cur_tx_fifo_num; if (tx_num > data->tx_len) { tx_num = data->tx_len; } for (i = tx_num; i > 0; i--) { tx_data = data->tx_buf[data->tx_ptr]; sys_write32(tx_data, QSPI_DATA(base)); data->tx_ptr++; if (data->tx_ptr == data->tx_len) { sys_clear_bits(QSPI_INTEN(base), IEN_TX_FIFO_MSK); break; } } sys_write32(INTST_TX_FIFO_INT_MSK, QSPI_INTST(base)); } if (intr_status & INTST_RX_FIFO_INT_MSK) { cur_rx_fifo_num = GET_RX_NUM(base); for (i = cur_rx_fifo_num; i > 0; i--) { rx_data = sys_read32(QSPI_DATA(base)); data->rx_buf[data->rx_ptr] = rx_data; data->rx_ptr++; if (data->rx_ptr == data->rx_len) { sys_clear_bits(QSPI_INTEN(base), IEN_RX_FIFO_MSK); break; } } sys_write32(INTST_RX_FIFO_INT_MSK, QSPI_INTST(base)); } if (intr_status & INTST_END_INT_MSK) { /* Clear end interrupt */ sys_write32(INTST_END_INT_MSK, QSPI_INTST(base)); /* 
Disable all SPI interrupts */ sys_write32(0, QSPI_INTEN(base)); k_sem_give(&data->device_sync_sem); } } /** * @brief Initialize and configure the flash * * @param name The flash name * @return 0 on success, negative errno code otherwise */ static int flash_andes_qspi_init(const struct device *dev) { const struct flash_andes_qspi_config *config = dev->config; struct flash_andes_qspi_data *dev_data = dev->data; uint32_t base = config->base; uint8_t ret, reg = (0x1UL << 6); uint8_t jedec_id[SPI_NOR_MAX_ID_LEN]; /* we should not configure the device we are running on */ if (config->xip) { return -EINVAL; } k_sem_init(&dev_data->sem, 1, 1); k_sem_init(&dev_data->device_sync_sem, 0, 1); /* Get the TX/RX FIFO size of this device */ dev_data->tx_fifo_size = TX_FIFO_SIZE(base); dev_data->rx_fifo_size = RX_FIFO_SIZE(base); config->cfg_func(); irq_enable(config->irq_num); qspi_andes_configure(dev); ret = flash_andes_qspi_read_jedec_id(dev, jedec_id); if (ret != 0) { LOG_ERR("JEDEC ID read failed: %d", ret); return -ENODEV; } #ifndef CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME if (memcmp(jedec_id, config->jedec_id, sizeof(jedec_id)) != 0) { LOG_ERR("Device id %02x %02x %02x does not match config" "%02x %02x %02x", jedec_id[0], jedec_id[1], jedec_id[2], config->jedec_id[0], config->jedec_id[1], config->jedec_id[2]); return -EINVAL; } #endif ret = spi_nor_process_sfdp(dev); if (ret != 0) { LOG_ERR("SFDP read failed: %d", ret); return -ENODEV; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) ret = setup_pages_layout(dev); if (ret != 0) { LOG_ERR("layout setup failed: %d", ret); return -ENODEV; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ /* Set status register QE bit. 
*/ flash_andes_qspi_wrsr(dev, reg); return 0; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static void flash_andes_qspi_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { #ifdef CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME const struct flash_andes_qspi_data *dev_data = dev->data; *layout = &dev_data->layout; #else /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ const struct flash_andes_qspi_config *config = dev->config; *layout = &config->layout; #endif /* CONFIG_FLASH_ANDES_QSPI_SFDP_RUNTIME */ *layout_size = 1; } #endif static const struct flash_parameters * flash_andes_qspi_get_parameters(const struct device *dev) { const struct flash_andes_qspi_config *config = dev->config; return &config->parameters; } static const struct flash_driver_api flash_andes_qspi_api = { .read = flash_andes_qspi_read, .write = flash_andes_qspi_write, .erase = flash_andes_qspi_erase, .get_parameters = flash_andes_qspi_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_andes_qspi_pages_layout, #endif #if defined(CONFIG_FLASH_JESD216_API) .sfdp_read = flash_andes_qspi_sfdp_read, .read_jedec_id = flash_andes_qspi_read_jedec_id, #endif }; #if (CONFIG_XIP) #define QSPI_ROM_CFG_XIP(node_id) DT_SAME_NODE(node_id, DT_CHOSEN(zephyr_flash)) #else #define QSPI_ROM_CFG_XIP(node_id) false #endif #define LAYOUT_PAGES_PROP(n) \ IF_ENABLED(CONFIG_FLASH_PAGE_LAYOUT, \ (.layout = { \ .pages_count = ((DT_INST_PROP(n, size) / 8) / \ CONFIG_FLASH_ANDES_QSPI_LAYOUT_PAGE_SIZE), \ .pages_size = \ CONFIG_FLASH_ANDES_QSPI_LAYOUT_PAGE_SIZE, \ }, \ )) #define ANDES_QSPI_SFDP_DEVICETREE_CONFIG(n) \ IF_ENABLED(CONFIG_FLASH_ANDES_QSPI_SFDP_DEVICETREE, \ ( \ static const __aligned(4) uint8_t bfp_data_##n[] = \ DT_INST_PROP(n, sfdp_bfp); \ )) #define ANDES_QSPI_SFDP_DEVICETREE_PROP(n) \ IF_ENABLED(CONFIG_FLASH_ANDES_QSPI_SFDP_DEVICETREE, \ (.jedec_id = DT_INST_PROP(n, jedec_id), \ .flash_size = DT_INST_PROP(n, size) / 8, \ .bfp_len = sizeof(bfp_data_##n) / 4, 
\ .bfp = (const struct jesd216_bfp *)bfp_data_##n, \ LAYOUT_PAGES_PROP(n) \ )) #define FLASH_ANDES_QSPI_INIT(n) \ static struct flash_andes_qspi_data flash_andes_qspi_data_##n; \ ANDES_QSPI_SFDP_DEVICETREE_CONFIG(n) \ \ static void flash_andes_qspi_configure_##n(void); \ static const struct flash_andes_qspi_config \ flash_andes_qspi_config_##n = { \ .cfg_func = flash_andes_qspi_configure_##n, \ .base = DT_REG_ADDR(DT_INST_BUS(n)), \ .irq_num = DT_IRQN(DT_INST_BUS(n)), \ .parameters = { \ .write_block_size = 1, \ .erase_value = 0xff \ }, \ .xip = QSPI_ROM_CFG_XIP(DT_DRV_INST(n)), \ ANDES_QSPI_SFDP_DEVICETREE_PROP(n) \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ &flash_andes_qspi_init, \ NULL, \ &flash_andes_qspi_data_##n, \ &flash_andes_qspi_config_##n, \ POST_KERNEL, \ CONFIG_FLASH_ANDES_QSPI_INIT_PRIORITY, \ &flash_andes_qspi_api); \ \ static void flash_andes_qspi_configure_##n(void) \ { \ IRQ_CONNECT(DT_IRQN(DT_INST_BUS(n)), \ DT_IRQ(DT_INST_BUS(n), priority), \ qspi_andes_irq_handler, \ DEVICE_DT_INST_GET(n), \ 0); \ } \ DT_INST_FOREACH_STATUS_OKAY(FLASH_ANDES_QSPI_INIT) ```
/content/code_sandbox/drivers/flash/flash_andes_qspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,430
```c /* * */ #include "flash_gd32.h" #include <zephyr/logging/log.h> #include <zephyr/kernel.h> #include <gd32_fmc.h> LOG_MODULE_DECLARE(flash_gd32); #define GD32_NV_FLASH_V2_NODE DT_INST(0, gd_gd32_nv_flash_v2) #define GD32_NV_FLASH_V2_TIMEOUT DT_PROP(GD32_NV_FLASH_V2_NODE, max_erase_time_ms) #if !defined(CONFIG_SOC_GD32A503) /** * @brief GD32 FMC v2 flash memory has 2 banks. * Bank0 holds the first 512KB, bank1 is used give capacity for reset. * The page size is the same within the same bank, but not equal for all banks. */ #if (PRE_KB(512) >= SOC_NV_FLASH_SIZE) #define GD32_NV_FLASH_V2_BANK0_SIZE SOC_NV_FLASH_SIZE #define GD32_NV_FLASH_V2_BANK0_PAGE_SIZE DT_PROP(GD32_NV_FLASH_V2_NODE, bank0_page_size) #else #define GD32_NV_FLASH_V2_BANK0_SIZE KB(512) #define GD32_NV_FLASH_V2_BANK0_PAGE_SIZE DT_PROP(GD32_NV_FLASH_V2_NODE, bank0_page_size) #define GD32_NV_FLASH_V2_BANK1_SIZE (SOC_NV_FLASH_SIZE - KB(512)) #define GD32_NV_FLASH_V2_BANK1_PAGE_SIZE DT_PROP(GD32_NV_FLASH_V2_NODE, bank1_page_size) #endif #elif defined(CONFIG_SOC_GD32A503) /** * @brief GD32A503 series flash memory has 2 banks. * Bank0 holds the first 256KB, bank1 is used give capacity for reset. * The page size is 1KB for all banks. 
*/ #if (PRE_KB(256) >= SOC_NV_FLASH_SIZE) #define GD32_NV_FLASH_V2_BANK0_SIZE SOC_NV_FLASH_SIZE #define GD32_NV_FLASH_V2_BANK0_PAGE_SIZE DT_PROP(GD32_NV_FLASH_V2_NODE, bank0_page_size) #else #define GD32_NV_FLASH_V2_BANK0_SIZE KB(256) #define GD32_NV_FLASH_V2_BANK0_PAGE_SIZE DT_PROP(GD32_NV_FLASH_V2_NODE, bank0_page_size) #define GD32_NV_FLASH_V2_BANK1_SIZE (SOC_NV_FLASH_SIZE - KB(256)) #define GD32_NV_FLASH_V2_BANK1_PAGE_SIZE DT_PROP(GD32_NV_FLASH_V2_NODE, bank1_page_size) #endif #endif #define GD32_FMC_V2_BANK0_WRITE_ERR (FMC_STAT0_PGERR | FMC_STAT0_WPERR) #define GD32_FMC_V2_BANK0_ERASE_ERR FMC_STAT0_WPERR #define GD32_FMC_V2_BANK1_WRITE_ERR (FMC_STAT1_PGERR | FMC_STAT1_WPERR) #define GD32_FMC_V2_BANK1_ERASE_ERR FMC_STAT1_WPERR #ifdef CONFIG_FLASH_PAGE_LAYOUT static struct flash_pages_layout gd32_fmc_v2_layout[] = { { .pages_size = GD32_NV_FLASH_V2_BANK0_PAGE_SIZE, .pages_count = GD32_NV_FLASH_V2_BANK0_SIZE / GD32_NV_FLASH_V2_BANK0_PAGE_SIZE }, #ifdef GD32_NV_FLASH_V2_BANK1_SIZE { .pages_size = GD32_NV_FLASH_V2_BANK1_PAGE_SIZE, .pages_count = GD32_NV_FLASH_V2_BANK1_SIZE / GD32_NV_FLASH_V2_BANK1_PAGE_SIZE } #endif }; #endif static inline void gd32_fmc_v2_bank0_unlock(void) { FMC_KEY0 = UNLOCK_KEY0; FMC_KEY0 = UNLOCK_KEY1; } static inline void gd32_fmc_v2_bank0_lock(void) { FMC_CTL0 |= FMC_CTL0_LK; } static int gd32_fmc_v2_bank0_wait_idle(void) { const int64_t expired_time = k_uptime_get() + GD32_NV_FLASH_V2_TIMEOUT; while (FMC_STAT0 & FMC_STAT0_BUSY) { if (k_uptime_get() > expired_time) { return -ETIMEDOUT; } } return 0; } static int gd32_fmc_v2_bank0_write(off_t offset, const void *data, size_t len) { flash_prg_t *prg_flash = (flash_prg_t *)((uint8_t *)SOC_NV_FLASH_ADDR + offset); flash_prg_t *prg_data = (flash_prg_t *)data; int ret = 0; gd32_fmc_v2_bank0_unlock(); if (FMC_STAT0 & FMC_STAT0_BUSY) { return -EBUSY; } FMC_CTL0 |= FMC_CTL0_PG; for (size_t i = 0U; i < (len / sizeof(flash_prg_t)); i++) { *prg_flash++ = *prg_data++; } ret = 
gd32_fmc_v2_bank0_wait_idle(); if (ret < 0) { goto expired_out; } if (FMC_STAT0 & GD32_FMC_V2_BANK0_WRITE_ERR) { ret = -EIO; FMC_STAT0 |= GD32_FMC_V2_BANK0_WRITE_ERR; LOG_ERR("FMC bank0 programming failed"); } expired_out: FMC_CTL0 &= ~FMC_CTL0_PG; gd32_fmc_v2_bank0_lock(); return ret; } static int gd32_fmc_v2_bank0_page_erase(uint32_t page_addr) { int ret = 0; gd32_fmc_v2_bank0_unlock(); if (FMC_STAT0 & FMC_STAT0_BUSY) { return -EBUSY; } FMC_CTL0 |= FMC_CTL0_PER; FMC_ADDR0 = page_addr; FMC_CTL0 |= FMC_CTL0_START; ret = gd32_fmc_v2_bank0_wait_idle(); if (ret < 0) { goto expired_out; } if (FMC_STAT0 & GD32_FMC_V2_BANK0_ERASE_ERR) { ret = -EIO; FMC_STAT0 |= GD32_FMC_V2_BANK0_ERASE_ERR; LOG_ERR("FMC bank0 page %u erase failed", page_addr); } expired_out: FMC_CTL0 &= ~FMC_CTL0_PER; gd32_fmc_v2_bank0_lock(); return ret; } static int gd32_fmc_v2_bank0_erase_block(off_t offset, size_t size) { uint32_t page_addr = SOC_NV_FLASH_ADDR + offset; int ret = 0; while (size > 0U) { ret = gd32_fmc_v2_bank0_page_erase(page_addr); if (ret < 0) { return ret; } size -= GD32_NV_FLASH_V2_BANK0_PAGE_SIZE; page_addr += GD32_NV_FLASH_V2_BANK0_PAGE_SIZE; } return 0; } #ifdef GD32_NV_FLASH_V2_BANK1_SIZE static inline void gd32_fmc_v2_bank1_unlock(void) { FMC_KEY1 = UNLOCK_KEY0; FMC_KEY1 = UNLOCK_KEY1; } static inline void gd32_fmc_v2_bank1_lock(void) { FMC_CTL1 |= FMC_CTL1_LK; } static int gd32_fmc_v2_bank1_wait_idle(void) { const int64_t expired_time = k_uptime_get() + GD32_NV_FLASH_V2_TIMEOUT; while (FMC_STAT1 & FMC_STAT1_BUSY) { if (k_uptime_get() > expired_time) { return -ETIMEDOUT; } } return 0; } static int gd32_fmc_v2_bank1_write(off_t offset, const void *data, size_t len) { flash_prg_t *prg_flash = (flash_prg_t *)((uint8_t *)SOC_NV_FLASH_ADDR + offset); flash_prg_t *prg_data = (flash_prg_t *)data; int ret = 0; gd32_fmc_v2_bank1_unlock(); if (FMC_STAT1 & FMC_STAT1_BUSY) { return -EBUSY; } FMC_CTL1 |= FMC_CTL1_PG; for (size_t i = 0U; i < (len / sizeof(flash_prg_t)); i++) { *prg_flash++ 
= *prg_data++; } ret = gd32_fmc_v2_bank1_wait_idle(); if (ret < 0) { goto expired_out; } if (FMC_STAT1 & GD32_FMC_V2_BANK1_WRITE_ERR) { ret = -EIO; FMC_STAT1 |= GD32_FMC_V2_BANK1_WRITE_ERR; LOG_ERR("FMC bank1 programming failed"); } expired_out: FMC_CTL1 &= ~FMC_CTL1_PG; gd32_fmc_v2_bank1_lock(); return ret; } static int gd32_fmc_v2_bank1_page_erase(uint32_t page_addr) { int ret = 0; gd32_fmc_v2_bank1_unlock(); if (FMC_STAT1 & FMC_STAT1_BUSY) { return -EBUSY; } FMC_CTL1 |= FMC_CTL1_PER; FMC_ADDR1 = page_addr; FMC_CTL1 |= FMC_CTL1_START; ret = gd32_fmc_v2_bank1_wait_idle(); if (ret < 0) { goto expired_out; } if (FMC_STAT1 & GD32_FMC_V2_BANK1_ERASE_ERR) { ret = -EIO; FMC_STAT1 |= GD32_FMC_V2_BANK1_ERASE_ERR; LOG_ERR("FMC bank1 page %u erase failed", page_addr); } expired_out: FMC_CTL1 &= ~FMC_CTL1_PER; gd32_fmc_v2_bank1_lock(); return ret; } static int gd32_fmc_v2_bank1_erase_block(off_t offset, size_t size) { uint32_t page_addr = SOC_NV_FLASH_ADDR + offset; int ret = 0; while (size > 0U) { ret = gd32_fmc_v2_bank1_page_erase(page_addr); if (ret < 0) { return ret; } size -= GD32_NV_FLASH_V2_BANK0_SIZE; page_addr += GD32_NV_FLASH_V2_BANK0_SIZE; } return 0; } #endif /* GD32_NV_FLASH_V2_BANK1_SIZE */ bool flash_gd32_valid_range(off_t offset, uint32_t len, bool write) { if ((offset > SOC_NV_FLASH_SIZE) || ((offset + len) > SOC_NV_FLASH_SIZE)) { return false; } if (write) { /* Check offset and len is flash_prg_t aligned. */ if ((offset % sizeof(flash_prg_t)) || (len % sizeof(flash_prg_t))) { return false; } } else { if (offset < GD32_NV_FLASH_V2_BANK0_SIZE) { if (offset % GD32_NV_FLASH_V2_BANK0_PAGE_SIZE) { return false; } if (((offset + len) <= GD32_NV_FLASH_V2_BANK0_SIZE) && (len % GD32_NV_FLASH_V2_BANK0_PAGE_SIZE)) { return false; } } #ifdef GD32_NV_FLASH_V2_BANK1_SIZE /* Remove bank0 info from offset and len. 
*/ if ((offset < GD32_NV_FLASH_V2_BANK0_SIZE) && ((offset + len) > GD32_NV_FLASH_V2_BANK0_SIZE)) { len -= (GD32_NV_FLASH_V2_BANK0_SIZE - offset); offset = GD32_NV_FLASH_V2_BANK0_SIZE; } if (offset >= GD32_NV_FLASH_V2_BANK0_SIZE) { if ((offset % GD32_NV_FLASH_V2_BANK1_PAGE_SIZE) || (len % GD32_NV_FLASH_V2_BANK1_PAGE_SIZE)) { return false; } } #endif } return true; } int flash_gd32_write_range(off_t offset, const void *data, size_t len) { size_t len0 = 0U; int ret = 0; if (offset < GD32_NV_FLASH_V2_BANK0_SIZE) { if ((offset + len) > GD32_NV_FLASH_V2_BANK0_SIZE) { len0 = GD32_NV_FLASH_V2_BANK0_SIZE - offset; } else { len0 = len; } ret = gd32_fmc_v2_bank0_write(offset, data, len0); if (ret < 0) { return ret; } } #ifdef GD32_NV_FLASH_V2_BANK1_SIZE size_t len1 = len - len0; if (len1 == 0U) { return 0; } /* Will programming bank1, remove bank0 offset. */ if (offset < GD32_NV_FLASH_V2_BANK0_SIZE) { offset = GD32_NV_FLASH_V2_BANK0_SIZE; } ret = gd32_fmc_v2_bank1_write(offset, data, len1); if (ret < 0) { return ret; } #endif return 0; } int flash_gd32_erase_block(off_t offset, size_t size) { size_t size0 = 0U; int ret = 0; if (offset < GD32_NV_FLASH_V2_BANK0_SIZE) { if ((offset + size0) > GD32_NV_FLASH_V2_BANK0_SIZE) { size0 = GD32_NV_FLASH_V2_BANK0_SIZE - offset; } else { size0 = size; } ret = gd32_fmc_v2_bank0_erase_block(offset, size0); if (ret < 0) { return ret; } } #ifdef GD32_NV_FLASH_V2_BANK1_SIZE size_t size1 = size - size0; if (size1 == 0U) { return 0; } /* Will programming bank1, remove bank0 info from offset. 
*/ if (offset < GD32_NV_FLASH_V2_BANK0_SIZE) { offset = GD32_NV_FLASH_V2_BANK0_SIZE; } ret = gd32_fmc_v2_bank1_erase_block(offset, size1); if (ret < 0) { return ret; } #endif return 0; } #ifdef CONFIG_FLASH_PAGE_LAYOUT void flash_gd32_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { ARG_UNUSED(dev); *layout = gd32_fmc_v2_layout; *layout_size = ARRAY_SIZE(gd32_fmc_v2_layout); } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ ```
/content/code_sandbox/drivers/flash/flash_gd32_v2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,201
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_FLASH_FLASH_STM32_H_ #define ZEPHYR_DRIVERS_FLASH_FLASH_STM32_H_ #include <zephyr/drivers/flash.h> #if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_flash_controller), clocks) || \ DT_NODE_HAS_PROP(DT_INST(0, st_stm32h7_flash_controller), clocks) #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #endif /* Get the base address of the flash from the DTS node */ #define FLASH_STM32_BASE_ADDRESS DT_REG_ADDR(DT_INST(0, st_stm32_nv_flash)) struct flash_stm32_priv { FLASH_TypeDef *regs; #if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_flash_controller), clocks) || \ DT_NODE_HAS_PROP(DT_INST(0, st_stm32h7_flash_controller), clocks) /* clock subsystem driving this peripheral */ struct stm32_pclken pclken; #endif struct k_sem sem; }; #if DT_PROP(DT_INST(0, soc_nv_flash), write_block_size) #define FLASH_STM32_WRITE_BLOCK_SIZE \ DT_PROP(DT_INST(0, soc_nv_flash), write_block_size) #else #error Flash write block size not available /* Flash Write block size is extracted from device tree */ /* as flash node property 'write-block-size' */ #endif #if defined(CONFIG_SOC_SERIES_STM32H5X) /* FLASH register names differ for this serie */ #define FLASH_NSSR_BSY FLASH_SR_BSY #define OPTR OPTCR #endif /* CONFIG_SOC_SERIES_STM32H5X */ /* Register mapping for the stm32H7RS serie (single bank)*/ #if defined(CONFIG_SOC_SERIES_STM32H7RSX) #define FLASH_NB_32BITWORD_IN_FLASHWORD 4 /* 128 bits */ #define CR1 CR #define SR1 SR /* flash sectore Nb [0-7] */ #define FLASH_CR_SNB FLASH_CR_SSN #define FLASH_CR_SNB_Pos FLASH_CR_SSN_Pos #define KEYR1 KEYR #endif /* CONFIG_SOC_SERIES_STM32H7RSX */ /* Differentiate between arm trust-zone non-secure/secure, and others. */ #if defined(FLASH_NSSR_NSBSY) || defined(FLASH_NSSR_BSY) /* For mcu w. TZ in non-secure mode */ #define FLASH_SECURITY_NS #define FLASH_STM32_SR NSSR #elif defined(FLASH_SECSR_SECBSY) /* For mcu w. 
TZ in secured mode */ #error Flash is not supported in secure mode #define FLASH_SECURITY_SEC #else #define FLASH_SECURITY_NA /* For series which does not have * secured or non-secured mode */ #define FLASH_STM32_SR SR #endif #define FLASH_STM32_PRIV(dev) ((struct flash_stm32_priv *)((dev)->data)) #define FLASH_STM32_REGS(dev) (FLASH_STM32_PRIV(dev)->regs) /* Redefinitions of flags and masks to harmonize stm32 series: */ #if defined(CONFIG_SOC_SERIES_STM32U5X) #define FLASH_STM32_NSLOCK FLASH_NSCR_LOCK #define FLASH_STM32_DBANK FLASH_OPTR_DUALBANK #define FLASH_STM32_NSPG FLASH_NSCR_PG #define FLASH_STM32_NSBKER_MSK FLASH_NSCR_BKER_Msk #define FLASH_STM32_NSBKER FLASH_NSCR_BKER #define FLASH_STM32_NSPER FLASH_NSCR_PER #define FLASH_STM32_NSPNB_MSK FLASH_NSCR_PNB_Msk #define FLASH_STM32_NSPNB_POS FLASH_NSCR_PNB_Pos #define FLASH_STM32_NSPNB FLASH_NSCR_PNB #define FLASH_STM32_NSSTRT FLASH_NSCR_STRT #define FLASH_PAGE_SIZE_128_BITS FLASH_PAGE_SIZE #elif defined(CONFIG_SOC_SERIES_STM32H5X) #define FLASH_OPTR_SWAP_BANK FLASH_OPTCR_SWAP_BANK #define FLASH_STM32_NSLOCK FLASH_CR_LOCK #define FLASH_STM32_DBANK 1 #define FLASH_STM32_NSPG FLASH_CR_PG #define FLASH_STM32_NSBKER_MSK FLASH_CR_BKSEL_Msk #define FLASH_STM32_NSBKER FLASH_CR_BKSEL #define FLASH_STM32_NSPER FLASH_CR_SER #define FLASH_STM32_NSPNB_MSK FLASH_CR_SNB_Msk #define FLASH_STM32_NSPNB_POS FLASH_CR_SNB_Pos #define FLASH_STM32_NSPNB FLASH_CR_PNB #define FLASH_STM32_NSSTRT FLASH_CR_START /* TODO: get values from the cmsis and stm32h5_hal_flash.h */ #undef FLASH_SIZE /* Retrieve the FLASH SIZE from the DTS instead of cmsis as it seems erroneous */ #define FLASH_SIZE (CONFIG_FLASH_SIZE * 1024) /* Values are redefined below from the stm32h5_hal_flash.h */ #define FLASH_PAGE_SIZE (FLASH_SECTOR_SIZE) #define FLASH_PAGE_NB (FLASH_SECTOR_NB) #define FLASH_PAGE_NB_PER_BANK (FLASH_BANK_SIZE / FLASH_PAGE_SIZE) #define FLASH_PAGE_SIZE_128_BITS FLASH_PAGE_SIZE #elif defined(CONFIG_SOC_SERIES_STM32L5X) #define 
FLASH_STM32_NSLOCK FLASH_NSCR_NSLOCK #define FLASH_STM32_NSPG FLASH_NSCR_NSPG #define FLASH_STM32_NSBKER_MSK FLASH_NSCR_NSBKER_Pos #define FLASH_STM32_NSBKER FLASH_NSCR_NSBKER #define FLASH_STM32_NSPER FLASH_NSCR_NSPER #define FLASH_STM32_NSPNB_MSK FLASH_NSCR_NSPNB_Msk #define FLASH_STM32_NSPNB_POS FLASH_NSCR_NSPNB_Pos #define FLASH_STM32_NSPNB FLASH_NSCR_NSPNB #define FLASH_STM32_NSSTRT FLASH_NSCR_NSSTRT #elif defined(CONFIG_SOC_SERIES_STM32WBAX) #define NSCR NSCR1 #define FLASH_STM32_NSLOCK FLASH_NSCR1_LOCK #define FLASH_STM32_NSPG FLASH_NSCR1_PG #define FLASH_STM32_NSBKER_MSK FLASH_NSCR1_BKER_Msk #define FLASH_STM32_NSBKER FLASH_NSCR1_BKER #define FLASH_STM32_NSPER FLASH_NSCR1_PER #define FLASH_STM32_NSPNB_MSK FLASH_NSCR1_PNB_Msk #define FLASH_STM32_NSPNB_POS FLASH_NSCR1_PNB_Pos #define FLASH_STM32_NSPNB FLASH_NSCR1_PNB #define FLASH_STM32_NSSTRT FLASH_NSCR1_STRT #endif /* CONFIG_SOC_SERIES_STM32U5X */ #if defined(FLASH_OPTR_DBANK) #define FLASH_STM32_DBANK FLASH_OPTR_DBANK #endif /* FLASH_OPTR_DBANK */ #if defined(CONFIG_SOC_SERIES_STM32G0X) #if defined(FLASH_FLAG_BSY2) #define FLASH_STM32_SR_BUSY (FLASH_FLAG_BSY1 | FLASH_FLAG_BSY2); #else #define FLASH_STM32_SR_BUSY (FLASH_SR_BSY1) #endif /* defined(FLASH_FLAG_BSY2) */ #else #define FLASH_STM32_SR_BUSY (FLASH_FLAG_BSY) #endif #if defined(CONFIG_SOC_SERIES_STM32G0X) #define FLASH_STM32_SR_CFGBSY (FLASH_SR_CFGBSY) #elif defined(FLASH_FLAG_CFGBSY) #define FLASH_STM32_SR_CFGBSY (FLASH_FLAG_CFGBSY) #endif #if defined(CONFIG_SOC_SERIES_STM32G0X) /* STM32G0 HAL FLASH_FLAG_x don't represent bit-masks, need FLASH_SR_x instead */ #define FLASH_STM32_SR_OPERR FLASH_SR_OPERR #define FLASH_STM32_SR_PGERR 0 #define FLASH_STM32_SR_PROGERR FLASH_SR_PROGERR #define FLASH_STM32_SR_WRPERR FLASH_SR_WRPERR #define FLASH_STM32_SR_PGAERR FLASH_SR_PGAERR #define FLASH_STM32_SR_SIZERR FLASH_SR_SIZERR #define FLASH_STM32_SR_PGSERR FLASH_SR_PGSERR #define FLASH_STM32_SR_MISERR FLASH_SR_MISERR #define FLASH_STM32_SR_FASTERR 
FLASH_SR_FASTERR #if defined(FLASH_SR_RDERR) #define FLASH_STM32_SR_RDERR FLASH_SR_RDERR #else #define FLASH_STM32_SR_RDERR 0 #endif #define FLASH_STM32_SR_PGPERR 0 #else /* !defined(CONFIG_SOC_SERIES_STM32G0X) */ #if defined(FLASH_FLAG_OPERR) #define FLASH_STM32_SR_OPERR FLASH_FLAG_OPERR #else #define FLASH_STM32_SR_OPERR 0 #endif #if defined(FLASH_FLAG_PGERR) #define FLASH_STM32_SR_PGERR FLASH_FLAG_PGERR #else #define FLASH_STM32_SR_PGERR 0 #endif #if defined(FLASH_FLAG_PROGERR) #define FLASH_STM32_SR_PROGERR FLASH_FLAG_PROGERR #else #define FLASH_STM32_SR_PROGERR 0 #endif #if defined(FLASH_FLAG_WRPERR) #define FLASH_STM32_SR_WRPERR FLASH_FLAG_WRPERR #else #define FLASH_STM32_SR_WRPERR 0 #endif #if defined(FLASH_FLAG_PGAERR) #define FLASH_STM32_SR_PGAERR FLASH_FLAG_PGAERR #else #define FLASH_STM32_SR_PGAERR 0 #endif #if defined(FLASH_FLAG_SIZERR) #define FLASH_STM32_SR_SIZERR FLASH_FLAG_SIZERR #else #define FLASH_STM32_SR_SIZERR 0 #endif #if defined(FLASH_FLAG_PGSERR) #define FLASH_STM32_SR_PGSERR FLASH_FLAG_PGSERR #else #define FLASH_STM32_SR_PGSERR 0 #endif #if defined(FLASH_FLAG_MISERR) #define FLASH_STM32_SR_MISERR FLASH_FLAG_MISERR #else #define FLASH_STM32_SR_MISERR 0 #endif #if defined(FLASH_FLAG_FASTERR) #define FLASH_STM32_SR_FASTERR FLASH_FLAG_FASTERR #else #define FLASH_STM32_SR_FASTERR 0 #endif #if defined(FLASH_FLAG_RDERR) #define FLASH_STM32_SR_RDERR FLASH_FLAG_RDERR #else #define FLASH_STM32_SR_RDERR 0 #endif #if defined(FLASH_FLAG_PGPERR) #define FLASH_STM32_SR_PGPERR FLASH_FLAG_PGPERR #else #define FLASH_STM32_SR_PGPERR 0 #endif #endif /* !defined(CONFIG_SOC_SERIES_STM32G0X) */ #define FLASH_STM32_SR_ERRORS (FLASH_STM32_SR_OPERR | \ FLASH_STM32_SR_PGERR | \ FLASH_STM32_SR_PROGERR | \ FLASH_STM32_SR_WRPERR | \ FLASH_STM32_SR_PGAERR | \ FLASH_STM32_SR_SIZERR | \ FLASH_STM32_SR_PGSERR | \ FLASH_STM32_SR_MISERR | \ FLASH_STM32_SR_FASTERR | \ FLASH_STM32_SR_RDERR | \ FLASH_STM32_SR_PGPERR) #define FLASH_STM32_RDP0 0xAA #define FLASH_STM32_RDP2 0xCC 
#define FLASH_STM32_RDP1 \ DT_PROP(DT_INST(0, st_stm32_flash_controller), st_rdp1_enable_byte) #if FLASH_STM32_RDP1 == FLASH_STM32_RDP0 || FLASH_STM32_RDP1 == FLASH_STM32_RDP2 #error RDP1 byte has to be different than RDP0 and RDP2 byte #endif #ifdef CONFIG_FLASH_PAGE_LAYOUT static inline bool flash_stm32_range_exists(const struct device *dev, off_t offset, uint32_t len) { struct flash_pages_info info; return !(flash_get_page_info_by_offs(dev, offset, &info) || flash_get_page_info_by_offs(dev, offset + len - 1, &info)); } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static inline bool flash_stm32_valid_write(off_t offset, uint32_t len) { return ((offset % FLASH_STM32_WRITE_BLOCK_SIZE == 0) && (len % FLASH_STM32_WRITE_BLOCK_SIZE == 0U)); } bool flash_stm32_valid_range(const struct device *dev, off_t offset, uint32_t len, bool write); int flash_stm32_write_range(const struct device *dev, unsigned int offset, const void *data, unsigned int len); int flash_stm32_block_erase_loop(const struct device *dev, unsigned int offset, unsigned int len); int flash_stm32_wait_flash_idle(const struct device *dev); int flash_stm32_option_bytes_lock(const struct device *dev, bool enable); #ifdef CONFIG_SOC_SERIES_STM32WBX int flash_stm32_check_status(const struct device *dev); #endif /* CONFIG_SOC_SERIES_STM32WBX */ #ifdef CONFIG_FLASH_PAGE_LAYOUT void flash_stm32_page_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size); #endif #if defined(CONFIG_FLASH_STM32_WRITE_PROTECT) int flash_stm32_update_wp_sectors(const struct device *dev, uint32_t changed_sectors, uint32_t protected_sectors); int flash_stm32_get_wp_sectors(const struct device *dev, uint32_t *protected_sectors); #endif #if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION) uint8_t flash_stm32_get_rdp_level(const struct device *dev); void flash_stm32_set_rdp_level(const struct device *dev, uint8_t level); #endif /* Flash extended operations */ #if defined(CONFIG_FLASH_STM32_WRITE_PROTECT) int 
flash_stm32_ex_op_sector_wp(const struct device *dev, const uintptr_t in, void *out); #endif #if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION) int flash_stm32_ex_op_rdp(const struct device *dev, const uintptr_t in, void *out); #endif #endif /* ZEPHYR_DRIVERS_FLASH_FLASH_STM32_H_ */ ```
/content/code_sandbox/drivers/flash/flash_stm32.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,099
```c /* * an affiliate of Cypress Semiconductor Corporation * */ #define DT_DRV_COMPAT infineon_cat1_qspi_flash #define SOC_NV_FLASH_NODE DT_PARENT(DT_INST(0, fixed_partitions)) #define PAGE_LEN DT_PROP(SOC_NV_FLASH_NODE, erase_block_size) #include <zephyr/kernel.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/flash.h> #include <zephyr/logging/log.h> #include "cy_serial_flash_qspi.h" #include "cy_smif_memslot.h" LOG_MODULE_REGISTER(flash_infineon_cat1, CONFIG_FLASH_LOG_LEVEL); /* Device config structure */ struct ifx_cat1_flash_config { uint32_t base_addr; uint32_t max_addr; }; /* Data structure */ struct ifx_cat1_flash_data { cyhal_flash_t flash_obj; struct k_sem sem; }; static struct flash_parameters ifx_cat1_flash_parameters = { .write_block_size = DT_PROP(SOC_NV_FLASH_NODE, write_block_size), .erase_value = 0xFF, }; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_read_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_write_en_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_write_dis_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_erase_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_chip_erase_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_program_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_read_sts_reg_qe_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_read_sts_reg_wip_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_write_sts_reg_qe_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_read_sts_reg_oe_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_write_sts_reg_oe_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_read_latency_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_write_latency_cmd = {0}; cy_stc_smif_mem_cmd_t sfdp_slave_slot_0_read_sfdp_cmd = { /* The 8-bit command. 1 x I/O read command. */ .command = 0x5AU, /* The width of the command transfer. */ .cmdWidth = CY_SMIF_WIDTH_SINGLE, /* The width of the address transfer. */ .addrWidth = CY_SMIF_WIDTH_SINGLE, /* The 8-bit mode byte. 
This value is 0xFFFFFFFF when there is no mode present. */ .mode = 0xFFFFFFFFU, /* The width of the mode command transfer. */ .modeWidth = CY_SMIF_WIDTH_SINGLE, /* The number of dummy cycles. A zero value suggests no dummy cycles. */ .dummyCycles = 8U, /* The width of the data transfer. */ .dataWidth = CY_SMIF_WIDTH_SINGLE, }; cy_stc_smif_octal_ddr_en_seq_t oe_sequence_SFDP_SlaveSlot_0 = { .cmdSeq1Len = CY_SMIF_SFDP_ODDR_CMD_SEQ_MAX_LEN, .cmdSeq2Len = CY_SMIF_SFDP_ODDR_CMD_SEQ_MAX_LEN, .cmdSeq1 = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .cmdSeq2 = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, }; /* Support for memories with hybrid regions is added in the version 1.50 * Please refer to the changelog in * path_to_url * ASSETS/repo/mtb-pdl-cat1/develop/Latest/deploy/docs/ * pdl_api_reference_manual/html/group__group__smif.html * for more details */ #if (CY_SMIF_DRV_VERSION_MAJOR > 1) && (CY_SMIF_DRV_VERSION_MINOR >= 50) static cy_stc_smif_hybrid_region_info_t sfdp_slave_slot_0_region_info_storage[16]; #define GENERATE_REGION_INFO_PTR(index, _) &sfdp_slave_slot_0_region_info_storage[index], static cy_stc_smif_hybrid_region_info_t *sfdp_slave_slot_0_region_info[16] = { LISTIFY(16, GENERATE_REGION_INFO_PTR, ())}; #endif cy_stc_smif_mem_device_cfg_t deviceCfg_SFDP_SlaveSlot_0 = { /* Specifies the number of address bytes used by the memory slave device. */ .numOfAddrBytes = 0x03U, /* The size of the memory. */ .memSize = 0x0000100U, /* Specifies the Read command. */ .readCmd = &sfdp_slave_slot_0_read_cmd, /* Specifies the Write Enable command. */ .writeEnCmd = &sfdp_slave_slot_0_write_en_cmd, /* Specifies the Write Disable command. */ .writeDisCmd = &sfdp_slave_slot_0_write_dis_cmd, /* Specifies the Erase command. */ .eraseCmd = &sfdp_slave_slot_0_erase_cmd, /* Specifies the sector size of each erase. */ .eraseSize = 0x0001000U, /* Specifies the Chip Erase command. */ .chipEraseCmd = &sfdp_slave_slot_0_chip_erase_cmd, /* Specifies the Program command. 
*/ .programCmd = &sfdp_slave_slot_0_program_cmd, /* Specifies the page size for programming. */ .programSize = 0x0000100U, /* Specifies the command to read the QE-containing status register. */ .readStsRegQeCmd = &sfdp_slave_slot_0_read_sts_reg_qe_cmd, /* Specifies the command to read the WIP-containing status register. */ .readStsRegWipCmd = &sfdp_slave_slot_0_read_sts_reg_wip_cmd, /* Specifies the read SFDP command */ .readSfdpCmd = &sfdp_slave_slot_0_read_sfdp_cmd, /* Specifies the command to write into the QE-containing status register. */ .writeStsRegQeCmd = &sfdp_slave_slot_0_write_sts_reg_qe_cmd, /* The mask for the status register. */ .stsRegBusyMask = 0x00U, /* The mask for the status register. */ .stsRegQuadEnableMask = 0x00U, /* The max time for the erase type-1 cycle-time in ms. */ .eraseTime = 1U, /* The max time for the chip-erase cycle-time in ms. */ .chipEraseTime = 16U, /* The max time for the page-program cycle-time in us. */ .programTime = 8U, #if (CY_SMIF_DRV_VERSION_MAJOR > 1) && (CY_SMIF_DRV_VERSION_MINOR >= 50) /* Points to NULL or to structure with info about sectors for hybrid memory. */ .hybridRegionCount = 0U, .hybridRegionInfo = sfdp_slave_slot_0_region_info, #endif /* Specifies the command to read variable latency cycles configuration register */ .readLatencyCmd = &sfdp_slave_slot_0_read_latency_cmd, /* Specifies the command to write variable latency cycles configuration register */ .writeLatencyCmd = &sfdp_slave_slot_0_write_latency_cmd, /* Specifies the address for variable latency cycle address */ .latencyCyclesRegAddr = 0x00U, /* Specifies variable latency cycles Mask */ .latencyCyclesMask = 0x00U, /* Specifies data for memory with hybrid sectors */ .octalDDREnableSeq = &oe_sequence_SFDP_SlaveSlot_0, /* Specifies the command to read the OE-containing status register. */ .readStsRegOeCmd = &sfdp_slave_slot_0_read_sts_reg_oe_cmd, /* Specifies the command to write the OE-containing status register. 
*/ .writeStsRegOeCmd = &sfdp_slave_slot_0_write_sts_reg_oe_cmd, /* QE mask for the status registers */ .stsRegOctalEnableMask = 0x00U, /* Octal enable register address */ .octalEnableRegAddr = 0x00U, /* Frequency of operation used in Octal mode */ .freq_of_operation = CY_SMIF_100MHZ_OPERATION, }; cy_stc_smif_mem_config_t sfdp_slave_slot_0 = { /* Determines the slot number where the memory device is placed. */ .slaveSelect = CY_SMIF_SLAVE_SELECT_0, /* Flags. */ .flags = CY_SMIF_FLAG_SMIF_REV_3 | CY_SMIF_FLAG_MEMORY_MAPPED | CY_SMIF_FLAG_WR_EN | CY_SMIF_FLAG_DETECT_SFDP | CY_SMIF_FLAG_MERGE_ENABLE, /* The data-line selection options for a slave device. */ .dataSelect = CY_SMIF_DATA_SEL0, /* The base address the memory slave * Valid when the memory-mapped mode is enabled. */ .baseAddress = 0x60000000U, /* The size allocated in the memory map, for the memory slave device. * The size is allocated from the base address. Valid when the memory mapped mode is * enabled. */ .memMappedSize = 0x100000U, /* If this memory device is one of the devices in the dual quad SPI configuration. * Valid when the memory mapped mode is enabled. */ .dualQuadSlots = 0, /* The configuration of the device. */ .deviceCfg = &deviceCfg_SFDP_SlaveSlot_0, /** Continuous transfer merge timeout. * After this period the memory device is deselected. A later transfer, even from a * continuous address, starts with the overhead phases (command, address, mode, dummy * cycles). This configuration parameter is available for CAT1B devices. 
*/ .mergeTimeout = CY_SMIF_MERGE_TIMEOUT_1_CYCLE, }; static inline void flash_ifx_sem_take(const struct device *dev) { struct ifx_cat1_flash_data *data = dev->data; k_sem_take(&data->sem, K_FOREVER); } static inline void flash_ifx_sem_give(const struct device *dev) { struct ifx_cat1_flash_data *data = dev->data; k_sem_give(&data->sem); } static int ifx_cat1_flash_read(const struct device *dev, off_t offset, void *data, size_t data_len) { cy_rslt_t rslt = CY_RSLT_SUCCESS; int ret = 0; if (!data_len) { return 0; } flash_ifx_sem_take(dev); rslt = cy_serial_flash_qspi_read(offset, data_len, data); if (rslt != CY_RSLT_SUCCESS) { LOG_ERR("Error reading @ %lu (Err:0x%x)", offset, rslt); ret = -EIO; } flash_ifx_sem_give(dev); return ret; } static int ifx_cat1_flash_write(const struct device *dev, off_t offset, const void *data, size_t data_len) { cy_rslt_t rslt = CY_RSLT_SUCCESS; int ret = 0; if (data_len == 0) { return 0; } if (offset < 0) { return -EINVAL; } flash_ifx_sem_take(dev); rslt = cy_serial_flash_qspi_write(offset, data_len, data); if (rslt != CY_RSLT_SUCCESS) { LOG_ERR("Error in writing @ %lu (Err:0x%x)", offset, rslt); ret = -EIO; } flash_ifx_sem_give(dev); return ret; } static int ifx_cat1_flash_erase(const struct device *dev, off_t offset, size_t size) { cy_rslt_t rslt; int ret = 0; if (offset < 0) { return -EINVAL; } flash_ifx_sem_take(dev); rslt = cy_serial_flash_qspi_erase(offset, size); if (rslt != CY_RSLT_SUCCESS) { LOG_ERR("Error in erasing : 0x%x", rslt); ret = -EIO; } flash_ifx_sem_give(dev); return ret; } #if CONFIG_FLASH_PAGE_LAYOUT static const struct flash_pages_layout ifx_cat1_flash_pages_layout = { .pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) / PAGE_LEN, .pages_size = PAGE_LEN, }; static void ifx_cat1_flash_page_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { *layout = &ifx_cat1_flash_pages_layout; /* * For flash memories which have uniform page sizes, this routine * returns an array of length 
1, which specifies the page size and * number of pages in the memory. */ *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static const struct flash_parameters *ifx_cat1_flash_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &ifx_cat1_flash_parameters; } static int ifx_cat1_flash_init(const struct device *dev) { struct ifx_cat1_flash_data *data = dev->data; cy_rslt_t rslt = CY_RSLT_SUCCESS; rslt = cy_serial_flash_qspi_init(&sfdp_slave_slot_0, NC, NC, NC, NC, NC, NC, NC, NC, NC, NC, 50000000lu); if (rslt != CY_RSLT_SUCCESS) { LOG_ERR("Serial Flash initialization failed [rslt: 0x%x]", rslt); } k_sem_init(&data->sem, 1, 1); return 0; } static const struct flash_driver_api ifx_cat1_flash_driver_api = { .read = ifx_cat1_flash_read, .write = ifx_cat1_flash_write, .erase = ifx_cat1_flash_erase, .get_parameters = ifx_cat1_flash_get_parameters, #ifdef CONFIG_FLASH_PAGE_LAYOUT .page_layout = ifx_cat1_flash_page_layout, #endif }; static struct ifx_cat1_flash_data flash_data; static const struct ifx_cat1_flash_config flash_config = { .base_addr = DT_REG_ADDR(SOC_NV_FLASH_NODE), .max_addr = DT_REG_ADDR(SOC_NV_FLASH_NODE) + DT_REG_SIZE(SOC_NV_FLASH_NODE)}; DEVICE_DT_INST_DEFINE(0, ifx_cat1_flash_init, NULL, &flash_data, &flash_config, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &ifx_cat1_flash_driver_api); ```
/content/code_sandbox/drivers/flash/flash_ifx_cat1_qspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,296
```unknown menuconfig NORDIC_QSPI_NOR bool "QSPI NOR Flash" default y depends on DT_HAS_NORDIC_QSPI_NOR_ENABLED select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_EXPLICIT_ERASE select NRFX_QSPI select FLASH_JESD216 select PINCTRL help Enable support for nrfx QSPI driver with EasyDMA. if NORDIC_QSPI_NOR config NORDIC_QSPI_NOR_INIT_PRIORITY int "Init priority" default 41 help Device driver initialization priority. config NORDIC_QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE int "Page size to use for FLASH_LAYOUT feature" default 65536 help When CONFIG_FLASH_PAGE_LAYOUT is used this driver will support that API. By default the page size corresponds to the block size (65536). Other option include the sector size (4096). config NORDIC_QSPI_NOR_STACK_WRITE_BUFFER_SIZE int "Size of a stack-based buffer to handle writes not supported by QSPI" default 4 help The QSPI peripheral uses DMA and can only write data that is read from a word-aligned location in RAM. A non-zero value here enables a stack buffer to be used for any source data that does not meet these restrictions. Such data will be copied into this buffer to allow the write to proceed. Multiple transfers will be initiated if the data is larger than the configured size. Must be a multiple of 4. When set to 0, the feature is disabled. config NORDIC_QSPI_NOR_XIP bool "XIP (eXecute in place)" depends on SOC_NRF5340_CPUAPP help Enable setting up the QSPI NOR driver to allow for execution of code stored in QSPI XIP region. Note that for this functionality to work, the QSPI NOR init priority must be set so that no XIP code in the QSPI NOR flash chip is executed until the driver has been setup. This will also disable power management for the QSPI NOR flash chip. config NORDIC_QSPI_NOR_TIMEOUT_MS int "Timeout for QSPI operations (ms)" default 500 help The QSPI peripheral operation timeout in milliseconds. Primarily intended for long running operations such as a flash sector erase. 
The 500 ms default allows for most typical NOR flash chips to erase a sector. endif # NORDIC_QSPI_NOR ```
/content/code_sandbox/drivers/flash/Kconfig.nordic_qspi_nor
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
544
```unknown config SOC_FLASH_SMARTBOND bool "Renesas SmartBond(tm) flash driver" default y depends on DT_HAS_RENESAS_SMARTBOND_FLASH_CONTROLLER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_EXPLICIT_ERASE select MPU_ALLOW_FLASH_WRITE if ARM_MPU help Enable flash driver for Renesas SmartBond(tm) MCU family. ```
/content/code_sandbox/drivers/flash/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
88
```unknown config SOC_FLASH_GD32 bool "GigaDevice GD32 flash driver" default y depends on (GD32_NV_FLASH_V1 || GD32_NV_FLASH_V2 || GD32_NV_FLASH_V3) select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_EXPLICIT_ERASE select MPU_ALLOW_FLASH_WRITE if ARM_MPU help Enable the GigaDevice GD32 flash driver. config GD32_NV_FLASH_V1 bool default y depends on DT_HAS_GD_GD32_NV_FLASH_V1_ENABLED help Enable the generic backend for GD32 FMC v1 flash driver. config GD32_NV_FLASH_V2 bool default y depends on DT_HAS_GD_GD32_NV_FLASH_V2_ENABLED help Enable the generic backend for GD32 FMC v2 flash driver. config GD32_NV_FLASH_V3 bool default y depends on DT_HAS_GD_GD32_NV_FLASH_V3_ENABLED help Enable the generic backend for GD32 FMC v3 flash driver. ```
/content/code_sandbox/drivers/flash/Kconfig.gd32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
228
```c /* * */ #include <sys/types.h> #include <zephyr/kernel.h> #include "jesd216.h" #include "spi_nor.h" static bool extract_instr(uint16_t packed, struct jesd216_instr *res) { bool rv = (res != NULL); if (rv) { res->instr = packed >> 8; res->mode_clocks = (packed >> 5) & 0x07; res->wait_states = packed & 0x1F; } return rv; } int jesd216_bfp_read_support(const struct jesd216_param_header *php, const struct jesd216_bfp *bfp, enum jesd216_mode_type mode, struct jesd216_instr *res) { int rv = -ENOTSUP; switch (mode) { case JESD216_MODE_044: if ((php->len_dw >= 15) && (sys_le32_to_cpu(bfp->dw10[5]) & BIT(9))) { rv = 0; } break; case JESD216_MODE_088: if ((php->len_dw >= 19) && (sys_le32_to_cpu(bfp->dw10[9]) & BIT(9))) { rv = 0; } break; case JESD216_MODE_111: rv = 0; break; case JESD216_MODE_112: if (sys_le32_to_cpu(bfp->dw1) & BIT(16)) { uint32_t dw4 = sys_le32_to_cpu(bfp->dw4); rv = extract_instr(dw4 >> 0, res); } break; case JESD216_MODE_114: if (sys_le32_to_cpu(bfp->dw1) & BIT(22)) { uint32_t dw3 = sys_le32_to_cpu(bfp->dw3); rv = extract_instr(dw3 >> 16, res); } break; case JESD216_MODE_118: if (php->len_dw >= 17) { uint32_t dw17 = sys_le32_to_cpu(bfp->dw10[7]); if ((dw17 >> 24) != 0) { rv = extract_instr(dw17 >> 16, res); } } break; case JESD216_MODE_122: if (sys_le32_to_cpu(bfp->dw1) & BIT(20)) { uint32_t dw4 = sys_le32_to_cpu(bfp->dw4); rv = extract_instr(dw4 >> 16, res); } break; case JESD216_MODE_144: if (sys_le32_to_cpu(bfp->dw1) & BIT(21)) { uint32_t dw3 = sys_le32_to_cpu(bfp->dw3); rv = extract_instr(dw3 >> 0, res); } break; case JESD216_MODE_188: if (php->len_dw >= 17) { uint32_t dw17 = sys_le32_to_cpu(bfp->dw10[7]); if ((uint8_t)(dw17 >> 8) != 0) { rv = extract_instr(dw17 >> 0, res); } } break; case JESD216_MODE_222: if (sys_le32_to_cpu(bfp->dw5) & BIT(0)) { uint32_t dw6 = sys_le32_to_cpu(bfp->dw6); rv = extract_instr(dw6 >> 16, res); } break; case JESD216_MODE_444: if (sys_le32_to_cpu(bfp->dw5) & BIT(4)) { uint32_t dw7 = sys_le32_to_cpu(bfp->dw7); rv = 
extract_instr(dw7 >> 16, res); } break; /* Not clear how to detect these; they are identified only by * enable/disable sequences. */ case JESD216_MODE_44D4D: case JESD216_MODE_888: case JESD216_MODE_8D8D8D: break; default: rv = -EINVAL; } return rv; } int jesd216_bfp_erase(const struct jesd216_bfp *bfp, uint8_t idx, struct jesd216_erase_type *etp) { __ASSERT_NO_MSG((idx > 0) && (idx <= JESD216_NUM_ERASE_TYPES)); /* Types 1 and 2 are in dw8, types 3 and 4 in dw9 */ const uint32_t *dwp = &bfp->dw8 + (idx - 1U) / 2U; uint32_t dw = sys_le32_to_cpu(*dwp); /* Type 2(4) is in the upper half of the value. */ if ((idx & 0x01) == 0x00) { dw >>= 16; } /* Extract the exponent and command */ uint8_t exp = (uint8_t)dw; uint8_t cmd = (uint8_t)(dw >> 8); if (exp == 0) { return -EINVAL; } etp->cmd = cmd; etp->exp = exp; return 0; } int jesd216_bfp_erase_type_times(const struct jesd216_param_header *php, const struct jesd216_bfp *bfp, uint8_t idx, uint32_t *typ_ms) { __ASSERT_NO_MSG((idx > 0) && (idx <= JESD216_NUM_ERASE_TYPES)); /* DW10 introduced in JESD216A */ if (php->len_dw < 10) { return -ENOTSUP; } uint32_t dw10 = sys_le32_to_cpu(bfp->dw10[0]); /* Each 7-bit erase time entry has a 5-bit count in the lower * bits, and a 2-bit unit in the upper bits. The actual count * is the field content plus one. * * The entries start with ET1 at bit 4. The low four bits * encode a value that is offset and scaled to produce a * multiplier to convert from typical time to maximum time. 
*/ unsigned int count = 1 + ((dw10 >> (4 + (idx - 1) * 7)) & 0x1F); unsigned int units = ((dw10 >> (4 + 5 + (idx - 1) * 7)) & 0x03); unsigned int max_factor = 2 * (1 + (dw10 & 0x0F)); switch (units) { case 0x00: /* 1 ms */ *typ_ms = count; break; case 0x01: /* 16 ms */ *typ_ms = count * 16; break; case 0x02: /* 128 ms */ *typ_ms = count * 128; break; case 0x03: /* 1 s */ *typ_ms = count * MSEC_PER_SEC; break; } return max_factor; } int jesd216_bfp_decode_dw11(const struct jesd216_param_header *php, const struct jesd216_bfp *bfp, struct jesd216_bfp_dw11 *res) { /* DW11 introduced in JESD216A */ if (php->len_dw < 11) { return -ENOTSUP; } uint32_t dw11 = sys_le32_to_cpu(bfp->dw10[1]); uint32_t value = 1 + ((dw11 >> 24) & 0x1F); switch ((dw11 >> 29) & 0x03) { case 0x00: /* 16 ms */ value *= 16; break; case 0x01: value *= 256; break; case 0x02: value *= 4 * MSEC_PER_SEC; break; case 0x03: value *= 64 * MSEC_PER_SEC; break; } res->chip_erase_ms = value; value = 1 + ((dw11 >> 19) & 0x0F); if (dw11 & BIT(23)) { value *= 8; } res->byte_prog_addl_us = value; value = 1 + ((dw11 >> 14) & 0x0F); if (dw11 & BIT(18)) { value *= 8; } res->byte_prog_first_us = value; value = 1 + ((dw11 >> 8) & 0x01F); if (dw11 & BIT(13)) { value *= 64; } else { value *= 8; } res->page_prog_us = value; res->page_size = BIT((dw11 >> 4) & 0x0F); res->typ_max_factor = 2 * (1 + (dw11 & 0x0F)); return 0; } int jesd216_bfp_decode_dw14(const struct jesd216_param_header *php, const struct jesd216_bfp *bfp, struct jesd216_bfp_dw14 *res) { /* DW14 introduced in JESD216A */ if (php->len_dw < 14) { return -ENOTSUP; } uint32_t dw14 = sys_le32_to_cpu(bfp->dw10[4]); if (dw14 & BIT(31)) { return -ENOTSUP; } res->enter_dpd_instr = (dw14 >> 23) & 0xFF; res->exit_dpd_instr = (dw14 >> 15) & 0xFF; uint32_t value = 1 + ((dw14 >> 8) & 0x1F); switch ((dw14 >> 13) & 0x03) { case 0x00: /* 128 ns */ value *= 128; break; case 0x01: /* 1 us */ value *= NSEC_PER_USEC; break; case 0x02: /* 8 us */ value *= 8 * NSEC_PER_USEC; 
break; case 0x03: /* 64 us */ value *= 64 * NSEC_PER_USEC; break; } res->exit_delay_ns = value; res->poll_options = (dw14 >> 2) & 0x3F; return 0; } int jesd216_bfp_decode_dw15(const struct jesd216_param_header *php, const struct jesd216_bfp *bfp, struct jesd216_bfp_dw15 *res) { /* DW15 introduced in JESD216A */ if (php->len_dw < 15) { return -ENOTSUP; } uint32_t dw15 = sys_le32_to_cpu(bfp->dw10[5]); res->hold_reset_disable = (dw15 & BIT(23)) != 0U; res->qer = (dw15 >> 20) & 0x07; res->entry_044 = (dw15 >> 16) & 0x0F; res->exit_044 = (dw15 >> 10) & 0x3F; res->support_044 = (dw15 & BIT(9)) != 0U; res->enable_444 = (dw15 >> 4) & 0x1F; res->disable_444 = (dw15 >> 0) & 0x0F; return 0; } int jesd216_bfp_decode_dw16(const struct jesd216_param_header *php, const struct jesd216_bfp *bfp, struct jesd216_bfp_dw16 *res) { /* DW16 introduced in JESD216A */ if (php->len_dw < 16) { return -ENOTSUP; } uint32_t dw16 = sys_le32_to_cpu(bfp->dw10[6]); res->enter_4ba = (dw16 >> 24) & 0xFF; res->exit_4ba = (dw16 >> 14) & 0x3FF; res->srrs_support = (dw16 >> 8) & 0x3F; res->sr1_interface = (dw16 >> 0) & 0x7F; return 0; } ```
/content/code_sandbox/drivers/flash/jesd216.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,734
```c /* * */ /* * This driver is written based on the Altera's * Nios-II QSPI Controller HAL driver. */ #define DT_DRV_COMPAT altr_nios2_qspi_nor #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <zephyr/drivers/flash.h> #include <errno.h> #include <zephyr/init.h> #include <soc.h> #include <zephyr/sys/util.h> #include "flash_priv.h" #include "altera_generic_quad_spi_controller2_regs.h" #include "altera_generic_quad_spi_controller2.h" #define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flash_nios2_qspi); /* * Remove the following macros once the Altera HAL * supports the QSPI Controller v2 IP. */ #define ALTERA_QSPI_CONTROLLER2_FLAG_STATUS_REG 0x0000001C #define FLAG_STATUS_PROTECTION_ERROR (1 << 1) #define FLAG_STATUS_PROGRAM_SUSPENDED (1 << 2) #define FLAG_STATUS_PROGRAM_ERROR (1 << 4) #define FLAG_STATUS_ERASE_ERROR (1 << 5) #define FLAG_STATUS_ERASE_SUSPENDED (1 << 6) #define FLAG_STATUS_CONTROLLER_READY (1 << 7) /* ALTERA_QSPI_CONTROLLER2_STATUS_REG bits */ #define STATUS_PROTECTION_POS 2 #define STATUS_PROTECTION_MASK 0x1F #define STATUS_PROTECTION_EN_VAL 0x17 #define STATUS_PROTECTION_DIS_VAL 0x0 /* ALTERA_QSPI_CONTROLLER2_MEM_OP_REG bits */ #define MEM_OP_ERASE_CMD 0x00000002 #define MEM_OP_WRITE_EN_CMD 0x00000004 #define MEM_OP_SECTOR_OFFSET_BIT_POS 8 #define MEM_OP_UNLOCK_ALL_SECTORS 0x00000003 #define MEM_OP_LOCK_ALL_SECTORS 0x00000F03 #define NIOS2_QSPI_BLANK_WORD 0xFFFFFFFF #define NIOS2_WRITE_BLOCK_SIZE 4 #define USEC_TO_MSEC(x) (x / 1000) struct flash_nios2_qspi_config { alt_qspi_controller2_dev qspi_dev; struct k_sem sem_lock; }; static const struct flash_parameters flash_nios2_qspi_parameters = { .write_block_size = NIOS2_WRITE_BLOCK_SIZE, .erase_value = 0xff, }; static int flash_nios2_qspi_write_protection(const struct device *dev, bool enable); static int flash_nios2_qspi_erase(const struct device *dev, off_t offset, size_t len) { struct flash_nios2_qspi_config *flash_cfg = 
dev->data; alt_qspi_controller2_dev *qspi_dev = &flash_cfg->qspi_dev; uint32_t block_offset, offset_in_block, length_to_erase; uint32_t erase_offset = offset; /* address of next byte to erase */ uint32_t remaining_length = len; /* length of data left to be erased */ uint32_t flag_status; int32_t rc = 0, i, timeout, rc2; k_sem_take(&flash_cfg->sem_lock, K_FOREVER); rc = flash_nios2_qspi_write_protection(dev, false); if (rc) { goto qspi_erase_err; } /* * check if offset is word aligned and * length is with in the range */ if (((offset + len) > qspi_dev->data_end) || (0 != (erase_offset & (NIOS2_WRITE_BLOCK_SIZE - 1)))) { LOG_ERR("erase failed at offset 0x%lx", (long)offset); rc = -EINVAL; goto qspi_erase_err; } for (i = offset/qspi_dev->sector_size; i < qspi_dev->number_of_sectors; i++) { if ((remaining_length <= 0U) || erase_offset >= (offset + len)) { break; } block_offset = 0U; /* block offset in byte addressing */ offset_in_block = 0U; /* offset into current sector to erase */ length_to_erase = 0U; /* length to erase in current sector */ /* calculate current sector/block offset in byte addressing */ block_offset = erase_offset & ~(qspi_dev->sector_size - 1); /* calculate offset into sector/block if there is one */ if (block_offset != erase_offset) { offset_in_block = erase_offset - block_offset; } /* calculate the byte size of data to be written in a sector */ length_to_erase = MIN(qspi_dev->sector_size - offset_in_block, remaining_length); /* Erase sector */ IOWR_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_MEM_OP_REG, MEM_OP_WRITE_EN_CMD); IOWR_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_MEM_OP_REG, (i << MEM_OP_SECTOR_OFFSET_BIT_POS) | MEM_OP_ERASE_CMD); /* * poll the status register to know the * completion of the erase operation. 
*/ timeout = ALTERA_QSPI_CONTROLLER2_1US_TIMEOUT_VALUE; while (timeout > 0) { /* wait for 1 usec */ k_busy_wait(1); flag_status = IORD_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_FLAG_STATUS_REG); if (flag_status & FLAG_STATUS_CONTROLLER_READY) { break; } timeout--; } if ((flag_status & FLAG_STATUS_ERASE_ERROR) || (flag_status & FLAG_STATUS_PROTECTION_ERROR)) { LOG_ERR("erase failed, Flag Status Reg:0x%x", flag_status); rc = -EIO; goto qspi_erase_err; } /* update remaining length and erase_offset */ remaining_length -= length_to_erase; erase_offset += length_to_erase; } qspi_erase_err: rc2 = flash_nios2_qspi_write_protection(dev, true); if (!rc) { rc = rc2; } k_sem_give(&flash_cfg->sem_lock); return rc; } static int flash_nios2_qspi_write_block(const struct device *dev, int block_offset, int mem_offset, const void *data, size_t len) { struct flash_nios2_qspi_config *flash_cfg = dev->data; alt_qspi_controller2_dev *qspi_dev = &flash_cfg->qspi_dev; uint32_t buffer_offset = 0U; /* offset into data buffer to get write data */ int32_t remaining_length = len; /* length left to write */ uint32_t write_offset = mem_offset; /* offset into flash to write too */ uint32_t word_to_write, padding, bytes_to_copy; uint32_t flag_status; int32_t rc = 0; while (remaining_length > 0) { /* initialize word to write to blank word */ word_to_write = NIOS2_QSPI_BLANK_WORD; /* bytes to pad the next word that is written */ padding = 0U; /* number of bytes from source to copy */ bytes_to_copy = NIOS2_WRITE_BLOCK_SIZE; /* * we need to make sure the write is word aligned * this should only be true at most 1 time */ if (0 != (write_offset & (NIOS2_WRITE_BLOCK_SIZE - 1))) { /* * data is not word aligned calculate padding bytes * need to add before start of a data offset */ padding = write_offset & (NIOS2_WRITE_BLOCK_SIZE - 1); /* * update variables to account * for padding being added */ bytes_to_copy -= padding; if (bytes_to_copy > remaining_length) { bytes_to_copy = remaining_length; } 
write_offset = write_offset - padding; if (0 != (write_offset & (NIOS2_WRITE_BLOCK_SIZE - 1))) { rc = -EINVAL; goto qspi_write_block_err; } } else { if (bytes_to_copy > remaining_length) { bytes_to_copy = remaining_length; } } /* Check memcpy length is within NIOS2_WRITE_BLOCK_SIZE */ if (padding + bytes_to_copy > NIOS2_WRITE_BLOCK_SIZE) { rc = -EINVAL; goto qspi_write_block_err; } /* prepare the word to be written */ memcpy((uint8_t *)&word_to_write + padding, (const uint8_t *)data + buffer_offset, bytes_to_copy); /* enable write */ IOWR_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_MEM_OP_REG, MEM_OP_WRITE_EN_CMD); /* write to flash 32 bits at a time */ IOWR_32DIRECT(qspi_dev->data_base, write_offset, word_to_write); /* check whether write operation is successful */ flag_status = IORD_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_FLAG_STATUS_REG); if ((flag_status & FLAG_STATUS_PROGRAM_ERROR) || (flag_status & FLAG_STATUS_PROTECTION_ERROR)) { LOG_ERR("write failed, Flag Status Reg:0x%x", flag_status); rc = -EIO; /* sector might be protected */ goto qspi_write_block_err; } /* update offset and length variables */ buffer_offset += bytes_to_copy; remaining_length -= bytes_to_copy; write_offset = write_offset + NIOS2_WRITE_BLOCK_SIZE; } qspi_write_block_err: return rc; } static int flash_nios2_qspi_write(const struct device *dev, off_t offset, const void *data, size_t len) { struct flash_nios2_qspi_config *flash_cfg = dev->data; alt_qspi_controller2_dev *qspi_dev = &flash_cfg->qspi_dev; uint32_t block_offset, offset_in_block, length_to_write; uint32_t write_offset = offset; /* address of next byte to write */ uint32_t buffer_offset = 0U; /* offset into source buffer */ uint32_t remaining_length = len; /* length of data left to be written */ int32_t rc = 0, i, rc2; k_sem_take(&flash_cfg->sem_lock, K_FOREVER); rc = flash_nios2_qspi_write_protection(dev, false); if (rc) { goto qspi_write_err; } /* * check if offset is word aligned and * length is with in 
the range */ if ((data == NULL) || ((offset + len) > qspi_dev->data_end) || (0 != (write_offset & (NIOS2_WRITE_BLOCK_SIZE - 1)))) { LOG_ERR("write failed at offset 0x%lx", (long)offset); rc = -EINVAL; goto qspi_write_err; } for (i = offset/qspi_dev->sector_size; i < qspi_dev->number_of_sectors; i++) { if (remaining_length <= 0U) { break; } block_offset = 0U; /* block offset in byte addressing */ offset_in_block = 0U; /* offset into current sector to write */ length_to_write = 0U; /* length to write to current sector */ /* calculate current sector/block offset in byte addressing */ block_offset = write_offset & ~(qspi_dev->sector_size - 1); /* calculate offset into sector/block if there is one */ if (block_offset != write_offset) { offset_in_block = write_offset - block_offset; } /* calculate the byte size of data to be written in a sector */ length_to_write = MIN(qspi_dev->sector_size - offset_in_block, remaining_length); rc = flash_nios2_qspi_write_block(dev, block_offset, write_offset, (const uint8_t *)data + buffer_offset, length_to_write); if (rc < 0) { goto qspi_write_err; } /* update remaining length and buffer_offset */ remaining_length -= length_to_write; buffer_offset += length_to_write; write_offset += length_to_write; } qspi_write_err: rc2 = flash_nios2_qspi_write_protection(dev, true); if (!rc) { rc = rc2; } k_sem_give(&flash_cfg->sem_lock); return rc; } static int flash_nios2_qspi_read(const struct device *dev, off_t offset, void *data, size_t len) { struct flash_nios2_qspi_config *flash_cfg = dev->data; alt_qspi_controller2_dev *qspi_dev = &flash_cfg->qspi_dev; uint32_t buffer_offset = 0U; /* offset into data buffer to get read data */ uint32_t remaining_length = len; /* length left to read */ uint32_t read_offset = offset; /* offset into flash to read from */ uint32_t word_to_read, bytes_to_copy; int32_t rc = 0; /* * check if offset and length are within the range */ if ((data == NULL) || (offset < qspi_dev->data_base) || ((offset + len) > 
qspi_dev->data_end)) { LOG_ERR("read failed at offset 0x%lx", (long)offset); return -EINVAL; } if (!len) { return 0; } k_sem_take(&flash_cfg->sem_lock, K_FOREVER); /* first unaligned start */ read_offset &= ~(NIOS2_WRITE_BLOCK_SIZE - 1U); if (offset > read_offset) { /* number of bytes from source to copy */ bytes_to_copy = NIOS2_WRITE_BLOCK_SIZE - (offset - read_offset); if (bytes_to_copy > remaining_length) { bytes_to_copy = remaining_length; } /* read from flash 32 bits at a time */ word_to_read = IORD_32DIRECT(qspi_dev->data_base, read_offset); memcpy((uint8_t *)data, (uint8_t *)&word_to_read + offset - read_offset, bytes_to_copy); /* update offset and length variables */ read_offset += NIOS2_WRITE_BLOCK_SIZE; buffer_offset += bytes_to_copy; remaining_length -= bytes_to_copy; } /* aligned part, including unaligned end */ while (remaining_length > 0) { /* number of bytes from source to copy */ bytes_to_copy = NIOS2_WRITE_BLOCK_SIZE; if (bytes_to_copy > remaining_length) { bytes_to_copy = remaining_length; } /* read from flash 32 bits at a time */ word_to_read = IORD_32DIRECT(qspi_dev->data_base, read_offset); memcpy((uint8_t *)data + buffer_offset, &word_to_read, bytes_to_copy); /* update offset and length variables */ read_offset += bytes_to_copy; buffer_offset += bytes_to_copy; remaining_length -= bytes_to_copy; } k_sem_give(&flash_cfg->sem_lock); return rc; } static int flash_nios2_qspi_write_protection(const struct device *dev, bool enable) { struct flash_nios2_qspi_config *flash_cfg = dev->data; alt_qspi_controller2_dev *qspi_dev = &flash_cfg->qspi_dev; uint32_t status, lock_val; int32_t rc = 0, timeout; /* set write enable */ IOWR_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_MEM_OP_REG, MEM_OP_WRITE_EN_CMD); if (enable) { IOWR_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_MEM_OP_REG, MEM_OP_LOCK_ALL_SECTORS); lock_val = STATUS_PROTECTION_EN_VAL; } else { IOWR_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_MEM_OP_REG, 
MEM_OP_UNLOCK_ALL_SECTORS); lock_val = STATUS_PROTECTION_DIS_VAL; } /* * poll the status register to know the * completion of the erase operation. */ timeout = ALTERA_QSPI_CONTROLLER2_1US_TIMEOUT_VALUE; while (timeout > 0) { /* wait for 1 usec */ k_busy_wait(1); /* * read flash flag status register before * checking the QSPI status */ IORD_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_FLAG_STATUS_REG); /* read QPSI status register */ status = IORD_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_STATUS_REG); if (((status >> STATUS_PROTECTION_POS) & STATUS_PROTECTION_MASK) == lock_val) { break; } timeout--; } if (timeout <= 0) { LOG_ERR("locking failed, status-reg 0x%x", status); rc = -EIO; } /* clear flag status register */ IOWR_32DIRECT(qspi_dev->csr_base, ALTERA_QSPI_CONTROLLER2_FLAG_STATUS_REG, 0x0); return rc; } static const struct flash_parameters * flash_nios2_qspi_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_nios2_qspi_parameters; } static const struct flash_driver_api flash_nios2_qspi_api = { .erase = flash_nios2_qspi_erase, .write = flash_nios2_qspi_write, .read = flash_nios2_qspi_read, .get_parameters = flash_nios2_qspi_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = (flash_api_pages_layout) flash_page_layout_not_implemented, #endif }; static int flash_nios2_qspi_init(const struct device *dev) { struct flash_nios2_qspi_config *flash_cfg = dev->data; k_sem_init(&flash_cfg->sem_lock, 1, 1); return 0; } struct flash_nios2_qspi_config flash_cfg = { .qspi_dev = { .data_base = EXT_FLASH_AVL_MEM_BASE, .data_end = EXT_FLASH_AVL_MEM_BASE + EXT_FLASH_AVL_MEM_SPAN, .csr_base = EXT_FLASH_AVL_CSR_BASE, .size_in_bytes = EXT_FLASH_AVL_MEM_SPAN, .is_epcs = EXT_FLASH_AVL_MEM_IS_EPCS, .number_of_sectors = EXT_FLASH_AVL_MEM_NUMBER_OF_SECTORS, .sector_size = EXT_FLASH_AVL_MEM_SECTOR_SIZE, .page_size = EXT_FLASH_AVL_MEM_PAGE_SIZE, } }; BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1, "only one 
'altr,nios2-qspi-nor' compatible node may be present"); DEVICE_DT_INST_DEFINE(0, flash_nios2_qspi_init, NULL, &flash_cfg, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_nios2_qspi_api); ```
/content/code_sandbox/drivers/flash/soc_flash_nios2_qspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,224
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_FLASH_FLASH_PRIV_H_ #define ZEPHYR_DRIVERS_FLASH_FLASH_PRIV_H_ #if defined(CONFIG_FLASH_PAGE_LAYOUT) static inline void flash_page_layout_not_implemented(void) { k_panic(); } #endif #endif ```
/content/code_sandbox/drivers/flash/flash_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
56
```objective-c /* * */ /* Flash opcodes */ #define FLASH_ANDES_CMD_WRSR 0x01 /* Write status register */ #define FLASH_ANDES_CMD_RDSR 0x05 /* Read status register */ #define FLASH_ANDES_CMD_READ 0x03 /* Read data */ #define FLASH_ANDES_CMD_4READ 0xEB /* Quad mode Read data*/ #define FLASH_ANDES_CMD_WREN 0x06 /* Write enable */ #define FLASH_ANDES_CMD_WRDI 0x04 /* Write disable */ #define FLASH_ANDES_CMD_PP 0x02 /* Page program */ #define FLASH_ANDES_CMD_4PP 0x38 /* Quad mode page program*/ #define FLASH_ANDES_CMD_SE 0x20 /* Sector erase */ #define FLASH_ANDES_CMD_BE_32K 0x52 /* Block erase 32KB */ #define FLASH_ANDES_CMD_BE 0xD8 /* Block erase */ #define FLASH_ANDES_CMD_CE 0xC7 /* Chip erase */ #define FLASH_ANDES_CMD_RDID 0x9F /* Read JEDEC ID */ #define FLASH_ANDES_CMD_ULBPR 0x98 /* Global Block Protection Unlock */ #define FLASH_ANDES_CMD_DPD 0xB9 /* Deep Power Down */ #define FLASH_ANDES_CMD_RDPD 0xAB /* Release from Deep Power Down */ /* Status register bits */ #define FLASH_ANDES_WIP_BIT BIT(0) /* Write in progress */ #define FLASH_ANDES_WEL_BIT BIT(1) /* Write enable latch */ #define FLASH_ANDES_QE_BIT BIT(6) #define QSPI_TFMAT(base) (base + 0x10) #define QSPI_TCTRL(base) (base + 0x20) #define QSPI_CMD(base) (base + 0x24) #define QSPI_ADDR(base) (base + 0x28) #define QSPI_DATA(base) (base + 0x2c) #define QSPI_CTRL(base) (base + 0x30) #define QSPI_STAT(base) (base + 0x34) #define QSPI_INTEN(base) (base + 0x38) #define QSPI_INTST(base) (base + 0x3c) #define QSPI_TIMIN(base) (base + 0x40) #define QSPI_CONFIG(base) (base + 0x7c) /* Field mask of SPI transfer format register */ #define TFMAT_DATA_LEN_OFFSET (8) #define TFMAT_ADDR_LEN_OFFSET (16) #define TFMAT_SLVMODE_MSK BIT(2) #define TFMAT_DATA_MERGE_MSK BIT(7) #define TFMAT_DATA_LEN_MSK GENMASK(12, 8) /* Field mask of SPI transfer control register */ #define TCTRL_RD_TCNT_OFFSET (0) #define TCTRL_DUMMY_CNT_OFFSET (9) #define TCTRL_WR_TCNT_OFFSET (12) #define TCTRL_DUAL_MODE_OFFSET (22) #define 
TCTRL_TRNS_MODE_OFFSET (24) #define TCTRL_TRNS_MODE_MSK GENMASK(27, 24) #define TCTRL_ADDR_FMT_MSK BIT(28) #define TCTRL_ADDR_EN_MSK BIT(29) #define TCTRL_CMD_EN_MSK BIT(30) /* Transfer mode */ #define TRNS_MODE_WRITE_READ (0 << TCTRL_TRNS_MODE_OFFSET) #define TRNS_MODE_WRITE_ONLY (1 << TCTRL_TRNS_MODE_OFFSET) #define TRNS_MODE_READ_ONLY (2 << TCTRL_TRNS_MODE_OFFSET) #define TRNS_MODE_NONE_DATA (7 << TCTRL_TRNS_MODE_OFFSET) #define TRNS_MODE_DUMMY_READ (9 << TCTRL_TRNS_MODE_OFFSET) /* Dual/Qual mode */ #define DUAL_IO_MODE (2 << TCTRL_DUAL_MODE_OFFSET) /* Dummy count */ /* In Qual mode, dummy count 3 implies 6 dummy cycles */ #define DUMMY_CNT_3 (0x2 << TCTRL_DUMMY_CNT_OFFSET) /* Field mask of SPI interrupt enable register */ #define IEN_RX_FIFO_MSK BIT(2) #define IEN_TX_FIFO_MSK BIT(3) #define IEN_END_MSK BIT(4) /* Field mask of SPI interrupt status register */ #define INTST_RX_FIFO_INT_MSK BIT(2) #define INTST_TX_FIFO_INT_MSK BIT(3) #define INTST_END_INT_MSK BIT(4) /* Field mask of SPI config register */ #define CFG_RX_FIFO_SIZE_MSK GENMASK(3, 0) #define CFG_TX_FIFO_SIZE_MSK GENMASK(7, 4) /* Field mask of SPI status register */ #define STAT_RX_NUM_MSK GENMASK(13, 8) #define STAT_TX_NUM_MSK GENMASK(21, 16) /* Field mask of SPI control register */ #define CTRL_RX_THRES_OFFSET (8) #define CTRL_TX_THRES_OFFSET (16) #define CTRL_RX_THRES_MSK GENMASK(15, 8) #define CTRL_TX_THRES_MSK GENMASK(23, 16) /* Field mask of SPI status register */ #define TIMIN_SCLK_DIV_MSK GENMASK(7, 0) #define TX_FIFO_THRESHOLD (1 << CTRL_TX_THRES_OFFSET) #define RX_FIFO_THRESHOLD (1 << CTRL_RX_THRES_OFFSET) #define MAX_TRANSFER_CNT (512) #define TX_FIFO_SIZE_SETTING(base) \ (sys_read32(QSPI_CONFIG(base)) & CFG_TX_FIFO_SIZE_MSK) #define TX_FIFO_SIZE(base) \ (2 << (TX_FIFO_SIZE_SETTING(base) >> 4)) #define RX_FIFO_SIZE_SETTING(base) \ (sys_read32(QSPI_CONFIG(base)) & CFG_RX_FIFO_SIZE_MSK) #define RX_FIFO_SIZE(base) \ (2 << (RX_FIFO_SIZE_SETTING(base) >> 0)) #define TX_NUM_STAT(base) 
(sys_read32(QSPI_STAT(base)) & STAT_TX_NUM_MSK) #define RX_NUM_STAT(base) (sys_read32(QSPI_STAT(base)) & STAT_RX_NUM_MSK) #define GET_TX_NUM(base) (TX_NUM_STAT(base) >> 16) #define GET_RX_NUM(base) (RX_NUM_STAT(base) >> 8) ```
/content/code_sandbox/drivers/flash/flash_andes_qspi.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,312
```c /* * an affiliate of Cypress Semiconductor Corporation * */ #define DT_DRV_COMPAT infineon_cat1_flash_controller #define SOC_NV_FLASH_NODE DT_PARENT(DT_INST(0, fixed_partitions)) #define PAGE_LEN DT_PROP(SOC_NV_FLASH_NODE, erase_block_size) #include <zephyr/kernel.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/flash.h> #include <zephyr/logging/log.h> #include "cyhal_flash.h" LOG_MODULE_REGISTER(flash_infineon_cat1, CONFIG_FLASH_LOG_LEVEL); /* Device config structure */ struct ifx_cat1_flash_config { uint32_t base_addr; uint32_t max_addr; }; /* Data structure */ struct ifx_cat1_flash_data { cyhal_flash_t flash_obj; struct k_sem sem; }; static struct flash_parameters ifx_cat1_flash_parameters = { .write_block_size = DT_PROP(SOC_NV_FLASH_NODE, write_block_size), .erase_value = 0x00, }; static inline void flash_ifx_sem_take(const struct device *dev) { struct ifx_cat1_flash_data *data = dev->data; k_sem_take(&data->sem, K_FOREVER); } static inline void flash_ifx_sem_give(const struct device *dev) { struct ifx_cat1_flash_data *data = dev->data; k_sem_give(&data->sem); } static int ifx_cat1_flash_read(const struct device *dev, off_t offset, void *data, size_t data_len) { struct ifx_cat1_flash_data *dev_data = dev->data; const struct ifx_cat1_flash_config *dev_config = dev->config; uint32_t read_offset = dev_config->base_addr + offset; cy_rslt_t rslt = CY_RSLT_SUCCESS; int ret = 0; flash_ifx_sem_take(dev); rslt = cyhal_flash_read(&dev_data->flash_obj, read_offset, (uint8_t *)data, data_len); if (rslt != CY_RSLT_SUCCESS) { LOG_ERR("Error reading @ 0x%x (Err:0x%x)", read_offset, rslt); ret = -EIO; goto out; } out: flash_ifx_sem_give(dev); return ret; } static int ifx_cat1_flash_write(const struct device *dev, off_t offset, const void *data, size_t data_len) { struct ifx_cat1_flash_data *dev_data = dev->data; const struct ifx_cat1_flash_config *dev_config = dev->config; uint32_t write_offset = dev_config->base_addr + (uint32_t)offset; const uint8_t *data_ptr 
= (const uint8_t *)data; cy_rslt_t rslt = CY_RSLT_SUCCESS; int ret = 0; if (data_len == 0) { return 0; } if ((offset < 0) || (data_len % PAGE_LEN != 0) || (offset % PAGE_LEN != 0)) { return -EINVAL; } flash_ifx_sem_take(dev); while (data_len) { rslt = cyhal_flash_write(&dev_data->flash_obj, write_offset, (const uint32_t *)data_ptr); if (rslt != CY_RSLT_SUCCESS) { LOG_ERR("Error in writing @ 0x%x (Err:0x%x)", write_offset, rslt); ret = -EIO; goto out; } data_ptr += PAGE_LEN; write_offset += PAGE_LEN; data_len -= PAGE_LEN; } out: flash_ifx_sem_give(dev); return ret; } static int ifx_cat1_flash_erase(const struct device *dev, off_t offset, size_t size) { struct ifx_cat1_flash_data *data = dev->data; const struct ifx_cat1_flash_config *config = dev->config; uint32_t erase_offset = config->base_addr + (uint32_t)offset; cy_rslt_t rslt; if ((offset < 0) || ((offset % PAGE_LEN) != 0)) { return -EINVAL; } if (((erase_offset + size) > config->max_addr) || ((size % PAGE_LEN) != 0)) { return -EINVAL; } while (size) { rslt = cyhal_flash_erase(&data->flash_obj, erase_offset); if (rslt != CY_RSLT_SUCCESS) { LOG_ERR("Error in erasing : 0x%x", rslt); return -EIO; } size -= PAGE_LEN; erase_offset += PAGE_LEN; } return 0; } #if CONFIG_FLASH_PAGE_LAYOUT static const struct flash_pages_layout ifx_cat1_flash_pages_layout = { .pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) / PAGE_LEN, .pages_size = PAGE_LEN, }; static void ifx_cat1_flash_page_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { *layout = &ifx_cat1_flash_pages_layout; /* * For flash memories which have uniform page sizes, this routine * returns an array of length 1, which specifies the page size and * number of pages in the memory. 
*/ *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static const struct flash_parameters *ifx_cat1_flash_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &ifx_cat1_flash_parameters; } static int ifx_cat1_flash_init(const struct device *dev) { struct ifx_cat1_flash_data *data = dev->data; cy_rslt_t rslt = CY_RSLT_SUCCESS; rslt = cyhal_flash_init(&data->flash_obj); if (rslt != CY_RSLT_SUCCESS) { LOG_ERR("Failed to init flash hal driver (Err:0x%x)", rslt); return -EIO; } k_sem_init(&data->sem, 1, 1); return 0; } static const struct flash_driver_api ifx_cat1_flash_driver_api = { .read = ifx_cat1_flash_read, .write = ifx_cat1_flash_write, .erase = ifx_cat1_flash_erase, .get_parameters = ifx_cat1_flash_get_parameters, #ifdef CONFIG_FLASH_PAGE_LAYOUT .page_layout = ifx_cat1_flash_page_layout, #endif }; static struct ifx_cat1_flash_data flash_data; static const struct ifx_cat1_flash_config ifx_cat1_flash_config = { .base_addr = DT_REG_ADDR(SOC_NV_FLASH_NODE), .max_addr = DT_REG_ADDR(SOC_NV_FLASH_NODE) + DT_REG_SIZE(SOC_NV_FLASH_NODE)}; DEVICE_DT_INST_DEFINE(0, ifx_cat1_flash_init, NULL, &flash_data, &ifx_cat1_flash_config, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &ifx_cat1_flash_driver_api); ```
/content/code_sandbox/drivers/flash/flash_ifx_cat1.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,486
```unknown config SOC_FLASH_RV32M1 bool "RV32M1 flash shim driver" default y depends on DT_HAS_OPENISA_RV32M1_FTFE_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_EXPLICIT_ERASE help Enables the RV32M1 flash shim driver. WARNING: This driver will disable the system interrupts for the duration of the flash erase/write operations. This will have an impact on the overall system performance - whether this is acceptable or not will depend on the use case. ```
/content/code_sandbox/drivers/flash/Kconfig.rv32m1
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
124
```c
/*
 *
 */

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <errno.h>
#include <zephyr/init.h>
#include <soc.h>
#include "flash_priv.h"

#include "fsl_common.h"
#include "fsl_flashiap.h"

/* Pick the matching FMC compatible; exactly one must be enabled in DT. */
#if DT_NODE_HAS_STATUS(DT_INST(0, nxp_iap_fmc11), okay)
#define DT_DRV_COMPAT nxp_iap_fmc11
#elif DT_NODE_HAS_STATUS(DT_INST(0, nxp_iap_fmc54), okay)
#define DT_DRV_COMPAT nxp_iap_fmc54
#else
#error No matching compatible for soc_flash_lpc.c
#endif

#define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash)

/* Per-device driver state. */
struct flash_priv {
	/* HACK: flash write protection is managed in software. */
	struct k_sem write_lock;
	/* Base address of the program flash block (from DT). */
	uint32_t pflash_block_base;
	/* IAP sector size, set from SoC feature macro at init. */
	uint32_t sector_size;
};

static const struct flash_parameters flash_lpc_parameters = {
#if DT_NODE_HAS_PROP(SOC_NV_FLASH_NODE, write_block_size)
	.write_block_size = DT_PROP(SOC_NV_FLASH_NODE, write_block_size),
#else
	.write_block_size = FSL_FEATURE_FLASH_PFLASH_BLOCK_WRITE_UNIT_SIZE,
#endif
	.erase_value = 0xff,
};

/*
 * Unlock the sector range covering [offset, offset+len) for a following
 * IAP erase or write; sector indices are derived from sector_size.
 */
static inline void prepare_erase_write(off_t offset, size_t len,
				       uint32_t sector_size)
{
	uint32_t start;
	uint32_t stop;

	start = offset / sector_size;
	stop = (offset+len-1) / sector_size;
	FLASHIAP_PrepareSectorForWrite(start, stop);
}

/*
 * Erase [offset, offset+len). Interrupts are locked for the duration of
 * the IAP call. Page indices here are derived from write_block_size,
 * not sector_size. Returns 0 on success, -EINVAL on IAP failure.
 */
static int flash_lpc_erase(const struct device *dev, off_t offset,
			   size_t len)
{
	struct flash_priv *priv = dev->data;
	status_t rc;
	unsigned int key;
	uint32_t start;
	uint32_t stop;
	uint32_t page_size;

	/* K_FOREVER take: the nonzero-return path is effectively unreachable. */
	if (k_sem_take(&priv->write_lock, K_FOREVER)) {
		return -EACCES;
	}

	key = irq_lock();

	prepare_erase_write(offset, len, priv->sector_size);

	page_size = flash_lpc_parameters.write_block_size;
	start = offset / page_size;
	stop = (offset+len-1) / page_size;

	rc = FLASHIAP_ErasePage(start, stop,
				CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);

	irq_unlock(key);

	k_sem_give(&priv->write_lock);

	return (rc == kStatus_FLASHIAP_Success) ? 0 : -EINVAL;
}

/*
 * Read is a plain memcpy: the program flash is memory mapped at
 * pflash_block_base. Always returns 0.
 */
static int flash_lpc_read(const struct device *dev, off_t offset,
				void *data, size_t len)
{
	struct flash_priv *priv = dev->data;
	uint32_t addr;

	addr = offset + priv->pflash_block_base;

	memcpy(data, (void *) addr, len);

	return 0;
}

/*
 * Program len bytes at offset via IAP, under the software write lock and
 * with interrupts disabled. Returns 0 on success, -EINVAL on IAP failure.
 */
static int flash_lpc_write(const struct device *dev, off_t offset,
				const void *data, size_t len)
{
	struct flash_priv *priv = dev->data;
	uint32_t addr;
	status_t rc;
	unsigned int key;

	if (k_sem_take(&priv->write_lock, K_FOREVER)) {
		return -EACCES;
	}

	addr = offset + priv->pflash_block_base;

	key = irq_lock();

	prepare_erase_write(offset, len, priv->sector_size);

	rc = FLASHIAP_CopyRamToFlash(addr, (uint32_t *) data, len,
				CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);

	irq_unlock(key);

	k_sem_give(&priv->write_lock);

	return (rc == kStatus_FLASHIAP_Success) ? 0 : -EINVAL;
}

#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/* Uniform page layout derived from the DT erase-block-size. */
static const struct flash_pages_layout dev_layout = {
	.pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) /
				DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
	.pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
};

static void flash_lpc_pages_layout(const struct device *dev,
				   const struct flash_pages_layout **layout,
				   size_t *layout_size)
{
	*layout = &dev_layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

static const struct flash_parameters *
flash_lpc_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_lpc_parameters;
}

static struct flash_priv flash_data;

static const struct flash_driver_api flash_lpc_api = {
	.erase = flash_lpc_erase,
	.write = flash_lpc_write,
	.read = flash_lpc_read,
	.get_parameters = flash_lpc_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = flash_lpc_pages_layout,
#endif
};

/* One-time init: set up the write lock and cache base/sector size. */
static int flash_lpc_init(const struct device *dev)
{
	struct flash_priv *priv = dev->data;

	k_sem_init(&priv->write_lock, 1, 1);

	priv->pflash_block_base = DT_REG_ADDR(SOC_NV_FLASH_NODE);

#if defined(FSL_FEATURE_SYSCON_FLASH_SECTOR_SIZE_BYTES)
	priv->sector_size = FSL_FEATURE_SYSCON_FLASH_SECTOR_SIZE_BYTES;
#else
#error "Sector size not set"
#endif

	return 0;
}

DEVICE_DT_INST_DEFINE(0, flash_lpc_init, NULL,
			&flash_data, NULL, POST_KERNEL,
			CONFIG_FLASH_INIT_PRIORITY, &flash_lpc_api);
```
/content/code_sandbox/drivers/flash/soc_flash_lpc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,127
```objective-c
/*
 *
 */

#ifndef ZEPHYR_DRIVERS_FLASH_FLASH_GD32_H_
#define ZEPHYR_DRIVERS_FLASH_FLASH_GD32_H_

#include <stdint.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/flash.h>

/* Devicetree shorthands for the soc-nv-flash node. */
#define SOC_NV_FLASH_NODE	DT_INST(0, soc_nv_flash)
#define SOC_NV_FLASH_SIZE	DT_REG_SIZE(SOC_NV_FLASH_NODE)
#define SOC_NV_FLASH_ADDR	DT_REG_ADDR(SOC_NV_FLASH_NODE)
#define SOC_NV_FLASH_PRG_SIZE	DT_PROP(SOC_NV_FLASH_NODE, write_block_size)

/* Programming unit type selected from the DT write-block-size. */
#if (4 == SOC_NV_FLASH_PRG_SIZE)
typedef uint32_t flash_prg_t;
#elif (2 == SOC_NV_FLASH_PRG_SIZE)
typedef uint16_t flash_prg_t;
#elif (1 == SOC_NV_FLASH_PRG_SIZE)
typedef uint8_t flash_prg_t;
#else
#error "Invalid write-block-size value in FMC DTS"
#endif

/* Helper for conditional compilation directives, KB cannot be used
 * because it has type casting.
 */
#define PRE_KB(x) ((x) << 10)

/* Check that [offset, offset+len) is a valid range for read or
 * (when @p write is true) for write/erase.
 */
bool flash_gd32_valid_range(off_t offset, uint32_t len, bool write);

/* Program @p len bytes from @p data at @p offset. */
int flash_gd32_write_range(off_t offset, const void *data, size_t len);

/* Erase @p size bytes starting at @p offset. */
int flash_gd32_erase_block(off_t offset, size_t size);

#ifdef CONFIG_FLASH_PAGE_LAYOUT
void flash_gd32_pages_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size);
#endif

#endif /* ZEPHYR_DRIVERS_FLASH_FLASH_GD32_H_ */
```
/content/code_sandbox/drivers/flash/flash_gd32.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
338
```unknown # STM32 Octo SPI flash driver configuration options DT_STM32_OCTOSPI_1_HAS_DMA := $(dt_nodelabel_has_prop,octospi1,dmas) DT_STM32_OCTOSPI_2_HAS_DMA := $(dt_nodelabel_has_prop,octospi2,dmas) config FLASH_STM32_OSPI bool "STM32 Octo SPI Flash driver" default y depends on DT_HAS_ST_STM32_OSPI_NOR_ENABLED select USE_STM32_HAL_OSPI if !SOC_SERIES_STM32H5X select USE_STM32_HAL_XSPI if SOC_SERIES_STM32H5X select USE_STM32_LL_DLYB if (SOC_SERIES_STM32H5X || SOC_SERIES_STM32U5X) select USE_STM32_HAL_MDMA if SOC_SERIES_STM32H7X select FLASH_HAS_DRIVER_ENABLED select FLASH_JESD216 select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_EXPLICIT_ERASE select DMA if $(DT_STM32_OCTOSPI_1_HAS_DMA) || $(DT_STM32_OCTOSPI_2_HAS_DMA) select USE_STM32_HAL_DMA if $(DT_STM32_OCTOSPI_1_HAS_DMA) || \ $(DT_STM32_OCTOSPI_2_HAS_DMA) select USE_STM32_HAL_DMA_EX if SOC_SERIES_STM32U5X && \ ($(DT_STM32_OCTOSPI_1_HAS_DMA) || \ $(DT_STM32_OCTOSPI_2_HAS_DMA)) help Enable OSPI-NOR support on the STM32 family of processors. ```
/content/code_sandbox/drivers/flash/Kconfig.stm32_ospi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
362
```c /* * */ #define DT_DRV_COMPAT nxp_imx_flexspi_nor #include <zephyr/kernel.h> #include <zephyr/drivers/flash.h> #include <zephyr/irq.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include "spi_nor.h" #include "jesd216.h" #include "memc_mcux_flexspi.h" #ifdef CONFIG_HAS_MCUX_CACHE #include <fsl_cache.h> #endif #define NOR_WRITE_SIZE 1 #define NOR_ERASE_VALUE 0xff #ifdef CONFIG_FLASH_MCUX_FLEXSPI_NOR_WRITE_BUFFER static uint8_t nor_write_buf[SPI_NOR_PAGE_SIZE]; #endif /* * NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, Any external functions * called while interacting with the flexspi MUST be relocated to SRAM or ITCM * at runtime, so that the chip does not access the flexspi to read program * instructions while it is being written to * * Additionally, no data used by this driver should be stored in flash. */ #if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_FLASH_LOG_LEVEL > 0) #warning "Enabling flash driver logging and XIP mode simultaneously can cause \ read-while-write hazards. This configuration is not recommended." #endif LOG_MODULE_REGISTER(flash_flexspi_nor, CONFIG_FLASH_LOG_LEVEL); enum { READ, PAGE_PROGRAM, READ_STATUS, WRITE_ENABLE, ERASE_SECTOR, ERASE_BLOCK, READ_ID, READ_STATUS_REG, ERASE_CHIP, READ_JESD216, /* Entries after this should be for scratch commands */ FLEXSPI_INSTR_PROG_END, /* Used for temporary commands during initialization */ SCRATCH_CMD = FLEXSPI_INSTR_PROG_END, SCRATCH_CMD2, /* Must be last entry */ FLEXSPI_INSTR_END, }; struct flash_flexspi_nor_config { /* Note: don't use this controller reference in code. 
It is * only used during init to copy the device structure from ROM * into a RAM structure */ const struct device *controller; }; /* Device variables used in critical sections should be in this structure */ struct flash_flexspi_nor_data { struct device controller; flexspi_device_config_t config; flexspi_port_t port; bool legacy_poll; struct flash_pages_layout layout; struct flash_parameters flash_parameters; }; /* Initial LUT table */ static const uint32_t flash_flexspi_nor_base_lut[][MEMC_FLEXSPI_CMD_PER_SEQ] = { /* 1S-1S-1S flash read command, should be compatible with all SPI nor flashes */ [READ] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_READ, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 24), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x1, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0), }, [READ_JESD216] = { /* Install read SFDP command */ FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, JESD216_CMD_READ_SFDP, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 24), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DUMMY_SDR, kFLEXSPI_1PAD, 8, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x4), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0), }, /* Standard 1S-1S-1S flash write command, can be switched to 1S-1S-4S when QE is set */ [PAGE_PROGRAM] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_PP, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 0x18), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_1PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0), }, [WRITE_ENABLE] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_WREN, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0), }, [ERASE_SECTOR] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_SE, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 0x18), }, [ERASE_BLOCK] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_BE, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 0x18), }, [ERASE_CHIP] = 
{ FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_CE, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0), }, [READ_ID] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_RDID, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x01), }, [READ_STATUS_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_RDSR, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x01), }, }; /* Helper so we can read flash ID without flash access for XIP */ static int flash_flexspi_nor_read_id_helper(struct flash_flexspi_nor_data *data, uint8_t *vendor_id) { uint32_t buffer = 0; int ret; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_ID, .data = &buffer, .dataSize = 3, }; LOG_DBG("Reading id"); ret = memc_flexspi_transfer(&data->controller, &transfer); if (ret < 0) { return ret; } memcpy(vendor_id, &buffer, 3); return ret; } static int flash_flexspi_nor_read_id(const struct device *dev, uint8_t *vendor_id) { struct flash_flexspi_nor_data *data = dev->data; return flash_flexspi_nor_read_id_helper(data, vendor_id); } static int flash_flexspi_nor_read_status(struct flash_flexspi_nor_data *data, uint32_t *status) { flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_STATUS_REG, .data = status, .dataSize = 1, }; LOG_DBG("Reading status register"); return memc_flexspi_transfer(&data->controller, &transfer); } static int flash_flexspi_nor_write_enable(struct flash_flexspi_nor_data *data) { flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .cmdType = kFLEXSPI_Command, .SeqNumber = 1, .seqIndex = WRITE_ENABLE, .data = NULL, .dataSize = 0, }; LOG_DBG("Enabling write"); return memc_flexspi_transfer(&data->controller, &transfer); } static int flash_flexspi_nor_erase_sector(struct flash_flexspi_nor_data *data, off_t offset) { flexspi_transfer_t transfer = { .deviceAddress = offset, .port = 
data->port, .cmdType = kFLEXSPI_Command, .SeqNumber = 1, .seqIndex = ERASE_SECTOR, .data = NULL, .dataSize = 0, }; LOG_DBG("Erasing sector at 0x%08zx", (ssize_t) offset); return memc_flexspi_transfer(&data->controller, &transfer); } static int flash_flexspi_nor_erase_block(struct flash_flexspi_nor_data *data, off_t offset) { flexspi_transfer_t transfer = { .deviceAddress = offset, .port = data->port, .cmdType = kFLEXSPI_Command, .SeqNumber = 1, .seqIndex = ERASE_BLOCK, .data = NULL, .dataSize = 0, }; LOG_DBG("Erasing block at 0x%08zx", (ssize_t) offset); return memc_flexspi_transfer(&data->controller, &transfer); } static int flash_flexspi_nor_erase_chip(struct flash_flexspi_nor_data *data) { flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .cmdType = kFLEXSPI_Command, .SeqNumber = 1, .seqIndex = ERASE_CHIP, .data = NULL, .dataSize = 0, }; LOG_DBG("Erasing chip"); return memc_flexspi_transfer(&data->controller, &transfer); } static int flash_flexspi_nor_page_program(struct flash_flexspi_nor_data *data, off_t offset, const void *buffer, size_t len) { flexspi_transfer_t transfer = { .deviceAddress = offset, .port = data->port, .cmdType = kFLEXSPI_Write, .SeqNumber = 1, .seqIndex = PAGE_PROGRAM, .data = (uint32_t *) buffer, .dataSize = len, }; LOG_DBG("Page programming %d bytes to 0x%08zx", len, (ssize_t) offset); return memc_flexspi_transfer(&data->controller, &transfer); } static int flash_flexspi_nor_wait_bus_busy(struct flash_flexspi_nor_data *data) { uint32_t status = 0; int ret; while (1) { ret = flash_flexspi_nor_read_status(data, &status); LOG_DBG("status: 0x%x", status); if (ret) { LOG_ERR("Could not read status"); return ret; } if (data->legacy_poll) { if ((status & BIT(0)) == 0) { break; } } else { if (status & BIT(7)) { break; } } } return 0; } static int flash_flexspi_nor_read(const struct device *dev, off_t offset, void *buffer, size_t len) { struct flash_flexspi_nor_data *data = dev->data; uint8_t *src = 
memc_flexspi_get_ahb_address(&data->controller, data->port, offset); memcpy(buffer, src, len); return 0; } static int flash_flexspi_nor_write(const struct device *dev, off_t offset, const void *buffer, size_t len) { struct flash_flexspi_nor_data *data = dev->data; size_t size = len; uint8_t *src = (uint8_t *) buffer; int i; unsigned int key = 0; uint8_t *dst = memc_flexspi_get_ahb_address(&data->controller, data->port, offset); if (memc_flexspi_is_running_xip(&data->controller)) { /* * ==== ENTER CRITICAL SECTION ==== * No flash access should be performed in critical section. All * code and data accessed must reside in ram. */ key = irq_lock(); } while (len) { /* If the offset isn't a multiple of the NOR page size, we first need * to write the remaining part that fits, otherwise the write could * be wrapped around within the same page */ i = MIN(SPI_NOR_PAGE_SIZE - (offset % SPI_NOR_PAGE_SIZE), len); #ifdef CONFIG_FLASH_MCUX_FLEXSPI_NOR_WRITE_BUFFER memcpy(nor_write_buf, src, i); #endif flash_flexspi_nor_write_enable(data); #ifdef CONFIG_FLASH_MCUX_FLEXSPI_NOR_WRITE_BUFFER flash_flexspi_nor_page_program(data, offset, nor_write_buf, i); #else flash_flexspi_nor_page_program(data, offset, src, i); #endif flash_flexspi_nor_wait_bus_busy(data); memc_flexspi_reset(&data->controller); src += i; offset += i; len -= i; } if (memc_flexspi_is_running_xip(&data->controller)) { /* ==== EXIT CRITICAL SECTION ==== */ irq_unlock(key); } #ifdef CONFIG_HAS_MCUX_CACHE DCACHE_InvalidateByRange((uint32_t) dst, size); #endif return 0; } static int flash_flexspi_nor_erase(const struct device *dev, off_t offset, size_t size) { struct flash_flexspi_nor_data *data = dev->data; const size_t num_sectors = size / SPI_NOR_SECTOR_SIZE; const size_t num_blocks = size / SPI_NOR_BLOCK_SIZE; int i; unsigned int key = 0; uint8_t *dst = memc_flexspi_get_ahb_address(&data->controller, data->port, offset); if (offset % SPI_NOR_SECTOR_SIZE) { LOG_ERR("Invalid offset"); return -EINVAL; } if (size % 
SPI_NOR_SECTOR_SIZE) { LOG_ERR("Invalid size"); return -EINVAL; } if (memc_flexspi_is_running_xip(&data->controller)) { /* * ==== ENTER CRITICAL SECTION ==== * No flash access should be performed in critical section. All * code and data accessed must reside in ram. */ key = irq_lock(); } if ((offset == 0) && (size == data->config.flashSize * KB(1))) { flash_flexspi_nor_write_enable(data); flash_flexspi_nor_erase_chip(data); flash_flexspi_nor_wait_bus_busy(data); memc_flexspi_reset(&data->controller); } else if ((0 == (offset % SPI_NOR_BLOCK_SIZE)) && (0 == (size % SPI_NOR_BLOCK_SIZE))) { for (i = 0; i < num_blocks; i++) { flash_flexspi_nor_write_enable(data); flash_flexspi_nor_erase_block(data, offset); flash_flexspi_nor_wait_bus_busy(data); memc_flexspi_reset(&data->controller); offset += SPI_NOR_BLOCK_SIZE; } } else { for (i = 0; i < num_sectors; i++) { flash_flexspi_nor_write_enable(data); flash_flexspi_nor_erase_sector(data, offset); flash_flexspi_nor_wait_bus_busy(data); memc_flexspi_reset(&data->controller); offset += SPI_NOR_SECTOR_SIZE; } } if (memc_flexspi_is_running_xip(&data->controller)) { /* ==== EXIT CRITICAL SECTION ==== */ irq_unlock(key); } #ifdef CONFIG_HAS_MCUX_CACHE DCACHE_InvalidateByRange((uint32_t) dst, size); #endif return 0; } static const struct flash_parameters *flash_flexspi_nor_get_parameters( const struct device *dev) { struct flash_flexspi_nor_data *data = dev->data; return &data->flash_parameters; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static void flash_flexspi_nor_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { struct flash_flexspi_nor_data *data = dev->data; *layout = &data->layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ /* * This function enables quad mode, when supported. Otherwise it * returns an error. 
* @param dev: Flexspi device * @param flexspi_lut: flexspi lut table, useful if instruction writes are needed * @param qer: DW15 quad enable parameter * @return 0 if quad mode was entered, or -ENOTSUP if quad mode is not supported */ static int flash_flexspi_nor_quad_enable(struct flash_flexspi_nor_data *data, uint32_t (*flexspi_lut)[MEMC_FLEXSPI_CMD_PER_SEQ], uint8_t qer) { int ret; uint32_t buffer = 0; uint16_t bit = 0; uint8_t rd_size, wr_size; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .SeqNumber = 1, .data = &buffer, }; flexspi_device_config_t config = { .flexspiRootClk = MHZ(50), .flashSize = FLEXSPI_FLSHCR0_FLSHSZ_MASK, /* Max flash size */ .ARDSeqNumber = 1, .ARDSeqIndex = READ, }; switch (qer) { case JESD216_DW15_QER_VAL_NONE: /* No init needed */ return 0; case JESD216_DW15_QER_VAL_S2B1v1: case JESD216_DW15_QER_VAL_S2B1v4: /* Install read and write status command */ flexspi_lut[SCRATCH_CMD][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_RDSR, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x1); flexspi_lut[SCRATCH_CMD2][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_WRSR, kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_1PAD, 0x1); /* Set bit 1 of status register 2 */ bit = BIT(9); rd_size = 2; wr_size = 2; break; case JESD216_DW15_QER_VAL_S1B6: /* Install read and write status command */ flexspi_lut[SCRATCH_CMD][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_RDSR, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x1); flexspi_lut[SCRATCH_CMD2][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_WRSR, kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_1PAD, 0x1); /* Set bit 6 of status register 1 */ bit = BIT(6); rd_size = 1; wr_size = 1; break; case JESD216_DW15_QER_VAL_S2B7: /* Install read and write status command */ flexspi_lut[SCRATCH_CMD][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0x3F, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x1); 
flexspi_lut[SCRATCH_CMD2][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0x3E, kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_1PAD, 0x1); /* Set bit 7 of status register 2 */ bit = BIT(7); rd_size = 1; wr_size = 1; break; case JESD216_DW15_QER_VAL_S2B1v5: /* Install read and write status command */ flexspi_lut[SCRATCH_CMD][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_RDSR2, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x1); flexspi_lut[SCRATCH_CMD2][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_WRSR, kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_1PAD, 0x1); /* Set bit 1 of status register 2 */ bit = BIT(9); rd_size = 1; wr_size = 2; break; case JESD216_DW15_QER_VAL_S2B1v6: /* Install read and write status command */ flexspi_lut[SCRATCH_CMD][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_RDSR2, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x1); flexspi_lut[SCRATCH_CMD2][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_WRSR2, kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_1PAD, 0x1); /* Set bit 7 of status register 2 */ bit = BIT(7); rd_size = 1; wr_size = 1; break; default: return -ENOTSUP; } ret = memc_flexspi_set_device_config(&data->controller, &config, (uint32_t *)flexspi_lut, FLEXSPI_INSTR_END * MEMC_FLEXSPI_CMD_PER_SEQ, data->port); if (ret < 0) { return ret; } transfer.dataSize = rd_size; transfer.seqIndex = SCRATCH_CMD; transfer.cmdType = kFLEXSPI_Read; /* Read status register */ ret = memc_flexspi_transfer(&data->controller, &transfer); if (ret < 0) { return ret; } buffer |= bit; transfer.dataSize = wr_size; transfer.seqIndex = SCRATCH_CMD2; transfer.cmdType = kFLEXSPI_Write; return memc_flexspi_transfer(&data->controller, &transfer); } /* * This function enables 4 byte addressing, when supported. Otherwise it * returns an error. 
* @param dev: Flexspi device * @param flexspi_lut: flexspi lut table, useful if instruction writes are needed * @param en4b: DW16 enable 4 byte mode parameter * @return 0 if 4 byte mode was entered, or -ENOTSUP if 4 byte mode was not supported */ static int flash_flexspi_nor_4byte_enable(struct flash_flexspi_nor_data *data, uint32_t (*flexspi_lut)[MEMC_FLEXSPI_CMD_PER_SEQ], uint32_t en4b) { int ret; uint32_t buffer = 0; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .SeqNumber = 1, .data = &buffer, }; flexspi_device_config_t config = { .flexspiRootClk = MHZ(50), .flashSize = FLEXSPI_FLSHCR0_FLSHSZ_MASK, /* Max flash size */ .ARDSeqNumber = 1, .ARDSeqIndex = READ, }; if (en4b & BIT(6)) { /* Flash is always in 4 byte mode. We just need to configure LUT */ return 0; } else if (en4b & BIT(5)) { /* Dedicated vendor instruction set, which we don't support. Exit here */ return -ENOTSUP; } else if (en4b & BIT(4)) { /* Set bit 0 of 16 bit configuration register */ flexspi_lut[SCRATCH_CMD][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0xB5, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x1); flexspi_lut[SCRATCH_CMD2][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0xB1, kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_1PAD, 0x1); ret = memc_flexspi_set_device_config(&data->controller, &config, (uint32_t *)flexspi_lut, FLEXSPI_INSTR_END * MEMC_FLEXSPI_CMD_PER_SEQ, data->port); if (ret < 0) { return ret; } transfer.dataSize = 2; transfer.seqIndex = SCRATCH_CMD; transfer.cmdType = kFLEXSPI_Read; /* Read config register */ ret = memc_flexspi_transfer(&data->controller, &transfer); if (ret < 0) { return ret; } buffer |= BIT(0); /* Set config register */ transfer.seqIndex = SCRATCH_CMD2; transfer.cmdType = kFLEXSPI_Read; return memc_flexspi_transfer(&data->controller, &transfer); } else if (en4b & BIT(1)) { /* Issue write enable, then instruction 0xB7 */ flash_flexspi_nor_write_enable(data); flexspi_lut[SCRATCH_CMD][0] = FLEXSPI_LUT_SEQ( 
kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0xB7, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0); ret = memc_flexspi_set_device_config(&data->controller, &config, (uint32_t *)flexspi_lut, FLEXSPI_INSTR_END * MEMC_FLEXSPI_CMD_PER_SEQ, data->port); if (ret < 0) { return ret; } transfer.dataSize = 0; transfer.seqIndex = SCRATCH_CMD; transfer.cmdType = kFLEXSPI_Command; return memc_flexspi_transfer(&data->controller, &transfer); } else if (en4b & BIT(0)) { /* Issue instruction 0xB7 */ flexspi_lut[SCRATCH_CMD][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0xB7, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0); ret = memc_flexspi_set_device_config(&data->controller, &config, (uint32_t *)flexspi_lut, FLEXSPI_INSTR_END * MEMC_FLEXSPI_CMD_PER_SEQ, data->port); if (ret < 0) { return ret; } transfer.dataSize = 0; transfer.seqIndex = SCRATCH_CMD; transfer.cmdType = kFLEXSPI_Command; return memc_flexspi_transfer(&data->controller, &transfer); } /* Other methods not supported */ return -ENOTSUP; } /* * This function configures the FlexSPI to manage the flash device * based on values in SFDP header * @param data: Flexspi device data * @param header: SFDP header for flash * @param bfp: basic flash parameters for flash * @param flexspi_lut: LUT table, filled with READ LUT command * @return 0 on success, or negative value on error */ static int flash_flexspi_nor_config_flash(struct flash_flexspi_nor_data *data, struct jesd216_sfdp_header *header, struct jesd216_bfp *bfp, uint32_t (*flexspi_lut)[MEMC_FLEXSPI_CMD_PER_SEQ]) { struct jesd216_instr instr; struct jesd216_bfp_dw16 dw16; struct jesd216_bfp_dw15 dw15; struct jesd216_bfp_dw14 dw14; uint8_t addr_width; uint8_t mode_cmd; int ret; addr_width = jesd216_bfp_addrbytes(bfp) == JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_4B ? 
32 : 24; /* Check to see if we can enable 4 byte addressing */ ret = jesd216_bfp_decode_dw16(&header->phdr[0], bfp, &dw16); if (ret == 0) { /* Attempt to enable 4 byte addressing */ ret = flash_flexspi_nor_4byte_enable(data, flexspi_lut, dw16.enter_4ba); if (ret == 0) { /* Use 4 byte address width */ addr_width = 32; /* Update LUT for ERASE_SECTOR and ERASE_BLOCK to use 32 bit addr */ flexspi_lut[ERASE_SECTOR][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_SE, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, addr_width); flexspi_lut[ERASE_BLOCK][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_BE, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, addr_width); } } /* Extract the read command. * Note- enhanced XIP not currently supported, nor is 4-4-4 mode. */ if (jesd216_bfp_read_support(&header->phdr[0], bfp, JESD216_MODE_144, &instr) > 0) { LOG_DBG("Enable 144 mode"); /* Configure for 144 QUAD read mode */ if (instr.mode_clocks == 2) { mode_cmd = kFLEXSPI_Command_MODE8_SDR; } else if (instr.mode_clocks == 1) { mode_cmd = kFLEXSPI_Command_MODE4_SDR; } else if (instr.mode_clocks == 0) { /* Just send dummy cycles during mode clock period */ mode_cmd = kFLEXSPI_Command_DUMMY_SDR; } else { return -ENOTSUP; } flexspi_lut[READ][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, instr.instr, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_4PAD, addr_width); /* Note- we always set mode bits to 0x0 */ flexspi_lut[READ][1] = FLEXSPI_LUT_SEQ( mode_cmd, kFLEXSPI_4PAD, 0x00, kFLEXSPI_Command_DUMMY_SDR, kFLEXSPI_4PAD, instr.wait_states); flexspi_lut[READ][2] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_READ_SDR, kFLEXSPI_4PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0); /* Read 1S-4S-4S enable method */ ret = jesd216_bfp_decode_dw15(&header->phdr[0], bfp, &dw15); if (ret == 0) { ret = flash_flexspi_nor_quad_enable(data, flexspi_lut, dw15.qer); if (ret == 0) { /* Now, install 1S-1S-4S page program command */ flexspi_lut[PAGE_PROGRAM][0] = 
FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_PP_1_1_4, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, addr_width); flexspi_lut[PAGE_PROGRAM][1] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_4PAD, 0x4, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0); } } } else if (jesd216_bfp_read_support(&header->phdr[0], bfp, JESD216_MODE_122, &instr) > 0) { LOG_DBG("Enable 122 mode"); if (instr.mode_clocks == 4) { mode_cmd = kFLEXSPI_Command_MODE8_SDR; } else if (instr.mode_clocks == 2) { mode_cmd = kFLEXSPI_Command_MODE4_SDR; } else if (instr.mode_clocks == 1) { mode_cmd = kFLEXSPI_Command_MODE2_SDR; } else if (instr.mode_clocks == 0) { /* Just send dummy cycles during mode clock period */ mode_cmd = kFLEXSPI_Command_DUMMY_SDR; } else { return -ENOTSUP; } flexspi_lut[READ][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, instr.instr, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_2PAD, addr_width); /* Note- we always set mode bits to 0x0 */ flexspi_lut[READ][1] = FLEXSPI_LUT_SEQ( mode_cmd, kFLEXSPI_2PAD, 0x0, kFLEXSPI_Command_DUMMY_SDR, kFLEXSPI_2PAD, instr.wait_states); flexspi_lut[READ][2] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_READ_SDR, kFLEXSPI_2PAD, 0x02, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0); /* Now, install 1S-1S-2S page program command */ flexspi_lut[PAGE_PROGRAM][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_PP_1_1_2, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, addr_width); flexspi_lut[PAGE_PROGRAM][1] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_2PAD, 0x4, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0); } /* Default to 111 mode if no support exists, leave READ/WRITE untouched */ /* Now, read DW14 to determine the polling method we should use while programming */ ret = jesd216_bfp_decode_dw14(&header->phdr[0], bfp, &dw14); if (ret < 0) { /* Default to legacy polling mode */ dw14.poll_options = 0x0; } if (dw14.poll_options & BIT(1)) { /* Read instruction used for polling is 0x70 */ data->legacy_poll = false; 
flexspi_lut[READ_STATUS_REG][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0x70, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x01); } else { /* Read instruction used for polling is 0x05 */ data->legacy_poll = true; flexspi_lut[READ_STATUS_REG][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_RDSR, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x01); } return 0; } /* Helper so we can avoid flash access while performing SFDP probe */ static int flash_flexspi_nor_sfdp_read_helper(struct flash_flexspi_nor_data *dev_data, off_t offset, void *data, size_t len) { flexspi_transfer_t transfer = { .deviceAddress = offset, .port = dev_data->port, .cmdType = kFLEXSPI_Read, .seqIndex = READ_JESD216, .SeqNumber = 1, .data = (uint32_t *)data, .dataSize = len, }; /* Get SFDP data */ return memc_flexspi_transfer(&dev_data->controller, &transfer); } #if defined(CONFIG_FLASH_JESD216_API) static int flash_flexspi_nor_sfdp_read(const struct device *dev, off_t offset, void *data, size_t len) { struct flash_flexspi_nor_data *dev_data = dev->data; return flash_flexspi_nor_sfdp_read_helper(dev_data, offset, data, len); } #endif /* Checks JEDEC ID of flash. 
If supported, installs custom LUT table */ static int flash_flexspi_nor_check_jedec(struct flash_flexspi_nor_data *data, uint32_t (*flexspi_lut)[MEMC_FLEXSPI_CMD_PER_SEQ]) { int ret; uint32_t vendor_id; ret = flash_flexspi_nor_read_id_helper(data, (uint8_t *)&vendor_id); if (ret < 0) { return ret; } /* Switch on manufacturer and vendor ID */ switch (vendor_id & 0xFFFF) { case 0x25C2: /* MX25 flash, use 4 byte read/write */ flexspi_lut[READ][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_4READ_4B, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_4PAD, 32); /* Flash needs 10 dummy cycles */ flexspi_lut[READ][1] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_DUMMY_SDR, kFLEXSPI_4PAD, 10, kFLEXSPI_Command_READ_SDR, kFLEXSPI_4PAD, 0x04); /* Only 1S-4S-4S page program supported */ flexspi_lut[PAGE_PROGRAM][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_PP_1_4_4_4B, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_4PAD, 32); flexspi_lut[PAGE_PROGRAM][1] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_4PAD, 0x4, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0); /* Update ERASE commands for 4 byte mode */ flexspi_lut[ERASE_SECTOR][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_SE_4B, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 32); flexspi_lut[ERASE_BLOCK][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0xDC, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 32), /* Read instruction used for polling is 0x05 */ data->legacy_poll = true; flexspi_lut[READ_STATUS_REG][0] = FLEXSPI_LUT_SEQ( kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, SPI_NOR_CMD_RDSR, kFLEXSPI_Command_READ_SDR, kFLEXSPI_1PAD, 0x01); /* Device uses bit 6 of status reg 1 for QE */ return flash_flexspi_nor_quad_enable(data, flexspi_lut, JESD216_DW15_QER_VAL_S1B6); default: return -ENOTSUP; } } /* Probe parameters from flash SFDP header, and use them to configure the FlexSPI */ static int flash_flexspi_nor_probe(struct flash_flexspi_nor_data *data) { uint32_t 
flexspi_lut[FLEXSPI_INSTR_END][MEMC_FLEXSPI_CMD_PER_SEQ] = {0}; /* JESD216B defines up to 23 basic flash parameters */ uint32_t param_buf[23]; /* Space to store SFDP header and first parameter header */ uint8_t sfdp_buf[JESD216_SFDP_SIZE(1)] __aligned(4); struct jesd216_bfp *bfp = (struct jesd216_bfp *)param_buf; struct jesd216_sfdp_header *header = (struct jesd216_sfdp_header *)sfdp_buf; int ret; unsigned int key = 0U; flexspi_device_config_t config = { .flexspiRootClk = MHZ(50), .flashSize = FLEXSPI_FLSHCR0_FLSHSZ_MASK, /* Max flash size */ .ARDSeqNumber = 1, .ARDSeqIndex = READ, }; if (memc_flexspi_is_running_xip(&data->controller)) { /* * ==== ENTER CRITICAL SECTION ==== * No flash access should be performed in critical section. All * code and data accessed must reside in ram. */ key = irq_lock(); memc_flexspi_wait_bus_idle(&data->controller); } /* SFDP spec requires that we downclock the FlexSPI to 50MHz or less */ ret = memc_flexspi_update_clock(&data->controller, &config, data->port, MHZ(50)); if (ret < 0) { goto _exit; } /* Setup initial LUT table and FlexSPI configuration */ memcpy(flexspi_lut, flash_flexspi_nor_base_lut, sizeof(flash_flexspi_nor_base_lut)); ret = memc_flexspi_set_device_config(&data->controller, &config, (uint32_t *)flexspi_lut, FLEXSPI_INSTR_END * MEMC_FLEXSPI_CMD_PER_SEQ, data->port); if (ret < 0) { goto _exit; } /* First, check if the JEDEC ID of this flash has explicit support * in this driver */ ret = flash_flexspi_nor_check_jedec(data, flexspi_lut); if (ret == 0) { /* Flash was supported, SFDP probe not needed */ goto _program_lut; } ret = flash_flexspi_nor_sfdp_read_helper(data, 0, sfdp_buf, sizeof(sfdp_buf)); if (ret < 0) { goto _exit; } LOG_DBG("SFDP header magic: 0x%x", header->magic); if (jesd216_sfdp_magic(header) != JESD216_SFDP_MAGIC) { /* Header was read incorrectly */ LOG_WRN("Invalid header, using legacy SPI mode"); data->legacy_poll = true; goto _program_lut; } if (header->phdr[0].len_dw > ARRAY_SIZE(param_buf)) { /* Not 
enough space to read parameter table */ ret = -ENOBUFS; goto _exit; } /* Read basic flash parameter table */ ret = flash_flexspi_nor_sfdp_read_helper(data, jesd216_param_addr(&header->phdr[0]), param_buf, sizeof(uint32_t) * header->phdr[0].len_dw); if (ret < 0) { goto _exit; } /* Configure flash */ ret = flash_flexspi_nor_config_flash(data, header, bfp, flexspi_lut); if (ret < 0) { goto _exit; } _program_lut: /* * Update the FlexSPI with the config structure provided * from devicetree and the configured LUT */ ret = memc_flexspi_set_device_config(&data->controller, &data->config, (uint32_t *)flexspi_lut, FLEXSPI_INSTR_PROG_END * MEMC_FLEXSPI_CMD_PER_SEQ, data->port); if (ret < 0) { return ret; } _exit: memc_flexspi_reset(&data->controller); if (memc_flexspi_is_running_xip(&data->controller)) { /* ==== EXIT CRITICAL SECTION ==== */ irq_unlock(key); } return ret; } static int flash_flexspi_nor_init(const struct device *dev) { const struct flash_flexspi_nor_config *config = dev->config; struct flash_flexspi_nor_data *data = dev->data; uint32_t vendor_id; /* First step- use ROM pointer to controller device to create * a copy of the device structure in RAM we can use while in * critical sections of code. */ memcpy(&data->controller, config->controller, sizeof(struct device)); if (!device_is_ready(&data->controller)) { LOG_ERR("Controller device is not ready"); return -ENODEV; } if (flash_flexspi_nor_probe(data)) { if (memc_flexspi_is_running_xip(&data->controller)) { /* We can't continue from here- the LUT stored in * the FlexSPI will be invalid so we cannot XIP. 
* Instead, spin here */ while (1) { /* Spin */ } } LOG_ERR("SFDP probe failed"); return -EIO; } /* Set the FlexSPI to full clock speed */ if (memc_flexspi_update_clock(&data->controller, &data->config, data->port, data->config.flexspiRootClk)) { LOG_ERR("Could not set flexspi clock speed"); return -ENOTSUP; } memc_flexspi_reset(&data->controller); if (flash_flexspi_nor_read_id(dev, (uint8_t *)&vendor_id)) { LOG_ERR("Could not read vendor id"); return -EIO; } LOG_DBG("Vendor id: 0x%0x", vendor_id); return 0; } static const struct flash_driver_api flash_flexspi_nor_api = { .erase = flash_flexspi_nor_erase, .write = flash_flexspi_nor_write, .read = flash_flexspi_nor_read, .get_parameters = flash_flexspi_nor_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_flexspi_nor_pages_layout, #endif #if defined(CONFIG_FLASH_JESD216_API) .sfdp_read = flash_flexspi_nor_sfdp_read, .read_jedec_id = flash_flexspi_nor_read_id, #endif }; #define CONCAT3(x, y, z) x ## y ## z #define CS_INTERVAL_UNIT(unit) \ CONCAT3(kFLEXSPI_CsIntervalUnit, unit, SckCycle) #define AHB_WRITE_WAIT_UNIT(unit) \ CONCAT3(kFLEXSPI_AhbWriteWaitUnit, unit, AhbCycle) #define FLASH_FLEXSPI_DEVICE_CONFIG(n) \ { \ .flexspiRootClk = DT_INST_PROP(n, spi_max_frequency), \ .flashSize = DT_INST_PROP(n, size) / 8 / KB(1), \ .CSIntervalUnit = \ CS_INTERVAL_UNIT( \ DT_INST_PROP(n, cs_interval_unit)), \ .CSInterval = DT_INST_PROP(n, cs_interval), \ .CSHoldTime = DT_INST_PROP(n, cs_hold_time), \ .CSSetupTime = DT_INST_PROP(n, cs_setup_time), \ .dataValidTime = DT_INST_PROP(n, data_valid_time), \ .columnspace = DT_INST_PROP(n, column_space), \ .enableWordAddress = DT_INST_PROP(n, word_addressable), \ .AWRSeqIndex = 0, \ .AWRSeqNumber = 0, \ .ARDSeqIndex = READ, \ .ARDSeqNumber = 1, \ .AHBWriteWaitUnit = \ AHB_WRITE_WAIT_UNIT( \ DT_INST_PROP(n, ahb_write_wait_unit)), \ .AHBWriteWaitInterval = \ DT_INST_PROP(n, ahb_write_wait_interval), \ } \ #define FLASH_FLEXSPI_NOR(n) \ static const struct 
flash_flexspi_nor_config \ flash_flexspi_nor_config_##n = { \ .controller = DEVICE_DT_GET(DT_INST_BUS(n)), \ }; \ static struct flash_flexspi_nor_data \ flash_flexspi_nor_data_##n = { \ .config = FLASH_FLEXSPI_DEVICE_CONFIG(n), \ .port = DT_INST_REG_ADDR(n), \ .layout = { \ .pages_count = DT_INST_PROP(n, size) / 8 \ / SPI_NOR_SECTOR_SIZE, \ .pages_size = SPI_NOR_SECTOR_SIZE, \ }, \ .flash_parameters = { \ .write_block_size = NOR_WRITE_SIZE, \ .erase_value = NOR_ERASE_VALUE, \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ flash_flexspi_nor_init, \ NULL, \ &flash_flexspi_nor_data_##n, \ &flash_flexspi_nor_config_##n, \ POST_KERNEL, \ CONFIG_FLASH_INIT_PRIORITY, \ &flash_flexspi_nor_api); DT_INST_FOREACH_STATUS_OKAY(FLASH_FLEXSPI_NOR) ```
/content/code_sandbox/drivers/flash/flash_mcux_flexspi_nor.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,537
```c /* * */ #include "flash_gd32.h" #include <zephyr/logging/log.h> #include <zephyr/kernel.h> #include <gd32_fmc.h> LOG_MODULE_DECLARE(flash_gd32); #define GD32_NV_FLASH_V1_NODE DT_INST(0, gd_gd32_nv_flash_v1) #define GD32_NV_FLASH_V1_TIMEOUT DT_PROP(GD32_NV_FLASH_V1_NODE, max_erase_time_ms) #define GD32_NV_FLASH_V1_PAGE_SIZE DT_PROP(GD32_NV_FLASH_V1_NODE, page_size) #if defined(CONFIG_SOC_SERIES_GD32E10X) || \ defined(CONFIG_SOC_SERIES_GD32E50X) /* Some GD32 FMC v1 series require offset and len to word aligned. */ #define GD32_FMC_V1_WORK_ALIGNED #endif #ifdef FLASH_GD32_FMC_WORK_ALIGNED #define GD32_FMC_V1_WRITE_ERR (FMC_STAT_PGERR | FMC_STAT_WPERR | FMC_STAT_PGAERR) #else #define GD32_FMC_V1_WRITE_ERR (FMC_STAT_PGERR | FMC_STAT_WPERR) #endif #define GD32_FMC_V1_ERASE_ERR FMC_STAT_WPERR #ifdef CONFIG_FLASH_PAGE_LAYOUT static const struct flash_pages_layout gd32_fmc_v1_layout[] = { { .pages_size = GD32_NV_FLASH_V1_PAGE_SIZE, .pages_count = SOC_NV_FLASH_SIZE / GD32_NV_FLASH_V1_PAGE_SIZE } }; #endif static inline void gd32_fmc_v1_unlock(void) { FMC_KEY = UNLOCK_KEY0; FMC_KEY = UNLOCK_KEY1; } static inline void gd32_fmc_v1_lock(void) { FMC_CTL |= FMC_CTL_LK; } static int gd32_fmc_v1_wait_idle(void) { const int64_t expired_time = k_uptime_get() + GD32_NV_FLASH_V1_TIMEOUT; while (FMC_STAT & FMC_STAT_BUSY) { if (k_uptime_get() > expired_time) { return -ETIMEDOUT; } } return 0; } bool flash_gd32_valid_range(off_t offset, uint32_t len, bool write) { if ((offset > SOC_NV_FLASH_SIZE) || ((offset + len) > SOC_NV_FLASH_SIZE)) { return false; } if (write) { /* Check offset and len is flash_prg_t aligned. */ if ((offset % sizeof(flash_prg_t)) || (len % sizeof(flash_prg_t))) { return false; } #ifdef FLASH_GD32_FMC_WORK_ALIGNED /* Check offset and len is word aligned. 
*/ if ((offset % sizeof(uint32_t)) || (len % sizeof(uint32_t))) { return false; } #endif } else { if ((offset % GD32_NV_FLASH_V1_PAGE_SIZE) || (len % GD32_NV_FLASH_V1_PAGE_SIZE)) { return false; } } return true; } int flash_gd32_write_range(off_t offset, const void *data, size_t len) { flash_prg_t *prg_flash = (flash_prg_t *)((uint8_t *)SOC_NV_FLASH_ADDR + offset); flash_prg_t *prg_data = (flash_prg_t *)data; int ret = 0; gd32_fmc_v1_unlock(); if (FMC_STAT & FMC_STAT_BUSY) { return -EBUSY; } FMC_CTL |= FMC_CTL_PG; for (size_t i = 0U; i < (len / sizeof(flash_prg_t)); i++) { *prg_flash++ = *prg_data++; } ret = gd32_fmc_v1_wait_idle(); if (ret < 0) { goto expired_out; } if (FMC_STAT & GD32_FMC_V1_WRITE_ERR) { ret = -EIO; FMC_STAT |= GD32_FMC_V1_WRITE_ERR; LOG_ERR("FMC programming failed"); } expired_out: FMC_CTL &= ~FMC_CTL_PG; gd32_fmc_v1_lock(); return ret; } static int gd32_fmc_v1_page_erase(uint32_t page_addr) { int ret = 0; gd32_fmc_v1_unlock(); if (FMC_STAT & FMC_STAT_BUSY) { return -EBUSY; } FMC_CTL |= FMC_CTL_PER; FMC_ADDR = page_addr; FMC_CTL |= FMC_CTL_START; ret = gd32_fmc_v1_wait_idle(); if (ret < 0) { goto expired_out; } if (FMC_STAT & GD32_FMC_V1_ERASE_ERR) { ret = -EIO; FMC_STAT |= GD32_FMC_V1_ERASE_ERR; LOG_ERR("FMC page %u erase failed", page_addr); } expired_out: FMC_CTL &= ~FMC_CTL_PER; gd32_fmc_v1_lock(); return ret; } int flash_gd32_erase_block(off_t offset, size_t size) { uint32_t page_addr = SOC_NV_FLASH_ADDR + offset; int ret = 0; while (size > 0U) { ret = gd32_fmc_v1_page_erase(page_addr); if (ret < 0) { return ret; } size -= GD32_NV_FLASH_V1_PAGE_SIZE; page_addr += GD32_NV_FLASH_V1_PAGE_SIZE; } return 0; } #ifdef CONFIG_FLASH_PAGE_LAYOUT void flash_gd32_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { ARG_UNUSED(dev); *layout = gd32_fmc_v1_layout; *layout_size = ARRAY_SIZE(gd32_fmc_v1_layout); } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ ```
/content/code_sandbox/drivers/flash/flash_gd32_v1.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,271
```c /* * */ #include "flash_cadence_qspi_nor_ll.h" #include <string.h> #include <zephyr/logging/log.h> #include <zephyr/kernel.h> LOG_MODULE_REGISTER(flash_cadence_ll, CONFIG_FLASH_LOG_LEVEL); int cad_qspi_idle(struct cad_qspi_params *cad_params) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } return (sys_read32(cad_params->reg_base + CAD_QSPI_CFG) & CAD_QSPI_CFG_IDLE) >> 31; } int cad_qspi_set_baudrate_div(struct cad_qspi_params *cad_params, uint32_t div) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } if (div > 0xf) { return CAD_INVALID; } sys_clear_bits(cad_params->reg_base + CAD_QSPI_CFG, ~CAD_QSPI_CFG_BAUDDIV_MSK); sys_set_bits(cad_params->reg_base + CAD_QSPI_CFG, CAD_QSPI_CFG_BAUDDIV(div)); return 0; } int cad_qspi_configure_dev_size(struct cad_qspi_params *cad_params, uint32_t addr_bytes, uint32_t bytes_per_dev, uint32_t bytes_per_block) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } sys_write32(CAD_QSPI_DEVSZ_ADDR_BYTES(addr_bytes) | CAD_QSPI_DEVSZ_BYTES_PER_PAGE(bytes_per_dev) | CAD_QSPI_DEVSZ_BYTES_PER_BLOCK(bytes_per_block), cad_params->reg_base + CAD_QSPI_DEVSZ); return 0; } int cad_qspi_set_read_config(struct cad_qspi_params *cad_params, uint32_t opcode, uint32_t instr_type, uint32_t addr_type, uint32_t data_type, uint32_t mode_bit, uint32_t dummy_clk_cycle) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } sys_write32(CAD_QSPI_DEV_OPCODE(opcode) | CAD_QSPI_DEV_INST_TYPE(instr_type) | CAD_QSPI_DEV_ADDR_TYPE(addr_type) | CAD_QSPI_DEV_DATA_TYPE(data_type) | CAD_QSPI_DEV_MODE_BIT(mode_bit) | CAD_QSPI_DEV_DUMMY_CLK_CYCLE(dummy_clk_cycle), cad_params->reg_base + CAD_QSPI_DEVRD); return 0; } int cad_qspi_set_write_config(struct cad_qspi_params *cad_params, uint32_t opcode, uint32_t addr_type, uint32_t data_type, uint32_t dummy_clk_cycle) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } 
sys_write32(CAD_QSPI_DEV_OPCODE(opcode) | CAD_QSPI_DEV_ADDR_TYPE(addr_type) | CAD_QSPI_DEV_DATA_TYPE(data_type) | CAD_QSPI_DEV_DUMMY_CLK_CYCLE(dummy_clk_cycle), cad_params->reg_base + CAD_QSPI_DEVWR); return 0; } int cad_qspi_timing_config(struct cad_qspi_params *cad_params, uint32_t clkphase, uint32_t clkpol, uint32_t csda, uint32_t csdads, uint32_t cseot, uint32_t cssot, uint32_t rddatacap) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } uint32_t cfg = sys_read32(cad_params->reg_base + CAD_QSPI_CFG); cfg &= CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK & CAD_QSPI_CFG_SELCLKPOL_CLR_MSK; cfg |= CAD_QSPI_SELCLKPHASE(clkphase) | CAD_QSPI_SELCLKPOL(clkpol); sys_write32(cfg, cad_params->reg_base + CAD_QSPI_CFG); sys_write32(CAD_QSPI_DELAY_CSSOT(cssot) | CAD_QSPI_DELAY_CSEOT(cseot) | CAD_QSPI_DELAY_CSDADS(csdads) | CAD_QSPI_DELAY_CSDA(csda), cad_params->reg_base + CAD_QSPI_DELAY); return 0; } int cad_qspi_stig_cmd_helper(struct cad_qspi_params *cad_params, int cs, uint32_t cmd) { uint32_t count = 0; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } /* chip select */ sys_write32((sys_read32(cad_params->reg_base + CAD_QSPI_CFG) & CAD_QSPI_CFG_CS_MSK) | CAD_QSPI_CFG_CS(cs), cad_params->reg_base + CAD_QSPI_CFG); sys_write32(cmd, cad_params->reg_base + CAD_QSPI_FLASHCMD); sys_write32(cmd | CAD_QSPI_FLASHCMD_EXECUTE, cad_params->reg_base + CAD_QSPI_FLASHCMD); do { uint32_t reg = sys_read32(cad_params->reg_base + CAD_QSPI_FLASHCMD); if (!(reg & CAD_QSPI_FLASHCMD_EXECUTE_STAT)) { break; } count++; } while (count < CAD_QSPI_COMMAND_TIMEOUT); if (count >= CAD_QSPI_COMMAND_TIMEOUT) { LOG_ERR("Error sending QSPI command %x, timed out\n", cmd); return CAD_QSPI_ERROR; } return 0; } int cad_qspi_stig_cmd(struct cad_qspi_params *cad_params, uint32_t opcode, uint32_t dummy) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) { LOG_ERR("Faulty dummy bytes\n"); 
return -1; } return cad_qspi_stig_cmd_helper(cad_params, cad_params->cad_qspi_cs, CAD_QSPI_FLASHCMD_OPCODE(opcode) | CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES(dummy)); } int cad_qspi_stig_read_cmd(struct cad_qspi_params *cad_params, uint32_t opcode, uint32_t dummy, uint32_t num_bytes, uint32_t *output) { if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) { LOG_ERR("Faulty dummy byes\n"); return -1; } if ((num_bytes > 8) || (num_bytes == 0)) { return -1; } if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } uint32_t cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) | CAD_QSPI_FLASHCMD_ENRDDATA(1) | CAD_QSPI_FLASHCMD_NUMRDDATABYTES(num_bytes - 1) | CAD_QSPI_FLASHCMD_ENCMDADDR(0) | CAD_QSPI_FLASHCMD_ENMODEBIT(0) | CAD_QSPI_FLASHCMD_NUMADDRBYTES(0) | CAD_QSPI_FLASHCMD_ENWRDATA(0) | CAD_QSPI_FLASHCMD_NUMWRDATABYTES(0) | CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy); if (cad_qspi_stig_cmd_helper(cad_params, cad_params->cad_qspi_cs, cmd)) { LOG_ERR("failed to send stig cmd\n"); return -1; } output[0] = sys_read32(cad_params->reg_base + CAD_QSPI_FLASHCMD_RDDATA0); if (num_bytes > 4) { output[1] = sys_read32(cad_params->reg_base + CAD_QSPI_FLASHCMD_RDDATA1); } return 0; } int cad_qspi_stig_wr_cmd(struct cad_qspi_params *cad_params, uint32_t opcode, uint32_t dummy, uint32_t num_bytes, uint32_t *input) { if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) { LOG_ERR("Faulty dummy byes\n"); return -1; } if ((num_bytes > 8) || (num_bytes == 0)) { return -1; } if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } uint32_t cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) | CAD_QSPI_FLASHCMD_ENRDDATA(0) | CAD_QSPI_FLASHCMD_NUMRDDATABYTES(0) | CAD_QSPI_FLASHCMD_ENCMDADDR(0) | CAD_QSPI_FLASHCMD_ENMODEBIT(0) | CAD_QSPI_FLASHCMD_NUMADDRBYTES(0) | CAD_QSPI_FLASHCMD_ENWRDATA(1) | CAD_QSPI_FLASHCMD_NUMWRDATABYTES(num_bytes - 1) | CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy); sys_write32(input[0], cad_params->reg_base + CAD_QSPI_FLASHCMD_WRDATA0); if (num_bytes > 4) { 
sys_write32(input[1], cad_params->reg_base + CAD_QSPI_FLASHCMD_WRDATA1); } return cad_qspi_stig_cmd_helper(cad_params, cad_params->cad_qspi_cs, cmd); } int cad_qspi_stig_addr_cmd(struct cad_qspi_params *cad_params, uint32_t opcode, uint32_t dummy, uint32_t addr) { uint32_t cmd; if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) { return -1; } if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) | CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy) | CAD_QSPI_FLASHCMD_ENCMDADDR(1) | CAD_QSPI_FLASHCMD_NUMADDRBYTES(2); sys_write32(addr, cad_params->reg_base + CAD_QSPI_FLASHCMD_ADDR); return cad_qspi_stig_cmd_helper(cad_params, cad_params->cad_qspi_cs, cmd); } int cad_qspi_device_bank_select(struct cad_qspi_params *cad_params, uint32_t bank) { int status = 0; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } status = cad_qspi_stig_cmd(cad_params, CAD_QSPI_STIG_OPCODE_WREN, 0); if (status != 0) { return status; } status = cad_qspi_stig_wr_cmd(cad_params, CAD_QSPI_STIG_OPCODE_WREN_EXT_REG, 0, 1, &bank); if (status != 0) { return status; } return cad_qspi_stig_cmd(cad_params, CAD_QSPI_STIG_OPCODE_WRDIS, 0); } int cad_qspi_device_status(struct cad_qspi_params *cad_params, uint32_t *status) { return cad_qspi_stig_read_cmd(cad_params, CAD_QSPI_STIG_OPCODE_RDSR, 0, 1, status); } #if CAD_QSPI_MICRON_N25Q_SUPPORT int cad_qspi_n25q_enable(struct cad_qspi_params *cad_params) { cad_qspi_set_read_config(cad_params, QSPI_FAST_READ, CAD_QSPI_INST_SINGLE, CAD_QSPI_ADDR_FASTREAD, CAT_QSPI_ADDR_SINGLE_IO, 1, 0); cad_qspi_set_write_config(cad_params, QSPI_WRITE, 0, 0, 0); return 0; } int cad_qspi_n25q_wait_for_program_and_erase(struct cad_qspi_params *cad_params, int program_only) { uint32_t status, flag_sr; int count = 0; while (count < CAD_QSPI_COMMAND_TIMEOUT) { status = cad_qspi_device_status(cad_params, &status); if (status != 0) { LOG_ERR("Error getting device status\n"); return -1; } if 
(!CAD_QSPI_STIG_SR_BUSY(status)) break; count++; } if (count >= CAD_QSPI_COMMAND_TIMEOUT) { LOG_ERR("Timed out waiting for idle\n"); return -1; } count = 0; while (count < CAD_QSPI_COMMAND_TIMEOUT) { status = cad_qspi_stig_read_cmd(cad_params, CAD_QSPI_STIG_OPCODE_RDFLGSR, 0, 1, &flag_sr); if (status != 0) { LOG_ERR("Error waiting program and erase.\n"); return status; } if ((program_only && CAD_QSPI_STIG_FLAGSR_PROGRAMREADY(flag_sr)) || (!program_only && CAD_QSPI_STIG_FLAGSR_ERASEREADY(flag_sr))) break; } if (count >= CAD_QSPI_COMMAND_TIMEOUT) LOG_ERR("Timed out waiting for program and erase\n"); if ((program_only && CAD_QSPI_STIG_FLAGSR_PROGRAMERROR(flag_sr)) || (!program_only && CAD_QSPI_STIG_FLAGSR_ERASEERROR(flag_sr))) { LOG_ERR("Error programming/erasing flash\n"); cad_qspi_stig_cmd(cad_params, CAD_QSPI_STIG_OPCODE_CLFSR, 0); return -1; } return 0; } #endif int cad_qspi_indirect_read_start_bank(struct cad_qspi_params *cad_params, uint32_t flash_addr, uint32_t num_bytes) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } sys_write32(flash_addr, cad_params->reg_base + CAD_QSPI_INDRDSTADDR); sys_write32(num_bytes, cad_params->reg_base + CAD_QSPI_INDRDCNT); sys_write32(CAD_QSPI_INDRD_START | CAD_QSPI_INDRD_IND_OPS_DONE, cad_params->reg_base + CAD_QSPI_INDRD); return 0; } int cad_qspi_indirect_write_start_bank(struct cad_qspi_params *cad_params, uint32_t flash_addr, uint32_t num_bytes) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } sys_write32(flash_addr, cad_params->reg_base + CAD_QSPI_INDWRSTADDR); sys_write32(num_bytes, cad_params->reg_base + CAD_QSPI_INDWRCNT); sys_write32(CAD_QSPI_INDWR_START | CAD_QSPI_INDWR_INDDONE, cad_params->reg_base + CAD_QSPI_INDWR); return 0; } int cad_qspi_indirect_write_finish(struct cad_qspi_params *cad_params) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } #if CAD_QSPI_MICRON_N25Q_SUPPORT return cad_qspi_n25q_wait_for_program_and_erase(cad_params, 
1); #else return 0; #endif } int cad_qspi_enable(struct cad_qspi_params *cad_params) { int status; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } sys_set_bits(cad_params->reg_base + CAD_QSPI_CFG, CAD_QSPI_CFG_ENABLE); #if CAD_QSPI_MICRON_N25Q_SUPPORT status = cad_qspi_n25q_enable(cad_params); if (status != 0) { return status; } #endif return 0; } int cad_qspi_enable_subsector_bank(struct cad_qspi_params *cad_params, uint32_t addr) { int status = 0; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } status = cad_qspi_stig_cmd(cad_params, CAD_QSPI_STIG_OPCODE_WREN, 0); if (status != 0) { return status; } status = cad_qspi_stig_addr_cmd(cad_params, CAD_QSPI_STIG_OPCODE_SUBSEC_ERASE, 0, addr); if (status != 0) { return status; } #if CAD_QSPI_MICRON_N25Q_SUPPORT status = cad_qspi_n25q_wait_for_program_and_erase(cad_params, 0); #endif return status; } int cad_qspi_erase_subsector(struct cad_qspi_params *cad_params, uint32_t addr) { int status = 0; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } status = cad_qspi_device_bank_select(cad_params, addr >> 24); if (status != 0) { return status; } return cad_qspi_enable_subsector_bank(cad_params, addr); } int cad_qspi_erase_sector(struct cad_qspi_params *cad_params, uint32_t addr) { int status = 0; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } status = cad_qspi_device_bank_select(cad_params, addr >> 24); if (status != 0) { return status; } status = cad_qspi_stig_cmd(cad_params, CAD_QSPI_STIG_OPCODE_WREN, 0); if (status != 0) { return status; } status = cad_qspi_stig_addr_cmd(cad_params, CAD_QSPI_STIG_OPCODE_SEC_ERASE, 0, addr); if (status != 0) { return status; } #if CAD_QSPI_MICRON_N25Q_SUPPORT status = cad_qspi_n25q_wait_for_program_and_erase(cad_params, 0); #endif return status; } void cad_qspi_calibration(struct cad_qspi_params *cad_params, uint32_t dev_clk, uint32_t qspi_clk_mhz) { int status; uint32_t dev_sclk_mhz = 27; 
/*min value to get biggest 0xF div factor*/ uint32_t data_cap_delay; uint32_t sample_rdid; uint32_t rdid; uint32_t div_actual; uint32_t div_bits; int first_pass, last_pass; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); } /*1. Set divider to bigger value (slowest SCLK) *2. RDID and save the value */ div_actual = (qspi_clk_mhz + (dev_sclk_mhz - 1)) / dev_sclk_mhz; div_bits = (((div_actual + 1) / 2) - 1); status = cad_qspi_set_baudrate_div(cad_params, 0xf); status = cad_qspi_stig_read_cmd(cad_params, CAD_QSPI_STIG_OPCODE_RDID, 0, 3, &sample_rdid); if (status != 0) { return; } /*3. Set divider to the intended frequency *4. Set the read delay = 0 *5. RDID and check whether the value is same as item 2 *6. Increase read delay and compared the value against item 2 *7. Find the range of read delay that have same as * item 2 and divide it to 2 */ div_actual = (qspi_clk_mhz + (dev_clk - 1)) / dev_clk; div_bits = (((div_actual + 1) / 2) - 1); status = cad_qspi_set_baudrate_div(cad_params, div_bits); if (status != 0) { return; } data_cap_delay = 0; first_pass = -1; last_pass = -1; do { if (status != 0) { break; } status = cad_qspi_stig_read_cmd(cad_params, CAD_QSPI_STIG_OPCODE_RDID, 0, 3, &rdid); if (status != 0) { break; } if (rdid == sample_rdid) { if (first_pass == -1) first_pass = data_cap_delay; else last_pass = data_cap_delay; } data_cap_delay++; sys_write32(CAD_QSPI_RDDATACAP_BYP(1) | CAD_QSPI_RDDATACAP_DELAY(data_cap_delay), cad_params->reg_base + CAD_QSPI_RDDATACAP); } while (data_cap_delay < 0x10); if (first_pass > 0) { int diff = first_pass - last_pass; data_cap_delay = first_pass + diff / 2; } sys_write32(CAD_QSPI_RDDATACAP_BYP(1) | CAD_QSPI_RDDATACAP_DELAY(data_cap_delay), cad_params->reg_base + CAD_QSPI_RDDATACAP); status = cad_qspi_stig_read_cmd(cad_params, CAD_QSPI_STIG_OPCODE_RDID, 0, 3, &rdid); if (status != 0) { return; } } int cad_qspi_int_disable(struct cad_qspi_params *cad_params, uint32_t mask) { if (cad_params == NULL) { LOG_ERR("Wrong 
parameter\n"); return -EINVAL; } if (cad_qspi_idle(cad_params) == 0) { return -1; } if ((CAD_QSPI_INT_STATUS_ALL & mask) == 0) { return -1; } sys_write32(mask, cad_params->reg_base + CAD_QSPI_IRQMSK); return 0; } void cad_qspi_set_chip_select(struct cad_qspi_params *cad_params, int cs) { if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); } cad_params->cad_qspi_cs = cs; } int cad_qspi_init(struct cad_qspi_params *cad_params, uint32_t clk_phase, uint32_t clk_pol, uint32_t csda, uint32_t csdads, uint32_t cseot, uint32_t cssot, uint32_t rddatacap) { int status = 0; uint32_t qspi_desired_clk_freq; uint32_t rdid = 0; uint32_t cap_code; LOG_INF("Initializing Qspi"); if (cad_params == NULL) { LOG_ERR("Wrong parameter"); return -EINVAL; } if (cad_qspi_idle(cad_params) == 0) { LOG_ERR("device not idle"); return -EBUSY; } status = cad_qspi_timing_config(cad_params, clk_phase, clk_pol, csda, csdads, cseot, cssot, rddatacap); if (status != 0) { LOG_ERR("config set timing failure\n"); return status; } sys_write32(CAD_QSPI_REMAPADDR_VALUE_SET(0), cad_params->reg_base + CAD_QSPI_REMAPADDR); status = cad_qspi_int_disable(cad_params, CAD_QSPI_INT_STATUS_ALL); if (status != 0) { LOG_ERR("failed disable\n"); return status; } cad_qspi_set_baudrate_div(cad_params, 0xf); status = cad_qspi_enable(cad_params); if (status != 0) { LOG_ERR("failed enable\n"); return status; } qspi_desired_clk_freq = 100; cad_qspi_calibration(cad_params, qspi_desired_clk_freq, cad_params->clk_rate); status = cad_qspi_stig_read_cmd(cad_params, CAD_QSPI_STIG_OPCODE_RDID, 0, 3, &rdid); if (status != 0) { LOG_ERR("Error reading RDID\n"); return status; } /* * NOTE: The Size code seems to be a form of BCD (binary coded decimal). * The first nibble is the 10's digit and the second nibble is the 1's * digit in the number of bytes. 
* * Capacity ID samples: * 0x15 : 16 Mb => 2 MiB => 1 << 21 ; BCD=15 * 0x16 : 32 Mb => 4 MiB => 1 << 22 ; BCD=16 * 0x17 : 64 Mb => 8 MiB => 1 << 23 ; BCD=17 * 0x18 : 128 Mb => 16 MiB => 1 << 24 ; BCD=18 * 0x19 : 256 Mb => 32 MiB => 1 << 25 ; BCD=19 * 0x1a * 0x1b * 0x1c * 0x1d * 0x1e * 0x1f * 0x20 : 512 Mb => 64 MiB => 1 << 26 ; BCD=20 * 0x21 : 1024 Mb => 128 MiB => 1 << 27 ; BCD=21 */ cap_code = CAD_QSPI_STIG_RDID_CAPACITYID(rdid); if (!(((cap_code >> 4) > 0x9) || ((cap_code & 0xf) > 0x9))) { uint32_t decoded_cap = ((cap_code >> 4) * 10) + (cap_code & 0xf); cad_params->qspi_device_size = 1 << (decoded_cap + 6); LOG_INF("QSPI Capacity: %x", cad_params->qspi_device_size); } else { LOG_ERR("Invalid CapacityID encountered: 0x%02x", cap_code); return -1; } cad_qspi_configure_dev_size(cad_params, QSPI_ADDR_BYTES, QSPI_BYTES_PER_DEV, QSPI_BYTES_PER_BLOCK); LOG_INF("Flash size: %d Bytes", cad_params->qspi_device_size); return status; } int cad_qspi_indirect_page_bound_write(struct cad_qspi_params *cad_params, uint32_t offset, uint8_t *buffer, uint32_t len) { int status = 0, i; uint32_t write_count, write_capacity, *write_data, space, write_fill_level, sram_partition; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } status = cad_qspi_indirect_write_start_bank(cad_params, offset, len); if (status != 0) { return status; } write_count = 0; sram_partition = CAD_QSPI_SRAMPART_ADDR(sys_read32(cad_params->reg_base + CAD_QSPI_SRAMPART)); write_capacity = (uint32_t)CAD_QSPI_SRAM_FIFO_ENTRY_COUNT - sram_partition; while (write_count < len) { write_fill_level = CAD_QSPI_SRAMFILL_INDWRPART( sys_read32(cad_params->reg_base + CAD_QSPI_SRAMFILL)); space = MIN(write_capacity - write_fill_level, (len - write_count) / sizeof(uint32_t)); write_data = (uint32_t *)(buffer + write_count); for (i = 0; i < space; ++i) sys_write32(*write_data++, cad_params->data_base); write_count += space * sizeof(uint32_t); } return cad_qspi_indirect_write_finish(cad_params); } int 
cad_qspi_read_bank(struct cad_qspi_params *cad_params, uint8_t *buffer, uint32_t offset, uint32_t size) { int status; uint32_t read_count = 0, *read_data; int level = 1, count = 0, i; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } status = cad_qspi_indirect_read_start_bank(cad_params, offset, size); if (status != 0) { return status; } while (read_count < size) { do { level = CAD_QSPI_SRAMFILL_INDRDPART( sys_read32(cad_params->reg_base + CAD_QSPI_SRAMFILL)); read_data = (uint32_t *)(buffer + read_count); for (i = 0; i < level; ++i) *read_data++ = sys_read32(cad_params->data_base); read_count += level * sizeof(uint32_t); count++; } while (level > 0); } return 0; } int cad_qspi_write_bank(struct cad_qspi_params *cad_params, uint32_t offset, uint8_t *buffer, uint32_t size) { int status = 0; uint32_t page_offset = offset & (CAD_QSPI_PAGE_SIZE - 1); uint32_t write_size = MIN(size, CAD_QSPI_PAGE_SIZE - page_offset); if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } while (size) { status = cad_qspi_indirect_page_bound_write(cad_params, offset, buffer, write_size); if (status != 0) { break; } offset += write_size; buffer += write_size; size -= write_size; write_size = MIN(size, CAD_QSPI_PAGE_SIZE); } return status; } int cad_qspi_read(struct cad_qspi_params *cad_params, void *buffer, uint32_t offset, uint32_t size) { uint32_t bank_count, bank_addr, bank_offset, copy_len; uint8_t *read_data; int i, status; status = 0; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } if ((offset >= cad_params->qspi_device_size) || (offset + size - 1 >= cad_params->qspi_device_size) || (size == 0)) { LOG_ERR("Invalid read parameter\n"); return -EINVAL; } if (CAD_QSPI_INDRD_RD_STAT(sys_read32(cad_params->reg_base + CAD_QSPI_INDRD))) { LOG_ERR("Read in progress\n"); return -ENOTBLK; } /* * bank_count : Number of bank(s) affected, including partial banks. 
* bank_addr : Aligned address of the first bank, * including partial bank. * bank_ofst : The offset of the bank to read. * Only used when reading the first bank. */ bank_count = CAD_QSPI_BANK_ADDR(offset + size - 1) - CAD_QSPI_BANK_ADDR(offset) + 1; bank_addr = offset & CAD_QSPI_BANK_ADDR_MSK; bank_offset = offset & (CAD_QSPI_BANK_SIZE - 1); read_data = (uint8_t *)buffer; copy_len = MIN(size, CAD_QSPI_BANK_SIZE - bank_offset); for (i = 0; i < bank_count; ++i) { status = cad_qspi_device_bank_select(cad_params, CAD_QSPI_BANK_ADDR(bank_addr)); if (status != 0) { break; } status = cad_qspi_read_bank(cad_params, read_data, bank_offset, copy_len); if (status != 0) { break; } bank_addr += CAD_QSPI_BANK_SIZE; read_data += copy_len; size -= copy_len; bank_offset = 0; copy_len = MIN(size, CAD_QSPI_BANK_SIZE); } return status; } int cad_qspi_erase(struct cad_qspi_params *cad_params, uint32_t offset, uint32_t size) { int status = 0; uint32_t subsector_offset = offset & (CAD_QSPI_SUBSECTOR_SIZE - 1); uint32_t erase_size = MIN(size, CAD_QSPI_SUBSECTOR_SIZE - subsector_offset); if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } while (size) { status = cad_qspi_erase_subsector(cad_params, offset); if (status != 0) { break; } offset += erase_size; size -= erase_size; erase_size = MIN(size, CAD_QSPI_SUBSECTOR_SIZE); } return status; } int cad_qspi_write(struct cad_qspi_params *cad_params, void *buffer, uint32_t offset, uint32_t size) { int status, i; uint32_t bank_count, bank_addr, bank_offset, copy_len; uint8_t *write_data; status = 0; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } if ((offset >= cad_params->qspi_device_size) || (offset + size - 1 >= cad_params->qspi_device_size) || (size == 0)) { return -EINVAL; } if (CAD_QSPI_INDWR_RDSTAT(sys_read32(cad_params->reg_base + CAD_QSPI_INDWR))) { LOG_ERR("QSPI Error: Write in progress\n"); return -ENOTBLK; } bank_count = CAD_QSPI_BANK_ADDR(offset + size - 1) - 
CAD_QSPI_BANK_ADDR(offset) + 1; bank_addr = offset & CAD_QSPI_BANK_ADDR_MSK; bank_offset = offset & (CAD_QSPI_BANK_SIZE - 1); write_data = buffer; copy_len = MIN(size, CAD_QSPI_BANK_SIZE - bank_offset); for (i = 0; i < bank_count; ++i) { status = cad_qspi_device_bank_select(cad_params, CAD_QSPI_BANK_ADDR(bank_addr)); if (status != 0) { break; } status = cad_qspi_write_bank(cad_params, bank_offset, write_data, copy_len); if (status != 0) { break; } bank_addr += CAD_QSPI_BANK_SIZE; write_data += copy_len; size -= copy_len; bank_offset = 0; copy_len = MIN(size, CAD_QSPI_BANK_SIZE); } return status; } int cad_qspi_update(struct cad_qspi_params *cad_params, void *Buffer, uint32_t offset, uint32_t size) { int status = 0; if (cad_params == NULL) { LOG_ERR("Wrong parameter\n"); return -EINVAL; } status = cad_qspi_erase(cad_params, offset, size); if (status != 0) { return status; } return cad_qspi_write(cad_params, Buffer, offset, size); } void cad_qspi_reset(struct cad_qspi_params *cad_params) { cad_qspi_stig_cmd(cad_params, CAD_QSPI_STIG_OPCODE_RESET_EN, 0); cad_qspi_stig_cmd(cad_params, CAD_QSPI_STIG_OPCODE_RESET_MEM, 0); } ```
/content/code_sandbox/drivers/flash/flash_cadence_qspi_nor_ll.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,527
```c /* * */ #include <string.h> #include <zephyr/drivers/flash.h> #include <zephyr/logging/log.h> #include <zephyr/sys/barrier.h> LOG_MODULE_REGISTER(flash_nrf_mram, CONFIG_FLASH_LOG_LEVEL); #define DT_DRV_COMPAT nordic_mram #define MRAM_START DT_INST_REG_ADDR(0) #define MRAM_SIZE DT_INST_REG_SIZE(0) #define MRAM_WORD_SIZE 16 #define MRAM_WORD_MASK 0xf #define WRITE_BLOCK_SIZE DT_INST_PROP_OR(0, write_block_size, MRAM_WORD_SIZE) #define ERASE_BLOCK_SIZE DT_INST_PROP_OR(0, erase_block_size, WRITE_BLOCK_SIZE) #define ERASE_VALUE 0xff BUILD_ASSERT(MRAM_START > 0, "nordic,mram: start address expected to be non-zero"); BUILD_ASSERT((ERASE_BLOCK_SIZE % WRITE_BLOCK_SIZE) == 0, "erase-block-size expected to be a multiple of write-block-size"); /** * @param[in,out] offset Relative offset into memory, from the driver API. * @param[in] len Number of bytes for the intended operation. * @param[in] must_align Require MRAM word alignment, if applicable. * * @return Absolute address in MRAM, or NULL if @p offset or @p len are not * within bounds or appropriately aligned. */ static uintptr_t validate_and_map_addr(off_t offset, size_t len, bool must_align) { if (unlikely(offset < 0 || offset >= MRAM_SIZE || len > MRAM_SIZE - offset)) { LOG_ERR("invalid offset: %ld:%zu", offset, len); return 0; } const uintptr_t addr = MRAM_START + offset; if (WRITE_BLOCK_SIZE > 1 && must_align && unlikely((addr % WRITE_BLOCK_SIZE) != 0 || (len % WRITE_BLOCK_SIZE) != 0)) { LOG_ERR("invalid alignment: %p:%zu", (void *)addr, len); return 0; } return addr; } /** * @param[in] addr_end Last modified MRAM address (not inclusive). */ static void commit_changes(uintptr_t addr_end) { /* Barrier following our last write. */ barrier_dmem_fence_full(); if ((WRITE_BLOCK_SIZE & MRAM_WORD_MASK) == 0 || (addr_end & MRAM_WORD_MASK) == 0) { /* Our last operation was MRAM word-aligned, so we're done. 
* Note: if WRITE_BLOCK_SIZE is a multiple of MRAM_WORD_SIZE, * then this was already checked in validate_and_map_addr(). */ return; } /* Get the most significant byte (MSB) of the last MRAM word we were modifying. * Writing to this byte makes the MRAM controller commit other pending writes to that word. */ addr_end |= MRAM_WORD_MASK; /* Issue a dummy write, since we didn't have anything to write here. * Doing this lets us finalize our changes before we exit the driver API. */ sys_write8(sys_read8(addr_end), addr_end); } static int nrf_mram_read(const struct device *dev, off_t offset, void *data, size_t len) { ARG_UNUSED(dev); const uintptr_t addr = validate_and_map_addr(offset, len, false); if (!addr) { return -EINVAL; } LOG_DBG("read: %p:%zu", (void *)addr, len); memcpy(data, (void *)addr, len); return 0; } static int nrf_mram_write(const struct device *dev, off_t offset, const void *data, size_t len) { ARG_UNUSED(dev); const uintptr_t addr = validate_and_map_addr(offset, len, true); if (!addr) { return -EINVAL; } LOG_DBG("write: %p:%zu", (void *)addr, len); memcpy((void *)addr, data, len); commit_changes(addr + len); return 0; } static int nrf_mram_erase(const struct device *dev, off_t offset, size_t size) { ARG_UNUSED(dev); const uintptr_t addr = validate_and_map_addr(offset, size, true); if (!addr) { return -EINVAL; } LOG_DBG("erase: %p:%zu", (void *)addr, size); memset((void *)addr, ERASE_VALUE, size); commit_changes(addr + size); return 0; } static const struct flash_parameters *nrf_mram_get_parameters(const struct device *dev) { ARG_UNUSED(dev); static const struct flash_parameters parameters = { .write_block_size = WRITE_BLOCK_SIZE, .erase_value = ERASE_VALUE, .caps = { .no_explicit_erase = true, }, }; return &parameters; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static void nrf_mram_page_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { ARG_UNUSED(dev); static const struct flash_pages_layout pages_layout = { 
.pages_count = (MRAM_SIZE) / (ERASE_BLOCK_SIZE), .pages_size = ERASE_BLOCK_SIZE, }; *layout = &pages_layout; *layout_size = 1; } #endif static const struct flash_driver_api nrf_mram_api = { .read = nrf_mram_read, .write = nrf_mram_write, .erase = nrf_mram_erase, .get_parameters = nrf_mram_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = nrf_mram_page_layout, #endif }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &nrf_mram_api); ```
/content/code_sandbox/drivers/flash/soc_flash_nrf_mram.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,223
```c /* * */ #define LOG_DOMAIN flash_stm32l5 #define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_DOMAIN); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <zephyr/drivers/flash.h> #include <zephyr/init.h> #include <soc.h> #include <stm32_ll_icache.h> #include <stm32_ll_system.h> #include "flash_stm32.h" #if defined(CONFIG_SOC_SERIES_STM32H5X) /* at this time stm32h5 mcus have 128KB (stm32h50x) or 2MB (stm32h56x/57x) */ #define STM32_SERIES_MAX_FLASH 2048 #elif defined(CONFIG_SOC_SERIES_STM32L5X) #define STM32_SERIES_MAX_FLASH 512 #elif defined(CONFIG_SOC_SERIES_STM32U5X) /* It is used to handle the 2 banks discontinuity case, the discontinuity is not happen on STM32U5, * so define it to flash size to avoid the unexptected check. */ #define STM32_SERIES_MAX_FLASH (CONFIG_FLASH_SIZE) #endif #define PAGES_PER_BANK ((FLASH_SIZE / FLASH_PAGE_SIZE) / 2) #define BANK2_OFFSET (KB(STM32_SERIES_MAX_FLASH) / 2) #define ICACHE_DISABLE_TIMEOUT_VALUE 1U /* 1ms */ #define ICACHE_INVALIDATE_TIMEOUT_VALUE 1U /* 1ms */ static int stm32_icache_disable(void) { int status = 0; uint32_t tickstart; LOG_DBG("I-cache Disable"); /* Clear BSYENDF flag first and then disable the instruction cache * that starts a cache invalidation procedure */ CLEAR_BIT(ICACHE->FCR, ICACHE_FCR_CBSYENDF); LL_ICACHE_Disable(); /* Get tick */ tickstart = k_uptime_get_32(); /* Wait for instruction cache to get disabled */ while (LL_ICACHE_IsEnabled()) { if ((k_uptime_get_32() - tickstart) > ICACHE_DISABLE_TIMEOUT_VALUE) { /* New check to avoid false timeout detection in case * of preemption. 
*/ if (LL_ICACHE_IsEnabled()) { status = -ETIMEDOUT; break; } } } return status; } static void stm32_icache_enable(void) { LOG_DBG("I-cache Enable"); LL_ICACHE_Enable(); } static int icache_wait_for_invalidate_complete(void) { int status = -EIO; uint32_t tickstart; /* Check if ongoing invalidation operation */ if (LL_ICACHE_IsActiveFlag_BUSY()) { /* Get tick */ tickstart = k_uptime_get_32(); /* Wait for end of cache invalidation */ while (!LL_ICACHE_IsActiveFlag_BSYEND()) { if ((k_uptime_get_32() - tickstart) > ICACHE_INVALIDATE_TIMEOUT_VALUE) { break; } } } /* Clear any pending flags */ if (LL_ICACHE_IsActiveFlag_BSYEND()) { LOG_DBG("I-cache Invalidation complete"); LL_ICACHE_ClearFlag_BSYEND(); status = 0; } else { LOG_ERR("I-cache Invalidation timeout"); status = -ETIMEDOUT; } if (LL_ICACHE_IsActiveFlag_ERR()) { LOG_ERR("I-cache error"); LL_ICACHE_ClearFlag_ERR(); status = -EIO; } return status; } /* Macro to check if the flash is Dual bank or not */ #if defined(CONFIG_SOC_SERIES_STM32H5X) #define stm32_flash_has_2_banks(flash_device) true #else #define stm32_flash_has_2_banks(flash_device) \ (((FLASH_STM32_REGS(flash_device)->OPTR & FLASH_STM32_DBANK) \ == FLASH_STM32_DBANK) \ ? (true) : (false)) #endif /* CONFIG_SOC_SERIES_STM32H5X */ /* * offset and len must be aligned on write-block-size for write, * positive and not beyond end of flash */ bool flash_stm32_valid_range(const struct device *dev, off_t offset, uint32_t len, bool write) { if (stm32_flash_has_2_banks(dev) && (CONFIG_FLASH_SIZE < STM32_SERIES_MAX_FLASH)) { /* * In case of bank1/2 discontinuity, the range should not * start before bank2 and end beyond bank1 at the same time. * Locations beyond bank2 are caught by * flash_stm32_range_exists. 
*/ if ((offset < BANK2_OFFSET) && (offset + len > FLASH_SIZE / 2)) { return 0; } } if (write && !flash_stm32_valid_write(offset, len)) { return false; } return flash_stm32_range_exists(dev, offset, len); } static int write_nwords(const struct device *dev, off_t offset, const uint32_t *buff, size_t n) { FLASH_TypeDef *regs = FLASH_STM32_REGS(dev); volatile uint32_t *flash = (uint32_t *)(offset + FLASH_STM32_BASE_ADDRESS); bool full_zero = true; uint32_t tmp; int rc; int i; /* if the non-secure control register is locked,do not fail silently */ if (regs->NSCR & FLASH_STM32_NSLOCK) { LOG_ERR("NSCR locked\n"); return -EIO; } /* Check that no Flash main memory operation is ongoing */ rc = flash_stm32_wait_flash_idle(dev); if (rc < 0) { return rc; } /* Check if this double/quad word is erased and value isn't 0. * * It is allowed to write only zeros over an already written dword / qword * See 6.3.7 in STM32L5 reference manual. * See 7.3.7 in STM32U5 reference manual. * See 7.3.5 in STM32H5 reference manual. 
*/ for (i = 0; i < n; i++) { if (buff[i] != 0) { full_zero = false; break; } } if (!full_zero) { for (i = 0; i < n; i++) { if (flash[i] != 0xFFFFFFFFUL) { LOG_ERR("Word at offs %ld not erased", (long)(offset + i)); return -EIO; } } } /* Set the NSPG bit */ regs->NSCR |= FLASH_STM32_NSPG; /* Flush the register write */ tmp = regs->NSCR; /* Perform the data write operation at the desired memory address */ for (i = 0; i < n; i++) { flash[i] = buff[i]; } /* Wait until the NSBSY bit is cleared */ rc = flash_stm32_wait_flash_idle(dev); /* Clear the NSPG bit */ regs->NSCR &= (~FLASH_STM32_NSPG); return rc; } static int erase_page(const struct device *dev, unsigned int offset) { FLASH_TypeDef *regs = FLASH_STM32_REGS(dev); uint32_t tmp; int rc; int page; /* if the non-secure control register is locked,do not fail silently */ if (regs->NSCR & FLASH_STM32_NSLOCK) { LOG_ERR("NSCR locked\n"); return -EIO; } /* Check that no Flash memory operation is ongoing */ rc = flash_stm32_wait_flash_idle(dev); if (rc < 0) { return rc; } if (stm32_flash_has_2_banks(dev)) { bool bank_swap; /* Check whether bank1/2 are swapped */ bank_swap = ((regs->OPTR & FLASH_OPTR_SWAP_BANK) == FLASH_OPTR_SWAP_BANK); if ((offset < (FLASH_SIZE / 2)) && !bank_swap) { /* The pages to be erased is in bank 1 */ regs->NSCR &= ~FLASH_STM32_NSBKER_MSK; page = offset / FLASH_PAGE_SIZE; LOG_DBG("Erase page %d on bank 1", page); } else if ((offset >= BANK2_OFFSET) && bank_swap) { /* The pages to be erased is in bank 1 */ regs->NSCR &= ~FLASH_STM32_NSBKER_MSK; page = (offset - BANK2_OFFSET) / FLASH_PAGE_SIZE; LOG_DBG("Erase page %d on bank 1", page); } else if ((offset < (FLASH_SIZE / 2)) && bank_swap) { /* The pages to be erased is in bank 2 */ regs->NSCR |= FLASH_STM32_NSBKER; page = offset / FLASH_PAGE_SIZE; LOG_DBG("Erase page %d on bank 2", page); } else if ((offset >= BANK2_OFFSET) && !bank_swap) { /* The pages to be erased is in bank 2 */ regs->NSCR |= FLASH_STM32_NSBKER; page = (offset - BANK2_OFFSET) / 
FLASH_PAGE_SIZE; LOG_DBG("Erase page %d on bank 2", page); } else { LOG_ERR("Offset %d does not exist", offset); return -EINVAL; } } else { page = offset / FLASH_PAGE_SIZE_128_BITS; LOG_DBG("Erase page %d\n", page); } /* Set the NSPER bit and select the page you wish to erase */ regs->NSCR |= FLASH_STM32_NSPER; regs->NSCR &= ~FLASH_STM32_NSPNB_MSK; regs->NSCR |= (page << FLASH_STM32_NSPNB_POS); /* Set the NSSTRT bit */ regs->NSCR |= FLASH_STM32_NSSTRT; /* flush the register write */ tmp = regs->NSCR; /* Wait for the NSBSY bit */ rc = flash_stm32_wait_flash_idle(dev); if (stm32_flash_has_2_banks(dev)) { regs->NSCR &= ~(FLASH_STM32_NSPER | FLASH_STM32_NSBKER); } else { regs->NSCR &= ~(FLASH_STM32_NSPER); } return rc; } int flash_stm32_block_erase_loop(const struct device *dev, unsigned int offset, unsigned int len) { unsigned int address = offset; int rc = 0; bool icache_enabled = LL_ICACHE_IsEnabled(); if (icache_enabled) { /* Disable icache, this will start the invalidation procedure. * All changes(erase/write) to flash memory should happen when * i-cache is disabled. A write to flash performed without * disabling i-cache will set ERRF error flag in SR register. */ rc = stm32_icache_disable(); if (rc != 0) { return rc; } } for (; address <= offset + len - 1 ; address += FLASH_PAGE_SIZE) { rc = erase_page(dev, address); if (rc < 0) { break; } } if (icache_enabled) { /* Since i-cache was disabled, this would start the * invalidation procedure, so wait for completion. */ rc = icache_wait_for_invalidate_complete(); /* I-cache should be enabled only after the * invalidation is complete. */ stm32_icache_enable(); } return rc; } int flash_stm32_write_range(const struct device *dev, unsigned int offset, const void *data, unsigned int len) { int i, rc = 0; bool icache_enabled = LL_ICACHE_IsEnabled(); if (icache_enabled) { /* Disable icache, this will start the invalidation procedure. * All changes(erase/write) to flash memory should happen when * i-cache is disabled. 
A write to flash performed without * disabling i-cache will set ERRF error flag in SR register. */ rc = stm32_icache_disable(); if (rc != 0) { return rc; } } for (i = 0; i < len; i += FLASH_STM32_WRITE_BLOCK_SIZE) { rc = write_nwords(dev, offset + i, ((const uint32_t *) data + (i>>2)), FLASH_STM32_WRITE_BLOCK_SIZE / 4); if (rc < 0) { break; } } if (icache_enabled) { int rc2; /* Since i-cache was disabled, this would start the * invalidation procedure, so wait for completion. */ rc2 = icache_wait_for_invalidate_complete(); if (!rc) { rc = rc2; } /* I-cache should be enabled only after the * invalidation is complete. */ stm32_icache_enable(); } return rc; } void flash_stm32_page_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { static struct flash_pages_layout stm32_flash_layout[3]; static size_t stm32_flash_layout_size; *layout = stm32_flash_layout; if (stm32_flash_layout[0].pages_count != 0) { /* Short circuit calculation logic if already performed (size is known) */ *layout_size = stm32_flash_layout_size; return; } if (stm32_flash_has_2_banks(dev) && (CONFIG_FLASH_SIZE < STM32_SERIES_MAX_FLASH)) { /* * For stm32l552xx with 256 kB flash * which have space between banks 1 and 2. */ /* Bank1 */ stm32_flash_layout[0].pages_count = PAGES_PER_BANK; stm32_flash_layout[0].pages_size = FLASH_PAGE_SIZE; /* Dummy page corresponding to space between banks 1 and 2 */ stm32_flash_layout[1].pages_count = 1; stm32_flash_layout[1].pages_size = BANK2_OFFSET - (PAGES_PER_BANK * FLASH_PAGE_SIZE); /* Bank2 */ stm32_flash_layout[2].pages_count = PAGES_PER_BANK; stm32_flash_layout[2].pages_size = FLASH_PAGE_SIZE; stm32_flash_layout_size = ARRAY_SIZE(stm32_flash_layout); } else { /* * For stm32l562xx & stm32l552xx with 512 flash or stm32u5x, * which has no space between banks 1 and 2. 
*/ if (stm32_flash_has_2_banks(dev)) { /* L5 flash with dualbank has 2k pages */ /* U5 flash pages are always 8 kB in size */ /* H5 flash pages are always 8 kB in size */ /* Considering one layout of full flash size, even with 2 banks */ stm32_flash_layout[0].pages_count = FLASH_SIZE / FLASH_PAGE_SIZE; stm32_flash_layout[0].pages_size = FLASH_PAGE_SIZE; #if defined(CONFIG_SOC_SERIES_STM32L5X) } else { /* L5 flash without dualbank has 4k pages */ stm32_flash_layout[0].pages_count = FLASH_PAGE_NB_128_BITS; stm32_flash_layout[0].pages_size = FLASH_PAGE_SIZE_128_BITS; #endif /* CONFIG_SOC_SERIES_STM32L5X */ } /* * In this case the stm32_flash_layout table has one single element * when read by the flash_get_page_info() */ stm32_flash_layout_size = 1; } *layout_size = stm32_flash_layout_size; } ```
/content/code_sandbox/drivers/flash/flash_stm32l5x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,525
```objective-c /* * */ #ifndef CAD_QSPI_NOR_LL_H #define CAD_QSPI_NOR_LL_H #include <zephyr/device.h> #define CAD_QSPI_MICRON_N25Q_SUPPORT CONFIG_CAD_QSPI_MICRON_N25Q_SUPPORT #define CAD_INVALID -1 #define CAD_QSPI_ERROR -2 #define CAD_QSPI_ADDR_FASTREAD 0 #define CAD_QSPI_ADDR_FASTREAD_DUAL_IO 1 #define CAD_QSPI_ADDR_FASTREAD_QUAD_IO 2 #define CAT_QSPI_ADDR_SINGLE_IO 0 #define CAT_QSPI_ADDR_DUAL_IO 1 #define CAT_QSPI_ADDR_QUAD_IO 2 #define CAD_QSPI_BANK_ADDR(x) ((x) >> 24) #define CAD_QSPI_BANK_ADDR_MSK GENMASK(31, 24) #define CAD_QSPI_COMMAND_TIMEOUT 0x10000000 #define CAD_QSPI_CFG 0x0 #define CAD_QSPI_CFG_BAUDDIV_MSK 0xff87ffff #define CAD_QSPI_CFG_BAUDDIV(x) FIELD_PREP(0x780000, x) #define CAD_QSPI_CFG_CS_MSK ~0x3c00 #define CAD_QSPI_CFG_CS(x) (((x) << 11)) #define CAD_QSPI_CFG_ENABLE (BIT(0)) #define CAD_QSPI_CFG_ENDMA_CLR_MSK 0xffff7fff #define CAD_QSPI_CFG_IDLE (BIT(31)) #define CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK 0xfffffffb #define CAD_QSPI_CFG_SELCLKPOL_CLR_MSK 0xfffffffd #define CAD_QSPI_DELAY 0xc #define CAD_QSPI_DELAY_CSSOT(x) (FIELD_GET(0xff, (x)) << 0) #define CAD_QSPI_DELAY_CSEOT(x) (FIELD_GET(0xff, (x)) << 8) #define CAD_QSPI_DELAY_CSDADS(x) (FIELD_GET(0xff, (x)) << 16) #define CAD_QSPI_DELAY_CSDA(x) (FIELD_GET(0xff, (x)) << 24) #define CAD_QSPI_DEVSZ 0x14 #define CAD_QSPI_DEVSZ_ADDR_BYTES(x) ((x) << 0) #define CAD_QSPI_DEVSZ_BYTES_PER_PAGE(x) ((x) << 4) #define CAD_QSPI_DEVSZ_BYTES_PER_BLOCK(x) ((x) << 16) #define CAD_QSPI_DEVWR 0x8 #define CAD_QSPI_DEVRD 0x4 #define CAD_QSPI_DEV_OPCODE(x) (FIELD_GET(0xff, (x)) << 0) #define CAD_QSPI_DEV_INST_TYPE(x) (FIELD_GET(0x03, (x)) << 8) #define CAD_QSPI_DEV_ADDR_TYPE(x) (FIELD_GET(0x03, (x)) << 12) #define CAD_QSPI_DEV_DATA_TYPE(x) (FIELD_GET(0x03, (x)) << 16) #define CAD_QSPI_DEV_MODE_BIT(x) (FIELD_GET(0x01, (x)) << 20) #define CAD_QSPI_DEV_DUMMY_CLK_CYCLE(x) (FIELD_GET(0x0f, (x)) << 24) #define CAD_QSPI_FLASHCMD 0x90 #define CAD_QSPI_FLASHCMD_ADDR 0x94 #define CAD_QSPI_FLASHCMD_EXECUTE 0x1 #define 
CAD_QSPI_FLASHCMD_EXECUTE_STAT 0x2 #define CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX 5 #define CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES(x) (FIELD_PREP(0x000f80, (x))) #define CAD_QSPI_FLASHCMD_OPCODE(x) (FIELD_GET(0xff, (x)) << 24) #define CAD_QSPI_FLASHCMD_ENRDDATA(x) (FIELD_GET(1, (x)) << 23) #define CAD_QSPI_FLASHCMD_NUMRDDATABYTES(x) (FIELD_GET(0xf, (x)) << 20) #define CAD_QSPI_FLASHCMD_ENCMDADDR(x) (FIELD_GET(1, (x)) << 19) #define CAD_QSPI_FLASHCMD_ENMODEBIT(x) (FIELD_GET(1, (x)) << 18) #define CAD_QSPI_FLASHCMD_NUMADDRBYTES(x) (FIELD_GET(0x3, (x)) << 16) #define CAD_QSPI_FLASHCMD_ENWRDATA(x) (FIELD_GET(1, (x)) << 15) #define CAD_QSPI_FLASHCMD_NUMWRDATABYTES(x) (FIELD_GET(0x7, (x)) << 12) #define CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(x) (FIELD_GET(0x1f, (x)) << 7) #define CAD_QSPI_FLASHCMD_RDDATA0 0xa0 #define CAD_QSPI_FLASHCMD_RDDATA1 0xa4 #define CAD_QSPI_FLASHCMD_WRDATA0 0xa8 #define CAD_QSPI_FLASHCMD_WRDATA1 0xac #define CAD_QSPI_RDDATACAP 0x10 #define CAD_QSPI_RDDATACAP_BYP(x) (FIELD_GET(1, (x)) << 0) #define CAD_QSPI_RDDATACAP_DELAY(x) (FIELD_GET(0xf, (x)) << 1) #define CAD_QSPI_REMAPADDR 0x24 #define CAD_QSPI_REMAPADDR_VALUE_SET(x) (FIELD_GET(0xffffffff, (x)) << 0) #define CAD_QSPI_SRAMPART 0x18 #define CAD_QSPI_SRAMFILL 0x2c #define CAD_QSPI_SRAMPART_ADDR(x) (FIELD_GET(0x3ff, ((x) >> 0))) #define CAD_QSPI_SRAM_FIFO_ENTRY_COUNT (512 / sizeof(uint32_t)) #define CAD_QSPI_SRAMFILL_INDWRPART(x) (FIELD_GET(0x00ffff, ((x) >> 16))) #define CAD_QSPI_SRAMFILL_INDRDPART(x) (FIELD_GET(0x00ffff, ((x) >> 0))) #define CAD_QSPI_SELCLKPHASE(x) (FIELD_GET(1, (x)) << 2) #define CAD_QSPI_SELCLKPOL(x) (FIELD_GET(1, (x)) << 1) #define CAD_QSPI_STIG_FLAGSR_PROGRAMREADY(x) (FIELD_GET(1, ((x) >> 7))) #define CAD_QSPI_STIG_FLAGSR_ERASEREADY(x) (FIELD_GET(1, ((x) >> 7))) #define CAD_QSPI_STIG_FLAGSR_ERASEERROR(x) (FIELD_GET(1, ((x) >> 5))) #define CAD_QSPI_STIG_FLAGSR_PROGRAMERROR(x) (FIELD_GET(1, ((x) >> 4))) #define CAD_QSPI_STIG_OPCODE_CLFSR 0x50 #define CAD_QSPI_STIG_OPCODE_RDID 0x9f #define 
CAD_QSPI_STIG_OPCODE_WRDIS 0x4 #define CAD_QSPI_STIG_OPCODE_WREN 0x6 #define CAD_QSPI_STIG_OPCODE_SUBSEC_ERASE 0x20 #define CAD_QSPI_STIG_OPCODE_SEC_ERASE 0xd8 #define CAD_QSPI_STIG_OPCODE_WREN_EXT_REG 0xc5 #define CAD_QSPI_STIG_OPCODE_DIE_ERASE 0xc4 #define CAD_QSPI_STIG_OPCODE_BULK_ERASE 0xc7 #define CAD_QSPI_STIG_OPCODE_RDSR 0x5 #define CAD_QSPI_STIG_OPCODE_RDFLGSR 0x70 #define CAD_QSPI_STIG_OPCODE_RESET_EN 0x66 #define CAD_QSPI_STIG_OPCODE_RESET_MEM 0x99 #define CAD_QSPI_STIG_RDID_CAPACITYID(x) (FIELD_GET(0xff, ((x) >> 16))) #define CAD_QSPI_STIG_SR_BUSY(x) (FIELD_GET(1, ((x)))) #define CAD_QSPI_INST_SINGLE 0 #define CAD_QSPI_INST_DUAL 1 #define CAD_QSPI_INST_QUAD 2 #define CAD_QSPI_INDRDSTADDR 0x68 #define CAD_QSPI_INDRDCNT 0x6c #define CAD_QSPI_INDRD 0x60 #define CAD_QSPI_INDRD_RD_STAT(x) (FIELD_GET(1, ((x) >> 2))) #define CAD_QSPI_INDRD_START 1 #define CAD_QSPI_INDRD_IND_OPS_DONE 0x20 #define CAD_QSPI_INDWR 0x70 #define CAD_QSPI_INDWR_RDSTAT(x) (FIELD_GET(1, ((x) >> 2))) #define CAD_QSPI_INDWRSTADDR 0x78 #define CAD_QSPI_INDWRCNT 0x7c #define CAD_QSPI_INDWR 0x70 #define CAD_QSPI_INDWR_START 0x1 #define CAD_QSPI_INDWR_INDDONE 0x20 #define CAD_QSPI_INT_STATUS_ALL 0x0000ffff #define CAD_QSPI_N25Q_DIE_SIZE 0x02000000 #define CAD_QSPI_BANK_SIZE 0x01000000 #define CAD_QSPI_PAGE_SIZE 0x00000100 #define CAD_QSPI_IRQMSK 0x44 #define CAD_QSPI_SUBSECTOR_SIZE CONFIG_CAD_QSPI_NOR_SUBSECTOR_SIZE #define QSPI_ADDR_BYTES CONFIG_QSPI_ADDR_BYTES #define QSPI_BYTES_PER_DEV CONFIG_QSPI_BYTES_PER_DEV #define QSPI_BYTES_PER_BLOCK CONFIG_QSPI_BYTES_PER_BLOCK #define QSPI_FAST_READ 0xb #define QSPI_WRITE 0x2 /* QSPI CONFIGURATIONS */ #define QSPI_CONFIG_CPOL 1 #define QSPI_CONFIG_CPHA 1 #define QSPI_CONFIG_CSSOT 0x14 #define QSPI_CONFIG_CSEOT 0x14 #define QSPI_CONFIG_CSDADS 0xff #define QSPI_CONFIG_CSDA 0xc8 struct cad_qspi_params { uintptr_t reg_base; uintptr_t data_base; uint32_t data_size; int clk_rate; uint32_t qspi_device_size; int cad_qspi_cs; }; int cad_qspi_init(struct 
cad_qspi_params *cad_params, uint32_t clk_phase, uint32_t clk_pol, uint32_t csda, uint32_t csdads, uint32_t cseot, uint32_t cssot, uint32_t rddatacap); void cad_qspi_set_chip_select(struct cad_qspi_params *cad_params, int cs); int cad_qspi_erase(struct cad_qspi_params *cad_params, uint32_t offset, uint32_t size); int cad_qspi_write(struct cad_qspi_params *cad_params, void *buffer, uint32_t offset, uint32_t size); int cad_qspi_read(struct cad_qspi_params *cad_params, void *buffer, uint32_t offset, uint32_t size); int cad_qspi_update(struct cad_qspi_params *cad_params, void *buffer, uint32_t offset, uint32_t size); #endif ```
/content/code_sandbox/drivers/flash/flash_cadence_qspi_nor_ll.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,484
```unknown menuconfig SPI_FLASH_AT45 bool "AT45 family flash driver" default y depends on DT_HAS_ATMEL_AT45_ENABLED select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_EXPLICIT_ERASE select SPI help This driver can handle several instances of AT45 family chips that are enabled by specifying devicetree nodes with the "compatible" property set to "atmel,at45" and other required properties like JEDEC ID, chip capacity, block and page size etc. configured accordingly. The driver is only capable of using "power of 2" binary page sizes in those chips and at initialization configures them to work in that mode (unless it is already done). if SPI_FLASH_AT45 config SPI_FLASH_AT45_USE_READ_MODIFY_WRITE bool "Use Read-Modify-Write command in flash_write()" default y help Use the Read-Modify-Write command (opcode 0x58) instead of the default Main Memory Program without Built-In Erase (opcode 0x02). This allows writing of data without prior erasing of corresponding pages. config SPI_FLASH_AT45_INIT_PRIORITY int "Driver initialization priority" default 80 help Device driver initialization priority. SPI driver needs to be initialized before this one. endif # SPI_FLASH_AT45 ```
/content/code_sandbox/drivers/flash/Kconfig.at45
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
295
```unknown # # # config SOC_FLASH_NRF_MRAM bool "Nordic Semiconductor flash driver for MRAM" default y depends on DT_HAS_NORDIC_MRAM_ENABLED select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_NO_EXPLICIT_ERASE imply MPU_ALLOW_FLASH_WRITE if ARM_MPU help Enables Nordic Semiconductor flash driver for MRAM in direct write mode. Note that MRAM words are auto-erased when written to, but writing to a pre-erased area is faster. Hence, the erase API is not required, but it can be used to amortize write performance for some use cases. ```
/content/code_sandbox/drivers/flash/Kconfig.nrf_mram
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
142
```c /* * */ /* * ************************************************************************** * xSPI flash controller driver for stm32 serie with xSPI periherals * This driver is based on the stm32Cube HAL XSPI driver * with one xspi DTS NODE * ************************************************************************** */ #define DT_DRV_COMPAT st_stm32_xspi_nor #include <errno.h> #include <zephyr/kernel.h> #include <soc.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/flash.h> #include <zephyr/dt-bindings/flash_controller/xspi.h> #include <zephyr/drivers/gpio.h> #include <zephyr/irq.h> #include "spi_nor.h" #include "jesd216.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flash_stm32_xspi, CONFIG_FLASH_LOG_LEVEL); #define STM32_XSPI_NODE DT_INST_PARENT(0) #define DT_XSPI_IO_PORT_PROP_OR(prop, default_value) \ COND_CODE_1(DT_NODE_HAS_PROP(STM32_XSPI_NODE, prop), \ (_CONCAT(HAL_XSPIM_, DT_STRING_TOKEN(STM32_XSPI_NODE, prop))), \ ((default_value))) /* Get the base address of the flash from the DTS node */ #define STM32_XSPI_BASE_ADDRESS DT_INST_REG_ADDR(0) #define STM32_XSPI_RESET_GPIO DT_INST_NODE_HAS_PROP(0, reset_gpios) #define STM32_XSPI_DLYB_BYPASSED DT_PROP(STM32_XSPI_NODE, dlyb_bypass) #define STM32_XSPI_USE_DMA DT_NODE_HAS_PROP(STM32_XSPI_NODE, dmas) #if STM32_XSPI_USE_DMA #include <zephyr/drivers/dma/dma_stm32.h> #include <zephyr/drivers/dma.h> #include <stm32_ll_dma.h> #endif /* STM32_XSPI_USE_DMA */ #include "flash_stm32_xspi.h" static inline void xspi_lock_thread(const struct device *dev) { struct flash_stm32_xspi_data *dev_data = dev->data; k_sem_take(&dev_data->sem, K_FOREVER); } static inline void xspi_unlock_thread(const struct device *dev) { struct flash_stm32_xspi_data *dev_data = dev->data; k_sem_give(&dev_data->sem); } static int xspi_send_cmd(const struct device *dev, XSPI_RegularCmdTypeDef *cmd) { struct flash_stm32_xspi_data 
*dev_data = dev->data; HAL_StatusTypeDef hal_ret; LOG_DBG("Instruction 0x%x", cmd->Instruction); dev_data->cmd_status = 0; hal_ret = HAL_XSPI_Command(&dev_data->hxspi, cmd, HAL_XSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send XSPI instruction", hal_ret); return -EIO; } LOG_DBG("CCR 0x%x", dev_data->hxspi.Instance->CCR); return dev_data->cmd_status; } static int xspi_read_access(const struct device *dev, XSPI_RegularCmdTypeDef *cmd, uint8_t *data, const size_t size) { struct flash_stm32_xspi_data *dev_data = dev->data; HAL_StatusTypeDef hal_ret; LOG_DBG("Instruction 0x%x", cmd->Instruction); cmd->DataLength = size; dev_data->cmd_status = 0; hal_ret = HAL_XSPI_Command(&dev_data->hxspi, cmd, HAL_XSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send XSPI instruction", hal_ret); return -EIO; } #if STM32_XSPI_USE_DMA hal_ret = HAL_XSPI_Receive_DMA(&dev_data->hxspi, data); #else hal_ret = HAL_XSPI_Receive_IT(&dev_data->hxspi, data); #endif if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to read data", hal_ret); return -EIO; } k_sem_take(&dev_data->sync, K_FOREVER); return dev_data->cmd_status; } static int xspi_write_access(const struct device *dev, XSPI_RegularCmdTypeDef *cmd, const uint8_t *data, const size_t size) { const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; HAL_StatusTypeDef hal_ret; LOG_DBG("Instruction 0x%x", cmd->Instruction); cmd->DataLength = size; dev_data->cmd_status = 0; /* in OPI/STR the 3-byte AddressWidth is not supported by the NOR flash */ if ((dev_cfg->data_mode == XSPI_OCTO_MODE) && (cmd->AddressWidth != HAL_XSPI_ADDRESS_32_BITS)) { LOG_ERR("XSPI wr in OPI/STR mode is for 32bit address only"); return -EIO; } hal_ret = HAL_XSPI_Command(&dev_data->hxspi, cmd, HAL_XSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send XSPI instruction", hal_ret); return -EIO; } #if STM32_XSPI_USE_DMA hal_ret = 
HAL_XSPI_Transmit_DMA(&dev_data->hxspi, (uint8_t *)data); #else hal_ret = HAL_XSPI_Transmit_IT(&dev_data->hxspi, (uint8_t *)data); #endif if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to write data", hal_ret); return -EIO; } k_sem_take(&dev_data->sync, K_FOREVER); return dev_data->cmd_status; } /* * Gives a XSPI_RegularCmdTypeDef with all parameters set * except Instruction, Address, DummyCycles, NbData */ static XSPI_RegularCmdTypeDef xspi_prepare_cmd(const uint8_t transfer_mode, const uint8_t transfer_rate) { XSPI_RegularCmdTypeDef cmd_tmp = { .OperationType = HAL_XSPI_OPTYPE_COMMON_CFG, .InstructionWidth = ((transfer_mode == XSPI_OCTO_MODE) ? HAL_XSPI_INSTRUCTION_16_BITS : HAL_XSPI_INSTRUCTION_8_BITS), .InstructionDTRMode = ((transfer_rate == XSPI_DTR_TRANSFER) ? HAL_XSPI_INSTRUCTION_DTR_ENABLE : HAL_XSPI_INSTRUCTION_DTR_DISABLE), .AddressDTRMode = ((transfer_rate == XSPI_DTR_TRANSFER) ? HAL_XSPI_ADDRESS_DTR_ENABLE : HAL_XSPI_ADDRESS_DTR_DISABLE), /* AddressWidth must be set to 32bits for init and mem config phase */ .AddressWidth = HAL_XSPI_ADDRESS_32_BITS, .AlternateBytesMode = HAL_XSPI_ALT_BYTES_NONE, .DataDTRMode = ((transfer_rate == XSPI_DTR_TRANSFER) ? HAL_XSPI_DATA_DTR_ENABLE : HAL_XSPI_DATA_DTR_DISABLE), .DQSMode = (transfer_rate == XSPI_DTR_TRANSFER) ? 
HAL_XSPI_DQS_ENABLE : HAL_XSPI_DQS_DISABLE, .SIOOMode = HAL_XSPI_SIOO_INST_EVERY_CMD, }; switch (transfer_mode) { case XSPI_OCTO_MODE: { cmd_tmp.InstructionMode = HAL_XSPI_INSTRUCTION_8_LINES; cmd_tmp.AddressMode = HAL_XSPI_ADDRESS_8_LINES; cmd_tmp.DataMode = HAL_XSPI_DATA_8_LINES; break; } case XSPI_QUAD_MODE: { cmd_tmp.InstructionMode = HAL_XSPI_INSTRUCTION_4_LINES; cmd_tmp.AddressMode = HAL_XSPI_ADDRESS_4_LINES; cmd_tmp.DataMode = HAL_XSPI_DATA_4_LINES; break; } case XSPI_DUAL_MODE: { cmd_tmp.InstructionMode = HAL_XSPI_INSTRUCTION_2_LINES; cmd_tmp.AddressMode = HAL_XSPI_ADDRESS_2_LINES; cmd_tmp.DataMode = HAL_XSPI_DATA_2_LINES; break; } default: { cmd_tmp.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd_tmp.AddressMode = HAL_XSPI_ADDRESS_1_LINE; cmd_tmp.DataMode = HAL_XSPI_DATA_1_LINE; break; } } return cmd_tmp; } static uint32_t stm32_xspi_hal_address_size(const struct device *dev) { struct flash_stm32_xspi_data *dev_data = dev->data; if (dev_data->address_width == 4U) { return HAL_XSPI_ADDRESS_32_BITS; } return HAL_XSPI_ADDRESS_24_BITS; } #if defined(CONFIG_FLASH_JESD216_API) /* * Read the JEDEC ID data from the external Flash at init * and store in the jedec_id Table of the flash_stm32_xspi_data * The JEDEC ID is not given by a DTS property */ static int stm32_xspi_read_jedec_id(const struct device *dev) { struct flash_stm32_xspi_data *dev_data = dev->data; /* This is a SPI/STR command to issue to the external Flash device */ XSPI_RegularCmdTypeDef cmd = xspi_prepare_cmd(XSPI_SPI_MODE, XSPI_STR_TRANSFER); cmd.Instruction = JESD216_CMD_READ_ID; cmd.AddressWidth = stm32_xspi_hal_address_size(dev); cmd.AddressMode = HAL_XSPI_ADDRESS_NONE; cmd.DataLength = JESD216_READ_ID_LEN; /* 3 bytes in the READ ID */ HAL_StatusTypeDef hal_ret; hal_ret = HAL_XSPI_Command(&dev_data->hxspi, &cmd, HAL_XSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send XSPI instruction", hal_ret); return -EIO; } /* Place the received data directly into the jedec 
Table */ hal_ret = HAL_XSPI_Receive(&dev_data->hxspi, dev_data->jedec_id, HAL_XSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to read data", hal_ret); return -EIO; } LOG_DBG("Jedec ID = [%02x %02x %02x]", dev_data->jedec_id[0], dev_data->jedec_id[1], dev_data->jedec_id[2]); dev_data->cmd_status = 0; return 0; } /* * Read Serial Flash ID : * just gives the values received by the external Flash */ static int xspi_read_jedec_id(const struct device *dev, uint8_t *id) { struct flash_stm32_xspi_data *dev_data = dev->data; /* Take jedec Id values from the table (issued from the octoFlash) */ memcpy(id, dev_data->jedec_id, JESD216_READ_ID_LEN); LOG_INF("Manuf ID = %02x Memory Type = %02x Memory Density = %02x", id[0], id[1], id[2]); return 0; } #endif /* CONFIG_FLASH_JESD216_API */ /* * Read Serial Flash Discovery Parameter from the external Flash at init : * perform a read access over SPI bus for SDFP (DataMode is already set) * The SFDP table is not given by a DTS property */ static int stm32_xspi_read_sfdp(const struct device *dev, off_t addr, void *data, size_t size) { const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; XSPI_RegularCmdTypeDef cmd = xspi_prepare_cmd(dev_cfg->data_mode, dev_cfg->data_rate); if (dev_cfg->data_mode == XSPI_OCTO_MODE) { cmd.Instruction = JESD216_OCMD_READ_SFDP; cmd.DummyCycles = 20U; cmd.AddressWidth = HAL_XSPI_ADDRESS_32_BITS; } else { cmd.Instruction = JESD216_CMD_READ_SFDP; cmd.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd.DataMode = HAL_XSPI_DATA_1_LINE; cmd.AddressMode = HAL_XSPI_ADDRESS_1_LINE; cmd.DummyCycles = 8U; cmd.AddressWidth = HAL_XSPI_ADDRESS_24_BITS; } cmd.Address = addr; cmd.DataLength = size; HAL_StatusTypeDef hal_ret; hal_ret = HAL_XSPI_Command(&dev_data->hxspi, &cmd, HAL_XSPI_TIMEOUT_DEFAULT_VALUE); if (hal_ret != HAL_OK) { LOG_ERR("%d: Failed to send XSPI instruction", hal_ret); return -EIO; } hal_ret = 
HAL_XSPI_Receive(&dev_data->hxspi, (uint8_t *)data,
			    HAL_XSPI_TIMEOUT_DEFAULT_VALUE);
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to read data", hal_ret);
		return -EIO;
	}

	dev_data->cmd_status = 0;

	return 0;
}

/*
 * Read Serial Flash Discovery Parameter :
 * perform a read access over SPI bus for SDFP (DataMode is already set)
 *
 * Flash-API facing sfdp_read hook: forwards to stm32_xspi_read_sfdp() which
 * queries the external Flash directly (there is no sfdp-bfp table in the DTS).
 * Returns 0 on success, -EINVAL when the device did not answer.
 */
static int xspi_read_sfdp(const struct device *dev, off_t addr, void *data,
			  size_t size)
{
	LOG_INF("Read SFDP from externalFlash");
	/* Get the SFDP from the external Flash (no sfdp-bfp table in the DeviceTree) */
	if (stm32_xspi_read_sfdp(dev, addr, data, size) == 0) {
		/* If valid, then ignore any table from the DTS */
		return 0;
	}
	LOG_INF("Error reading SFDP from external Flash and none in the DTS");
	return -EINVAL;
}

/*
 * Range check for any flash access: [addr, addr + size) must lie within the
 * configured flash size. The uint64_t casts avoid wrap-around when
 * addr + size would overflow the native off_t/size_t width.
 */
static bool xspi_address_is_valid(const struct device *dev, off_t addr,
				  size_t size)
{
	const struct flash_stm32_xspi_config *dev_cfg = dev->config;
	size_t flash_size = dev_cfg->flash_size;

	return (addr >= 0) && ((uint64_t)addr + (uint64_t)size <= flash_size);
}

/*
 * Arm the HAL auto-polling state machine (interrupt mode) with the given
 * status-match configuration, then block on the driver's sync semaphore
 * until the match interrupt fires or timeout_ms elapses.
 *
 * The caller must already have issued the matching status-read command
 * (HAL_XSPI_Command) before calling this.
 *
 * On timeout the ongoing XSPI transfer is aborted and the semaphore reset
 * so a late completion cannot leave a stale count.
 *
 * Returns 0 on match, -EIO on HAL failure or timeout, otherwise the
 * cmd_status recorded by the error callback.
 */
static int stm32_xspi_wait_auto_polling(const struct device *dev,
					XSPI_AutoPollingTypeDef *s_config,
					uint32_t timeout_ms)
{
	struct flash_stm32_xspi_data *dev_data = dev->data;

	dev_data->cmd_status = 0;
	if (HAL_XSPI_AutoPolling_IT(&dev_data->hxspi, s_config) != HAL_OK) {
		LOG_ERR("XSPI AutoPoll failed");
		return -EIO;
	}

	if (k_sem_take(&dev_data->sync, K_MSEC(timeout_ms)) != 0) {
		LOG_ERR("XSPI AutoPoll wait failed");
		HAL_XSPI_Abort(&dev_data->hxspi);
		k_sem_reset(&dev_data->sync);
		return -EIO;
	}

	/* HAL_XSPI_AutoPolling_IT enables transfer error interrupt which sets
	 * cmd_status.
	 */
	return dev_data->cmd_status;
}

/*
 * This function Polls the WEL (write enable latch) bit to become to 0
 * When the Chip Erase Cycle is completed, the Write Enable Latch (WEL) bit is cleared.
* in nor_mode SPI/OPI XSPI_SPI_MODE or XSPI_OCTO_MODE * and nor_rate transfer STR/DTR XSPI_STR_TRANSFER or XSPI_DTR_TRANSFER */ static int stm32_xspi_mem_erased(const struct device *dev) { const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; uint8_t nor_mode = dev_cfg->data_mode; uint8_t nor_rate = dev_cfg->data_rate; XSPI_AutoPollingTypeDef s_config = {0}; XSPI_RegularCmdTypeDef s_command = xspi_prepare_cmd(nor_mode, nor_rate); /* Configure automatic polling mode command to wait for memory ready */ if (nor_mode == XSPI_OCTO_MODE) { s_command.Instruction = SPI_NOR_OCMD_RDSR; s_command.DummyCycles = (nor_rate == XSPI_DTR_TRANSFER) ? SPI_NOR_DUMMY_REG_OCTAL_DTR : SPI_NOR_DUMMY_REG_OCTAL; } else { s_command.Instruction = SPI_NOR_CMD_RDSR; /* force 1-line InstructionMode for any non-OSPI transfer */ s_command.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; s_command.AddressMode = HAL_XSPI_ADDRESS_NONE; /* force 1-line DataMode for any non-OSPI transfer */ s_command.DataMode = HAL_XSPI_DATA_1_LINE; s_command.DummyCycles = 0; } s_command.DataLength = ((nor_rate == XSPI_DTR_TRANSFER) ? 
2U : 1U); s_command.Address = 0U; /* Set the mask to 0x02 to mask all Status REG bits except WEL */ /* Set the match to 0x00 to check if the WEL bit is Reset */ s_config.MatchValue = SPI_NOR_WEL_MATCH; s_config.MatchMask = SPI_NOR_WEL_MASK; /* Write Enable Latch */ s_config.MatchMode = HAL_XSPI_MATCH_MODE_AND; s_config.IntervalTime = SPI_NOR_AUTO_POLLING_INTERVAL; s_config.AutomaticStop = HAL_XSPI_AUTOMATIC_STOP_ENABLE; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI AutoPoll command (WEL) failed"); return -EIO; } /* Start Automatic-Polling mode to wait until the memory is totally erased */ return stm32_xspi_wait_auto_polling(dev, &s_config, STM32_XSPI_BULK_ERASE_MAX_TIME); } /* * This function Polls the WIP(Write In Progress) bit to become to 0 * in nor_mode SPI/OPI XSPI_SPI_MODE or XSPI_OCTO_MODE * and nor_rate transfer STR/DTR XSPI_STR_TRANSFER or XSPI_DTR_TRANSFER */ static int stm32_xspi_mem_ready(const struct device *dev, uint8_t nor_mode, uint8_t nor_rate) { struct flash_stm32_xspi_data *dev_data = dev->data; XSPI_AutoPollingTypeDef s_config = {0}; XSPI_RegularCmdTypeDef s_command = xspi_prepare_cmd(nor_mode, nor_rate); /* Configure automatic polling mode command to wait for memory ready */ if (nor_mode == XSPI_OCTO_MODE) { s_command.Instruction = SPI_NOR_OCMD_RDSR; s_command.DummyCycles = (nor_rate == XSPI_DTR_TRANSFER) ? SPI_NOR_DUMMY_REG_OCTAL_DTR : SPI_NOR_DUMMY_REG_OCTAL; } else { s_command.Instruction = SPI_NOR_CMD_RDSR; /* force 1-line InstructionMode for any non-OSPI transfer */ s_command.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; s_command.AddressMode = HAL_XSPI_ADDRESS_NONE; /* force 1-line DataMode for any non-OSPI transfer */ s_command.DataMode = HAL_XSPI_DATA_1_LINE; s_command.DummyCycles = 0; } s_command.DataLength = ((nor_rate == XSPI_DTR_TRANSFER) ? 
2U : 1U); s_command.Address = 0U; /* Set the mask to 0x01 to mask all Status REG bits except WIP */ /* Set the match to 0x00 to check if the WIP bit is Reset */ s_config.MatchValue = SPI_NOR_MEM_RDY_MATCH; s_config.MatchMask = SPI_NOR_MEM_RDY_MASK; /* Write in progress */ s_config.MatchMode = HAL_XSPI_MATCH_MODE_AND; s_config.IntervalTime = SPI_NOR_AUTO_POLLING_INTERVAL; s_config.AutomaticStop = HAL_XSPI_AUTOMATIC_STOP_ENABLE; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI AutoPoll command failed"); return -EIO; } /* Start Automatic-Polling mode to wait until the memory is ready WIP=0 */ return stm32_xspi_wait_auto_polling(dev, &s_config, HAL_XSPI_TIMEOUT_DEFAULT_VALUE); } /* Enables writing to the memory sending a Write Enable and wait it is effective */ static int stm32_xspi_write_enable(const struct device *dev, uint8_t nor_mode, uint8_t nor_rate) { struct flash_stm32_xspi_data *dev_data = dev->data; XSPI_AutoPollingTypeDef s_config = {0}; XSPI_RegularCmdTypeDef s_command = xspi_prepare_cmd(nor_mode, nor_rate); /* Initialize the write enable command */ if (nor_mode == XSPI_OCTO_MODE) { s_command.Instruction = SPI_NOR_OCMD_WREN; } else { s_command.Instruction = SPI_NOR_CMD_WREN; /* force 1-line InstructionMode for any non-OSPI transfer */ s_command.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; } s_command.AddressMode = HAL_XSPI_ADDRESS_NONE; s_command.DataMode = HAL_XSPI_DATA_NONE; s_command.DummyCycles = 0U; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI flash write enable cmd failed"); return -EIO; } /* New command to Configure automatic polling mode to wait for write enabling */ if (nor_mode == XSPI_OCTO_MODE) { s_command.Instruction = SPI_NOR_OCMD_RDSR; s_command.AddressMode = HAL_XSPI_ADDRESS_8_LINES; s_command.DataMode = HAL_XSPI_DATA_8_LINES; s_command.DummyCycles = (nor_rate == XSPI_DTR_TRANSFER) ? 
SPI_NOR_DUMMY_REG_OCTAL_DTR : SPI_NOR_DUMMY_REG_OCTAL; } else { s_command.Instruction = SPI_NOR_CMD_RDSR; /* force 1-line DataMode for any non-OSPI transfer */ s_command.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; s_command.AddressMode = HAL_XSPI_ADDRESS_1_LINE; s_command.DataMode = HAL_XSPI_DATA_1_LINE; s_command.DummyCycles = 0; /* DummyCycles remains 0 */ } s_command.DataLength = (nor_rate == XSPI_DTR_TRANSFER) ? 2U : 1U; s_command.Address = 0U; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI config auto polling cmd failed"); return -EIO; } s_config.MatchValue = SPI_NOR_WREN_MATCH; s_config.MatchMask = SPI_NOR_WREN_MASK; s_config.MatchMode = HAL_XSPI_MATCH_MODE_AND; s_config.IntervalTime = SPI_NOR_AUTO_POLLING_INTERVAL; s_config.AutomaticStop = HAL_XSPI_AUTOMATIC_STOP_ENABLE; return stm32_xspi_wait_auto_polling(dev, &s_config, HAL_XSPI_TIMEOUT_DEFAULT_VALUE); } /* Write Flash configuration register 2 with new dummy cycles */ static int stm32_xspi_write_cfg2reg_dummy(XSPI_HandleTypeDef *hxspi, uint8_t nor_mode, uint8_t nor_rate) { uint8_t transmit_data = SPI_NOR_CR2_DUMMY_CYCLES_66MHZ; XSPI_RegularCmdTypeDef s_command = xspi_prepare_cmd(nor_mode, nor_rate); /* Initialize the writing of configuration register 2 */ s_command.Instruction = (nor_mode == XSPI_SPI_MODE) ? SPI_NOR_CMD_WR_CFGREG2 : SPI_NOR_OCMD_WR_CFGREG2; s_command.Address = SPI_NOR_REG2_ADDR3; s_command.DummyCycles = 0U; s_command.DataLength = (nor_mode == XSPI_SPI_MODE) ? 1U : ((nor_rate == XSPI_DTR_TRANSFER) ? 
2U : 1U); if (HAL_XSPI_Command(hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI transmit cmd"); return -EIO; } if (HAL_XSPI_Transmit(hxspi, &transmit_data, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI transmit "); return -EIO; } return 0; } /* Write Flash configuration register 2 with new single or octal SPI protocol */ static int stm32_xspi_write_cfg2reg_io(XSPI_HandleTypeDef *hxspi, uint8_t nor_mode, uint8_t nor_rate, uint8_t op_enable) { XSPI_RegularCmdTypeDef s_command = xspi_prepare_cmd(nor_mode, nor_rate); /* Initialize the writing of configuration register 2 */ s_command.Instruction = (nor_mode == XSPI_SPI_MODE) ? SPI_NOR_CMD_WR_CFGREG2 : SPI_NOR_OCMD_WR_CFGREG2; s_command.Address = SPI_NOR_REG2_ADDR1; s_command.DummyCycles = 0U; s_command.DataLength = (nor_mode == XSPI_SPI_MODE) ? 1U : ((nor_rate == XSPI_DTR_TRANSFER) ? 2U : 1U); if (HAL_XSPI_Command(hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("Write Flash configuration reg2 failed"); return -EIO; } if (HAL_XSPI_Transmit(hxspi, &op_enable, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("Write Flash configuration reg2 failed"); return -EIO; } return 0; } /* Read Flash configuration register 2 with new single or octal SPI protocol */ static int stm32_xspi_read_cfg2reg(XSPI_HandleTypeDef *hxspi, uint8_t nor_mode, uint8_t nor_rate, uint8_t *value) { XSPI_RegularCmdTypeDef s_command = xspi_prepare_cmd(nor_mode, nor_rate); /* Initialize the writing of configuration register 2 */ s_command.Instruction = (nor_mode == XSPI_SPI_MODE) ? SPI_NOR_CMD_RD_CFGREG2 : SPI_NOR_OCMD_RD_CFGREG2; s_command.Address = SPI_NOR_REG2_ADDR1; s_command.DummyCycles = (nor_mode == XSPI_SPI_MODE) ? 0U : ((nor_rate == XSPI_DTR_TRANSFER) ? SPI_NOR_DUMMY_REG_OCTAL_DTR : SPI_NOR_DUMMY_REG_OCTAL); s_command.DataLength = (nor_rate == XSPI_DTR_TRANSFER) ? 
2U : 1U; if (HAL_XSPI_Command(hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("Write Flash configuration reg2 failed"); return -EIO; } if (HAL_XSPI_Receive(hxspi, value, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("Write Flash configuration reg2 failed"); return -EIO; } return 0; } /* Set the NOR Flash to desired Interface mode : SPI/OSPI and STR/DTR according to the DTS */ static int stm32_xspi_config_mem(const struct device *dev) { const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; uint8_t reg[2]; /* Going to set the SPI mode and STR transfer rate : done */ if ((dev_cfg->data_mode != XSPI_OCTO_MODE) && (dev_cfg->data_rate == XSPI_STR_TRANSFER)) { LOG_INF("OSPI flash config is SPI|DUAL|QUAD / STR"); return 0; } /* Going to set the XPI mode (STR or DTR transfer rate) */ LOG_DBG("XSPI configuring Octo SPI mode"); if (stm32_xspi_write_enable(dev, XSPI_SPI_MODE, XSPI_STR_TRANSFER) != 0) { LOG_ERR("OSPI write Enable failed"); return -EIO; } /* Write Configuration register 2 (with new dummy cycles) */ if (stm32_xspi_write_cfg2reg_dummy(&dev_data->hxspi, XSPI_SPI_MODE, XSPI_STR_TRANSFER) != 0) { LOG_ERR("XSPI write CFGR2 failed"); return -EIO; } if (stm32_xspi_mem_ready(dev, XSPI_SPI_MODE, XSPI_STR_TRANSFER) != 0) { LOG_ERR("XSPI autopolling failed"); return -EIO; } if (stm32_xspi_write_enable(dev, XSPI_SPI_MODE, XSPI_STR_TRANSFER) != 0) { LOG_ERR("XSPI write Enable 2 failed"); return -EIO; } /* Write Configuration register 2 (with Octal I/O SPI protocol : choose STR or DTR) */ uint8_t mode_enable = ((dev_cfg->data_rate == XSPI_DTR_TRANSFER) ? 
SPI_NOR_CR2_DTR_OPI_EN : SPI_NOR_CR2_STR_OPI_EN); if (stm32_xspi_write_cfg2reg_io(&dev_data->hxspi, XSPI_SPI_MODE, XSPI_STR_TRANSFER, mode_enable) != 0) { LOG_ERR("XSPI write CFGR2 failed"); return -EIO; } /* Wait that the configuration is effective and check that memory is ready */ k_busy_wait(STM32_XSPI_WRITE_REG_MAX_TIME * USEC_PER_MSEC); /* Reconfigure the memory type of the peripheral */ dev_data->hxspi.Init.MemoryType = HAL_XSPI_MEMTYPE_MACRONIX; dev_data->hxspi.Init.DelayHoldQuarterCycle = HAL_XSPI_DHQC_ENABLE; if (HAL_XSPI_Init(&dev_data->hxspi) != HAL_OK) { LOG_ERR("XSPI mem type MACRONIX failed"); return -EIO; } if (dev_cfg->data_rate == XSPI_STR_TRANSFER) { if (stm32_xspi_mem_ready(dev, XSPI_OCTO_MODE, XSPI_STR_TRANSFER) != 0) { /* Check Flash busy ? */ LOG_ERR("XSPI flash busy failed"); return -EIO; } if (stm32_xspi_read_cfg2reg(&dev_data->hxspi, XSPI_OCTO_MODE, XSPI_STR_TRANSFER, reg) != 0) { /* Check the configuration has been correctly done on SPI_NOR_REG2_ADDR1 */ LOG_ERR("XSPI flash config read failed"); return -EIO; } LOG_INF("XSPI flash config is OCTO / STR"); } if (dev_cfg->data_rate == XSPI_DTR_TRANSFER) { if (stm32_xspi_mem_ready(dev, XSPI_OCTO_MODE, XSPI_DTR_TRANSFER) != 0) { /* Check Flash busy ? 
*/ LOG_ERR("XSPI flash busy failed"); return -EIO; } LOG_INF("XSPI flash config is OCTO / DTR"); } return 0; } /* gpio or send the different reset command to the NOR flash in SPI/OSPI and STR/DTR */ static int stm32_xspi_mem_reset(const struct device *dev) { struct flash_stm32_xspi_data *dev_data = dev->data; #if STM32_XSPI_RESET_GPIO const struct flash_stm32_xspi_config *dev_cfg = dev->config; /* Generate RESETn pulse for the flash memory */ gpio_pin_configure_dt(&dev_cfg->reset, GPIO_OUTPUT_ACTIVE); k_msleep(DT_INST_PROP(0, reset_gpios_duration)); gpio_pin_set_dt(&dev_cfg->reset, 0); #else /* Reset command sent sucessively for each mode SPI/OPS & STR/DTR */ XSPI_RegularCmdTypeDef s_command = { .OperationType = HAL_XSPI_OPTYPE_COMMON_CFG, .AddressMode = HAL_XSPI_ADDRESS_NONE, .InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE, .InstructionDTRMode = HAL_XSPI_INSTRUCTION_DTR_DISABLE, .Instruction = SPI_NOR_CMD_RESET_EN, .InstructionWidth = HAL_XSPI_INSTRUCTION_8_BITS, .AlternateBytesMode = HAL_XSPI_ALT_BYTES_NONE, .DataLength = HAL_XSPI_DATA_NONE, .DummyCycles = 0U, .DQSMode = HAL_XSPI_DQS_DISABLE, .SIOOMode = HAL_XSPI_SIOO_INST_EVERY_CMD, }; /* Reset enable in SPI mode and STR transfer mode */ if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI reset enable (SPI/STR) failed"); return -EIO; } /* Reset memory in SPI mode and STR transfer mode */ s_command.Instruction = SPI_NOR_CMD_RESET_MEM; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI reset memory (SPI/STR) failed"); return -EIO; } /* Reset enable in OPI mode and STR transfer mode */ s_command.InstructionMode = HAL_XSPI_INSTRUCTION_8_LINES; s_command.InstructionDTRMode = HAL_XSPI_INSTRUCTION_DTR_DISABLE; s_command.Instruction = SPI_NOR_OCMD_RESET_EN; s_command.InstructionWidth = HAL_XSPI_INSTRUCTION_16_BITS; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != 
HAL_OK) { LOG_ERR("XSPI reset enable (OCTO/STR) failed"); return -EIO; } /* Reset memory in OPI mode and STR transfer mode */ s_command.Instruction = SPI_NOR_OCMD_RESET_MEM; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI reset memory (OCTO/STR) failed"); return -EIO; } /* Reset enable in OPI mode and DTR transfer mode */ s_command.InstructionDTRMode = HAL_XSPI_INSTRUCTION_DTR_ENABLE; s_command.Instruction = SPI_NOR_OCMD_RESET_EN; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI reset enable (OCTO/DTR) failed"); return -EIO; } /* Reset memory in OPI mode and DTR transfer mode */ s_command.Instruction = SPI_NOR_OCMD_RESET_MEM; if (HAL_XSPI_Command(&dev_data->hxspi, &s_command, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI reset memory (OCTO/DTR) failed"); return -EIO; } #endif /* STM32_XSPI_RESET_GPIO */ /* Wait after SWreset CMD, in case SWReset occurred during erase operation */ k_busy_wait(STM32_XSPI_RESET_MAX_TIME * USEC_PER_MSEC); return 0; } #ifdef CONFIG_STM32_MEMMAP /* Function to configure the octoflash in MemoryMapped mode */ static int stm32_xspi_set_memorymap(const struct device *dev) { HAL_StatusTypeDef ret; const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; XSPI_RegularCmdTypeDef s_command = {0}; /* Non-zero values disturb the command */ XSPI_MemoryMappedTypeDef s_MemMappedCfg; /* Configure octoflash in MemoryMapped mode */ if ((dev_cfg->data_mode == XSPI_SPI_MODE) && (stm32_xspi_hal_address_size(dev) == HAL_XSPI_ADDRESS_24_BITS)) { /* OPI mode and 3-bytes address size not supported by memory */ LOG_ERR("XSPI_SPI_MODE in 3Bytes addressing is not supported"); return -EIO; } /* Initialize the read command */ s_command.OperationType = HAL_XSPI_OPTYPE_READ_CFG; s_command.InstructionMode = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? 
((dev_cfg->data_mode == XSPI_SPI_MODE) ? HAL_XSPI_INSTRUCTION_1_LINE : HAL_XSPI_INSTRUCTION_8_LINES) : HAL_XSPI_INSTRUCTION_8_LINES; s_command.InstructionDTRMode = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? HAL_XSPI_INSTRUCTION_DTR_DISABLE : HAL_XSPI_INSTRUCTION_DTR_ENABLE; s_command.InstructionWidth = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? ((dev_cfg->data_mode == XSPI_SPI_MODE) ? HAL_XSPI_INSTRUCTION_8_BITS : HAL_XSPI_INSTRUCTION_16_BITS) : HAL_XSPI_INSTRUCTION_16_BITS; s_command.Instruction = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? ((dev_cfg->data_mode == XSPI_SPI_MODE) ? ((stm32_xspi_hal_address_size(dev) == HAL_XSPI_ADDRESS_24_BITS) ? SPI_NOR_CMD_READ_FAST : SPI_NOR_CMD_READ_FAST_4B) : dev_data->read_opcode) : SPI_NOR_OCMD_DTR_RD; s_command.AddressMode = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? ((dev_cfg->data_mode == XSPI_SPI_MODE) ? HAL_XSPI_ADDRESS_1_LINE : HAL_XSPI_ADDRESS_8_LINES) : HAL_XSPI_ADDRESS_8_LINES; s_command.AddressDTRMode = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? HAL_XSPI_ADDRESS_DTR_DISABLE : HAL_XSPI_ADDRESS_DTR_ENABLE; s_command.AddressWidth = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? stm32_xspi_hal_address_size(dev) : HAL_XSPI_ADDRESS_32_BITS; s_command.DataMode = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? ((dev_cfg->data_mode == XSPI_SPI_MODE) ? HAL_XSPI_DATA_1_LINE : HAL_XSPI_DATA_8_LINES) : HAL_XSPI_DATA_8_LINES; s_command.DataDTRMode = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? HAL_XSPI_DATA_DTR_DISABLE : HAL_XSPI_DATA_DTR_ENABLE; s_command.DummyCycles = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? ((dev_cfg->data_mode == XSPI_SPI_MODE) ? SPI_NOR_DUMMY_RD : SPI_NOR_DUMMY_RD_OCTAL) : SPI_NOR_DUMMY_RD_OCTAL_DTR; s_command.DQSMode = (dev_cfg->data_rate == XSPI_STR_TRANSFER) ? 
HAL_XSPI_DQS_DISABLE : HAL_XSPI_DQS_ENABLE;
#ifdef XSPI_CCR_SIOO
	s_command.SIOOMode = HAL_XSPI_SIOO_INST_EVERY_CMD;
#endif /* XSPI_CCR_SIOO */

	/* Register the READ configuration built above */
	ret = HAL_XSPI_Command(&dev_data->hxspi, &s_command,
			       HAL_XSPI_TIMEOUT_DEFAULT_VALUE);
	if (ret != HAL_OK) {
		LOG_ERR("%d: Failed to set memory map", ret);
		return -EIO;
	}

	/* Initialize the program command : reuses the same transfer geometry
	 * as the read command, only OperationType/Instruction/DQS change.
	 */
	s_command.OperationType = HAL_XSPI_OPTYPE_WRITE_CFG;
	if (dev_cfg->data_rate == XSPI_STR_TRANSFER) {
		s_command.Instruction = (dev_cfg->data_mode == XSPI_SPI_MODE) ?
					((stm32_xspi_hal_address_size(dev) ==
					HAL_XSPI_ADDRESS_24_BITS) ?
						SPI_NOR_CMD_PP : SPI_NOR_CMD_PP_4B) :
					SPI_NOR_OCMD_PAGE_PRG;
	} else {
		s_command.Instruction = SPI_NOR_OCMD_PAGE_PRG;
	}
	/* No DQS on the program path */
	s_command.DQSMode = HAL_XSPI_DQS_DISABLE;

	ret = HAL_XSPI_Command(&dev_data->hxspi, &s_command,
			       HAL_XSPI_TIMEOUT_DEFAULT_VALUE);
	if (ret != HAL_OK) {
		LOG_ERR("%d: Failed to set memory mapped", ret);
		return -EIO;
	}

	/* Enable the memory-mapping */
	s_MemMappedCfg.TimeOutActivation = HAL_XSPI_TIMEOUT_COUNTER_DISABLE;

	ret = HAL_XSPI_MemoryMapped(&dev_data->hxspi, &s_MemMappedCfg);
	if (ret != HAL_OK) {
		LOG_ERR("%d: Failed to enable memory mapped", ret);
		return -EIO;
	}

	LOG_DBG("MemoryMap mode enabled");
	return 0;
}

/* Function to return true if the octoflash is in MemoryMapped else false.
 * Reads the functional-mode (FMODE) field of the peripheral CR register and
 * reports whether both FMODE bits are set (the comparison against
 * XSPI_CR_FMODE requires the full mask to match).
 * NOTE(review): assumes FMODE == 11b denotes memory-mapped mode — confirm
 * against the STM32 XSPI reference manual for this SoC family.
 */
static bool stm32_xspi_is_memorymap(const struct device *dev)
{
	struct flash_stm32_xspi_data *dev_data = dev->data;

	return ((READ_BIT(dev_data->hxspi.Instance->CR,
			  XSPI_CR_FMODE) == XSPI_CR_FMODE) ?
true : false); } static int stm32_xspi_abort(const struct device *dev) { struct flash_stm32_xspi_data *dev_data = dev->data; if (HAL_XSPI_Abort(&dev_data->hxspi) != HAL_OK) { LOG_ERR("XSPI abort failed"); return -EIO; } return 0; } #endif /* CONFIG_STM32_MEMMAP */ /* * Function to erase the flash : chip or sector with possible OCTO/SPI and STR/DTR * to erase the complete chip (using dedicated command) : * set size >= flash size * set addr = 0 */ static int flash_stm32_xspi_erase(const struct device *dev, off_t addr, size_t size) { const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; int ret = 0; /* Ignore zero size erase */ if (size == 0) { return 0; } /* Maximise erase size : means the complete chip */ if (size > dev_cfg->flash_size) { size = dev_cfg->flash_size; } if (!xspi_address_is_valid(dev, addr, size)) { LOG_ERR("Error: address or size exceeds expected values: " "addr 0x%lx, size %zu", (long)addr, size); return -EINVAL; } if (((size % SPI_NOR_SECTOR_SIZE) != 0) && (size < dev_cfg->flash_size)) { LOG_ERR("Error: wrong sector size 0x%x", size); return -ENOTSUP; } xspi_lock_thread(dev); #ifdef CONFIG_STM32_MEMMAP if (stm32_xspi_is_memorymap(dev)) { /* Abort ongoing transfer to force CS high/BUSY deasserted */ ret = stm32_xspi_abort(dev); if (ret != 0) { LOG_ERR("Failed to abort memory-mapped access before erase"); goto erase_end; } } #endif XSPI_RegularCmdTypeDef cmd_erase = { .OperationType = HAL_XSPI_OPTYPE_COMMON_CFG, .AlternateBytesMode = HAL_XSPI_ALT_BYTES_NONE, .DataMode = HAL_XSPI_DATA_NONE, .DummyCycles = 0U, .DQSMode = HAL_XSPI_DQS_DISABLE, .SIOOMode = HAL_XSPI_SIOO_INST_EVERY_CMD, }; if (stm32_xspi_mem_ready(dev, dev_cfg->data_mode, dev_cfg->data_rate) != 0) { LOG_ERR("Erase failed : flash busy"); goto erase_end; } cmd_erase.InstructionMode = (dev_cfg->data_mode == XSPI_OCTO_MODE) ? 
HAL_XSPI_INSTRUCTION_8_LINES : HAL_XSPI_INSTRUCTION_1_LINE; cmd_erase.InstructionDTRMode = (dev_cfg->data_rate == XSPI_DTR_TRANSFER) ? HAL_XSPI_INSTRUCTION_DTR_ENABLE : HAL_XSPI_INSTRUCTION_DTR_DISABLE; cmd_erase.InstructionWidth = (dev_cfg->data_mode == XSPI_OCTO_MODE) ? HAL_XSPI_INSTRUCTION_16_BITS : HAL_XSPI_INSTRUCTION_8_BITS; while ((size > 0) && (ret == 0)) { ret = stm32_xspi_write_enable(dev, dev_cfg->data_mode, dev_cfg->data_rate); if (ret != 0) { LOG_ERR("Erase failed : write enable"); break; } if (size == dev_cfg->flash_size) { /* Chip erase */ LOG_DBG("Chip Erase"); cmd_erase.Address = 0; cmd_erase.Instruction = (dev_cfg->data_mode == XSPI_OCTO_MODE) ? SPI_NOR_OCMD_BULKE : SPI_NOR_CMD_BULKE; cmd_erase.AddressMode = HAL_XSPI_ADDRESS_NONE; /* Full chip erase (Bulk) command */ xspi_send_cmd(dev, &cmd_erase); size -= dev_cfg->flash_size; /* Chip (Bulk) erase started, wait until WEL becomes 0 */ ret = stm32_xspi_mem_erased(dev); if (ret != 0) { LOG_ERR("Chip Erase failed"); break; } } else { /* Sector or Block erase depending on the size */ LOG_DBG("Sector/Block Erase"); cmd_erase.AddressMode = (dev_cfg->data_mode == XSPI_OCTO_MODE) ? HAL_XSPI_ADDRESS_8_LINES : HAL_XSPI_ADDRESS_1_LINE; cmd_erase.AddressDTRMode = (dev_cfg->data_rate == XSPI_DTR_TRANSFER) ? 
HAL_XSPI_ADDRESS_DTR_ENABLE : HAL_XSPI_ADDRESS_DTR_DISABLE; cmd_erase.AddressWidth = stm32_xspi_hal_address_size(dev); cmd_erase.Address = addr; const struct jesd216_erase_type *erase_types = dev_data->erase_types; const struct jesd216_erase_type *bet = NULL; for (uint8_t ei = 0; ei < JESD216_NUM_ERASE_TYPES; ++ei) { const struct jesd216_erase_type *etp = &erase_types[ei]; if ((etp->exp != 0) && SPI_NOR_IS_ALIGNED(addr, etp->exp) && (size >= BIT(etp->exp)) && ((bet == NULL) || (etp->exp > bet->exp))) { bet = etp; cmd_erase.Instruction = bet->cmd; } else if (bet == NULL) { /* Use the default sector erase cmd */ if (dev_cfg->data_mode == XSPI_OCTO_MODE) { cmd_erase.Instruction = SPI_NOR_OCMD_SE; } else { cmd_erase.Instruction = (stm32_xspi_hal_address_size(dev) == HAL_XSPI_ADDRESS_32_BITS) ? SPI_NOR_CMD_SE_4B : SPI_NOR_CMD_SE; } } /* Avoid using wrong erase type, * if zero entries are found in erase_types */ bet = NULL; } LOG_DBG("Sector/Block Erase addr 0x%x, asize 0x%x amode 0x%x instr 0x%x", cmd_erase.Address, cmd_erase.AddressWidth, cmd_erase.AddressMode, cmd_erase.Instruction); xspi_send_cmd(dev, &cmd_erase); if (bet != NULL) { addr += BIT(bet->exp); size -= BIT(bet->exp); } else { addr += SPI_NOR_SECTOR_SIZE; size -= SPI_NOR_SECTOR_SIZE; } ret = stm32_xspi_mem_ready(dev, dev_cfg->data_mode, dev_cfg->data_rate); } } /* Ends the erase operation */ erase_end: xspi_unlock_thread(dev); return ret; } /* Function to read the flash with possible OCTO/SPI and STR/DTR */ static int flash_stm32_xspi_read(const struct device *dev, off_t addr, void *data, size_t size) { const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; int ret; if (!xspi_address_is_valid(dev, addr, size)) { LOG_ERR("Error: address or size exceeds expected values: " "addr 0x%lx, size %zu", (long)addr, size); return -EINVAL; } /* Ignore zero size read */ if (size == 0) { return 0; } #ifdef CONFIG_STM32_MEMMAP ARG_UNUSED(dev_cfg); 
ARG_UNUSED(dev_data); xspi_lock_thread(dev); /* Do reads through memory-mapping instead of indirect */ if (!stm32_xspi_is_memorymap(dev)) { ret = stm32_xspi_set_memorymap(dev); if (ret != 0) { LOG_ERR("READ: failed to set memory mapped"); goto read_end; } } __ASSERT_NO_MSG(stm32_xspi_is_memorymap(dev)); uintptr_t mmap_addr = STM32_XSPI_BASE_ADDRESS + addr; LOG_DBG("Memory-mapped read from 0x%08lx, len %zu", mmap_addr, size); memcpy(data, (void *)mmap_addr, size); ret = 0; goto read_end; #else XSPI_RegularCmdTypeDef cmd = xspi_prepare_cmd(dev_cfg->data_mode, dev_cfg->data_rate); if (dev_cfg->data_mode != XSPI_OCTO_MODE) { switch (dev_data->read_mode) { case JESD216_MODE_112: { cmd.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd.AddressMode = HAL_XSPI_ADDRESS_1_LINE; cmd.DataMode = HAL_XSPI_DATA_2_LINES; break; } case JESD216_MODE_122: { cmd.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd.AddressMode = HAL_XSPI_ADDRESS_2_LINES; cmd.DataMode = HAL_XSPI_DATA_2_LINES; break; } case JESD216_MODE_114: { cmd.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd.AddressMode = HAL_XSPI_ADDRESS_1_LINE; cmd.DataMode = HAL_XSPI_DATA_4_LINES; break; } case JESD216_MODE_144: { cmd.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd.AddressMode = HAL_XSPI_ADDRESS_4_LINES; cmd.DataMode = HAL_XSPI_DATA_4_LINES; break; } default: /* use the mode from ospi_prepare_cmd */ break; } } /* Instruction and DummyCycles are set below */ cmd.Address = addr; /* AddressSize is 32bits in OPSI mode */ cmd.AddressWidth = stm32_xspi_hal_address_size(dev); /* DataSize is set by the read cmd */ /* Configure other parameters */ if (dev_cfg->data_rate == XSPI_DTR_TRANSFER) { /* DTR transfer rate (==> Octal mode) */ cmd.Instruction = SPI_NOR_OCMD_DTR_RD; cmd.DummyCycles = SPI_NOR_DUMMY_RD_OCTAL_DTR; } else { /* STR transfer rate */ if (dev_cfg->data_mode == XSPI_OCTO_MODE) { /* OPI and STR */ cmd.Instruction = SPI_NOR_OCMD_RD; cmd.DummyCycles = SPI_NOR_DUMMY_RD_OCTAL; } else { /* use SFDP:BFP read 
instruction */ cmd.Instruction = dev_data->read_opcode; cmd.DummyCycles = dev_data->read_dummy; /* in SPI and STR : expecting SPI_NOR_CMD_READ_FAST_4B */ } } LOG_DBG("XSPI: read %zu data at 0x%lx", size, (long)(STM32_XSPI_BASE_ADDRESS + addr)); xspi_lock_thread(dev); ret = xspi_read_access(dev, &cmd, data, size); goto read_end; #endif read_end: xspi_unlock_thread(dev); return ret; } /* Function to write the flash (page program) : with possible OCTO/SPI and STR/DTR */ static int flash_stm32_xspi_write(const struct device *dev, off_t addr, const void *data, size_t size) { const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; size_t to_write; int ret = 0; if (!xspi_address_is_valid(dev, addr, size)) { LOG_ERR("Error: address or size exceeds expected values: " "addr 0x%lx, size %zu", (long)addr, size); return -EINVAL; } /* Ignore zero size write */ if (size == 0) { return 0; } xspi_lock_thread(dev); #ifdef CONFIG_STM32_MEMMAP ARG_UNUSED(dev_data); if (stm32_xspi_is_memorymap(dev)) { /* Abort ongoing transfer to force CS high/BUSY deasserted */ ret = stm32_xspi_abort(dev); if (ret != 0) { LOG_ERR("Failed to abort memory-mapped access before write"); goto write_end; } } #endif /* page program for STR or DTR mode */ XSPI_RegularCmdTypeDef cmd_pp = xspi_prepare_cmd(dev_cfg->data_mode, dev_cfg->data_rate); /* using 32bits address also in SPI/STR mode */ cmd_pp.Instruction = dev_data->write_opcode; if (dev_cfg->data_mode != XSPI_OCTO_MODE) { switch (cmd_pp.Instruction) { case SPI_NOR_CMD_PP_4B: __fallthrough; case SPI_NOR_CMD_PP: { cmd_pp.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd_pp.AddressMode = HAL_XSPI_ADDRESS_1_LINE; cmd_pp.DataMode = HAL_XSPI_DATA_1_LINE; break; } case SPI_NOR_CMD_PP_1_1_4_4B: __fallthrough; case SPI_NOR_CMD_PP_1_1_4: { cmd_pp.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd_pp.AddressMode = HAL_XSPI_ADDRESS_1_LINE; cmd_pp.DataMode = HAL_XSPI_DATA_4_LINES; break; } case 
SPI_NOR_CMD_PP_1_4_4_4B: __fallthrough; case SPI_NOR_CMD_PP_1_4_4: { cmd_pp.InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE; cmd_pp.AddressMode = HAL_XSPI_ADDRESS_4_LINES; cmd_pp.DataMode = HAL_XSPI_DATA_4_LINES; break; } default: /* use the mode from ospi_prepare_cmd */ break; } } cmd_pp.Address = addr; cmd_pp.AddressWidth = stm32_xspi_hal_address_size(dev); cmd_pp.DummyCycles = 0U; LOG_DBG("XSPI: write %zu data at 0x%lx", size, (long)(STM32_XSPI_BASE_ADDRESS + addr)); ret = stm32_xspi_mem_ready(dev, dev_cfg->data_mode, dev_cfg->data_rate); if (ret != 0) { LOG_ERR("XSPI: write not ready"); goto write_end; } while ((size > 0) && (ret == 0)) { to_write = size; ret = stm32_xspi_write_enable(dev, dev_cfg->data_mode, dev_cfg->data_rate); if (ret != 0) { LOG_ERR("XSPI: write not enabled"); break; } /* Don't write more than a page. */ if (to_write >= SPI_NOR_PAGE_SIZE) { to_write = SPI_NOR_PAGE_SIZE; } /* Don't write across a page boundary */ if (((addr + to_write - 1U) / SPI_NOR_PAGE_SIZE) != (addr / SPI_NOR_PAGE_SIZE)) { to_write = SPI_NOR_PAGE_SIZE - (addr % SPI_NOR_PAGE_SIZE); } cmd_pp.Address = addr; ret = xspi_write_access(dev, &cmd_pp, data, to_write); if (ret != 0) { LOG_ERR("XSPI: write not access"); break; } size -= to_write; data = (const uint8_t *)data + to_write; addr += to_write; /* Configure automatic polling mode to wait for end of program */ ret = stm32_xspi_mem_ready(dev, dev_cfg->data_mode, dev_cfg->data_rate); if (ret != 0) { LOG_ERR("XSPI: write PP not ready"); break; } } /* Ends the write operation */ write_end: xspi_unlock_thread(dev); return ret; } static const struct flash_parameters flash_stm32_xspi_parameters = { .write_block_size = 1, .erase_value = 0xff }; static const struct flash_parameters * flash_stm32_xspi_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_stm32_xspi_parameters; } static void flash_stm32_xspi_isr(const struct device *dev) { struct flash_stm32_xspi_data *dev_data = dev->data; 
HAL_XSPI_IRQHandler(&dev_data->hxspi);
}

#if !defined(CONFIG_SOC_SERIES_STM32H7X)
/* weak function required for HAL compilation */
__weak HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma)
{
    return HAL_OK;
}

/* weak function required for HAL compilation */
__weak HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma)
{
    return HAL_OK;
}
#endif /* !CONFIG_SOC_SERIES_STM32H7X */

/* This function is executed in the interrupt context */
#if STM32_XSPI_USE_DMA
/*
 * Zephyr DMA callback: log failures, then forward the event to the HAL DMA
 * IRQ handler so the XSPI HAL transfer state machine can advance.
 */
static void xspi_dma_callback(const struct device *dev, void *arg,
                              uint32_t channel, int status)
{
    /* arg carries the HAL DMA handle (set as cfg.user_data at DMA init) */
    DMA_HandleTypeDef *hdma = arg;

    ARG_UNUSED(dev);

    if (status < 0) {
        LOG_ERR("DMA callback error with channel %d.", channel);
    }

    HAL_DMA_IRQHandler(hdma);
}
#endif

/*
 * Transfer Error callback: record the failure and signal completion.
 */
void HAL_XSPI_ErrorCallback(XSPI_HandleTypeDef *hxspi)
{
    struct flash_stm32_xspi_data *dev_data =
        CONTAINER_OF(hxspi, struct flash_stm32_xspi_data, hxspi);

    LOG_DBG("Error cb");

    dev_data->cmd_status = -EIO;

    /* wake whoever is pending on the sync semaphore */
    k_sem_give(&dev_data->sync);
}

/*
 * Command completed callback.
 */
void HAL_XSPI_CmdCpltCallback(XSPI_HandleTypeDef *hxspi)
{
    struct flash_stm32_xspi_data *dev_data =
        CONTAINER_OF(hxspi, struct flash_stm32_xspi_data, hxspi);

    LOG_DBG("Cmd Cplt cb");

    k_sem_give(&dev_data->sync);
}

/*
 * Rx Transfer completed callback.
 */
void HAL_XSPI_RxCpltCallback(XSPI_HandleTypeDef *hxspi)
{
    struct flash_stm32_xspi_data *dev_data =
        CONTAINER_OF(hxspi, struct flash_stm32_xspi_data, hxspi);

    LOG_DBG("Rx Cplt cb");

    k_sem_give(&dev_data->sync);
}

/*
 * Tx Transfer completed callback.
 */
void HAL_XSPI_TxCpltCallback(XSPI_HandleTypeDef *hxspi)
{
    struct flash_stm32_xspi_data *dev_data =
        CONTAINER_OF(hxspi, struct flash_stm32_xspi_data, hxspi);

    LOG_DBG("Tx Cplt cb");

    k_sem_give(&dev_data->sync);
}

/*
 * Status Match callback.
*/
void HAL_XSPI_StatusMatchCallback(XSPI_HandleTypeDef *hxspi)
{
    struct flash_stm32_xspi_data *dev_data =
        CONTAINER_OF(hxspi, struct flash_stm32_xspi_data, hxspi);

    LOG_DBG("Status Match cb");

    k_sem_give(&dev_data->sync);
}

/*
 * Timeout callback: mark the transfer failed and signal completion.
 */
void HAL_XSPI_TimeOutCallback(XSPI_HandleTypeDef *hxspi)
{
    struct flash_stm32_xspi_data *dev_data =
        CONTAINER_OF(hxspi, struct flash_stm32_xspi_data, hxspi);

    LOG_DBG("Timeout cb");

    dev_data->cmd_status = -EIO;

    k_sem_give(&dev_data->sync);
}

#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/* Report the single uniform page layout stored in the driver data. */
static void flash_stm32_xspi_pages_layout(const struct device *dev,
                                          const struct flash_pages_layout **layout,
                                          size_t *layout_size)
{
    struct flash_stm32_xspi_data *dev_data = dev->data;

    *layout = &dev_data->layout;
    *layout_size = 1;
}
#endif

static const struct flash_driver_api flash_stm32_xspi_driver_api = {
    .read = flash_stm32_xspi_read,
    .write = flash_stm32_xspi_write,
    .erase = flash_stm32_xspi_erase,
    .get_parameters = flash_stm32_xspi_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
    .page_layout = flash_stm32_xspi_pages_layout,
#endif
#if defined(CONFIG_FLASH_JESD216_API)
    .sfdp_read = xspi_read_sfdp,
    .read_jedec_id = xspi_read_jedec_id,
#endif /* CONFIG_FLASH_JESD216_API */
};

#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/*
 * Build the uniform page layout exposed through the flash page-layout API,
 * from the page size and erase types discovered via SFDP.
 */
static int setup_pages_layout(const struct device *dev)
{
    const struct flash_stm32_xspi_config *dev_cfg = dev->config;
    struct flash_stm32_xspi_data *data = dev->data;
    const size_t flash_size = dev_cfg->flash_size;
    uint32_t layout_page_size = data->page_size;
    uint8_t value = 0;
    int rv = 0;

    /* Find the smallest erase size.
*/
    for (size_t i = 0; i < ARRAY_SIZE(data->erase_types); ++i) {
        const struct jesd216_erase_type *etp = &data->erase_types[i];

        /* track the smallest non-zero erase exponent (erase size = 2^exp) */
        if ((etp->cmd != 0) && ((value == 0) || (etp->exp < value))) {
            value = etp->exp;
        }
    }

    uint32_t erase_size = BIT(value);

    /* NOTE(review): BIT(value) is non-zero for any value < 32 (value == 0
     * yields erase_size == 1), so this fallback appears unreachable when no
     * erase type was found -- confirm the intended "no erase type" handling.
     */
    if (erase_size == 0) {
        erase_size = SPI_NOR_SECTOR_SIZE;
    }

    /* We need layout page size to be compatible with erase size */
    if ((layout_page_size % erase_size) != 0) {
        LOG_DBG("layout page %u not compatible with erase size %u",
                layout_page_size, erase_size);
        LOG_DBG("erase size will be used as layout page size");
        layout_page_size = erase_size;
    }

    /* Warn but accept layout page sizes that leave inaccessible
     * space.
     */
    if ((flash_size % layout_page_size) != 0) {
        LOG_DBG("layout page %u wastes space with device size %zu",
                layout_page_size, flash_size);
    }

    data->layout.pages_size = layout_page_size;
    data->layout.pages_count = flash_size / layout_page_size;
    LOG_DBG("layout %u x %u By pages", data->layout.pages_count,
            data->layout.pages_size);

    return rv;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

/*
 * Read status register 1, 2 or 3 (RDSR/RDSR2/RDSR3) over 1-line SPI.
 * Returns 0 on success, -EINVAL for an unknown register number, otherwise
 * the error from the read access.
 */
static int stm32_xspi_read_status_register(const struct device *dev,
                                           uint8_t reg_num, uint8_t *reg)
{
    XSPI_RegularCmdTypeDef s_command = {
        .InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE,
        .DataMode = HAL_XSPI_DATA_1_LINE,
    };

    switch (reg_num) {
    case 1U:
        s_command.Instruction = SPI_NOR_CMD_RDSR;
        break;
    case 2U:
        s_command.Instruction = SPI_NOR_CMD_RDSR2;
        break;
    case 3U:
        s_command.Instruction = SPI_NOR_CMD_RDSR3;
        break;
    default:
        return -EINVAL;
    }

    return xspi_read_access(dev, &s_command, reg, sizeof(*reg));
}

/*
 * Write status register 1, 2 or 3 (WRSR/WRSR2/WRSR3). Depending on the
 * device's JEDEC QER type, a write to SR1 or SR2 may need to carry the
 * other register's current value too (handled in the branches below).
 */
static int stm32_xspi_write_status_register(const struct device *dev,
                                            uint8_t reg_num, uint8_t reg)
{
    struct flash_stm32_xspi_data *data = dev->data;
    XSPI_RegularCmdTypeDef s_command = {
        .Instruction = SPI_NOR_CMD_WRSR,
        .InstructionMode = HAL_XSPI_INSTRUCTION_1_LINE,
        .DataMode = HAL_XSPI_DATA_1_LINE
    };
    size_t size;
    uint8_t regs[4] = { 0 };
    uint8_t *regs_p;
    int ret;

    if (reg_num == 1U) {
        size = 1U;
        regs[0] = reg;
        regs_p = &regs[0];
        /* 1 byte
write clears SR2, write SR2 as well */
        /* NOTE(review): this branch compares against JESD216_DW15_QER_S2B1v1
         * while the SR2 branch below uses the JESD216_DW15_QER_VAL_* spellings
         * -- confirm both identifiers name the same value in jesd216.h.
         */
        if (data->qer_type == JESD216_DW15_QER_S2B1v1) {
            ret = stm32_xspi_read_status_register(dev, 2, &regs[1]);
            if (ret < 0) {
                return ret;
            }
            size = 2U;
        }
    } else if (reg_num == 2U) {
        s_command.Instruction = SPI_NOR_CMD_WRSR2;
        size = 1U;
        regs[1] = reg;
        regs_p = &regs[1];
        /* if SR2 write needs SR1 */
        if ((data->qer_type == JESD216_DW15_QER_VAL_S2B1v1) ||
            (data->qer_type == JESD216_DW15_QER_VAL_S2B1v4) ||
            (data->qer_type == JESD216_DW15_QER_VAL_S2B1v5)) {
            /* WRSR on these parts writes SR1+SR2: preserve current SR1 */
            ret = stm32_xspi_read_status_register(dev, 1, &regs[0]);
            if (ret < 0) {
                return ret;
            }
            s_command.Instruction = SPI_NOR_CMD_WRSR;
            size = 2U;
            regs_p = &regs[0];
        }
    } else if (reg_num == 3U) {
        s_command.Instruction = SPI_NOR_CMD_WRSR3;
        size = 1U;
        regs[2] = reg;
        regs_p = &regs[2];
    } else {
        return -EINVAL;
    }

    return xspi_write_access(dev, &s_command, regs_p, size);
}

/*
 * Set the Quad Enable (QE) bit as dictated by the JESD216 DW15 QER type so
 * quad I/O opcodes are accepted. No-op when QE is already set or when the
 * device requires none (QER_NONE). Returns 0 on success, negative errno
 * otherwise.
 */
static int stm32_xspi_enable_qe(const struct device *dev)
{
    struct flash_stm32_xspi_data *data = dev->data;
    uint8_t qe_reg_num;
    uint8_t qe_bit;
    uint8_t reg;
    int ret;

    /* map the QER type to the status register and bit that hold QE */
    switch (data->qer_type) {
    case JESD216_DW15_QER_NONE:
        /* no QE bit, device detects reads based on opcode */
        return 0;
    case JESD216_DW15_QER_S1B6:
        qe_reg_num = 1U;
        qe_bit = BIT(6U);
        break;
    case JESD216_DW15_QER_S2B7:
        qe_reg_num = 2U;
        qe_bit = BIT(7U);
        break;
    case JESD216_DW15_QER_S2B1v1:
        __fallthrough;
    case JESD216_DW15_QER_S2B1v4:
        __fallthrough;
    case JESD216_DW15_QER_S2B1v5:
        __fallthrough;
    case JESD216_DW15_QER_S2B1v6:
        qe_reg_num = 2U;
        qe_bit = BIT(1U);
        break;
    default:
        return -ENOTSUP;
    }

    ret = stm32_xspi_read_status_register(dev, qe_reg_num, &reg);
    if (ret < 0) {
        return ret;
    }

    /* exit early if QE bit is already set */
    if ((reg & qe_bit) != 0U) {
        return 0;
    }

    ret = stm32_xspi_write_enable(dev, XSPI_SPI_MODE, XSPI_STR_TRANSFER);
    if (ret < 0) {
        return ret;
    }

    reg |= qe_bit;

    ret = stm32_xspi_write_status_register(dev, qe_reg_num, reg);
    if (ret < 0) {
        return ret;
    }

    /* wait for the status-register write to complete */
    ret = stm32_xspi_mem_ready(dev, XSPI_SPI_MODE, XSPI_STR_TRANSFER);
    if (ret < 0) {
        return ret;
    }

    /* validate
that QE bit is set */
    ret = stm32_xspi_read_status_register(dev, qe_reg_num, &reg);
    if (ret < 0) {
        return ret;
    }

    if ((reg & qe_bit) == 0U) {
        LOG_ERR("Status Register %u [0x%02x] not set", qe_reg_num, reg);
        ret = -EIO;
    }

    return ret;
}

/*
 * Record the device address width (3 or 4 bytes) from the SFDP BFP DW1
 * address-bytes field.
 */
static void spi_nor_process_bfp_addrbytes(const struct device *dev,
                                          const uint8_t jesd216_bfp_addrbytes)
{
    struct flash_stm32_xspi_data *data = dev->data;

    if ((jesd216_bfp_addrbytes == JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_4B) ||
        (jesd216_bfp_addrbytes == JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_3B4B)) {
        data->address_width = 4U;
    } else {
        data->address_width = 3U;
    }
}

/* Map a 3-byte-address read opcode to its 4-byte-address equivalent. */
static inline uint8_t spi_nor_convert_read_to_4b(const uint8_t opcode)
{
    switch (opcode) {
    case SPI_NOR_CMD_READ:
        return SPI_NOR_CMD_READ_4B;
    case SPI_NOR_CMD_DREAD:
        return SPI_NOR_CMD_DREAD_4B;
    case SPI_NOR_CMD_2READ:
        return SPI_NOR_CMD_2READ_4B;
    case SPI_NOR_CMD_QREAD:
        return SPI_NOR_CMD_QREAD_4B;
    case SPI_NOR_CMD_4READ:
        return SPI_NOR_CMD_4READ_4B;
    default:
        /* use provided */
        return opcode;
    }
}

/* Map a 3-byte-address program opcode to its 4-byte-address equivalent. */
static inline uint8_t spi_nor_convert_write_to_4b(const uint8_t opcode)
{
    switch (opcode) {
    case SPI_NOR_CMD_PP:
        return SPI_NOR_CMD_PP_4B;
    case SPI_NOR_CMD_PP_1_1_4:
        return SPI_NOR_CMD_PP_1_1_4_4B;
    case SPI_NOR_CMD_PP_1_4_4:
        return SPI_NOR_CMD_PP_1_4_4_4B;
    default:
        /* use provided */
        return opcode;
    }
}

/*
 * Digest the SFDP Basic Flash Parameter table: erase types, address width,
 * read/write opcodes and dummy cycles, and (for quad mode) the QE bit.
 */
static int spi_nor_process_bfp(const struct device *dev,
                               const struct jesd216_param_header *php,
                               const struct jesd216_bfp *bfp)
{
    const struct flash_stm32_xspi_config *dev_cfg = dev->config;
    struct flash_stm32_xspi_data *data = dev->data;
    /* must be kept in data mode order, ignore 1-1-1 (always supported) */
    const enum jesd216_mode_type supported_read_modes[] = {
        JESD216_MODE_112,
        JESD216_MODE_122,
        JESD216_MODE_114,
        JESD216_MODE_144
    };
    size_t supported_read_modes_max_idx;
    struct jesd216_erase_type *etp = data->erase_types;
    size_t idx;
    /* BFP density is in bits; convert to bytes */
    const size_t flash_size = jesd216_bfp_density(bfp) / 8U;
    struct jesd216_instr read_instr = { 0 };
    struct jesd216_bfp_dw15 dw15;

    if (flash_size !=
dev_cfg->flash_size) { LOG_DBG("Unexpected flash size: %u", flash_size); } LOG_DBG("%s: %u MiBy flash", dev->name, (uint32_t)(flash_size >> 20)); /* Copy over the erase types, preserving their order. (The * Sector Map Parameter table references them by index.) */ memset(data->erase_types, 0, sizeof(data->erase_types)); for (idx = 1U; idx <= ARRAY_SIZE(data->erase_types); ++idx) { if (jesd216_bfp_erase(bfp, idx, etp) == 0) { LOG_DBG("Erase %u with %02x", (uint32_t)BIT(etp->exp), etp->cmd); } ++etp; } spi_nor_process_bfp_addrbytes(dev, jesd216_bfp_addrbytes(bfp)); LOG_DBG("Address width: %u Bytes", data->address_width); /* use PP opcode based on configured data mode if nothing is set in DTS */ if (data->write_opcode == SPI_NOR_WRITEOC_NONE) { switch (dev_cfg->data_mode) { case XSPI_OCTO_MODE: data->write_opcode = SPI_NOR_OCMD_PAGE_PRG; break; case XSPI_QUAD_MODE: data->write_opcode = SPI_NOR_CMD_PP_1_4_4; break; case XSPI_DUAL_MODE: data->write_opcode = SPI_NOR_CMD_PP_1_1_2; break; default: data->write_opcode = SPI_NOR_CMD_PP; break; } } if (dev_cfg->data_mode != XSPI_OCTO_MODE) { /* determine supported read modes, begin from the slowest */ data->read_mode = JESD216_MODE_111; data->read_opcode = SPI_NOR_CMD_READ; data->read_dummy = 0U; if (dev_cfg->data_mode != XSPI_SPI_MODE) { if (dev_cfg->data_mode == XSPI_DUAL_MODE) { /* the index of JESD216_MODE_114 in supported_read_modes */ supported_read_modes_max_idx = 2U; } else { supported_read_modes_max_idx = ARRAY_SIZE(supported_read_modes); } for (idx = 0U; idx < supported_read_modes_max_idx; ++idx) { if (jesd216_bfp_read_support(php, bfp, supported_read_modes[idx], &read_instr) < 0) { /* not supported */ continue; } LOG_DBG("Supports read mode: %d, instr: 0x%X", supported_read_modes[idx], read_instr.instr); data->read_mode = supported_read_modes[idx]; data->read_opcode = read_instr.instr; data->read_dummy = (read_instr.wait_states + read_instr.mode_clocks); } } /* convert 3-Byte opcodes to 4-Byte (if required) */ if 
(IS_ENABLED(DT_INST_PROP(0, four_byte_opcodes))) { if (data->address_width != 4U) { LOG_DBG("4-Byte opcodes require 4-Byte address width"); return -ENOTSUP; } data->read_opcode = spi_nor_convert_read_to_4b(data->read_opcode); data->write_opcode = spi_nor_convert_write_to_4b(data->write_opcode); } /* enable quad mode (if required) */ if (dev_cfg->data_mode == XSPI_QUAD_MODE) { if (jesd216_bfp_decode_dw15(php, bfp, &dw15) < 0) { /* will use QER from DTS or default (refer to device data) */ LOG_WRN("Unable to decode QE requirement [DW15]"); } else { /* bypass DTS QER value */ data->qer_type = dw15.qer; } LOG_DBG("QE requirement mode: %x", data->qer_type); if (stm32_xspi_enable_qe(dev) < 0) { LOG_ERR("Failed to enable QUAD mode"); return -EIO; } LOG_DBG("QUAD mode enabled"); } } data->page_size = jesd216_bfp_page_size(php, bfp); LOG_DBG("Page size %u bytes", data->page_size); LOG_DBG("Flash size %zu bytes", flash_size); LOG_DBG("Using read mode: %d, instr: 0x%X, dummy cycles: %u", data->read_mode, data->read_opcode, data->read_dummy); LOG_DBG("Using write instr: 0x%X", data->write_opcode); return 0; } #if STM32_XSPI_USE_DMA static int flash_stm32_xspi_dma_init(DMA_HandleTypeDef *hdma, struct stream *dma_stream) { int ret; /* * DMA configuration * Due to use of XSPI HAL API in current driver, * both HAL and Zephyr DMA drivers should be configured. * The required configuration for Zephyr DMA driver should only provide * the minimum information to inform the DMA slot will be in used and * how to route callbacks. 
*/ if (!device_is_ready(dma_stream->dev)) { LOG_ERR("DMA %s device not ready", dma_stream->dev->name); return -ENODEV; } /* Proceed to the minimum Zephyr DMA driver init of the channel */ dma_stream->cfg.user_data = hdma; /* HACK: This field is used to inform driver that it is overridden */ dma_stream->cfg.linked_channel = STM32_DMA_HAL_OVERRIDE; /* Because of the STREAM OFFSET, the DMA channel given here is from 1 - 8 */ ret = dma_config(dma_stream->dev, (dma_stream->channel + STM32_DMA_STREAM_OFFSET), &dma_stream->cfg); if (ret != 0) { LOG_ERR("Failed to configure DMA channel %d", dma_stream->channel + STM32_DMA_STREAM_OFFSET); return ret; } /* Proceed to the HAL DMA driver init */ if (dma_stream->cfg.source_data_size != dma_stream->cfg.dest_data_size) { LOG_ERR("DMA Source and destination data sizes not aligned"); return -EINVAL; } hdma->Init.SrcDataWidth = DMA_SRC_DATAWIDTH_WORD; /* Fixed value */ hdma->Init.DestDataWidth = DMA_DEST_DATAWIDTH_WORD; /* Fixed value */ hdma->Init.SrcInc = (dma_stream->src_addr_increment) ? DMA_SINC_INCREMENTED : DMA_SINC_FIXED; hdma->Init.DestInc = (dma_stream->dst_addr_increment) ? DMA_DINC_INCREMENTED : DMA_DINC_FIXED; hdma->Init.SrcBurstLength = 4; hdma->Init.DestBurstLength = 4; hdma->Init.Priority = table_priority[dma_stream->cfg.channel_priority]; hdma->Init.Direction = table_direction[dma_stream->cfg.channel_direction]; hdma->Init.TransferAllocatedPort = DMA_SRC_ALLOCATED_PORT0 | DMA_SRC_ALLOCATED_PORT1; hdma->Init.TransferEventMode = DMA_TCEM_BLOCK_TRANSFER; hdma->Init.Mode = DMA_NORMAL; hdma->Init.BlkHWRequest = DMA_BREQ_SINGLE_BURST; hdma->Init.Request = dma_stream->cfg.dma_slot; /* * HAL expects a valid DMA channel (not DMAMUX). 
* The channel is from 0 to 7 because of the STM32_DMA_STREAM_OFFSET * in the dma_stm32 driver */ hdma->Instance = LL_DMA_GET_CHANNEL_INSTANCE(dma_stream->reg, dma_stream->channel); /* Initialize DMA HAL */ if (HAL_DMA_Init(hdma) != HAL_OK) { LOG_ERR("XSPI DMA Init failed"); return -EIO; } if (HAL_DMA_ConfigChannelAttributes(hdma, DMA_CHANNEL_NPRIV) != HAL_OK) { LOG_ERR("XSPI DMA Init failed"); return -EIO; } LOG_DBG("XSPI with DMA transfer"); return 0; } #endif /* STM32_XSPI_USE_DMA */ static int flash_stm32_xspi_init(const struct device *dev) { const struct flash_stm32_xspi_config *dev_cfg = dev->config; struct flash_stm32_xspi_data *dev_data = dev->data; uint32_t ahb_clock_freq; uint32_t prescaler = STM32_XSPI_CLOCK_PRESCALER_MIN; int ret; /* The SPI/DTR is not a valid config of data_mode/data_rate according to the DTS */ if ((dev_cfg->data_mode != XSPI_OCTO_MODE) && (dev_cfg->data_rate == XSPI_DTR_TRANSFER)) { /* already the right config, continue */ LOG_ERR("XSPI mode SPI|DUAL|QUAD/DTR is not valid"); return -ENOTSUP; } /* Signals configuration */ ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("XSPI pinctrl setup failed (%d)", ret); return ret; } if (!device_is_ready(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE))) { LOG_ERR("clock control device not ready"); return -ENODEV; } #ifdef CONFIG_STM32_MEMMAP /* If MemoryMapped then configure skip init */ if (stm32_xspi_is_memorymap(dev)) { LOG_DBG("NOR init'd in MemMapped mode\n"); /* Force HAL instance in correct state */ dev_data->hxspi.State = HAL_XSPI_STATE_BUSY_MEM_MAPPED; return 0; } #endif /* CONFIG_STM32_MEMMAP */ if (dev_cfg->pclk_len > 3) { /* Max 3 domain clock are expected */ LOG_ERR("Could not select %d XSPI domain clock", dev_cfg->pclk_len); return -EIO; } /* Clock configuration */ if (clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), (clock_control_subsys_t) &dev_cfg->pclken[0]) != 0) { LOG_ERR("Could not enable XSPI clock"); return -EIO; } if 
(clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), (clock_control_subsys_t) &dev_cfg->pclken[0], &ahb_clock_freq) < 0) { LOG_ERR("Failed call clock_control_get_rate(pclken)"); return -EIO; } /* Alternate clock config for peripheral if any */ if (IS_ENABLED(STM32_XSPI_DOMAIN_CLOCK_SUPPORT) && (dev_cfg->pclk_len > 1)) { if (clock_control_configure(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), (clock_control_subsys_t) &dev_cfg->pclken[1], NULL) != 0) { LOG_ERR("Could not select XSPI domain clock"); return -EIO; } /* * Get the clock rate from this one (update ahb_clock_freq) * TODO: retrieve index in the clocks property where clocks has "xspi-ker" * Assuming index is 1 */ if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), (clock_control_subsys_t) &dev_cfg->pclken[1], &ahb_clock_freq) < 0) { LOG_ERR("Failed call clock_control_get_rate(pclken)"); return -EIO; } } /* Clock domain corresponding to the IO-Mgr (XSPIM) */ if (IS_ENABLED(STM32_XSPI_DOMAIN_CLOCK_SUPPORT) && (dev_cfg->pclk_len > 2)) { if (clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), (clock_control_subsys_t) &dev_cfg->pclken[2]) != 0) { LOG_ERR("Could not enable XSPI Manager clock"); return -EIO; } /* Do NOT Get the clock rate from this one */ } for (; prescaler <= STM32_XSPI_CLOCK_PRESCALER_MAX; prescaler++) { uint32_t clk = STM32_XSPI_CLOCK_COMPUTE(ahb_clock_freq, prescaler); if (clk <= dev_cfg->max_frequency) { break; } } __ASSERT_NO_MSG(prescaler >= STM32_XSPI_CLOCK_PRESCALER_MIN && prescaler <= STM32_XSPI_CLOCK_PRESCALER_MAX); /* Initialize XSPI HAL structure completely */ dev_data->hxspi.Init.ClockPrescaler = prescaler; /* The stm32 hal_xspi driver does not reduce DEVSIZE before writing the DCR1 */ dev_data->hxspi.Init.MemorySize = find_lsb_set(dev_cfg->flash_size) - 2; #if defined(XSPI_DCR2_WRAPSIZE) dev_data->hxspi.Init.WrapSize = HAL_XSPI_WRAP_NOT_SUPPORTED; #endif /* XSPI_DCR2_WRAPSIZE */ /* STR mode else Macronix for DTR mode */ if (dev_cfg->data_rate == 
XSPI_DTR_TRANSFER) { dev_data->hxspi.Init.MemoryType = HAL_XSPI_MEMTYPE_MACRONIX; dev_data->hxspi.Init.DelayHoldQuarterCycle = HAL_XSPI_DHQC_ENABLE; } else { } #if STM32_XSPI_DLYB_BYPASSED dev_data->hxspi.Init.DelayBlockBypass = HAL_XSPI_DELAY_BLOCK_BYPASS; #else dev_data->hxspi.Init.DelayBlockBypass = HAL_XSPI_DELAY_BLOCK_ON; #endif /* STM32_XSPI_DLYB_BYPASSED */ if (HAL_XSPI_Init(&dev_data->hxspi) != HAL_OK) { LOG_ERR("XSPI Init failed"); return -EIO; } LOG_DBG("XSPI Init'd"); #if defined(HAL_XSPIM_IOPORT_1) || defined(HAL_XSPIM_IOPORT_2) /* XSPI I/O manager init Function */ XSPIM_CfgTypeDef xspi_mgr_cfg; if (dev_data->hxspi.Instance == XSPI1) { xspi_mgr_cfg.IOPort = HAL_XSPIM_IOPORT_1; } else if (dev_data->hxspi.Instance == XSPI2) { xspi_mgr_cfg.IOPort = HAL_XSPIM_IOPORT_2; } xspi_mgr_cfg.nCSOverride = HAL_XSPI_CSSEL_OVR_DISABLED; xspi_mgr_cfg.Req2AckTime = 1; if (HAL_XSPIM_Config(&dev_data->hxspi, &xspi_mgr_cfg, HAL_XSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) { LOG_ERR("XSPI M config failed"); return -EIO; } #endif /* XSPIM */ #if defined(DLYB_XSPI1) || defined(DLYB_XSPI2) || defined(DLYB_OCTOSPI1) || defined(DLYB_OCTOSPI2) /* XSPI delay block init Function */ HAL_XSPI_DLYB_CfgTypeDef xspi_delay_block_cfg = {0}; (void)HAL_XSPI_DLYB_GetClockPeriod(&dev_data->hxspi, &xspi_delay_block_cfg); /* with DTR, set the PhaseSel/4 (empiric value from stm32Cube) */ xspi_delay_block_cfg.PhaseSel /= 4; if (HAL_XSPI_DLYB_SetConfig(&dev_data->hxspi, &xspi_delay_block_cfg) != HAL_OK) { LOG_ERR("XSPI DelayBlock failed"); return -EIO; } LOG_DBG("Delay Block Init"); #endif /* DLYB_ */ #if STM32_XSPI_USE_DMA /* Configure and enable the DMA channels after XSPI config */ static DMA_HandleTypeDef hdma_tx; static DMA_HandleTypeDef hdma_rx; if (flash_stm32_xspi_dma_init(&hdma_tx, &dev_data->dma_tx) != 0) { LOG_ERR("XSPI DMA Tx init failed"); return -EIO; } /* The dma_tx handle is hold by the dma_stream.cfg.user_data */ __HAL_LINKDMA(&dev_data->hxspi, hdmatx, hdma_tx); if 
(flash_stm32_xspi_dma_init(&hdma_rx, &dev_data->dma_rx) != 0) { LOG_ERR("XSPI DMA Rx init failed"); return -EIO; } /* The dma_rx handle is hold by the dma_stream.cfg.user_data */ __HAL_LINKDMA(&dev_data->hxspi, hdmarx, hdma_rx); #endif /* CONFIG_USE_STM32_HAL_DMA */ /* Initialize semaphores */ k_sem_init(&dev_data->sem, 1, 1); k_sem_init(&dev_data->sync, 0, 1); /* Run IRQ init */ dev_cfg->irq_config(dev); /* Reset NOR flash memory : still with the SPI/STR config for the NOR */ if (stm32_xspi_mem_reset(dev) != 0) { LOG_ERR("XSPI reset failed"); return -EIO; } LOG_DBG("Reset Mem (SPI/STR)"); /* Check if memory is ready in the SPI/STR mode */ if (stm32_xspi_mem_ready(dev, XSPI_SPI_MODE, XSPI_STR_TRANSFER) != 0) { LOG_ERR("XSPI memory not ready"); return -EIO; } LOG_DBG("Mem Ready (SPI/STR)"); #if defined(CONFIG_FLASH_JESD216_API) /* Process with the RDID (jedec read ID) instruction at init and fill jedec_id Table */ ret = stm32_xspi_read_jedec_id(dev); if (ret != 0) { LOG_ERR("Read ID failed: %d", ret); return ret; } #endif /* CONFIG_FLASH_JESD216_API */ if (stm32_xspi_config_mem(dev) != 0) { LOG_ERR("OSPI mode not config'd (%u rate %u)", dev_cfg->data_mode, dev_cfg->data_rate); return -EIO; } /* Send the instruction to read the SFDP */ const uint8_t decl_nph = 2; union { /* We only process BFP so use one parameter block */ uint8_t raw[JESD216_SFDP_SIZE(decl_nph)]; struct jesd216_sfdp_header sfdp; } u; const struct jesd216_sfdp_header *hp = &u.sfdp; ret = xspi_read_sfdp(dev, 0, u.raw, sizeof(u.raw)); if (ret != 0) { LOG_ERR("SFDP read failed: %d", ret); return ret; } uint32_t magic = jesd216_sfdp_magic(hp); if (magic != JESD216_SFDP_MAGIC) { LOG_ERR("SFDP magic %08x invalid", magic); return -EINVAL; } LOG_DBG("%s: SFDP v %u.%u AP %x with %u PH", dev->name, hp->rev_major, hp->rev_minor, hp->access, 1 + hp->nph); const struct jesd216_param_header *php = hp->phdr; const struct jesd216_param_header *phpe = php + MIN(decl_nph, 1 + hp->nph); while (php != phpe) { uint16_t 
id = jesd216_param_id(php); LOG_DBG("PH%u: %04x rev %u.%u: %u DW @ %x", (php - hp->phdr), id, php->rev_major, php->rev_minor, php->len_dw, jesd216_param_addr(php)); if (id == JESD216_SFDP_PARAM_ID_BFP) { union { uint32_t dw[20]; struct jesd216_bfp bfp; } u2; const struct jesd216_bfp *bfp = &u2.bfp; ret = xspi_read_sfdp(dev, jesd216_param_addr(php), (uint8_t *)u2.dw, MIN(sizeof(uint32_t) * php->len_dw, sizeof(u2.dw))); if (ret == 0) { ret = spi_nor_process_bfp(dev, php, bfp); } if (ret != 0) { LOG_ERR("SFDP BFP failed: %d", ret); break; } } if (id == JESD216_SFDP_PARAM_ID_4B_ADDR_INSTR) { if (dev_data->address_width == 4U) { /* * Check table 4 byte address instruction table to get supported * erase opcodes when running in 4 byte address mode */ union { uint32_t dw[2]; struct { uint32_t dummy; uint8_t type[4]; } types; } u2; ret = xspi_read_sfdp(dev, jesd216_param_addr(php), (uint8_t *)u2.dw, MIN(sizeof(uint32_t) * php->len_dw, sizeof(u2.dw))); if (ret != 0) { break; } for (uint8_t ei = 0; ei < JESD216_NUM_ERASE_TYPES; ++ei) { struct jesd216_erase_type *etp = &dev_data->erase_types[ei]; const uint8_t cmd = u2.types.type[ei]; /* 0xff means not supported */ if (cmd == 0xff) { etp->exp = 0; etp->cmd = 0; } else { etp->cmd = cmd; }; } } } ++php; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) ret = setup_pages_layout(dev); if (ret != 0) { LOG_ERR("layout setup failed: %d", ret); return -ENODEV; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ #ifdef CONFIG_STM32_MEMMAP ret = stm32_xspi_set_memorymap(dev); if (ret != 0) { LOG_ERR("Failed to enable memory-mapped mode: %d", ret); return ret; } LOG_INF("Memory-mapped NOR-flash at 0x%lx (0x%x bytes)", (long)(STM32_XSPI_BASE_ADDRESS), dev_cfg->flash_size); #else LOG_INF("NOR external-flash at 0x%lx (0x%x bytes)", (long)(STM32_XSPI_BASE_ADDRESS), dev_cfg->flash_size); #endif /* CONFIG_STM32_MEMMAP*/ return 0; } #if STM32_XSPI_USE_DMA #define DMA_CHANNEL_CONFIG(node, dir) \ DT_DMAS_CELL_BY_NAME(node, dir, channel_config) #define 
XSPI_DMA_CHANNEL_INIT(node, dir, dir_cap, src_dev, dest_dev) \ .dev = DEVICE_DT_GET(DT_DMAS_CTLR(node)), \ .channel = DT_DMAS_CELL_BY_NAME(node, dir, channel), \ .reg = (DMA_TypeDef *)DT_REG_ADDR( \ DT_PHANDLE_BY_NAME(node, dmas, dir)), \ .cfg = { \ .dma_slot = DT_DMAS_CELL_BY_NAME(node, dir, slot), \ .channel_direction = STM32_DMA_CONFIG_DIRECTION( \ DMA_CHANNEL_CONFIG(node, dir)), \ .channel_priority = STM32_DMA_CONFIG_PRIORITY( \ DMA_CHANNEL_CONFIG(node, dir)), \ .dma_callback = xspi_dma_callback, \ }, \ .src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC( \ DMA_CHANNEL_CONFIG(node, dir)), \ .dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC( \ DMA_CHANNEL_CONFIG(node, dir)), #define XSPI_DMA_CHANNEL(node, dir, DIR, src, dest) \ .dma_##dir = { \ COND_CODE_1(DT_DMAS_HAS_NAME(node, dir), \ (XSPI_DMA_CHANNEL_INIT(node, dir, DIR, src, dest)), \ (NULL)) \ }, #else #define XSPI_DMA_CHANNEL(node, dir, DIR, src, dest) #endif /* CONFIG_USE_STM32_HAL_DMA */ #define XSPI_FLASH_MODULE(drv_id, flash_id) \ (DT_DRV_INST(drv_id), xspi_nor_flash_##flash_id) #define DT_WRITEOC_PROP_OR(inst, default_value) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, writeoc), \ (_CONCAT(SPI_NOR_CMD_, DT_STRING_TOKEN(DT_DRV_INST(inst), writeoc))), \ ((default_value))) #define DT_QER_PROP_OR(inst, default_value) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, quad_enable_requirements), \ (_CONCAT(JESD216_DW15_QER_VAL_, \ DT_STRING_TOKEN(DT_DRV_INST(inst), quad_enable_requirements))), \ ((default_value))) static void flash_stm32_xspi_irq_config_func(const struct device *dev); static const struct stm32_pclken pclken[] = STM32_DT_CLOCKS(STM32_XSPI_NODE); PINCTRL_DT_DEFINE(STM32_XSPI_NODE); static const struct flash_stm32_xspi_config flash_stm32_xspi_cfg = { .pclken = pclken, .pclk_len = DT_NUM_CLOCKS(STM32_XSPI_NODE), .irq_config = flash_stm32_xspi_irq_config_func, .flash_size = DT_INST_REG_ADDR_BY_IDX(0, 1), .max_frequency = DT_INST_PROP(0, ospi_max_frequency), .data_mode = DT_INST_PROP(0, 
spi_bus_width), /* SPI or OPI */ .data_rate = DT_INST_PROP(0, data_rate), /* DTR or STR */ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(STM32_XSPI_NODE), #if STM32_XSPI_RESET_GPIO .reset = GPIO_DT_SPEC_INST_GET(0, reset_gpios), #endif /* STM32_XSPI_RESET_GPIO */ #if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_ospi_nor), sfdp_bfp) .sfdp_bfp = DT_INST_PROP(0, sfdp_bfp), #endif /* sfdp_bfp */ }; static struct flash_stm32_xspi_data flash_stm32_xspi_dev_data = { .hxspi = { .Instance = (XSPI_TypeDef *)DT_REG_ADDR(STM32_XSPI_NODE), .Init = { .FifoThresholdByte = STM32_XSPI_FIFO_THRESHOLD, .SampleShifting = (DT_PROP(STM32_XSPI_NODE, ssht_enable) ? HAL_XSPI_SAMPLE_SHIFT_HALFCYCLE : HAL_XSPI_SAMPLE_SHIFT_NONE), .ChipSelectHighTimeCycle = 1, .ClockMode = HAL_XSPI_CLOCK_MODE_0, .ChipSelectBoundary = 0, .MemoryMode = HAL_XSPI_SINGLE_MEM, #if defined(HAL_XSPIM_IOPORT_1) || defined(HAL_XSPIM_IOPORT_2) .MemorySelect = ((DT_INST_PROP(0, ncs_line) == 1) ? HAL_XSPI_CSSEL_NCS1 : HAL_XSPI_CSSEL_NCS2), #endif .FreeRunningClock = HAL_XSPI_FREERUNCLK_DISABLE, #if defined(OCTOSPI_DCR4_REFRESH) .Refresh = 0, #endif /* OCTOSPI_DCR4_REFRESH */ }, }, .qer_type = DT_QER_PROP_OR(0, JESD216_DW15_QER_VAL_S1B6), .write_opcode = DT_WRITEOC_PROP_OR(0, SPI_NOR_WRITEOC_NONE), .page_size = SPI_NOR_PAGE_SIZE, /* by default, to be updated by sfdp */ #if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_ospi_nor), jedec_id) .jedec_id = DT_INST_PROP(0, jedec_id), #endif /* jedec_id */ XSPI_DMA_CHANNEL(STM32_XSPI_NODE, tx, TX, MEMORY, PERIPHERAL) XSPI_DMA_CHANNEL(STM32_XSPI_NODE, rx, RX, PERIPHERAL, MEMORY) }; static void flash_stm32_xspi_irq_config_func(const struct device *dev) { IRQ_CONNECT(DT_IRQN(STM32_XSPI_NODE), DT_IRQ(STM32_XSPI_NODE, priority), flash_stm32_xspi_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_IRQN(STM32_XSPI_NODE)); } DEVICE_DT_INST_DEFINE(0, &flash_stm32_xspi_init, NULL, &flash_stm32_xspi_dev_data, &flash_stm32_xspi_cfg, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &flash_stm32_xspi_driver_api); ```
/content/code_sandbox/drivers/flash/flash_stm32_xspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
22,423
```c /* * */ #define DT_DRV_COMPAT nordic_qspi_nor #include <errno.h> #include <zephyr/drivers/flash.h> #include <zephyr/init.h> #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/sys/atomic.h> #include <soc.h> #include <string.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(qspi_nor, CONFIG_FLASH_LOG_LEVEL); #include "spi_nor.h" #include "jesd216.h" #include "flash_priv.h" #include <nrf_erratas.h> #include <nrfx_qspi.h> #include <hal/nrf_clock.h> #include <hal/nrf_gpio.h> struct qspi_nor_data { #ifdef CONFIG_MULTITHREADING /* The semaphore to control exclusive access to the device. */ struct k_sem sem; /* The semaphore to indicate that transfer has completed. */ struct k_sem sync; /* A counter to control QSPI deactivation. */ atomic_t usage_count; #else /* CONFIG_MULTITHREADING */ /* A flag that signals completed transfer when threads are * not enabled. */ volatile bool ready; #endif /* CONFIG_MULTITHREADING */ bool xip_enabled; }; struct qspi_nor_config { nrfx_qspi_config_t nrfx_cfg; /* Size from devicetree, in bytes */ uint32_t size; /* JEDEC id from devicetree */ uint8_t id[SPI_NOR_MAX_ID_LEN]; const struct pinctrl_dev_config *pcfg; }; /* Status register bits */ #define QSPI_SECTOR_SIZE SPI_NOR_SECTOR_SIZE #define QSPI_BLOCK_SIZE SPI_NOR_BLOCK_SIZE /* instance 0 flash size in bytes */ #if DT_INST_NODE_HAS_PROP(0, size_in_bytes) #define INST_0_BYTES (DT_INST_PROP(0, size_in_bytes)) #elif DT_INST_NODE_HAS_PROP(0, size) #define INST_0_BYTES (DT_INST_PROP(0, size) / 8) #else #error "No size specified. 
'size' or 'size-in-bytes' must be set" #endif BUILD_ASSERT(!(DT_INST_NODE_HAS_PROP(0, size_in_bytes) && DT_INST_NODE_HAS_PROP(0, size)), "Node " DT_NODE_PATH(DT_DRV_INST(0)) " has both size and size-in-bytes " "properties; use exactly one"); #define INST_0_SCK_FREQUENCY DT_INST_PROP(0, sck_frequency) /* * According to the respective specifications, the nRF52 QSPI supports clock * frequencies 2 - 32 MHz and the nRF53 one supports 6 - 96 MHz. */ BUILD_ASSERT(INST_0_SCK_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 16), "Unsupported SCK frequency."); /* * Determine a configuration value (INST_0_SCK_CFG) and, if needed, a divider * (BASE_CLOCK_DIV) for the clock from which the SCK frequency is derived that * need to be used to achieve the SCK frequency as close as possible (but not * higher) to the one specified in DT. */ #if defined(CONFIG_SOC_SERIES_NRF53X) /* * On nRF53 Series SoCs, the default /4 divider for the HFCLK192M clock can * only be used when the QSPI peripheral is idle. When a QSPI operation is * performed, the divider needs to be changed to /1 or /2 (particularly, * the specification says that the peripheral "supports 192 MHz and 96 MHz * PCLK192M frequency"), but after that operation is complete, the default * divider needs to be restored to avoid increased current consumption. */ #if (INST_0_SCK_FREQUENCY >= NRF_QSPI_BASE_CLOCK_FREQ) /* For requested SCK >= 96 MHz, use HFCLK192M / 1 / (2*1) = 96 MHz */ #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1 /* If anomaly 159 is to be prevented, only /1 divider can be used. 
*/ #elif NRF53_ERRATA_159_ENABLE_WORKAROUND #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1 #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ, \ INST_0_SCK_FREQUENCY) - 1) #elif (INST_0_SCK_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 2)) /* For 96 MHz > SCK >= 48 MHz, use HFCLK192M / 2 / (2*1) = 48 MHz */ #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_2 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1 #elif (INST_0_SCK_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 3)) /* For 48 MHz > SCK >= 32 MHz, use HFCLK192M / 1 / (2*3) = 32 MHz */ #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_1 #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV3 #else /* For requested SCK < 32 MHz, use divider /2 for HFCLK192M. */ #define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_2 #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ / 2, \ INST_0_SCK_FREQUENCY) - 1) #endif /* After the base clock divider is changed, some time is needed for the new * setting to take effect. This value specifies the delay (in microseconds) * to be applied to ensure that the clock is ready when the QSPI operation * starts. It was measured with a logic analyzer (unfortunately, the nRF5340 * specification does not provide any numbers in this regard). */ #define BASE_CLOCK_SWITCH_DELAY_US 7 #else /* * On nRF52 Series SoCs, the base clock divider is not configurable, * so BASE_CLOCK_DIV is not defined. */ #if (INST_0_SCK_FREQUENCY >= NRF_QSPI_BASE_CLOCK_FREQ) #define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1 #else #define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ, \ INST_0_SCK_FREQUENCY) - 1) #endif #endif /* defined(CONFIG_SOC_SERIES_NRF53X) */ /* 0 for MODE0 (CPOL=0, CPHA=0), 1 for MODE3 (CPOL=1, CPHA=1). 
*/ #define INST_0_SPI_MODE DT_INST_PROP(0, cpol) BUILD_ASSERT(DT_INST_PROP(0, cpol) == DT_INST_PROP(0, cpha), "Invalid combination of \"cpol\" and \"cpha\" properties."); /* for accessing devicetree properties of the bus node */ #define QSPI_NODE DT_INST_BUS(0) #define QSPI_PROP_AT(prop, idx) DT_PROP_BY_IDX(QSPI_NODE, prop, idx) #define QSPI_PROP_LEN(prop) DT_PROP_LEN(QSPI_NODE, prop) #define INST_0_QER _CONCAT(JESD216_DW15_QER_VAL_, \ DT_STRING_TOKEN(DT_DRV_INST(0), \ quad_enable_requirements)) #define IS_EQUAL(x, y) ((x) == (y)) #define SR1_WRITE_CLEARS_SR2 IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v1) #define SR2_WRITE_NEEDS_SR1 (IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v1) || \ IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v4) || \ IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v5)) #define QER_IS_S2B1 (IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v1) || \ IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v4) || \ IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v5) || \ IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v6)) BUILD_ASSERT((IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_NONE) || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S1B6) || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v1) || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v4) || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v5) || IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v6)), "Driver only supports NONE, S1B6, S2B1v1, S2B1v4, S2B1v5 or S2B1v6 for quad-enable-requirements"); #define INST_0_4BA DT_INST_PROP_OR(0, enter_4byte_addr, 0) #if (INST_0_4BA != 0) BUILD_ASSERT(((INST_0_4BA & 0x03) != 0), "Driver only supports command (0xB7) for entering 4 byte addressing mode"); BUILD_ASSERT(DT_INST_PROP(0, address_size_32), "After entering 4 byte addressing mode, 4 byte addressing is expected"); #endif void z_impl_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable); void z_vrfy_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable); #define WORD_SIZE 4 /** * @brief QSPI buffer 
structure * Structure used both for TX and RX purposes. * * @param buf is a valid pointer to a data buffer. * Can not be NULL. * @param len is the length of the data to be handled. * If no data to transmit/receive - pass 0. */ struct qspi_buf { uint8_t *buf; size_t len; }; /** * @brief QSPI command structure * Structure used for custom command usage. * * @param op_code is a command value (i.e 0x9F - get Jedec ID) * @param tx_buf structure used for TX purposes. Can be NULL if not used. * @param rx_buf structure used for RX purposes. Can be NULL if not used. */ struct qspi_cmd { uint8_t op_code; const struct qspi_buf *tx_buf; const struct qspi_buf *rx_buf; }; static int qspi_nor_write_protection_set(const struct device *dev, bool write_protect); static int exit_dpd(const struct device *const dev); /** * @brief Test whether offset is aligned. */ #define QSPI_IS_SECTOR_ALIGNED(_ofs) (((_ofs) & (QSPI_SECTOR_SIZE - 1U)) == 0) #define QSPI_IS_BLOCK_ALIGNED(_ofs) (((_ofs) & (QSPI_BLOCK_SIZE - 1U)) == 0) /** * @brief Converts NRFX return codes to the zephyr ones */ static inline int qspi_get_zephyr_ret_code(nrfx_err_t res) { switch (res) { case NRFX_SUCCESS: return 0; case NRFX_ERROR_INVALID_PARAM: case NRFX_ERROR_INVALID_ADDR: return -EINVAL; case NRFX_ERROR_INVALID_STATE: return -ECANCELED; #if NRF53_ERRATA_159_ENABLE_WORKAROUND case NRFX_ERROR_FORBIDDEN: LOG_ERR("nRF5340 anomaly 159 conditions detected"); LOG_ERR("Set the CPU clock to 64 MHz before starting QSPI operation"); return -ECANCELED; #endif case NRFX_ERROR_BUSY: case NRFX_ERROR_TIMEOUT: default: return -EBUSY; } } static inline void qspi_lock(const struct device *dev) { #ifdef CONFIG_MULTITHREADING struct qspi_nor_data *dev_data = dev->data; k_sem_take(&dev_data->sem, K_FOREVER); #endif } static inline void qspi_unlock(const struct device *dev) { #ifdef CONFIG_MULTITHREADING struct qspi_nor_data *dev_data = dev->data; k_sem_give(&dev_data->sem); #endif } static inline void qspi_clock_div_change(void) { #ifdef 
CONFIG_SOC_SERIES_NRF53X /* Make sure the base clock divider is changed accordingly * before a QSPI transfer is performed. */ nrf_clock_hfclk192m_div_set(NRF_CLOCK, BASE_CLOCK_DIV); k_busy_wait(BASE_CLOCK_SWITCH_DELAY_US); #endif } static inline void qspi_clock_div_restore(void) { #ifdef CONFIG_SOC_SERIES_NRF53X /* Restore the default base clock divider to reduce power * consumption when the QSPI peripheral is idle. */ nrf_clock_hfclk192m_div_set(NRF_CLOCK, NRF_CLOCK_HFCLK_DIV_4); #endif } static void qspi_acquire(const struct device *dev) { struct qspi_nor_data *dev_data = dev->data; int rc; rc = pm_device_runtime_get(dev); if (rc < 0) { LOG_ERR("pm_device_runtime_get failed: %d", rc); } #if defined(CONFIG_MULTITHREADING) /* In multithreading, the driver can call qspi_acquire more than once * before calling qspi_release. Keeping count, so QSPI is deactivated * only at the last call (usage_count == 0). */ atomic_inc(&dev_data->usage_count); #endif qspi_lock(dev); if (!dev_data->xip_enabled) { qspi_clock_div_change(); pm_device_busy_set(dev); } } static void qspi_release(const struct device *dev) { struct qspi_nor_data *dev_data = dev->data; bool deactivate = true; int rc; #if defined(CONFIG_MULTITHREADING) /* The last thread to finish using the driver deactivates the QSPI */ deactivate = atomic_dec(&dev_data->usage_count) == 1; #endif if (!dev_data->xip_enabled) { qspi_clock_div_restore(); if (deactivate) { (void) nrfx_qspi_deactivate(); } pm_device_busy_clear(dev); } qspi_unlock(dev); rc = pm_device_runtime_put(dev); if (rc < 0) { LOG_ERR("pm_device_runtime_put failed: %d", rc); } } static inline void qspi_wait_for_completion(const struct device *dev, nrfx_err_t res) { struct qspi_nor_data *dev_data = dev->data; if (res == NRFX_SUCCESS) { #ifdef CONFIG_MULTITHREADING k_sem_take(&dev_data->sync, K_FOREVER); #else /* CONFIG_MULTITHREADING */ unsigned int key = irq_lock(); while (!dev_data->ready) { k_cpu_atomic_idle(key); key = irq_lock(); } dev_data->ready = false; 
irq_unlock(key); #endif /* CONFIG_MULTITHREADING */ } } static inline void qspi_complete(struct qspi_nor_data *dev_data) { #ifdef CONFIG_MULTITHREADING k_sem_give(&dev_data->sync); #else /* CONFIG_MULTITHREADING */ dev_data->ready = true; #endif /* CONFIG_MULTITHREADING */ } /** * @brief QSPI handler * * @param event Driver event type * @param p_context Pointer to context. Use in interrupt handler. * @retval None */ static void qspi_handler(nrfx_qspi_evt_t event, void *p_context) { struct qspi_nor_data *dev_data = p_context; if (event == NRFX_QSPI_EVENT_DONE) { qspi_complete(dev_data); } } /* QSPI send custom command. * * If this is used for both send and receive the buffer sizes must be * equal and cover the whole transaction. */ static int qspi_send_cmd(const struct device *dev, const struct qspi_cmd *cmd, bool wren) { /* Check input parameters */ if (!cmd) { return -EINVAL; } const void *tx_buf = NULL; size_t tx_len = 0; void *rx_buf = NULL; size_t rx_len = 0; size_t xfer_len = sizeof(cmd->op_code); if (cmd->tx_buf) { tx_buf = cmd->tx_buf->buf; tx_len = cmd->tx_buf->len; } if (cmd->rx_buf) { rx_buf = cmd->rx_buf->buf; rx_len = cmd->rx_buf->len; } if ((rx_len != 0) && (tx_len != 0)) { if (rx_len != tx_len) { return -EINVAL; } xfer_len += tx_len; } else { /* At least one of these is zero. */ xfer_len += tx_len + rx_len; } if (xfer_len > NRF_QSPI_CINSTR_LEN_9B) { LOG_WRN("cinstr %02x transfer too long: %zu", cmd->op_code, xfer_len); return -EINVAL; } nrf_qspi_cinstr_conf_t cinstr_cfg = { .opcode = cmd->op_code, .length = xfer_len, .io2_level = true, .io3_level = true, .wipwait = false, .wren = wren, }; int res = nrfx_qspi_cinstr_xfer(&cinstr_cfg, tx_buf, rx_buf); return qspi_get_zephyr_ret_code(res); } #if !IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_NONE) /* RDSR. Negative value is error. 
*/ static int qspi_rdsr(const struct device *dev, uint8_t sr_num) { uint8_t opcode = SPI_NOR_CMD_RDSR; if (sr_num > 2 || sr_num == 0) { return -EINVAL; } if (sr_num == 2) { opcode = SPI_NOR_CMD_RDSR2; } uint8_t sr = 0xFF; const struct qspi_buf sr_buf = { .buf = &sr, .len = sizeof(sr), }; struct qspi_cmd cmd = { .op_code = opcode, .rx_buf = &sr_buf, }; int rc = qspi_send_cmd(dev, &cmd, false); return (rc < 0) ? rc : sr; } /* Wait until RDSR confirms write is not in progress. */ static int qspi_wait_while_writing(const struct device *dev) { int rc; do { rc = qspi_rdsr(dev, 1); } while ((rc >= 0) && ((rc & SPI_NOR_WIP_BIT) != 0U)); return (rc < 0) ? rc : 0; } static int qspi_wrsr(const struct device *dev, uint8_t sr_val, uint8_t sr_num) { int rc = 0; uint8_t opcode = SPI_NOR_CMD_WRSR; uint8_t length = 1; uint8_t sr_array[2] = {0}; if (sr_num > 2 || sr_num == 0) { return -EINVAL; } if (sr_num == 1) { sr_array[0] = sr_val; #if SR1_WRITE_CLEARS_SR2 /* Writing sr1 clears sr2. need to read/modify/write both. */ rc = qspi_rdsr(dev, 2); if (rc < 0) { LOG_ERR("RDSR for WRSR failed: %d", rc); return rc; } sr_array[1] = rc; length = 2; #endif } else { /* sr_num == 2 */ #if SR2_WRITE_NEEDS_SR1 /* Writing sr2 requires writing sr1 as well. * Uses standard WRSR opcode */ sr_array[1] = sr_val; rc = qspi_rdsr(dev, 1); if (rc < 0) { LOG_ERR("RDSR for WRSR failed: %d", rc); return rc; } sr_array[0] = rc; length = 2; #elif IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S2B1v6) /* Writing sr2 uses a dedicated WRSR2 command */ sr_array[0] = sr_val; opcode = SPI_NOR_CMD_WRSR2; #else LOG_ERR("Attempted to write status register 2, but no known method to write sr2"); return -EINVAL; #endif } const struct qspi_buf sr_buf = { .buf = sr_array, .len = length, }; struct qspi_cmd cmd = { .op_code = opcode, .tx_buf = &sr_buf, }; rc = qspi_send_cmd(dev, &cmd, true); /* Writing SR can take some time, and further * commands sent while it's happening can be * corrupted. Wait. 
*/ if (rc == 0) { rc = qspi_wait_while_writing(dev); } return rc; } #endif /* !IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_NONE) */ /* QSPI erase */ static int qspi_erase(const struct device *dev, uint32_t addr, uint32_t size) { const struct qspi_nor_config *params = dev->config; int rc, rc2; rc = qspi_nor_write_protection_set(dev, false); if (rc != 0) { return rc; } while (size > 0) { nrfx_err_t res = !NRFX_SUCCESS; uint32_t adj = 0; if (size == params->size) { /* chip erase */ res = nrfx_qspi_chip_erase(); adj = size; } else if ((size >= QSPI_BLOCK_SIZE) && QSPI_IS_BLOCK_ALIGNED(addr)) { /* 64 kB block erase */ res = nrfx_qspi_erase(NRF_QSPI_ERASE_LEN_64KB, addr); adj = QSPI_BLOCK_SIZE; } else if ((size >= QSPI_SECTOR_SIZE) && QSPI_IS_SECTOR_ALIGNED(addr)) { /* 4kB sector erase */ res = nrfx_qspi_erase(NRF_QSPI_ERASE_LEN_4KB, addr); adj = QSPI_SECTOR_SIZE; } else { /* minimal erase size is at least a sector size */ LOG_ERR("unsupported at 0x%lx size %zu", (long)addr, size); res = NRFX_ERROR_INVALID_PARAM; } qspi_wait_for_completion(dev, res); if (res == NRFX_SUCCESS) { addr += adj; size -= adj; } else { LOG_ERR("erase error at 0x%lx size %zu", (long)addr, size); rc = qspi_get_zephyr_ret_code(res); break; } } rc2 = qspi_nor_write_protection_set(dev, true); return rc != 0 ? rc : rc2; } static int configure_chip(const struct device *dev) { const struct qspi_nor_config *dev_config = dev->config; int rc = 0; /* Set QE to match transfer mode. If not using quad * it's OK to leave QE set, but doing so prevents use * of WP#/RESET#/HOLD# which might be useful. * * Note build assert above ensures QER is S1B6 or * S2B1v1/4/5/6. Other options require more logic. 
*/ #if !IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_NONE) nrf_qspi_prot_conf_t const *prot_if = &dev_config->nrfx_cfg.prot_if; bool qe_value = (prot_if->writeoc == NRF_QSPI_WRITEOC_PP4IO) || (prot_if->writeoc == NRF_QSPI_WRITEOC_PP4O) || (prot_if->readoc == NRF_QSPI_READOC_READ4IO) || (prot_if->readoc == NRF_QSPI_READOC_READ4O) || (prot_if->readoc == NRF_QSPI_READOC_READ2IO); uint8_t sr_num = 0; uint8_t qe_mask = 0; #if IS_EQUAL(INST_0_QER, JESD216_DW15_QER_VAL_S1B6) sr_num = 1; qe_mask = BIT(6); #elif QER_IS_S2B1 sr_num = 2; qe_mask = BIT(1); #else LOG_ERR("Unsupported QER type"); return -EINVAL; #endif rc = qspi_rdsr(dev, sr_num); if (rc < 0) { LOG_ERR("RDSR failed: %d", rc); return rc; } uint8_t sr = (uint8_t)rc; bool qe_state = ((sr & qe_mask) != 0U); LOG_DBG("RDSR %02x QE %d need %d: %s", sr, qe_state, qe_value, (qe_state != qe_value) ? "updating" : "no-change"); rc = 0; if (qe_state != qe_value) { sr ^= qe_mask; rc = qspi_wrsr(dev, sr, sr_num); } if (rc < 0) { LOG_ERR("QE %s failed: %d", qe_value ? "set" : "clear", rc); return rc; } #endif if (INST_0_4BA != 0) { struct qspi_cmd cmd = { .op_code = SPI_NOR_CMD_4BA, }; /* Call will send write enable before instruction if that * requirement is encoded in INST_0_4BA. 
*/ rc = qspi_send_cmd(dev, &cmd, (INST_0_4BA & 0x02)); if (rc < 0) { LOG_ERR("E4BA cmd issue failed: %d.", rc); } else { LOG_DBG("E4BA cmd issued."); } } return rc; } static int qspi_rdid(const struct device *dev, uint8_t *id) { const struct qspi_buf rx_buf = { .buf = id, .len = 3 }; const struct qspi_cmd cmd = { .op_code = SPI_NOR_CMD_RDID, .rx_buf = &rx_buf, }; return qspi_send_cmd(dev, &cmd, false); } #if defined(CONFIG_FLASH_JESD216_API) static int qspi_read_jedec_id(const struct device *dev, uint8_t *id) { int rc; qspi_acquire(dev); rc = qspi_rdid(dev, id); qspi_release(dev); return rc; } static int qspi_sfdp_read(const struct device *dev, off_t offset, void *data, size_t len) { __ASSERT(data != NULL, "null destination"); uint8_t addr_buf[] = { offset >> 16, offset >> 8, offset, 0, /* wait state */ }; nrf_qspi_cinstr_conf_t cinstr_cfg = { .opcode = JESD216_CMD_READ_SFDP, .length = NRF_QSPI_CINSTR_LEN_1B, .io2_level = true, .io3_level = true, }; nrfx_err_t res; qspi_acquire(dev); res = nrfx_qspi_lfm_start(&cinstr_cfg); if (res != NRFX_SUCCESS) { LOG_DBG("lfm_start: %x", res); goto out; } res = nrfx_qspi_lfm_xfer(addr_buf, NULL, sizeof(addr_buf), false); if (res != NRFX_SUCCESS) { LOG_DBG("lfm_xfer addr: %x", res); goto out; } res = nrfx_qspi_lfm_xfer(NULL, data, len, true); if (res != NRFX_SUCCESS) { LOG_DBG("lfm_xfer read: %x", res); goto out; } out: qspi_release(dev); return qspi_get_zephyr_ret_code(res); } #endif /* CONFIG_FLASH_JESD216_API */ static inline nrfx_err_t read_non_aligned(const struct device *dev, off_t addr, void *dest, size_t size) { uint8_t __aligned(WORD_SIZE) buf[WORD_SIZE * 2]; uint8_t *dptr = dest; off_t flash_prefix = (WORD_SIZE - (addr % WORD_SIZE)) % WORD_SIZE; if (flash_prefix > size) { flash_prefix = size; } off_t dest_prefix = (WORD_SIZE - (off_t)dptr % WORD_SIZE) % WORD_SIZE; if (dest_prefix > size) { dest_prefix = size; } off_t flash_suffix = (size - flash_prefix) % WORD_SIZE; off_t flash_middle = size - flash_prefix - 
flash_suffix; off_t dest_middle = size - dest_prefix - (size - dest_prefix) % WORD_SIZE; if (flash_middle > dest_middle) { flash_middle = dest_middle; flash_suffix = size - flash_prefix - flash_middle; } nrfx_err_t res = NRFX_SUCCESS; /* read from aligned flash to aligned memory */ if (flash_middle != 0) { res = nrfx_qspi_read(dptr + dest_prefix, flash_middle, addr + flash_prefix); qspi_wait_for_completion(dev, res); if (res != NRFX_SUCCESS) { return res; } /* perform shift in RAM */ if (flash_prefix != dest_prefix) { memmove(dptr + flash_prefix, dptr + dest_prefix, flash_middle); } } /* read prefix */ if (flash_prefix != 0) { res = nrfx_qspi_read(buf, WORD_SIZE, addr - (WORD_SIZE - flash_prefix)); qspi_wait_for_completion(dev, res); if (res != NRFX_SUCCESS) { return res; } memcpy(dptr, buf + WORD_SIZE - flash_prefix, flash_prefix); } /* read suffix */ if (flash_suffix != 0) { res = nrfx_qspi_read(buf, WORD_SIZE * 2, addr + flash_prefix + flash_middle); qspi_wait_for_completion(dev, res); if (res != NRFX_SUCCESS) { return res; } memcpy(dptr + flash_prefix + flash_middle, buf, flash_suffix); } return res; } static int qspi_nor_read(const struct device *dev, off_t addr, void *dest, size_t size) { const struct qspi_nor_config *params = dev->config; nrfx_err_t res; if (!dest) { return -EINVAL; } /* read size must be non-zero */ if (!size) { return 0; } /* affected region should be within device */ if (addr < 0 || (addr + size) > params->size) { LOG_ERR("read error: address or size " "exceeds expected values." 
"Addr: 0x%lx size %zu", (long)addr, size); return -EINVAL; } qspi_acquire(dev); res = read_non_aligned(dev, addr, dest, size); qspi_release(dev); return qspi_get_zephyr_ret_code(res); } /* addr aligned, sptr not null, slen less than 4 */ static inline nrfx_err_t write_sub_word(const struct device *dev, off_t addr, const void *sptr, size_t slen) { uint8_t __aligned(4) buf[4]; nrfx_err_t res; /* read out the whole word so that unchanged data can be * written back */ res = nrfx_qspi_read(buf, sizeof(buf), addr); qspi_wait_for_completion(dev, res); if (res == NRFX_SUCCESS) { memcpy(buf, sptr, slen); res = nrfx_qspi_write(buf, sizeof(buf), addr); qspi_wait_for_completion(dev, res); } return res; } BUILD_ASSERT((CONFIG_NORDIC_QSPI_NOR_STACK_WRITE_BUFFER_SIZE % 4) == 0, "NOR stack buffer must be multiple of 4 bytes"); /* If enabled write using a stack-allocated aligned SRAM buffer as * required for DMA transfers by QSPI peripheral. * * If not enabled return the error the peripheral would have produced. 
*/ static nrfx_err_t write_through_buffer(const struct device *dev, off_t addr, const void *sptr, size_t slen) { nrfx_err_t res = NRFX_SUCCESS; if (CONFIG_NORDIC_QSPI_NOR_STACK_WRITE_BUFFER_SIZE > 0) { uint8_t __aligned(4) buf[CONFIG_NORDIC_QSPI_NOR_STACK_WRITE_BUFFER_SIZE]; const uint8_t *sp = sptr; while ((slen > 0) && (res == NRFX_SUCCESS)) { size_t len = MIN(slen, sizeof(buf)); memcpy(buf, sp, len); res = nrfx_qspi_write(buf, len, addr); qspi_wait_for_completion(dev, res); if (res == NRFX_SUCCESS) { slen -= len; sp += len; addr += len; } } } else { res = NRFX_ERROR_INVALID_ADDR; } return res; } static int qspi_nor_write(const struct device *dev, off_t addr, const void *src, size_t size) { const struct qspi_nor_config *params = dev->config; int rc, rc2; if (!src) { return -EINVAL; } /* write size must be non-zero, less than 4, or a multiple of 4 */ if ((size == 0) || ((size > 4) && ((size % 4U) != 0))) { return -EINVAL; } /* address must be 4-byte aligned */ if ((addr % 4U) != 0) { return -EINVAL; } /* affected region should be within device */ if (addr < 0 || (addr + size) > params->size) { LOG_ERR("write error: address or size " "exceeds expected values." "Addr: 0x%lx size %zu", (long)addr, size); return -EINVAL; } qspi_acquire(dev); rc = qspi_nor_write_protection_set(dev, false); if (rc == 0) { nrfx_err_t res; if (size < 4U) { res = write_sub_word(dev, addr, src, size); } else if (!nrfx_is_in_ram(src) || !nrfx_is_word_aligned(src)) { res = write_through_buffer(dev, addr, src, size); } else { res = nrfx_qspi_write(src, size, addr); qspi_wait_for_completion(dev, res); } rc = qspi_get_zephyr_ret_code(res); } rc2 = qspi_nor_write_protection_set(dev, true); qspi_release(dev); return rc != 0 ? 
rc : rc2; } static int qspi_nor_erase(const struct device *dev, off_t addr, size_t size) { const struct qspi_nor_config *params = dev->config; int rc; /* address must be sector-aligned */ if ((addr % QSPI_SECTOR_SIZE) != 0) { return -EINVAL; } /* size must be a non-zero multiple of sectors */ if ((size == 0) || (size % QSPI_SECTOR_SIZE) != 0) { return -EINVAL; } /* affected region should be within device */ if (addr < 0 || (addr + size) > params->size) { LOG_ERR("erase error: address or size " "exceeds expected values." "Addr: 0x%lx size %zu", (long)addr, size); return -EINVAL; } qspi_acquire(dev); rc = qspi_erase(dev, addr, size); qspi_release(dev); return rc; } static int qspi_nor_write_protection_set(const struct device *dev, bool write_protect) { int rc = 0; struct qspi_cmd cmd = { .op_code = ((write_protect) ? SPI_NOR_CMD_WRDI : SPI_NOR_CMD_WREN), }; if (qspi_send_cmd(dev, &cmd, false) != 0) { rc = -EIO; } return rc; } static int qspi_init(const struct device *dev) { const struct qspi_nor_config *dev_config = dev->config; uint8_t id[SPI_NOR_MAX_ID_LEN]; nrfx_err_t res; int rc; res = nrfx_qspi_init(&dev_config->nrfx_cfg, qspi_handler, dev->data); rc = qspi_get_zephyr_ret_code(res); if (rc < 0) { return rc; } #if DT_INST_NODE_HAS_PROP(0, rx_delay) if (!nrf53_errata_121()) { nrf_qspi_iftiming_set(NRF_QSPI, DT_INST_PROP(0, rx_delay)); } #endif /* It may happen that after the flash chip was previously put into * the DPD mode, the system was reset but the flash chip was not. * Consequently, the flash chip can be in the DPD mode at this point. * Some flash chips will just exit the DPD mode on the first CS pulse, * but some need to receive the dedicated command to do it, so send it. * This can be the case even if the current image does not have * CONFIG_PM_DEVICE set to enter DPD mode, as a previously executing image * (for example the main image if the currently executing image is the * bootloader) might have set DPD mode before reboot. 
As a result, * attempt to exit DPD mode regardless of whether CONFIG_PM_DEVICE is set. */ rc = exit_dpd(dev); if (rc < 0) { return rc; } /* Retrieve the Flash JEDEC ID and compare it with the one expected. */ rc = qspi_rdid(dev, id); if (rc < 0) { return rc; } if (memcmp(dev_config->id, id, SPI_NOR_MAX_ID_LEN) != 0) { LOG_ERR("JEDEC id [%02x %02x %02x] expect [%02x %02x %02x]", id[0], id[1], id[2], dev_config->id[0], dev_config->id[1], dev_config->id[2]); return -ENODEV; } /* The chip is correct, it can be configured now. */ return configure_chip(dev); } static int qspi_nor_init(const struct device *dev) { const struct qspi_nor_config *dev_config = dev->config; int rc; rc = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT); if (rc < 0) { return rc; } IRQ_CONNECT(DT_IRQN(QSPI_NODE), DT_IRQ(QSPI_NODE, priority), nrfx_isr, nrfx_qspi_irq_handler, 0); qspi_clock_div_change(); rc = qspi_init(dev); qspi_clock_div_restore(); if (!IS_ENABLED(CONFIG_NORDIC_QSPI_NOR_XIP) && nrfx_qspi_init_check()) { (void)nrfx_qspi_deactivate(); } #ifdef CONFIG_NORDIC_QSPI_NOR_XIP if (rc == 0) { /* Enable XIP mode for QSPI NOR flash, this will prevent the * flash from being powered down */ z_impl_nrf_qspi_nor_xip_enable(dev, true); } #endif return rc; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) /* instance 0 page count */ #define LAYOUT_PAGES_COUNT (INST_0_BYTES / \ CONFIG_NORDIC_QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE) BUILD_ASSERT((CONFIG_NORDIC_QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE * LAYOUT_PAGES_COUNT) == INST_0_BYTES, "QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE incompatible with flash size"); static const struct flash_pages_layout dev_layout = { .pages_count = LAYOUT_PAGES_COUNT, .pages_size = CONFIG_NORDIC_QSPI_NOR_FLASH_LAYOUT_PAGE_SIZE, }; #undef LAYOUT_PAGES_COUNT static void qspi_nor_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { *layout = &dev_layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static const struct 
flash_parameters * qspi_flash_get_parameters(const struct device *dev) { ARG_UNUSED(dev); static const struct flash_parameters qspi_flash_parameters = { .write_block_size = 4, .erase_value = 0xff, }; return &qspi_flash_parameters; } static const struct flash_driver_api qspi_nor_api = { .read = qspi_nor_read, .write = qspi_nor_write, .erase = qspi_nor_erase, .get_parameters = qspi_flash_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = qspi_nor_pages_layout, #endif #if defined(CONFIG_FLASH_JESD216_API) .sfdp_read = qspi_sfdp_read, .read_jedec_id = qspi_read_jedec_id, #endif /* CONFIG_FLASH_JESD216_API */ }; #ifdef CONFIG_PM_DEVICE static int enter_dpd(const struct device *const dev) { if (IS_ENABLED(DT_INST_PROP(0, has_dpd))) { struct qspi_cmd cmd = { .op_code = SPI_NOR_CMD_DPD, }; uint32_t t_enter_dpd = DT_INST_PROP_OR(0, t_enter_dpd, 0); int rc; rc = qspi_send_cmd(dev, &cmd, false); if (rc < 0) { return rc; } if (t_enter_dpd) { uint32_t t_enter_dpd_us = DIV_ROUND_UP(t_enter_dpd, NSEC_PER_USEC); k_busy_wait(t_enter_dpd_us); } } return 0; } #endif /* CONFIG_PM_DEVICE */ static int exit_dpd(const struct device *const dev) { if (IS_ENABLED(DT_INST_PROP(0, has_dpd))) { nrf_qspi_pins_t pins; nrf_qspi_pins_t disconnected_pins = { .sck_pin = NRF_QSPI_PIN_NOT_CONNECTED, .csn_pin = NRF_QSPI_PIN_NOT_CONNECTED, .io0_pin = NRF_QSPI_PIN_NOT_CONNECTED, .io1_pin = NRF_QSPI_PIN_NOT_CONNECTED, .io2_pin = NRF_QSPI_PIN_NOT_CONNECTED, .io3_pin = NRF_QSPI_PIN_NOT_CONNECTED, }; struct qspi_cmd cmd = { .op_code = SPI_NOR_CMD_RDPD, }; uint32_t t_exit_dpd = DT_INST_PROP_OR(0, t_exit_dpd, 0); nrfx_err_t res; int rc; nrf_qspi_pins_get(NRF_QSPI, &pins); nrf_qspi_pins_set(NRF_QSPI, &disconnected_pins); res = nrfx_qspi_activate(true); nrf_qspi_pins_set(NRF_QSPI, &pins); if (res != NRFX_SUCCESS) { return -EIO; } rc = qspi_send_cmd(dev, &cmd, false); if (rc < 0) { return rc; } if (t_exit_dpd) { uint32_t t_exit_dpd_us = DIV_ROUND_UP(t_exit_dpd, NSEC_PER_USEC); 
k_busy_wait(t_exit_dpd_us); } } return 0; } #ifdef CONFIG_PM_DEVICE static int qspi_suspend(const struct device *dev) { const struct qspi_nor_config *dev_config = dev->config; nrfx_err_t res; int rc; res = nrfx_qspi_mem_busy_check(); if (res != NRFX_SUCCESS) { return -EBUSY; } rc = enter_dpd(dev); if (rc < 0) { return rc; } nrfx_qspi_uninit(); return pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_SLEEP); } static int qspi_resume(const struct device *dev) { const struct qspi_nor_config *dev_config = dev->config; nrfx_err_t res; int rc; rc = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT); if (rc < 0) { return rc; } res = nrfx_qspi_init(&dev_config->nrfx_cfg, qspi_handler, dev->data); if (res != NRFX_SUCCESS) { return -EIO; } return exit_dpd(dev); } static int qspi_nor_pm_action(const struct device *dev, enum pm_device_action action) { int rc; if (pm_device_is_busy(dev)) { return -EBUSY; } qspi_lock(dev); qspi_clock_div_change(); switch (action) { case PM_DEVICE_ACTION_SUSPEND: rc = qspi_suspend(dev); break; case PM_DEVICE_ACTION_RESUME: rc = qspi_resume(dev); break; default: rc = -ENOTSUP; } qspi_clock_div_restore(); qspi_unlock(dev); return rc; } #endif /* CONFIG_PM_DEVICE */ void z_impl_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable) { struct qspi_nor_data *dev_data = dev->data; if (dev_data->xip_enabled == enable) { return; } qspi_acquire(dev); #if NRF_QSPI_HAS_XIPEN nrf_qspi_xip_set(NRF_QSPI, enable); #endif if (enable) { (void)nrfx_qspi_activate(false); } dev_data->xip_enabled = enable; qspi_release(dev); } #ifdef CONFIG_USERSPACE #include <zephyr/internal/syscall_handler.h> void z_vrfy_nrf_qspi_nor_xip_enable(const struct device *dev, bool enable) { K_OOPS(K_SYSCALL_SPECIFIC_DRIVER(dev, K_OBJ_DRIVER_FLASH, &qspi_nor_api)); z_impl_nrf_qspi_nor_xip_enable(dev, enable); } #include <zephyr/syscalls/nrf_qspi_nor_xip_enable_mrsh.c> #endif /* CONFIG_USERSPACE */ static struct qspi_nor_data qspi_nor_dev_data = { #ifdef 
CONFIG_MULTITHREADING .sem = Z_SEM_INITIALIZER(qspi_nor_dev_data.sem, 1, 1), .sync = Z_SEM_INITIALIZER(qspi_nor_dev_data.sync, 0, 1), #endif /* CONFIG_MULTITHREADING */ }; NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(QSPI_NODE); PINCTRL_DT_DEFINE(QSPI_NODE); static const struct qspi_nor_config qspi_nor_dev_config = { .nrfx_cfg.skip_gpio_cfg = true, .nrfx_cfg.skip_psel_cfg = true, .pcfg = PINCTRL_DT_DEV_CONFIG_GET(QSPI_NODE), .nrfx_cfg.prot_if = { .readoc = COND_CODE_1(DT_INST_NODE_HAS_PROP(0, readoc), (_CONCAT(NRF_QSPI_READOC_, DT_STRING_UPPER_TOKEN(DT_DRV_INST(0), readoc))), (NRF_QSPI_READOC_FASTREAD)), .writeoc = COND_CODE_1(DT_INST_NODE_HAS_PROP(0, writeoc), (_CONCAT(NRF_QSPI_WRITEOC_, DT_STRING_UPPER_TOKEN(DT_DRV_INST(0), writeoc))), (NRF_QSPI_WRITEOC_PP)), .addrmode = DT_INST_PROP(0, address_size_32) ? NRF_QSPI_ADDRMODE_32BIT : NRF_QSPI_ADDRMODE_24BIT, }, .nrfx_cfg.phy_if = { .sck_freq = INST_0_SCK_CFG, .sck_delay = DT_INST_PROP(0, sck_delay), .spi_mode = INST_0_SPI_MODE, }, .nrfx_cfg.timeout = CONFIG_NORDIC_QSPI_NOR_TIMEOUT_MS, .size = INST_0_BYTES, .id = DT_INST_PROP(0, jedec_id), }; PM_DEVICE_DT_INST_DEFINE(0, qspi_nor_pm_action); DEVICE_DT_INST_DEFINE(0, qspi_nor_init, PM_DEVICE_DT_INST_GET(0), &qspi_nor_dev_data, &qspi_nor_dev_config, POST_KERNEL, CONFIG_NORDIC_QSPI_NOR_INIT_PRIORITY, &qspi_nor_api); ```
/content/code_sandbox/drivers/flash/nrf_qspi_nor.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,929
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_fmc #include <string.h> #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/flash.h> #include <zephyr/logging/log.h> #include "flash_priv.h" #include <NuMicro.h> LOG_MODULE_REGISTER(flash_numaker, CONFIG_FLASH_LOG_LEVEL); #define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash) #define SOC_NV_FLASH_WRITE_BLOCK_SIZE DT_PROP_OR(SOC_NV_FLASH_NODE, write_block_size, 0x04) struct flash_numaker_data { FMC_T *fmc; struct k_sem write_lock; uint32_t flash_block_base; }; static const struct flash_parameters flash_numaker_parameters = { .write_block_size = SOC_NV_FLASH_WRITE_BLOCK_SIZE, .erase_value = 0xff, }; /* Validate offset and length */ static bool flash_numaker_is_range_valid(off_t offset, size_t len) { uint32_t aprom_size = (FMC_APROM_END - FMC_APROM_BASE); /* check for min value */ if ((offset < 0) || (len == 0)) { return false; } /* check for max value */ if (offset >= aprom_size || len > aprom_size || (aprom_size - offset) < len) { return false; } return true; } /* * Erase a flash memory area. 
* * param dev Device struct * param offset The address's offset * param len The size of the buffer * return 0 on success * return -EINVAL erroneous code */ static int flash_numaker_erase(const struct device *dev, off_t offset, size_t len) { struct flash_numaker_data *dev_data = dev->data; uint32_t rc = 0; unsigned int key; int page_nums = (len / FMC_FLASH_PAGE_SIZE); uint32_t addr = dev_data->flash_block_base + offset; /* return SUCCESS for len == 0 (required by tests/drivers/flash) */ if (!len) { return 0; } /* Validate range */ if (!flash_numaker_is_range_valid(offset, len)) { return -EINVAL; } /* check alignment and erase only by pages */ if (((addr % FMC_FLASH_PAGE_SIZE) != 0) || ((len % FMC_FLASH_PAGE_SIZE) != 0)) { return -EINVAL; } /* take semaphore */ if (k_sem_take(&dev_data->write_lock, K_NO_WAIT)) { return -EACCES; } SYS_UnlockReg(); key = irq_lock(); while (page_nums) { if (((len >= FMC_BANK_SIZE)) && ((addr % FMC_BANK_SIZE) == 0)) { if (FMC_Erase_Bank(addr)) { LOG_ERR("Erase flash bank failed or erase time-out"); rc = -EIO; goto done; } page_nums -= (FMC_BANK_SIZE / FMC_FLASH_PAGE_SIZE); addr += FMC_BANK_SIZE; } else { /* erase page */ if (FMC_Erase(addr)) { LOG_ERR("Erase flash page failed or erase time-out"); rc = -EIO; goto done; } page_nums--; addr += FMC_FLASH_PAGE_SIZE; } } done: SYS_LockReg(); irq_unlock(key); /* release semaphore */ k_sem_give(&dev_data->write_lock); return rc; } /* * Read a flash memory area. 
* * param dev Device struct * param offset The address's offset * param data The buffer to store or read the value * param length The size of the buffer * return 0 on success, * return -EIO erroneous code */ static int flash_numaker_read(const struct device *dev, off_t offset, void *data, size_t len) { struct flash_numaker_data *dev_data = dev->data; uint32_t addr = dev_data->flash_block_base + offset; /* return SUCCESS for len == 0 (required by tests/drivers/flash) */ if (!len) { return 0; } /* Validate range */ if (!flash_numaker_is_range_valid(offset, len)) { return -EINVAL; } /* read flash */ memcpy(data, (void *)addr, len); return 0; } static int32_t flash_numaker_block_write(uint32_t u32_addr, uint8_t *pu8_data, int block_size) { int32_t retval; uint32_t *pu32_data = (uint32_t *)pu8_data; SYS_UnlockReg(); if (block_size == 4) { retval = FMC_Write(u32_addr, *pu32_data); } else if (block_size == 8) { retval = FMC_Write8Bytes(u32_addr, *pu32_data, *(pu32_data + 1)); } else { retval = -1; } SYS_LockReg(); return retval; } static int flash_numaker_write(const struct device *dev, off_t offset, const void *data, size_t len) { struct flash_numaker_data *dev_data = dev->data; uint32_t rc = 0; unsigned int key; uint32_t addr = dev_data->flash_block_base + offset; int block_size = flash_numaker_parameters.write_block_size; int blocks = (len / flash_numaker_parameters.write_block_size); uint8_t *pu8_data = (uint8_t *)data; /* return SUCCESS for len == 0 (required by tests/drivers/flash) */ if (!len) { return 0; } /* Validate range */ if (!flash_numaker_is_range_valid(offset, len)) { return -EINVAL; } /* Validate address alignment */ if ((addr % flash_numaker_parameters.write_block_size) != 0) { return -EINVAL; } /* Validate write size be multiples of the write block size */ if ((len % block_size) != 0) { return -EINVAL; } /* Validate offset be multiples of the write block size */ if ((offset % block_size) != 0) { return -EINVAL; } if (k_sem_take(&dev_data->write_lock, 
K_FOREVER)) { return -EACCES; } key = irq_lock(); while (blocks) { if (flash_numaker_block_write(addr, pu8_data, block_size)) { rc = -EIO; goto done; } pu8_data += block_size; addr += block_size; blocks--; } done: irq_unlock(key); k_sem_give(&dev_data->write_lock); return rc; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static const struct flash_pages_layout dev_layout = { .pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) / DT_PROP(SOC_NV_FLASH_NODE, erase_block_size), .pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size), }; static void flash_numaker_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { *layout = &dev_layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static const struct flash_parameters *flash_numaker_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_numaker_parameters; } static struct flash_numaker_data flash_data; static const struct flash_driver_api flash_numaker_api = { .erase = flash_numaker_erase, .write = flash_numaker_write, .read = flash_numaker_read, .get_parameters = flash_numaker_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_numaker_pages_layout, #endif }; static int flash_numaker_init(const struct device *dev) { struct flash_numaker_data *dev_data = dev->data; k_sem_init(&dev_data->write_lock, 1, 1); /* Enable FMC ISP function */ SYS_UnlockReg(); FMC_Open(); /* Enable APROM update. */ FMC_ENABLE_AP_UPDATE(); SYS_LockReg(); dev_data->flash_block_base = (uint32_t)FMC_APROM_BASE; dev_data->fmc = (FMC_T *)DT_REG_ADDR(DT_NODELABEL(fmc)); return 0; } DEVICE_DT_INST_DEFINE(0, flash_numaker_init, NULL, &flash_data, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_numaker_api); ```
/content/code_sandbox/drivers/flash/soc_flash_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,838
```c /* * */ #define DT_DRV_COMPAT atmel_sam0_nvmctrl #define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash) #define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flash_sam0); #include <zephyr/device.h> #include <zephyr/drivers/flash.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <soc.h> #include <string.h> #define FLASH_WRITE_BLK_SZ DT_PROP(SOC_NV_FLASH_NODE, write_block_size) BUILD_ASSERT((FLASH_WRITE_BLK_SZ % sizeof(uint32_t)) == 0, "unsupported write-block-size"); /* * Zephyr and the SAM0 series use different and conflicting names for * the erasable units and programmable units: * * The erase unit is a row, which is a 'page' in Zephyr terms. * The program unit is a page, which is a 'write_block' in Zephyr. * * This file uses the SAM0 names internally and the Zephyr names in * any error messages. */ /* * Number of lock regions. The number is fixed and the region size * grows with the flash size. */ #define LOCK_REGIONS DT_INST_PROP(0, lock_regions) #define LOCK_REGION_SIZE (FLASH_SIZE / LOCK_REGIONS) #if defined(NVMCTRL_BLOCK_SIZE) #define ROW_SIZE NVMCTRL_BLOCK_SIZE #elif defined(NVMCTRL_ROW_SIZE) #define ROW_SIZE NVMCTRL_ROW_SIZE #endif #define PAGES_PER_ROW (ROW_SIZE / FLASH_PAGE_SIZE) #define FLASH_MEM(_a) ((uint32_t *)((uint8_t *)((_a) + CONFIG_FLASH_BASE_ADDRESS))) struct flash_sam0_data { #if CONFIG_SOC_FLASH_SAM0_EMULATE_BYTE_PAGES /* NOTE: this buffer can be large, avoid placing it on the stack... 
*/ uint8_t buf[ROW_SIZE]; #endif #if defined(CONFIG_MULTITHREADING) struct k_sem sem; #endif }; #if CONFIG_FLASH_PAGE_LAYOUT static const struct flash_pages_layout flash_sam0_pages_layout = { .pages_count = CONFIG_FLASH_SIZE * 1024 / ROW_SIZE, .pages_size = ROW_SIZE, }; #endif static const struct flash_parameters flash_sam0_parameters = { #if CONFIG_SOC_FLASH_SAM0_EMULATE_BYTE_PAGES .write_block_size = 1, #else .write_block_size = FLASH_WRITE_BLK_SZ, #endif .erase_value = 0xff, }; static int flash_sam0_write_protection(const struct device *dev, bool enable); static inline void flash_sam0_sem_take(const struct device *dev) { #if defined(CONFIG_MULTITHREADING) struct flash_sam0_data *ctx = dev->data; k_sem_take(&ctx->sem, K_FOREVER); #endif } static inline void flash_sam0_sem_give(const struct device *dev) { #if defined(CONFIG_MULTITHREADING) struct flash_sam0_data *ctx = dev->data; k_sem_give(&ctx->sem); #endif } static int flash_sam0_valid_range(off_t offset, size_t len) { if (offset < 0) { LOG_WRN("0x%lx: before start of flash", (long)offset); return -EINVAL; } if ((offset + len) > CONFIG_FLASH_SIZE * 1024) { LOG_WRN("0x%lx: ends past the end of flash", (long)offset); return -EINVAL; } return 0; } static void flash_sam0_wait_ready(void) { #ifdef NVMCTRL_STATUS_READY while (NVMCTRL->STATUS.bit.READY == 0) { } #else while (NVMCTRL->INTFLAG.bit.READY == 0) { } #endif } static int flash_sam0_check_status(off_t offset) { flash_sam0_wait_ready(); #ifdef NVMCTRL_INTFLAG_PROGE NVMCTRL_INTFLAG_Type status = NVMCTRL->INTFLAG; /* Clear any flags */ NVMCTRL->INTFLAG.reg = status.reg; #else NVMCTRL_STATUS_Type status = NVMCTRL->STATUS; /* Clear any flags */ NVMCTRL->STATUS = status; #endif if (status.bit.PROGE) { LOG_ERR("programming error at 0x%lx", (long)offset); return -EIO; } else if (status.bit.LOCKE) { LOG_ERR("lock error at 0x%lx", (long)offset); return -EROFS; } else if (status.bit.NVME) { LOG_ERR("NVM error at 0x%lx", (long)offset); return -EIO; } return 0; } /* * 
Data to be written to the NVM block are first written to and stored * in an internal buffer called the page buffer. The page buffer contains * the same number of bytes as an NVM page. Writes to the page buffer must * be 16 or 32 bits. 8-bit writes to the page buffer are not allowed and * will cause a system exception */ static int flash_sam0_write_page(const struct device *dev, off_t offset, const void *data, size_t len) { const uint32_t *src = data; const uint32_t *end = src + (len / sizeof(*src)); uint32_t *dst = FLASH_MEM(offset); int err; #ifdef NVMCTRL_CTRLA_CMD_PBC NVMCTRL->CTRLA.reg = NVMCTRL_CTRLA_CMD_PBC | NVMCTRL_CTRLA_CMDEX_KEY; #else NVMCTRL->CTRLB.reg = NVMCTRL_CTRLB_CMD_PBC | NVMCTRL_CTRLB_CMDEX_KEY; #endif flash_sam0_wait_ready(); /* Ensure writes happen 32 bits at a time. */ for (; src != end; src++, dst++) { *dst = UNALIGNED_GET((uint32_t *)src); } #ifdef NVMCTRL_CTRLA_CMD_WP NVMCTRL->CTRLA.reg = NVMCTRL_CTRLA_CMD_WP | NVMCTRL_CTRLA_CMDEX_KEY; #else NVMCTRL->CTRLB.reg = NVMCTRL_CTRLB_CMD_WP | NVMCTRL_CTRLB_CMDEX_KEY; #endif err = flash_sam0_check_status(offset); if (err != 0) { return err; } if (memcmp(data, FLASH_MEM(offset), len) != 0) { LOG_ERR("verify error at offset 0x%lx", (long)offset); return -EIO; } return 0; } static int flash_sam0_erase_row(const struct device *dev, off_t offset) { *FLASH_MEM(offset) = 0U; #ifdef NVMCTRL_CTRLA_CMD_ER NVMCTRL->CTRLA.reg = NVMCTRL_CTRLA_CMD_ER | NVMCTRL_CTRLA_CMDEX_KEY; #else NVMCTRL->CTRLB.reg = NVMCTRL_CTRLB_CMD_EB | NVMCTRL_CTRLB_CMDEX_KEY; #endif return flash_sam0_check_status(offset); } #if CONFIG_SOC_FLASH_SAM0_EMULATE_BYTE_PAGES static int flash_sam0_commit(const struct device *dev, off_t base) { struct flash_sam0_data *ctx = dev->data; int err; int page; err = flash_sam0_erase_row(dev, base); if (err != 0) { return err; } for (page = 0; page < PAGES_PER_ROW; page++) { err = flash_sam0_write_page( dev, base + page * FLASH_PAGE_SIZE, &ctx->buf[page * FLASH_PAGE_SIZE], ROW_SIZE); if (err != 0) { 
return err; } } return 0; } static int flash_sam0_write(const struct device *dev, off_t offset, const void *data, size_t len) { struct flash_sam0_data *ctx = dev->data; const uint8_t *pdata = data; int err; LOG_DBG("0x%lx: len %zu", (long)offset, len); err = flash_sam0_valid_range(offset, len); if (err != 0) { return err; } if (len == 0) { return 0; } flash_sam0_sem_take(dev); err = flash_sam0_write_protection(dev, false); size_t pos = 0; while ((err == 0) && (pos < len)) { off_t start = offset % sizeof(ctx->buf); off_t base = offset - start; size_t len_step = sizeof(ctx->buf) - start; size_t len_copy = MIN(len - pos, len_step); if (len_copy < sizeof(ctx->buf)) { memcpy(ctx->buf, (void *)base, sizeof(ctx->buf)); } memcpy(&(ctx->buf[start]), &(pdata[pos]), len_copy); err = flash_sam0_commit(dev, base); offset += len_step; pos += len_copy; } int err2 = flash_sam0_write_protection(dev, true); if (!err) { err = err2; } flash_sam0_sem_give(dev); return err; } #else /* CONFIG_SOC_FLASH_SAM0_EMULATE_BYTE_PAGES */ static int flash_sam0_write(const struct device *dev, off_t offset, const void *data, size_t len) { const uint8_t *pdata = data; int err; err = flash_sam0_valid_range(offset, len); if (err != 0) { return err; } if ((offset % FLASH_WRITE_BLK_SZ) != 0) { LOG_WRN("0x%lx: not on a write block boundary", (long)offset); return -EINVAL; } if ((len % FLASH_WRITE_BLK_SZ) != 0) { LOG_WRN("%zu: not a integer number of write blocks", len); return -EINVAL; } flash_sam0_sem_take(dev); err = flash_sam0_write_protection(dev, false); if (err == 0) { /* Maximum size without crossing a page */ size_t eop_len = FLASH_PAGE_SIZE - (offset % FLASH_PAGE_SIZE); size_t write_len = MIN(len, eop_len); while (len > 0) { err = flash_sam0_write_page(dev, offset, pdata, write_len); if (err != 0) { break; } offset += write_len; pdata += write_len; len -= write_len; write_len = MIN(len, FLASH_PAGE_SIZE); } } int err2 = flash_sam0_write_protection(dev, true); if (!err) { err = err2; } 
flash_sam0_sem_give(dev); return err; } #endif static int flash_sam0_read(const struct device *dev, off_t offset, void *data, size_t len) { int err; err = flash_sam0_valid_range(offset, len); if (err != 0) { return err; } memcpy(data, (uint8_t *)CONFIG_FLASH_BASE_ADDRESS + offset, len); return 0; } static int flash_sam0_erase(const struct device *dev, off_t offset, size_t size) { int err; err = flash_sam0_valid_range(offset, ROW_SIZE); if (err != 0) { return err; } if ((offset % ROW_SIZE) != 0) { LOG_WRN("0x%lx: not on a page boundary", (long)offset); return -EINVAL; } if ((size % ROW_SIZE) != 0) { LOG_WRN("%zu: not a integer number of pages", size); return -EINVAL; } flash_sam0_sem_take(dev); err = flash_sam0_write_protection(dev, false); if (err == 0) { for (size_t addr = offset; addr < offset + size; addr += ROW_SIZE) { err = flash_sam0_erase_row(dev, addr); if (err != 0) { break; } } } int err2 = flash_sam0_write_protection(dev, true); if (!err) { err = err2; } flash_sam0_sem_give(dev); return err; } static int flash_sam0_write_protection(const struct device *dev, bool enable) { off_t offset; int err; for (offset = 0; offset < CONFIG_FLASH_SIZE * 1024; offset += LOCK_REGION_SIZE) { NVMCTRL->ADDR.reg = offset + CONFIG_FLASH_BASE_ADDRESS; #ifdef NVMCTRL_CTRLA_CMD_LR if (enable) { NVMCTRL->CTRLA.reg = NVMCTRL_CTRLA_CMD_LR | NVMCTRL_CTRLA_CMDEX_KEY; } else { NVMCTRL->CTRLA.reg = NVMCTRL_CTRLA_CMD_UR | NVMCTRL_CTRLA_CMDEX_KEY; } #else if (enable) { NVMCTRL->CTRLB.reg = NVMCTRL_CTRLB_CMD_LR | NVMCTRL_CTRLB_CMDEX_KEY; } else { NVMCTRL->CTRLB.reg = NVMCTRL_CTRLB_CMD_UR | NVMCTRL_CTRLB_CMDEX_KEY; } #endif err = flash_sam0_check_status(offset); if (err != 0) { goto done; } } done: return err; } #if CONFIG_FLASH_PAGE_LAYOUT void flash_sam0_page_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { *layout = &flash_sam0_pages_layout; *layout_size = 1; } #endif static const struct flash_parameters * flash_sam0_get_parameters(const 
struct device *dev) { ARG_UNUSED(dev); return &flash_sam0_parameters; } static int flash_sam0_init(const struct device *dev) { #if defined(CONFIG_MULTITHREADING) struct flash_sam0_data *ctx = dev->data; k_sem_init(&ctx->sem, 1, 1); #endif #ifdef PM_APBBMASK_NVMCTRL /* Ensure the clock is on. */ PM->APBBMASK.bit.NVMCTRL_ = 1; #else MCLK->APBBMASK.reg |= MCLK_APBBMASK_NVMCTRL; #endif #ifdef NVMCTRL_CTRLB_MANW /* Require an explicit write command */ NVMCTRL->CTRLB.bit.MANW = 1; #elif NVMCTRL_CTRLA_WMODE /* Set manual write mode */ NVMCTRL->CTRLA.bit.WMODE = NVMCTRL_CTRLA_WMODE_MAN_Val; #endif return flash_sam0_write_protection(dev, false); } static const struct flash_driver_api flash_sam0_api = { .erase = flash_sam0_erase, .write = flash_sam0_write, .read = flash_sam0_read, .get_parameters = flash_sam0_get_parameters, #ifdef CONFIG_FLASH_PAGE_LAYOUT .page_layout = flash_sam0_page_layout, #endif }; static struct flash_sam0_data flash_sam0_data_0; DEVICE_DT_INST_DEFINE(0, flash_sam0_init, NULL, &flash_sam0_data_0, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_sam0_api); ```
/content/code_sandbox/drivers/flash/flash_sam0.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,321
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/devicetree.h> #include <zephyr/shell/shell.h> #include <zephyr/sys/util.h> #include <stdlib.h> #include <string.h> #include <zephyr/drivers/flash.h> /* Buffer is only needed for bytes that follow command and offset */ #define BUF_ARRAY_CNT (CONFIG_SHELL_ARGC_MAX - 2) /* This only issues compilation error when it would not be possible * to extract at least one byte from command line arguments, yet * it does not warrant successful writes if BUF_ARRAY_CNT * is smaller than flash write alignment. */ BUILD_ASSERT(BUF_ARRAY_CNT >= 1); static const struct device *const zephyr_flash_controller = DEVICE_DT_GET_OR_NULL(DT_CHOSEN(zephyr_flash_controller)); static uint8_t __aligned(4) test_arr[CONFIG_FLASH_SHELL_BUFFER_SIZE]; static int parse_helper(const struct shell *sh, size_t *argc, char **argv[], const struct device * *flash_dev, uint32_t *addr) { char *endptr; *addr = strtoul((*argv)[1], &endptr, 16); if (*endptr != '\0') { /* flash controller from user input */ *flash_dev = device_get_binding((*argv)[1]); if (!*flash_dev) { shell_error(sh, "Given flash device was not found"); return -ENODEV; } } else if (zephyr_flash_controller != NULL) { /* default to zephyr,flash-controller */ if (!device_is_ready(zephyr_flash_controller)) { shell_error(sh, "Default flash driver not ready"); return -ENODEV; } *flash_dev = zephyr_flash_controller; } else { /* no flash controller given, no default available */ shell_error(sh, "No flash device specified (required)"); return -ENODEV; } if (*endptr == '\0') { return 0; } if (*argc < 3) { shell_error(sh, "Missing address."); return -EINVAL; } *addr = strtoul((*argv)[2], &endptr, 16); (*argc)--; (*argv)++; return 0; } static int cmd_erase(const struct shell *sh, size_t argc, char *argv[]) { int result = -ENOTSUP; #if defined(CONFIG_FLASH_HAS_EXPLICIT_ERASE) const struct device *flash_dev; uint32_t page_addr; uint32_t size; result = parse_helper(sh, &argc, &argv, &flash_dev, 
&page_addr); if (result) { return result; } if (argc > 2) { size = strtoul(argv[2], NULL, 16); } else { struct flash_pages_info info; result = flash_get_page_info_by_offs(flash_dev, page_addr, &info); if (result != 0) { shell_error(sh, "Could not determine page size, " "code %d.", result); return -EINVAL; } size = info.size; } result = flash_erase(flash_dev, page_addr, size); if (result) { shell_error(sh, "Erase Failed, code %d.", result); } else { shell_print(sh, "Erase success."); } #endif return result; } static int cmd_write(const struct shell *sh, size_t argc, char *argv[]) { uint32_t __aligned(4) check_array[BUF_ARRAY_CNT]; uint32_t __aligned(4) buf_array[BUF_ARRAY_CNT]; const struct device *flash_dev; uint32_t w_addr; int ret; size_t op_size; ret = parse_helper(sh, &argc, &argv, &flash_dev, &w_addr); if (ret) { return ret; } if (argc <= 2) { shell_error(sh, "Missing data to be written."); return -EINVAL; } op_size = 0; for (int i = 2; i < argc; i++) { int j = i - 2; buf_array[j] = strtoul(argv[i], NULL, 16); check_array[j] = ~buf_array[j]; op_size += sizeof(buf_array[0]); } if (flash_write(flash_dev, w_addr, buf_array, op_size) != 0) { shell_error(sh, "Write internal ERROR!"); return -EIO; } shell_print(sh, "Write OK."); if (flash_read(flash_dev, w_addr, check_array, op_size) < 0) { shell_print(sh, "Verification read ERROR!"); return -EIO; } if (memcmp(buf_array, check_array, op_size) == 0) { shell_print(sh, "Verified."); } else { shell_error(sh, "Verification ERROR!"); return -EIO; } return 0; } static int cmd_read(const struct shell *sh, size_t argc, char *argv[]) { const struct device *flash_dev; uint32_t addr; int todo; int upto; int cnt; int ret; ret = parse_helper(sh, &argc, &argv, &flash_dev, &addr); if (ret) { return ret; } if (argc > 2) { cnt = strtoul(argv[2], NULL, 16); } else { cnt = 1; } for (upto = 0; upto < cnt; upto += todo) { uint8_t data[SHELL_HEXDUMP_BYTES_IN_LINE]; todo = MIN(cnt - upto, SHELL_HEXDUMP_BYTES_IN_LINE); ret = 
flash_read(flash_dev, addr, data, todo); if (ret != 0) { shell_error(sh, "Read ERROR!"); return -EIO; } shell_hexdump_line(sh, addr, data, todo); addr += todo; } shell_print(sh, ""); return 0; } static int cmd_test(const struct shell *sh, size_t argc, char *argv[]) { const struct device *flash_dev; uint32_t repeat; int result; uint32_t addr; uint32_t size; static uint8_t __aligned(4) check_arr[CONFIG_FLASH_SHELL_BUFFER_SIZE]; result = parse_helper(sh, &argc, &argv, &flash_dev, &addr); if (result) { return result; } size = strtoul(argv[2], NULL, 16); repeat = strtoul(argv[3], NULL, 16); if (size > CONFIG_FLASH_SHELL_BUFFER_SIZE) { shell_error(sh, "<size> must be at most 0x%x.", CONFIG_FLASH_SHELL_BUFFER_SIZE); return -EINVAL; } if (repeat == 0) { repeat = 1; } for (uint32_t i = 0; i < size; i++) { test_arr[i] = (uint8_t)i; } result = 0; while (repeat--) { result = flash_erase(flash_dev, addr, size); if (result) { shell_error(sh, "Erase Failed, code %d.", result); break; } shell_print(sh, "Erase OK."); result = flash_write(flash_dev, addr, test_arr, size); if (result) { shell_error(sh, "Write failed, code %d", result); break; } shell_print(sh, "Write OK."); result = flash_read(flash_dev, addr, check_arr, size); if (result < 0) { shell_print(sh, "Verification read failed, code: %d", result); break; } if (memcmp(test_arr, check_arr, size) != 0) { shell_error(sh, "Verification ERROR!"); break; } shell_print(sh, "Verified OK."); } if (result == 0) { shell_print(sh, "Erase-Write-Verify test done."); } return result; } #ifdef CONFIG_FLASH_SHELL_TEST_COMMANDS const static uint8_t speed_types[][4] = { "B", "KiB", "MiB", "GiB" }; const static uint32_t speed_divisor = 1024; static int read_write_erase_validate(const struct shell *sh, size_t argc, char *argv[], uint32_t *size, uint32_t *repeat) { if (argc < 4) { shell_error(sh, "Missing parameters: <device> <offset> <size> <repeat>"); return -EINVAL; } *size = strtoul(argv[2], NULL, 0); *repeat = strtoul(argv[3], NULL, 0); if 
(*size == 0 || *size > CONFIG_FLASH_SHELL_BUFFER_SIZE) { shell_error(sh, "<size> must be between 0x1 and 0x%x.", CONFIG_FLASH_SHELL_BUFFER_SIZE); return -EINVAL; } if (*repeat == 0 || *repeat > 10) { shell_error(sh, "<repeat> must be between 1 and 10."); return -EINVAL; } return 0; } static void speed_output(const struct shell *sh, uint64_t total_time, double loops, double size) { double time_per_loop = (double)total_time / loops; double throughput = size; uint8_t speed_index = 0; if (time_per_loop > 0) { throughput /= (time_per_loop / 1000.0); } while (throughput >= (double)speed_divisor && speed_index < ARRAY_SIZE(speed_types)) { throughput /= (double)speed_divisor; ++speed_index; } shell_print(sh, "Total: %llums, Per loop: ~%.0fms, Speed: ~%.1f%sps", total_time, time_per_loop, throughput, speed_types[speed_index]); } static int cmd_read_test(const struct shell *sh, size_t argc, char *argv[]) { const struct device *flash_dev; uint32_t repeat; int result; uint32_t addr; uint32_t size; uint64_t start_time; uint64_t loop_time; uint64_t total_time = 0; uint32_t loops = 0; result = parse_helper(sh, &argc, &argv, &flash_dev, &addr); if (result) { return result; } result = read_write_erase_validate(sh, argc, argv, &size, &repeat); if (result) { return result; } while (repeat--) { start_time = k_uptime_get(); result = flash_read(flash_dev, addr, test_arr, size); loop_time = k_uptime_delta(&start_time); if (result) { shell_error(sh, "Read failed: %d", result); break; } ++loops; total_time += loop_time; shell_print(sh, "Loop #%u done in %llums.", loops, loop_time); } if (result == 0) { speed_output(sh, total_time, (double)loops, (double)size); } return result; } static int cmd_write_test(const struct shell *sh, size_t argc, char *argv[]) { const struct device *flash_dev; uint32_t repeat; int result; uint32_t addr; uint32_t size; uint64_t start_time; uint64_t loop_time; uint64_t total_time = 0; uint32_t loops = 0; result = parse_helper(sh, &argc, &argv, &flash_dev, &addr); 
if (result) { return result; } result = read_write_erase_validate(sh, argc, argv, &size, &repeat); if (result) { return result; } for (uint32_t i = 0; i < size; i++) { test_arr[i] = (uint8_t)i; } while (repeat--) { start_time = k_uptime_get(); result = flash_write(flash_dev, addr, test_arr, size); loop_time = k_uptime_delta(&start_time); if (result) { shell_error(sh, "Write failed: %d", result); break; } ++loops; total_time += loop_time; shell_print(sh, "Loop #%u done in %llu ticks.", loops, loop_time); } if (result == 0) { speed_output(sh, total_time, (double)loops, (double)size); } return result; } static int cmd_erase_test(const struct shell *sh, size_t argc, char *argv[]) { const struct device *flash_dev; uint32_t repeat; int result; uint32_t addr; uint32_t size; uint64_t start_time; uint64_t loop_time; uint64_t total_time = 0; uint32_t loops = 0; result = parse_helper(sh, &argc, &argv, &flash_dev, &addr); if (result) { return result; } result = read_write_erase_validate(sh, argc, argv, &size, &repeat); if (result) { return result; } for (uint32_t i = 0; i < size; i++) { test_arr[i] = (uint8_t)i; } while (repeat--) { start_time = k_uptime_get(); result = flash_erase(flash_dev, addr, size); loop_time = k_uptime_delta(&start_time); if (result) { shell_error(sh, "Erase failed: %d", result); break; } ++loops; total_time += loop_time; shell_print(sh, "Loop #%u done in %llums.", loops, loop_time); } if (result == 0) { speed_output(sh, total_time, (double)loops, (double)size); } return result; } static int cmd_erase_write_test(const struct shell *sh, size_t argc, char *argv[]) { const struct device *flash_dev; uint32_t repeat; int result_erase = 0; int result_write = 0; uint32_t addr; uint32_t size; uint64_t start_time; uint64_t loop_time; uint64_t total_time = 0; uint32_t loops = 0; result_erase = parse_helper(sh, &argc, &argv, &flash_dev, &addr); if (result_erase) { return result_erase; } result_erase = read_write_erase_validate(sh, argc, argv, &size, &repeat); if 
(result_erase) { return result_erase; } for (uint32_t i = 0; i < size; i++) { test_arr[i] = (uint8_t)i; } while (repeat--) { start_time = k_uptime_get(); result_erase = flash_erase(flash_dev, addr, size); result_write = flash_write(flash_dev, addr, test_arr, size); loop_time = k_uptime_delta(&start_time); if (result_erase) { shell_error(sh, "Erase failed: %d", result_erase); break; } if (result_write) { shell_error(sh, "Write failed: %d", result_write); break; } ++loops; total_time += loop_time; shell_print(sh, "Loop #%u done in %llums.", loops, loop_time); } if (result_erase == 0 && result_write == 0) { speed_output(sh, total_time, (double)loops, (double)size); } return (result_erase != 0 ? result_erase : result_write); } #endif static int set_bypass(const struct shell *sh, shell_bypass_cb_t bypass) { static bool in_use; if (bypass && in_use) { shell_error(sh, "flash load supports setting bypass on a single instance."); return -EBUSY; } /* Mark that we have set or unset the bypass function */ in_use = bypass != NULL; if (in_use) { shell_print(sh, "Loading..."); } shell_set_bypass(sh, bypass); return 0; } #define FLASH_LOAD_BUF_MAX 256 static const struct device *flash_load_dev; static uint32_t flash_load_buf_size; static uint32_t flash_load_addr; static uint32_t flash_load_total; static uint32_t flash_load_written; static uint32_t flash_load_chunk; static uint32_t flash_load_boff; static uint8_t flash_load_buf[FLASH_LOAD_BUF_MAX]; static void bypass_cb(const struct shell *sh, uint8_t *recv, size_t len) { uint32_t left_to_read = flash_load_total - flash_load_written - flash_load_boff; uint32_t to_copy = MIN(len, left_to_read); uint32_t copied = 0; while (copied < to_copy) { uint32_t buf_copy = MIN(to_copy, flash_load_buf_size - flash_load_boff); memcpy(flash_load_buf + flash_load_boff, recv + copied, buf_copy); flash_load_boff += buf_copy; copied += buf_copy; /* Buffer is full. Write data to memory. 
*/ if (flash_load_boff == flash_load_buf_size) { uint32_t addr = flash_load_addr + flash_load_written; int rc = flash_write(flash_load_dev, addr, flash_load_buf, flash_load_buf_size); if (rc != 0) { shell_error(sh, "Write to addr %x on dev %p ERROR!", addr, flash_load_dev); } shell_print(sh, "Written chunk %d", flash_load_chunk); flash_load_written += flash_load_buf_size; flash_load_chunk++; flash_load_boff = 0; } } /* When data is not aligned to flash_load_buf_size there may be partial write * at the end. */ if (flash_load_written < flash_load_total && flash_load_written + flash_load_boff >= flash_load_total) { uint32_t addr = flash_load_addr + flash_load_written; int rc = flash_write(flash_load_dev, addr, flash_load_buf, flash_load_boff); if (rc != 0) { set_bypass(sh, NULL); shell_error(sh, "Write to addr %x on dev %p ERROR!", addr, flash_load_dev); return; } shell_print(sh, "Written chunk %d", flash_load_chunk); flash_load_written += flash_load_boff; flash_load_chunk++; } if (flash_load_written >= flash_load_total) { set_bypass(sh, NULL); shell_print(sh, "Read all"); } } static int cmd_load(const struct shell *sh, size_t argc, char *argv[]) { const struct device *flash_dev; int result; uint32_t addr; uint32_t size; ssize_t write_block_size; result = parse_helper(sh, &argc, &argv, &flash_dev, &addr); if (result) { return result; } size = strtoul(argv[2], NULL, 0); write_block_size = flash_get_write_block_size(flash_dev); /* Check if size is aligned */ if (size % write_block_size != 0) { shell_error(sh, "Size must be %zu bytes aligned", write_block_size); return -EIO; } /* Align buffer size to write_block_size */ flash_load_buf_size = FLASH_LOAD_BUF_MAX; if (flash_load_buf_size < write_block_size) { shell_error(sh, "Size of buffer is too small to be aligned to %zu.", write_block_size); return -ENOSPC; } /* If buffer size is not aligned then change its size. 
*/ if (flash_load_buf_size % write_block_size != 0) { flash_load_buf_size -= flash_load_buf_size % write_block_size; shell_warn(sh, "Load buffer was not aligned to %zu.", write_block_size); shell_warn(sh, "Effective load buffer size was set from %d to %d", FLASH_LOAD_BUF_MAX, flash_load_buf_size); } /* Prepare data for callback. */ flash_load_dev = flash_dev; flash_load_addr = addr; flash_load_total = size; flash_load_written = 0; flash_load_boff = 0; flash_load_chunk = 0; shell_print(sh, "Send %d bytes to complete flash load command", size); set_bypass(sh, bypass_cb); return 0; } static int cmd_page_info(const struct shell *sh, size_t argc, char *argv[]) { const struct device *flash_dev; struct flash_pages_info info; int result; uint32_t addr; result = parse_helper(sh, &argc, &argv, &flash_dev, &addr); if (result) { return result; } result = flash_get_page_info_by_offs(flash_dev, addr, &info); if (result != 0) { shell_error(sh, "Could not determine page size, error code %d.", result); return -EINVAL; } shell_print(sh, "Page for address 0x%x:\nstart offset: 0x%lx\nsize: %zu\nindex: %d", addr, info.start_offset, info.size, info.index); return 0; } static void device_name_get(size_t idx, struct shell_static_entry *entry); SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get); static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? 
dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_device_name; } SHELL_STATIC_SUBCMD_SET_CREATE(flash_cmds, SHELL_CMD_ARG(erase, &dsub_device_name, "[<device>] <page address> [<size>]", cmd_erase, 2, 2), SHELL_CMD_ARG(read, &dsub_device_name, "[<device>] <address> [<Dword count>]", cmd_read, 2, 2), SHELL_CMD_ARG(test, &dsub_device_name, "[<device>] <address> <size> <repeat count>", cmd_test, 4, 1), SHELL_CMD_ARG(write, &dsub_device_name, "[<device>] <address> <dword> [<dword>...]", cmd_write, 3, BUF_ARRAY_CNT), SHELL_CMD_ARG(load, &dsub_device_name, "[<device>] <address> <size>", cmd_load, 3, 1), SHELL_CMD_ARG(page_info, &dsub_device_name, "[<device>] <address>", cmd_page_info, 2, 1), #ifdef CONFIG_FLASH_SHELL_TEST_COMMANDS SHELL_CMD_ARG(read_test, &dsub_device_name, "[<device>] <address> <size> <repeat count>", cmd_read_test, 4, 1), SHELL_CMD_ARG(write_test, &dsub_device_name, "[<device>] <address> <size> <repeat count>", cmd_write_test, 4, 1), SHELL_CMD_ARG(erase_test, &dsub_device_name, "[<device>] <address> <size> <repeat count>", cmd_erase_test, 4, 1), SHELL_CMD_ARG(erase_write_test, &dsub_device_name, "[<device>] <address> <size> <repeat count>", cmd_erase_write_test, 4, 1), #endif SHELL_SUBCMD_SET_END ); static int cmd_flash(const struct shell *sh, size_t argc, char **argv) { shell_error(sh, "%s:unknown parameter: %s", argv[0], argv[1]); return -EINVAL; } SHELL_CMD_ARG_REGISTER(flash, &flash_cmds, "Flash shell commands", cmd_flash, 2, 0); ```
/content/code_sandbox/drivers/flash/flash_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,259
```c /* * */ #define DT_DRV_COMPAT nxp_imx_flexspi_mx25um51345g #include <zephyr/drivers/flash.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/sys/util.h> #include "spi_nor.h" #include "memc_mcux_flexspi.h" #ifdef CONFIG_HAS_MCUX_CACHE #include <fsl_cache.h> #endif #define NOR_ERASE_VALUE 0xff #ifdef CONFIG_FLASH_MCUX_FLEXSPI_NOR_WRITE_BUFFER static uint8_t nor_write_buf[SPI_NOR_PAGE_SIZE]; #endif /* * NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, Any external functions * called while interacting with the flexspi MUST be relocated to SRAM or ITCM * at runtime, so that the chip does not access the flexspi to read program * instructions while it is being written to * * Additionally, no data used by this driver should be stored in flash. */ #if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_FLASH_LOG_LEVEL > 0) #warning "Enabling flash driver logging and XIP mode simultaneously can cause \ read-while-write hazards. This configuration is not recommended." 
#endif /* FLASH_ENABLE_OCTAL_CMD: (01 = STR OPI Enable) , (02 = DTR OPI Enable) */ #if CONFIG_FLASH_MCUX_FLEXSPI_MX25UM51345G_OPI_DTR #define NOR_FLASH_ENABLE_OCTAL_CMD 0x2 /* In OPI DTR mode, all writes must be 2 byte aligned, and multiples of 2 bytes */ #define NOR_WRITE_SIZE 2 #else #define NOR_FLASH_ENABLE_OCTAL_CMD 0x1 #define NOR_WRITE_SIZE 1 #endif LOG_MODULE_REGISTER(flash_flexspi_nor, CONFIG_FLASH_LOG_LEVEL); enum { /* Instructions matching with XIP layout */ READ, WRITE_ENABLE_OPI, WRITE_ENABLE, ERASE_SECTOR, PAGE_PROGRAM_INPUT, PAGE_PROGRAM, READ_ID_OPI, ENTER_OPI, READ_STATUS_REG, ERASE_CHIP, }; /* Device variables used in critical sections should be in this structure */ struct flash_flexspi_nor_data { const struct device *controller; flexspi_device_config_t config; flexspi_port_t port; struct flash_pages_layout layout; struct flash_parameters flash_parameters; }; static const uint32_t flash_flexspi_nor_lut[][4] = { [READ_ID_OPI] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x9F, kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x60), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_DUMMY_DDR, kFLEXSPI_8PAD, 0x16), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0), }, [WRITE_ENABLE] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0x06, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0), }, [ENTER_OPI] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_1PAD, 0x72, kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_1PAD, 0x20), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_1PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0), }, #if (NOR_FLASH_ENABLE_OCTAL_CMD == 0x1) [READ_STATUS_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x05, kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0xFA), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_DUMMY_SDR, kFLEXSPI_8PAD, 0x14), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_SDR, kFLEXSPI_8PAD, 0x04, 
kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0), }, [WRITE_ENABLE_OPI] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x06, kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0xF9), }, [ERASE_SECTOR] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x21, kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0xDE), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_STOP, kFLEXSPI_8PAD, 0), }, [ERASE_CHIP] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x60, kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x9F), }, [READ] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0xEC, kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x13), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_DUMMY_SDR, kFLEXSPI_8PAD, 0x14), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_SDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0), }, [PAGE_PROGRAM] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x12, kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0xED), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_SDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_WRITE_SDR, kFLEXSPI_8PAD, 0x04), }, #else [READ_STATUS_REG] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05, kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xFA), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x4), }, [WRITE_ENABLE_OPI] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x06, kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xF9), }, [ERASE_SECTOR] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x21, kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xDE), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_STOP, kFLEXSPI_8PAD, 0), }, [ERASE_CHIP] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x60, kFLEXSPI_Command_SDR, kFLEXSPI_8PAD, 0x9F), }, [READ] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xEE, kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x11), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, 
kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_DUMMY_DDR, kFLEXSPI_8PAD, 0x08), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04, kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0), }, [PAGE_PROGRAM] = { FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x12, kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xED), FLEXSPI_LUT_SEQ(kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x20, kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x04), }, #endif }; static int flash_flexspi_nor_get_vendor_id(const struct device *dev, uint8_t *vendor_id) { struct flash_flexspi_nor_data *data = dev->data; uint32_t buffer = 0; int ret; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_ID_OPI, .data = &buffer, .dataSize = 1, }; LOG_DBG("Reading id"); ret = memc_flexspi_transfer(data->controller, &transfer); *vendor_id = buffer; return ret; } static int flash_flexspi_nor_read_status(const struct device *dev, uint32_t *status) { struct flash_flexspi_nor_data *data = dev->data; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .cmdType = kFLEXSPI_Read, .SeqNumber = 1, .seqIndex = READ_STATUS_REG, .data = status, .dataSize = 1, }; LOG_DBG("Reading status register"); return memc_flexspi_transfer(data->controller, &transfer); } static int flash_flexspi_nor_write_status(const struct device *dev, uint32_t *status) { struct flash_flexspi_nor_data *data = dev->data; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .cmdType = kFLEXSPI_Write, .SeqNumber = 1, .seqIndex = ENTER_OPI, .data = status, .dataSize = 1, }; LOG_DBG("Writing status register"); return memc_flexspi_transfer(data->controller, &transfer); } static int flash_flexspi_nor_write_enable(const struct device *dev, bool enableOctal) { struct flash_flexspi_nor_data *data = dev->data; flexspi_transfer_t transfer; transfer.deviceAddress = 0; transfer.port = data->port; transfer.cmdType = kFLEXSPI_Command; transfer.SeqNumber = 1; if 
(enableOctal) { transfer.seqIndex = WRITE_ENABLE_OPI; } else { transfer.seqIndex = WRITE_ENABLE; } transfer.data = NULL; transfer.dataSize = 0; LOG_DBG("Enabling write"); return memc_flexspi_transfer(data->controller, &transfer); } static int flash_flexspi_nor_erase_sector(const struct device *dev, off_t offset) { struct flash_flexspi_nor_data *data = dev->data; flexspi_transfer_t transfer = { .deviceAddress = offset, .port = data->port, .cmdType = kFLEXSPI_Command, .SeqNumber = 1, .seqIndex = ERASE_SECTOR, .data = NULL, .dataSize = 0, }; LOG_DBG("Erasing sector at 0x%08zx", (ssize_t) offset); return memc_flexspi_transfer(data->controller, &transfer); } static int flash_flexspi_nor_erase_chip(const struct device *dev) { struct flash_flexspi_nor_data *data = dev->data; flexspi_transfer_t transfer = { .deviceAddress = 0, .port = data->port, .cmdType = kFLEXSPI_Command, .SeqNumber = 1, .seqIndex = ERASE_CHIP, .data = NULL, .dataSize = 0, }; LOG_DBG("Erasing chip"); return memc_flexspi_transfer(data->controller, &transfer); } static int flash_flexspi_nor_page_program(const struct device *dev, off_t offset, const void *buffer, size_t len) { struct flash_flexspi_nor_data *data = dev->data; flexspi_transfer_t transfer = { .deviceAddress = offset, .port = data->port, .cmdType = kFLEXSPI_Write, .SeqNumber = 1, .seqIndex = PAGE_PROGRAM, .data = (uint32_t *) buffer, .dataSize = len, }; LOG_DBG("Page programming %d bytes to 0x%08zx", len, (ssize_t) offset); return memc_flexspi_transfer(data->controller, &transfer); } static int flash_flexspi_nor_wait_bus_busy(const struct device *dev) { uint32_t status = 0; int ret; do { ret = flash_flexspi_nor_read_status(dev, &status); LOG_DBG("status: 0x%x", status); if (ret) { LOG_ERR("Could not read status"); return ret; } } while (status & BIT(0)); return 0; } static int flash_flexspi_enable_octal_mode(const struct device *dev) { struct flash_flexspi_nor_data *data = dev->data; /* FLASH_ENABLE_OCTAL_CMD: (01 = STR OPI Enable, 02 = DTR 
OPI Enable) */ uint32_t status = NOR_FLASH_ENABLE_OCTAL_CMD; flash_flexspi_nor_write_enable(dev, false); flash_flexspi_nor_write_status(dev, &status); flash_flexspi_nor_wait_bus_busy(dev); memc_flexspi_reset(data->controller); return 0; } static int flash_flexspi_nor_read(const struct device *dev, off_t offset, void *buffer, size_t len) { struct flash_flexspi_nor_data *data = dev->data; uint8_t *src = memc_flexspi_get_ahb_address(data->controller, data->port, offset); memcpy(buffer, src, len); return 0; } static int flash_flexspi_nor_write(const struct device *dev, off_t offset, const void *buffer, size_t len) { struct flash_flexspi_nor_data *data = dev->data; size_t size = len; uint8_t *src = (uint8_t *) buffer; int i; unsigned int key = 0; uint8_t *dst = memc_flexspi_get_ahb_address(data->controller, data->port, offset); if (memc_flexspi_is_running_xip(data->controller)) { /* * ==== ENTER CRITICAL SECTION ==== * No flash access should be performed in critical section. All * code and data accessed must reside in ram. 
*/ key = irq_lock(); } if (IS_ENABLED(CONFIG_FLASH_MCUX_FLEXSPI_MX25UM51345G_OPI_DTR)) { /* Check that write size and length are even */ if ((offset & 0x1) || (len & 0x1)) { return -EINVAL; } } while (len) { /* If the offset isn't a multiple of the NOR page size, we first need * to write the remaining part that fits, otherwise the write could * be wrapped around within the same page */ i = MIN(SPI_NOR_PAGE_SIZE - (offset % SPI_NOR_PAGE_SIZE), len); #ifdef CONFIG_FLASH_MCUX_FLEXSPI_NOR_WRITE_BUFFER memcpy(nor_write_buf, src, i); #endif flash_flexspi_nor_write_enable(dev, true); #ifdef CONFIG_FLASH_MCUX_FLEXSPI_NOR_WRITE_BUFFER flash_flexspi_nor_page_program(dev, offset, nor_write_buf, i); #else flash_flexspi_nor_page_program(dev, offset, src, i); #endif flash_flexspi_nor_wait_bus_busy(dev); memc_flexspi_reset(data->controller); src += i; offset += i; len -= i; } if (memc_flexspi_is_running_xip(data->controller)) { /* ==== EXIT CRITICAL SECTION ==== */ irq_unlock(key); } #ifdef CONFIG_HAS_MCUX_CACHE DCACHE_InvalidateByRange((uint32_t) dst, size); #endif return 0; } static int flash_flexspi_nor_erase(const struct device *dev, off_t offset, size_t size) { struct flash_flexspi_nor_data *data = dev->data; int num_sectors = size / SPI_NOR_SECTOR_SIZE; int i; unsigned int key = 0; uint8_t *dst = memc_flexspi_get_ahb_address(data->controller, data->port, offset); if (offset % SPI_NOR_SECTOR_SIZE) { LOG_ERR("Invalid offset"); return -EINVAL; } if (size % SPI_NOR_SECTOR_SIZE) { LOG_ERR("Invalid size"); return -EINVAL; } if (memc_flexspi_is_running_xip(data->controller)) { /* * ==== ENTER CRITICAL SECTION ==== * No flash access should be performed in critical section. All * code and data accessed must reside in ram. 
*/ key = irq_lock(); } if ((offset == 0) && (size == data->config.flashSize * KB(1))) { flash_flexspi_nor_write_enable(dev, true); flash_flexspi_nor_erase_chip(dev); flash_flexspi_nor_wait_bus_busy(dev); memc_flexspi_reset(data->controller); } else { for (i = 0; i < num_sectors; i++) { flash_flexspi_nor_write_enable(dev, true); flash_flexspi_nor_erase_sector(dev, offset); flash_flexspi_nor_wait_bus_busy(dev); memc_flexspi_reset(data->controller); offset += SPI_NOR_SECTOR_SIZE; } } if (memc_flexspi_is_running_xip(data->controller)) { /* ==== EXIT CRITICAL SECTION ==== */ irq_unlock(key); } #ifdef CONFIG_HAS_MCUX_CACHE DCACHE_InvalidateByRange((uint32_t) dst, size); #endif return 0; } static const struct flash_parameters *flash_flexspi_nor_get_parameters( const struct device *dev) { struct flash_flexspi_nor_data *data = dev->data; return &data->flash_parameters; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static void flash_flexspi_nor_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { struct flash_flexspi_nor_data *data = dev->data; *layout = &data->layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static int flash_flexspi_nor_init(const struct device *dev) { struct flash_flexspi_nor_data *data = dev->data; uint8_t vendor_id; if (!device_is_ready(data->controller)) { LOG_ERR("Controller device not ready"); return -ENODEV; } if (memc_flexspi_is_running_xip(data->controller)) { /* Wait for bus idle before configuring */ memc_flexspi_wait_bus_idle(data->controller); } if (memc_flexspi_set_device_config(data->controller, &data->config, (const uint32_t *)flash_flexspi_nor_lut, sizeof(flash_flexspi_nor_lut) / MEMC_FLEXSPI_CMD_SIZE, data->port)) { LOG_ERR("Could not set device configuration"); return -EINVAL; } memc_flexspi_reset(data->controller); if (flash_flexspi_enable_octal_mode(dev)) { LOG_ERR("Could not enable octal mode"); return -EIO; } if (flash_flexspi_nor_get_vendor_id(dev, &vendor_id)) { 
LOG_ERR("Could not read vendor id"); return -EIO; } LOG_DBG("Vendor id: 0x%0x", vendor_id); return 0; } static const struct flash_driver_api flash_flexspi_nor_api = { .erase = flash_flexspi_nor_erase, .write = flash_flexspi_nor_write, .read = flash_flexspi_nor_read, .get_parameters = flash_flexspi_nor_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_flexspi_nor_pages_layout, #endif }; #define CONCAT3(x, y, z) x ## y ## z #define CS_INTERVAL_UNIT(unit) \ CONCAT3(kFLEXSPI_CsIntervalUnit, unit, SckCycle) #define AHB_WRITE_WAIT_UNIT(unit) \ CONCAT3(kFLEXSPI_AhbWriteWaitUnit, unit, AhbCycle) #define FLASH_FLEXSPI_DEVICE_CONFIG(n) \ { \ .flexspiRootClk = MHZ(120), \ .flashSize = DT_INST_PROP(n, size) / 8 / KB(1), \ .CSIntervalUnit = \ CS_INTERVAL_UNIT( \ DT_INST_PROP(n, cs_interval_unit)), \ .CSInterval = DT_INST_PROP(n, cs_interval), \ .CSHoldTime = DT_INST_PROP(n, cs_hold_time), \ .CSSetupTime = DT_INST_PROP(n, cs_setup_time), \ .dataValidTime = DT_INST_PROP(n, data_valid_time), \ .columnspace = DT_INST_PROP(n, column_space), \ .enableWordAddress = DT_INST_PROP(n, word_addressable), \ .AWRSeqIndex = 0, \ .AWRSeqNumber = 0, \ .ARDSeqIndex = READ, \ .ARDSeqNumber = 1, \ .AHBWriteWaitUnit = \ AHB_WRITE_WAIT_UNIT( \ DT_INST_PROP(n, ahb_write_wait_unit)), \ .AHBWriteWaitInterval = \ DT_INST_PROP(n, ahb_write_wait_interval), \ } \ #define FLASH_FLEXSPI_NOR(n) \ static struct flash_flexspi_nor_data \ flash_flexspi_nor_data_##n = { \ .controller = DEVICE_DT_GET(DT_INST_BUS(n)), \ .config = FLASH_FLEXSPI_DEVICE_CONFIG(n), \ .port = DT_INST_REG_ADDR(n), \ .layout = { \ .pages_count = DT_INST_PROP(n, size) / 8 \ / SPI_NOR_SECTOR_SIZE, \ .pages_size = SPI_NOR_SECTOR_SIZE, \ }, \ .flash_parameters = { \ .write_block_size = NOR_WRITE_SIZE, \ .erase_value = NOR_ERASE_VALUE, \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ flash_flexspi_nor_init, \ NULL, \ &flash_flexspi_nor_data_##n, \ NULL, \ POST_KERNEL, \ CONFIG_FLASH_INIT_PRIORITY, \ &flash_flexspi_nor_api); 
DT_INST_FOREACH_STATUS_OKAY(FLASH_FLEXSPI_NOR) ```
/content/code_sandbox/drivers/flash/flash_mcux_flexspi_mx25um51345g.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,545
```objective-c /* * */ #ifndef CDNS_NAND_LL_H #define CDNS_NAND_LL_H #include <zephyr/device.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #define NAND_INT_SEM_TAKE(param_ptr) \ COND_CODE_1(IS_ENABLED(CONFIG_CDNS_NAND_INTERRUPT_SUPPORT), \ (k_sem_take(&(param_ptr->interrupt_sem_t), K_FOREVER)), ()) #define CNF_GET_INIT_COMP(x) (FIELD_GET(BIT(9), x)) #define CNF_GET_INIT_FAIL(x) (FIELD_GET(BIT(10), x)) #define CNF_GET_CTRL_BUSY(x) (FIELD_GET(BIT(8), x)) #define GET_PAGE_SIZE(x) (FIELD_GET(GENMASK(15, 0), x)) #define GET_PAGES_PER_BLOCK(x) (FIELD_GET(GENMASK(15, 0), x)) #define GET_SPARE_SIZE(x) (FIELD_GET(GENMASK(31, 16), x)) #define ONFI_TIMING_MODE_SDR(x) (FIELD_GET(GENMASK(15, 0), x)) #define ONFI_TIMING_MODE_NVDDR(x) (FIELD_GET(GENMASK(31, 15), x)) /* Controller parameter registers */ #define CNF_GET_NLUNS(x) (FIELD_GET(GENMASK(7, 0), x)) #define CNF_GET_DEV_TYPE(x) (FIELD_GET(GENMASK(31, 30), x)) #define CNF_CTRLPARAM_VERSION (0x800) #define CNF_CTRLPARAM_FEATURE (0x804) #define CNF_CTRLPARAM_MFR_ID (0x808) #define CNF_CTRLPARAM_DEV_AREA (0x80C) #define CNF_CTRLPARAM_DEV_PARAMS0 (0x810) #define CNF_CTRLPARAM_DEV_PARAMS1 (0x814) #define CNF_CTRLPARAM_DEV_FEATUERS (0x818) #define CNF_CTRLPARAM_DEV_BLOCKS_PLUN (0x81C) #define CNF_CTRLPARAM_ONFI_TIMING_0 (0x824) #define CNF_CTRLPARAM(_base, _reg) (_base + (CNF_CTRLPARAM_##_reg)) #define CNF_CMDREG_CTRL_STATUS (0x118) #define CNF_CMDREG(_base, _reg) (_base + (CNF_CMDREG_##_reg)) #define PINSEL(_x) (PINSEL##_x) #define PIN(_x) PINSEL(_x)##SEL /*Hardware Features Support*/ #define CNF_HW_NF_16_SUPPORT(x) (FIELD_GET(BIT(29), x)) #define CNF_HW_NVDDR_SS_SUPPORT(x) (FIELD_GET(BIT(27), x)) #define CNF_HW_ASYNC_SUPPORT(x) (FIELD_GET(BIT(26), x)) #define CNF_HW_DMA_DATA_WIDTH_SUPPORT(x) (FIELD_GET(BIT(21), x)) #define CNF_HW_DMA_ADDR_WIDTH_SUPPORT(x) (FIELD_GET(BIT(20), x)) #define CNF_HW_DI_PR_SUPPORT(x) (FIELD_GET(BIT(14), x)) #define CNF_HW_ECC_SUPPORT(x) (FIELD_GET(BIT(17), x)) #define 
CNF_HW_RMP_SUPPORT(x) (FIELD_GET(BIT(12), x)) #define CNF_HW_DI_CRC_SUPPORT(x) (FIELD_GET(BIT(8), x)) #define CNF_HW_WR_PT_SUPPORT(x) (FIELD_GET(BIT(9), x)) /* Device types */ #define CNF_DT_UNKNOWN (0x00) #define CNF_DT_ONFI (0x01) #define CNF_DT_JEDEC (0x02) #define CNF_DT_LEGACY (0x03) /* Controller configuration registers */ #define CNF_CTRLCFG_TRANS_CFG0 (0x400) #define CNF_CTRLCFG_TRANS_CFG1 (0x404) #define CNF_CTRLCFG_LONG_POLL (0x408) #define CNF_CTRLCFG_SHORT_POLL (0x40C) #define CNF_CTRLCFG_DEV_STAT (0x410) #define CNF_CTRLCFG_DEV_LAYOUT (0x424) #define CNF_CTRLCFG_ECC_CFG0 (0x428) #define CNF_CTRLCFG_ECC_CFG1 (0x42C) #define CNF_CTRLCFG_MULTIPLANE_CFG (0x434) #define CNF_CTRLCFG_CACHE_CFG (0x438) #define CNF_CTRLCFG_DMA_SETTINGS (0x43C) #define CNF_CTRLCFG_FIFO_TLEVEL (0x454) #define CNF_CTRLCFG(_base, _reg) (_base + (CNF_CTRLCFG_##_reg)) /* Data integrity registers */ #define CNF_DI_PAR_EN (0) #define CNF_DI_CRC_EN (1) #define CNF_DI_CONTROL (0x700) #define CNF_DI_INJECT0 (0x704) #define CNF_DI_INJECT1 (0x708) #define CNF_DI_ERR_REG_ADDR (0x70C) #define CNF_DI_INJECT2 (0x710) #define CNF_DI(_base, _reg) (_base + (CNF_DI_##_reg)) /* Thread idle timeout */ #define THREAD_IDLE_TIME_OUT 500U /* Operation work modes */ #define CNF_OPR_WORK_MODE_SDR (0) #define CNF_OPR_WORK_MODE_NVDDR (1) #define CNF_OPR_WORK_MODE_SDR_MASK (GENMASK(1, 0)) #define CNF_OPR_WORK_MODE_NVDDR_MASK (BIT(0)) #define ONFI_INTERFACE (0x01) #define NV_DDR_TIMING_READ (16) /* Interrupt register field offsets */ #define INTERRUPT_STATUS_REG (0x0114) #define THREAD_INTERRUPT_STATUS (0x0138) /* Mini controller DLL PHY controller register field offsets */ #define CNF_DLL_PHY_RST_N (24) #define CNF_DLL_PHY_EXT_WR_MODE (17) #define CNF_DLL_PHY_EXT_RD_MODE (16) #define CNF_MINICTRL_WP_SETTINGS (0x1000) #define CNF_MINICTRL_RBN_SETTINGS (0x1004) #define CNF_MINICTRL_CMN_SETTINGS (0x1008) #define CNF_MINICTRL_SKIP_BYTES_CFG (0x100C) #define CNF_MINICTRL_SKIP_BYTES_OFFSET (0x1010) #define 
CNF_MINICTRL_TOGGLE_TIMINGS0 (0x1014) #define CNF_MINICTRL_TOGGLE_TIMINGS1 (0x1018) #define CNF_MINICTRL_ASYNC_TOGGLE_TIMINGS (0x101C) #define CNF_MINICTRL_SYNC_TIMINGS (0x1020) #define CNF_MINICTRL_DLL_PHY_CTRL (0x1034) #define CNF_MINICTRL(_base, _reg) (_base + (CNF_MINICTRL_##_reg)) /* Async mode register field offsets */ #define CNF_ASYNC_TIMINGS_TRH FIELD_PREP(GENMASK(28, 24), 2) #define CNF_ASYNC_TIMINGS_TRP FIELD_PREP(GENMASK(20, 16), 4) #define CNF_ASYNC_TIMINGS_TWH FIELD_PREP(GENMASK(12, 8), 2) #define CNF_ASYNC_TIMINGS_TWP FIELD_PREP(GENMASK(4, 0), 4) /* Mini controller common settings register field offsets */ #define CNF_CMN_SETTINGS_WR_WUP (20) #define CNF_CMN_SETTINGS_RD_WUP (16) #define CNF_CMN_SETTINGS_DEV16 (8) #define CNF_CMN_SETTINGS_OPR (0) /* Interrupt status register. */ #define INTR_STATUS (0x0110) #define GINTR_ENABLE (31) #define INTERRUPT_DISABLE (0) #define INTERRUPT_ENABLE (1) /* CDMA Command type descriptor*/ /* CDMA Command type Erase*/ #define CNF_CMD_ERASE (0x1000) /* CDMA Program Page type */ #define CNF_CMD_WR (0x2100) /* CDMA Read Page type */ #define CNF_CMD_RD (0x2200) #define DMA_MS_SEL (1) #define VOL_ID (0) #define CDMA_CF_DMA_MASTER (10) #define CDMA_CF_DMA_MASTER_SET(x) FIELD_PREP(BIT(CDMA_CF_DMA_MASTER), x) #define F_CFLAGS_VOL_ID (4) #define F_CFLAGS_VOL_ID_SET(x) FIELD_PREP(GENMASK(7, 4), x) #define CDMA_CF_INT (8) #define CDMA_CF_INT_SET BIT(CDMA_CF_INT) #define COMMON_SET_DEVICE_16BIT (8) #define CDNS_READ (0) #define CDNS_WRITE (1) #define MAX_PAGES_IN_ONE_DSC (8) #define CFLAGS_MPTRPC (0) #define CFLAGS_MPTRPC_SET FIELD_PREP(BIT(CFLAGS_MPTRPC), 1) #define CFLAGS_FPTRPC (1) #define CFLAGS_FPTRPC_SET FIELD_PREP(BIT(CFLAGS_FPTRPC), 1) #define CFLAGS_CONT (9) #define CFLAGS_CONT_SET FIELD_PREP(BIT(CFLAGS_CONT), 1) #define CLEAR_ALL_INTERRUPT (0xFFFFFFFF) #define ENABLE (1) #define DISABLE (0) #define DEV_STAT_DEF_VALUE (0x40400000) /*Command Resister*/ #define CDNS_CMD_REG0 (0x00) #define CDNS_CMD_REG1 (0x04) #define 
CDNS_CMD_REG2 (0x08) #define CDNS_CMD_REG3 (0x0C) #define CMD_STATUS_PTR_ADDR (0x10) #define CMD_STAT_CMD_STATUS (0x14) #define CDNS_CMD_REG4 (0x20) /* Cdns Nand Operation Modes*/ #define CT_CDMA_MODE (0) #define CT_PIO_MODE (1) #define CT_GENERIC_MODE (3) #define OPERATING_MODE_CDMA (0) #define OPERATING_MODE_PIO (1) #define OPERATING_MODE_GENERIC (2) #define THR_STATUS (0x120) #define CMD_0_THREAD_POS (24) #define CMD_0_THREAD_POS_SET(x) (FIELD_PREP(GENMASK(26, 24), x)) #define CMD_0_C_MODE (30) #define CMD_0_C_MODE_SET(x) (FIELD_PREP(GENMASK(31, 30), x)) #define CMD_0_VOL_ID_SET(x) (FIELD_PREP(GENMASK(19, 16), x)) #define PIO_SET_FEA_MODE (0x0100) #define SET_FEAT_TIMING_MODE_ADDRESS (0x01) /* default thread number*/ #define NF_TDEF_TRD_NUM (0) /* NF device number */ #define NF_TDEF_DEV_NUM (0) #define F_OTE (16) #define F_BURST_SEL_SET(x) (FIELD_PREP(GENMASK(7, 0), x)) /* DMA maximum burst size (0-127)*/ #define NF_TDEF_BURST_SEL (127) #define NF_DMA_SETTING (0x043C) #define NF_PRE_FETCH (0x0454) #define PRE_FETCH_VALUE (1024/8) #define NF_FIFO_TRIGG_LVL_SET(x) (FIELD_PREP(GENMASK(15, 0), x)) #define NF_DMA_PACKAGE_SIZE_SET(x) (FIELD_PREP(GENMASK(31, 16), x)) #define NF_FIFO_TRIGG_LVL (0) /* BCH correction strength */ #define NF_TDEF_CORR_STR (0) #define F_CSTAT_COMP (15) #define F_CSTAT_FAIL (14) #define HPNFC_STAT_INPR (0) #define HPNFC_STAT_FAIL (2) #define HPNFC_STAT_OK (1) #define NF_16_ENABLE (1) #define NF_16_DISABLE (0) /*PIO Mode*/ #define NF_CMD4_BANK_SET(x) (FIELD_PREP(GENMASK(31, 24), x)) #define PIO_CMD0_CT_POS (0) #define PIO_CMD0_CT_SET(x) (FIELD_PREP(GENMASK(15, 0), x)) #define PIO_CF_INT (20) #define PIO_CF_INT_SET (FIELD_PREP(BIT(PIO_CF_INT), 1)) #define PIO_CF_DMA_MASTER (21) #define PIO_CF_DMA_MASTER_SET(x) (FIELD_PREP(BIT(PIO_CF_DMA_MASTER), x)) /* Phy registers*/ #define PHY_DQ_TIMING_REG_OFFSET (0x00002000) #define PHY_DQS_TIMING_REG_OFFSET (0x00002004) #define PHY_GATE_LPBK_OFFSET (0x00002008) #define PHY_DLL_MASTER_OFFSET (0x0000200c) 
#define PHY_CTRL_REG_OFFSET (0x00002080) #define PHY_TSEL_REG_OFFSET (0x00002084) #define PHY_CTRL_REG_SDR (0x00004040) #define PHY_TSEL_REG_SDR (0x00000000) #define PHY_DQ_TIMING_REG_SDR (0x00000002) #define PHY_DQS_TIMING_REG_SDR (0x00100004) #define PHY_GATE_LPBK_CTRL_REG_SDR (0x00D80000) #define PHY_DLL_MASTER_CTRL_REG_SDR (0x00800000) #define PHY_DLL_SLAVE_CTRL_REG_SDR (0x00000000) #define PHY_CTRL_REG_DDR (0x00000000) #define PHY_TSEL_REG_DDR (0x00000000) #define PHY_DQ_TIMING_REG_DDR (0x00000002) #define PHY_DQS_TIMING_REG_DDR (0x00000004) #define PHY_GATE_LPBK_CTRL_REG_DDR (0x00380002) #define PHY_DLL_MASTER_CTRL_REG_DDR (0x001400fe) #define PHY_DLL_SLAVE_CTRL_REG_DDR (0x00003f3f) /*SDMA*/ #define GCMD_TWB_VALUE BIT64(6) #define GCMCD_ADDR_SEQ (1) #define GCMCD_DATA_SEQ (2) #define ERASE_ADDR_SIZE (FIELD_PREP(GENMASK64(13, 11), 3ULL)) #define GEN_SECTOR_COUNT (1ULL) #define GEN_SECTOR_COUNT_SET (FIELD_PREP(GENMASK64(39, 32),\ GEN_SECTOR_COUNT)) #define GEN_SECTOR_SIZE (0x100ULL) #define GEN_LAST_SECTOR_SIZE_SET(x) (FIELD_PREP(GENMASK64(55, 40), x)) #define SDMA_TRIGG (21ULL) #define SDMA_SIZE_ADDR (0x0440) #define SDMA_TRD_NUM_ADDR (0x0444) #define SDMA_ADDR0_ADDR (0x044c) #define SDMA_ADDR1_ADDR (0x0450) #define PAGE_READ_CMD (0x3ULL) #define PAGE_WRITE_CMD (0x4ULL) #define PAGE_ERASE_CMD (0x6ULL) #define PAGE_CMOD_CMD (0x00) #define PAGE_MAX_SIZE (4) #define PAGE_MAX_BYTES(x) (FIELD_PREP(GENMASK64(13, 11), x)) #define GEN_CF_INT (20) #define GEN_CF_INT_SET(x) (FIELD_PREP(BIT(GEN_CF_INT), x)) #define GEN_CF_INT_ENABLE (1) #define GEN_ADDR_POS (16) #define GEN_DIR_SET(x) (FIELD_PREP(BIT64(11), x)) #define GEN_SECTOR_SET(x) (FIELD_PREP(GENMASK64(31, 16), x)) #define PAGE_WRITE_10H_CMD (FIELD_PREP(GENMASK64(23, 16), 0x10ULL)) #define GEN_ADDR_WRITE_DATA(x) (FIELD_PREP(GENMASK64(63, 32), x)) #define NUM_ONE (1) #define U32_MASK_VAL (0xFFFFFFFF) #define BIT16_CHECK (16) #define IDLE_TIME_OUT (5000U) #define ROW_VAL_SET(x, y, z) (FIELD_PREP(GENMASK(x, y), z)) 
#define SET_FEAT_ADDR(x) (FIELD_PREP(GENMASK(7, 0), x))
#define THREAD_VAL(x) (FIELD_PREP(GENMASK(2, 0), x))
#define INCR_CMD_TYPE(x) (x++)
#define DECR_CNT_ONE(x) (--x)
#define GET_INIT_SET_CHECK(x, y) (FIELD_GET(BIT(y), x))

/* Controller version register layout (rev/fix numbers plus magic). */
struct nf_ctrl_version {
	uint32_t ctrl_rev:8;
	uint32_t ctrl_fix:8;
	uint32_t hpnfc_magic_number:16;
};

/* Cadence cdma command descriptor.
 * Hardware-defined layout consumed by the controller's CDMA engine:
 * do not reorder or resize fields. 64-byte alignment is required by
 * the engine (hence __aligned(64)).
 */
struct cdns_cdma_command_descriptor {
	/* Next descriptor address */
	uint64_t next_pointer;
	/* Flash address is a 32-bit address comprising of ROW ADDR. */
	uint32_t flash_pointer;
	uint16_t bank_number;
	uint16_t reserved_0;
	/* Operation the controller needs to perform */
	uint16_t command_type;
	uint16_t reserved_1;
	/* Flags for operation of this command. */
	uint16_t command_flags;
	uint16_t reserved_2;
	/* System/host memory address required for data DMA commands. */
	uint64_t memory_pointer;
	/* Status of operation. */
	uint64_t status;
	/* Address pointer to sync buffer location. */
	uint64_t sync_flag_pointer;
	/* Controls the buffer sync mechanism. */
	uint32_t sync_arguments;
	uint32_t reserved_4;
	/* Control data pointer. */
	uint64_t ctrl_data_ptr;
} __aligned(64);

/* Row Address: bit-packed page/block/LUN view of a raw row address.
 * NOTE(review): the 7/10/3 field widths appear device-specific — confirm
 * against the configured NAND geometry before reuse.
 */
union row_address {
	struct {
		uint32_t page_address:7;
		uint32_t block_address:10;
		uint32_t lun_address:3;
	} row_bit_reg;
	uint32_t row_address_raw;
};

/* device info structure: geometry and register bases discovered at init,
 * shared with the low-level read/write/erase routines below.
 */
struct cadence_nand_params {
	uintptr_t nand_base;		/* controller register base */
	uintptr_t sdma_base;		/* slave DMA window base */
	uint8_t datarate_mode;
	uint8_t nluns;
	uint16_t page_size;
	uint16_t spare_size;
	uint16_t npages_per_block;
	uint32_t nblocks_per_lun;
	uint32_t block_size;
	uint8_t total_bit_row;
	uint8_t page_size_bit;
	uint8_t block_size_bit;
	uint8_t lun_size_bit;
	size_t page_count;
	unsigned long long device_size;
#ifdef CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
	struct k_sem interrupt_sem_t;	/* signalled from the IRQ handler */
#endif
} __aligned(32);

/* Global function Api */
int cdns_nand_init(struct cadence_nand_params *params);
int cdns_nand_read(struct cadence_nand_params *params, const void *buffer, uint32_t offset,
		   uint32_t size);
int cdns_nand_write(struct cadence_nand_params *params, const void *buffer, uint32_t offset,
		    uint32_t len);
int cdns_nand_erase(struct cadence_nand_params *params, uint32_t offset, uint32_t size);
#if CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
void cdns_nand_irq_handler_ll(struct cadence_nand_params *params);
#endif

#endif ```
/content/code_sandbox/drivers/flash/flash_cadence_nand_ll.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,348
```unknown # menuconfig SPI_NOR bool "SPI NOR Flash" default y depends on DT_HAS_JEDEC_SPI_NOR_ENABLED select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_EXPLICIT_ERASE select FLASH_HAS_PAGE_LAYOUT select FLASH_JESD216 select FLASH_HAS_EX_OP select SPI if SPI_NOR choice SPI_NOR_SFDP prompt "Source for Serial Flash Discoverable Parameters" default SPI_NOR_SFDP_MINIMAL config SPI_NOR_SFDP_MINIMAL bool "Fixed flash configuration" help Synthesize a minimal configuration assuming 256 By page size (or as set by the page-size devicetree property) and standard 4 KiBy and 64 KiBy erase instructions. Requires the size and jedec-id properties in the devicetree jedec,spi-nor node. config SPI_NOR_SFDP_DEVICETREE bool "Basic Flash Parameters from devicetree node" help The JESD216 Basic Flash Parameters table must be provided in the sfdp-bfp property in devicetree. The size and jedec-id properties are also required. config SPI_NOR_SFDP_RUNTIME bool "Read flash parameters at runtime" help Read all flash device characteristics from the device at runtime. This option is the most flexible as it should provide functionality for all supported JESD216-compatible devices. endchoice config SPI_NOR_INIT_PRIORITY int default 80 help Device driver initialization priority. Device is connected to SPI bus, it has to be initialized after SPI driver. config SPI_NOR_CS_WAIT_DELAY int "Delay time in us" default 0 help This is the wait delay (in us) to allow for CS switching to take effect config SPI_NOR_SLEEP_WHILE_WAITING_UNTIL_READY bool "Sleep while waiting for flash operations to complete" default y help Flash operations can take anywhere from 1ms to 240 seconds to complete. Enabling this option adds a delay between polls on the status register for slow operations. Disabling this option can result in significant flash savings if this driver is the only user of "k_sleep". This can be the case when building as a bootloader. 
config SPI_NOR_FLASH_LAYOUT_PAGE_SIZE int "Page size to use for FLASH_LAYOUT feature" default 65536 help When CONFIG_FLASH_PAGE_LAYOUT is used this driver will support that API. By default the page size corresponds to the block size (65536). Other options include the 32K-byte erase size (32768), the sector size (4096), or any non-zero multiple of the sector size. config SPI_NOR_IDLE_IN_DPD bool "Use Deep Power-Down mode when flash is not being accessed." help Where supported deep power-down mode can reduce current draw to as little as 0.1% of standby current. However it takes some milliseconds to enter and exit from this mode. Select this option for applications where device power management is not enabled, the flash remains inactive for long periods, and when used the impact of waiting for mode enter and exit delays is acceptable. endif # SPI_NOR ```
/content/code_sandbox/drivers/flash/Kconfig.nor
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
709
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/flash.h> #define DT_DRV_COMPAT st_stm32wba_flash_controller #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flash_stm32wba, CONFIG_FLASH_LOG_LEVEL); #include "flash_stm32.h" #include "flash_manager.h" #include "flash_driver.h" /* Let's wait for double the max erase time to be sure that the operation is * completed. */ #define STM32_FLASH_TIMEOUT \ (2 * DT_PROP(DT_INST(0, st_stm32_nv_flash), max_erase_time)) extern struct k_work_q ble_ctlr_work_q; struct k_work fm_work; static const struct flash_parameters flash_stm32_parameters = { .write_block_size = FLASH_STM32_WRITE_BLOCK_SIZE, .erase_value = 0xff, }; K_SEM_DEFINE(flash_busy, 0, 1); static void flash_callback(FM_FlashOp_Status_t status) { LOG_DBG("%d", status); k_sem_give(&flash_busy); } struct FM_CallbackNode cb_ptr = { .Callback = flash_callback }; void FM_ProcessRequest(void) { k_work_submit_to_queue(&ble_ctlr_work_q, &fm_work); } void FM_BackgroundProcess_Entry(struct k_work *work) { ARG_UNUSED(work); FM_BackgroundProcess(); } bool flash_stm32_valid_range(const struct device *dev, off_t offset, uint32_t len, bool write) { if (write && !flash_stm32_valid_write(offset, len)) { return false; } return flash_stm32_range_exists(dev, offset, len); } static inline void flash_stm32_sem_take(const struct device *dev) { k_sem_take(&FLASH_STM32_PRIV(dev)->sem, K_FOREVER); } static inline void flash_stm32_sem_give(const struct device *dev) { k_sem_give(&FLASH_STM32_PRIV(dev)->sem); } static int flash_stm32_read(const struct device *dev, off_t offset, void *data, size_t len) { if (!flash_stm32_valid_range(dev, offset, len, false)) { LOG_ERR("Read range invalid. 
Offset: %p, len: %zu", (void *) offset, len); return -EINVAL; } if (!len) { return 0; } flash_stm32_sem_take(dev); memcpy(data, (uint8_t *) FLASH_STM32_BASE_ADDRESS + offset, len); flash_stm32_sem_give(dev); return 0; } static int flash_stm32_erase(const struct device *dev, off_t offset, size_t len) { int rc; int sect_num = (len / FLASH_PAGE_SIZE) + 1; if (!flash_stm32_valid_range(dev, offset, len, true)) { LOG_ERR("Erase range invalid. Offset: %p, len: %zu", (void *)offset, len); return -EINVAL; } if (!len) { return 0; } flash_stm32_sem_take(dev); LOG_DBG("Erase offset: %p, page: %ld, len: %zu, sect num: %d", (void *)offset, offset / FLASH_PAGE_SIZE, len, sect_num); rc = FM_Erase(offset / FLASH_PAGE_SIZE, sect_num, &cb_ptr); if (rc == 0) { k_sem_take(&flash_busy, K_FOREVER); } else { LOG_DBG("Erase operation rejected. err = %d", rc); } flash_stm32_sem_give(dev); return rc; } static int flash_stm32_write(const struct device *dev, off_t offset, const void *data, size_t len) { int rc; if (!flash_stm32_valid_range(dev, offset, len, true)) { LOG_ERR("Write range invalid. Offset: %p, len: %zu", (void *)offset, len); return -EINVAL; } if (!len) { return 0; } flash_stm32_sem_take(dev); LOG_DBG("Write offset: %p, len: %zu", (void *)offset, len); rc = FM_Write((uint32_t *)data, (uint32_t *)(FLASH_STM32_BASE_ADDRESS + offset), (int32_t)len/4, &cb_ptr); if (rc == 0) { k_sem_take(&flash_busy, K_FOREVER); } else { LOG_DBG("Write operation rejected. 
err = %d", rc); } flash_stm32_sem_give(dev); return rc; } static const struct flash_parameters * flash_stm32_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_stm32_parameters; } static struct flash_stm32_priv flash_data = { .regs = (FLASH_TypeDef *) DT_INST_REG_ADDR(0), }; void flash_stm32wba_page_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { static struct flash_pages_layout stm32wba_flash_layout = { .pages_count = 0, .pages_size = 0, }; ARG_UNUSED(dev); if (stm32wba_flash_layout.pages_count == 0) { stm32wba_flash_layout.pages_count = FLASH_SIZE / FLASH_PAGE_SIZE; stm32wba_flash_layout.pages_size = FLASH_PAGE_SIZE; } *layout = &stm32wba_flash_layout; *layout_size = 1; } static const struct flash_driver_api flash_stm32_api = { .erase = flash_stm32_erase, .write = flash_stm32_write, .read = flash_stm32_read, .get_parameters = flash_stm32_get_parameters, #ifdef CONFIG_FLASH_PAGE_LAYOUT .page_layout = flash_stm32wba_page_layout, #endif }; static int stm32_flash_init(const struct device *dev) { k_sem_init(&FLASH_STM32_PRIV(dev)->sem, 1, 1); LOG_DBG("Flash initialized. BS: %zu", flash_stm32_parameters.write_block_size); k_work_init(&fm_work, &FM_BackgroundProcess_Entry); /* Enable flash driver system flag */ FD_SetStatus(FD_FLASHACCESS_RFTS, LL_FLASH_DISABLE); FD_SetStatus(FD_FLASHACCESS_RFTS_BYPASS, LL_FLASH_ENABLE); FD_SetStatus(FD_FLASHACCESS_SYSTEM, LL_FLASH_ENABLE); #if ((CONFIG_FLASH_LOG_LEVEL >= LOG_LEVEL_DBG) && CONFIG_FLASH_PAGE_LAYOUT) const struct flash_pages_layout *layout; size_t layout_size; flash_stm32wba_page_layout(dev, &layout, &layout_size); for (size_t i = 0; i < layout_size; i++) { LOG_DBG("Block %zu: bs: %zu count: %zu", i, layout[i].pages_size, layout[i].pages_count); } #endif return 0; } DEVICE_DT_INST_DEFINE(0, stm32_flash_init, NULL, &flash_data, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_stm32_api); ```
/content/code_sandbox/drivers/flash/flash_stm32wba_fm.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,550
/*
 * Non-volatile memory controller driver for Ambiq Apollo SoCs:
 * MRAM on Apollo4 (no explicit erase needed), flash on Apollo3.
 * Programming goes through the am_hal_* HAL routines.
 */
#define DT_DRV_COMPAT ambiq_flash_controller

#include <zephyr/kernel.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/logging/log.h>

#include <am_mcu_apollo.h>

LOG_MODULE_REGISTER(flash_ambiq, CONFIG_FLASH_LOG_LEVEL);

#define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash)
#define SOC_NV_FLASH_ADDR DT_REG_ADDR(SOC_NV_FLASH_NODE)
#define SOC_NV_FLASH_SIZE DT_REG_SIZE(SOC_NV_FLASH_NODE)

/* Smallest unit the HAL can program: 16 bytes on Apollo4 MRAM, one
 * 32-bit word on earlier series.
 */
#if (CONFIG_SOC_SERIES_APOLLO4X)
#define MIN_WRITE_SIZE 16
#else
#define MIN_WRITE_SIZE 4
#endif /* CONFIG_SOC_SERIES_APOLLO4X */

#define FLASH_WRITE_BLOCK_SIZE MAX(DT_PROP(SOC_NV_FLASH_NODE, write_block_size), MIN_WRITE_SIZE)
#define FLASH_ERASE_BLOCK_SIZE DT_PROP(SOC_NV_FLASH_NODE, erase_block_size)

BUILD_ASSERT((FLASH_WRITE_BLOCK_SIZE & (MIN_WRITE_SIZE - 1)) == 0,
	     "The flash write block size must be a multiple of MIN_WRITE_SIZE!");

#define FLASH_ERASE_BYTE 0xFF
/* 32-bit word filled with the erase byte, used by the Apollo4 "fill" erase. */
#define FLASH_ERASE_WORD                                                                   \
	(((uint32_t)(FLASH_ERASE_BYTE << 24)) | ((uint32_t)(FLASH_ERASE_BYTE << 16)) |     \
	 ((uint32_t)(FLASH_ERASE_BYTE << 8)) | ((uint32_t)FLASH_ERASE_BYTE))

/* Serializes write/erase between threads; no-ops without multithreading. */
#if defined(CONFIG_MULTITHREADING)
static struct k_sem flash_ambiq_sem;
#define FLASH_SEM_INIT() k_sem_init(&flash_ambiq_sem, 1, 1)
#define FLASH_SEM_TAKE() k_sem_take(&flash_ambiq_sem, K_FOREVER)
#define FLASH_SEM_GIVE() k_sem_give(&flash_ambiq_sem)
#else
#define FLASH_SEM_INIT()
#define FLASH_SEM_TAKE()
#define FLASH_SEM_GIVE()
#endif /* CONFIG_MULTITHREADING */

static const struct flash_parameters flash_ambiq_parameters = {
	.write_block_size = FLASH_WRITE_BLOCK_SIZE,
	.erase_value = FLASH_ERASE_BYTE,
#if defined(CONFIG_SOC_SERIES_APOLLO4X)
	/* MRAM behaves like RAM: no erase-before-write required. */
	.caps = {
		.no_explicit_erase = true,
	},
#endif
};

/* Check [offset, offset + len) lies inside the NV region; the subtraction
 * form avoids overflow in offset + len.
 */
static bool flash_ambiq_valid_range(off_t offset, size_t len)
{
	if ((offset < 0) || offset >= SOC_NV_FLASH_SIZE ||
	    (SOC_NV_FLASH_SIZE - offset) < len) {
		return false;
	}

	return true;
}

/* Read is a plain memcpy from the memory-mapped NV region. */
static int flash_ambiq_read(const struct device *dev, off_t offset, void *data, size_t len)
{
	ARG_UNUSED(dev);

	if (!flash_ambiq_valid_range(offset, len)) {
		return -EINVAL;
	}

	if (len == 0) {
		return 0;
	}

	memcpy(data, (uint8_t *)(SOC_NV_FLASH_ADDR + offset), len);

	return 0;
}

/* Program block-aligned data one write block at a time.  Each block is
 * staged into a 4-byte-aligned bounce buffer because the HAL requires
 * word-aligned source data; interrupts are locked around the HAL call
 * (programming stalls the bus).
 */
static int flash_ambiq_write(const struct device *dev, off_t offset, const void *data,
			     size_t len)
{
	ARG_UNUSED(dev);

	int ret = 0;
	unsigned int key = 0;
	uint32_t aligned[FLASH_WRITE_BLOCK_SIZE / sizeof(uint32_t)] = {0};
	uint32_t *src = (uint32_t *)data;

	/* write address must be block size aligned and the write length must be multiple of block
	 * size.
	 */
	if (!flash_ambiq_valid_range(offset, len) ||
	    ((uint32_t)offset & (FLASH_WRITE_BLOCK_SIZE - 1)) ||
	    (len & (FLASH_WRITE_BLOCK_SIZE - 1))) {
		return -EINVAL;
	}

	if (len == 0) {
		return 0;
	}

	FLASH_SEM_TAKE();

	key = irq_lock();

	for (int i = 0; i < len / FLASH_WRITE_BLOCK_SIZE; i++) {
		for (int j = 0; j < FLASH_WRITE_BLOCK_SIZE / sizeof(uint32_t); j++) {
			/* Make sure the source data is 4-byte aligned. */
			aligned[j] = UNALIGNED_GET((uint32_t *)src);
			src++;
		}
#if (CONFIG_SOC_SERIES_APOLLO4X)
		ret = am_hal_mram_main_program(
			AM_HAL_MRAM_PROGRAM_KEY, aligned,
			(uint32_t *)(SOC_NV_FLASH_ADDR + offset + i * FLASH_WRITE_BLOCK_SIZE),
			FLASH_WRITE_BLOCK_SIZE / sizeof(uint32_t));
#elif (CONFIG_SOC_SERIES_APOLLO3X)
		ret = am_hal_flash_program_main(
			AM_HAL_FLASH_PROGRAM_KEY, aligned,
			(uint32_t *)(SOC_NV_FLASH_ADDR + offset + i * FLASH_WRITE_BLOCK_SIZE),
			FLASH_WRITE_BLOCK_SIZE / sizeof(uint32_t));
#endif /* CONFIG_SOC_SERIES_APOLLO4X */
		if (ret) {
			break;
		}
	}

	irq_unlock(key);

	FLASH_SEM_GIVE();

	return ret;
}

/* Erase: Apollo4 "fills" the range with the erase word (MRAM); Apollo3
 * erases whole flash pages, so offset/len must be page aligned there.
 */
static int flash_ambiq_erase(const struct device *dev, off_t offset, size_t len)
{
	ARG_UNUSED(dev);

	int ret = 0;

	if (!flash_ambiq_valid_range(offset, len)) {
		return -EINVAL;
	}

	if (len == 0) {
		return 0;
	}

#if (CONFIG_SOC_SERIES_APOLLO4X)
	/* The erase address and length alignment check will be done in HAL.*/
#elif (CONFIG_SOC_SERIES_APOLLO3X)
	if ((offset % FLASH_ERASE_BLOCK_SIZE) != 0) {
		LOG_ERR("offset 0x%lx is not on a page boundary", (long)offset);
		return -EINVAL;
	}

	if ((len % FLASH_ERASE_BLOCK_SIZE) != 0) {
		LOG_ERR("len %zu is not multiple of a page size", len);
		return -EINVAL;
	}
#endif /* CONFIG_SOC_SERIES_APOLLO4X */

	FLASH_SEM_TAKE();

#if (CONFIG_SOC_SERIES_APOLLO4X)
	ret = am_hal_mram_main_fill(AM_HAL_MRAM_PROGRAM_KEY, FLASH_ERASE_WORD,
				    (uint32_t *)(SOC_NV_FLASH_ADDR + offset),
				    (len / sizeof(uint32_t)));
#elif (CONFIG_SOC_SERIES_APOLLO3X)
	unsigned int key = 0;

	key = irq_lock();
	/* NOTE(review): only the first page of the range is erased here —
	 * a single am_hal_flash_page_erase() call with no loop over len.
	 * Looks intentional only if callers erase page-by-page; verify.
	 */
	ret = am_hal_flash_page_erase(
		AM_HAL_FLASH_PROGRAM_KEY,
		AM_HAL_FLASH_ADDR2INST(((uint32_t)SOC_NV_FLASH_ADDR + offset)),
		AM_HAL_FLASH_ADDR2PAGE(((uint32_t)SOC_NV_FLASH_ADDR + offset)));
	irq_unlock(key);
#endif /* CONFIG_SOC_SERIES_APOLLO4X */

	FLASH_SEM_GIVE();

	return ret;
}

static const struct flash_parameters *flash_ambiq_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_ambiq_parameters;
}

#if CONFIG_FLASH_PAGE_LAYOUT
static const struct flash_pages_layout pages_layout = {
	.pages_count = SOC_NV_FLASH_SIZE / FLASH_ERASE_BLOCK_SIZE,
	.pages_size = FLASH_ERASE_BLOCK_SIZE,
};

static void flash_ambiq_pages_layout(const struct device *dev,
				     const struct flash_pages_layout **layout,
				     size_t *layout_size)
{
	ARG_UNUSED(dev);

	*layout = &pages_layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

static const struct flash_driver_api flash_ambiq_driver_api = {
	.read = flash_ambiq_read,
	.write = flash_ambiq_write,
	.erase = flash_ambiq_erase,
	.get_parameters = flash_ambiq_get_parameters,
#ifdef CONFIG_FLASH_PAGE_LAYOUT
	.page_layout = flash_ambiq_pages_layout,
#endif
};

static int flash_ambiq_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	FLASH_SEM_INIT();

	return 0;
}

DEVICE_DT_INST_DEFINE(0, flash_ambiq_init, NULL, NULL, NULL, POST_KERNEL,
		      CONFIG_FLASH_INIT_PRIORITY, &flash_ambiq_driver_api);
/content/code_sandbox/drivers/flash/flash_ambiq.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,689
/*
 * Flash controller driver for STM32F4 series SoCs.  Implements the
 * soc-specific half of the shared flash_stm32 driver: sector erase,
 * word programming, write-protect/RDP option bytes and the per-SoC
 * sector layout tables.
 */
#include <string.h>
#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/barrier.h>
#include <soc.h>

#include "flash_stm32.h"

LOG_MODULE_REGISTER(flash_stm32f4x, CONFIG_FLASH_LOG_LEVEL);

/* Select the programming granularity (and matching PSIZE bits) from the
 * devicetree write block size.
 */
#if FLASH_STM32_WRITE_BLOCK_SIZE == 8
typedef uint64_t flash_prg_t;
#define FLASH_PROGRAM_SIZE FLASH_PSIZE_DOUBLE_WORD
#elif FLASH_STM32_WRITE_BLOCK_SIZE == 4
typedef uint32_t flash_prg_t;
#define FLASH_PROGRAM_SIZE FLASH_PSIZE_WORD
#elif FLASH_STM32_WRITE_BLOCK_SIZE == 2
typedef uint16_t flash_prg_t;
#define FLASH_PROGRAM_SIZE FLASH_PSIZE_HALF_WORD
#elif FLASH_STM32_WRITE_BLOCK_SIZE == 1
typedef uint8_t flash_prg_t;
#define FLASH_PROGRAM_SIZE FLASH_PSIZE_BYTE
#else
#error Write block size must be a power of 2, from 1 to 8
#endif

bool flash_stm32_valid_range(const struct device *dev, off_t offset,
			     uint32_t len, bool write)
{
	ARG_UNUSED(write);

#if (FLASH_SECTOR_TOTAL == 12) && defined(FLASH_OPTCR_DB1M)
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);

	/*
	 * RM0090, table 7.1: STM32F42xxx, STM32F43xxx
	 */
	if (regs->OPTCR & FLASH_OPTCR_DB1M) {
		/* Device configured in Dual Bank, but not supported for now */
		return false;
	}
#endif
	return flash_stm32_range_exists(dev, offset, len);
}

/* Reset the data and instruction caches (each may only be reset while
 * disabled), then re-enable whatever was enabled before.
 */
static inline void flush_cache(FLASH_TypeDef *regs)
{
	if (regs->ACR & FLASH_ACR_DCEN) {
		regs->ACR &= ~FLASH_ACR_DCEN;
		/* Datasheet: DCRST: Data cache reset
		 * This bit can be written only when the data cache is disabled
		 */
		regs->ACR |= FLASH_ACR_DCRST;
		regs->ACR &= ~FLASH_ACR_DCRST;
		regs->ACR |= FLASH_ACR_DCEN;
	}

	if (regs->ACR & FLASH_ACR_ICEN) {
		regs->ACR &= ~FLASH_ACR_ICEN;
		/* Datasheet: ICRST: Instruction cache reset :
		 * This bit can be written only when the instruction cache
		 * is disabled
		 */
		regs->ACR |= FLASH_ACR_ICRST;
		regs->ACR &= ~FLASH_ACR_ICRST;
		regs->ACR |= FLASH_ACR_ICEN;
	}
}

/* Program one flash_prg_t value at offset.  Sets PSIZE + PG, performs the
 * memory-mapped store, then waits for the operation to complete.
 */
static int write_value(const struct device *dev, off_t offset, flash_prg_t val)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
#if defined(FLASH_OPTCR_DB1M)
	bool dcache_enabled = false;
#endif /* FLASH_OPTCR_DB*/
	uint32_t tmp;
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

#if defined(FLASH_OPTCR_DB1M)
	/*
	 * Disable the data cache to avoid the silicon errata ES0206 Rev 16 2.2.12:
	 * "Data cache might be corrupted during Flash memory read-while-write operation"
	 */
	if (regs->ACR & FLASH_ACR_DCEN) {
		dcache_enabled = true;
		regs->ACR &= (~FLASH_ACR_DCEN);
	}
#endif /* FLASH_OPTCR_DB1M */

	regs->CR &= CR_PSIZE_MASK;
	regs->CR |= FLASH_PROGRAM_SIZE;
	regs->CR |= FLASH_CR_PG;

	/* flush the register write */
	tmp = regs->CR;

	*((flash_prg_t *)(offset + FLASH_STM32_BASE_ADDRESS)) = val;

	rc = flash_stm32_wait_flash_idle(dev);
	regs->CR &= (~FLASH_CR_PG);

#if defined(FLASH_OPTCR_DB1M)
	/* Reset/enable the data cache if previously enabled */
	if (dcache_enabled) {
		regs->ACR |= FLASH_ACR_DCRST;
		regs->ACR &= (~FLASH_ACR_DCRST);
		regs->ACR |= FLASH_ACR_DCEN;
	}
#endif /* FLASH_OPTCR_DB1M */

	return rc;
}

/* Erase a single sector by index, blocking until the operation finishes. */
static int erase_sector(const struct device *dev, uint32_t sector)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/*
	 * If an erase operation in Flash memory also concerns data
	 * in the instruction cache, the user has to ensure that these data
	 * are rewritten before they are accessed during code execution.
	 */
	flush_cache(regs);

#if FLASH_SECTOR_TOTAL == 24
	/*
	 * RM0090, 3.9.8: STM32F42xxx, STM32F43xxx
	 * RM0386, 3.7.5: STM32F469xx, STM32F479xx
	 */
	if (sector >= 12) {
		/* From sector 12, SNB is offset by 0b10000 */
		sector += 4U;
	}
#endif

	regs->CR &= CR_PSIZE_MASK;
	regs->CR |= FLASH_PROGRAM_SIZE;
	regs->CR &= ~FLASH_CR_SNB;
	/* NOTE(review): the shift count 3 is assumed to equal
	 * FLASH_CR_SNB_Pos — confirm against the CMSIS header.
	 */
	regs->CR |= FLASH_CR_SER | (sector << 3);
	regs->CR |= FLASH_CR_STRT;

	/* flush the register write */
	tmp = regs->CR;

	rc = flash_stm32_wait_flash_idle(dev);
	regs->CR &= ~(FLASH_CR_SER | FLASH_CR_SNB);

	return rc;
}

/* Erase every sector overlapping [offset, offset + len), stopping at the
 * first failure.
 */
int flash_stm32_block_erase_loop(const struct device *dev, unsigned int offset,
				 unsigned int len)
{
	struct flash_pages_info info;
	uint32_t start_sector, end_sector;
	uint32_t i;
	int rc = 0;

	rc = flash_get_page_info_by_offs(dev, offset, &info);
	if (rc) {
		return rc;
	}
	start_sector = info.index;
	rc = flash_get_page_info_by_offs(dev, offset + len - 1, &info);
	if (rc) {
		return rc;
	}
	end_sector = info.index;

	for (i = start_sector; i <= end_sector; i++) {
		rc = erase_sector(dev, i);
		if (rc < 0) {
			break;
		}
	}

	return rc;
}

/* Program a byte range one flash_prg_t unit at a time.  len is assumed to
 * be a multiple of sizeof(flash_prg_t) — a trailing remainder is silently
 * not written (TODO confirm callers guarantee alignment).
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	int i, rc = 0;
	flash_prg_t value;

	for (i = 0; i < len / sizeof(flash_prg_t); i++) {
		value = UNALIGNED_GET((flash_prg_t *)data + i);
		rc = write_value(dev, offset + i * sizeof(flash_prg_t), value);
		if (rc < 0) {
			return rc;
		}
	}

	return rc;
}

/* Read-modify-write of the option control register; no-op when the masked
 * bits already hold the requested value.
 */
static __unused int write_optb(const struct device *dev, uint32_t mask,
			       uint32_t value)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc;

	if (regs->OPTCR & FLASH_OPTCR_OPTLOCK) {
		return -EIO;
	}

	if ((regs->OPTCR & mask) == value) {
		return 0;
	}

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	regs->OPTCR = (regs->OPTCR & ~mask) | value;
	regs->OPTCR |= FLASH_OPTCR_OPTSTRT;

	/* Make sure previous write is completed. */
	barrier_dsync_fence_full();

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	return 0;
}

#if defined(CONFIG_FLASH_STM32_WRITE_PROTECT)
/* Update write protection: nWRP bits are active-low, so requested
 * protected_sectors bits are inverted before writing.
 */
int flash_stm32_update_wp_sectors(const struct device *dev,
				  uint32_t changed_sectors,
				  uint32_t protected_sectors)
{
	changed_sectors <<= FLASH_OPTCR_nWRP_Pos;
	protected_sectors <<= FLASH_OPTCR_nWRP_Pos;

	if ((changed_sectors & FLASH_OPTCR_nWRP_Msk) != changed_sectors) {
		return -EINVAL;
	}

	/* Sector is protected when bit == 0. Flip protected_sectors bits */
	protected_sectors = ~protected_sectors & changed_sectors;

	return write_optb(dev, changed_sectors, protected_sectors);
}

int flash_stm32_get_wp_sectors(const struct device *dev,
			       uint32_t *protected_sectors)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);

	*protected_sectors =
		(~regs->OPTCR & FLASH_OPTCR_nWRP_Msk) >> FLASH_OPTCR_nWRP_Pos;

	return 0;
}
#endif /* CONFIG_FLASH_STM32_WRITE_PROTECT */

#if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION)
uint8_t flash_stm32_get_rdp_level(const struct device *dev)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);

	return (regs->OPTCR & FLASH_OPTCR_RDP_Msk) >> FLASH_OPTCR_RDP_Pos;
}

void flash_stm32_set_rdp_level(const struct device *dev, uint8_t level)
{
	write_optb(dev, FLASH_OPTCR_RDP_Msk,
		   (uint32_t)level << FLASH_OPTCR_RDP_Pos);
}
#endif /* CONFIG_FLASH_STM32_READOUT_PROTECTION */

/*
 * Different SoC flash layouts are specified in across various
 * reference manuals, but the flash layout for a given number of
 * sectors is consistent across these manuals, with one "gotcha". The
 * number of sectors is given by the HAL as FLASH_SECTOR_TOTAL.
 *
 * The only "gotcha" is that when there are 24 sectors, they are split
 * across 2 "banks" of 12 sectors each, with another set of small
 * sectors (16 KB) in the second bank occurring after the large ones
 * (128 KB) in the first. We could consider supporting this as two
 * devices to make the layout cleaner, but this will do for now.
 */
#ifndef FLASH_SECTOR_TOTAL
#error "Unknown flash layout"
#else  /* defined(FLASH_SECTOR_TOTAL) */
#if FLASH_SECTOR_TOTAL == 5
static const struct flash_pages_layout stm32f4_flash_layout[] = {
	/* RM0401, table 5: STM32F410Tx, STM32F410Cx, STM32F410Rx */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
};
#elif FLASH_SECTOR_TOTAL == 6
static const struct flash_pages_layout stm32f4_flash_layout[] = {
	/* RM0368, table 5: STM32F401xC */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 1, .pages_size = KB(128)},
};
#elif FLASH_SECTOR_TOTAL == 8
static const struct flash_pages_layout stm32f4_flash_layout[] = {
	/*
	 * RM0368, table 5: STM32F401xE
	 * RM0383, table 4: STM32F411xE
	 * RM0390, table 4: STM32F446xx
	 */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 3, .pages_size = KB(128)},
};
#elif FLASH_SECTOR_TOTAL == 12
static const struct flash_pages_layout stm32f4_flash_layout[] = {
	/*
	 * RM0090, table 5: STM32F405xx, STM32F415xx, STM32F407xx, STM32F417xx
	 * RM0402, table 5: STM32F412Zx, STM32F412Vx, STM32F412Rx, STM32F412Cx
	 */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
};
#elif FLASH_SECTOR_TOTAL == 16
static const struct flash_pages_layout stm32f4_flash_layout[] = {
	/* RM0430, table 5.: STM32F413xx, STM32F423xx */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 11, .pages_size = KB(128)},
};
#elif FLASH_SECTOR_TOTAL == 24
static const struct flash_pages_layout stm32f4_flash_layout[] = {
	/*
	 * RM0090, table 6: STM32F427xx, STM32F437xx, STM32F429xx, STM32F439xx
	 * RM0386, table 4: STM32F469xx, STM32F479xx
	 */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
};
#else
#error "Unknown flash layout"
#endif /* FLASH_SECTOR_TOTAL == 5 */
#endif /* !defined(FLASH_SECTOR_TOTAL) */

void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	ARG_UNUSED(dev);

	*layout = stm32f4_flash_layout;
	*layout_size = ARRAY_SIZE(stm32f4_flash_layout);
}
/content/code_sandbox/drivers/flash/flash_stm32f4x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,176
/*
 * Flash controller driver for Infineon XMC4xxx SoCs, built on the
 * XMC_FLASH_* HAL.  Page layout comes from the devicetree
 * "pages_layout" node children.
 */
#define DT_DRV_COMPAT infineon_xmc4xxx_flash_controller
#define FLASH_WRITE_BLK_SZ DT_PROP(DT_INST(0, infineon_xmc4xxx_nv_flash), write_block_size)

#include <stdint.h>

#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/kernel.h>

#include <xmc_flash.h>

struct flash_xmc4xxx_data {
	/* Serializes write/erase between threads. */
	struct k_sem sem;
};

struct flash_xmc4xxx_config {
	uint32_t base;
	uint32_t size;
	struct flash_parameters parameters;
};

static inline bool is_aligned_32(uint32_t data)
{
	return (data & 0x3) ? false : true;
}

static int flash_xmc4xxx_init(const struct device *dev)
{
	struct flash_xmc4xxx_data *dev_data = dev->data;

	k_sem_init(&dev_data->sem, 1, 1);
	return 0;
}

#define SET_PAGES(node_id)                                                                         \
	{.pages_count = DT_PROP(node_id, pages_count), .pages_size = DT_PROP(node_id, pages_size)},

#if CONFIG_FLASH_PAGE_LAYOUT
static const struct flash_pages_layout flash_xmc4xxx_pages_layout[] = {
	DT_FOREACH_CHILD(DT_NODELABEL(pages_layout), SET_PAGES)};

static void flash_xmc4xxx_page_layout(const struct device *dev,
				      const struct flash_pages_layout **layout,
				      size_t *layout_size)
{
	*layout = &flash_xmc4xxx_pages_layout[0];
	*layout_size = ARRAY_SIZE(flash_xmc4xxx_pages_layout);
}
#endif

/* Read is a plain memcpy from the memory-mapped flash.
 * NOTE(review): the driver returns -1 on error rather than a negative
 * errno (-EINVAL) — inconsistent with the flash API convention; callers
 * may depend on the current value, so left as-is.
 */
static int flash_xmc4xxx_read(const struct device *dev, off_t offset, void *data, size_t len)
{
	const struct flash_xmc4xxx_config *dev_config = dev->config;

	if (offset < 0 || offset + len > dev_config->size) {
		return -1;
	}
	memcpy(data, (void *)(dev_config->base + offset), len);
	return 0;
}

/* Bounce buffer for unaligned source data; shared by all writes, which is
 * safe while access is serialized by dev_data->sem (single instance).
 */
static __aligned(4) uint8_t
	aligned_page[DT_PROP(DT_INST(0, infineon_xmc4xxx_nv_flash), write_block_size)];

/* Program block-aligned data one page at a time with interrupts locked
 * around each HAL call.
 */
static int flash_xmc4xxx_write(const struct device *dev, off_t offset, const void *data,
			       size_t len)
{
	struct flash_xmc4xxx_data *dev_data = dev->data;
	const struct flash_xmc4xxx_config *dev_config = dev->config;
	uint32_t irq_key;
	uint32_t flash_addr = dev_config->base;
	const uint8_t *src = data;
	int num_pages;

	if (offset < 0 || offset + len > dev_config->size) {
		return -1;
	}

	if (len % dev_config->parameters.write_block_size ||
	    offset % dev_config->parameters.write_block_size > 0) {
		return -1;
	}

	k_sem_take(&dev_data->sem, K_FOREVER);

	/* erase and write operations must be on the uncached base address */
	flash_addr |= 0xc000000;
	flash_addr += offset;

	num_pages = len / dev_config->parameters.write_block_size;
	for (int i = 0; i < num_pages; i++) {
		uint32_t *src_ptr = (uint32_t *)src;

		/* XMC_FLASH_ProgramPage() needs a 32 bit aligned input. */
		/* Copy the data to an aligned array if needed. */
		if (!is_aligned_32((uint32_t)src)) {
			memcpy(aligned_page, src, dev_config->parameters.write_block_size);
			src_ptr = (uint32_t *)aligned_page;
		}

		irq_key = irq_lock();
		XMC_FLASH_ProgramPage((uint32_t *)flash_addr, src_ptr);
		irq_unlock(irq_key);

		flash_addr += dev_config->parameters.write_block_size;
		src += dev_config->parameters.write_block_size;
	}

	k_sem_give(&dev_data->sem);

	return 0;
}

#if CONFIG_FLASH_PAGE_LAYOUT
/* Walk the layout table and erase every sector fully covered by
 * [offset, offset + size); fails with -1 when offset/size do not land on
 * sector boundaries.
 */
static int flash_xmc4xxx_erase(const struct device *dev, off_t offset, size_t size)
{
	struct flash_xmc4xxx_data *dev_data = dev->data;
	const struct flash_xmc4xxx_config *dev_config = dev->config;
	uint32_t irq_key;
	uint32_t offset_page = 0;
	int ret = 0;

	if (offset < 0 || offset > dev_config->size) {
		return -1;
	}

	k_sem_take(&dev_data->sem, K_FOREVER);

	for (int i = 0; i < ARRAY_SIZE(flash_xmc4xxx_pages_layout); i++) {
		for (int k = 0; k < flash_xmc4xxx_pages_layout[i].pages_count; k++) {
			uint32_t pages_size = flash_xmc4xxx_pages_layout[i].pages_size;
			/* erase and write operations must be on the uncached base address */
			uint32_t flash_addr = dev_config->base | 0xc000000;

			if (offset == offset_page && size >= pages_size) {
				flash_addr += offset;

				irq_key = irq_lock();
				XMC_FLASH_EraseSector((uint32_t *)flash_addr);
				irq_unlock(irq_key);

				size -= pages_size;
				offset += pages_size;
			}

			offset_page += pages_size;
			if (size == 0) {
				ret = 0;
				goto finish;
			}
			/* page not aligned with offset address */
			if (offset_page > offset) {
				ret = -1;
				goto finish;
			}
		}
	}
finish:
	k_sem_give(&dev_data->sem);
	return ret;
}
#else
static int flash_xmc4xxx_erase(const struct device *dev, off_t offset, size_t size)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(offset);
	ARG_UNUSED(size);
	return -ENOTSUP;
}
#endif

static const struct flash_parameters *flash_xmc4xxx_get_parameters(const struct device *dev)
{
	const struct flash_xmc4xxx_config *dev_config = dev->config;

	return &dev_config->parameters;
}

static const struct flash_driver_api flash_xmc4xxx_api = {
	.erase = flash_xmc4xxx_erase,
	.write = flash_xmc4xxx_write,
	.read = flash_xmc4xxx_read,
#ifdef CONFIG_FLASH_PAGE_LAYOUT
	.page_layout = flash_xmc4xxx_page_layout,
#endif
	.get_parameters = flash_xmc4xxx_get_parameters};

static struct flash_xmc4xxx_data flash_xmc4xxx_data_0;
static struct flash_xmc4xxx_config flash_xmc4xxx_cfg_0 = {
	.base = DT_REG_ADDR(DT_INST(0, infineon_xmc4xxx_nv_flash)),
	.size = DT_REG_SIZE(DT_INST(0, infineon_xmc4xxx_nv_flash)),
	.parameters = {.write_block_size = FLASH_WRITE_BLK_SZ, .erase_value = 0}};

DEVICE_DT_INST_DEFINE(0, flash_xmc4xxx_init, NULL, &flash_xmc4xxx_data_0, &flash_xmc4xxx_cfg_0,
		      POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_xmc4xxx_api);
/content/code_sandbox/drivers/flash/soc_flash_xmc4xxx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,539
/*
 * Flash driver for the Telink B91 SoC, built on the vendor flash_* page
 * routines from "flash.h".  Erase picks the largest unit (64K block,
 * 32K block, sector, page) that fits at each step.
 */
#define DT_DRV_COMPAT telink_b91_flash_controller

#define FLASH_SIZE   DT_REG_SIZE(DT_INST(0, soc_nv_flash))
#define FLASH_ORIGIN DT_REG_ADDR(DT_INST(0, soc_nv_flash))

#include "flash.h"
#include <string.h>

#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/kernel.h>

/* driver definitions */
#define BLOCK_64K_SIZE  (0x10000u)
#define BLOCK_64K_PAGES (BLOCK_64K_SIZE / PAGE_SIZE)
#define BLOCK_32K_SIZE  (0x8000u)
#define BLOCK_32K_PAGES (BLOCK_32K_SIZE / PAGE_SIZE)
#define SECTOR_SIZE     (0x1000u)
#define SECTOR_PAGES    (SECTOR_SIZE / PAGE_SIZE)

/* driver data structure */
struct flash_b91_data {
	/* Serializes write/erase; taken with K_NO_WAIT so concurrent
	 * operations fail fast with -EACCES instead of blocking.
	 */
	struct k_sem write_lock;
};

/* driver parameters structure */
static const struct flash_parameters flash_b91_parameters = {
	.write_block_size = DT_PROP(DT_INST(0, soc_nv_flash), write_block_size),
	.erase_value = 0xff,
};

/* Check for correct offset and length */
static bool flash_b91_is_range_valid(off_t offset, size_t len)
{
	/* check for min value */
	if ((offset < 0) || (len < 1)) {
		return false;
	}

	/* check for max value */
	if ((offset + len) > FLASH_SIZE) {
		return false;
	}

	return true;
}

/* API implementation: driver initialization */
static int flash_b91_init(const struct device *dev)
{
	struct flash_b91_data *dev_data = dev->data;

	k_sem_init(&dev_data->write_lock, 1, 1);

	return 0;
}

/* API implementation: erase */
static int flash_b91_erase(const struct device *dev, off_t offset, size_t len)
{
	int page_nums = len / PAGE_SIZE;
	struct flash_b91_data *dev_data = dev->data;

	/* return SUCCESS if len equals 0 (required by tests/drivers/flash) */
	if (!len) {
		return 0;
	}

	/* check for valid range */
	if (!flash_b91_is_range_valid(offset, len)) {
		return -EINVAL;
	}

	/* erase can be done only by pages */
	if (((offset % PAGE_SIZE) != 0) || ((len % PAGE_SIZE) != 0)) {
		return -EINVAL;
	}

	/* take semaphore */
	if (k_sem_take(&dev_data->write_lock, K_NO_WAIT)) {
		return -EACCES;
	}

	/* Greedy tiling: always erase with the largest aligned unit that
	 * still fits in the remaining range.
	 */
	while (page_nums) {
		/* check for 64K erase possibility, then check for 32K and so on.. */
		if ((page_nums >= BLOCK_64K_PAGES) && ((offset % BLOCK_64K_SIZE) == 0)) {
			/* erase 64K block */
			flash_erase_64kblock(offset);
			page_nums -= BLOCK_64K_PAGES;
			offset += BLOCK_64K_SIZE;
		} else if ((page_nums >= BLOCK_32K_PAGES) && ((offset % BLOCK_32K_SIZE) == 0)) {
			/* erase 32K block */
			flash_erase_32kblock(offset);
			page_nums -= BLOCK_32K_PAGES;
			offset += BLOCK_32K_SIZE;
		} else if ((page_nums >= SECTOR_PAGES) && ((offset % SECTOR_SIZE) == 0)) {
			/* erase sector */
			flash_erase_sector(offset);
			page_nums -= SECTOR_PAGES;
			offset += SECTOR_SIZE;
		} else {
			/* erase page */
			flash_erase_page(offset);
			page_nums--;
			offset += PAGE_SIZE;
		}
	}

	/* release semaphore */
	k_sem_give(&dev_data->write_lock);

	return 0;
}

/* API implementation: write */
static int flash_b91_write(const struct device *dev, off_t offset,
			   const void *data, size_t len)
{
	void *buf = NULL;
	struct flash_b91_data *dev_data = dev->data;

	/* return SUCCESS if len equals 0 (required by tests/drivers/flash) */
	if (!len) {
		return 0;
	}

	/* check for valid range */
	if (!flash_b91_is_range_valid(offset, len)) {
		return -EINVAL;
	}

	/* take semaphore */
	if (k_sem_take(&dev_data->write_lock, K_NO_WAIT)) {
		return -EACCES;
	}

	/* need to store data in intermediate RAM buffer in case from flash to flash write */
	if (((uint32_t)data >= FLASH_ORIGIN) &&
	    ((uint32_t)data < (FLASH_ORIGIN + FLASH_SIZE))) {

		buf = k_malloc(len);
		if (buf == NULL) {
			k_sem_give(&dev_data->write_lock);
			return -ENOMEM;
		}

		/* copy Flash data to RAM */
		memcpy(buf, data, len);

		/* substitute data with allocated buffer */
		data = buf;
	}

	/* write flash */
	flash_write_page(offset, len, (unsigned char *)data);

	/* if ram memory is allocated for flash writing it should be free */
	if (buf != NULL) {
		k_free(buf);
	}

	/* release semaphore */
	k_sem_give(&dev_data->write_lock);

	return 0;
}

/* API implementation: read */
static int flash_b91_read(const struct device *dev, off_t offset, void *data, size_t len)
{
	ARG_UNUSED(dev);

	/* return SUCCESS if len equals 0 (required by tests/drivers/flash) */
	if (!len) {
		return 0;
	}

	/* check for valid range */
	if (!flash_b91_is_range_valid(offset, len)) {
		return -EINVAL;
	}

	/* read flash */
	flash_read_page(offset, len, (unsigned char *)data);

	return 0;
}

/* API implementation: get_parameters */
static const struct flash_parameters *
flash_b91_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_b91_parameters;
}

/* API implementation: page_layout */
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
static const struct flash_pages_layout dev_layout = {
	.pages_count = FLASH_SIZE / PAGE_SIZE,
	.pages_size = PAGE_SIZE,
};

static void flash_b91_pages_layout(const struct device *dev,
				   const struct flash_pages_layout **layout,
				   size_t *layout_size)
{
	*layout = &dev_layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

static struct flash_b91_data flash_data;

static const struct flash_driver_api flash_b91_api = {
	.erase = flash_b91_erase,
	.write = flash_b91_write,
	.read = flash_b91_read,
	.get_parameters = flash_b91_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = flash_b91_pages_layout,
#endif
};

/* Driver registration */
DEVICE_DT_INST_DEFINE(0, flash_b91_init, NULL, &flash_data, NULL, POST_KERNEL,
		      CONFIG_FLASH_INIT_PRIORITY, &flash_b91_api);
/content/code_sandbox/drivers/flash/soc_flash_b91.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,503
```unknown # Flash simulator config menuconfig FLASH_SIMULATOR bool "Flash simulator" default y depends on DT_HAS_ZEPHYR_SIM_FLASH_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_DRIVER_ENABLED help Enable the flash simulator. if FLASH_SIMULATOR config FLASH_SIMULATOR_UNALIGNED_READ bool "Allow read access to be unaligned" default y help If selected, the reading operation does not check if access is aligned. Disable this option only if you want to simulate a specific FLASH interface that requires aligned read access. config FLASH_SIMULATOR_EXPLICIT_ERASE bool "Program-erase device" select FLASH_HAS_EXPLICIT_ERASE default y help Explicit erase (non-erase-on-write) Flash, which is device that requires erase to erase-value prior to write as it only allows to change bits from erase-value to the opposite. config FLASH_SIMULATOR_RAMLIKE bool default y if !FLASH_SIMULATOR_EXPLICIT_ERASE select FLASH_HAS_NO_EXPLICIT_ERASE select FLASH_SIMULATOR_DOUBLE_WRITES help This is used for setting FLASH_HAS_NO_EXPLICIT_ERASE. config FLASH_SIMULATOR_DOUBLE_WRITES bool "Allow program units to be programmed more than once" help If selected, writing to a non-erased program unit will succeed, otherwise, it will return an error. Keep in mind that write operations can only change value of a bit from erase-value to the opposite. This option does not impact FLASH_SIMULATOR_RAMLIKE. In case when FLASH_SIMULATOR_EXPLICIT_ERASE is selected multiple writes to the same bit but only change from erase-value to opposite will be registered. 
config FLASH_SIMULATOR_SIMULATE_TIMING bool "Hardware timing simulation" if FLASH_SIMULATOR_SIMULATE_TIMING config FLASH_SIMULATOR_MIN_READ_TIME_US int "Minimum read time (µS)" default 2 range 1 1000000 config FLASH_SIMULATOR_MIN_WRITE_TIME_US int "Minimum write time (µS)" default 100 range 1 1000000 config FLASH_SIMULATOR_MIN_ERASE_TIME_US int "Minimum erase time (µS)" default 2000 range 1 1000000 endif config FLASH_SIMULATOR_STATS bool "flash operations statistic" default y select STATS select STATS_NAMES help Gather statistics measurements for flash simulator operations using the statistics subsystem. config FLASH_SIMULATOR_STAT_PAGE_COUNT int "Pages under statistic" depends on FLASH_SIMULATOR_STATS range 1 256 default 256 help Only up to this number of beginning pages will be tracked while catching dedicated flash operations and thresholds. This number is not automatic because the implementation uses the UNTIL_REPEAT() macro, which is limited to taking an explicit number of iterations. This is why it's not possible to calculate the number of pages with the preprocessor using DT properties. endif # FLASH_SIMULATOR ```
/content/code_sandbox/drivers/flash/Kconfig.simulator
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
627
```c
/*
 * Flash controller driver for the STM32G0 series: low-level program/erase
 * primitives used by the common flash_stm32 front end (flash_stm32.c).
 */

#define LOG_DOMAIN flash_stm32g0
#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_DOMAIN);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <soc.h>

#include "flash_stm32.h"

/* FLASH_DBANK_SUPPORT is defined in the HAL for all G0Bx and G0C1 SoCs,
 * while only those with 256KiB and 512KiB Flash have two banks.
 */
#if defined(FLASH_DBANK_SUPPORT) && (CONFIG_FLASH_SIZE > (128))
#define STM32G0_DBANK_SUPPORT
#endif

#if defined(STM32G0_DBANK_SUPPORT)
#define STM32G0_BANK_COUNT		2
/* "Physical" page numbers on bank 2 start at 256 regardless of bank size. */
#define STM32G0_BANK2_START_PAGE_NR	256
#else
#define STM32G0_BANK_COUNT		1
#endif

#define STM32G0_FLASH_SIZE		(FLASH_SIZE)
#define STM32G0_FLASH_PAGE_SIZE		(FLASH_PAGE_SIZE)
#define STM32G0_PAGES_PER_BANK \
	((STM32G0_FLASH_SIZE / STM32G0_FLASH_PAGE_SIZE) / STM32G0_BANK_COUNT)

/* Reset the instruction cache (disable, pulse ICRST, re-enable) so stale
 * contents are dropped after flash is modified. No-op if the cache is off.
 */
static inline void flush_cache(FLASH_TypeDef *regs)
{
	if (regs->ACR & FLASH_ACR_ICEN) {
		regs->ACR &= ~FLASH_ACR_ICEN;
		/* Datasheet: ICRST: Instruction cache reset :
		 * This bit can be written only when the instruction cache
		 * is disabled
		 */
		regs->ACR |= FLASH_ACR_ICRST;
		regs->ACR &= ~FLASH_ACR_ICRST;
		regs->ACR |= FLASH_ACR_ICEN;
	}
}

/* Program one 64-bit double word at @offset (flash-relative).
 * Returns 0 on success, -EIO if the controller is locked, the target is not
 * erased (and @val is not all-zero), or a flash error/timeout occurs.
 */
static int write_dword(const struct device *dev, off_t offset, uint64_t val)
{
	volatile uint32_t *flash = (uint32_t *)(offset + FLASH_STM32_BASE_ADDRESS);
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check that no Flash main memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Check if this double word is erased and value isn't 0.
	 *
	 * It is allowed to write only zeros over an already written dword
	 * See 3.3.8 in reference manual.
	 */
	if ((flash[0] != 0xFFFFFFFFUL || flash[1] != 0xFFFFFFFFUL) && val != 0UL) {
		LOG_ERR("Word at offs %ld not erased", (long)offset);
		return -EIO;
	}

	/* Set the PG bit */
	regs->CR |= FLASH_CR_PG;

	/* Flush the register write */
	tmp = regs->CR;

	/* Perform the data write operation at the desired memory address.
	 * The two 32-bit halves must be written back-to-back, low word first.
	 */
	flash[0] = (uint32_t)val;
	flash[1] = (uint32_t)(val >> 32);

	/* Wait until the BSY bit is cleared */
	rc = flash_stm32_wait_flash_idle(dev);

	/* Clear the PG bit */
	regs->CR &= (~FLASH_CR_PG);

	return rc;
}

/* Erase the single flash page containing address @offset (flash-relative).
 * On dual-bank parts the zephyr page number is translated to the physical
 * bank/page pair, honoring the nSWAP_BANK option bit.
 * Returns 0 on success or a negative errno on lock/busy/flash error.
 */
static int erase_page(const struct device *dev, unsigned int offset)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	int rc;
	int page;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check that no Flash memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/*
	 * If an erase operation in Flash memory also concerns data
	 * in the instruction cache, the user has to ensure that these data
	 * are rewritten before they are accessed during code execution.
	 */
	flush_cache(regs);

	tmp = regs->CR;
	page = offset / STM32G0_FLASH_PAGE_SIZE;
#if defined(STM32G0_DBANK_SUPPORT)
	bool swap_enabled = (regs->OPTR & FLASH_OPTR_nSWAP_BANK) == 0;

	/* big page-nr w/o swap or small page-nr w/ swap indicate bank2 */
	if ((page >= STM32G0_PAGES_PER_BANK) != swap_enabled) {
		page = (page % STM32G0_PAGES_PER_BANK) + STM32G0_BANK2_START_PAGE_NR;
		tmp |= FLASH_CR_BKER;
		LOG_DBG("Erase page %d on bank 2", page);
	} else {
		page = page % STM32G0_PAGES_PER_BANK;
		tmp &= ~FLASH_CR_BKER;
		LOG_DBG("Erase page %d on bank 1", page);
	}
#endif

	/* Set the PER bit and select the page you wish to erase */
	tmp |= FLASH_CR_PER;
	tmp &= ~FLASH_CR_PNB_Msk;
	tmp |= ((page << FLASH_CR_PNB_Pos) & FLASH_CR_PNB_Msk);

	/* Set the STRT bit and write the reg */
	tmp |= FLASH_CR_STRT;
	regs->CR = tmp;

	/* Wait for the BSY bit */
	rc = flash_stm32_wait_flash_idle(dev);

	regs->CR &= ~FLASH_CR_PER;

	return rc;
}

/* Erase every page overlapping [offset, offset + len); stops at the first
 * failing page and returns its error code. Called by the common front end,
 * which has already validated the range and taken the driver lock.
 */
int flash_stm32_block_erase_loop(const struct device *dev,
				 unsigned int offset,
				 unsigned int len)
{
	unsigned int addr = offset;
	int rc = 0;

	for (; addr <= offset + len - 1 ; addr += STM32G0_FLASH_PAGE_SIZE) {
		rc = erase_page(dev, addr);
		if (rc < 0) {
			break;
		}
	}

	return rc;
}

/* Program @len bytes from @data at @offset, one double word at a time.
 * NOTE(review): the loop always writes full 8-byte units; it presumably
 * relies on the front end enforcing 8-byte-multiple lengths — confirm
 * against flash_stm32_valid_write().
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	int i, rc = 0;

	for (i = 0; i < len; i += 8, offset += 8) {
		rc = write_dword(dev, offset,
				 UNALIGNED_GET((const uint64_t *) data + (i >> 3)));
		if (rc < 0) {
			return rc;
		}
	}

	return rc;
}

/*
 * The address space is always continuous, even though a subset of G0 SoCs has
 * two flash banks.
 * Only the "physical" flash page-NRs are not continuous on those SoCs.
 * As a result the page numbers used in the zephyr flash api differs
 * from the "physical" flash page number.
 * The first is equal to the address offset divided by the page size, while
 * "physical" pages are numbered starting with 0 on bank1 and 256 on bank2.
 * As a result only a single homogeneous flash page layout needs to be defined.
 */
void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	/* Lazily-initialized singleton; all pages share one size. */
	static struct flash_pages_layout stm32g0_flash_layout = {
		.pages_count = 0,
		.pages_size = 0,
	};

	ARG_UNUSED(dev);

	if (stm32g0_flash_layout.pages_count == 0) {
		stm32g0_flash_layout.pages_count =
			STM32G0_FLASH_SIZE / STM32G0_FLASH_PAGE_SIZE;
		stm32g0_flash_layout.pages_size = STM32G0_FLASH_PAGE_SIZE;
	}

	*layout = &stm32g0_flash_layout;
	*layout_size = 1;
}

/* Override weak function */
int flash_stm32_check_configuration(void)
{
#if defined(STM32G0_DBANK_SUPPORT) && (CONFIG_FLASH_SIZE == 256)
	/* Single bank mode not supported on dual bank SoCs with 256kiB flash */
	if ((FLASH->OPTR & FLASH_OPTR_DUAL_BANK) == 0) {
		LOG_ERR("Single bank configuration not supported by the driver");
		return -ENOTSUP;
	}
#endif
	return 0;
}
```
/content/code_sandbox/drivers/flash/flash_stm32g0x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,772
```unknown
# Flash driver configuration options

#
# Flash driver
#

# The FLASH_HAS_* symbols below are hidden (no prompt) capability flags,
# selected by individual drivers; user-visible options appear under the
# FLASH menuconfig further down.

config FLASH_HAS_DRIVER_ENABLED
	bool
	help
	  This option is enabled when any flash driver is enabled.

config FLASH_HAS_EX_OP
	bool
	help
	  This option is selected by drivers that support flash extended
	  operations.

config FLASH_HAS_EXPLICIT_ERASE
	bool
	help
	  Device does not do erase-on-write (erase-on-program, auto-erase
	  on write) and requires explicit erase procedure to be programmed
	  with random value, in place where it has already been programmed
	  with some other value, as program can only change bits from
	  erased-value to the opposite.
	  All pure Flash devices are evolution of EEPROM where erase has been
	  separated from write, EEPROM has erase-on-write, giving it advantage
	  of higher write speeds at a cost of larger erase block.
	  Note that explicit-erase capability does not warrants that write
	  without erase is not allowed, taking the above restrictions, it
	  only states that write of a random information will require erase.
	  Erase is usually performed in pages, as we have chosen to name the
	  unit in Zephyr, that may have different naming in device
	  specifications, like pages, sectors or blocks, and may vary in size,
	  depending how they are named by vendor.
	  This option should be selected by drivers that serve devices with
	  such characteristic and is used and may be used by users to provide
	  paths in code that only serve such devices, and could be optimized-out
	  by compiler in case where there is no such device in a system.

config FLASH_HAS_NO_EXPLICIT_ERASE
	bool
	help
	  Device does not require explicit erase before programming a new
	  random value at any location that has been previously programmed
	  with some other value. Note that the device may have erase-on-write
	  (auto-erase), as for example in EEPROM devices, but may also have
	  no erase at all.
	  A device driver may still provide erase callback, especially if it
	  is able to perform erase to accelerate further writes or is able to
	  fill the area requested for erase, with single value, faster than
	  consecutive writes that would be used to emulate erase.
	  This option should be selected by drivers that serve devices with
	  such characteristic and is used and may be used by users to provide
	  paths in code that only serve such devices, and could be optimized-out
	  by compiler in case where there is no such device in a system.
	  This option should be selected for any device that can change
	  storage bits, by write, from any value to opposite value at any time.
	  When your driver sets this option you also need to set
	  no_explicit_erase capability in your drivers flash_parameters.

config FLASH_HAS_PAGE_LAYOUT
	bool
	help
	  This option is enabled when the SoC flash driver supports
	  retrieving the layout of flash memory pages.

config FLASH_JESD216
	bool
	help
	  Selected by drivers that support JESD216-compatible flash
	  devices to enable building a common support module.

menuconfig FLASH
	bool "Flash drivers"
	help
	  Enable support for the flash hardware.

if FLASH

module = FLASH
module-str = flash
source "subsys/logging/Kconfig.template.log_config"

config FLASH_JESD216_API
	bool "Provide API to read JESD216 flash parameters"
	depends on FLASH_JESD216
	help
	  This option extends the Zephyr flash API with the ability to access
	  the Serial Flash Discoverable Parameter section allowing runtime
	  determination of serial flash parameters for flash drivers that
	  expose this capability.

config FLASH_SHELL
	bool "Flash shell"
	depends on SHELL && FLASH_PAGE_LAYOUT
	select MPU_ALLOW_FLASH_WRITE if ARM_MPU
	help
	  Enable the flash shell with flash related commands such as test,
	  write, read and erase.

if FLASH_SHELL

config FLASH_SHELL_TEST_COMMANDS
	bool "Flash read/write/erase test commands"
	select CBPRINTF_FP_SUPPORT
	help
	  Enable additional flash shell commands for performing
	  read/write/erase tests with speed output.

config FLASH_SHELL_BUFFER_SIZE
	hex "Flash shell buffer size"
	default 0x4000 if FLASH_SHELL_TEST_COMMANDS
	default 0x1000
	range 0x400 0x1000000
	help
	  Size of the buffer used for flash commands, will determine the
	  maximum size that can be used with a read/write test.

endif # FLASH_SHELL

config FLASH_FILL_BUFFER_SIZE
	int "Buffer size of flash_fill function"
	default 32
	help
	  Size of a buffer used by flash_fill function to fill a device with
	  specific value; this buffer is allocated on stack.
	  The buffer is needed as most devices have write-block alignment
	  requirements that which imposes minimal size of data, which can
	  be written to a device, and alignment of write offset.
	  Even if device does not have such requirement, filling device by
	  single bytes is not efficient.
	  Value selected here should be a multiple of the largest
	  write-block-size among all the memory devices used in system.

if FLASH_HAS_PAGE_LAYOUT

config FLASH_PAGE_LAYOUT
	bool "API for retrieving the layout of pages"
	default FLASH_HAS_PAGE_LAYOUT
	help
	  Enables API for retrieving the layout of flash memory pages.

endif

config FLASH_EX_OP_ENABLED
	bool "API for extended flash operations"
	depends on FLASH_HAS_EX_OP
	default n
	help
	  Enables flash extended operations API. It can be used to perform
	  non-standard operations e.g. manipulating flash protection.

config FLASH_INIT_PRIORITY
	int "Flash init priority"
	default KERNEL_INIT_PRIORITY_DEVICE
	help
	  Flash driver device initialization priority.
	  This initialization priority is used unless the driver
	  implementation has its own initialization priority

# Per-driver option fragments; each guards itself on its own DT compatible.
source "drivers/flash/Kconfig.b91"
source "drivers/flash/Kconfig.cc13xx_cc26xx"
source "drivers/flash/Kconfig.at45"
source "drivers/flash/Kconfig.esp32"
source "drivers/flash/Kconfig.it8xxx2"
source "drivers/flash/Kconfig.nrf"
source "drivers/flash/Kconfig.lpc"
source "drivers/flash/Kconfig.mcux"
source "drivers/flash/Kconfig.mspi"
source "drivers/flash/Kconfig.nios2_qspi"
source "drivers/flash/Kconfig.npcx_fiu"
source "drivers/flash/Kconfig.gecko"
source "drivers/flash/Kconfig.nor"
source "drivers/flash/Kconfig.rpi_pico"
source "drivers/flash/Kconfig.stm32"
source "drivers/flash/Kconfig.stm32_qspi"
source "drivers/flash/Kconfig.stm32_ospi"
source "drivers/flash/Kconfig.stm32_xspi"
source "drivers/flash/Kconfig.sam0"
source "drivers/flash/Kconfig.sam"
source "drivers/flash/Kconfig.simulator"
source "drivers/flash/Kconfig.rv32m1"
source "drivers/flash/Kconfig.nordic_qspi_nor"
source "drivers/flash/Kconfig.smartbond"
source "drivers/flash/Kconfig.cadence_qspi_nor"
source "drivers/flash/Kconfig.gd32"
source "drivers/flash/Kconfig.xmc4xxx"
source "drivers/flash/Kconfig.ifx_cat1"
source "drivers/flash/Kconfig.cadence_nand"
source "drivers/flash/Kconfig.numaker"
source "drivers/flash/Kconfig.nxp_s32"
source "drivers/flash/Kconfig.andes"
source "drivers/flash/Kconfig.ambiq"
source "drivers/flash/Kconfig.nrf_rram"
source "drivers/flash/Kconfig.nrf_mram"
source "drivers/flash/Kconfig.numaker_rmc"

endif # FLASH
```
/content/code_sandbox/drivers/flash/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,648
```objective-c
/*
 * Definitions for the JEDEC Serial Flash Discoverable Parameters (SFDP)
 * tables standardized by JESD216 and its successors. All on-wire data is
 * little-endian; accessors below convert to host byte order.
 */

#ifndef ZEPHYR_DRIVERS_FLASH_JESD216_H_
#define ZEPHYR_DRIVERS_FLASH_JESD216_H_

#include <errno.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
#include <zephyr/types.h>

/* JEDEC Read identification */
#define JESD216_CMD_READ_ID SPI_NOR_CMD_RDID
#define JESD216_OCMD_READ_ID 0x9F60	/* opcode + complement, octal mode */
#define JESD216_READ_ID_LEN 3

/* Following are structures and constants supporting the JEDEC Serial
 * Flash Discoverable Parameters standard, JESD216 and its successors,
 * available at
 * path_to_url
 */

#define JESD216_CMD_READ_SFDP 0x5A
#define JESD216_CMD_BURST_SFDP 0x5B
#define JESD216_OCMD_READ_SFDP 0x5AA5	/* opcode + complement, octal mode */

/* Layout of a JESD216 parameter header. */
struct jesd216_param_header {
	uint8_t id_lsb;		/* ID LSB */
	uint8_t rev_minor;	/* Minor revision number */
	uint8_t rev_major;	/* Major revision number */
	uint8_t len_dw;		/* Length of table in 32-bit DWORDs */
	uint8_t ptp[3];		/* Address of table in SFDP space (LSB@0) */
	uint8_t id_msb;		/* ID MSB */
} __packed;

/* Get the number of bytes required for the parameter table. */
static inline uint32_t jesd216_param_len(const struct jesd216_param_header *hp)
{
	return sizeof(uint32_t) * hp->len_dw;
}

/* Get the ID that identifies the content of the parameter table. */
static inline uint16_t jesd216_param_id(const struct jesd216_param_header *hp)
{
	return ((uint16_t)hp->id_msb << 8) | hp->id_lsb;
}

/* Get the address within the SFDP where the data for the table is
 * stored (24-bit pointer, assembled from the three LSB-first bytes).
 */
static inline uint32_t jesd216_param_addr(const struct jesd216_param_header *hp)
{
	return ((hp->ptp[2] << 16)
		| (hp->ptp[1] << 8)
		| (hp->ptp[0] << 0));
}

/* Layout of the Serial Flash Discoverable Parameters header. */
struct jesd216_sfdp_header {
	uint32_t magic;		/* "SFDP" in little endian */
	uint8_t rev_minor;	/* Minor revision number */
	uint8_t rev_major;	/* Major revision number */
	uint8_t nph;		/* Number of parameter headers */
	uint8_t access;		/* Access protocol */
	struct jesd216_param_header phdr[]; /* Headers */
} __packed;

/* SFDP access protocol for backwards compatibility with JESD216B. */
#define JESD216_SFDP_AP_LEGACY 0xFF

/* The expected value from the jesd216_sfdp::magic field in host byte
 * order.
 */
#define JESD216_SFDP_MAGIC 0x50444653

/* All JESD216 data is read from the device in little-endian byte
 * order.  For JEDEC parameter tables defined through JESD216D-01 the
 * parameters are defined by 32-bit words that may need to be
 * byte-swapped to extract their information.
 *
 * A 16-bit ID from the parameter header is used to identify the
 * content of each table.  The first parameter table in the SFDP
 * hierarchy must be a Basic Flash Parameter table (ID 0xFF00).
 */

/* JESD216D-01 section 6.4: Basic Flash Parameter */
#define JESD216_SFDP_PARAM_ID_BFP 0xFF00
/* JESD216D-01 section 6.5: Sector Map Parameter */
#define JESD216_SFDP_PARAM_ID_SECTOR_MAP 0xFF81
/* JESD216D-01 section 6.6: 4-Byte Address Instruction Parameter */
#define JESD216_SFDP_PARAM_ID_4B_ADDR_INSTR 0xFF84
/* JESD216D-01 section 6.7: xSPI (Profile 1.0) Parameter */
#define JESD216_SFDP_PARAM_ID_XSPI_PROFILE_1V0 0xFF05
/* JESD216D-01 section 6.8: xSPI (Profile 2.0) Parameter */
#define JESD216_SFDP_PARAM_ID_XSPI_PROFILE_2V0 0xFF06

/* Macro to define the number of bytes required for the SFDP pheader
 * and @p nph parameter headers.
 *
 * @param nph the number of parameter headers to be read.  1 is
 * sufficient for basic functionality.
 *
 * @return required buffer size in bytes.
 */
#define JESD216_SFDP_SIZE(nph) (sizeof(struct jesd216_sfdp_header) \
				+ ((nph) * sizeof(struct jesd216_param_header)))

/** Extract the magic number from the SFDP structure in host byte order.
 *
 * If this compares equal to JESD216_SFDP_MAGIC then the SFDP header
 * may have been read correctly.
 */
static inline uint32_t jesd216_sfdp_magic(const struct jesd216_sfdp_header *hp)
{
	return sys_le32_to_cpu(hp->magic);
}

/* Layout of the Basic Flash Parameters table.
 *
 * SFDP through JESD216B supported 9 DWORD values.  JESD216C extended
 * this to 17, and JESD216D to 20.
 *
 * All values are expected to be stored as little-endian and must be
 * converted to host byte order to extract the bit fields defined in
 * the standard.  Rather than pre-define layouts to access to all
 * potential fields this header provides functions for specific fields
 * known to be important, such as density and erase command support.
 */
struct jesd216_bfp {
	uint32_t dw1;
	uint32_t dw2;
	uint32_t dw3;
	uint32_t dw4;
	uint32_t dw5;
	uint32_t dw6;
	uint32_t dw7;
	uint32_t dw8;
	uint32_t dw9;
	uint32_t dw10[];	/* DW10 onward (JESD216A and later) */
} __packed;

/* Provide a few word-specific flags and bitfield ranges for values
 * that an application or driver might expect to want to extract.
 *
 * See the JESD216 specification for the interpretation of these
 * bitfields.
 */
#define JESD216_SFDP_BFP_DW1_DTRCLK_FLG BIT(19)
#define JESD216_SFDP_BFP_DW1_ADDRBYTES_MASK (BIT(17) | BIT(18))
#define JESD216_SFDP_BFP_DW1_ADDRBYTES_SHFT 17
#define JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_3B 0
#define JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_3B4B 1
#define JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_4B 2
#define JESD216_SFDP_BFP_DW1_4KERASEINSTR_SHFT 8
#define JESD216_SFDP_BFP_DW1_4KERASEINSTR_MASK (0xFF << JESD216_SFDP_BFP_DW1_4KERASEINSTR_SHFT)
#define JESD216_SFDP_BFP_DW1_WEISWVSR_FLG BIT(4)
#define JESD216_SFDP_BFP_DW1_VSRBP_FLG BIT(3)
#define JESD216_SFDP_BFP_DW1_WRTGRAN_FLG BIT(2)
#define JESD216_SFDP_BFP_DW1_BSERSZ_SHFT 0
#define JESD216_SFDP_BFP_DW1_BSERSZ_MASK (0x03 << JESD216_SFDP_BFP_DW1_BSERSZ_SHFT)
#define JESD216_SFDP_BFP_DW1_BSERSZ_VAL_4KSUP 0x01
#define JESD216_SFDP_BFP_DW1_BSERSZ_VAL_4KNOTSUP 0x03

#define JESD216_SFDP_BFP_DW12_SUSPRESSUP_FLG BIT(31)

/* Data can be extracted from the BFP words using these APIs:
 *
 * * DW1 (capabilities) use DW1 bitfield macros above or
 *   jesd216_read_support().
 * * DW2 (density) use jesd216_bfp_density().
 * * DW3-DW7 (instr) use jesd216_bfp_read_support().
 * * DW8-DW9 (erase types) use jesd216_bfp_erase().
 *
 * JESD216A (16 DW)
 *
 * * DW10 (erase times) use jesd216_bfp_erase_type_times().
 * * DW11 (other times) use jesd216_bfp_decode_dw11().
 * * DW12-13 (suspend/resume) no API except
 *   JESD216_SFDP_BFP_DW12_SUSPRESSUP_FLG.
 * * DW14 (deep power down) use jesd216_bfp_decode_dw14().
 * * DW15-16 no API except jesd216_bfp_read_support().
 *
 * JESD216C (20 DW)
 * * DW17-20 (quad/oct support) no API except jesd216_bfp_read_support().
 */

/* Extract the supported address bytes from BFP DW1. */
static inline uint8_t jesd216_bfp_addrbytes(const struct jesd216_bfp *hp)
{
	uint32_t dw1 = sys_le32_to_cpu(hp->dw1);
	uint8_t addr_support = (dw1 & JESD216_SFDP_BFP_DW1_ADDRBYTES_MASK)
			       >> JESD216_SFDP_BFP_DW1_ADDRBYTES_SHFT;

	return addr_support;
}

/* Extract the density of the chip in bits from BFP DW2.
 * Bit 31 set means the remaining bits encode a power-of-two exponent;
 * otherwise the field is (density - 1).
 */
static inline uint64_t jesd216_bfp_density(const struct jesd216_bfp *hp)
{
	uint32_t dw = sys_le32_to_cpu(hp->dw2);

	if (dw & BIT(31)) {
		return BIT64(dw & BIT_MASK(31));
	}
	return 1U + (uint64_t)dw;
}

/* Protocol mode enumeration types.
 *
 * Modes are identified by fields representing the number of I/O
 * signals and the data rate in the transfer.  The I/O width may be 1,
 * 2, 4, or 8 I/O signals.  The data rate may be single or double.
 * SDR is assumed; DDR is indicated by a D following the I/O width.
 *
 * A transfer has three phases, and width/rate is specified for each
 * in turn:
 * * Transfer of the command
 * * Transfer of the command modifier (e.g. address)
 * * Transfer of the data.
 *
 * Modes explicitly mentioned in JESD216 or JESD251 are given
 * enumeration values below, which can be used to extract information
 * about instruction support.
 */
enum jesd216_mode_type {
	JESD216_MODE_044,	/* implied instruction, execute in place */
	JESD216_MODE_088,
	JESD216_MODE_111,
	JESD216_MODE_112,
	JESD216_MODE_114,
	JESD216_MODE_118,
	JESD216_MODE_122,
	JESD216_MODE_144,
	JESD216_MODE_188,
	JESD216_MODE_222,
	JESD216_MODE_444,
	JESD216_MODE_44D4D,
	JESD216_MODE_888,
	JESD216_MODE_8D8D8D,
	JESD216_MODE_LIMIT,
};

/* Command to use for fast read operations in a specified protocol
 * mode.
 */
struct jesd216_instr {
	uint8_t instr;
	uint8_t mode_clocks;
	uint8_t wait_states;
};

/* Determine whether a particular operational mode is supported for
 * read, and possibly what command may be used.
 *
 * @note For @p mode JESD216_MODE_111 this function will return zero
 * to indicate that standard read (instruction 03h) is supported, but
 * without providing information on how.  SFDP does not provide an
 * indication of support for 1-1-1 Fast Read (0Bh).
 *
 * @param php pointer to the BFP header.
 *
 * @param bfp pointer to the BFP table.
 *
 * @param mode the desired protocol mode.
 *
 * @param res where to store instruction information.  Pass a null
 * pointer to test for support without retrieving instruction
 * information.
 *
 * @retval positive if instruction is supported and *res has been set.
 * @retval 0 if instruction is supported but *res has not been set
 * (e.g. no instruction needed, or instruction cannot be read from
 * BFP).
 * @retval -ENOTSUP if instruction is not supported.
 */
int jesd216_bfp_read_support(const struct jesd216_param_header *php,
			     const struct jesd216_bfp *bfp,
			     enum jesd216_mode_type mode,
			     struct jesd216_instr *res);

/* Description of a supported erase operation. */
struct jesd216_erase_type {
	/* The command opcode used for an erase operation. */
	uint8_t cmd;

	/* The value N when the erase operation erases a 2^N byte
	 * region.
	 */
	uint8_t exp;
};

/* The number of erase types defined in a JESD216 Basic Flash
 * Parameter table.
 */
#define JESD216_NUM_ERASE_TYPES 4

/* Extract a supported erase size and command from BFP DW8 or DW9.
 *
 * @param bfp pointer to the parameter table.
 *
 * @param idx the erase type index, from 1 through 4.  Only index 1 is
 * guaranteed to be present.
 *
 * @param etp where to store the command and size used for the erase.
 *
 * @retval 0 if the erase type index provided usable information.
 * @retval -EINVAL if the erase type index is undefined.
 */
int jesd216_bfp_erase(const struct jesd216_bfp *bfp,
		      uint8_t idx,
		      struct jesd216_erase_type *etp);

/* Extract typical and maximum erase times from DW10.
 *
 * @param php pointer to the BFP header.
 *
 * @param bfp pointer to the BFP table.
 *
 * @param idx the erase type index, from 1 through 4.  For meaningful
 * results the index should be one for which jesd216_bfp_erase()
 * returns success.
 *
 * @param typ_ms where to store the typical erase time (in
 * milliseconds) for the specified erase type.
 *
 * @retval -ENOTSUP if the erase type index is undefined.
 * @retval positive is a multiplier that converts typical erase times
 * to maximum erase times.
 */
int jesd216_bfp_erase_type_times(const struct jesd216_param_header *php,
				 const struct jesd216_bfp *bfp,
				 uint8_t idx,
				 uint32_t *typ_ms);

/* Get the page size from the Basic Flash Parameters.
 *
 * @param php pointer to the BFP header.
 *
 * @param bfp pointer to the BFP table.
 *
 * @return the page size in bytes from the parameters if supported,
 * otherwise 256.
 */
static inline uint32_t jesd216_bfp_page_size(const struct jesd216_param_header *php,
					     const struct jesd216_bfp *bfp)
{
	/* Page size introduced in JESD216A */
	if (php->len_dw < 11) {
		return 256;
	}

	uint32_t dw11 = sys_le32_to_cpu(bfp->dw10[1]);
	uint8_t exp = (dw11 >> 4) & 0x0F;

	return BIT(exp);
}

/* Decoded data from JESD216 DW11. */
struct jesd216_bfp_dw11 {
	/* Typical time for chip (die) erase, in milliseconds */
	uint16_t chip_erase_ms;

	/* Typical time for first byte program, in microseconds */
	uint16_t byte_prog_first_us;

	/* Typical time per byte for byte program after first, in
	 * microseconds
	 */
	uint16_t byte_prog_addl_us;

	/* Typical time for page program, in microseconds */
	uint16_t page_prog_us;

	/* Multiplier to get maximum time from typical times. */
	uint16_t typ_max_factor;

	/* Number of bytes in a page. */
	uint16_t page_size;
};

/* Get data from BFP DW11.
 *
 * @param php pointer to the BFP header.
 *
 * @param bfp pointer to the BFP table.
 *
 * @param res pointer to where to store the decoded data.
 *
 * @retval -ENOTSUP if this information is not available from this BFP table.
 * @retval 0 on successful storage into @c *res.
 */
int jesd216_bfp_decode_dw11(const struct jesd216_param_header *php,
			    const struct jesd216_bfp *bfp,
			    struct jesd216_bfp_dw11 *res);

/* Decoded data from JESD216 DW14 */
struct jesd216_bfp_dw14 {
	/* Instruction used to enter deep power-down */
	uint8_t enter_dpd_instr;

	/* Instruction used to exit deep power-down */
	uint8_t exit_dpd_instr;

	/* Bits defining ways busy status may be polled. */
	uint8_t poll_options;

	/* Time after issuing exit instruction until device is ready
	 * to accept a command, in nanoseconds.
	 */
	uint32_t exit_delay_ns;
};

/* Get data from BFP DW14.
 *
 * @param php pointer to the BFP header.
 *
 * @param bfp pointer to the BFP table.
 *
 * @param res pointer to where to store the decoded data.
 *
 * @retval -ENOTSUP if this information is not available from this BFP table.
 * @retval 0 on successful storage into @c *res.
 */
int jesd216_bfp_decode_dw14(const struct jesd216_param_header *php,
			    const struct jesd216_bfp *bfp,
			    struct jesd216_bfp_dw14 *res);

/* DW15 Quad Enable Requirements specifies status register QE bits.
 *
 * Two common configurations are summarized; see the specification for
 * full details of how to use these values.
 */
enum jesd216_dw15_qer_type {
	/* No QE status required for 1-1-4 or 1-4-4 mode */
	JESD216_DW15_QER_NONE = 0,
	JESD216_DW15_QER_S2B1v1 = 1,
	/* Bit 6 of SR byte must be set to enable 1-1-4 or 1-4-4 mode.
	 * SR is one byte.
	 */
	JESD216_DW15_QER_S1B6 = 2,
	JESD216_DW15_QER_S2B7 = 3,
	JESD216_DW15_QER_S2B1v4 = 4,
	JESD216_DW15_QER_S2B1v5 = 5,
	JESD216_DW15_QER_S2B1v6 = 6,
};

#define JESD216_DW15_QER_VAL_NONE 0
#define JESD216_DW15_QER_VAL_S2B1v1 1
#define JESD216_DW15_QER_VAL_S1B6 2
#define JESD216_DW15_QER_VAL_S2B7 3
#define JESD216_DW15_QER_VAL_S2B1v4 4
#define JESD216_DW15_QER_VAL_S2B1v5 5
#define JESD216_DW15_QER_VAL_S2B1v6 6

/* Decoded data from JESD216 DW15 */
struct jesd216_bfp_dw15 {
	/* If true clear NVECR bit 4 to disable HOLD/RESET */
	bool hold_reset_disable: 1;

	/* Encoded jesd216_dw15_qer_type */
	unsigned int qer: 3;

	/* 0-4-4 mode entry method */
	unsigned int entry_044: 4;

	/* 0-4-4 mode exit method */
	unsigned int exit_044: 6;

	/* True if 0-4-4 mode is supported */
	bool support_044: 1;

	/* 4-4-4 mode enable sequences */
	unsigned int enable_444: 5;

	/* 4-4-4 mode disable sequences */
	unsigned int disable_444: 4;
};

/* Get data from BFP DW15.
 *
 * @param php pointer to the BFP header.
 *
 * @param bfp pointer to the BFP table.
 *
 * @param res pointer to where to store the decoded data.
 *
 * @retval -ENOTSUP if this information is not available from this BFP table.
 * @retval 0 on successful storage into @c *res.
 */
int jesd216_bfp_decode_dw15(const struct jesd216_param_header *php,
			    const struct jesd216_bfp *bfp,
			    struct jesd216_bfp_dw15 *res);

/* Decoded data from JESD216_DW16 */
struct jesd216_bfp_dw16 {
	/* Bits specifying supported modes of entering 4-byte
	 * addressing.
	 */
	unsigned int enter_4ba: 8;

	/* Bits specifying supported modes of exiting 4-byte
	 * addressing.
	 */
	unsigned int exit_4ba: 10;

	/* Bits specifying the soft reset and rescue sequence to
	 * restore the device to its power-on state.
	 */
	unsigned int srrs_support: 6;

	/* Bits specifying how to modify status register 1, and which
	 * bits are non-volatile.
	 */
	unsigned int sr1_interface: 7;
};

/* Get data from BFP DW16.
 *
 * @param php pointer to the BFP header.
 *
 * @param bfp pointer to the BFP table.
 *
 * @param res pointer to where to store the decoded data.
 *
 * @retval -ENOTSUP if this information is not available from this BFP table.
 * @retval 0 on successful storage into @c *res.
 */
int jesd216_bfp_decode_dw16(const struct jesd216_param_header *php,
			    const struct jesd216_bfp *bfp,
			    struct jesd216_bfp_dw16 *res);

#endif /* ZEPHYR_DRIVERS_FLASH_JESD216_H_ */
```
/content/code_sandbox/drivers/flash/jesd216.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,918
```unknown
# STM32 Quad SPI flash driver configuration options

# True when the devicetree "quadspi" node carries a "dmas" property;
# used below to pull in DMA support only when the board wires it up.
DT_STM32_QUADSPI_HAS_DMA := $(dt_nodelabel_has_prop,quadspi,dmas)

config FLASH_STM32_QSPI
	bool "STM32 Quad SPI Flash driver"
	default y
	depends on DT_HAS_ST_STM32_QSPI_NOR_ENABLED
	select USE_STM32_HAL_QSPI
	# H7 routes QSPI DMA through the MDMA controller
	select USE_STM32_HAL_MDMA if SOC_SERIES_STM32H7X
	select FLASH_HAS_DRIVER_ENABLED
	select FLASH_JESD216
	select FLASH_HAS_PAGE_LAYOUT
	select FLASH_HAS_EXPLICIT_ERASE
	select DMA if $(DT_STM32_QUADSPI_HAS_DMA)
	select USE_STM32_HAL_DMA if $(DT_STM32_QUADSPI_HAS_DMA)
	help
	  Enable QSPI-NOR support on the STM32 family of processors.
```
/content/code_sandbox/drivers/flash/Kconfig.stm32_qspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
174
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #define DT_DRV_COMPAT st_stm32_flash_controller #include <string.h> #include <zephyr/drivers/flash.h> #include <zephyr/drivers/flash/stm32_flash_api_extensions.h> #include <zephyr/init.h> #include <soc.h> #include <stm32_ll_bus.h> #include <stm32_ll_rcc.h> #include <zephyr/logging/log.h> #include "flash_stm32.h" #include "stm32_hsem.h" LOG_MODULE_REGISTER(flash_stm32, CONFIG_FLASH_LOG_LEVEL); /* Let's wait for double the max erase time to be sure that the operation is * completed. */ #define STM32_FLASH_TIMEOUT \ (2 * DT_PROP(DT_INST(0, st_stm32_nv_flash), max_erase_time)) static const struct flash_parameters flash_stm32_parameters = { .write_block_size = FLASH_STM32_WRITE_BLOCK_SIZE, /* Some SoCs (L0/L1) use an EEPROM under the hood. Distinguish * between them based on the presence of the PECR register. */ #if defined(FLASH_PECR_ERASE) .erase_value = 0, #else .erase_value = 0xff, #endif }; static int flash_stm32_write_protection(const struct device *dev, bool enable); bool __weak flash_stm32_valid_range(const struct device *dev, off_t offset, uint32_t len, bool write) { if (write && !flash_stm32_valid_write(offset, len)) { return false; } return flash_stm32_range_exists(dev, offset, len); } int __weak flash_stm32_check_configuration(void) { return 0; } #if defined(CONFIG_MULTITHREADING) /* * This is named flash_stm32_sem_take instead of flash_stm32_lock (and * similarly for flash_stm32_sem_give) to avoid confusion with locking * actual flash pages. 
*/ static inline void _flash_stm32_sem_take(const struct device *dev) { k_sem_take(&FLASH_STM32_PRIV(dev)->sem, K_FOREVER); z_stm32_hsem_lock(CFG_HW_FLASH_SEMID, HSEM_LOCK_WAIT_FOREVER); } static inline void _flash_stm32_sem_give(const struct device *dev) { z_stm32_hsem_unlock(CFG_HW_FLASH_SEMID); k_sem_give(&FLASH_STM32_PRIV(dev)->sem); } #define flash_stm32_sem_init(dev) k_sem_init(&FLASH_STM32_PRIV(dev)->sem, 1, 1) #define flash_stm32_sem_take(dev) _flash_stm32_sem_take(dev) #define flash_stm32_sem_give(dev) _flash_stm32_sem_give(dev) #else #define flash_stm32_sem_init(dev) #define flash_stm32_sem_take(dev) #define flash_stm32_sem_give(dev) #endif #if !defined(CONFIG_SOC_SERIES_STM32WBX) static int flash_stm32_check_status(const struct device *dev) { if (FLASH_STM32_REGS(dev)->FLASH_STM32_SR & FLASH_STM32_SR_ERRORS) { LOG_DBG("Status: 0x%08lx", FLASH_STM32_REGS(dev)->FLASH_STM32_SR & FLASH_STM32_SR_ERRORS); /* Clear errors to unblock usage of the flash */ FLASH_STM32_REGS(dev)->FLASH_STM32_SR = FLASH_STM32_REGS(dev)->FLASH_STM32_SR & FLASH_STM32_SR_ERRORS; return -EIO; } return 0; } #endif /* CONFIG_SOC_SERIES_STM32WBX */ int flash_stm32_wait_flash_idle(const struct device *dev) { int64_t timeout_time = k_uptime_get() + STM32_FLASH_TIMEOUT; int rc; uint32_t busy_flags; rc = flash_stm32_check_status(dev); if (rc < 0) { return -EIO; } busy_flags = FLASH_STM32_SR_BUSY; /* Some Series can't modify FLASH_CR reg while CFGBSY is set. Wait as well */ #if defined(FLASH_STM32_SR_CFGBSY) busy_flags |= FLASH_STM32_SR_CFGBSY; #endif while ((FLASH_STM32_REGS(dev)->FLASH_STM32_SR & busy_flags)) { if (k_uptime_get() > timeout_time) { LOG_ERR("Timeout! 
val: %d", STM32_FLASH_TIMEOUT); return -EIO; } } return 0; } static void flash_stm32_flush_caches(const struct device *dev, off_t offset, size_t len) { #if defined(CONFIG_SOC_SERIES_STM32F0X) || defined(CONFIG_SOC_SERIES_STM32F3X) || \ defined(CONFIG_SOC_SERIES_STM32G0X) || defined(CONFIG_SOC_SERIES_STM32L5X) || \ defined(CONFIG_SOC_SERIES_STM32U5X) || defined(CONFIG_SOC_SERIES_STM32H5X) ARG_UNUSED(dev); ARG_UNUSED(offset); ARG_UNUSED(len); #elif defined(CONFIG_SOC_SERIES_STM32F4X) || \ defined(CONFIG_SOC_SERIES_STM32L4X) || \ defined(CONFIG_SOC_SERIES_STM32WBX) || \ defined(CONFIG_SOC_SERIES_STM32G4X) ARG_UNUSED(offset); ARG_UNUSED(len); FLASH_TypeDef *regs = FLASH_STM32_REGS(dev); if (regs->ACR & FLASH_ACR_DCEN) { regs->ACR &= ~FLASH_ACR_DCEN; regs->ACR |= FLASH_ACR_DCRST; regs->ACR &= ~FLASH_ACR_DCRST; regs->ACR |= FLASH_ACR_DCEN; } #elif defined(CONFIG_SOC_SERIES_STM32F7X) SCB_InvalidateDCache_by_Addr((uint32_t *)(FLASH_STM32_BASE_ADDRESS + offset), len); #endif } static int flash_stm32_read(const struct device *dev, off_t offset, void *data, size_t len) { if (!flash_stm32_valid_range(dev, offset, len, false)) { LOG_ERR("Read range invalid. Offset: %ld, len: %zu", (long int) offset, len); return -EINVAL; } if (!len) { return 0; } LOG_DBG("Read offset: %ld, len: %zu", (long int) offset, len); memcpy(data, (uint8_t *) FLASH_STM32_BASE_ADDRESS + offset, len); return 0; } static int flash_stm32_erase(const struct device *dev, off_t offset, size_t len) { int rc; if (!flash_stm32_valid_range(dev, offset, len, true)) { LOG_ERR("Erase range invalid. 
Offset: %ld, len: %zu", (long int) offset, len); return -EINVAL; } if (!len) { return 0; } flash_stm32_sem_take(dev); LOG_DBG("Erase offset: %ld, len: %zu", (long int) offset, len); rc = flash_stm32_write_protection(dev, false); if (rc == 0) { rc = flash_stm32_block_erase_loop(dev, offset, len); } flash_stm32_flush_caches(dev, offset, len); int rc2 = flash_stm32_write_protection(dev, true); if (!rc) { rc = rc2; } flash_stm32_sem_give(dev); return rc; } static int flash_stm32_write(const struct device *dev, off_t offset, const void *data, size_t len) { int rc; if (!flash_stm32_valid_range(dev, offset, len, true)) { LOG_ERR("Write range invalid. Offset: %ld, len: %zu", (long int) offset, len); return -EINVAL; } if (!len) { return 0; } flash_stm32_sem_take(dev); LOG_DBG("Write offset: %ld, len: %zu", (long int) offset, len); rc = flash_stm32_write_protection(dev, false); if (rc == 0) { rc = flash_stm32_write_range(dev, offset, data, len); } int rc2 = flash_stm32_write_protection(dev, true); if (!rc) { rc = rc2; } flash_stm32_sem_give(dev); return rc; } static int flash_stm32_write_protection(const struct device *dev, bool enable) { FLASH_TypeDef *regs = FLASH_STM32_REGS(dev); int rc = 0; if (enable) { rc = flash_stm32_wait_flash_idle(dev); if (rc) { flash_stm32_sem_give(dev); return rc; } } #if defined(FLASH_SECURITY_NS) if (enable) { regs->NSCR |= FLASH_STM32_NSLOCK; } else { if (regs->NSCR & FLASH_STM32_NSLOCK) { regs->NSKEYR = FLASH_KEY1; regs->NSKEYR = FLASH_KEY2; } } #elif defined(FLASH_CR_LOCK) if (enable) { regs->CR |= FLASH_CR_LOCK; } else { if (regs->CR & FLASH_CR_LOCK) { regs->KEYR = FLASH_KEY1; regs->KEYR = FLASH_KEY2; } } #else if (enable) { regs->PECR |= FLASH_PECR_PRGLOCK; regs->PECR |= FLASH_PECR_PELOCK; } else { if (regs->PECR & FLASH_PECR_PRGLOCK) { LOG_DBG("Disabling write protection"); regs->PEKEYR = FLASH_PEKEY1; regs->PEKEYR = FLASH_PEKEY2; regs->PRGKEYR = FLASH_PRGKEY1; regs->PRGKEYR = FLASH_PRGKEY2; } if (FLASH->PECR & FLASH_PECR_PRGLOCK) { 
LOG_ERR("Unlock failed"); rc = -EIO; } } #endif /* FLASH_SECURITY_NS */ if (enable) { LOG_DBG("Enable write protection"); } else { LOG_DBG("Disable write protection"); } return rc; } int flash_stm32_option_bytes_lock(const struct device *dev, bool enable) { FLASH_TypeDef *regs = FLASH_STM32_REGS(dev); #if defined(FLASH_OPTCR_OPTLOCK) /* F2, F4, F7 and H7 */ if (enable) { regs->OPTCR |= FLASH_OPTCR_OPTLOCK; } else if (regs->OPTCR & FLASH_OPTCR_OPTLOCK) { regs->OPTKEYR = FLASH_OPT_KEY1; regs->OPTKEYR = FLASH_OPT_KEY2; } #else int rc; /* Unlock CR/PECR/NSCR register if needed. */ if (!enable) { rc = flash_stm32_write_protection(dev, false); if (rc) { return rc; } } #if defined(FLASH_CR_OPTWRE) /* F0, F1 and F3 */ if (enable) { regs->CR &= ~FLASH_CR_OPTWRE; } else if (!(regs->CR & FLASH_CR_OPTWRE)) { regs->OPTKEYR = FLASH_OPTKEY1; regs->OPTKEYR = FLASH_OPTKEY2; } #elif defined(FLASH_CR_OPTLOCK) /* G0, G4, L4, WB and WL */ if (enable) { regs->CR |= FLASH_CR_OPTLOCK; } else if (regs->CR & FLASH_CR_OPTLOCK) { regs->OPTKEYR = FLASH_OPTKEY1; regs->OPTKEYR = FLASH_OPTKEY2; } #elif defined(FLASH_PECR_OPTLOCK) /* L0 and L1 */ if (enable) { regs->PECR |= FLASH_PECR_OPTLOCK; } else if (regs->PECR & FLASH_PECR_OPTLOCK) { regs->OPTKEYR = FLASH_OPTKEY1; regs->OPTKEYR = FLASH_OPTKEY2; } #elif defined(FLASH_NSCR_OPTLOCK) /* L5 and U5 */ if (enable) { regs->NSCR |= FLASH_NSCR_OPTLOCK; } else if (regs->NSCR & FLASH_NSCR_OPTLOCK) { regs->OPTKEYR = FLASH_OPTKEY1; regs->OPTKEYR = FLASH_OPTKEY2; } #elif defined(FLASH_NSCR1_OPTLOCK) /* WBA */ if (enable) { regs->NSCR1 |= FLASH_NSCR1_OPTLOCK; } else if (regs->NSCR1 & FLASH_NSCR1_OPTLOCK) { regs->OPTKEYR = FLASH_OPTKEY1; regs->OPTKEYR = FLASH_OPTKEY2; } #endif /* Lock CR/PECR/NSCR register if needed. 
*/ if (enable) { rc = flash_stm32_write_protection(dev, true); if (rc) { return rc; } } #endif if (enable) { LOG_DBG("Option bytes locked"); } else { LOG_DBG("Option bytes unlocked"); } return 0; } #if defined(CONFIG_FLASH_EX_OP_ENABLED) && defined(CONFIG_FLASH_STM32_BLOCK_REGISTERS) static int flash_stm32_control_register_disable(const struct device *dev) { FLASH_TypeDef *regs = FLASH_STM32_REGS(dev); #if defined(FLASH_CR_LOCK) /* F0, F1, F2, F3, F4, F7, L4, G0, G4, H7, WB, WL \ */ /* * Access to control register can be disabled by writing wrong key to * the key register. Option register will remain disabled until reset. * Writing wrong key causes a bus fault, so we need to set FAULTMASK to * disable faults, and clear bus fault pending bit before enabling them * again. */ regs->CR |= FLASH_CR_LOCK; __set_FAULTMASK(1); regs->KEYR = 0xffffffff; /* Clear Bus Fault pending bit */ SCB->SHCSR &= ~SCB_SHCSR_BUSFAULTPENDED_Msk; __set_FAULTMASK(0); return 0; #else ARG_UNUSED(regs); return -ENOTSUP; #endif } static int flash_stm32_option_bytes_disable(const struct device *dev) { FLASH_TypeDef *regs = FLASH_STM32_REGS(dev); #if defined(FLASH_OPTCR_OPTLOCK) /* F2, F4, F7 and H7 */ /* * Access to option register can be disabled by writing wrong key to * the key register. Option register will remain disabled until reset. * Writing wrong key causes a bus fault, so we need to set FAULTMASK to * disable faults, and clear bus fault pending bit before enabling them * again. 
*/ regs->OPTCR |= FLASH_OPTCR_OPTLOCK; __set_FAULTMASK(1); regs->OPTKEYR = 0xffffffff; /* Clear Bus Fault pending bit */ SCB->SHCSR &= ~SCB_SHCSR_BUSFAULTPENDED_Msk; __set_FAULTMASK(0); return 0; #else ARG_UNUSED(regs); return -ENOTSUP; #endif } #endif /* CONFIG_FLASH_STM32_BLOCK_REGISTERS */ static const struct flash_parameters * flash_stm32_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_stm32_parameters; } #ifdef CONFIG_FLASH_EX_OP_ENABLED static int flash_stm32_ex_op(const struct device *dev, uint16_t code, const uintptr_t in, void *out) { int rv = -ENOTSUP; flash_stm32_sem_take(dev); switch (code) { #if defined(CONFIG_FLASH_STM32_WRITE_PROTECT) case FLASH_STM32_EX_OP_SECTOR_WP: rv = flash_stm32_ex_op_sector_wp(dev, in, out); break; #endif /* CONFIG_FLASH_STM32_WRITE_PROTECT */ #if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION) case FLASH_STM32_EX_OP_RDP: rv = flash_stm32_ex_op_rdp(dev, in, out); break; #endif /* CONFIG_FLASH_STM32_READOUT_PROTECTION */ #if defined(CONFIG_FLASH_STM32_BLOCK_REGISTERS) case FLASH_STM32_EX_OP_BLOCK_OPTION_REG: rv = flash_stm32_option_bytes_disable(dev); break; case FLASH_STM32_EX_OP_BLOCK_CONTROL_REG: rv = flash_stm32_control_register_disable(dev); break; #endif /* CONFIG_FLASH_STM32_BLOCK_REGISTERS */ } flash_stm32_sem_give(dev); return rv; } #endif static struct flash_stm32_priv flash_data = { .regs = (FLASH_TypeDef *) DT_INST_REG_ADDR(0), /* Getting clocks information from device tree description depending * on the presence of 'clocks' property. 
*/ #if DT_INST_NODE_HAS_PROP(0, clocks) .pclken = { .enr = DT_INST_CLOCKS_CELL(0, bits), .bus = DT_INST_CLOCKS_CELL(0, bus), } #endif }; static const struct flash_driver_api flash_stm32_api = { .erase = flash_stm32_erase, .write = flash_stm32_write, .read = flash_stm32_read, .get_parameters = flash_stm32_get_parameters, #ifdef CONFIG_FLASH_PAGE_LAYOUT .page_layout = flash_stm32_page_layout, #endif #ifdef CONFIG_FLASH_EX_OP_ENABLED .ex_op = flash_stm32_ex_op, #endif }; static int stm32_flash_init(const struct device *dev) { int rc; /* Below is applicable to F0, F1, F3, G0, G4, L1, L4, L5, U5 & WB55 series. * For F2, F4, F7 & H7 series, this is not applicable. */ #if DT_INST_NODE_HAS_PROP(0, clocks) struct flash_stm32_priv *p = FLASH_STM32_PRIV(dev); const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); /* * On STM32 F0, F1, F3 & L1 series, flash interface clock source is * always HSI, so statically enable HSI here. */ #if defined(CONFIG_SOC_SERIES_STM32F0X) || \ defined(CONFIG_SOC_SERIES_STM32F1X) || \ defined(CONFIG_SOC_SERIES_STM32F3X) || \ defined(CONFIG_SOC_SERIES_STM32L1X) LL_RCC_HSI_Enable(); while (!LL_RCC_HSI_IsReady()) { } #endif if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* enable clock */ if (clock_control_on(clk, (clock_control_subsys_t)&p->pclken) != 0) { LOG_ERR("Failed to enable clock"); return -EIO; } #endif #ifdef CONFIG_SOC_SERIES_STM32WBX LL_AHB3_GRP1_EnableClock(LL_AHB3_GRP1_PERIPH_HSEM); #endif /* CONFIG_SOC_SERIES_STM32WBX */ flash_stm32_sem_init(dev); LOG_DBG("Flash @0x%x initialized. 
BS: %zu", FLASH_STM32_BASE_ADDRESS, flash_stm32_parameters.write_block_size); /* Check Flash configuration */ rc = flash_stm32_check_configuration(); if (rc < 0) { return rc; } #if ((CONFIG_FLASH_LOG_LEVEL >= LOG_LEVEL_DBG) && CONFIG_FLASH_PAGE_LAYOUT) const struct flash_pages_layout *layout; size_t layout_size; flash_stm32_page_layout(dev, &layout, &layout_size); for (size_t i = 0; i < layout_size; i++) { LOG_DBG("Block %zu: bs: %zu count: %zu", i, layout[i].pages_size, layout[i].pages_count); } #endif return 0; } DEVICE_DT_INST_DEFINE(0, stm32_flash_init, NULL, &flash_data, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_stm32_api); ```
/content/code_sandbox/drivers/flash/flash_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,450
```c /* * */ #define DT_DRV_COMPAT gd_gd32_flash_controller #include "flash_gd32.h" #include <zephyr/kernel.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/flash.h> #include <zephyr/logging/log.h> #include <gd32_fmc.h> LOG_MODULE_REGISTER(flash_gd32, CONFIG_FLASH_LOG_LEVEL); struct flash_gd32_data { struct k_sem mutex; }; static struct flash_gd32_data flash_data; static const struct flash_parameters flash_gd32_parameters = { .write_block_size = SOC_NV_FLASH_PRG_SIZE, .erase_value = 0xff, }; static int flash_gd32_read(const struct device *dev, off_t offset, void *data, size_t len) { if ((offset > SOC_NV_FLASH_SIZE) || ((offset + len) > SOC_NV_FLASH_SIZE)) { return -EINVAL; } if (len == 0U) { return 0; } memcpy(data, (uint8_t *)SOC_NV_FLASH_ADDR + offset, len); return 0; } static int flash_gd32_write(const struct device *dev, off_t offset, const void *data, size_t len) { struct flash_gd32_data *dev_data = dev->data; int ret = 0; if (!flash_gd32_valid_range(offset, len, true)) { return -EINVAL; } if (len == 0U) { return 0; } k_sem_take(&dev_data->mutex, K_FOREVER); ret = flash_gd32_write_range(offset, data, len); k_sem_give(&dev_data->mutex); return ret; } static int flash_gd32_erase(const struct device *dev, off_t offset, size_t size) { struct flash_gd32_data *data = dev->data; int ret = 0; if (size == 0U) { return 0; } if (!flash_gd32_valid_range(offset, size, false)) { return -EINVAL; } k_sem_take(&data->mutex, K_FOREVER); ret = flash_gd32_erase_block(offset, size); k_sem_give(&data->mutex); return ret; } static const struct flash_parameters* flash_gd32_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_gd32_parameters; } static const struct flash_driver_api flash_gd32_driver_api = { .read = flash_gd32_read, .write = flash_gd32_write, .erase = flash_gd32_erase, .get_parameters = flash_gd32_get_parameters, #ifdef CONFIG_FLASH_PAGE_LAYOUT .page_layout = flash_gd32_pages_layout, #endif }; static int flash_gd32_init(const struct device 
*dev) { struct flash_gd32_data *data = dev->data; k_sem_init(&data->mutex, 1, 1); return 0; } DEVICE_DT_INST_DEFINE(0, flash_gd32_init, NULL, &flash_data, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_gd32_driver_api); ```
/content/code_sandbox/drivers/flash/flash_gd32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
667
```c /* * */ #define DT_DRV_COMPAT cdns_qspi_nor #include "flash_cadence_qspi_nor_ll.h" #include <string.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/flash.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flash_cadence, CONFIG_FLASH_LOG_LEVEL); struct flash_cad_priv { DEVICE_MMIO_NAMED_RAM(qspi_reg); DEVICE_MMIO_NAMED_RAM(qspi_data); struct cad_qspi_params params; }; struct flash_cad_config { DEVICE_MMIO_NAMED_ROM(qspi_reg); DEVICE_MMIO_NAMED_ROM(qspi_data); }; static const struct flash_parameters flash_cad_parameters = { .write_block_size = QSPI_BYTES_PER_DEV, .erase_value = 0xff, }; #define DEV_DATA(dev) ((struct flash_cad_priv *)((dev)->data)) #define DEV_CFG(dev) ((struct flash_cad_config *)((dev)->config)) static int flash_cad_read(const struct device *dev, off_t offset, void *data, size_t len) { struct flash_cad_priv *priv = dev->data; struct cad_qspi_params *cad_params = &priv->params; int rc; if ((data == NULL) || (len == 0)) { LOG_ERR("Invalid input parameter for QSPI Read!"); return -EINVAL; } rc = cad_qspi_read(cad_params, data, (uint32_t)offset, len); if (rc < 0) { LOG_ERR("Cadence QSPI Flash Read Failed"); return rc; } return 0; } static int flash_cad_erase(const struct device *dev, off_t offset, size_t len) { struct flash_cad_priv *priv = dev->data; struct cad_qspi_params *cad_params = &priv->params; int rc; if (len == 0) { LOG_ERR("Invalid input parameter for QSPI Erase!"); return -EINVAL; } rc = cad_qspi_erase(cad_params, (uint32_t)offset, len); if (rc < 0) { LOG_ERR("Cadence QSPI Flash Erase Failed!"); return rc; } return 0; } static int flash_cad_write(const struct device *dev, off_t offset, const void *data, size_t len) { struct flash_cad_priv *priv = dev->data; struct cad_qspi_params *cad_params = &priv->params; int rc; if ((data == NULL) || (len == 0)) { LOG_ERR("Invalid input parameter for QSPI Write!"); return -EINVAL; } rc = cad_qspi_write(cad_params, (void *)data, (uint32_t)offset, len); if (rc < 
0) { LOG_ERR("Cadence QSPI Flash Write Failed!"); return rc; } return 0; } static const struct flash_parameters * flash_cad_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_cad_parameters; } static const struct flash_driver_api flash_cad_api = { .erase = flash_cad_erase, .write = flash_cad_write, .read = flash_cad_read, .get_parameters = flash_cad_get_parameters, }; static int flash_cad_init(const struct device *dev) { struct flash_cad_priv *priv = dev->data; struct cad_qspi_params *cad_params = &priv->params; int rc; DEVICE_MMIO_NAMED_MAP(dev, qspi_reg, K_MEM_CACHE_NONE); DEVICE_MMIO_NAMED_MAP(dev, qspi_data, K_MEM_CACHE_NONE); cad_params->reg_base = DEVICE_MMIO_NAMED_GET(dev, qspi_reg); cad_params->data_base = DEVICE_MMIO_NAMED_GET(dev, qspi_data); rc = cad_qspi_init(cad_params, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL, QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS, QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0); if (rc < 0) { LOG_ERR("Cadence QSPI Flash Init Failed"); return rc; } return 0; } #define CREATE_FLASH_CADENCE_QSPI_DEVICE(inst) \ static struct flash_cad_priv flash_cad_priv_##inst = { \ .params = { \ .clk_rate = DT_INST_PROP(inst, clock_frequency),\ .data_size = DT_INST_REG_SIZE_BY_IDX(inst, 1), \ }, \ }; \ \ static struct flash_cad_config flash_cad_config_##inst = { \ DEVICE_MMIO_NAMED_ROM_INIT_BY_NAME( \ qspi_reg, DT_DRV_INST(inst)), \ DEVICE_MMIO_NAMED_ROM_INIT_BY_NAME( \ qspi_data, DT_DRV_INST(inst)), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, \ flash_cad_init, \ NULL, \ &flash_cad_priv_##inst, \ &flash_cad_config_##inst, \ POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &flash_cad_api); DT_INST_FOREACH_STATUS_OKAY(CREATE_FLASH_CADENCE_QSPI_DEVICE) ```
/content/code_sandbox/drivers/flash/flash_cadence_qspi_nor.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,166
```c /* * */ #include "flash_cadence_nand_ll.h" LOG_MODULE_REGISTER(flash_cdns_nand_ll, CONFIG_FLASH_LOG_LEVEL); /** * Wait for the Cadence NAND controller to become idle. * * @param base_address The base address of the Cadence NAND controller. * @retval 0 on success or -ETIMEDOUT error value on failure. */ static inline int32_t cdns_nand_wait_idle(uintptr_t base_address) { /* Wait status command response ready */ if (!WAIT_FOR(CNF_GET_CTRL_BUSY(sys_read32(CNF_CMDREG(base_address, CTRL_STATUS))) == 0U, IDLE_TIME_OUT, k_msleep(1))) { LOG_ERR("Timed out waiting for wait idle response"); return -ETIMEDOUT; } return 0; } /** * Set the row address for a NAND flash memory device using the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @param local_row_address The row address. * @param page_set The page set number. */ static void row_address_set(struct cadence_nand_params *params, uint32_t *local_row_address, uint32_t page_set) { uint32_t block_number = 0; block_number = ((page_set) / (params->npages_per_block)); *local_row_address = 0; *local_row_address |= ROW_VAL_SET((params->page_size_bit) - 1, 0, ((page_set) % (params->npages_per_block))); *local_row_address |= ROW_VAL_SET((params->block_size_bit) - 1, (params->page_size_bit), block_number); *local_row_address |= ROW_VAL_SET((params->lun_size_bit) - 1, (params->block_size_bit), (block_number / params->nblocks_per_lun)); } /** * Retrieve information about the NAND flash device using the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @retval 0 on success or -ENXIO error value on failure. 
*/ static int cdns_nand_device_info(struct cadence_nand_params *params) { struct nf_ctrl_version *nf_ver; uintptr_t base_address; uint32_t reg_value = 0; uint8_t type; base_address = params->nand_base; /* Read flash device version information */ reg_value = sys_read32(CNF_CTRLPARAM(base_address, VERSION)); nf_ver = (struct nf_ctrl_version *)&reg_value; LOG_INF("NAND Flash Version Information"); LOG_INF("HPNFC Magic Number 0x%x", nf_ver->hpnfc_magic_number); LOG_INF("Fixed number 0x%x", nf_ver->ctrl_fix); LOG_INF("Controller Revision Number 0x%x", nf_ver->ctrl_rev); /* Interface Type */ reg_value = sys_read32(CNF_CTRLPARAM(base_address, DEV_PARAMS0)); type = CNF_GET_DEV_TYPE(reg_value); if (type == CNF_DT_UNKNOWN) { LOG_ERR("%s: device type unknown", __func__); return -ENXIO; } params->nluns = CNF_GET_NLUNS(reg_value); LOG_INF("Number of LUMs %hhx", params->nluns); /* Pages per block */ reg_value = sys_read32(CNF_CTRLCFG(base_address, DEV_LAYOUT)); params->npages_per_block = GET_PAGES_PER_BLOCK(reg_value); /* Page size and spare size */ reg_value = sys_read32(CNF_CTRLPARAM(base_address, DEV_AREA)); params->page_size = GET_PAGE_SIZE(reg_value); params->spare_size = GET_SPARE_SIZE(reg_value); /* Device blocks per LUN */ params->nblocks_per_lun = sys_read32(CNF_CTRLPARAM(base_address, DEV_BLOCKS_PLUN)); /* Calculate block size and total device size */ params->block_size = (params->npages_per_block * params->page_size); params->device_size = ((long long)params->block_size * (long long)(params->nblocks_per_lun * params->nluns)); LOG_INF("block size %x total device size %llx", params->block_size, params->device_size); /* Calculate bit size of page, block and lun*/ params->page_size_bit = find_msb_set((params->npages_per_block) - 1); params->block_size_bit = find_msb_set((params->nblocks_per_lun) - 1); params->lun_size_bit = find_msb_set((params->nluns) - 1); return 0; } /** * Retrieve the status of a specific thread in the Cadence NAND controller. 
* * @param base_address The base address of the Cadence NAND controller. * @param thread The thread identifier. * @retval The status of the thread. */ static uint32_t cdns_nand_get_thrd_status(uintptr_t base_address, uint8_t thread) { uint32_t status; sys_write32(THREAD_VAL(thread), (base_address + CMD_STATUS_PTR_ADDR)); status = sys_read32((base_address + CMD_STAT_CMD_STATUS)); return status; } /** * Wait for a specific thread in the Cadence controller to complete. * * @param base_address The base address of the Cadence controller. * @param thread The thread identifier to wait for. * @retval 0 on success or -ETIMEDOUT error value on failure. */ static int cdns_wait_for_thread(uintptr_t base_address, uint8_t thread) { if (!WAIT_FOR((sys_read32((base_address) + THR_STATUS) & BIT(thread)) == 0U, THREAD_IDLE_TIME_OUT, k_msleep(1))) { LOG_ERR("Timed out waiting for thread response"); return -ETIMEDOUT; } return 0; } /** * Set features in the Cadence NAND controller using PIO operations. * * @param base_address The base address of the Cadence NAND controller. * @param feat_addr The address of the feature to be set. * @param feat_val The value of the feature to be set. * @param thread The thread identifier for the PIO operation. * @param vol_id The volume identifier for the feature set operation. * @param use_intr Flag indicating whether to use interrupts during the operation. * @retval 0 on success or -ETIMEDOUT error value on failure. 
*/ static int cdns_nand_pio_set_features(uintptr_t base_address, uint8_t feat_addr, uint8_t feat_val, uint8_t thread, uint8_t vol_id) { uint32_t status = 0; int ret = 0; ret = cdns_wait_for_thread(base_address, thread); if (ret != 0) { return ret; } sys_write32(SET_FEAT_ADDR(feat_addr), (base_address + CDNS_CMD_REG1)); sys_write32(feat_val, (base_address + CDNS_CMD_REG2)); status = CMD_0_THREAD_POS_SET(thread); status |= CMD_0_C_MODE_SET(CT_PIO_MODE); status |= PIO_CMD0_CT_SET(PIO_SET_FEA_MODE); status |= CMD_0_VOL_ID_SET(vol_id); sys_write32(status, (base_address + CDNS_CMD_REG0)); return 0; } /** * Check whether a transfer complete for PIO operation in the Cadence controller has finished. * * @param base_address The base address of the Cadence controller. * @param thread The thread identifier for the PIO operation. * @retval 0 on success or negative error value on failure. */ static int cdns_pio_transfer_complete(uintptr_t base_address, uint8_t thread) { uint32_t status; status = WAIT_FOR(((cdns_nand_get_thrd_status(base_address, thread)) != 0), IDLE_TIME_OUT, k_msleep(1)); if (status == 0) { LOG_ERR("Timed out waiting for thread status response"); return -ETIMEDOUT; } if ((status & (BIT(F_CSTAT_COMP)))) { if ((status & (BIT(F_CSTAT_FAIL)))) { LOG_ERR("Cadence status operation failed %s", __func__); return -EIO; } } else { LOG_ERR("Cadence status complete failed %s", __func__); return -EIO; } return 0; } /** * Set the operational mode for the Cadence NAND controller. * * @param base_address The base address of the Cadence NAND controller. * @param opr_mode The operational mode SDR / NVDDR to set. * @retval 0 on success or negative error value on failure. 
*/ static int cdns_nand_set_opr_mode(uintptr_t base_address, uint8_t opr_mode) { uint8_t device_type; uint32_t timing_mode = 0; uint32_t status; int ret; if (opr_mode == CNF_OPR_WORK_MODE_SDR) { status = ONFI_TIMING_MODE_SDR( sys_read32(CNF_CTRLPARAM(base_address, ONFI_TIMING_0))); timing_mode = find_lsb_set(status) - 1; /* PHY Register Timing setting*/ sys_write32(PHY_CTRL_REG_SDR, (base_address + PHY_CTRL_REG_OFFSET)); sys_write32(PHY_TSEL_REG_SDR, (base_address + PHY_TSEL_REG_OFFSET)); sys_write32(PHY_DQ_TIMING_REG_SDR, (base_address + PHY_DQ_TIMING_REG_OFFSET)); sys_write32(PHY_DQS_TIMING_REG_SDR, (base_address + PHY_DQS_TIMING_REG_OFFSET)); sys_write32(PHY_GATE_LPBK_CTRL_REG_SDR, (base_address + PHY_GATE_LPBK_OFFSET)); sys_write32(PHY_DLL_MASTER_CTRL_REG_SDR, (base_address + PHY_DLL_MASTER_OFFSET)); /* Async mode timing settings */ sys_write32((CNF_ASYNC_TIMINGS_TRH) | (CNF_ASYNC_TIMINGS_TRP) | (CNF_ASYNC_TIMINGS_TWH) | (CNF_ASYNC_TIMINGS_TWP), CNF_MINICTRL(base_address, ASYNC_TOGGLE_TIMINGS)); /* Set operation work mode in common settings */ sys_clear_bits(CNF_MINICTRL(base_address, CMN_SETTINGS), CNF_OPR_WORK_MODE_SDR_MASK); } else { /* NVDDR MODE */ status = ONFI_TIMING_MODE_NVDDR( sys_read32(CNF_CTRLPARAM(base_address, ONFI_TIMING_0))); timing_mode = find_lsb_set(status) - 1; /* PHY Register Timing setting*/ sys_write32(PHY_CTRL_REG_DDR, (base_address + PHY_CTRL_REG_OFFSET)); sys_write32(PHY_TSEL_REG_DDR, (base_address + PHY_TSEL_REG_OFFSET)); sys_write32(PHY_DQ_TIMING_REG_DDR, (base_address + PHY_DQ_TIMING_REG_OFFSET)); sys_write32(PHY_DQS_TIMING_REG_DDR, (base_address + PHY_DQS_TIMING_REG_OFFSET)); sys_write32(PHY_GATE_LPBK_CTRL_REG_DDR, (base_address + PHY_GATE_LPBK_OFFSET)); sys_write32(PHY_DLL_MASTER_CTRL_REG_DDR, (base_address + PHY_DLL_MASTER_OFFSET)); /* Set operation work mode in common settings */ sys_set_bits(CNF_MINICTRL(base_address, CMN_SETTINGS), CNF_OPR_WORK_MODE_NVDDR_MASK); } /* Wait for controller to be in idle state */ ret = 
cdns_nand_wait_idle(base_address); if (ret != 0) { LOG_ERR("Wait for controller to be in idle state Failed"); return ret; } /* Check device type */ device_type = CNF_GET_DEV_TYPE(sys_read32(CNF_CTRLPARAM(base_address, DEV_PARAMS0))); if (device_type != ONFI_INTERFACE) { LOG_ERR("Driver does not support this interface"); return -ENOTSUP; } /* Reset DLL PHY */ sys_clear_bit(CNF_MINICTRL(base_address, DLL_PHY_CTRL), CNF_DLL_PHY_RST_N); /* Wait for controller to be in idle state */ ret = cdns_nand_wait_idle(base_address); if (ret != 0) { LOG_ERR("Wait for controller to be in idle state Failed"); return ret; } ret = cdns_nand_pio_set_features(base_address, SET_FEAT_TIMING_MODE_ADDRESS, timing_mode, NF_TDEF_TRD_NUM, VOL_ID); if (ret != 0) { return ret; } ret = cdns_pio_transfer_complete(base_address, NF_TDEF_TRD_NUM); if (ret != 0) { LOG_ERR("cdns pio check failed"); return ret; } ret = cdns_nand_wait_idle(base_address); if (ret != 0) { LOG_ERR("Wait for controller to be in idle state Failed"); return ret; } /* set dll_rst_n in dll_phy_ctrl to 1 */ sys_set_bit(CNF_MINICTRL(base_address, DLL_PHY_CTRL), CNF_DLL_PHY_RST_N); ret = cdns_nand_wait_idle(base_address); if (ret != 0) { LOG_ERR("Wait for controller to be in idle state Failed"); return ret; } return 0; } /** * Configure the transfer settings of the Cadence NAND controller. * * @param base_address The base address of the Cadence NAND controller. * @retval 0 on success or -ETIMEDOUT error value on failure. */ static int cdns_nand_transfer_config(uintptr_t base_address) { int ret = 0; /* Wait for controller to be in idle state */ ret = cdns_nand_wait_idle(base_address); if (ret != 0) { LOG_ERR("Wait for controller to be in idle state Failed"); return ret; } /* Configure data transfer parameters */ sys_write32(ENABLE, CNF_CTRLCFG(base_address, TRANS_CFG0)); /* Disable cache and multiplane. 
*/ sys_write32(DISABLE, CNF_CTRLCFG(base_address, MULTIPLANE_CFG)); sys_write32(DISABLE, CNF_CTRLCFG(base_address, CACHE_CFG)); /* Clear all interrupts. */ sys_write32(CLEAR_ALL_INTERRUPT, (base_address + INTR_STATUS)); return 0; } /** * Initialize the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @retval 0 on success or negative error value on failure. */ int cdns_nand_init(struct cadence_nand_params *params) { uint32_t reg_value_read = 0; uintptr_t base_address = params->nand_base; uint8_t datarate_mode = params->datarate_mode; int ret; if (!WAIT_FOR(CNF_GET_INIT_COMP(sys_read32(CNF_CMDREG(base_address, CTRL_STATUS))) != 0U, IDLE_TIME_OUT, k_msleep(1))) { LOG_ERR("Timed out waiting for NAND Controller Init complete status response"); return -ETIMEDOUT; } if (CNF_GET_INIT_FAIL(sys_read32(CNF_CMDREG(base_address, CTRL_STATUS))) != 0) { LOG_ERR("NAND Controller Init complete Failed!!!"); return -ENODEV; } ret = cdns_nand_device_info(params); if (ret != 0) { return ret; } /* Hardware Support Features */ reg_value_read = sys_read32(CNF_CTRLPARAM(base_address, FEATURE)); /* Enable data integrity parity check if the data integrity parity mechanism is */ /* supported by the device */ if (CNF_HW_DI_PR_SUPPORT(reg_value_read) != 0) { sys_set_bit(CNF_DI(base_address, CONTROL), CNF_DI_PAR_EN); } /* Enable data integrity CRC check if the data integrity CRC mechanism is */ /* supported by the device */ if (CNF_HW_DI_CRC_SUPPORT(reg_value_read) != 0) { sys_set_bit(CNF_DI(base_address, CONTROL), CNF_DI_CRC_EN); } /* Status polling mode, device control and status register */ ret = cdns_nand_wait_idle(base_address); if (ret != 0) { LOG_ERR("Wait for controller to be in idle state Failed"); return ret; } sys_write32(DEV_STAT_DEF_VALUE, CNF_CTRLCFG(base_address, DEV_STAT)); /* Set operation work mode */ ret = cdns_nand_set_opr_mode(base_address, datarate_mode); if (ret != 0) { return ret; } /* Set data transfer configuration parameters */ ret = 
cdns_nand_transfer_config(base_address); if (ret != 0) { return ret; } /* Wait for controller to be in idle state */ ret = cdns_nand_wait_idle(base_address); if (ret != 0) { LOG_ERR("Wait for controller to be in idle state Failed"); return ret; } /* DMA Setting */ sys_write32((F_BURST_SEL_SET(NF_TDEF_BURST_SEL)) | (BIT(F_OTE)), (base_address + NF_DMA_SETTING)); /* Pre fetch */ sys_write32(((NF_FIFO_TRIGG_LVL_SET(PRE_FETCH_VALUE)) | (NF_DMA_PACKAGE_SIZE_SET(PRE_FETCH_VALUE))), (base_address + NF_PRE_FETCH)); /* Total bits in row addressing*/ params->total_bit_row = find_msb_set(((params->npages_per_block) - 1)) + find_msb_set((params->nblocks_per_lun) - 1); if (ret != 0) { LOG_ERR("Failed to establish device access width!"); return -EINVAL; } /* Enable Global Interrupt for NAND*/ #ifdef CONFIG_CDNS_NAND_INTERRUPT_SUPPORT sys_set_bit((base_address + INTERRUPT_STATUS_REG), GINTR_ENABLE); #endif return 0; } #if CONFIG_CDNS_NAND_CDMA_MODE /** * * This function performs Command descriptor structure prepareation. 
* * @param nf_mem determine which NF memory bank will be selected * @param flash_ptr start ROW address in NF memory * @param mem_ptr system memory pointer * @param ctype Command type (read/write/erase) * @param cmd_cnt counter for commands * @param dma_sel select DMA engine (0 - slave DMA, 1 - master DMA) * @param vol_id specify target volume ID * */ void cdns_nand_cdma_prepare(char nf_mem, uint32_t flash_ptr, char *mem_ptr, uint16_t ctype, int32_t cmd_cnt, uint8_t dma_sel, uint8_t vol_id, struct cdns_cdma_command_descriptor *desc) { struct cdns_cdma_command_descriptor *cdma_desc; cdma_desc = desc; /* set fields for one descriptor */ cdma_desc->flash_pointer = flash_ptr; cdma_desc->bank_number = nf_mem; cdma_desc->command_flags |= CDMA_CF_DMA_MASTER_SET(dma_sel) | F_CFLAGS_VOL_ID_SET(vol_id); cdma_desc->memory_pointer = (uintptr_t)mem_ptr; cdma_desc->status = 0; cdma_desc->sync_flag_pointer = 0; cdma_desc->sync_arguments = 0; cdma_desc->ctrl_data_ptr = 0x40; cdma_desc->command_type = ctype; if (cmd_cnt > 1) { cdma_desc->next_pointer = (uintptr_t)(desc + 1); cdma_desc->command_flags |= CFLAGS_MPTRPC_SET | CFLAGS_MPTRPC_SET; cdma_desc->command_flags |= CFLAGS_CONT_SET; } else { cdma_desc->next_pointer = 0; #ifdef CONFIG_CDNS_NAND_INTERRUPT_SUPPORT cdma_desc->command_flags |= CDMA_CF_INT_SET; #endif } } /** * Check a command descriptor transfer complete status in the Cadence NAND controller. * * @param desc_ptr The pointer to the command descriptor structure. * @param params The Cadence NAND parameters structure. * @retval 0 on success or negative error value on failure. 
*/
static int cdns_transfer_complete(struct cdns_cdma_command_descriptor *desc_ptr,
				  struct cadence_nand_params *params)
{
#ifdef CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
	uint32_t status = 0;

	/* desc_ptr is unused in the interrupt path; completion status is read
	 * back from the controller registers instead of the descriptor.
	 */
	/* Block until the ISR signals completion for this operation. */
	NAND_INT_SEM_TAKE(params);
	/* Select the default thread, then fetch its command status word. */
	sys_write32(NF_TDEF_TRD_NUM, (params->nand_base + CMD_STATUS_PTR_ADDR));
	status = sys_read32((params->nand_base + CMD_STAT_CMD_STATUS));
	if ((status & (BIT(F_CSTAT_COMP)))) {
		/* Completed — but the controller may still flag a failure. */
		if ((status & (BIT(F_CSTAT_FAIL)))) {
			LOG_ERR("Cadence status operation failed %s", __func__);
			return -EIO;
		}
	} else {
		LOG_ERR("Cadence status complete failed %s", __func__);
		return -EIO;
	}
#else
	ARG_UNUSED(params);
	/* Polling path: spin (sleeping 1 ms per poll) on the descriptor's
	 * status word until the controller sets the completion bit.
	 */
	if (!WAIT_FOR(((desc_ptr->status & (BIT(F_CSTAT_COMP))) != 0), IDLE_TIME_OUT,
		      k_msleep(1))) {
		LOG_ERR("Timed out waiting for thread status response");
		return -ETIMEDOUT;
	}
	if ((desc_ptr->status & (BIT(F_CSTAT_FAIL))) != 0) {
		LOG_ERR("Cadence status operation failed %s", __func__);
		return -EIO;
	}
#endif
	return 0;
}

/**
 * Send a command descriptor to the Cadence NAND controller for execution.
 *
 * @param base_address The base address of the Cadence NAND controller.
 * @param desc_ptr The pointer to the command descriptor.
 * @param thread The thread number for the execution.
 * @retval 0 on success or -ETIMEDOUT error value on failure.
*/
static int cdns_nand_send(uintptr_t base_address, char *desc_ptr, uint8_t thread)
{
	uint64_t desc_address;
	uint32_t status;
	int ret;

	desc_address = (uint64_t)desc_ptr;

	/* The thread must be free before a new descriptor is queued. */
	ret = cdns_wait_for_thread(base_address, thread);
	if (ret != 0) {
		return ret;
	}
	/* desc_ptr address passing: low word, then high word. */
	sys_write32(desc_address & U32_MASK_VAL, (base_address + CDNS_CMD_REG2));
	sys_write32((desc_address >> 32) & U32_MASK_VAL, (base_address + CDNS_CMD_REG3));
	/* Thread selection */
	status = CMD_0_THREAD_POS_SET(thread);
	/* CDMA Mode selection */
	status |= CMD_0_C_MODE_SET(CT_CDMA_MODE);
	/* CMD 0 Reg write kicks off execution. */
	sys_write32(status, (base_address + CDNS_CMD_REG0));
	return 0;
}

/*
 * Build, submit and wait on CDMA descriptor chains covering 'page_count'
 * pages (or blocks, for erase), at most 'max_page_desc' descriptors per
 * chain. 'cond_start' is the first page/block number; 'buffer' is ignored
 * for erase.
 */
static int cdns_cdma_desc_transfer_finish(struct cadence_nand_params *params, uint32_t page_count,
					  uint32_t max_page_desc, uint32_t ctype,
					  uint32_t cond_start, char *buffer)
{
	uint32_t page_count_pass = 0;
	uint32_t row_address = 0;
	uint32_t base_address;
	uint32_t page_buffer_size;
	struct cdns_cdma_command_descriptor *cdma_desc;
	int ret;

	/* Descriptor array sized for one chain (capped at max_page_desc). */
	page_buffer_size = (page_count > max_page_desc) ? max_page_desc : page_count;
	cdma_desc = k_malloc(sizeof(struct cdns_cdma_command_descriptor) * page_buffer_size);
	if (cdma_desc == NULL) {
		LOG_ERR("Memory allocation error occurred %s", __func__);
		return -ENOSR;
	}
	base_address = params->nand_base;
	while (page_count > 0) {
		/* Row address of the first page of this chain. */
		row_address_set(params, &row_address, cond_start);
		if (page_count > max_page_desc) {
			page_count_pass = max_page_desc;
			page_count = page_count - max_page_desc;
			cond_start = cond_start + page_count_pass;
		} else {
			page_count_pass = page_count;
			page_count = page_count - page_count_pass;
		}
		for (int index = 0; index < page_count_pass; index++) {
			/* NOTE(review): every descriptor in the chain receives the
			 * same row_address and buffer pointer, and the command type
			 * argument is (ctype + index), which varies per descriptor.
			 * Both look suspicious — confirm against the CDMA descriptor
			 * format before relying on multi-descriptor chains.
			 */
			cdns_nand_cdma_prepare(NF_TDEF_DEV_NUM, row_address, buffer,
					       (ctype + index), (page_count_pass - index),
					       DMA_MS_SEL, VOL_ID, (cdma_desc + index));
		}
		ret = cdns_nand_send(base_address, (char *)cdma_desc, NF_TDEF_TRD_NUM);
		if (ret != 0) {
			k_free(cdma_desc);
			return ret;
		}
		/* Erase carries no data payload; otherwise advance the buffer by
		 * one full chain's worth of pages.
		 */
		if (ctype != CNF_CMD_ERASE) {
			buffer = buffer + (max_page_desc * params->page_size);
		}
		ret = cdns_transfer_complete(cdma_desc, params);
		if (ret != 0) {
			k_free(cdma_desc);
			return ret;
		}
	}
	k_free(cdma_desc);
	return 0;
}

/**
 * Perform a CDMA write operation for the Cadence NAND controller.
 *
 * @param params The Cadence NAND parameters structure.
 * @param start_page_number The starting page number for the write operation.
 * @param buffer The buffer containing the data to be written.
 * @param page_count The number of pages to be written.
 * @retval 0 on success or negative error value on failure.
 */
static int cdns_nand_cdma_write(struct cadence_nand_params *params, uint32_t start_page_number,
				char *buffer, uint32_t page_count)
{
	int ret;

	ret = cdns_cdma_desc_transfer_finish(params, page_count,
					     CONFIG_FLASH_CDNS_CDMA_PAGE_COUNT, CNF_CMD_WR,
					     start_page_number, buffer);
	return ret;
}

/**
 * Perform a CDMA read operation for the Cadence NAND controller.
 *
 * @param params The Cadence NAND parameters structure.
 * @param start_page_number The starting page number for the read operation.
* @param buffer The buffer to store the read data. * @param page_count The number of pages to be read. * @retval 0 on success or negative error value on failure. */ static int cdns_nand_cdma_read(struct cadence_nand_params *params, uint32_t start_page_number, char *buffer, uint32_t page_count) { int ret; ret = cdns_cdma_desc_transfer_finish(params, page_count, CONFIG_FLASH_CDNS_CDMA_PAGE_COUNT, CNF_CMD_RD, start_page_number, buffer); return ret; } /** * Perform a CDMA erase operation for the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @param start_block_number The starting block number for the erase operation. * @param block_count The number of blocks to be erased. * @retval 0 on success or negative error value on failure. */ static int cdns_nand_cdma_erase(struct cadence_nand_params *params, uint32_t start_block_number, uint32_t block_count) { int ret; ret = cdns_cdma_desc_transfer_finish(params, block_count, CONFIG_FLASH_CDNS_CDMA_BLOCK_COUNT, CNF_CMD_ERASE, start_block_number, NULL); return ret; } #endif #if CONFIG_CDNS_NAND_PIO_MODE /** * Perform an erase operation on the Cadence NAND controller using PIO. * * @param params The Cadence NAND parameters structure. * @param thread The thread identifier for the PIO operation. * @param bank The bank identifier for the erase operation. * @param start_block The starting block number for the erase operation. * @param ctype The command type for the erase operation. * @param block_count The number of blocks to be erased. * @retval 0 on success or negative error value on failure. 
*/
static int cdns_nand_pio_erase(struct cadence_nand_params *params, uint8_t thread, uint8_t bank,
			       uint32_t start_block, uint16_t ctype, uint32_t block_count)
{
	/* Issue one PIO erase command per block and wait for each to finish. */
	uintptr_t reg_base = params->nand_base;
	uint32_t row_addr = 0;
	uint32_t cmd;
	int ret;

	for (uint32_t blk = 0; blk < block_count; blk++) {
		ret = cdns_wait_for_thread(reg_base, thread);
		if (ret != 0) {
			return ret;
		}

		/* Row address of the first page of the current block. */
		row_address_set(params, &row_addr, (start_block * params->npages_per_block));
		sys_write32(row_addr, (reg_base + CDNS_CMD_REG1));
		start_block++;
		sys_write32((NF_CMD4_BANK_SET(bank)), (reg_base + CDNS_CMD_REG4));

		/* Compose the PIO command word and fire it. */
		cmd = CMD_0_THREAD_POS_SET(thread);
#ifdef CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
		cmd |= BIT(PIO_CF_INT);
#endif
		cmd |= CMD_0_C_MODE_SET(CT_PIO_MODE);
		cmd |= PIO_CMD0_CT_SET(ctype);
		sys_write32(cmd, (reg_base + CDNS_CMD_REG0));

		/* Wait for the ISR (when enabled), then for thread completion. */
		NAND_INT_SEM_TAKE(params);
		ret = cdns_pio_transfer_complete(reg_base, thread);
		if (ret != 0) {
			return ret;
		}
	}
	return 0;
}

/**
 * Prepare for a PIO operation in the Cadence NAND controller.
 *
 * @param base_address The base address of the Cadence NAND controller.
 * @param thread The thread ID associated with the operation.
 * @param bank The bank ID for the operation.
 * @param row_address The row address for the operation.
 * @param buf The buffer containing the data for the operation.
 * @param ctype The command type for the operation.
 * @param dma_sel The DMA selection flag for the operation.
 * @param vol_id The volume ID for the operation.
 * @retval 0 on success or -ETIMEDOUT error value on failure.
*/
static int cdns_nand_pio_prepare(uintptr_t base_address, uint8_t thread, uint8_t bank,
				 uint32_t row_address, char *buf, uint16_t ctype,
				 uint8_t dma_sel, uint8_t vol_id)
{
	uint64_t buf_addr = (uintptr_t)buf;
	uint32_t status;
	int ret;

	/* The thread must be free before new command registers are written. */
	ret = cdns_wait_for_thread(base_address, thread);
	if (ret != 0) {
		return ret;
	}
	/* Program row address, bank, then the 64-bit data buffer address
	 * (low word in REG2, high word in REG3).
	 */
	sys_write32(row_address, (base_address + CDNS_CMD_REG1));
	sys_write32(NF_CMD4_BANK_SET(bank), (base_address + CDNS_CMD_REG4));
	sys_write32(buf_addr & U32_MASK_VAL, (base_address + CDNS_CMD_REG2));
	sys_write32((buf_addr >> 32) & U32_MASK_VAL, (base_address + CDNS_CMD_REG3));
	/* Compose the PIO command word; writing REG0 starts the operation. */
	status = CMD_0_THREAD_POS_SET(thread);
#ifdef CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
	status |= PIO_CF_INT_SET;
#endif
	status |= PIO_CF_DMA_MASTER_SET(dma_sel);
	status |= CMD_0_C_MODE_SET(CT_PIO_MODE);
	status |= PIO_CMD0_CT_SET(ctype);
	status |= CMD_0_VOL_ID_SET(vol_id);
	sys_write32(status, (base_address + CDNS_CMD_REG0));
	return 0;
}

/**
 * Perform a PIO write operation for the Cadence NAND controller.
 *
 * @param params The Cadence NAND parameters structure.
 * @param row_address The row address for the write operation.
 * @param buffer The buffer containing the data to be written.
 * @retval 0 on success or negative error value on failure.
 */
static int cdns_nand_pio_write(struct cadence_nand_params *params, uint32_t row_address,
			       char *buffer)
{
	uintptr_t base_address;
	int ret;

	base_address = params->nand_base;
	ret = cdns_nand_pio_prepare(base_address, NF_TDEF_TRD_NUM, NF_TDEF_DEV_NUM, row_address,
				    buffer, CNF_CMD_WR, DMA_MS_SEL, VOL_ID);
	if (ret != 0) {
		return ret;
	}
	/* Wait for the ISR (when enabled), then for thread completion. */
	NAND_INT_SEM_TAKE(params);
	ret = cdns_pio_transfer_complete(base_address, NF_TDEF_TRD_NUM);
	if (ret != 0) {
		return ret;
	}
	return 0;
}

/**
 * Perform a PIO read operation for the Cadence NAND controller.
 *
 * @param params The Cadence NAND parameters structure.
 * @param row_address The row address for the read operation.
 * @param buffer The buffer to store the read data.
 * @retval 0 on success or negative error value on failure.
*/ static int cdns_nand_pio_read(struct cadence_nand_params *params, uint32_t row_address, char *buffer) { uintptr_t base_address; int ret; base_address = params->nand_base; ret = cdns_nand_pio_prepare(base_address, NF_TDEF_TRD_NUM, NF_TDEF_DEV_NUM, row_address, buffer, CNF_CMD_RD, DMA_MS_SEL, VOL_ID); if (ret != 0) { return ret; } NAND_INT_SEM_TAKE(params); ret = cdns_pio_transfer_complete(base_address, NF_TDEF_TRD_NUM); if (ret != 0) { return ret; } return 0; } /** * Perform a combined PIO read and write operation for the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @param start_page_number The starting page number for the read/write operation. * @param buffer The buffer containing the data to be written or to store the read data. * @param page_count The number of pages to be read or written. * @param mode The mode of operation (read, write). * @retval 0 on success or negative error value on failure. */ static int cdns_nand_pio_read_write(struct cadence_nand_params *params, uint32_t start_page_number, char *buffer, uint32_t page_count, uint8_t mode) { uint32_t index; uint32_t pio_row_address = 0; int ret = 0; for (index = 0; index < page_count; index++) { row_address_set(params, &pio_row_address, start_page_number++); if (mode == CDNS_READ) { ret = cdns_nand_pio_read(params, pio_row_address, buffer + (index * (params->page_size))); } else { ret = cdns_nand_pio_write(params, pio_row_address, buffer + (index * (params->page_size))); } } return ret; } #endif #if CONFIG_CDNS_NAND_GENERIC_MODE /** * Send a generic command to the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @param mini_ctrl_cmd The command to be sent. * @retval 0 on success or -ETIMEDOUT error value on failure. 
*/
static int cdns_generic_send_cmd(struct cadence_nand_params *params, uint64_t mini_ctrl_cmd)
{
	uint32_t mini_ctrl_cmd_l, mini_ctrl_cmd_h, status;
	uintptr_t base_address;
	int ret = 0;

	base_address = params->nand_base;
	/* Split the 64-bit mini-controller command into low/high words. */
	mini_ctrl_cmd_l = mini_ctrl_cmd & U32_MASK_VAL;
	mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
	ret = cdns_nand_wait_idle(base_address);
	if (ret != 0) {
		LOG_ERR("Wait for controller to be in idle state Failed");
		return ret;
	}
	sys_write32(mini_ctrl_cmd_l, (base_address + CDNS_CMD_REG2));
	sys_write32(mini_ctrl_cmd_h, (base_address + CDNS_CMD_REG3));
	/* Select generic command. */
	status = CMD_0_THREAD_POS_SET(NF_TDEF_TRD_NUM);
#ifdef CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
	status |= GEN_CF_INT_SET(GEN_CF_INT_ENABLE);
#endif
	status |= CMD_0_C_MODE_SET(CT_GENERIC_MODE);
	/* Writing REG0 kicks off execution. */
	sys_write32(status, (base_address + CDNS_CMD_REG0));
	return 0;
}

/**
 * Send a generic command data to the Cadence NAND controller.
 *
 * @param params The Cadence NAND parameters structure.
 * @param mode The mode of operation (read, write).
 * @param data_length The length of the associated data.
 * @retval 0 on success or -ETIMEDOUT error value on failure.
 */
static int cdns_generic_cmd_data(struct cadence_nand_params *params, uint8_t mode,
				 uint32_t data_length)
{
	uint64_t mini_ctrl_cmd = 0;
	int ret = 0;

	/* Compose a data-sequence command: direction, sector count and size. */
	mini_ctrl_cmd |= GCMD_TWB_VALUE;
	mini_ctrl_cmd |= GCMCD_DATA_SEQ;
	mini_ctrl_cmd |= GEN_SECTOR_COUNT_SET;
	mini_ctrl_cmd |= GEN_LAST_SECTOR_SIZE_SET((uint64_t)data_length);
	mini_ctrl_cmd |= GEN_DIR_SET((uint64_t)mode);
	mini_ctrl_cmd |= GEN_SECTOR_SET((uint64_t)data_length);
	ret = cdns_generic_send_cmd(params, mini_ctrl_cmd);
	return ret;
}

/**
 * Wait for the completion of an SDMA operation in the Cadence NAND controller.
 *
 * @param base_address The base address of the Cadence NAND controller.
 * @retval 0 on success or -ETIMEDOUT error value on failure.
*/
static int cdns_wait_sdma(uintptr_t base_address)
{
	/* Poll (sleeping 1 ms between polls) until the controller raises the
	 * SDMA trigger bit, then acknowledge it by writing it back.
	 */
	if (!WAIT_FOR(((sys_read32(base_address + INTR_STATUS) & BIT(SDMA_TRIGG)) != 0),
		      IDLE_TIME_OUT, k_msleep(1))) {
		LOG_ERR("Timed out waiting for sdma response");
		return -ETIMEDOUT;
	}

	sys_set_bit((base_address + INTR_STATUS), SDMA_TRIGG);
	return 0;
}

/**
 * Perform buffer copying to SDMA regs in the Cadence NAND controller.
 *
 * @param sdma_base_address The base address of the SDMA in the Cadence NAND controller.
 * @param buffer The source buffer for the copy operation.
 * @param data_length The length of the data to be copied.
 */
static void sdma_buffer_copy_in(uint32_t sdma_base_address, uint8_t *buffer, uint32_t data_length)
{
	/* Byte-wise copy from system memory into the SDMA window. */
	for (uint32_t i = 0; i < data_length; i++) {
		sys_write8(buffer[i], sdma_base_address + i);
	}
}

/**
 * Perform buffer copying from SDMA regs in the Cadence NAND controller.
 *
 * @param sdma_base_address The base address of the SDMA in the Cadence NAND controller.
 * @param buffer The destination buffer for the copy operation.
 * @param data_length The length of the data to be copied.
 */
static void sdma_buffer_copy_out(uint32_t sdma_base_address, uint8_t *buffer, uint32_t data_length)
{
	/* Byte-wise copy from the SDMA window into system memory. */
	for (uint32_t i = 0; i < data_length; i++) {
		buffer[i] = sys_read8(sdma_base_address + i);
	}
}

/**
 * Perform a generic page read operation in the Cadence NAND controller.
 *
 * @param params The Cadence NAND parameters structure.
 * @param read_address The address from which to read the page.
 * @param data_buffer The buffer to store the read data.
 * @retval 0 on success or negative error value on failure.
*/
static int cdns_generic_page_read(struct cadence_nand_params *params, uint64_t read_address,
				  void *data_buffer)
{
	uint64_t mini_ctrl_cmd = 0;
	uintptr_t base_address = params->nand_base;
	int ret;

	mini_ctrl_cmd = PAGE_READ_CMD;
	mini_ctrl_cmd |= GCMD_TWB_VALUE;
	/* Wide-addressing devices need the full address byte count.
	 * NOTE(review): the write path (cdns_generic_page_write) uses the
	 * BIT16_CHECK macro for this same comparison; the literal 16 here is
	 * presumably the same value — confirm and unify.
	 */
	if ((params->nluns > 1) || (params->total_bit_row > 16)) {
		mini_ctrl_cmd |= PAGE_MAX_BYTES(PAGE_MAX_SIZE);
	} else {
		mini_ctrl_cmd |= PAGE_MAX_BYTES(PAGE_MAX_SIZE - 1);
	}
	/* Target row address lives in the upper 32 bits of the command. */
	mini_ctrl_cmd |= read_address << 32;
	ret = cdns_generic_send_cmd(params, mini_ctrl_cmd);
	if (ret != 0) {
		return ret;
	}
	NAND_INT_SEM_TAKE(params);
	/* Follow up with the data-sequence command for one page. */
	ret = cdns_generic_cmd_data(params, CDNS_READ, params->page_size);
	if (ret != 0) {
		return ret;
	}
	NAND_INT_SEM_TAKE(params);
	ret = cdns_wait_sdma(base_address);
	if (ret != 0) {
		return ret;
	}
	/* Drain the page out of the SDMA window into the caller's buffer. */
	sdma_buffer_copy_out(params->sdma_base, data_buffer, params->page_size);
	return 0;
}

/**
 * Perform a generic page write operation in the Cadence NAND controller.
 *
 * @param params The Cadence NAND parameters structure.
 * @param write_address The address to which the page will be written.
 * @param data_buffer The buffer containing the data to be written.
 * @retval 0 on success or negative error value on failure.
*/
static int cdns_generic_page_write(struct cadence_nand_params *params, uint64_t write_address,
				   void *data_buffer)
{
	uint64_t mini_ctrl_cmd = 0;
	int ret;
	uintptr_t base_address = params->nand_base;

	mini_ctrl_cmd |= GCMD_TWB_VALUE;
	mini_ctrl_cmd |= GEN_ADDR_WRITE_DATA((uint32_t)write_address);
	/* Wide-addressing devices need the full address byte count. */
	if ((params->nluns > 1) || (params->total_bit_row > BIT16_CHECK)) {
		mini_ctrl_cmd |= PAGE_MAX_BYTES(PAGE_MAX_SIZE);
	} else {
		mini_ctrl_cmd |= PAGE_MAX_BYTES(PAGE_MAX_SIZE - 1);
	}
	mini_ctrl_cmd |= PAGE_WRITE_CMD;
	ret = cdns_generic_send_cmd(params, mini_ctrl_cmd);
	if (ret != 0) {
		return ret;
	}
	NAND_INT_SEM_TAKE(params);
	/* Data-sequence command for one page, then stage the payload into the
	 * SDMA window.
	 */
	ret = cdns_generic_cmd_data(params, CDNS_WRITE, params->page_size);
	if (ret != 0) {
		return ret;
	}
	sdma_buffer_copy_in(params->sdma_base, data_buffer, params->page_size);
	NAND_INT_SEM_TAKE(params);
	/* Issue the program-confirm (10h) command to commit the page. */
	mini_ctrl_cmd = 0;
	mini_ctrl_cmd |= PAGE_WRITE_10H_CMD;
	mini_ctrl_cmd |= GCMD_TWB_VALUE;
	mini_ctrl_cmd |= PAGE_CMOD_CMD;
	ret = cdns_generic_send_cmd(params, mini_ctrl_cmd);
	if (ret != 0) {
		return ret;
	}
	NAND_INT_SEM_TAKE(params);
	ret = cdns_wait_sdma(base_address);
	return ret;
}

/**
 * Perform a generic read or write operation for a range of pages in the Cadence NAND controller.
 *
 * @param params The Cadence NAND parameters structure.
 * @param start_page_number The starting page number for the read or write operation.
 * @param buffer The buffer containing the data to be written or to store the read data.
 * @param page_count The number of pages to be read or written.
 * @param mode The mode of operation (read, write).
 * @retval 0 on success or negative error value on failure.
*/ static int cdns_nand_gen_read_write(struct cadence_nand_params *params, uint32_t start_page_number, char *buffer, uint32_t page_count, uint8_t mode) { uint64_t address = 0; uint32_t index = 0; uint32_t gen_row_address = 0; int ret = 0; for (index = 0; index < page_count; index++) { row_address_set(params, &gen_row_address, start_page_number++); address = ((uint64_t)gen_row_address); if (mode == CDNS_READ) { ret = cdns_generic_page_read(params, address, buffer + (index * (params->page_size))); if (ret != 0) { LOG_ERR("Cadence NAND Generic Page Read Error!!"); return ret; } } else { ret = cdns_generic_page_write(params, address, buffer + (index * (params->page_size))); if (ret != 0) { LOG_ERR("Cadence NAND Generic Page write Error!!"); return ret; } } } return 0; } /** * Perform a generic erase operation for a range of blocks in the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @param start_block The starting block number for the erase operation. * @param block_count The number of blocks to be erased. * @retval 0 on success or -ETIMEDOUT error value on failure. */ static int cdns_nand_gen_erase(struct cadence_nand_params *params, uint32_t start_block, uint32_t block_count) { uint64_t mini_ctrl_cmd = 0; uintptr_t base_address = 0; uint32_t gen_row_address = 0; uint32_t index = 0; int ret = 0; for (index = 0; index < block_count; index++) { row_address_set(params, &gen_row_address, (start_block * params->npages_per_block)); start_block++; base_address = params->nand_base; mini_ctrl_cmd |= GCMD_TWB_VALUE; mini_ctrl_cmd |= ERASE_ADDR_SIZE; mini_ctrl_cmd |= ((gen_row_address) & (U32_MASK_VAL)); mini_ctrl_cmd |= PAGE_ERASE_CMD; ret = cdns_generic_send_cmd(params, mini_ctrl_cmd); if (ret != 0) { return ret; } NAND_INT_SEM_TAKE(params); } return 0; } #endif /** * Read data from the Cadence NAND controller into a buffer. 
*/ static inline int cdns_read_data(struct cadence_nand_params *params, uint32_t start_page_number, const void *buffer, uint32_t page_count) { int ret; #if CONFIG_CDNS_NAND_CDMA_MODE ret = cdns_nand_cdma_read(params, start_page_number, (char *)buffer, page_count); #elif CONFIG_CDNS_NAND_PIO_MODE ret = cdns_nand_pio_read_write(params, start_page_number, (char *)buffer, page_count, CDNS_READ); #elif CONFIG_CDNS_NAND_GENERIC_MODE ret = cdns_nand_gen_read_write(params, start_page_number, (char *)buffer, page_count, CDNS_READ); #endif return ret; } /** * Read data from the Cadence NAND controller into a buffer. * * @param params The Cadence NAND parameters structure. * @param buffer The buffer to store the read data. * @param offset The offset within the NAND to start reading from. * @param size The size of the data to read. * @retval 0 on success or negative error value on failure. */ int cdns_nand_read(struct cadence_nand_params *params, const void *buffer, uint32_t offset, uint32_t size) { uint32_t start_page_number; uint32_t end_page_number; uint32_t page_count; int ret = 0; uint16_t r_bytes; uint16_t bytes_dif; uint16_t lp_bytes_dif; uint8_t check_page_first = 0; uint8_t check_page_last = 0; uint8_t *first_end_page; uint8_t *last_end_page; if (params == NULL) { LOG_ERR("Wrong parameter passed!!"); return -EINVAL; } if (size == 0) { return 0; } if ((offset >= params->device_size) || (size > (params->device_size - offset))) { LOG_ERR("Wrong offset or size value passed!!"); return -EINVAL; } start_page_number = offset / (params->page_size); end_page_number = ((offset + size) - 1) / ((params->page_size)); if ((offset % params->page_size) == 0) { check_page_first = 1; } if (((offset + size) % params->page_size) == 0) { check_page_last = 1; } page_count = end_page_number - start_page_number; page_count++; if ((check_page_last == 1) && (check_page_first == 1)) { ret = cdns_read_data(params, start_page_number, (char *)buffer, page_count); if (ret != 0) { return ret; } } 
else if (((check_page_last == 0) && (check_page_first == 1) && (page_count == 1)) || ((check_page_last == 0) && (check_page_first == 0) && (page_count == 1)) || ((check_page_last == 1) && (check_page_first == 0) && (page_count == 1))) { first_end_page = (char *)k_malloc(sizeof(char) * (params->page_size)); if (first_end_page != NULL) { memset(first_end_page, 0xFF, sizeof(char) * (params->page_size)); } else { LOG_ERR("Memory allocation error occurred %s", __func__); return -ENOSR; } ret = cdns_read_data(params, start_page_number, first_end_page, page_count); if (ret != 0) { k_free(first_end_page); return ret; } memcpy((char *)buffer, first_end_page + (offset % (params->page_size)), size); k_free(first_end_page); } else if (((check_page_last == 0) && (check_page_first == 1) && (page_count == 2)) || ((check_page_last == 0) && (check_page_first == 0) && (page_count == 2)) || ((check_page_last == 1) && (check_page_first == 0) && (page_count == 2))) { first_end_page = (char *)k_malloc(sizeof(char) * (params->page_size * 2)); if (first_end_page != NULL) { memset(first_end_page, 0xFF, sizeof(char) * (params->page_size * 2)); } else { LOG_ERR("Memory allocation error occurred %s", __func__); return -ENOSR; } ret = cdns_read_data(params, start_page_number, first_end_page, page_count); if (ret < 0) { k_free(first_end_page); return ret; } memcpy((char *)buffer, first_end_page + (offset % (params->page_size)), size); k_free(first_end_page); } else if ((check_page_last == 0) && (check_page_first == 1) && (page_count > 2)) { first_end_page = (char *)k_malloc(sizeof(char) * (params->page_size)); if (first_end_page != NULL) { memset(first_end_page, 0xFF, sizeof(char) * (params->page_size)); } else { LOG_ERR("Memory allocation error occurred %s", __func__); return -ENOSR; } ret = cdns_read_data(params, end_page_number, first_end_page, 1); if (ret < 0) { k_free(first_end_page); return ret; } r_bytes = (offset + size) % (params->page_size); ret = cdns_read_data(params, 
start_page_number, (char *)buffer, (--page_count)); if (ret != 0) { k_free(first_end_page); return ret; } memcpy((char *)buffer + ((page_count - 1) * params->page_size), first_end_page, r_bytes); k_free(first_end_page); } else if ((check_page_last == 1) && (check_page_first == 0) && (page_count > 2)) { first_end_page = (char *)k_malloc(sizeof(char) * (params->page_size)); if (first_end_page != NULL) { memset(first_end_page, 0xFF, sizeof(char) * (params->page_size)); } else { LOG_ERR("Memory allocation error occurred %s", __func__); return -ENOSR; } ret = cdns_read_data(params, start_page_number, first_end_page, 1); if (ret < 0) { k_free(first_end_page); return ret; } r_bytes = (offset) % (params->page_size); bytes_dif = (((start_page_number + 1) * params->page_size) - r_bytes); r_bytes = (offset + size) % (params->page_size); ret = cdns_read_data(params, (++start_page_number), ((char *)buffer + bytes_dif), (--page_count)); if (ret != 0) { k_free(first_end_page); return ret; } memcpy((char *)buffer, first_end_page + r_bytes, bytes_dif); k_free(first_end_page); } else if ((check_page_last == 0) && (check_page_first == 0) && (page_count > 2)) { first_end_page = (char *)k_malloc(sizeof(char) * (params->page_size)); last_end_page = (char *)k_malloc(sizeof(char) * (params->page_size)); if ((first_end_page != NULL) && (last_end_page != NULL)) { memset(first_end_page, 0xFF, sizeof(char) * (params->page_size)); memset(last_end_page, 0xFF, sizeof(char) * (params->page_size)); } else { LOG_ERR("Memory allocation error occurred %s", __func__); return -ENOSR; } ret = cdns_read_data(params, start_page_number, first_end_page, 1); if (ret != 0) { k_free(first_end_page); k_free(last_end_page); return ret; } r_bytes = (offset) % (params->page_size); bytes_dif = (((start_page_number + 1) * params->page_size) - r_bytes); lp_bytes_dif = (offset + size) % (params->page_size); ret = cdns_read_data(params, end_page_number, last_end_page, 1); if (ret != 0) { k_free(last_end_page); 
k_free(first_end_page); return ret; } r_bytes = (offset + size) % (params->page_size); ret = cdns_read_data(params, (++start_page_number), ((char *)buffer + bytes_dif), (page_count - 2)); if (ret != 0) { k_free(last_end_page); k_free(first_end_page); return ret; } memcpy((char *)buffer, first_end_page + r_bytes, bytes_dif); memcpy(((char *)buffer + bytes_dif + ((page_count - 2) * (params->npages_per_block))), last_end_page, lp_bytes_dif); } return 0; } /** * Write data from a buffer to the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @param buffer The buffer containing the data to be written. * @param offset The offset within the NAND to start writing to. * @param len The length of the data to write. * @retval 0 on success or negative error value on failure. */ int cdns_nand_write(struct cadence_nand_params *params, const void *buffer, uint32_t offset, uint32_t len) { uint32_t start_page_number; uint32_t end_page_number; uint32_t page_count; int ret = 0; if (params == NULL) { LOG_ERR("Wrong parameter passed!!"); return -EINVAL; } if (len == 0) { return 0; } if ((offset >= params->device_size) || (len > (params->device_size - offset))) { LOG_ERR("Wrong offset or len value passed!!"); return -EINVAL; } if ((offset % params->page_size) != 0) { LOG_ERR("offset not page aligned!!! Page size = 0x%x", params->page_size); return -EINVAL; } if ((len % params->page_size) != 0) { LOG_ERR("length not page aligned!!! 
Page size = 0x%x", params->page_size); return -EINVAL; } start_page_number = offset / (params->page_size); end_page_number = ((offset + len) - 1) / ((params->page_size)); page_count = end_page_number - start_page_number; #if CONFIG_CDNS_NAND_CDMA_MODE ret = cdns_nand_cdma_write(params, start_page_number, (char *)buffer, ++page_count); #elif CONFIG_CDNS_NAND_PIO_MODE ret = cdns_nand_pio_read_write(params, start_page_number, (char *)buffer, ++page_count, CDNS_WRITE); #elif CONFIG_CDNS_NAND_GENERIC_MODE ret = cdns_nand_gen_read_write(params, start_page_number, (char *)buffer, ++page_count, CDNS_WRITE); #endif if (ret != 0) { LOG_ERR("Cadence driver write Failed!!!"); } return ret; } /** * Perform an erase operation on the Cadence NAND controller. * * @param params The Cadence NAND parameters structure. * @param offset The offset within the NAND to start erasing. * @param size The size of the data to erase. * @retval 0 on success or negative error value on failure. */ int cdns_nand_erase(struct cadence_nand_params *params, uint32_t offset, uint32_t size) { uint32_t start_block_number; uint32_t end_block_number; uint32_t block_count; int ret; if (params == NULL) { LOG_ERR("Wrong parameter passed!!"); return -EINVAL; } if (size == 0) { return 0; } if ((offset >= params->device_size) || (size > (params->device_size - offset))) { LOG_ERR("Wrong offset or size value passed!!"); return -EINVAL; } if ((offset % (params->block_size)) != 0) { LOG_ERR("Offset value not aligned with block size!! Erase block size = %x", params->block_size); return -EINVAL; } if ((size % (params->block_size)) != 0) { LOG_ERR("Length value not aligned with block size!! 
Erase block size = %x", params->block_size); return -EINVAL; } start_block_number = (offset / ((params->page_size))) / (params->npages_per_block); end_block_number = (((offset + size) - 1) / ((params->page_size))) / (params->npages_per_block); block_count = end_block_number - start_block_number; #if CONFIG_CDNS_NAND_CDMA_MODE ret = cdns_nand_cdma_erase(params, start_block_number, ++block_count); #elif CONFIG_CDNS_NAND_PIO_MODE ret = cdns_nand_pio_erase(params, NF_TDEF_TRD_NUM, NF_TDEF_DEV_NUM, start_block_number, CNF_CMD_ERASE, ++block_count); #elif CONFIG_CDNS_NAND_GENERIC_MODE ret = cdns_nand_gen_erase(params, start_block_number, ++block_count); #endif if (ret != 0) { LOG_ERR("Cadence driver Erase Failed!!!"); } return ret; } #if CONFIG_CDNS_NAND_INTERRUPT_SUPPORT void cdns_nand_irq_handler_ll(struct cadence_nand_params *params) { uint32_t status = 0; uint8_t thread_num = 0; status = sys_read32(params->nand_base + THREAD_INTERRUPT_STATUS); thread_num = find_lsb_set(status); if (GET_INIT_SET_CHECK(status, (thread_num - 1)) != 0) { /* Clear the interrupt*/ sys_write32(BIT((thread_num - 1)), params->nand_base + THREAD_INTERRUPT_STATUS); } } #endif ```
/content/code_sandbox/drivers/flash/flash_cadence_nand_ll.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,860
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_fiu_nor #include <zephyr/kernel.h> #include <zephyr/drivers/flash.h> #include <zephyr/drivers/flash/npcx_flash_api_ex.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/dt-bindings/flash_controller/npcx_fiu_qspi.h> #include <soc.h> #ifdef CONFIG_USERSPACE #include <zephyr/syscall.h> #include <zephyr/internal/syscall_handler.h> #endif #include "flash_npcx_fiu_qspi.h" #include "spi_nor.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flash_npcx_fiu_nor, CONFIG_FLASH_LOG_LEVEL); #define BLOCK_64K_SIZE KB(64) #define BLOCK_4K_SIZE KB(4) /* Device config */ struct flash_npcx_nor_config { /* QSPI bus device for mutex control and bus configuration */ const struct device *qspi_bus; /* Mapped address for flash read via direct access */ uintptr_t mapped_addr; /* Size of nor device in bytes, from size property */ uint32_t flash_size; /* Maximum chip erase time-out in ms */ uint32_t max_timeout; /* SPI Nor device configuration on QSPI bus */ struct npcx_qspi_cfg qspi_cfg; #if defined(CONFIG_FLASH_PAGE_LAYOUT) struct flash_pages_layout layout; #endif }; /* Device data */ struct flash_npcx_nor_data { /* Specific control operation for Quad-SPI Nor Flash */ uint32_t operation; }; static const struct flash_parameters flash_npcx_parameters = { .write_block_size = 1, .erase_value = 0xff, }; #define DT_INST_QUAD_EN_PROP_OR(inst) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, quad_enable_requirements), \ (_CONCAT(JESD216_DW15_QER_VAL_, \ DT_INST_STRING_TOKEN(inst, quad_enable_requirements))), \ ((JESD216_DW15_QER_VAL_NONE))) static inline bool is_within_region(off_t addr, size_t size, off_t region_start, size_t region_size) { return (addr >= region_start && (addr < (region_start + region_size)) && ((addr + size) <= (region_start + region_size))); } static int flash_npcx_uma_transceive(const struct device *dev, struct npcx_uma_cfg *cfg, uint32_t flags) { const struct flash_npcx_nor_config *config = dev->config; struct 
flash_npcx_nor_data *data = dev->data; int ret; /* Lock SPI bus and configure it if needed */ qspi_npcx_fiu_mutex_lock_configure(config->qspi_bus, &config->qspi_cfg, data->operation); /* Execute UMA transaction */ ret = qspi_npcx_fiu_uma_transceive(config->qspi_bus, cfg, flags); /* Unlock SPI bus */ qspi_npcx_fiu_mutex_unlock(config->qspi_bus); return ret; } /* NPCX UMA functions for SPI NOR flash */ static int flash_npcx_uma_cmd_only(const struct device *dev, uint8_t opcode) { struct npcx_uma_cfg cfg = { .opcode = opcode}; return flash_npcx_uma_transceive(dev, &cfg, 0); /* opcode only */ } static int flash_npcx_uma_cmd_by_addr(const struct device *dev, uint8_t opcode, uint32_t addr) { struct npcx_uma_cfg cfg = { .opcode = opcode}; cfg.addr.u32 = sys_cpu_to_be32(addr); return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_ADDR); } static int flash_npcx_uma_read(const struct device *dev, uint8_t opcode, uint8_t *dst, const size_t size) { struct npcx_uma_cfg cfg = { .opcode = opcode, .rx_buf = dst, .rx_count = size}; return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_READ); } static int flash_npcx_uma_write(const struct device *dev, uint8_t opcode, uint8_t *src, const size_t size) { struct npcx_uma_cfg cfg = { .opcode = opcode, .tx_buf = src, .tx_count = size}; return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_WRITE); } static int flash_npcx_uma_write_by_addr(const struct device *dev, uint8_t opcode, uint8_t *src, const size_t size, uint32_t addr) { struct npcx_uma_cfg cfg = { .opcode = opcode, .tx_buf = src, .tx_count = size}; cfg.addr.u32 = sys_cpu_to_be32(addr); return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_WRITE | NPCX_UMA_ACCESS_ADDR); } /* Local SPI NOR flash functions */ static int flash_npcx_nor_wait_until_ready(const struct device *dev) { int ret; uint8_t reg; const struct flash_npcx_nor_config *config = dev->config; int64_t st = k_uptime_get(); do { ret = flash_npcx_uma_read(dev, SPI_NOR_CMD_RDSR, &reg, sizeof(reg)); 
if (ret != 0) { return ret; } else if ((reg & SPI_NOR_WIP_BIT) == 0) { return 0; } } while ((k_uptime_get() - st) < config->max_timeout); return -EBUSY; } static int flash_npcx_nor_read_status_regs(const struct device *dev, uint8_t *sts_reg) { int ret = flash_npcx_uma_read(dev, SPI_NOR_CMD_RDSR, sts_reg, 1); if (ret != 0) { return ret; } return flash_npcx_uma_read(dev, SPI_NOR_CMD_RDSR2, sts_reg + 1, 1); } static int flash_npcx_nor_write_status_regs(const struct device *dev, uint8_t *sts_reg) { int ret; ret = flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN); if (ret != 0) { return ret; } ret = flash_npcx_uma_write(dev, SPI_NOR_CMD_WRSR, sts_reg, 2); if (ret != 0) { return ret; } return flash_npcx_nor_wait_until_ready(dev); } /* Flash API functions */ #if defined(CONFIG_FLASH_JESD216_API) static int flash_npcx_nor_read_jedec_id(const struct device *dev, uint8_t *id) { if (id == NULL) { return -EINVAL; } return flash_npcx_uma_read(dev, SPI_NOR_CMD_RDID, id, SPI_NOR_MAX_ID_LEN); } static int flash_npcx_nor_read_sfdp(const struct device *dev, off_t addr, void *data, size_t size) { uint8_t sfdp_addr[4]; struct npcx_uma_cfg cfg = { .opcode = JESD216_CMD_READ_SFDP, .tx_buf = sfdp_addr, .tx_count = 4, .rx_buf = data, .rx_count = size}; if (data == NULL) { return -EINVAL; } /* CMD_READ_SFDP needs a 24-bit address followed by a dummy byte */ sfdp_addr[0] = (addr >> 16) & 0xff; sfdp_addr[1] = (addr >> 8) & 0xff; sfdp_addr[2] = addr & 0xff; return flash_npcx_uma_transceive(dev, &cfg, NPCX_UMA_ACCESS_WRITE | NPCX_UMA_ACCESS_READ); } #endif /* CONFIG_FLASH_JESD216_API */ #if defined(CONFIG_FLASH_PAGE_LAYOUT) static void flash_npcx_nor_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { const struct flash_npcx_nor_config *config = dev->config; *layout = &config->layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static int flash_npcx_nor_read(const struct device *dev, off_t addr, void *data, size_t size) { const 
struct flash_npcx_nor_config *config = dev->config; struct flash_npcx_nor_data *dev_data = dev->data; /* Out of the region of nor flash device? */ if (!is_within_region(addr, size, 0, config->flash_size)) { return -EINVAL; } /* Lock/Unlock SPI bus also for DRA mode */ qspi_npcx_fiu_mutex_lock_configure(config->qspi_bus, &config->qspi_cfg, dev_data->operation); /* Trigger Direct Read Access (DRA) via reading memory mapped-address */ memcpy(data, (void *)(config->mapped_addr + addr), size); qspi_npcx_fiu_mutex_unlock(config->qspi_bus); return 0; } static int flash_npcx_nor_erase(const struct device *dev, off_t addr, size_t size) { const struct flash_npcx_nor_config *config = dev->config; int ret = 0; /* Out of the region of nor flash device? */ if (!is_within_region(addr, size, 0, config->flash_size)) { LOG_ERR("Addr %ld, size %d are out of range", addr, size); return -EINVAL; } /* address must be sector-aligned */ if (!SPI_NOR_IS_SECTOR_ALIGNED(addr)) { LOG_ERR("Addr %ld is not sector-aligned", addr); return -EINVAL; } /* size must be a multiple of sectors */ if ((size % BLOCK_4K_SIZE) != 0) { LOG_ERR("Size %d is not a multiple of sectors", size); return -EINVAL; } /* Select erase opcode by size */ if (size == config->flash_size) { flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN); /* Send chip erase command */ flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_CE); return flash_npcx_nor_wait_until_ready(dev); } while (size > 0) { flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN); /* Send page/block erase command with addr */ if ((size >= BLOCK_64K_SIZE) && SPI_NOR_IS_64K_ALIGNED(addr)) { flash_npcx_uma_cmd_by_addr(dev, SPI_NOR_CMD_BE, addr); addr += BLOCK_64K_SIZE; size -= BLOCK_64K_SIZE; } else { flash_npcx_uma_cmd_by_addr(dev, SPI_NOR_CMD_SE, addr); addr += BLOCK_4K_SIZE; size -= BLOCK_4K_SIZE; } ret = flash_npcx_nor_wait_until_ready(dev); if (ret != 0) { break; } } return ret; } static int flash_npcx_nor_write(const struct device *dev, off_t addr, const void *data, size_t size) 
{ const struct flash_npcx_nor_config *config = dev->config; uint8_t *tx_buf = (uint8_t *)data; int ret = 0; size_t sz_write; /* Out of the region of nor flash device? */ if (!is_within_region(addr, size, 0, config->flash_size)) { return -EINVAL; } /* Don't write more than a page. */ if (size > SPI_NOR_PAGE_SIZE) { sz_write = SPI_NOR_PAGE_SIZE; } else { sz_write = size; } /* * Correct the size of first write to not go through page boundary and * make the address of next write to align to page boundary. */ if (((addr + sz_write - 1U) / SPI_NOR_PAGE_SIZE) != (addr / SPI_NOR_PAGE_SIZE)) { sz_write -= (addr + sz_write) & (SPI_NOR_PAGE_SIZE - 1); } while (size > 0) { /* Start to write */ flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN); ret = flash_npcx_uma_write_by_addr(dev, SPI_NOR_CMD_PP, tx_buf, sz_write, addr); if (ret != 0) { break; } /* Wait for writing completed */ ret = flash_npcx_nor_wait_until_ready(dev); if (ret != 0) { break; } size -= sz_write; tx_buf += sz_write; addr += sz_write; if (size > SPI_NOR_PAGE_SIZE) { sz_write = SPI_NOR_PAGE_SIZE; } else { sz_write = size; } } return ret; } static const struct flash_parameters * flash_npcx_nor_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_npcx_parameters; }; #ifdef CONFIG_FLASH_EX_OP_ENABLED static int flash_npcx_nor_ex_exec_uma(const struct device *dev, const struct npcx_ex_ops_uma_in *op_in, const struct npcx_ex_ops_uma_out *op_out) { int flag = 0; struct npcx_uma_cfg cfg; if (op_in == NULL) { return -EINVAL; } /* Organize a UMA transaction */ cfg.opcode = op_in->opcode; if (op_in->tx_count != 0) { cfg.tx_buf = op_in->tx_buf; cfg.tx_count = op_in->tx_count; flag |= NPCX_UMA_ACCESS_WRITE; } if (op_in->addr_count != 0) { cfg.addr.u32 = sys_cpu_to_be32(op_in->addr); flag |= NPCX_UMA_ACCESS_ADDR; } if (op_out != NULL && op_in->rx_count != 0) { cfg.rx_buf = op_out->rx_buf; cfg.rx_count = op_in->rx_count; flag |= NPCX_UMA_ACCESS_READ; } return flash_npcx_uma_transceive(dev, &cfg, flag); } 
static int flash_npcx_nor_ex_set_spi_spec(const struct device *dev, const struct npcx_ex_ops_qspi_oper_in *op_in) { struct flash_npcx_nor_data *data = dev->data; /* Cannot disable write protection of internal flash */ if ((data->operation & NPCX_EX_OP_INT_FLASH_WP) != 0) { if ((op_in->mask & NPCX_EX_OP_INT_FLASH_WP) != 0 && !op_in->enable) { return -EINVAL; } } if (op_in->enable) { data->operation |= op_in->mask; } else { data->operation &= ~op_in->mask; } return 0; } static int flash_npcx_nor_ex_get_spi_spec(const struct device *dev, struct npcx_ex_ops_qspi_oper_out *op_out) { struct flash_npcx_nor_data *data = dev->data; op_out->oper = data->operation; return 0; } static int flash_npcx_nor_ex_op(const struct device *dev, uint16_t code, const uintptr_t in, void *out) { #ifdef CONFIG_USERSPACE bool syscall_trap = z_syscall_trap(); #endif int ret; switch (code) { case FLASH_NPCX_EX_OP_EXEC_UMA: { struct npcx_ex_ops_uma_in *op_in = (struct npcx_ex_ops_uma_in *)in; struct npcx_ex_ops_uma_out *op_out = (struct npcx_ex_ops_uma_out *)out; #ifdef CONFIG_USERSPACE struct npcx_ex_ops_uma_in in_copy; struct npcx_ex_ops_uma_out out_copy; if (syscall_trap) { K_OOPS(k_usermode_from_copy(&in_copy, op_in, sizeof(in_copy))); op_in = &in_copy; op_out = &out_copy; } #endif ret = flash_npcx_nor_ex_exec_uma(dev, op_in, op_out); #ifdef CONFIG_USERSPACE if (ret == 0 && syscall_trap) { K_OOPS(k_usermode_to_copy(out, op_out, sizeof(out_copy))); } #endif break; } case FLASH_NPCX_EX_OP_SET_QSPI_OPER: { struct npcx_ex_ops_qspi_oper_in *op_in = (struct npcx_ex_ops_qspi_oper_in *)in; #ifdef CONFIG_USERSPACE struct npcx_ex_ops_qspi_oper_in in_copy; if (syscall_trap) { K_OOPS(k_usermode_from_copy(&in_copy, op_in, sizeof(in_copy))); op_in = &in_copy; } #endif ret = flash_npcx_nor_ex_set_spi_spec(dev, op_in); break; } case FLASH_NPCX_EX_OP_GET_QSPI_OPER: { struct npcx_ex_ops_qspi_oper_out *op_out = (struct npcx_ex_ops_qspi_oper_out *)out; #ifdef CONFIG_USERSPACE struct npcx_ex_ops_qspi_oper_out 
out_copy; if (syscall_trap) { op_out = &out_copy; } #endif ret = flash_npcx_nor_ex_get_spi_spec(dev, op_out); #ifdef CONFIG_USERSPACE if (ret == 0 && syscall_trap) { K_OOPS(k_usermode_to_copy(out, op_out, sizeof(out_copy))); } #endif break; } default: ret = -ENOTSUP; break; } return ret; } #endif static const struct flash_driver_api flash_npcx_nor_driver_api = { .read = flash_npcx_nor_read, .write = flash_npcx_nor_write, .erase = flash_npcx_nor_erase, .get_parameters = flash_npcx_nor_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_npcx_nor_pages_layout, #endif #if defined(CONFIG_FLASH_JESD216_API) .sfdp_read = flash_npcx_nor_read_sfdp, .read_jedec_id = flash_npcx_nor_read_jedec_id, #endif #ifdef CONFIG_FLASH_EX_OP_ENABLED .ex_op = flash_npcx_nor_ex_op, #endif }; static int flash_npcx_nor_init(const struct device *dev) { const struct flash_npcx_nor_config *config = dev->config; int ret; if (!IS_ENABLED(CONFIG_FLASH_NPCX_FIU_NOR_INIT)) { return 0; } /* Enable quad access of spi NOR flash */ if (config->qspi_cfg.qer_type != JESD216_DW15_QER_NONE) { uint8_t qe_idx, qe_bit, sts_reg[2]; /* Read status registers first */ ret = flash_npcx_nor_read_status_regs(dev, sts_reg); if (ret != 0) { LOG_ERR("Enable quad access: read reg failed %d!", ret); return ret; } switch (config->qspi_cfg.qer_type) { case JESD216_DW15_QER_S1B6: qe_idx = 1; qe_bit = 6; break; case JESD216_DW15_QER_S2B1v1: __fallthrough; case JESD216_DW15_QER_S2B1v4: __fallthrough; case JESD216_DW15_QER_S2B1v5: qe_idx = 2; qe_bit = 1; break; default: return -ENOTSUP; } /* Set QE bit in status register */ sts_reg[qe_idx - 1] |= BIT(qe_bit); ret = flash_npcx_nor_write_status_regs(dev, sts_reg); if (ret != 0) { LOG_ERR("Enable quad access: write reg failed %d!", ret); return ret; } } /* Enable 4-byte address of spi NOR flash */ if (config->qspi_cfg.enter_4ba != 0) { bool wr_en = (config->qspi_cfg.enter_4ba & 0x02) != 0; if (wr_en) { ret = flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_WREN); if 
(ret != 0) { LOG_ERR("Enable 4byte addr: WREN failed %d!", ret); return ret; } } ret = flash_npcx_uma_cmd_only(dev, SPI_NOR_CMD_4BA); if (ret != 0) { LOG_ERR("Enable 4byte addr: 4BA failed %d!", ret); return ret; } } return 0; } #define NPCX_FLASH_NOR_INIT(n) \ BUILD_ASSERT(DT_INST_QUAD_EN_PROP_OR(n) == JESD216_DW15_QER_NONE || \ DT_INST_STRING_TOKEN(n, rd_mode) == NPCX_RD_MODE_FAST_DUAL, \ "Fast Dual IO read must be selected in Quad mode"); \ PINCTRL_DT_INST_DEFINE(n); \ static const struct flash_npcx_nor_config flash_npcx_nor_config_##n = { \ .qspi_bus = DEVICE_DT_GET(DT_PARENT(DT_DRV_INST(n))), \ .mapped_addr = DT_INST_PROP(n, mapped_addr), \ .flash_size = DT_INST_PROP(n, size) / 8, \ .max_timeout = DT_INST_PROP(n, max_timeout), \ .qspi_cfg = { \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .flags = DT_INST_PROP(n, qspi_flags), \ .enter_4ba = DT_INST_PROP_OR(n, enter_4byte_addr, 0), \ .qer_type = DT_INST_QUAD_EN_PROP_OR(n), \ .rd_mode = DT_INST_STRING_TOKEN(n, rd_mode), \ }, \ IF_ENABLED(CONFIG_FLASH_PAGE_LAYOUT, ( \ .layout = { \ .pages_count = DT_INST_PROP(n, size) / \ (8 * SPI_NOR_PAGE_SIZE), \ .pages_size = SPI_NOR_PAGE_SIZE, \ },)) \ }; \ static struct flash_npcx_nor_data flash_npcx_nor_data_##n; \ DEVICE_DT_INST_DEFINE(n, flash_npcx_nor_init, NULL, \ &flash_npcx_nor_data_##n, &flash_npcx_nor_config_##n, \ POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, \ &flash_npcx_nor_driver_api); DT_INST_FOREACH_STATUS_OKAY(NPCX_FLASH_NOR_INIT) ```
/content/code_sandbox/drivers/flash/flash_npcx_fiu_nor.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,046
```c /*
 * NXP MCUX (Kinetis FTFx / LPC IAP) internal flash driver.
 */

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <errno.h>
#include <zephyr/init.h>
#include <soc.h>
#include <zephyr/sys/barrier.h>
#include "flash_priv.h"

#include "fsl_common.h"

#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(flash_mcux);

/* Pick the devicetree compatible; exactly one flash controller is expected. */
#if DT_NODE_HAS_STATUS(DT_INST(0, nxp_kinetis_ftfa), okay)
#define DT_DRV_COMPAT nxp_kinetis_ftfa
#elif DT_NODE_HAS_STATUS(DT_INST(0, nxp_kinetis_ftfe), okay)
#define DT_DRV_COMPAT nxp_kinetis_ftfe
#elif DT_NODE_HAS_STATUS(DT_INST(0, nxp_kinetis_ftfl), okay)
#define DT_DRV_COMPAT nxp_kinetis_ftfl
#elif DT_NODE_HAS_STATUS(DT_INST(0, nxp_iap_fmc55), okay)
#define DT_DRV_COMPAT nxp_iap_fmc55
#define SOC_HAS_IAP 1
#elif DT_NODE_HAS_STATUS(DT_INST(0, nxp_iap_fmc553), okay)
#define DT_DRV_COMPAT nxp_iap_fmc553
#define SOC_HAS_IAP 1
#elif DT_NODE_HAS_STATUS(DT_INST(0, nxp_iap_msf1), okay)
#define DT_DRV_COMPAT nxp_iap_msf1
#define SOC_HAS_IAP_MSF1 1
#else
#error No matching compatible for soc_flash_mcux.c
#endif

#if defined(SOC_HAS_IAP) && !defined(CONFIG_SOC_LPC55S36)
#include "fsl_iap.h"
#else
#include "fsl_flash.h"
#endif /* SOC_HAS_IAP && !CONFIG_SOC_LPC55S36*/

#define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash)

#if defined(CONFIG_CHECK_BEFORE_READING) && !defined(CONFIG_SOC_LPC55S36)
#define FMC_STATUS_FAIL	FLASH_INT_CLR_ENABLE_FAIL_MASK
#define FMC_STATUS_ERR	FLASH_INT_CLR_ENABLE_ERR_MASK
#define FMC_STATUS_DONE	FLASH_INT_CLR_ENABLE_DONE_MASK
#define FMC_STATUS_ECC	FLASH_INT_CLR_ENABLE_ECC_ERR_MASK

#define FMC_STATUS_FAILURES \
	(FMC_STATUS_FAIL | FMC_STATUS_ERR | FMC_STATUS_ECC)

#define FMC_CMD_BLANK_CHECK 5
#define FMC_CMD_MARGIN_CHECK 6

/* Issue a single FMC command that uses a start and stop address.
 * Returns the raw INT_STATUS bits observed when the command completes.
 */
static uint32_t get_cmd_status(uint32_t cmd, uint32_t addr, size_t len)
{
	FLASH_Type *p_fmc = (FLASH_Type *)DT_INST_REG_ADDR(0);
	uint32_t status;

	/* issue low level command */
	p_fmc->INT_CLR_STATUS = 0xF;
	/* STARTA/STOPA are 18-bit, 16-byte-granular addresses */
	p_fmc->STARTA = (addr>>4) & 0x3FFFF;
	p_fmc->STOPA = ((addr+len-1)>>4) & 0x3FFFF;
	p_fmc->CMD = cmd;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	/* wait for command to be done */
	while (!(p_fmc->INT_STATUS & FMC_STATUS_DONE))
		;

	/* get read status and then clear it */
	status = p_fmc->INT_STATUS;
	p_fmc->INT_CLR_STATUS = 0xF;

	return status;
}

/* This function prevents erroneous reading. Some ECC enabled devices will
 * crash when reading an erased or wrongly programmed area.
 * Returns 0 if readable, -ENODATA if erased, -EIO on error.
 */
static status_t is_area_readable(uint32_t addr, size_t len)
{
	uint32_t key;
	status_t status;

	key = irq_lock();

	/* Check if the area is correctly programmed and can be read. */
	status = get_cmd_status(FMC_CMD_MARGIN_CHECK, addr, len);
	if (status & FMC_STATUS_FAILURES) {
		/* If the area was erased, ECC errors are triggered on read. */
		status = get_cmd_status(FMC_CMD_BLANK_CHECK, addr, len);
		if (!(status & FMC_STATUS_FAIL)) {
			LOG_DBG("read request on erased addr:0x%08x size:%d",
				addr, len);
			irq_unlock(key);
			return -ENODATA;
		}
		LOG_DBG("read request error for addr:0x%08x size:%d",
			addr, len);
		irq_unlock(key);
		return -EIO;
	}

	irq_unlock(key);
	return 0;
}
#endif /* CONFIG_CHECK_BEFORE_READING && ! CONFIG_SOC_LPC55S36 */

struct flash_priv {
	flash_config_t config;
	/*
	 * HACK: flash write protection is managed in software.
	 */
	struct k_sem write_lock;
	uint32_t pflash_block_base;
};

static const struct flash_parameters flash_mcux_parameters = {
#if DT_NODE_HAS_PROP(SOC_NV_FLASH_NODE, write_block_size)
	.write_block_size = DT_PROP(SOC_NV_FLASH_NODE, write_block_size),
#else
	.write_block_size = FSL_FEATURE_FLASH_PFLASH_BLOCK_WRITE_UNIT_SIZE,
#endif
	.erase_value = 0xff,
};

/*
 * Interrupt vectors could be executed from flash hence the need for locking.
 * The underlying MCUX driver takes care of copying the functions to SRAM.
 *
 * For more information, see the application note below on Read-While-Write
 * path_to_url
 *
 */
static int flash_mcux_erase(const struct device *dev, off_t offset,
			    size_t len)
{
	struct flash_priv *priv = dev->data;
	uint32_t addr;
	status_t rc;
	unsigned int key;

	if (k_sem_take(&priv->write_lock, K_FOREVER)) {
		return -EACCES;
	}

	addr = offset + priv->pflash_block_base;

	/* IRQs locked: vectors may live in the flash being erased */
	key = irq_lock();
	rc = FLASH_Erase(&priv->config, addr, len, kFLASH_ApiEraseKey);
	irq_unlock(key);

	k_sem_give(&priv->write_lock);

	return (rc == kStatus_Success) ? 0 : -EINVAL;
}

/*
 * @brief Read a flash memory area.
 *
 * @param dev Device struct
 * @param offset The address's offset
 * @param data The buffer to store or read the value
 * @param len The size of the buffer
 * @return 0 on success,
 * -EIO for erroneous area
 */
static int flash_mcux_read(const struct device *dev, off_t offset,
			   void *data, size_t len)
{
	struct flash_priv *priv = dev->data;
	uint32_t addr;
	status_t rc = 0;

	/*
	 * The MCUX supports different flash chips whose valid ranges are
	 * hidden below the API: until the API export these ranges, we can not
	 * do any generic validation
	 */
	addr = offset + priv->pflash_block_base;

#ifdef CONFIG_CHECK_BEFORE_READING
#ifdef CONFIG_SOC_LPC55S36
	/* Validates the given address range is loaded in the flash hiding region. */
	rc = FLASH_IsFlashAreaReadable(&priv->config, addr, len);
	if (rc != kStatus_FLASH_Success) {
		rc = -EIO;
	} else {
		/* Check whether the flash is erased ("len" and "addr" must be
		 * word-aligned).
		 */
		rc = FLASH_VerifyErase(&priv->config, ((addr + 0x3) & ~0x3),
				       ((len + 0x3) & ~0x3));
		if (rc == kStatus_FLASH_Success) {
			rc = -ENODATA;
		} else {
			rc = 0;
		}
	}
#else
	rc = is_area_readable(addr, len);
#endif /* CONFIG_SOC_LPC55S36 */
#endif /* CONFIG_CHECK_BEFORE_READING */

	if (!rc) {
		memcpy(data, (void *) addr, len);
	}
#ifdef CONFIG_CHECK_BEFORE_READING
	else if (rc == -ENODATA) {
		/* Erased area, return dummy data as an erased page. */
		memset(data, 0xFF, len);
		rc = 0;
	}
#endif
	return rc;
}

static int flash_mcux_write(const struct device *dev, off_t offset,
			    const void *data, size_t len)
{
	struct flash_priv *priv = dev->data;
	uint32_t addr;
	status_t rc;
	unsigned int key;

	if (k_sem_take(&priv->write_lock, K_FOREVER)) {
		return -EACCES;
	}

	addr = offset + priv->pflash_block_base;

	/* IRQs locked: vectors may live in the flash being programmed */
	key = irq_lock();
	rc = FLASH_Program(&priv->config, addr, (uint8_t *) data, len);
	irq_unlock(key);

	k_sem_give(&priv->write_lock);

	return (rc == kStatus_Success) ? 0 : -EINVAL;
}

#if defined(CONFIG_FLASH_PAGE_LAYOUT)
static const struct flash_pages_layout dev_layout = {
	.pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) /
				DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
	.pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
};

static void flash_mcux_pages_layout(const struct device *dev,
				    const struct flash_pages_layout **layout,
				    size_t *layout_size)
{
	*layout = &dev_layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

static const struct flash_parameters *
flash_mcux_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_mcux_parameters;
}

static struct flash_priv flash_data;

static const struct flash_driver_api flash_mcux_api = {
	.erase = flash_mcux_erase,
	.write = flash_mcux_write,
	.read = flash_mcux_read,
	.get_parameters = flash_mcux_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = flash_mcux_pages_layout,
#endif
};

/* Initialize the HAL flash config and cache the P-flash base address. */
static int flash_mcux_init(const struct device *dev)
{
	struct flash_priv *priv = dev->data;
	uint32_t pflash_block_base;
	status_t rc;

	k_sem_init(&priv->write_lock, 1, 1);

	rc = FLASH_Init(&priv->config);

#if defined(SOC_HAS_IAP) || defined(SOC_HAS_IAP_MSF1)
	FLASH_GetProperty(&priv->config, kFLASH_PropertyPflashBlockBaseAddr,
			  &pflash_block_base);
#else
	FLASH_GetProperty(&priv->config, kFLASH_PropertyPflash0BlockBaseAddr,
			  &pflash_block_base);
#endif
	priv->pflash_block_base = (uint32_t) pflash_block_base;

	return (rc == kStatus_Success) ? 0 : -EIO;
}

DEVICE_DT_INST_DEFINE(0, flash_mcux_init, NULL,
			&flash_data, NULL, POST_KERNEL,
			CONFIG_FLASH_INIT_PRIORITY, &flash_mcux_api); ```
/content/code_sandbox/drivers/flash/soc_flash_mcux.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,301
```c /*
 * STM32F2-series flash controller back-end for the common flash_stm32 driver.
 */

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <soc.h>

#include "flash_stm32.h"

bool flash_stm32_valid_range(const struct device *dev, off_t offset,
			     uint32_t len, bool write)
{
	ARG_UNUSED(write);

	return flash_stm32_range_exists(dev, offset, len);
}

/* Reset the data and instruction caches after an erase so stale lines
 * are not served; each cache must be disabled before its reset bit is set.
 */
static inline void flush_cache(FLASH_TypeDef *regs)
{
	/* If Data cache is enabled, disable Data cache, reset Data cache
	 * and then re-enable Data cache.
	 */
	if (regs->ACR & FLASH_ACR_DCEN) {
		regs->ACR &= ~FLASH_ACR_DCEN;
		/* Datasheet: DCRST: Data cache reset
		 * This bit can be written only when the Data cache is disabled
		 */
		regs->ACR |= FLASH_ACR_DCRST;
		regs->ACR &= ~FLASH_ACR_DCRST;
		regs->ACR |= FLASH_ACR_DCEN;
	}

	/* If Instruction cache is enabled, disable Instruction cache, reset
	 * Instruction cache and then re-enable Instruction cache.
	 */
	if (regs->ACR & FLASH_ACR_ICEN) {
		regs->ACR &= ~FLASH_ACR_ICEN;
		/* Datasheet: ICRST: Instruction cache reset
		 * This bit can be written only when the Instruction cache
		 * is disabled
		 */
		regs->ACR |= FLASH_ACR_ICRST;
		regs->ACR &= ~FLASH_ACR_ICRST;
		regs->ACR |= FLASH_ACR_ICEN;
	}
}

/* Program a single byte at 'offset' (relative to flash base). */
static int write_byte(const struct device *dev, off_t offset, uint8_t val)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check that no Flash main memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Byte-wide programming (PSIZE), then set PG to start */
	regs->CR &= ~FLASH_CR_PSIZE;
	regs->CR |= FLASH_PSIZE_BYTE;
	regs->CR |= FLASH_CR_PG;

	/* flush the register write */
	tmp = regs->CR;

	*((uint8_t *) offset + FLASH_STM32_BASE_ADDRESS) = val;

	/* Wait until the BSY bit is cleared */
	rc = flash_stm32_wait_flash_idle(dev);

	/* Clear the PG bit */
	regs->CR &= (~FLASH_CR_PG);

	return rc;
}

/* Erase one sector by index; caches are flushed afterwards. */
static int erase_sector(const struct device *dev, uint32_t sector)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check that no Flash memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Sector number goes into the SNB field (bits [6:3]) */
	regs->CR &= ~FLASH_CR_SNB;
	regs->CR |= FLASH_CR_SER | (sector << 3);
	regs->CR |= FLASH_CR_STRT;

	/* flush the register write */
	tmp = regs->CR;

	/* Wait for the BSY bit */
	rc = flash_stm32_wait_flash_idle(dev);

	flush_cache(regs);

	regs->CR &= ~(FLASH_CR_SER | FLASH_CR_SNB);

	return rc;
}

/* Erase every sector that overlaps [offset, offset+len). */
int flash_stm32_block_erase_loop(const struct device *dev,
				 unsigned int offset,
				 unsigned int len)
{
	struct flash_pages_info info;
	uint32_t start_sector, end_sector;
	uint32_t i;
	int rc = 0;

	rc = flash_get_page_info_by_offs(dev, offset, &info);
	if (rc) {
		return rc;
	}
	start_sector = info.index;
	rc = flash_get_page_info_by_offs(dev, offset + len - 1, &info);
	if (rc) {
		return rc;
	}
	end_sector = info.index;

	for (i = start_sector; i <= end_sector; i++) {
		rc = erase_sector(dev, i);
		if (rc < 0) {
			break;
		}
	}

	return rc;
}

/* Program 'len' bytes one at a time starting at 'offset'. */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	int i, rc = 0;

	for (i = 0; i < len; i++, offset++) {
		rc = write_byte(dev, offset, ((const uint8_t *) data)[i]);
		if (rc < 0) {
			return rc;
		}
	}

	return rc;
}

/*
 * The flash memory in stm32f2 series has bank 1 only with 12 sectors,
 * they are split as 4 sectors of 16 Kbytes, 1 sector of 64 Kbytes,
 * and 7 sectors of 128 Kbytes.
 */
#ifndef FLASH_SECTOR_TOTAL
#error "Unknown flash layout"
#else  /* defined(FLASH_SECTOR_TOTAL) */
#if FLASH_SECTOR_TOTAL == 12
static const struct flash_pages_layout stm32f2_flash_layout[] = {
	/*
	 * PM0059, table 10: STM32F207xx
	 */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
};
#else
#error "Unknown flash layout"
#endif /* FLASH_SECTOR_TOTAL == 12 */
#endif/* !defined(FLASH_SECTOR_TOTAL) */

void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	ARG_UNUSED(dev);

	*layout = stm32f2_flash_layout;
	*layout_size = ARRAY_SIZE(stm32f2_flash_layout);
} ```
/content/code_sandbox/drivers/flash/flash_stm32f2x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,322
```c /* * */ #include <errno.h> #include <zephyr/drivers/flash.h> static int flash_get_page_info(const struct device *dev, off_t offs, uint32_t index, struct flash_pages_info *info) { const struct flash_driver_api *api = dev->api; const struct flash_pages_layout *layout; size_t layout_size; uint32_t index_jmp; info->start_offset = 0; info->index = 0U; api->page_layout(dev, &layout, &layout_size); while (layout_size--) { info->size = layout->pages_size; if (offs == 0) { index_jmp = index - info->index; } else { index_jmp = (offs - info->start_offset) / info->size; } index_jmp = MIN(index_jmp, layout->pages_count); info->start_offset += (index_jmp * info->size); info->index += index_jmp; if (index_jmp < layout->pages_count) { return 0; } layout++; } return -EINVAL; /* page at offs or idx doesn't exist */ } int z_impl_flash_get_page_info_by_offs(const struct device *dev, off_t offs, struct flash_pages_info *info) { return flash_get_page_info(dev, offs, 0U, info); } int z_impl_flash_get_page_info_by_idx(const struct device *dev, uint32_t page_index, struct flash_pages_info *info) { return flash_get_page_info(dev, 0, page_index, info); } size_t z_impl_flash_get_page_count(const struct device *dev) { const struct flash_driver_api *api = dev->api; const struct flash_pages_layout *layout; size_t layout_size; size_t count = 0; api->page_layout(dev, &layout, &layout_size); while (layout_size--) { count += layout->pages_count; layout++; } return count; } void flash_page_foreach(const struct device *dev, flash_page_cb cb, void *data) { const struct flash_driver_api *api = dev->api; const struct flash_pages_layout *layout; struct flash_pages_info page_info; size_t block, num_blocks, page = 0, i; off_t off = 0; api->page_layout(dev, &layout, &num_blocks); for (block = 0; block < num_blocks; block++) { const struct flash_pages_layout *l = &layout[block]; page_info.size = l->pages_size; for (i = 0; i < l->pages_count; i++) { page_info.start_offset = off; page_info.index = page; if 
(!cb(&page_info, data)) { return; } off += page_info.size; page++; } } } ```
/content/code_sandbox/drivers/flash/flash_page_layout.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
616
```unknown # NPCX Flash driver configuration options config FLASH_NPCX_FIU_QSPI bool "Nuvoton NPCX QSPI Bus Flash driver" default y depends on DT_HAS_NUVOTON_NPCX_FIU_QSPI_ENABLED help This option enables the QSPI Bus Flash driver for NPCX family of processors. config FLASH_NPCX_FIU_NOR bool "Nuvoton NPCX embedded controller (EC) QSPI NOR Flash driver" default y depends on DT_HAS_NUVOTON_NPCX_FIU_NOR_ENABLED depends on FLASH_NPCX_FIU_QSPI select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_EXPLICIT_ERASE select FLASH_JESD216 select FLASH_HAS_EX_OP help This option enables the QSPI NOR Flash driver for NPCX family of processors. if FLASH_NPCX_FIU_QSPI config FLASH_NPCX_FIU_NOR_INIT bool "QSPI NOR flash feature during driver initialization" default y depends on FLASH_NPCX_FIU_NOR help This option enables the QSPI NOR Flash features such as Quad-Enable, 4-byte address support and so on during driver initialization. Disable it if QSPI NOR devices are not ready during driver initialization. config FLASH_NPCX_FIU_DRA_V1 bool "Direct Read Access version 1 support" default y if SOC_SERIES_NPCX9 help This option enables DRA V1 support. config FLASH_NPCX_FIU_DRA_V2 bool "Direct Read Access version 2 support" default y if SOC_SERIES_NPCX4 help This option enables DRA V2 support. config FLASH_NPCX_FIU_SUPP_DRA_4B_ADDR bool "4 byte address support in Direct Read Access mode" default y if FLASH_NPCX_FIU_DRA_V1 || \ FLASH_NPCX_FIU_DRA_V2 help Selected if NPCX series supports 4 byte address mode in Direct Read Access mode. config FLASH_NPCX_FIU_SUPP_DRA_2_DEV bool "Two external SPI devices support in Direct Read Access mode" default y if FLASH_NPCX_FIU_DRA_V1 || \ FLASH_NPCX_FIU_DRA_V2 help Selected if NPCX series supports two external SPI devices in Direct Read Access (DRA) on QSPI bus. endif #FLASH_NPCX_FIU_QSPI ```
/content/code_sandbox/drivers/flash/Kconfig.npcx_fiu
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
524
```c /*
 * Adesto/Renesas ATXP032 NOR flash driver on the Zephyr MSPI bus.
 * NOTE(review): this chunk is truncated; flash_mspi_atxp032_read continues
 * past the visible end.
 */

#define DT_DRV_COMPAT mspi_atxp032

#include <zephyr/kernel.h>
#include <zephyr/pm/device.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/mspi.h>

#if CONFIG_SOC_FAMILY_AMBIQ
#include "mspi_ambiq.h"
typedef struct mspi_ambiq_timing_cfg mspi_timing_cfg;
typedef enum mspi_ambiq_timing_param mspi_timing_param;
#else
typedef struct mspi_timing_cfg mspi_timing_cfg;
typedef enum mspi_timing_param mspi_timing_param;
/* No-op accessors when the controller has no vendor timing config */
#define TIMING_CFG_GET_RX_DUMMY(cfg)
#define TIMING_CFG_SET_RX_DUMMY(cfg, num)
#endif

#include <zephyr/drivers/flash.h>
#include "spi_nor.h"

LOG_MODULE_REGISTER(flash_mspi_atxp032, CONFIG_FLASH_LOG_LEVEL);

#define NOR_WRITE_SIZE 1
#define NOR_ERASE_VALUE 0xff

#define ATXP032_VENDOR_ID 0x43

/* Encoded dummy-clock settings accepted by the device */
enum atxp032_dummy_clock {
	ATXP032_DC_8,
	ATXP032_DC_10,
	ATXP032_DC_12,
	ATXP032_DC_14,
	ATXP032_DC_16,
	ATXP032_DC_18,
	ATXP032_DC_20,
	ATXP032_DC_22,
};

struct flash_mspi_atxp032_config {
	uint32_t port;
	uint32_t mem_size;
	struct flash_parameters flash_param;
	struct flash_pages_layout page_layout;

	const struct device *bus;
	struct mspi_dev_id dev_id;
	struct mspi_dev_cfg serial_cfg;
	struct mspi_dev_cfg tar_dev_cfg;
	struct mspi_xip_cfg tar_xip_cfg;
	struct mspi_scramble_cfg tar_scramble_cfg;

	mspi_timing_cfg tar_timing_cfg;
	mspi_timing_param timing_cfg_mask;

	bool sw_multi_periph;
};

struct flash_mspi_atxp032_data {
	struct mspi_dev_cfg dev_cfg;
	struct mspi_xip_cfg xip_cfg;
	struct mspi_scramble_cfg scramble_cfg;
	mspi_timing_cfg timing_cfg;
	struct mspi_xfer trans;
	struct mspi_xfer_packet packet;

	struct k_sem lock;
	uint32_t jedec_id;
};

/* Map a RX dummy-cycle count to the device's dummy-clock code; 1 if unsupported. */
static int atxp032_get_dummy_clk(uint8_t rxdummy, uint32_t *dummy_clk)
{
	switch (rxdummy) {
	case 8:
		*dummy_clk = ATXP032_DC_8;
		break;
	case 10:
		*dummy_clk = ATXP032_DC_10;
		break;
	case 12:
		*dummy_clk = ATXP032_DC_12;
		break;
	case 14:
		*dummy_clk = ATXP032_DC_14;
		break;
	case 16:
		*dummy_clk = ATXP032_DC_16;
		break;
	case 18:
		*dummy_clk = ATXP032_DC_18;
		break;
	case 20:
		*dummy_clk = ATXP032_DC_20;
		break;
	case 22:
		*dummy_clk = ATXP032_DC_22;
		break;
	default:
		return 1;
	}
	return 0;
}

/* PIO write transaction: cmd [+ address] + optional payload. */
static int flash_mspi_atxp032_command_write(const struct device *flash, uint8_t cmd,
					    uint32_t addr, uint16_t addr_len,
					    uint32_t tx_dummy, uint8_t *wdata,
					    uint32_t length)
{
	const struct flash_mspi_atxp032_config *cfg = flash->config;
	struct flash_mspi_atxp032_data *data = flash->data;
	int ret;

	data->packet.dir = MSPI_TX;
	data->packet.cmd = cmd;
	data->packet.address = addr;
	data->packet.data_buf = wdata;
	data->packet.num_bytes = length;

	data->trans.async = false;
	data->trans.xfer_mode = MSPI_PIO;
	data->trans.tx_dummy = tx_dummy;
	data->trans.cmd_length = 1;
	data->trans.addr_length = addr_len;
	data->trans.hold_ce = false;
	data->trans.packets = &data->packet;
	data->trans.num_packet = 1;
	data->trans.timeout = 10;

	ret = mspi_transceive(cfg->bus, &cfg->dev_id,
			      (const struct mspi_xfer *)&data->trans);
	if (ret) {
		LOG_ERR("MSPI write transaction failed with code: %d/%u", ret, __LINE__);
		return -EIO;
	}
	return ret;
}

/* PIO read transaction: cmd [+ address] + read payload. */
static int flash_mspi_atxp032_command_read(const struct device *flash, uint8_t cmd,
					   uint32_t addr, uint16_t addr_len,
					   uint32_t rx_dummy, uint8_t *rdata,
					   uint32_t length)
{
	const struct flash_mspi_atxp032_config *cfg = flash->config;
	struct flash_mspi_atxp032_data *data = flash->data;
	int ret;

	data->packet.dir = MSPI_RX;
	data->packet.cmd = cmd;
	data->packet.address = addr;
	data->packet.data_buf = rdata;
	data->packet.num_bytes = length;

	data->trans.async = false;
	data->trans.xfer_mode = MSPI_PIO;
	data->trans.rx_dummy = rx_dummy;
	data->trans.cmd_length = 1;
	data->trans.addr_length = addr_len;
	data->trans.hold_ce = false;
	data->trans.packets = &data->packet;
	data->trans.num_packet = 1;
	data->trans.timeout = 10;

	ret = mspi_transceive(cfg->bus, &cfg->dev_id,
			      (const struct mspi_xfer *)&data->trans);
	if (ret) {
		LOG_ERR("MSPI read transaction failed with code: %d/%u", ret, __LINE__);
		return -EIO;
	}
	return ret;
}

/* Take the device lock and (re)apply this device's bus configuration. */
static void acquire(const struct device *flash)
{
	const struct flash_mspi_atxp032_config *cfg = flash->config;
	struct flash_mspi_atxp032_data *data = flash->data;

	k_sem_take(&data->lock, K_FOREVER);

	if (cfg->sw_multi_periph) {
		while (mspi_dev_config(cfg->bus, &cfg->dev_id,
				       MSPI_DEVICE_CONFIG_ALL, &data->dev_cfg)) {
			;
		}
	} else {
		while (mspi_dev_config(cfg->bus, &cfg->dev_id,
				       MSPI_DEVICE_CONFIG_NONE, NULL)) {
			;
		}
	}
}

/* Wait for the channel to go idle, then release the device lock. */
static void release(const struct device *flash)
{
	const struct flash_mspi_atxp032_config *cfg = flash->config;
	struct flash_mspi_atxp032_data *data = flash->data;

	while (mspi_get_channel_status(cfg->bus, cfg->port)) {
		;
	}

	k_sem_give(&data->lock);
}

static int flash_mspi_atxp032_write_enable(const struct device *flash)
{
	int ret;

	LOG_DBG("Enabling write");
	ret = flash_mspi_atxp032_command_write(flash, SPI_NOR_CMD_WREN, 0, 0, 0, NULL, 0);

	return ret;
}

static int flash_mspi_atxp032_write_disable(const struct device *flash)
{
	int ret;

	LOG_DBG("Disabling write");
	ret = flash_mspi_atxp032_command_write(flash, SPI_NOR_CMD_WRDI, 0, 0, 0, NULL, 0);

	return ret;
}

/* Return the device to plain SPI mode (0xFF), bracketed by WREN/WRDI. */
static int flash_mspi_atxp032_reset(const struct device *flash)
{
	int ret;

	ret = flash_mspi_atxp032_write_enable(flash);
	if (ret) {
		return ret;
	}
	LOG_DBG("Return to SPI mode");
	ret = flash_mspi_atxp032_command_write(flash, 0xFF, 0, 0, 0, NULL, 0);
	if (ret) {
		return ret;
	}
	ret = flash_mspi_atxp032_write_disable(flash);
	if (ret) {
		return ret;
	}
	return ret;
}

/* Read the JEDEC ID; the useful bytes sit at offsets 7..9 of the raw reply. */
static int flash_mspi_atxp032_get_vendor_id(const struct device *flash, uint8_t *vendor_id)
{
	struct flash_mspi_atxp032_data *data = flash->data;
	uint8_t buffer[11];
	int ret;

	if (vendor_id == NULL) {
		return -EINVAL;
	}

	LOG_DBG("Reading id");

	/* serial mode */
	ret = flash_mspi_atxp032_command_read(flash, SPI_NOR_CMD_RDID, 0, 0, 0, buffer, 11);
	*vendor_id = buffer[7];

	data->jedec_id = (buffer[7] << 16) | (buffer[8] << 8) | buffer[9];

	return ret;
}

/* Clear sector protection (opcode 0x39) for the sector containing 'addr'. */
static int flash_mspi_atxp032_unprotect_sector(const struct device *flash, off_t addr)
{
	int ret;

	LOG_DBG("unprotect sector at 0x%08zx", (ssize_t)addr);

	ret = flash_mspi_atxp032_command_write(flash, 0x39, addr, 4, 0, NULL, 0);

	return ret;
}

static int flash_mspi_atxp032_erase_sector(const struct device *flash, off_t addr)
{
	int ret;

	LOG_DBG("Erasing sector at 0x%08zx", (ssize_t)addr);

	ret = flash_mspi_atxp032_command_write(flash, SPI_NOR_CMD_SE, addr, 4, 0, NULL, 0);

	return ret;
}

static int flash_mspi_atxp032_erase_block(const struct device *flash, off_t addr)
{
	int ret;

	LOG_DBG("Erasing block at 0x%08zx", (ssize_t)addr);

	ret = flash_mspi_atxp032_command_write(flash, SPI_NOR_CMD_BE, addr, 4, 0, NULL, 0);

	return ret;
}

static int flash_mspi_atxp032_erase_chip(const struct device *flash)
{
	int ret;

	LOG_DBG("Erasing chip");

	ret = flash_mspi_atxp032_command_write(flash, SPI_NOR_CMD_CE, 0, 0, 0, NULL, 0);

	return ret;
}

/* DMA page-program using the currently configured write command. */
static int flash_mspi_atxp032_page_program(const struct device *flash, off_t offset,
					   void *wdata, size_t len)
{
	const struct flash_mspi_atxp032_config *cfg = flash->config;
	struct flash_mspi_atxp032_data *data = flash->data;
	int ret;

	data->packet.dir = MSPI_TX;
	data->packet.cmd = data->dev_cfg.write_cmd;
	data->packet.address = offset;
	data->packet.data_buf = wdata;
	data->packet.num_bytes = len;

	data->trans.async = false;
	data->trans.xfer_mode = MSPI_DMA;
	data->trans.tx_dummy = data->dev_cfg.tx_dummy;
	data->trans.cmd_length = data->dev_cfg.cmd_length;
	data->trans.addr_length = data->dev_cfg.addr_length;
	data->trans.hold_ce = false;
	data->trans.priority = 1;
	data->trans.packets = &data->packet;
	data->trans.num_packet = 1;
	data->trans.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE;

	LOG_DBG("Page programming %d bytes to 0x%08zx", len, (ssize_t)offset);

	ret = mspi_transceive(cfg->bus, &cfg->dev_id,
			      (const struct mspi_xfer *)&data->trans);
	if (ret) {
		LOG_ERR("MSPI write transaction failed with code: %d/%u", ret, __LINE__);
		return -EIO;
	}
	return ret;
}

/* Poll RDSR until WIP clears; temporarily bumps RX dummy cycles in non-single
 * I/O modes and restores the timing configuration afterwards.
 */
static int flash_mspi_atxp032_busy_wait(const struct device *flash)
{
	const struct flash_mspi_atxp032_config *cfg = flash->config;
	struct flash_mspi_atxp032_data *data = flash->data;

	mspi_timing_cfg bkp = data->timing_cfg;
	uint32_t status = 0;
	uint32_t rx_dummy;
	int ret;

	if (data->dev_cfg.io_mode == MSPI_IO_MODE_SINGLE) {
		rx_dummy = 0;
	} else {
		rx_dummy = 4;
		TIMING_CFG_SET_RX_DUMMY(&data->timing_cfg, 4);
		if (mspi_timing_config(cfg->bus, &cfg->dev_id, cfg->timing_cfg_mask,
				       (void *)&data->timing_cfg)) {
			LOG_ERR("Failed to config mspi controller/%u", __LINE__);
			return -EIO;
		}
	}

	do {
		LOG_DBG("Reading status register");
		ret = flash_mspi_atxp032_command_read(flash, SPI_NOR_CMD_RDSR, 0, 0,
						      rx_dummy, (uint8_t *)&status, 1);
		if (ret) {
			LOG_ERR("Could not read status");
			return ret;
		}
		LOG_DBG("status: 0x%x", status);
	} while (status & SPI_NOR_WIP_BIT);

	if (data->dev_cfg.io_mode != MSPI_IO_MODE_SINGLE) {
		data->timing_cfg = bkp;
		if (mspi_timing_config(cfg->bus, &cfg->dev_id, cfg->timing_cfg_mask,
				       (void *)&data->timing_cfg)) {
			LOG_ERR("Failed to config mspi controller/%u", __LINE__);
			return -EIO;
		}
	}

	return ret;
}

/* DMA read of 'len' bytes from 'offset'. (Truncated at the chunk boundary.) */
static int flash_mspi_atxp032_read(const struct device *flash, off_t offset,
				   void *rdata, size_t len)
{
	const struct flash_mspi_atxp032_config *cfg = flash->config;
	struct flash_mspi_atxp032_data *data = flash->data;
	int ret;

	acquire(flash);

	data->packet.dir = MSPI_RX;
	data->packet.cmd = data->dev_cfg.read_cmd;
	data->packet.address = offset;
	data->packet.data_buf = rdata;
	data->packet.num_bytes = len;

	data->trans.async = false;
	data->trans.xfer_mode = MSPI_DMA;
	data->trans.rx_dummy = data->dev_cfg.rx_dummy;
	data->trans.cmd_length = data->dev_cfg.cmd_length;
	data->trans.addr_length = data->dev_cfg.addr_length;
	data->trans.hold_ce = false;
	data->trans.priority = 1;
	data->trans.packets = &data->packet;
	data->trans.num_packet = 1;
	data->trans.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE;

	LOG_DBG("Read %d bytes from 0x%08zx", len, (ssize_t)offset);

	ret = mspi_transceive(cfg->bus, &cfg->dev_id,
			      (const struct mspi_xfer *)&data->trans);
	if (ret) {
		LOG_ERR("MSPI read transaction failed with code: %d/%u", ret, __LINE__);
		return
-EIO; } release(flash); return ret; } static int flash_mspi_atxp032_write(const struct device *flash, off_t offset, const void *wdata, size_t len) { int ret; uint8_t *src = (uint8_t *)wdata; int i; acquire(flash); while (len) { /* If the offset isn't a multiple of the NOR page size, we first need * to write the remaining part that fits, otherwise the write could * be wrapped around within the same page */ i = MIN(SPI_NOR_PAGE_SIZE - (offset % SPI_NOR_PAGE_SIZE), len); ret = flash_mspi_atxp032_write_enable(flash); if (ret) { return ret; } ret = flash_mspi_atxp032_page_program(flash, offset, src, i); if (ret) { return ret; } ret = flash_mspi_atxp032_busy_wait(flash); if (ret) { return ret; } src += i; offset += i; len -= i; } ret = flash_mspi_atxp032_write_disable(flash); if (ret) { return ret; } release(flash); return ret; } static int flash_mspi_atxp032_erase(const struct device *flash, off_t offset, size_t size) { const struct flash_mspi_atxp032_config *cfg = flash->config; int ret = 0; const size_t num_sectors = size / SPI_NOR_SECTOR_SIZE; const size_t num_blocks = size / SPI_NOR_BLOCK_SIZE; int i; acquire(flash); if (offset % SPI_NOR_SECTOR_SIZE) { LOG_ERR("Invalid offset"); return -EINVAL; } if (size % SPI_NOR_SECTOR_SIZE) { LOG_ERR("Invalid size"); return -EINVAL; } if ((offset == 0) && (size == cfg->mem_size)) { ret = flash_mspi_atxp032_write_enable(flash); if (ret) { return ret; } ret = flash_mspi_atxp032_erase_chip(flash); if (ret) { return ret; } ret = flash_mspi_atxp032_busy_wait(flash); if (ret) { return ret; } } else if ((0 == (offset % SPI_NOR_BLOCK_SIZE)) && (0 == (size % SPI_NOR_BLOCK_SIZE))) { for (i = 0; i < num_blocks; i++) { ret = flash_mspi_atxp032_write_enable(flash); if (ret) { return ret; } ret = flash_mspi_atxp032_unprotect_sector(flash, offset); if (ret) { return ret; } ret = flash_mspi_atxp032_write_enable(flash); if (ret) { return ret; } ret = flash_mspi_atxp032_erase_block(flash, offset); if (ret) { return ret; } ret = 
flash_mspi_atxp032_busy_wait(flash); if (ret) { return ret; } offset += SPI_NOR_BLOCK_SIZE; } } else { for (i = 0; i < num_sectors; i++) { ret = flash_mspi_atxp032_write_enable(flash); if (ret) { return ret; } ret = flash_mspi_atxp032_unprotect_sector(flash, offset); if (ret) { return ret; } ret = flash_mspi_atxp032_write_enable(flash); if (ret) { return ret; } ret = flash_mspi_atxp032_erase_sector(flash, offset); if (ret) { return ret; } ret = flash_mspi_atxp032_busy_wait(flash); if (ret) { return ret; } offset += SPI_NOR_SECTOR_SIZE; } } release(flash); return ret; } static const struct flash_parameters *flash_mspi_atxp032_get_parameters(const struct device *flash) { const struct flash_mspi_atxp032_config *cfg = flash->config; return &cfg->flash_param; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static void flash_mspi_atxp032_pages_layout(const struct device *flash, const struct flash_pages_layout **layout, size_t *layout_size) { const struct flash_mspi_atxp032_config *cfg = flash->config; *layout = &cfg->page_layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static int flash_mspi_atxp032_init(const struct device *flash) { const struct flash_mspi_atxp032_config *cfg = flash->config; struct flash_mspi_atxp032_data *data = flash->data; uint8_t vendor_id; uint32_t CRB3; if (!device_is_ready(cfg->bus)) { LOG_ERR("Controller device is not ready"); return -ENODEV; } switch (cfg->tar_dev_cfg.io_mode) { case MSPI_IO_MODE_SINGLE: case MSPI_IO_MODE_QUAD: case MSPI_IO_MODE_OCTAL: break; default: LOG_ERR("bus mode %d not supported/%u", cfg->tar_dev_cfg.io_mode, __LINE__); return -EIO; } if (mspi_dev_config(cfg->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_ALL, &cfg->serial_cfg)) { LOG_ERR("Failed to config mspi controller/%u", __LINE__); return -EIO; } data->dev_cfg = cfg->serial_cfg; if (flash_mspi_atxp032_reset(flash)) { LOG_ERR("Could not reset Flash/%u", __LINE__); return -EIO; } if (flash_mspi_atxp032_get_vendor_id(flash, &vendor_id)) { LOG_ERR("Could not read 
vendor id/%u", __LINE__); return -EIO; } LOG_DBG("Vendor id: 0x%0x", vendor_id); if (vendor_id != ATXP032_VENDOR_ID) { LOG_WRN("Vendor ID does not match expected value of 0x%0x/%u", ATXP032_VENDOR_ID, __LINE__); } if (atxp032_get_dummy_clk((TIMING_CFG_GET_RX_DUMMY(&cfg->tar_timing_cfg)), &CRB3)) { return -ENOTSUP; } if (flash_mspi_atxp032_write_enable(flash)) { return -EIO; } if (flash_mspi_atxp032_command_write(flash, 0x71, 0x3, 1, 0, (uint8_t *)&CRB3, 1)) { return -EIO; } uint8_t cmd; if (cfg->tar_dev_cfg.io_mode == MSPI_IO_MODE_QUAD) { cmd = 0x38; } else if (cfg->tar_dev_cfg.io_mode == MSPI_IO_MODE_OCTAL) { cmd = 0xe8; } else { cmd = 0xff; } if (flash_mspi_atxp032_write_enable(flash)) { return -EIO; } if (flash_mspi_atxp032_command_write(flash, cmd, 0, 0, 0, NULL, 0)) { return -EIO; } if (mspi_dev_config(cfg->bus, &cfg->dev_id, MSPI_DEVICE_CONFIG_ALL, &cfg->tar_dev_cfg)) { LOG_ERR("Failed to config mspi controller/%u", __LINE__); return -EIO; } data->dev_cfg = cfg->tar_dev_cfg; if (mspi_timing_config(cfg->bus, &cfg->dev_id, cfg->timing_cfg_mask, (void *)&cfg->tar_timing_cfg)) { LOG_ERR("Failed to config mspi timing/%u", __LINE__); return -EIO; } data->timing_cfg = cfg->tar_timing_cfg; if (cfg->tar_xip_cfg.enable) { if (mspi_xip_config(cfg->bus, &cfg->dev_id, &cfg->tar_xip_cfg)) { LOG_ERR("Failed to enable XIP/%u", __LINE__); return -EIO; } data->xip_cfg = cfg->tar_xip_cfg; } if (cfg->tar_scramble_cfg.enable) { if (mspi_scramble_config(cfg->bus, &cfg->dev_id, &cfg->tar_scramble_cfg)) { LOG_ERR("Failed to enable scrambling/%u", __LINE__); return -EIO; } data->scramble_cfg = cfg->tar_scramble_cfg; } release(flash); return 0; } #if defined(CONFIG_FLASH_JESD216_API) static int flash_mspi_atxp032_read_sfdp(const struct device *flash, off_t addr, void *rdata, size_t size) { const struct flash_mspi_atxp032_config *cfg = flash->config; struct flash_mspi_atxp032_data *data = flash->data; int ret; acquire(flash); data->packet.dir = MSPI_RX; data->packet.cmd = 0x5A; 
data->packet.address = addr; data->packet.data_buf = rdata; data->packet.num_bytes = size; data->trans.async = false; data->trans.xfer_mode = MSPI_DMA; data->trans.rx_dummy = 8; data->trans.cmd_length = 1; data->trans.addr_length = 3; data->trans.hold_ce = false; data->trans.priority = 1; data->trans.packets = &data->packet; data->trans.num_packet = 1; data->trans.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE; LOG_DBG("Read %d bytes from 0x%08zx", size, (ssize_t)addr); ret = mspi_transceive(cfg->bus, &cfg->dev_id, (const struct mspi_xfer *)&data->trans); if (ret) { LOG_ERR("MSPI read transaction failed with code: %d/%u", ret, __LINE__); return -EIO; } release(flash); return 0; } static int flash_mspi_atxp032_read_jedec_id(const struct device *flash, uint8_t *id) { struct flash_mspi_atxp032_data *data = flash->data; id = &data->jedec_id; return 0; } #endif /* CONFIG_FLASH_JESD216_API */ #if defined(CONFIG_PM_DEVICE) static int flash_mspi_atxp032_pm_action(const struct device *flash, enum pm_device_action action) { switch (action) { case PM_DEVICE_ACTION_RESUME: acquire(flash); release(flash); break; case PM_DEVICE_ACTION_SUSPEND: acquire(flash); release(flash); break; default: return -ENOTSUP; } return 0; } #endif /** IS_ENABLED(CONFIG_PM_DEVICE) */ static const struct flash_driver_api flash_mspi_atxp032_api = { .erase = flash_mspi_atxp032_erase, .write = flash_mspi_atxp032_write, .read = flash_mspi_atxp032_read, .get_parameters = flash_mspi_atxp032_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_mspi_atxp032_pages_layout, #endif #if defined(CONFIG_FLASH_JESD216_API) .sfdp_read = flash_mspi_atxp032_read_sfdp, .read_jedec_id = flash_mspi_atxp032_read_jedec_id, #endif }; #define MSPI_DEVICE_CONFIG_SERIAL(n) \ { \ .ce_num = DT_INST_PROP(n, mspi_hardware_ce_num), \ .freq = 12000000, \ .io_mode = MSPI_IO_MODE_SINGLE, \ .data_rate = MSPI_DATA_RATE_SINGLE, \ .cpp = MSPI_CPP_MODE_0, \ .endian = MSPI_XFER_LITTLE_ENDIAN, \ .ce_polarity = 
MSPI_CE_ACTIVE_LOW, \ .dqs_enable = false, \ .rx_dummy = 8, \ .tx_dummy = 0, \ .read_cmd = SPI_NOR_CMD_READ_FAST, \ .write_cmd = SPI_NOR_CMD_PP, \ .cmd_length = 1, \ .addr_length = 4, \ .mem_boundary = 0, \ .time_to_break = 0, \ } #if CONFIG_SOC_FAMILY_AMBIQ #define MSPI_TIMING_CONFIG(n) \ { \ .ui8WriteLatency = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 0), \ .ui8TurnAround = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 1), \ .bTxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 2), \ .bRxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 3), \ .bRxCap = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 4), \ .ui32TxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 5), \ .ui32RxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 6), \ .ui32RXDQSDelayEXT = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 7), \ } #define MSPI_TIMING_CONFIG_MASK(n) DT_INST_PROP(n, ambiq_timing_config_mask) #else #define MSPI_TIMING_CONFIG(n) #define MSPI_TIMING_CONFIG_MASK(n) #endif #define FLASH_MSPI_ATXP032(n) \ static const struct flash_mspi_atxp032_config flash_mspi_atxp032_config_##n = { \ .mem_size = DT_INST_PROP(n, size) / 8, \ .port = MSPI_PORT(n), \ .flash_param = \ { \ .write_block_size = NOR_WRITE_SIZE, \ .erase_value = NOR_ERASE_VALUE, \ }, \ .page_layout = \ { \ .pages_count = DT_INST_PROP(n, size) / 8 / SPI_NOR_PAGE_SIZE, \ .pages_size = SPI_NOR_PAGE_SIZE, \ }, \ .bus = DEVICE_DT_GET(DT_INST_BUS(n)), \ .dev_id = MSPI_DEVICE_ID_DT_INST(n), \ .serial_cfg = MSPI_DEVICE_CONFIG_SERIAL(n), \ .tar_dev_cfg = MSPI_DEVICE_CONFIG_DT_INST(n), \ .tar_xip_cfg = MSPI_XIP_CONFIG_DT_INST(n), \ .tar_scramble_cfg = MSPI_SCRAMBLE_CONFIG_DT_INST(n), \ .tar_timing_cfg = MSPI_TIMING_CONFIG(n), \ .timing_cfg_mask = MSPI_TIMING_CONFIG_MASK(n), \ .sw_multi_periph = DT_PROP(DT_INST_BUS(n), software_multiperipheral) \ }; \ static struct flash_mspi_atxp032_data flash_mspi_atxp032_data_##n = { \ .lock = Z_SEM_INITIALIZER(flash_mspi_atxp032_data_##n.lock, 0, 1), \ }; \ PM_DEVICE_DT_INST_DEFINE(n, 
flash_mspi_atxp032_pm_action); \ DEVICE_DT_INST_DEFINE(n, \ flash_mspi_atxp032_init, \ PM_DEVICE_DT_INST_GET(n), \ &flash_mspi_atxp032_data_##n, \ &flash_mspi_atxp032_config_##n, \ POST_KERNEL, \ CONFIG_FLASH_INIT_PRIORITY, \ &flash_mspi_atxp032_api); DT_INST_FOREACH_STATUS_OKAY(FLASH_MSPI_ATXP032) ```
/content/code_sandbox/drivers/flash/flash_mspi_atxp032.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,800
```objective-c
/*
 *
 */

#ifndef __SOC_FLASH_NRF_H__
#define __SOC_FLASH_NRF_H__

#include <zephyr/kernel.h>
#include <soc.h>

#define FLASH_OP_DONE (0) /* 0 for compliance with the driver API. */
#define FLASH_OP_ONGOING 1

struct flash_context {
	uint32_t data_addr;  /* Address of data to write. */
	uint32_t flash_addr; /* Address of flash to write or erase. */
	uint32_t len;        /* Size of data to write or erase [B]. */
#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE
	uint8_t enable_time_limit; /* set execution limited to the execution
				    * window.
				    */
#endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */
#if defined(CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE)
	uint32_t flash_addr_next;
#endif /* CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE */
}; /**< Context type for flash operations. @ref write_op @ref erase_op */

#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE

/* The timeout is multiplied by CONFIG_SOC_FLASH_NRF_TIMEOUT_MULTIPLIER/10
 * because switching tasks may take a significant portion of time.
 */
#define FLASH_TIMEOUT_MS ((FLASH_PAGE_ERASE_MAX_TIME_US) * \
			  (FLASH_PAGE_MAX_CNT) / 1000 * \
			  CONFIG_SOC_FLASH_NRF_TIMEOUT_MULTIPLIER / 10)

/**
 * @defgroup nrf_flash_sync sync backend API
 *
 * API declared below contains prototypes of function which shall be
 * implemented by the synchronization backend.
 * @{
 */

/**
 * Callback which executes the flash operation.
 *
 * @param context pointer to flash_context structure.
 * @retval @ref FLASH_OP_DONE once operation was done, @ref FLASH_OP_ONGOING if
 * operation needs more time for execution and a negative error code if
 * operation was aborted.
 */
typedef int (*flash_op_handler_t) (void *context);

struct flash_op_desc {
	flash_op_handler_t handler;
	struct flash_context *context; /* [in,out] */
};

/**
 * Synchronization backend driver initialization procedure.
 *
 * This will be run within flash driver initialization
 */
int nrf_flash_sync_init(void);

/**
 * Set synchronization context for synchronous operations.
 *
 * This function sets the backend's internal context for the expected timing
 * parameter.
 *
 * @param duration Duration of the execution window [us]
 */
void nrf_flash_sync_set_context(uint32_t duration);

/**
 * Check if the operation needs to be run synchronously with the radio.
 *
 * @retval True if operation needs to be run synchronously, otherwise False
 */
bool nrf_flash_sync_is_required(void);

/**
 * Execute the flash operation synchronously along the radio operations.
 *
 * Function executes callbacks op_desc->handler() in execution windows according
 * to timing settings requested by nrf_flash_sync_set_context().
 * This routine needs to call the handler as many times as it returns
 * FLASH_OP_ONGOING; however, an operation timeout should be implemented.
 * When the handler() returns FLASH_OP_DONE or an error code, no further
 * execution windows are needed, so the function should return as soon as the
 * handler() has finished its operation.
 *
 * @retval 0 if op_desc->handler() was executed and finished its operation
 * successfully. Otherwise (handler returned error, timeout, couldn't schedule
 * execution...) a negative error code.
 *
 *                       execution window
 *   Driver task              task
 *       |                      |
 *       |                      |
 * nrf_flash_sync_ #            |
 * set_context()   #            |
 *       |                      |
 *       |                      |
 * call nrf_flash_ #            |
 * sync_exe()      #            |
 *                 #----------->|
 *       |                      |
 *       |                      # execution window 0
 *       |                      # call flash_op_handler_t handler()
 *       |                      #
 *       |                      #
 *       |                      # flash_op_handler_t handler() return
 *       |                      # FLASH_OP_ONGOING
 *       |                      # {backend request/allow
 *       |                      |  the next execution window}
 *       .                      .
 *       .                      .
 *       .                      .
 *       |                      |
 *       |                      # execution window N
 *       |                      # call flash_op_handler_t handler()
 *       |                      #
 *       |                      #
 *       |                      #
 *       |                      # flash_op_handler_t handler() returns
 *       |                      # FLASH_OP_DONE
 *       |<---------------------# {backend transfer execution
 *                 #            |  to the driver back}
 * nrf_flash_      #            |
 * sync_exe()      |            |
 * return          |            |
 */
int nrf_flash_sync_exe(struct flash_op_desc *op_desc);

/**
 * @}
 */

/**
 * @defgroup nrf_flash_sync_timing sync timing backend API
 * @ingroup nrf_flash_sync
 * @{
 *
 * API used by the nRF flash driver to check whether execution fits within
 * the execution window.
 *
 * API is used as follows:
 *
 * begin of execution window
 *	call flash_op_handler_t handler()
 *		nrf_flash_sync_get_timestamp_begin()
 *		[does some chunk of work]
 *		nrf_flash_sync_check_time_limit() == false
 *		[does some chunk of work]
 *		nrf_flash_sync_check_time_limit() == false
 *		[does some chunk of work]
 *		...
 *		nrf_flash_sync_check_time_limit() == true
 *		[preserve work context for next execution window]
 *		return from flash_op_handler_t handler()
 *	[return from execution window]
 * end of execution window
 */

/**
 * Get timestamp and store it in synchronization backend
 * context data as operation beginning time reference.
 * This timestamp will be used by @ref nrf_flash_sync_check_time_limit()
 * as the execution window begin reference.
 */
void nrf_flash_sync_get_timestamp_begin(void);

/**
 * Estimate whether the next iteration will fit in the time constraints.
 * This function fetches the current timestamp and compares it with the
 * operation beginning timestamp reference stored by
 * @ref nrf_flash_sync_get_timestamp_begin() in the synchronization backend
 * context data.
 *
 * @param iteration iteration number.
 * @retval true if the estimated time is exceeded, false otherwise.
 */
bool nrf_flash_sync_check_time_limit(uint32_t iteration);

/**
 * @}
 */

#endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */

#endif /* !__SOC_FLASH_NRF_H__ */
```
/content/code_sandbox/drivers/flash/soc_flash_nrf.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,347
```c
/*
 *
 */

#define LOG_DOMAIN flash_stm32wb
#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_DOMAIN);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <soc.h>
#include <zephyr/sys/__assert.h>

#include "flash_stm32.h"
#include "stm32_hsem.h"

#if defined(CONFIG_BT)
#include "shci.h"
#endif

#define STM32WBX_PAGE_SHIFT 12

/*
 * Up to 255 4K pages
 */
/* Convert a byte offset into a flash page index (4 KiB pages). */
static uint32_t get_page(off_t offset)
{
	return offset >> STM32WBX_PAGE_SHIFT;
}

/* Reset the data and instruction caches; required after an erase so stale
 * cached flash contents are not used during code execution.
 */
static inline void flush_cache(FLASH_TypeDef *regs)
{
	if (regs->ACR & FLASH_ACR_DCEN) {
		regs->ACR &= ~FLASH_ACR_DCEN;
		/* Datasheet: DCRST: Data cache reset
		 * This bit can be written only when the data cache is disabled
		 */
		regs->ACR |= FLASH_ACR_DCRST;
		regs->ACR &= ~FLASH_ACR_DCRST;
		regs->ACR |= FLASH_ACR_DCEN;
	}

	if (regs->ACR & FLASH_ACR_ICEN) {
		regs->ACR &= ~FLASH_ACR_ICEN;
		/* Datasheet: ICRST: Instruction cache reset :
		 * This bit can be written only when the instruction cache
		 * is disabled
		 */
		regs->ACR |= FLASH_ACR_ICRST;
		regs->ACR &= ~FLASH_ACR_ICRST;
		regs->ACR |= FLASH_ACR_ICEN;
	}
}

/* Program one 64-bit double word at @p offset, arbitrating flash access with
 * CPU2 via the HSEM/PESD scheme from STM32 AN5289. Returns 0 on success or a
 * negative error code.
 */
static int write_dword(const struct device *dev, off_t offset, uint64_t val)
{
	volatile uint32_t *flash = (uint32_t *)(offset + FLASH_STM32_BASE_ADDRESS);
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	int ret, rc;
	uint32_t cpu1_sem_status;
	uint32_t cpu2_sem_status = 0;
	uint32_t key;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check if this double word is erased and value isn't 0.
	 *
	 * It is allowed to write only zeros over an already written dword
	 * See 3.3.8 in reference manual.
	 */
	if ((flash[0] != 0xFFFFFFFFUL || flash[1] != 0xFFFFFFFFUL) && val != 0UL) {
		LOG_ERR("Word at offs %ld not erased", (long)offset);
		return -EIO;
	}

	ret = flash_stm32_check_status(dev);
	if (ret < 0) {
		return -EIO;
	}

	/* Implementation of STM32 AN5289, proposed in STM32WB Cube Application
	 * BLE_RfWithFlash
	 * path_to_url
	 */
	do {
		/**
		 * When the PESD bit mechanism is used by CPU2 to protect its
		 * timing, the PESD bit should be polled here.
		 * If the PESD is set, the CPU1 will be stalled when reading
		 * literals from an ISR that may occur after the flash
		 * processing has been requested but suspended due to the PESD
		 * bit.
		 *
		 * Note: This code is required only when the PESD mechanism is
		 * used to protect the CPU2 timing.
		 * However, keeping that code makes it compatible with both
		 * mechanisms.
		 */
		while (LL_FLASH_IsActiveFlag_OperationSuspended()) {
			;
		}

		/* Enter critical section */
		key = irq_lock();

		/**
		 * Depending on the application implementation, in case a
		 * multitasking is possible with an OS, it should be checked
		 * here if another task in the application disallowed flash
		 * processing to protect some latency in critical code
		 * execution.
		 * When flash processing is ongoing, the CPU cannot access the
		 * flash anymore. Trying to access the flash during that time
		 * stalls the CPU.
		 * The only way for CPU1 to disallow flash processing is to
		 * take CFG_HW_BLOCK_FLASH_REQ_BY_CPU1_SEMID.
		 */
		cpu1_sem_status = LL_HSEM_GetStatus(HSEM, CFG_HW_BLOCK_FLASH_REQ_BY_CPU1_SEMID);
		if (cpu1_sem_status == 0) {
			/**
			 * Check now if the CPU2 disallows flash processing to
			 * protect its timing. If the semaphore is locked, the
			 * CPU2 does not allow flash processing
			 *
			 * Note: By default, the CPU2 uses the PESD mechanism
			 * to protect its timing, therefore, it is useless to
			 * get/release the semaphore.
			 *
			 * However, keeping that code makes it compatible with
			 * both mechanisms.
			 * The protection by semaphore is enabled on CPU2 side
			 * with the command SHCI_C2_SetFlashActivityControl()
			 *
			 */
			cpu2_sem_status = LL_HSEM_1StepLock(HSEM, CFG_HW_BLOCK_FLASH_REQ_BY_CPU2_SEMID);
			if (cpu2_sem_status == 0) {
				/**
				 * When CFG_HW_BLOCK_FLASH_REQ_BY_CPU2_SEMID is
				 * taken, it is allowed to only write one
				 * single 64bits data.
				 * When several 64bits data need to be erased,
				 * the application shall first exit from the
				 * critical section and try again.
				 */

				/* Set the PG bit */
				regs->CR |= FLASH_CR_PG;

				/* Flush the register write */
				tmp = regs->CR;

				/* Perform the data write operation at desired
				 * memory address
				 */
				flash[0] = (uint32_t)val;
				flash[1] = (uint32_t)(val >> 32);

				/**
				 * Release the semaphore to give the
				 * opportunity to CPU2 to protect its timing
				 * versus the next flash operation by taking
				 * this semaphore.
				 * Note that the CPU2 is polling on this
				 * semaphore so CPU1 shall release it as fast
				 * as possible.
				 * This is why this code is protected by a
				 * critical section.
				 */
				LL_HSEM_ReleaseLock(HSEM, CFG_HW_BLOCK_FLASH_REQ_BY_CPU2_SEMID, 0);
			}
		}

		/* Exit critical section */
		irq_unlock(key);

	} while (cpu2_sem_status || cpu1_sem_status);

	/* Wait until the BSY bit is cleared */
	rc = flash_stm32_wait_flash_idle(dev);

	/* Clear the PG bit */
	regs->CR &= (~FLASH_CR_PG);

	return rc;
}

/* Erase one flash page, arbitrating flash access with CPU2 via the HSEM/PESD
 * scheme from STM32 AN5289. Returns 0 on success or a negative error code.
 */
static int erase_page(const struct device *dev, uint32_t page)
{
	uint32_t cpu1_sem_status;
	uint32_t cpu2_sem_status = 0;
	uint32_t key;
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check that no Flash memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/*
	 * If an erase operation in Flash memory also concerns data in the data
	 * or instruction cache, the user has to ensure that these data
	 * are rewritten before they are accessed during code execution.
	 */
	flush_cache(regs);

	/* Implementation of STM32 AN5289, proposed in STM32WB Cube Application
	 * BLE_RfWithFlash
	 * path_to_url
	 */
	do {
		/**
		 * When the PESD bit mechanism is used by CPU2 to protect its
		 * timing, the PESD bit should be polled here.
		 * If the PESD is set, the CPU1 will be stalled when reading
		 * literals from an ISR that may occur after the flash
		 * processing has been requested but suspended due to the PESD
		 * bit.
		 *
		 * Note: This code is required only when the PESD mechanism is
		 * used to protect the CPU2 timing.
		 * However, keeping that code makes it compatible with both
		 * mechanisms.
		 */
		while (LL_FLASH_IsActiveFlag_OperationSuspended()) {
			;
		}

		/* Enter critical section */
		key = irq_lock();

		/**
		 * Depending on the application implementation, in case a
		 * multitasking is possible with an OS, it should be checked
		 * here if another task in the application disallowed flash
		 * processing to protect some latency in critical code
		 * execution.
		 * When flash processing is ongoing, the CPU cannot access the
		 * flash anymore. Trying to access the flash during that time
		 * stalls the CPU.
		 * The only way for CPU1 to disallow flash processing is to
		 * take CFG_HW_BLOCK_FLASH_REQ_BY_CPU1_SEMID.
		 */
		cpu1_sem_status = LL_HSEM_GetStatus(HSEM, CFG_HW_BLOCK_FLASH_REQ_BY_CPU1_SEMID);
		if (cpu1_sem_status == 0) {
			/**
			 * Check now if the CPU2 disallows flash processing to
			 * protect its timing. If the semaphore is locked, the
			 * CPU2 does not allow flash processing
			 *
			 * Note: By default, the CPU2 uses the PESD mechanism
			 * to protect its timing, therefore, it is useless to
			 * get/release the semaphore.
			 *
			 * However, keeping that code makes it compatible with
			 * both mechanisms.
			 * The protection by semaphore is enabled on CPU2 side
			 * with the command SHCI_C2_SetFlashActivityControl()
			 *
			 */
			cpu2_sem_status = LL_HSEM_1StepLock(HSEM, CFG_HW_BLOCK_FLASH_REQ_BY_CPU2_SEMID);
			if (cpu2_sem_status == 0) {
				/**
				 * When CFG_HW_BLOCK_FLASH_REQ_BY_CPU2_SEMID is
				 * taken, it is allowed to only erase one
				 * sector.
				 * When several sectors need to be erased,
				 * the application shall first exit from the
				 * critical section and try again.
				 */
				regs->CR |= FLASH_CR_PER;
				regs->CR &= ~FLASH_CR_PNB_Msk;
				regs->CR |= page << FLASH_CR_PNB_Pos;
				regs->CR |= FLASH_CR_STRT;

				/**
				 * Release the semaphore to give the
				 * opportunity to CPU2 to protect its timing
				 * versus the next flash operation by taking
				 * this semaphore.
				 * Note that the CPU2 is polling on this
				 * semaphore so CPU1 shall release it as fast
				 * as possible.
				 * This is why this code is protected by a
				 * critical section.
				 */
				LL_HSEM_ReleaseLock(HSEM, CFG_HW_BLOCK_FLASH_REQ_BY_CPU2_SEMID, 0);
			}
		}

		/* Exit critical section */
		irq_unlock(key);

	} while (cpu2_sem_status || cpu1_sem_status);

	/* Wait for the BSY bit */
	rc = flash_stm32_wait_flash_idle(dev);

	regs->CR &= ~FLASH_CR_PER;

	return rc;
}

/* Erase every page overlapping [offset, offset + len), notifying CPU2 around
 * the loop so it can apply BLE timing protection. Stops at the first failure.
 */
int flash_stm32_block_erase_loop(const struct device *dev, unsigned int offset,
				 unsigned int len)
{
	int i, rc = 0;

#if defined(CONFIG_BT)
	/**
	 * Notify the CPU2 that some flash erase activity may be executed
	 * On reception of this command, the CPU2 enables the BLE timing
	 * protection versus flash erase processing.
	 * The Erase flash activity will be executed only when the BLE RF is
	 * idle for at least 25ms.
	 * The CPU2 will prevent all flash activity (write or erase) in all
	 * cases when the BL RF Idle is shorter than 25ms.
	 */
	SHCI_C2_FLASH_EraseActivity(ERASE_ACTIVITY_ON);
#endif /* CONFIG_BT */

	i = get_page(offset);
	for (; i <= get_page(offset + len - 1) ; ++i) {
		rc = erase_page(dev, i);
		if (rc < 0) {
			break;
		}
	}

#if defined(CONFIG_BT)
	/**
	 * Notify the CPU2 there will be no request anymore to erase the flash
	 * On reception of this command, the CPU2 disables the BLE timing
	 * protection versus flash erase processing
	 */
	SHCI_C2_FLASH_EraseActivity(ERASE_ACTIVITY_OFF);
#endif /* CONFIG_BT */

	return rc;
}

/* Write @p len bytes from @p data to flash at @p offset, one 64-bit double
 * word at a time. Returns 0 on success or the first failing write's error.
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	int i, rc = 0;

	for (i = 0; i < len; i += 8, offset += 8U) {
		rc = write_dword(dev, offset, UNALIGNED_GET((const uint64_t *) data + (i >> 3)));
		if (rc < 0) {
			return rc;
		}
	}

	return rc;
}

/* Report the flash page layout; computed once on first call and cached in a
 * function-local static.
 */
void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	static struct flash_pages_layout stm32wb_flash_layout = {
		.pages_count = 0,
		.pages_size = 0,
	};

	ARG_UNUSED(dev);

	if (stm32wb_flash_layout.pages_count == 0) {
		stm32wb_flash_layout.pages_count = FLASH_SIZE / FLASH_PAGE_SIZE;
		stm32wb_flash_layout.pages_size = FLASH_PAGE_SIZE;
	}

	*layout = &stm32wb_flash_layout;
	*layout_size = 1;
}

/* Check the flash status/ECC registers for errors. Returns 0 when no error
 * (the OPTVERR bit is treated as systematic and cleared), -EIO otherwise.
 */
int flash_stm32_check_status(const struct device *dev)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t error = 0;

	/* Save Flash errors */
	error = (regs->SR & FLASH_FLAG_SR_ERRORS);
	error |= (regs->ECCR & FLASH_FLAG_ECCC);

	/* Clear systematic Option and Engineering bits validity error */
	if (error & FLASH_FLAG_OPTVERR) {
		regs->SR |= FLASH_FLAG_SR_ERRORS;
		return 0;
	}

	if (error) {
		return -EIO;
	}

	return 0;
}
```
/content/code_sandbox/drivers/flash/flash_stm32wbx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,249
```c /* * */ #include <errno.h> #include <zephyr/drivers/flash.h> #include <zephyr/logging/log.h> #include <string.h> LOG_MODULE_REGISTER(flash, CONFIG_FLASH_LOG_LEVEL); int z_impl_flash_fill(const struct device *dev, uint8_t val, off_t offset, size_t size) { uint8_t filler[CONFIG_FLASH_FILL_BUFFER_SIZE]; const struct flash_driver_api *api = (const struct flash_driver_api *)dev->api; const struct flash_parameters *fparams = api->get_parameters(dev); int rc = 0; size_t stored = 0; if (sizeof(filler) < fparams->write_block_size) { LOG_ERR("Size of CONFIG_FLASH_FILL_BUFFER_SIZE"); return -EINVAL; } /* The flash_write will, probably, check write alignment but this * is too late, as we write datain chunks; data alignment may be * broken by the size of the last chunk, that is why the check * happens here too. * Note that we have no way to check whether offset and size are * are correct, as such info is only available at the level of * a driver, so only basic check on offset. */ if (offset < 0) { LOG_ERR("Negative offset not allowed\n"); return -EINVAL; } if ((size | (size_t)offset) & (fparams->write_block_size - 1)) { LOG_ERR("Incorrect size or offset alignment, expected %zx\n", fparams->write_block_size); return -EINVAL; } memset(filler, val, sizeof(filler)); while (stored < size) { size_t chunk = MIN(sizeof(filler), size - stored); rc = api->write(dev, offset + stored, filler, chunk); if (rc < 0) { LOG_DBG("Fill to dev %p failed at offset 0x%zx\n", dev, (size_t)offset + stored); break; } stored += chunk; } return rc; } int z_impl_flash_flatten(const struct device *dev, off_t offset, size_t size) { const struct flash_driver_api *api = (const struct flash_driver_api *)dev->api; __maybe_unused const struct flash_parameters *params = api->get_parameters(dev); #if defined(CONFIG_FLASH_HAS_EXPLICIT_ERASE) if ((flash_params_get_erase_cap(params) & FLASH_ERASE_C_EXPLICIT) && api->erase != NULL) { return api->erase(dev, offset, size); } #endif #if 
defined(CONFIG_FLASH_HAS_NO_EXPLICIT_ERASE) return flash_fill(dev, params->erase_value, offset, size); #else return -ENOSYS; #endif } ```
/content/code_sandbox/drivers/flash/flash_util.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
591
```c /* * */ #define DT_DRV_COMPAT nxp_s32_qspi_nor #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nxp_s32_qspi_nor, CONFIG_FLASH_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/drivers/flash.h> #include <zephyr/sys/util.h> #include <Qspi_Ip.h> #include "spi_nor.h" #include "jesd216.h" #include "memc_nxp_s32_qspi.h" #define QSPI_INST_NODE_HAS_PROP_EQ_AND_OR(n, prop, val) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(n, prop), \ (IS_EQ(DT_INST_ENUM_IDX(n, prop), val)), \ (0)) || #define QSPI_ANY_INST_HAS_PROP_EQ(prop, val) \ (DT_INST_FOREACH_STATUS_OKAY_VARGS(QSPI_INST_NODE_HAS_PROP_EQ_AND_OR, prop, val) 0) #define QSPI_INST_NODE_NOT_HAS_PROP_AND_OR(n, prop) \ !DT_INST_NODE_HAS_PROP(n, prop) || #define QSPI_ANY_INST_HAS_PROP_STATUS_NOT_OKAY(prop) \ (DT_INST_FOREACH_STATUS_OKAY_VARGS(QSPI_INST_NODE_NOT_HAS_PROP_AND_OR, prop) 0) #define QSPI_QER_TYPE(n) \ _CONCAT(JESD216_DW15_QER_VAL_, \ DT_INST_STRING_TOKEN_OR(n, quad_enable_requirements, S1B6)) #define QSPI_HAS_QUAD_MODE(n) \ (QSPI_INST_NODE_HAS_PROP_EQ_AND_OR(n, readoc, 3) \ QSPI_INST_NODE_HAS_PROP_EQ_AND_OR(n, readoc, 4) \ QSPI_INST_NODE_HAS_PROP_EQ_AND_OR(n, writeoc, 2) \ QSPI_INST_NODE_HAS_PROP_EQ_AND_OR(n, writeoc, 3) \ 0) #define QSPI_WRITE_SEQ(n) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(n, writeoc), \ (_CONCAT(QSPI_SEQ_PP_, DT_INST_STRING_UPPER_TOKEN(n, writeoc))),\ (QSPI_SEQ_PP_1_1_1)) #define QSPI_READ_SEQ(n) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(n, readoc), \ (_CONCAT(QSPI_SEQ_READ_, DT_INST_STRING_UPPER_TOKEN(n, readoc))),\ (QSPI_SEQ_READ_1_1_1)) #define QSPI_ERASE_VALUE 0xff #define QSPI_WRITE_BLOCK_SIZE 1U #define QSPI_IS_ALIGNED(addr, bits) (((addr) & BIT_MASK(bits)) == 0) #define QSPI_LUT_ENTRY_SIZE (FEATURE_QSPI_LUT_SEQUENCE_SIZE * 2) #define QSPI_LUT_IDX(n) (n * QSPI_LUT_ENTRY_SIZE) #if defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) /* Size of LUT */ #define QSPI_SFDP_LUT_SIZE 130U /* Size of init operations */ #define QSPI_SFDP_INIT_OP_SIZE 8U #if defined(CONFIG_FLASH_JESD216_API) /* Size of all 
LUT sequences for JESD216 operations */ #define QSPI_JESD216_SEQ_SIZE 8U #endif /* CONFIG_FLASH_JESD216_API */ #endif /* CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME */ struct nxp_s32_qspi_config { const struct device *controller; struct flash_parameters flash_parameters; #if defined(CONFIG_FLASH_PAGE_LAYOUT) struct flash_pages_layout layout; #endif #if !defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) const Qspi_Ip_MemoryConfigType memory_cfg; enum jesd216_dw15_qer_type qer_type; bool quad_mode; #endif }; struct nxp_s32_qspi_data { uint8_t instance; Qspi_Ip_MemoryConnectionType memory_conn_cfg; uint8_t read_sfdp_lut_idx; #if defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) Qspi_Ip_MemoryConfigType memory_cfg; Qspi_Ip_InstrOpType lut_ops[QSPI_SFDP_LUT_SIZE]; Qspi_Ip_InitOperationType init_ops[QSPI_SFDP_INIT_OP_SIZE]; #endif #if defined(CONFIG_MULTITHREADING) struct k_sem sem; #endif }; enum { QSPI_SEQ_RDSR, QSPI_SEQ_RDSR2, QSPI_SEQ_WRSR, QSPI_SEQ_WRSR2, QSPI_SEQ_WREN, QSPI_SEQ_RESET, QSPI_SEQ_SE, #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(has_32k_erase) QSPI_SEQ_BE_32K, #endif QSPI_SEQ_BE, QSPI_SEQ_CE, QSPI_SEQ_READ_SFDP, QSPI_SEQ_RDID, #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 0) || QSPI_ANY_INST_HAS_PROP_STATUS_NOT_OKAY(readoc) QSPI_SEQ_READ_1_1_1, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 1) QSPI_SEQ_READ_1_1_2, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 2) QSPI_SEQ_READ_1_2_2, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 3) QSPI_SEQ_READ_1_1_4, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 4) QSPI_SEQ_READ_1_4_4, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(writeoc, 0) || QSPI_ANY_INST_HAS_PROP_STATUS_NOT_OKAY(writeoc) QSPI_SEQ_PP_1_1_1, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(writeoc, 1) QSPI_SEQ_PP_1_1_2, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(writeoc, 2) QSPI_SEQ_PP_1_1_4, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(writeoc, 3) QSPI_SEQ_PP_1_4_4, #endif }; #if !defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) static const Qspi_Ip_InstrOpType 
nxp_s32_qspi_lut[][QSPI_LUT_ENTRY_SIZE] = { [QSPI_SEQ_RDSR] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_RDSR), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_1, 1U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, [QSPI_SEQ_RDSR2] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_RDSR2), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_1, 1U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, [QSPI_SEQ_WRSR] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_WRSR), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_WRITE, QSPI_IP_LUT_PADS_1, 1U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, [QSPI_SEQ_WRSR2] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_WRSR2), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_WRITE, QSPI_IP_LUT_PADS_1, 1U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, [QSPI_SEQ_WREN] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_WREN), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, [QSPI_SEQ_RESET] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_RESET_EN), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_PADS_1, 0U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_RESET_MEM), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_PADS_1, 0U), }, [QSPI_SEQ_SE] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_SE), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(has_32k_erase) [QSPI_SEQ_BE_32K] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_BE_32K), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, 
QSPI_IP_LUT_SEQ_END), }, #endif [QSPI_SEQ_BE] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_BE), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, [QSPI_SEQ_CE] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_CE), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, [QSPI_SEQ_READ_SFDP] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, JESD216_CMD_READ_SFDP), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_DUMMY, QSPI_IP_LUT_PADS_1, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_1, 16U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, [QSPI_SEQ_RDID] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, JESD216_CMD_READ_ID), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_1, JESD216_READ_ID_LEN), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 0) || QSPI_ANY_INST_HAS_PROP_STATUS_NOT_OKAY(readoc) [QSPI_SEQ_READ_1_1_1] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_READ_FAST), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_DUMMY, QSPI_IP_LUT_PADS_1, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_1, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 1) [QSPI_SEQ_READ_1_1_2] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_DREAD), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_DUMMY, QSPI_IP_LUT_PADS_1, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_2, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 2) 
[QSPI_SEQ_READ_1_2_2] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_2READ), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_2, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_DUMMY, QSPI_IP_LUT_PADS_2, 4U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_2, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 3) [QSPI_SEQ_READ_1_1_4] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_QREAD), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_DUMMY, QSPI_IP_LUT_PADS_1, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_4, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(readoc, 4) [QSPI_SEQ_READ_1_4_4] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_4READ), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_4, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_MODE, QSPI_IP_LUT_PADS_4, 0U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_DUMMY, QSPI_IP_LUT_PADS_4, 4U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_4, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(writeoc, 0) || QSPI_ANY_INST_HAS_PROP_STATUS_NOT_OKAY(writeoc) [QSPI_SEQ_PP_1_1_1] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_PP), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_WRITE, QSPI_IP_LUT_PADS_1, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(writeoc, 1) [QSPI_SEQ_PP_1_1_2] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_PP_1_1_2), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_WRITE, QSPI_IP_LUT_PADS_2, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, 
QSPI_IP_LUT_SEQ_END), }, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(writeoc, 2) [QSPI_SEQ_PP_1_1_4] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_PP_1_1_4), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_WRITE, QSPI_IP_LUT_PADS_4, 8U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #endif #if QSPI_ANY_INST_HAS_PROP_EQ(writeoc, 3) [QSPI_SEQ_PP_1_4_4] = { QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, SPI_NOR_CMD_PP_1_4_4), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_4, 24U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_WRITE, QSPI_IP_LUT_PADS_4, 16U), QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END), }, #endif }; #endif /* !defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) */ static ALWAYS_INLINE Qspi_Ip_MemoryConfigType *get_memory_config(const struct device *dev) { #if defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) return &((struct nxp_s32_qspi_data *)dev->data)->memory_cfg; #else return ((Qspi_Ip_MemoryConfigType *) &((const struct nxp_s32_qspi_config *)dev->config)->memory_cfg); #endif } static ALWAYS_INLINE bool area_is_subregion(const struct device *dev, off_t offset, size_t size) { Qspi_Ip_MemoryConfigType *memory_cfg = get_memory_config(dev); return ((offset >= 0) && (offset < memory_cfg->memSize) && ((size + offset) <= memory_cfg->memSize)); } static inline void nxp_s32_qspi_lock(const struct device *dev) { #ifdef CONFIG_MULTITHREADING struct nxp_s32_qspi_data *data = dev->data; k_sem_take(&data->sem, K_FOREVER); #else ARG_UNUSED(dev); #endif } static inline void nxp_s32_qspi_unlock(const struct device *dev) { #ifdef CONFIG_MULTITHREADING struct nxp_s32_qspi_data *data = dev->data; k_sem_give(&data->sem); #else ARG_UNUSED(dev); #endif } /* Must be called with lock */ static int nxp_s32_qspi_wait_until_ready(const struct device *dev) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_StatusType status; uint32_t timeout = 
0xFFFFFF; int ret = 0; do { status = Qspi_Ip_GetMemoryStatus(data->instance); timeout--; } while ((status == STATUS_QSPI_IP_BUSY) && (timeout > 0)); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to read memory status (%d)", status); ret = -EIO; } else if (timeout <= 0) { LOG_ERR("Timeout, memory is busy"); ret = -ETIMEDOUT; } return ret; } #if !defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) static int nxp_s32_qspi_read_status_register(const struct device *dev, uint8_t reg_num, uint8_t *val) { struct nxp_s32_qspi_data *data = dev->data; uint16_t lut_idx; Qspi_Ip_StatusType status; int ret = 0; switch (reg_num) { case 1U: lut_idx = QSPI_LUT_IDX(QSPI_SEQ_RDSR); break; case 2U: lut_idx = QSPI_LUT_IDX(QSPI_SEQ_RDSR2); break; default: LOG_ERR("Reading SR%u is not supported", reg_num); return -EINVAL; } nxp_s32_qspi_lock(dev); status = Qspi_Ip_RunReadCommand(data->instance, lut_idx, 0U, val, NULL, sizeof(*val)); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to read SR%u (%d)", reg_num, status); ret = -EIO; } nxp_s32_qspi_unlock(dev); return ret; } static int nxp_s32_qspi_write_enable(const struct device *dev) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_MemoryConfigType *memory_cfg = get_memory_config(dev); Qspi_Ip_StatusType status; int ret = 0; nxp_s32_qspi_lock(dev); status = Qspi_Ip_RunCommand(data->instance, memory_cfg->statusConfig.writeEnableSRLut, 0U); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to enable SR write (%d)", status); ret = -EIO; } nxp_s32_qspi_unlock(dev); return ret; } static int nxp_s32_qspi_write_status_register(const struct device *dev, uint8_t reg_num, uint8_t val) { const struct nxp_s32_qspi_config *config = dev->config; struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_StatusType status; uint8_t buf[2] = { 0 }; uint16_t lut_idx; size_t size; int ret; if (reg_num == 1) { /* buf = [val] or [val, SR2] */ lut_idx = QSPI_LUT_IDX(QSPI_SEQ_WRSR); size = 1U; buf[0] = val; if (config->qer_type == 
JESD216_DW15_QER_S2B1v1) { /* Writing SR1 clears SR2 */ size = 2U; ret = nxp_s32_qspi_read_status_register(dev, 2, &buf[1]); if (ret < 0) { return ret; } } } else if (reg_num == 2) { /* buf = [val] or [SR1, val] */ if ((config->qer_type == JESD216_DW15_QER_VAL_S2B1v1) || (config->qer_type == JESD216_DW15_QER_VAL_S2B1v4) || (config->qer_type == JESD216_DW15_QER_VAL_S2B1v5)) { /* Writing SR2 requires writing SR1 as well */ lut_idx = QSPI_LUT_IDX(QSPI_SEQ_WRSR); size = 2U; buf[1] = val; ret = nxp_s32_qspi_read_status_register(dev, 1, &buf[0]); if (ret < 0) { return ret; } } else { lut_idx = QSPI_LUT_IDX(QSPI_SEQ_WRSR2); size = 1U; buf[0] = val; } } else { return -EINVAL; } nxp_s32_qspi_lock(dev); status = Qspi_Ip_RunWriteCommand(data->instance, lut_idx, 0U, (const uint8_t *)buf, (uint32_t)size); if (status == STATUS_QSPI_IP_SUCCESS) { /* Wait for the write command to complete */ ret = nxp_s32_qspi_wait_until_ready(dev); } else { LOG_ERR("Failed to write to SR%u (%d)", reg_num, status); ret = -EIO; } nxp_s32_qspi_unlock(dev); return ret; } static int nxp_s32_qspi_set_quad_mode(const struct device *dev, bool enabled) { const struct nxp_s32_qspi_config *config = dev->config; uint8_t sr_num; uint8_t sr_val; uint8_t qe_mask; bool qe_state; int ret; switch (config->qer_type) { case JESD216_DW15_QER_NONE: /* no QE bit, device detects reads based on opcode */ return 0; case JESD216_DW15_QER_S1B6: sr_num = 1U; qe_mask = BIT(6U); break; case JESD216_DW15_QER_S2B7: sr_num = 2U; qe_mask = BIT(7U); break; case JESD216_DW15_QER_S2B1v1: __fallthrough; case JESD216_DW15_QER_S2B1v4: __fallthrough; case JESD216_DW15_QER_S2B1v5: __fallthrough; case JESD216_DW15_QER_S2B1v6: sr_num = 2U; qe_mask = BIT(1U); break; default: return -ENOTSUP; } ret = nxp_s32_qspi_read_status_register(dev, sr_num, &sr_val); if (ret < 0) { return ret; } qe_state = ((sr_val & qe_mask) != 0U); if (qe_state == enabled) { return 0; } sr_val ^= qe_mask; ret = nxp_s32_qspi_write_enable(dev); if (ret < 0) { return 
ret; } ret = nxp_s32_qspi_write_status_register(dev, sr_num, sr_val); if (ret < 0) { return ret; } /* Verify write was successful */ ret = nxp_s32_qspi_read_status_register(dev, sr_num, &sr_val); if (ret < 0) { return ret; } qe_state = ((sr_val & qe_mask) != 0U); if (qe_state != enabled) { LOG_ERR("Failed to %s Quad mode", enabled ? "enable" : "disable"); return -EIO; } return ret; } #endif /* !defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) */ static int nxp_s32_qspi_read(const struct device *dev, off_t offset, void *dest, size_t size) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_StatusType status; int ret = 0; if (!dest) { return -EINVAL; } if (!area_is_subregion(dev, offset, size)) { return -ENODEV; } if (size) { nxp_s32_qspi_lock(dev); status = Qspi_Ip_Read(data->instance, (uint32_t)offset, (uint8_t *)dest, (uint32_t)size); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to read %zu bytes at 0x%lx (%d)", size, offset, status); ret = -EIO; } nxp_s32_qspi_unlock(dev); } return ret; } static int nxp_s32_qspi_write(const struct device *dev, off_t offset, const void *src, size_t size) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_MemoryConfigType *memory_cfg = get_memory_config(dev); Qspi_Ip_StatusType status; size_t max_write = (size_t)MIN(QSPI_IP_MAX_WRITE_SIZE, memory_cfg->pageSize); size_t len; int ret = 0; if (!src) { return -EINVAL; } if (!area_is_subregion(dev, offset, size)) { return -ENODEV; } nxp_s32_qspi_lock(dev); while (size) { len = MIN(max_write - (offset % max_write), size); status = Qspi_Ip_Program(data->instance, (uint32_t)offset, (const uint8_t *)src, (uint32_t)len); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to write %zu bytes at 0x%lx (%d)", len, offset, status); ret = -EIO; break; } ret = nxp_s32_qspi_wait_until_ready(dev); if (ret != 0) { break; } if (IS_ENABLED(CONFIG_FLASH_NXP_S32_QSPI_VERIFY_WRITE)) { status = Qspi_Ip_ProgramVerify(data->instance, (uint32_t)offset, (const uint8_t *)src, 
(uint32_t)len); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Write verification failed at 0x%lx (%d)", offset, status); ret = -EIO; break; } } size -= len; src = (const uint8_t *)src + len; offset += len; } nxp_s32_qspi_unlock(dev); return ret; } static int nxp_s32_qspi_erase_block(const struct device *dev, off_t offset, size_t size, size_t *erase_size) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_MemoryConfigType *memory_cfg = get_memory_config(dev); Qspi_Ip_EraseVarConfigType *etp = NULL; Qspi_Ip_EraseVarConfigType *etp_tmp; Qspi_Ip_StatusType status; int ret = 0; /* * Find the erase type with bigger size that can erase all or part of the * requested memory size */ for (uint8_t i = 0; i < QSPI_IP_ERASE_TYPES; i++) { etp_tmp = (Qspi_Ip_EraseVarConfigType *)&(memory_cfg->eraseSettings.eraseTypes[i]); if ((etp_tmp->eraseLut != QSPI_IP_LUT_INVALID) && QSPI_IS_ALIGNED(offset, etp_tmp->size) && (BIT(etp_tmp->size) <= size) && ((etp == NULL) || (etp_tmp->size > etp->size))) { etp = etp_tmp; } } if (etp != NULL) { *erase_size = BIT(etp->size); status = Qspi_Ip_EraseBlock(data->instance, (uint32_t)offset, *erase_size); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to erase %zu bytes at 0x%lx (%d)", *erase_size, (long)offset, status); ret = -EIO; } } else { LOG_ERR("Can't find erase size to erase %zu bytes", size); ret = -EINVAL; } return ret; } static int nxp_s32_qspi_erase(const struct device *dev, off_t offset, size_t size) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_MemoryConfigType *memory_cfg = get_memory_config(dev); Qspi_Ip_StatusType status; size_t erase_size; int ret = 0; if (!area_is_subregion(dev, offset, size)) { return -ENODEV; } nxp_s32_qspi_lock(dev); if (size == memory_cfg->memSize) { status = Qspi_Ip_EraseChip(data->instance); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to erase chip (%d)", status); ret = -EIO; } } else { while (size > 0) { erase_size = 0; ret = nxp_s32_qspi_erase_block(dev, offset, size, 
&erase_size); if (ret != 0) { break; } ret = nxp_s32_qspi_wait_until_ready(dev); if (ret != 0) { break; } if (IS_ENABLED(CONFIG_FLASH_NXP_S32_QSPI_VERIFY_ERASE)) { status = Qspi_Ip_EraseVerify(data->instance, (uint32_t)offset, erase_size); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Erase verification failed at 0x%lx (%d)", offset, status); ret = -EIO; break; } } offset += erase_size; size -= erase_size; } } nxp_s32_qspi_unlock(dev); return ret; } #if defined(CONFIG_FLASH_JESD216_API) || !defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) static int nxp_s32_qspi_read_id(const struct device *dev, uint8_t *id) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_StatusType status; int ret = 0; nxp_s32_qspi_lock(dev); status = Qspi_Ip_ReadId(data->instance, id); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to read device ID (%d)", status); ret = -EIO; } nxp_s32_qspi_unlock(dev); return ret; } #endif /* CONFIG_FLASH_JESD216_API || !CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME */ #if defined(CONFIG_FLASH_JESD216_API) static int nxp_s32_qspi_sfdp_read(const struct device *dev, off_t offset, void *buf, size_t len) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_StatusType status; int ret = 0; nxp_s32_qspi_lock(dev); status = Qspi_Ip_RunReadCommand(data->instance, data->read_sfdp_lut_idx, (uint32_t)offset, (uint8_t *)buf, NULL, (uint32_t)len); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Failed to read SFDP at 0x%lx (%d)", offset, status); ret = -EIO; } nxp_s32_qspi_unlock(dev); return ret; } #endif /* CONFIG_FLASH_JESD216_API */ #if defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) static int nxp_s32_qspi_sfdp_config(const struct device *dev) { struct nxp_s32_qspi_data *data = dev->data; Qspi_Ip_MemoryConfigType *memory_cfg = get_memory_config(dev); Qspi_Ip_StatusType status; /* Populate memory configuration with values obtained from SFDP */ memory_cfg->memType = QSPI_IP_SERIAL_FLASH; memory_cfg->lutSequences.opCount = QSPI_SFDP_LUT_SIZE; 
memory_cfg->lutSequences.lutOps = (Qspi_Ip_InstrOpType *)data->lut_ops; memory_cfg->initConfiguration.opCount = QSPI_SFDP_INIT_OP_SIZE; memory_cfg->initConfiguration.operations = (Qspi_Ip_InitOperationType *)data->init_ops; status = Qspi_Ip_ReadSfdp(memory_cfg, &data->memory_conn_cfg); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Fail to read SFDP (%d)", status); return -EIO; } #if defined(CONFIG_FLASH_JESD216_API) /* The HAL does not populate LUTs for read SFDP and read ID */ uint8_t lut_idx = QSPI_SFDP_LUT_SIZE; for (int i = 0; i < QSPI_SFDP_LUT_SIZE - 1; i++) { if ((data->lut_ops[i] == QSPI_IP_LUT_SEQ_END) && (data->lut_ops[i+1] == QSPI_IP_LUT_SEQ_END)) { lut_idx = i + 1; break; } } /* Make sure there's enough space to add the LUT sequences */ if ((lut_idx + QSPI_JESD216_SEQ_SIZE - 1) >= QSPI_SFDP_LUT_SIZE) { return -ENOMEM; } data->read_sfdp_lut_idx = lut_idx; data->lut_ops[lut_idx++] = QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, JESD216_CMD_READ_SFDP); data->lut_ops[lut_idx++] = QSPI_LUT_OP(QSPI_IP_LUT_INSTR_ADDR, QSPI_IP_LUT_PADS_1, 24U); data->lut_ops[lut_idx++] = QSPI_LUT_OP(QSPI_IP_LUT_INSTR_DUMMY, QSPI_IP_LUT_PADS_1, 8U); data->lut_ops[lut_idx++] = QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_1, 16U); data->lut_ops[lut_idx++] = QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END); memory_cfg->readIdSettings.readIdLut = lut_idx; memory_cfg->readIdSettings.readIdSize = JESD216_READ_ID_LEN; data->lut_ops[lut_idx++] = QSPI_LUT_OP(QSPI_IP_LUT_INSTR_CMD, QSPI_IP_LUT_PADS_1, JESD216_CMD_READ_ID); data->lut_ops[lut_idx++] = QSPI_LUT_OP(QSPI_IP_LUT_INSTR_READ, QSPI_IP_LUT_PADS_1, JESD216_READ_ID_LEN); data->lut_ops[lut_idx++] = QSPI_LUT_OP(QSPI_IP_LUT_INSTR_STOP, QSPI_IP_LUT_SEQ_END, QSPI_IP_LUT_SEQ_END); #endif /* CONFIG_FLASH_JESD216_API */ return 0; } #endif static const struct flash_parameters *nxp_s32_qspi_get_parameters(const struct device *dev) { const struct nxp_s32_qspi_config *config = dev->config; return 
&config->flash_parameters; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static void nxp_s32_qspi_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { const struct nxp_s32_qspi_config *config = dev->config; *layout = &config->layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static int nxp_s32_qspi_init(const struct device *dev) { struct nxp_s32_qspi_data *data = dev->data; const struct nxp_s32_qspi_config *config = dev->config; Qspi_Ip_MemoryConfigType *memory_cfg = get_memory_config(dev); Qspi_Ip_StatusType status; static uint8_t instance_cnt; int ret = 0; /* Used by the HAL to retrieve the internal driver state */ data->instance = instance_cnt++; __ASSERT_NO_MSG(data->instance < QSPI_IP_MEM_INSTANCE_COUNT); data->memory_conn_cfg.qspiInstance = memc_nxp_s32_qspi_get_instance(config->controller); #if defined(CONFIG_MULTITHREADING) k_sem_init(&data->sem, 1, 1); #endif #if defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) nxp_s32_qspi_sfdp_config(dev); #endif /* Init memory device connected to the bus */ status = Qspi_Ip_Init(data->instance, (const Qspi_Ip_MemoryConfigType *)memory_cfg, (const Qspi_Ip_MemoryConnectionType *)&data->memory_conn_cfg); if (status != STATUS_QSPI_IP_SUCCESS) { LOG_ERR("Fail to init memory device %d (%d)", data->instance, status); return -EIO; } #if !defined(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME) uint8_t jedec_id[JESD216_READ_ID_LEN]; /* Verify connectivity by reading the device ID */ ret = nxp_s32_qspi_read_id(dev, jedec_id); if (ret != 0) { LOG_ERR("JEDEC ID read failed (%d)", ret); return -ENODEV; } /* * Check the memory device ID against the one configured from devicetree * to verify we are talking to the correct device. 
*/ if (memcmp(jedec_id, memory_cfg->readIdSettings.readIdExpected, sizeof(jedec_id)) != 0) { LOG_ERR("Device id %02x %02x %02x does not match config %02x %02x %02x", jedec_id[0], jedec_id[1], jedec_id[2], memory_cfg->readIdSettings.readIdExpected[0], memory_cfg->readIdSettings.readIdExpected[1], memory_cfg->readIdSettings.readIdExpected[2]); return -EINVAL; } ret = nxp_s32_qspi_set_quad_mode(dev, config->quad_mode); if (ret < 0) { return ret; } #endif /* !CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME */ return ret; } static const struct flash_driver_api nxp_s32_qspi_api = { .erase = nxp_s32_qspi_erase, .write = nxp_s32_qspi_write, .read = nxp_s32_qspi_read, .get_parameters = nxp_s32_qspi_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = nxp_s32_qspi_pages_layout, #endif /* CONFIG_FLASH_PAGE_LAYOUT */ #if defined(CONFIG_FLASH_JESD216_API) .sfdp_read = nxp_s32_qspi_sfdp_read, .read_jedec_id = nxp_s32_qspi_read_id, #endif /* CONFIG_FLASH_JESD216_API */ }; #define QSPI_PAGE_LAYOUT(n) \ .layout = { \ .pages_count = (DT_INST_PROP(n, size) / 8) \ / CONFIG_FLASH_NXP_S32_QSPI_LAYOUT_PAGE_SIZE, \ .pages_size = CONFIG_FLASH_NXP_S32_QSPI_LAYOUT_PAGE_SIZE, \ } #define QSPI_READ_ID_CFG(n) \ { \ .readIdLut = QSPI_LUT_IDX(QSPI_SEQ_RDID), \ .readIdSize = DT_INST_PROP_LEN(n, jedec_id), \ .readIdExpected = DT_INST_PROP(n, jedec_id), \ } #define QSPI_MEMORY_CONN_CFG(n) \ { \ .connectionType = (Qspi_Ip_ConnectionType)DT_INST_REG_ADDR(n), \ .memAlignment = DT_INST_PROP_OR(n, memory_alignment, 1) \ } #define QSPI_ERASE_CFG(n) \ { \ .eraseTypes = { \ { \ .eraseLut = QSPI_LUT_IDX(QSPI_SEQ_SE), \ .size = 12, /* 4 KB */ \ }, \ { \ .eraseLut = QSPI_LUT_IDX(QSPI_SEQ_BE), \ .size = 16, /* 64 KB */ \ }, \ COND_CODE_1(DT_INST_PROP(n, has_32k_erase), ( \ { \ .eraseLut = QSPI_LUT_IDX(QSPI_SEQ_BE_32K), \ .size = 15, /* 32 KB */ \ }, \ ), ( \ { \ .eraseLut = QSPI_IP_LUT_INVALID, \ }, \ )) \ { \ .eraseLut = QSPI_IP_LUT_INVALID, \ }, \ }, \ .chipEraseLut = QSPI_LUT_IDX(QSPI_SEQ_CE), \ } 
#define QSPI_RESET_CFG(n) \ { \ .resetCmdLut = QSPI_LUT_IDX(QSPI_SEQ_RESET), \ .resetCmdCount = 4U, \ } /* * SR information used internally by the HAL to access fields BUSY and WEL * during read/write/erase and polling status operations. */ #define QSPI_STATUS_REG_CFG(n) \ { \ .statusRegInitReadLut = QSPI_LUT_IDX(QSPI_SEQ_RDSR), \ .statusRegReadLut = QSPI_LUT_IDX(QSPI_SEQ_RDSR), \ .statusRegWriteLut = QSPI_LUT_IDX(QSPI_SEQ_WRSR), \ .writeEnableSRLut = QSPI_LUT_IDX(QSPI_SEQ_WREN), \ .writeEnableLut = QSPI_LUT_IDX(QSPI_SEQ_WREN), \ .regSize = 1U, \ .busyOffset = 0U, \ .busyValue = 1U, \ .writeEnableOffset = 1U, \ } #define QSPI_INIT_CFG(n) \ { \ .opCount = 0U, \ .operations = NULL, \ } #define QSPI_LUT_CFG(n) \ { \ .opCount = ARRAY_SIZE(nxp_s32_qspi_lut), \ .lutOps = (Qspi_Ip_InstrOpType *)nxp_s32_qspi_lut, \ } #define QSPI_MEMORY_CFG(n) \ { \ .memType = QSPI_IP_SERIAL_FLASH, \ .hfConfig = NULL, \ .memSize = DT_INST_PROP(n, size) / 8, \ .pageSize = CONFIG_FLASH_NXP_S32_QSPI_LAYOUT_PAGE_SIZE, \ .writeLut = QSPI_LUT_IDX(QSPI_WRITE_SEQ(n)), \ .readLut = QSPI_LUT_IDX(QSPI_READ_SEQ(n)), \ .read0xxLut = QSPI_IP_LUT_INVALID, \ .read0xxLutAHB = QSPI_IP_LUT_INVALID, \ .eraseSettings = QSPI_ERASE_CFG(n), \ .statusConfig = QSPI_STATUS_REG_CFG(n), \ .resetSettings = QSPI_RESET_CFG(n), \ .initResetSettings = QSPI_RESET_CFG(n), \ .initConfiguration = QSPI_INIT_CFG(n), \ .lutSequences = QSPI_LUT_CFG(n), \ COND_CODE_1(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME, (), ( \ .readIdSettings = QSPI_READ_ID_CFG(n),) \ ) \ .suspendSettings = { \ .eraseSuspendLut = QSPI_IP_LUT_INVALID, \ .eraseResumeLut = QSPI_IP_LUT_INVALID, \ .programSuspendLut = QSPI_IP_LUT_INVALID, \ .programResumeLut = QSPI_IP_LUT_INVALID, \ }, \ .initCallout = NULL, \ .resetCallout = NULL, \ .errorCheckCallout = NULL, \ .eccCheckCallout = NULL, \ .ctrlAutoCfgPtr = NULL, \ } #define FLASH_NXP_S32_QSPI_INIT_DEVICE(n) \ COND_CODE_1(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME, (), ( \ BUILD_ASSERT(DT_INST_NODE_HAS_PROP(n, 
jedec_id), \ "jedec-id is required for non-runtime SFDP"); \ BUILD_ASSERT(DT_INST_PROP_LEN(n, jedec_id) == JESD216_READ_ID_LEN,\ "jedec-id must be of size JESD216_READ_ID_LEN bytes"); \ )) \ \ static const struct nxp_s32_qspi_config nxp_s32_qspi_config_##n = { \ .controller = DEVICE_DT_GET(DT_INST_BUS(n)), \ .flash_parameters = { \ .write_block_size = QSPI_WRITE_BLOCK_SIZE, \ .erase_value = QSPI_ERASE_VALUE, \ }, \ IF_ENABLED(CONFIG_FLASH_PAGE_LAYOUT, \ (QSPI_PAGE_LAYOUT(n),)) \ COND_CODE_1(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME, (), ( \ .memory_cfg = QSPI_MEMORY_CFG(n), \ .qer_type = QSPI_QER_TYPE(n), \ .quad_mode = QSPI_HAS_QUAD_MODE(n) \ )) \ }; \ \ static struct nxp_s32_qspi_data nxp_s32_qspi_data_##n = { \ .memory_conn_cfg = QSPI_MEMORY_CONN_CFG(n), \ COND_CODE_1(CONFIG_FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME, (), ( \ .read_sfdp_lut_idx = QSPI_LUT_IDX(QSPI_SEQ_READ_SFDP), \ )) \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ nxp_s32_qspi_init, \ NULL, \ &nxp_s32_qspi_data_##n, \ &nxp_s32_qspi_config_##n, \ POST_KERNEL, \ CONFIG_FLASH_INIT_PRIORITY, \ &nxp_s32_qspi_api); DT_INST_FOREACH_STATUS_OKAY(FLASH_NXP_S32_QSPI_INIT_DEVICE) ```
/content/code_sandbox/drivers/flash/flash_nxp_s32_qspi_nor.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,540
```unknown # Infineon CAT1 Flash configuration options # an affiliate of Cypress Semiconductor Corporation # config FLASH_INFINEON_CAT1 bool "Infineon CAT1 FLASH driver" default y depends on DT_HAS_INFINEON_CAT1_FLASH_CONTROLLER_ENABLED && DT_HAS_FIXED_PARTITIONS_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_DRIVER_ENABLED select USE_INFINEON_FLASH select FLASH_HAS_EXPLICIT_ERASE help Enable the Flash driver for Infineon CAT1 family. config INFINEON_CAT1_QSPI_FLASH bool "Infineon CAT1 QSPI FLASH driver" default y depends on DT_HAS_INFINEON_CAT1_QSPI_FLASH_ENABLED && DT_HAS_FIXED_PARTITIONS_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_DRIVER_ENABLED select USE_INFINEON_FLASH select USE_INFINEON_SMIF select FLASH_HAS_EXPLICIT_ERASE help Enable the QSPI Flash driver for Infineon CAT1 family. config MPU_ALLOW_FLASH_WRITE bool "Add MPU access to write to flash" help Enable this to allow MPU RWX access to flash memory. ```
/content/code_sandbox/drivers/flash/Kconfig.ifx_cat1
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
234
```unknown config SOC_FLASH_XMC4XXX bool "XMC4XXX flash driver" default y depends on DT_HAS_INFINEON_XMC4XXX_FLASH_CONTROLLER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_EXPLICIT_ERASE help Enables XMC4XXX flash driver. ```
/content/code_sandbox/drivers/flash/Kconfig.xmc4xxx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
69
```c /* * */ #include <zephyr/internal/syscall_handler.h> #include <zephyr/drivers/flash.h> static inline int z_vrfy_flash_read(const struct device *dev, off_t offset, void *data, size_t len) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, read)); K_OOPS(K_SYSCALL_MEMORY_WRITE(data, len)); return z_impl_flash_read((const struct device *)dev, offset, (void *)data, len); } #include <zephyr/syscalls/flash_read_mrsh.c> static inline int z_vrfy_flash_write(const struct device *dev, off_t offset, const void *data, size_t len) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, write)); K_OOPS(K_SYSCALL_MEMORY_READ(data, len)); return z_impl_flash_write((const struct device *)dev, offset, (const void *)data, len); } #include <zephyr/syscalls/flash_write_mrsh.c> static inline int z_vrfy_flash_erase(const struct device *dev, off_t offset, size_t size) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, erase)); return z_impl_flash_erase((const struct device *)dev, offset, size); } #include <zephyr/syscalls/flash_erase_mrsh.c> static inline size_t z_vrfy_flash_get_write_block_size(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_FLASH)); return z_impl_flash_get_write_block_size(dev); } #include <zephyr/syscalls/flash_get_write_block_size_mrsh.c> static inline const struct flash_parameters *z_vrfy_flash_get_parameters(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, get_parameters)); return z_impl_flash_get_parameters(dev); } #include <zephyr/syscalls/flash_get_parameters_mrsh.c> int z_vrfy_flash_fill(const struct device *dev, uint8_t val, off_t offset, size_t size) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_FLASH)); return z_impl_flash_fill(dev, val, offset, size); } #include <zephyr/syscalls/flash_fill_mrsh.c> int z_vrfy_flash_flatten(const struct device *dev, off_t offset, size_t size) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_FLASH)); return z_impl_flash_flatten(dev, offset, size); } #include <zephyr/syscalls/flash_flatten_mrsh.c> #ifdef CONFIG_FLASH_PAGE_LAYOUT static inline int 
z_vrfy_flash_get_page_info_by_offs(const struct device *dev, off_t offs, struct flash_pages_info *info) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, page_layout)); K_OOPS(K_SYSCALL_MEMORY_WRITE(info, sizeof(struct flash_pages_info))); return z_impl_flash_get_page_info_by_offs((const struct device *)dev, offs, (struct flash_pages_info *)info); } #include <zephyr/syscalls/flash_get_page_info_by_offs_mrsh.c> static inline int z_vrfy_flash_get_page_info_by_idx(const struct device *dev, uint32_t idx, struct flash_pages_info *info) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, page_layout)); K_OOPS(K_SYSCALL_MEMORY_WRITE(info, sizeof(struct flash_pages_info))); return z_impl_flash_get_page_info_by_idx((const struct device *)dev, idx, (struct flash_pages_info *)info); } #include <zephyr/syscalls/flash_get_page_info_by_idx_mrsh.c> static inline size_t z_vrfy_flash_get_page_count(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, page_layout)); return z_impl_flash_get_page_count((const struct device *)dev); } #include <zephyr/syscalls/flash_get_page_count_mrsh.c> #endif /* CONFIG_FLASH_PAGE_LAYOUT */ #ifdef CONFIG_FLASH_JESD216_API static inline int z_vrfy_flash_sfdp_read(const struct device *dev, off_t offset, void *data, size_t len) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, sfdp_read)); K_OOPS(K_SYSCALL_MEMORY_WRITE(data, len)); return z_impl_flash_sfdp_read(dev, offset, data, len); } #include <zephyr/syscalls/flash_sfdp_read_mrsh.c> static inline int z_vrfy_flash_read_jedec_id(const struct device *dev, uint8_t *id) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, read_jedec_id)); K_OOPS(K_SYSCALL_MEMORY_WRITE(id, 3)); return z_impl_flash_read_jedec_id(dev, id); } #include <zephyr/syscalls/flash_read_jedec_id_mrsh.c> #endif /* CONFIG_FLASH_JESD216_API */ #ifdef CONFIG_FLASH_EX_OP_ENABLED static inline int z_vrfy_flash_ex_op(const struct device *dev, uint16_t code, const uintptr_t in, void *out) { K_OOPS(K_SYSCALL_DRIVER_FLASH(dev, ex_op)); /* * If the code is a vendor code, then ex_op function 
have to perform * verification. Zephyr codes should be verified here, but currently * there are no Zephyr extended codes yet. */ return z_impl_flash_ex_op(dev, code, in, out); } #include <zephyr/syscalls/flash_ex_op_mrsh.c> #endif /* CONFIG_FLASH_EX_OP_ENABLED */ ```
/content/code_sandbox/drivers/flash/flash_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,170
```unknown # Atmel SAM0 flash driver config menuconfig SOC_FLASH_SAM0 bool "Atmel SAM0 flash driver" default y depends on DT_HAS_ATMEL_SAM0_NVMCTRL_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_EXPLICIT_ERASE select MPU_ALLOW_FLASH_WRITE if ARM_MPU help Enable the Atmel SAM0 series internal flash driver. config SOC_FLASH_SAM0_EMULATE_BYTE_PAGES bool "Emulate byte-sized pages" depends on SOC_FLASH_SAM0 default y if SOC_SERIES_SAMC20 || \ SOC_SERIES_SAMC21 help Emulate a device with byte-sized pages by doing a read/modify/erase/write. ```
/content/code_sandbox/drivers/flash/Kconfig.sam0
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
162
```c /* * */ #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <soc.h> #include <zephyr/drivers/flash.h> #include <string.h> #include <nrfx_nvmc.h> #include <nrf_erratas.h> #include "soc_flash_nrf.h" #define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(flash_nrf); #if DT_NODE_HAS_STATUS(DT_INST(0, nordic_nrf51_flash_controller), okay) #define DT_DRV_COMPAT nordic_nrf51_flash_controller #elif DT_NODE_HAS_STATUS(DT_INST(0, nordic_nrf52_flash_controller), okay) #define DT_DRV_COMPAT nordic_nrf52_flash_controller #elif DT_NODE_HAS_STATUS(DT_INST(0, nordic_nrf53_flash_controller), okay) #define DT_DRV_COMPAT nordic_nrf53_flash_controller #elif DT_NODE_HAS_STATUS(DT_INST(0, nordic_nrf91_flash_controller), okay) #define DT_DRV_COMPAT nordic_nrf91_flash_controller #else #error No matching compatible for soc_flash_nrf.c #endif #define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash) #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE #define FLASH_SLOT_WRITE 7500 #if defined(CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE) #define FLASH_SLOT_ERASE (MAX(CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE_MS * 1000, \ 7500)) #else #define FLASH_SLOT_ERASE FLASH_PAGE_ERASE_MAX_TIME_US #endif /* CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE */ static int write_op(void *context); /* instance of flash_op_handler_t */ static int write_synchronously(off_t addr, const void *data, size_t len); static int erase_op(void *context); /* instance of flash_op_handler_t */ static int erase_synchronously(uint32_t addr, uint32_t size); #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ static const struct flash_parameters flash_nrf_parameters = { #if defined(CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS) .write_block_size = 1, #else .write_block_size = 4, #endif .erase_value = 0xff, }; #if defined(CONFIG_MULTITHREADING) /* semaphore for locking flash resources (tickers) */ static struct k_sem sem_lock; #define SYNC_INIT() k_sem_init(&sem_lock, 1, 
1) #define SYNC_LOCK() k_sem_take(&sem_lock, K_FOREVER) #define SYNC_UNLOCK() k_sem_give(&sem_lock) #else #define SYNC_INIT() #define SYNC_LOCK() #define SYNC_UNLOCK() #endif #if NRF52_ERRATA_242_PRESENT #include <hal/nrf_power.h> static int suspend_pofwarn(void); static void restore_pofwarn(void); #define SUSPEND_POFWARN() suspend_pofwarn() #define RESUME_POFWARN() restore_pofwarn() #else #define SUSPEND_POFWARN() 0 #define RESUME_POFWARN() #endif /* NRF52_ERRATA_242_PRESENT */ static int write(off_t addr, const void *data, size_t len); static int erase(uint32_t addr, uint32_t size); static inline bool is_aligned_32(uint32_t data) { return (data & 0x3) ? false : true; } static inline bool is_within_bounds(off_t addr, size_t len, off_t boundary_start, size_t boundary_size) { return (addr >= boundary_start && (addr < (boundary_start + boundary_size)) && (len <= (boundary_start + boundary_size - addr))); } static inline bool is_regular_addr_valid(off_t addr, size_t len) { return is_within_bounds(addr, len, 0, nrfx_nvmc_flash_size_get()); } static inline bool is_uicr_addr_valid(off_t addr, size_t len) { #ifdef CONFIG_SOC_FLASH_NRF_UICR return is_within_bounds(addr, len, (off_t)NRF_UICR, sizeof(*NRF_UICR)); #else return false; #endif /* CONFIG_SOC_FLASH_NRF_UICR */ } #if CONFIG_SOC_FLASH_NRF_UICR && IS_ENABLED(NRF91_ERRATA_7_ENABLE_WORKAROUND) static inline void nrf91_errata_7_enter(void) { __disable_irq(); } static inline void nrf91_errata_7_exit(void) { __DSB(); __enable_irq(); } static void nrf_buffer_read_91_uicr(void *data, off_t addr, size_t len) { nrf91_errata_7_enter(); nrf_nvmc_buffer_read(data, (uint32_t)addr, len); nrf91_errata_7_exit(); } #endif static void nvmc_wait_ready(void) { while (!nrfx_nvmc_write_done_check()) { } } static int flash_nrf_read(const struct device *dev, off_t addr, void *data, size_t len) { const bool within_uicr = is_uicr_addr_valid(addr, len); if (is_regular_addr_valid(addr, len)) { addr += DT_REG_ADDR(SOC_NV_FLASH_NODE); } else if 
(!within_uicr) { LOG_ERR("invalid address: 0x%08lx:%zu", (unsigned long)addr, len); return -EINVAL; } if (!len) { return 0; } #if CONFIG_SOC_FLASH_NRF_UICR && IS_ENABLED(NRF91_ERRATA_7_ENABLE_WORKAROUND) if (within_uicr) { nrf_buffer_read_91_uicr(data, (uint32_t)addr, len); return 0; } #endif nrf_nvmc_buffer_read(data, (uint32_t)addr, len); return 0; } static int flash_nrf_write(const struct device *dev, off_t addr, const void *data, size_t len) { int ret; if (is_regular_addr_valid(addr, len)) { addr += DT_REG_ADDR(SOC_NV_FLASH_NODE); } else if (!is_uicr_addr_valid(addr, len)) { LOG_ERR("invalid address: 0x%08lx:%zu", (unsigned long)addr, len); return -EINVAL; } #if !defined(CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS) if (!is_aligned_32(addr) || (len % sizeof(uint32_t))) { LOG_ERR("not word-aligned: 0x%08lx:%zu", (unsigned long)addr, len); return -EINVAL; } #endif if (!len) { return 0; } SYNC_LOCK(); #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE if (nrf_flash_sync_is_required()) { ret = write_synchronously(addr, data, len); } else #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ { ret = write(addr, data, len); } SYNC_UNLOCK(); return ret; } static int flash_nrf_erase(const struct device *dev, off_t addr, size_t size) { uint32_t pg_size = nrfx_nvmc_flash_page_size_get(); uint32_t n_pages = size / pg_size; int ret; if (is_regular_addr_valid(addr, size)) { /* Erase can only be done per page */ if (((addr % pg_size) != 0) || ((size % pg_size) != 0)) { LOG_ERR("unaligned address: 0x%08lx:%zu", (unsigned long)addr, size); return -EINVAL; } if (!n_pages) { return 0; } addr += DT_REG_ADDR(SOC_NV_FLASH_NODE); #ifdef CONFIG_SOC_FLASH_NRF_UICR } else if (addr != (off_t)NRF_UICR || size != sizeof(*NRF_UICR)) { LOG_ERR("invalid address: 0x%08lx:%zu", (unsigned long)addr, size); return -EINVAL; } #else } else { LOG_ERR("invalid address: 0x%08lx:%zu", (unsigned long)addr, size); return -EINVAL; } #endif /* CONFIG_SOC_FLASH_NRF_UICR */ SYNC_LOCK(); #ifndef 
CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE if (nrf_flash_sync_is_required()) { ret = erase_synchronously(addr, size); } else #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ { ret = erase(addr, size); } SYNC_UNLOCK(); return ret; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static struct flash_pages_layout dev_layout; static void flash_nrf_pages_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { *layout = &dev_layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static const struct flash_parameters * flash_nrf_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_nrf_parameters; } static const struct flash_driver_api flash_nrf_api = { .read = flash_nrf_read, .write = flash_nrf_write, .erase = flash_nrf_erase, .get_parameters = flash_nrf_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_nrf_pages_layout, #endif }; static int nrf_flash_init(const struct device *dev) { SYNC_INIT(); #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE nrf_flash_sync_init(); #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ #if defined(CONFIG_FLASH_PAGE_LAYOUT) dev_layout.pages_count = nrfx_nvmc_flash_page_count_get(); dev_layout.pages_size = nrfx_nvmc_flash_page_size_get(); #endif return 0; } DEVICE_DT_INST_DEFINE(0, nrf_flash_init, NULL, NULL, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_nrf_api); #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE static int erase_synchronously(uint32_t addr, uint32_t size) { struct flash_context context = { .flash_addr = addr, .len = size, .enable_time_limit = 1, /* enable time limit */ #if defined(CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE) .flash_addr_next = addr #endif }; struct flash_op_desc flash_op_desc = { .handler = erase_op, .context = &context }; nrf_flash_sync_set_context(FLASH_SLOT_ERASE); return nrf_flash_sync_exe(&flash_op_desc); } static int write_synchronously(off_t addr, const void *data, size_t len) { struct flash_context context = { .data_addr = 
(uint32_t) data, .flash_addr = addr, .len = len, .enable_time_limit = 1 /* enable time limit */ }; struct flash_op_desc flash_op_desc = { .handler = write_op, .context = &context }; nrf_flash_sync_set_context(FLASH_SLOT_WRITE); return nrf_flash_sync_exe(&flash_op_desc); } #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ static int erase_op(void *context) { uint32_t pg_size = nrfx_nvmc_flash_page_size_get(); struct flash_context *e_ctx = context; #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE uint32_t i = 0U; if (e_ctx->enable_time_limit) { nrf_flash_sync_get_timestamp_begin(); } #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ #ifdef CONFIG_SOC_FLASH_NRF_UICR if (e_ctx->flash_addr == (off_t)NRF_UICR) { if (SUSPEND_POFWARN()) { return -ECANCELED; } (void)nrfx_nvmc_uicr_erase(); RESUME_POFWARN(); return FLASH_OP_DONE; } #endif do { if (SUSPEND_POFWARN()) { return -ECANCELED; } #if defined(CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE) if (e_ctx->flash_addr == e_ctx->flash_addr_next) { nrfx_nvmc_page_partial_erase_init(e_ctx->flash_addr, CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE_MS); e_ctx->flash_addr_next += pg_size; } if (nrfx_nvmc_page_partial_erase_continue()) { e_ctx->len -= pg_size; e_ctx->flash_addr += pg_size; } #else (void)nrfx_nvmc_page_erase(e_ctx->flash_addr); e_ctx->len -= pg_size; e_ctx->flash_addr += pg_size; #endif /* CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE */ RESUME_POFWARN(); #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE i++; if (e_ctx->enable_time_limit) { if (nrf_flash_sync_check_time_limit(i)) { break; } } #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ } while (e_ctx->len > 0); return (e_ctx->len > 0) ? 
FLASH_OP_ONGOING : FLASH_OP_DONE; } static void shift_write_context(uint32_t shift, struct flash_context *w_ctx) { w_ctx->flash_addr += shift; w_ctx->data_addr += shift; w_ctx->len -= shift; } static int write_op(void *context) { struct flash_context *w_ctx = context; #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE uint32_t i = 1U; if (w_ctx->enable_time_limit) { nrf_flash_sync_get_timestamp_begin(); } #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ #if defined(CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS) /* If not aligned, write unaligned beginning */ if (!is_aligned_32(w_ctx->flash_addr)) { uint32_t count = sizeof(uint32_t) - (w_ctx->flash_addr & 0x3); if (count > w_ctx->len) { count = w_ctx->len; } if (SUSPEND_POFWARN()) { return -ECANCELED; } nrfx_nvmc_bytes_write(w_ctx->flash_addr, (const void *)w_ctx->data_addr, count); RESUME_POFWARN(); shift_write_context(count, w_ctx); #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE if (w_ctx->enable_time_limit) { if (nrf_flash_sync_check_time_limit(1)) { nvmc_wait_ready(); return FLASH_OP_ONGOING; } } #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ } #endif /* CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS */ /* Write all the 4-byte aligned data */ while (w_ctx->len >= sizeof(uint32_t)) { if (SUSPEND_POFWARN()) { return -ECANCELED; } nrfx_nvmc_word_write(w_ctx->flash_addr, UNALIGNED_GET((uint32_t *)w_ctx->data_addr)); RESUME_POFWARN(); shift_write_context(sizeof(uint32_t), w_ctx); #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE i++; if (w_ctx->enable_time_limit) { if (nrf_flash_sync_check_time_limit(i)) { nvmc_wait_ready(); return FLASH_OP_ONGOING; } } #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ } #if defined(CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS) /* Write remaining unaligned data */ if (w_ctx->len) { if (SUSPEND_POFWARN()) { return -ECANCELED; } nrfx_nvmc_bytes_write(w_ctx->flash_addr, (const void *)w_ctx->data_addr, w_ctx->len); RESUME_POFWARN(); shift_write_context(w_ctx->len, w_ctx); } 
#endif /* CONFIG_SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS */ nvmc_wait_ready(); return FLASH_OP_DONE; } static int erase(uint32_t addr, uint32_t size) { struct flash_context context = { .flash_addr = addr, .len = size, #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE .enable_time_limit = 0, /* disable time limit */ #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ #if defined(CONFIG_SOC_FLASH_NRF_PARTIAL_ERASE) .flash_addr_next = addr #endif }; return erase_op(&context); } static int write(off_t addr, const void *data, size_t len) { struct flash_context context = { .data_addr = (uint32_t) data, .flash_addr = addr, .len = len, #ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE .enable_time_limit = 0 /* disable time limit */ #endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */ }; return write_op(&context); } #if NRF52_ERRATA_242_PRESENT /* Disable POFWARN by writing POFCON before a write or erase operation. * Do not attempt to write or erase if EVENTS_POFWARN is already asserted. */ static bool pofcon_enabled; static int suspend_pofwarn(void) { if (!nrf52_errata_242()) { return 0; } bool enabled; nrf_power_pof_thr_t pof_thr; pof_thr = nrf_power_pofcon_get(NRF_POWER, &enabled); if (enabled) { nrf_power_pofcon_set(NRF_POWER, false, pof_thr); /* This check need to be reworked once POFWARN event will be * served by zephyr. */ if (nrf_power_event_check(NRF_POWER, NRF_POWER_EVENT_POFWARN)) { nrf_power_pofcon_set(NRF_POWER, true, pof_thr); return -ECANCELED; } pofcon_enabled = enabled; } return 0; } static void restore_pofwarn(void) { nrf_power_pof_thr_t pof_thr; if (pofcon_enabled) { pof_thr = nrf_power_pofcon_get(NRF_POWER, NULL); nrf_power_pofcon_set(NRF_POWER, true, pof_thr); pofcon_enabled = false; } } #endif /* NRF52_ERRATA_242_PRESENT */ ```
/content/code_sandbox/drivers/flash/soc_flash_nrf.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,994
```unknown config SOC_FLASH_TELINK_B91 bool "Telink Semiconductor B91 flash driver" default y depends on DT_HAS_TELINK_B91_FLASH_CONTROLLER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_EXPLICIT_ERASE help Enables Telink B91 flash driver. ```
/content/code_sandbox/drivers/flash/Kconfig.b91
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
69
```unknown # config FLASH_RPI_PICO bool "Raspberry Pi Pico flash driver" default y depends on DT_HAS_RASPBERRYPI_PICO_FLASH_CONTROLLER_ENABLED select FLASH_HAS_DRIVER_ENABLED select FLASH_HAS_PAGE_LAYOUT select FLASH_HAS_EXPLICIT_ERASE select PICOSDK_USE_FLASH help Enable Raspberry Pi Pico flash driver. ```
/content/code_sandbox/drivers/flash/Kconfig.rpi_pico
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
76
```c /* * */ #include <zephyr/drivers/kscan.h> #include <zephyr/internal/syscall_handler.h> static inline int z_vrfy_kscan_config(const struct device *dev, kscan_callback_t callback_isr) { K_OOPS(K_SYSCALL_DRIVER_KSCAN(dev, config)); K_OOPS(K_SYSCALL_VERIFY_MSG(callback_isr == 0, "callback cannot be set from user mode")); return z_impl_kscan_config((const struct device *)dev, callback_isr); } #include <zephyr/syscalls/kscan_config_mrsh.c> static inline int z_vrfy_kscan_disable_callback(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_KSCAN(dev, disable_callback)); return z_impl_kscan_disable_callback((const struct device *)dev); } #include <zephyr/syscalls/kscan_disable_callback_mrsh.c> static int z_vrfy_kscan_enable_callback(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_KSCAN(dev, enable_callback)); return z_impl_kscan_enable_callback((const struct device *)dev); } #include <zephyr/syscalls/kscan_enable_callback_mrsh.c> ```
/content/code_sandbox/drivers/kscan/kscan_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
241
```unknown config KSCAN_INPUT bool "Input subsystem to kscan adapter driver" default y depends on INPUT depends on DT_HAS_ZEPHYR_KSCAN_INPUT_ENABLED ```
/content/code_sandbox/drivers/kscan/Kconfig.input
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
39
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/flash.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <string.h> #include <driverlib/flash.h> #include <driverlib/vims.h> #define DT_DRV_COMPAT ti_cc13xx_cc26xx_flash_controller #define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash) #define FLASH_ADDR DT_REG_ADDR(SOC_NV_FLASH_NODE) #define FLASH_SIZE DT_REG_SIZE(SOC_NV_FLASH_NODE) #define FLASH_ERASE_SIZE DT_PROP(SOC_NV_FLASH_NODE, erase_block_size) #define FLASH_WRITE_SIZE DT_PROP(SOC_NV_FLASH_NODE, write_block_size) struct flash_priv { struct k_sem mutex; }; static const struct flash_parameters flash_cc13xx_cc26xx_parameters = { .write_block_size = FLASH_WRITE_SIZE, .erase_value = 0xff, }; static int flash_cc13xx_cc26xx_init(const struct device *dev) { struct flash_priv *priv = dev->data; k_sem_init(&priv->mutex, 1, 1); return 0; } static void flash_cc13xx_cc26xx_cache_restore(uint32_t vims_mode) { while (VIMSModeGet(VIMS_BASE) == VIMS_MODE_CHANGING) { ; } /* Restore VIMS mode and line buffers */ if (vims_mode != VIMS_MODE_DISABLED) { VIMSModeSafeSet(VIMS_BASE, vims_mode, true); } VIMSLineBufEnable(VIMS_BASE); } static uint32_t flash_cc13xx_cc26xx_cache_disable(void) { uint32_t vims_mode; /* VIMS and both line buffers should be off during flash update */ VIMSLineBufDisable(VIMS_BASE); while (VIMSModeGet(VIMS_BASE) == VIMS_MODE_CHANGING) { ; } /* Save current VIMS mode for restoring it later */ vims_mode = VIMSModeGet(VIMS_BASE); if (vims_mode != VIMS_MODE_DISABLED) { VIMSModeSafeSet(VIMS_BASE, VIMS_MODE_DISABLED, true); } return vims_mode; } static bool flash_cc13xx_cc26xx_range_protected(off_t offs, size_t size) { off_t sector, end; sector = (offs / FLASH_ERASE_SIZE) * FLASH_ERASE_SIZE; end = offs + size; /* * From TI's HAL 'driverlib/flash.h': * * After write protecting a sector this sector can only be set back * to unprotected by a device reset. * * Return early if any of sectors from requested range is protected. 
*/ do { if (FlashProtectionGet(sector) == FLASH_WRITE_PROTECT) { return true; } sector += FLASH_ERASE_SIZE; } while (sector < end); return false; } static int flash_cc13xx_cc26xx_erase(const struct device *dev, off_t offs, size_t size) { struct flash_priv *priv = dev->data; uint32_t vims_mode; unsigned int key; int i, rc = 0; size_t cnt; if (!size) { return 0; } /* Offset and length should be multiple of erase size */ if (((offs % FLASH_ERASE_SIZE) != 0) || ((size % FLASH_ERASE_SIZE) != 0)) { return -EINVAL; } if (flash_cc13xx_cc26xx_range_protected(offs, size)) { return -EINVAL; } if (k_sem_take(&priv->mutex, K_FOREVER)) { return -EACCES; } vims_mode = flash_cc13xx_cc26xx_cache_disable(); /* * Disable all interrupts to prevent flash read, from TI's TRF: * * During a FLASH memory write or erase operation, the FLASH memory * must not be read. */ key = irq_lock(); /* Erase sector/page one by one, break out in case of an error */ cnt = size / FLASH_ERASE_SIZE; for (i = 0; i < cnt; i++, offs += FLASH_ERASE_SIZE) { while (FlashCheckFsmForReady() != FAPI_STATUS_FSM_READY) { ; } rc = FlashSectorErase(offs); if (rc != FAPI_STATUS_SUCCESS) { rc = -EIO; break; } } irq_unlock(key); flash_cc13xx_cc26xx_cache_restore(vims_mode); k_sem_give(&priv->mutex); return rc; } static int flash_cc13xx_cc26xx_write(const struct device *dev, off_t offs, const void *data, size_t size) { struct flash_priv *priv = dev->data; uint32_t vims_mode; unsigned int key; int rc = 0; if (!size) { return 0; } if ((offs < 0) || (size < 1)) { return -EINVAL; } if ((offs + size) > FLASH_SIZE) { return -EINVAL; } /* * From TI's HAL 'driverlib/flash.h': * * The pui8DataBuffer pointer can not point to flash. 
*/ if ((data >= (void *)FLASH_ADDR) && (data <= (void *)(FLASH_ADDR + FLASH_SIZE))) { return -EINVAL; } if (flash_cc13xx_cc26xx_range_protected(offs, size)) { return -EINVAL; } if (k_sem_take(&priv->mutex, K_FOREVER)) { return -EACCES; } vims_mode = flash_cc13xx_cc26xx_cache_disable(); key = irq_lock(); while (FlashCheckFsmForReady() != FAPI_STATUS_FSM_READY) { ; } rc = FlashProgram((uint8_t *)data, offs, size); if (rc != FAPI_STATUS_SUCCESS) { rc = -EIO; } irq_unlock(key); flash_cc13xx_cc26xx_cache_restore(vims_mode); k_sem_give(&priv->mutex); return rc; } static int flash_cc13xx_cc26xx_read(const struct device *dev, off_t offs, void *data, size_t size) { ARG_UNUSED(dev); if (!size) { return 0; } if ((offs < 0) || (size < 1)) { return -EINVAL; } if ((offs + size) > FLASH_SIZE) { return -EINVAL; } memcpy(data, (void *)offs, size); return 0; } static const struct flash_parameters * flash_cc13xx_cc26xx_get_parameters(const struct device *dev) { ARG_UNUSED(dev); return &flash_cc13xx_cc26xx_parameters; } #if defined(CONFIG_FLASH_PAGE_LAYOUT) static const struct flash_pages_layout dev_layout = { .pages_count = FLASH_SIZE / FLASH_ERASE_SIZE, .pages_size = FLASH_ERASE_SIZE, }; static void flash_cc13xx_cc26xx_layout(const struct device *dev, const struct flash_pages_layout **layout, size_t *layout_size) { *layout = &dev_layout; *layout_size = 1; } #endif /* CONFIG_FLASH_PAGE_LAYOUT */ static const struct flash_driver_api flash_cc13xx_cc26xx_api = { .erase = flash_cc13xx_cc26xx_erase, .write = flash_cc13xx_cc26xx_write, .read = flash_cc13xx_cc26xx_read, .get_parameters = flash_cc13xx_cc26xx_get_parameters, #if defined(CONFIG_FLASH_PAGE_LAYOUT) .page_layout = flash_cc13xx_cc26xx_layout, #endif }; static struct flash_priv flash_data; DEVICE_DT_INST_DEFINE(0, flash_cc13xx_cc26xx_init, NULL, &flash_data, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_cc13xx_cc26xx_api); ```
/content/code_sandbox/drivers/flash/soc_flash_cc13xx_cc26xx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,700
```c /* * */ #define DT_DRV_COMPAT zephyr_kscan_input #include <zephyr/drivers/kscan.h> #include <zephyr/input/input.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(kscan_input, CONFIG_KSCAN_LOG_LEVEL); struct kscan_input_config { const struct device *input_dev; }; struct kscan_input_data { bool enabled; kscan_callback_t callback; int row; int col; bool pressed; }; static void kscan_input_cb(struct input_event *evt, void *user_data) { const struct device *dev = user_data; struct kscan_input_data *data = dev->data; switch (evt->code) { case INPUT_ABS_X: data->col = evt->value; break; case INPUT_ABS_Y: data->row = evt->value; break; case INPUT_BTN_TOUCH: data->pressed = evt->value; break; } if (evt->sync) { LOG_DBG("input event: %3d %3d %d", data->row, data->col, data->pressed); if (data->callback) { data->callback(dev, data->row, data->col, data->pressed); } } } static int kscan_input_configure(const struct device *dev, kscan_callback_t callback) { struct kscan_input_data *data = dev->data; if (!callback) { LOG_ERR("Invalid callback (NULL)"); return -EINVAL; } data->callback = callback; return 0; } static int kscan_input_enable_callback(const struct device *dev) { struct kscan_input_data *data = dev->data; data->enabled = true; return 0; } static int kscan_input_disable_callback(const struct device *dev) { struct kscan_input_data *data = dev->data; data->enabled = false; return 0; } static int kscan_input_init(const struct device *dev) { const struct kscan_input_config *cfg = dev->config; if (!device_is_ready(cfg->input_dev)) { LOG_ERR("Input device not ready"); return -ENODEV; } return 0; } static const struct kscan_driver_api kscan_input_driver_api = { .config = kscan_input_configure, .enable_callback = kscan_input_enable_callback, .disable_callback = kscan_input_disable_callback, }; #define KSCAN_INPUT_INIT(index) \ INPUT_CALLBACK_DEFINE(DEVICE_DT_GET(DT_INST_PARENT(index)), \ kscan_input_cb, \ (void *)DEVICE_DT_INST_GET(index)); \ static const struct 
kscan_input_config kscan_input_config_##index = { \ .input_dev = DEVICE_DT_GET(DT_INST_PARENT(index)), \ }; \ static struct kscan_input_data kscan_input_data_##index; \ DEVICE_DT_INST_DEFINE(index, kscan_input_init, NULL, \ &kscan_input_data_##index, \ &kscan_input_config_##index, \ POST_KERNEL, CONFIG_KSCAN_INIT_PRIORITY, \ &kscan_input_driver_api); DT_INST_FOREACH_STATUS_OKAY(KSCAN_INPUT_INIT) ```
/content/code_sandbox/drivers/kscan/kscan_input.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
667
```unknown # Keyboard scan configuration options menuconfig KSCAN bool "Keyboard scan drivers" help Include Keyboard scan drivers in system config. if KSCAN source "drivers/kscan/Kconfig.input" module = KSCAN module-str = kscan source "subsys/logging/Kconfig.template.log_config" config KSCAN_INIT_PRIORITY int "Keyboard scan driver init priority" default 90 help Keyboard scan device driver initialization priority. endif # KSCAN ```
/content/code_sandbox/drivers/kscan/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
97
```objective-c /* * */ #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #if defined(CONFIG_SOC_SERIES_NRF52X) #define CPU_CLOCK 64000000U #else #define CPU_CLOCK CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC #endif #if defined(CONFIG_SOC_SERIES_NRF52X) || defined(CONFIG_SOC_SERIES_NRF53X) #define FAST_BITBANG_HW_SUPPORT 1 #else #define FAST_BITBANG_HW_SUPPORT 0 #endif static ALWAYS_INLINE void pin_delay_asm(uint32_t delay) { #if defined(CONFIG_CPU_CORTEX_M) __asm volatile ("movs r3, %[p]\n" ".start_%=:\n" "subs r3, #1\n" "bne .start_%=\n" : : [p] "r" (delay) : "r3", "cc" ); #else #warning "Pin delay is not defined" #endif } static ALWAYS_INLINE void swdp_ll_pin_input(void *const base, uint8_t pin) { #if defined(CONFIG_SOC_SERIES_NRF52X) || defined(CONFIG_SOC_SERIES_NRF53X) NRF_GPIO_Type * reg = base; reg->PIN_CNF[pin] = 0b0000; #endif } static ALWAYS_INLINE void swdp_ll_pin_output(void *const base, uint8_t pin) { #if defined(CONFIG_SOC_SERIES_NRF52X) || defined(CONFIG_SOC_SERIES_NRF53X) NRF_GPIO_Type * reg = base; reg->PIN_CNF[pin] = 0b0001; #endif } static ALWAYS_INLINE void swdp_ll_pin_set(void *const base, uint8_t pin) { #if defined(CONFIG_SOC_SERIES_NRF52X) || defined(CONFIG_SOC_SERIES_NRF53X) NRF_GPIO_Type * reg = base; reg->OUTSET = BIT(pin); #endif } static ALWAYS_INLINE void swdp_ll_pin_clr(void *const base, uint8_t pin) { #if defined(CONFIG_SOC_SERIES_NRF52X) || defined(CONFIG_SOC_SERIES_NRF53X) NRF_GPIO_Type * reg = base; reg->OUTCLR = BIT(pin); #endif } static ALWAYS_INLINE uint32_t swdp_ll_pin_get(void *const base, uint8_t pin) { #if defined(CONFIG_SOC_SERIES_NRF52X) || defined(CONFIG_SOC_SERIES_NRF53X) NRF_GPIO_Type * reg = base; return ((reg->IN >> pin) & 1); #else return 0UL; #endif } ```
/content/code_sandbox/drivers/dp/swdp_ll_pin.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
553
```unknown # menuconfig DP_DRIVER bool "Debug Port interface driver [EXPERIMENTAL]" select EXPERIMENTAL help Enable Debug Port interface driver if DP_DRIVER module = DP_DRIVER module-str = dp drv source "subsys/logging/Kconfig.template.log_config" config DP_DRIVER_INIT_PRIO int "Debug Port driver initialization priority" default 80 help Set the initialization priority number. config SWDP_BITBANG_DRIVER bool "Serial Wire Debug Port bit-bang driver" default y depends on DT_HAS_ZEPHYR_SWDP_GPIO_ENABLED depends on GPIO help Serial Wire Debug Port bit-bang driver. endif # DP_DRIVER ```
/content/code_sandbox/drivers/dp/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
145
```c /* * */ /* * This file is based on SW_DP.c from CMSIS-DAP Source (Revision: V2.0.0) * path_to_url */ /* Serial Wire Debug Port interface bit-bang driver */ #define DT_DRV_COMPAT zephyr_swdp_gpio #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/swdp.h> #include "swdp_ll_pin.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(swdp, CONFIG_DP_DRIVER_LOG_LEVEL); #define CLOCK_DELAY(swclk_freq, port_write_cycles) \ ((CPU_CLOCK / 2 / swclk_freq) - port_write_cycles) /* * Default SWCLK frequency in Hz. * sw_clock can be used to overwrite this default value. */ #define SWDP_DEFAULT_SWCLK_FREQUENCY 1000000U #define DELAY_SLOW_CYCLES 3U struct sw_config { struct gpio_dt_spec clk; struct gpio_dt_spec dout; struct gpio_dt_spec dio; struct gpio_dt_spec dnoe; void *dout_reg; void *dio_reg; void *dnoe_reg; struct gpio_dt_spec noe; struct gpio_dt_spec reset; uint32_t port_write_cycles; void *clk_reg; }; struct sw_cfg_data { uint32_t clock_delay; uint8_t turnaround; bool data_phase; bool fast_clock; }; /* * Move A[2:3], RnW, APnDP bits to their position, * add start bit, stop bit(6), park bit and parity bit. * For example, reading IDCODE would be APnDP=0, RnW=1, A2=0, A3=0. * The request would be 0xa5, which is 10100101 in binary. 
* * For more information, see: * - CMSIS-DAP Command Specification, DAP_Transfer * - ARM Debug Interface v5 Architecture Specification */ const static uint8_t sw_request_lut[16] = { 0x81, 0xa3, 0xa5, 0x87, 0xa9, 0x8b, 0x8d, 0xaf, 0xb1, 0x93, 0x95, 0xb7, 0x99, 0xbb, 0xbd, 0x9f }; static ALWAYS_INLINE uint32_t sw_get32bit_parity(uint32_t data) { data ^= data >> 16; data ^= data >> 8; data ^= data >> 4; data ^= data >> 2; data ^= data >> 1; return data & 1U; } /* Set SWCLK DAP hardware output pin to high level */ static ALWAYS_INLINE void pin_swclk_set(const struct device *dev) { const struct sw_config *config = dev->config; if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_set(config->clk_reg, config->clk.pin); } else { gpio_pin_set_dt(&config->clk, 1); } } /* Set SWCLK DAP hardware output pin to low level */ static ALWAYS_INLINE void pin_swclk_clr(const struct device *dev) { const struct sw_config *config = dev->config; if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_clr(config->clk_reg, config->clk.pin); } else { gpio_pin_set_dt(&config->clk, 0); } } /* Set the SWDIO DAP hardware output pin to high level */ static ALWAYS_INLINE void pin_swdio_set(const struct device *dev) { const struct sw_config *config = dev->config; if (config->dout.port) { if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_set(config->dout_reg, config->dout.pin); } else { gpio_pin_set_dt(&config->dout, 1); } } else { if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_set(config->dio_reg, config->dio.pin); } else { gpio_pin_set_dt(&config->dio, 1); } } } /* Set the SWDIO DAP hardware output pin to low level */ static ALWAYS_INLINE void pin_swdio_clr(const struct device *dev) { const struct sw_config *config = dev->config; if (config->dout.port) { if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_clr(config->dout_reg, config->dout.pin); } else { gpio_pin_set_dt(&config->dout, 0); } } else { if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_clr(config->dio_reg, config->dio.pin); } else { gpio_pin_set_dt(&config->dio, 0); } } } /* Set the 
SWDIO DAP hardware output pin to bit level */ static ALWAYS_INLINE void pin_swdio_out(const struct device *dev, const uint32_t bit) { if (bit & 1U) { pin_swdio_set(dev); } else { pin_swdio_clr(dev); } } /* Return current level of the SWDIO DAP hardware input pin */ static ALWAYS_INLINE uint32_t pin_swdio_in(const struct device *dev) { const struct sw_config *config = dev->config; if (FAST_BITBANG_HW_SUPPORT) { return swdp_ll_pin_get(config->dio_reg, config->dio.pin); } else { return gpio_pin_get_dt(&config->dio); } } /* * Configure the SWDIO DAP hardware to output mode. * This is default configuration for every transfer. */ static ALWAYS_INLINE void pin_swdio_out_enable(const struct device *dev) { const struct sw_config *config = dev->config; if (config->dnoe.port) { if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_set(config->dnoe_reg, config->dnoe.pin); } else { gpio_pin_set_dt(&config->dnoe, 1); } } else { if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_output(config->dio_reg, config->dio.pin); } else { gpio_pin_configure_dt(&config->dio, GPIO_OUTPUT_ACTIVE); } } } /* * Configure the SWDIO DAP hardware to input mode. 
*/ static ALWAYS_INLINE void pin_swdio_out_disable(const struct device *dev) { const struct sw_config *config = dev->config; if (config->dnoe.port) { if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_clr(config->dnoe_reg, config->dnoe.pin); } else { gpio_pin_set_dt(&config->dnoe, 0); } } else { if (FAST_BITBANG_HW_SUPPORT) { swdp_ll_pin_input(config->dio_reg, config->dio.pin); } else { gpio_pin_configure_dt(&config->dio, GPIO_INPUT); } } } #define SW_CLOCK_CYCLE(dev, delay) \ do { \ pin_swclk_clr(dev); \ pin_delay_asm(delay); \ pin_swclk_set(dev); \ pin_delay_asm(delay); \ } while (0) #define SW_WRITE_BIT(dev, bit, delay) \ do { \ pin_swdio_out(dev, bit); \ pin_swclk_clr(dev); \ pin_delay_asm(delay); \ pin_swclk_set(dev); \ pin_delay_asm(delay); \ } while (0) #define SW_READ_BIT(dev, bit, delay) \ do { \ pin_swclk_clr(dev); \ pin_delay_asm(delay); \ bit = pin_swdio_in(dev); \ pin_swclk_set(dev); \ pin_delay_asm(delay); \ } while (0) static int sw_output_sequence(const struct device *dev, uint32_t count, const uint8_t *data) { struct sw_cfg_data *sw_data = dev->data; unsigned int key; uint32_t val = 0; /* current byte */ uint32_t n = 0; /* bit counter */ LOG_DBG("writing %u bits", count); LOG_HEXDUMP_DBG(data, count, "sequence bit data"); key = irq_lock(); pin_swdio_out_enable(dev); while (count--) { if (n == 0U) { val = *data++; n = 8U; } if (val & 1U) { pin_swdio_set(dev); } else { pin_swdio_clr(dev); } SW_CLOCK_CYCLE(dev, sw_data->clock_delay); val >>= 1; n--; } irq_unlock(key); return 0; } static int sw_input_sequence(const struct device *dev, uint32_t count, uint8_t *data) { struct sw_cfg_data *sw_data = dev->data; unsigned int key; uint32_t val = 0U; /* current byte */ uint32_t n = 8U; /* bit counter */ uint32_t bit; LOG_DBG("reading %u bits", count); key = irq_lock(); pin_swdio_out_disable(dev); while (count--) { if (n == 0U) { *data++ = val; val = 0; n = 8U; } SW_READ_BIT(dev, bit, sw_data->clock_delay); LOG_DBG("Read bit: %d", bit); val = (val << 1 | bit); n--; } 
*data = val; /* write last byte */ irq_unlock(key); return 0; } static ALWAYS_INLINE void sw_cycle_turnaround(const struct device *dev) { struct sw_cfg_data *sw_data = dev->data; uint32_t n; for (n = sw_data->turnaround; n; n--) { SW_CLOCK_CYCLE(dev, sw_data->clock_delay); } } static int sw_transfer(const struct device *dev, const uint8_t request, uint32_t *const data, const uint8_t idle_cycles, uint8_t *const response) { struct sw_cfg_data *sw_data = dev->data; unsigned int key; uint32_t ack; uint32_t bit; uint32_t val; uint32_t parity = 0; uint32_t n; pin_swdio_out_enable(dev); LOG_DBG("request 0x%02x idle %u", request, idle_cycles); if (!(request & SWDP_REQUEST_RnW)) { LOG_DBG("write data 0x%08x", *data); parity = sw_get32bit_parity(*data); } key = irq_lock(); val = sw_request_lut[request & 0xFU]; for (n = 8U; n; n--) { SW_WRITE_BIT(dev, val, sw_data->clock_delay); val >>= 1; } pin_swdio_out_disable(dev); sw_cycle_turnaround(dev); /* Acknowledge response */ SW_READ_BIT(dev, bit, sw_data->clock_delay); ack = bit << 0; SW_READ_BIT(dev, bit, sw_data->clock_delay); ack |= bit << 1; SW_READ_BIT(dev, bit, sw_data->clock_delay); ack |= bit << 2; if (ack == SWDP_ACK_OK) { /* Data transfer */ if (request & SWDP_REQUEST_RnW) { /* Read data */ val = 0U; for (n = 32U; n; n--) { /* Read RDATA[0:31] */ SW_READ_BIT(dev, bit, sw_data->clock_delay); val >>= 1; val |= bit << 31; } /* Read parity bit */ SW_READ_BIT(dev, bit, sw_data->clock_delay); sw_cycle_turnaround(dev); pin_swdio_out_enable(dev); if ((sw_get32bit_parity(val) ^ bit) & 1U) { ack = SWDP_TRANSFER_ERROR; } if (data) { *data = val; } } else { sw_cycle_turnaround(dev); pin_swdio_out_enable(dev); /* Write data */ val = *data; for (n = 32U; n; n--) { SW_WRITE_BIT(dev, val, sw_data->clock_delay); val >>= 1; } /* Write parity bit */ SW_WRITE_BIT(dev, parity, sw_data->clock_delay); } /* Idle cycles */ n = idle_cycles; if (n) { pin_swdio_out(dev, 0U); for (; n; n--) { SW_CLOCK_CYCLE(dev, sw_data->clock_delay); } } 
pin_swdio_out(dev, 1U); irq_unlock(key); if (request & SWDP_REQUEST_RnW) { LOG_DBG("read data 0x%08x", *data); } if (response) { *response = (uint8_t)ack; } return 0; } if ((ack == SWDP_ACK_WAIT) || (ack == SWDP_ACK_FAULT)) { /* WAIT OR fault response */ if (sw_data->data_phase) { for (n = 32U + 1U + sw_data->turnaround; n; n--) { /* Dummy Read RDATA[0:31] + Parity */ SW_CLOCK_CYCLE(dev, sw_data->clock_delay); } } else { sw_cycle_turnaround(dev); } pin_swdio_out_enable(dev); pin_swdio_out(dev, 1U); irq_unlock(key); LOG_DBG("Transfer wait or fault"); if (response) { *response = (uint8_t)ack; } return 0; } /* Protocol error */ for (n = sw_data->turnaround + 32U + 1U; n; n--) { /* Back off data phase */ SW_CLOCK_CYCLE(dev, sw_data->clock_delay); } pin_swdio_out_enable(dev); pin_swdio_out(dev, 1U); irq_unlock(key); LOG_INF("Protocol error"); if (response) { *response = (uint8_t)ack; } return 0; } static int sw_set_pins(const struct device *dev, const uint8_t pins, const uint8_t value) { const struct sw_config *config = dev->config; LOG_DBG("pins 0x%02x value 0x%02x", pins, value); if (pins & BIT(SWDP_SWCLK_PIN)) { if (value & BIT(SWDP_SWCLK_PIN)) { gpio_pin_set_dt(&config->clk, 1); } else { gpio_pin_set_dt(&config->clk, 0); } } if (config->dout_reg != NULL) { if (pins & BIT(SWDP_SWDIO_PIN)) { if (value & BIT(SWDP_SWDIO_PIN)) { gpio_pin_set_dt(&config->dout, 1); } else { gpio_pin_set_dt(&config->dout, 0); } } } else { if (pins & BIT(SWDP_SWDIO_PIN)) { if (value & BIT(SWDP_SWDIO_PIN)) { gpio_pin_set_dt(&config->dio, 1); } else { gpio_pin_set_dt(&config->dio, 0); } } } if (config->reset.port) { if (pins & BIT(SWDP_nRESET_PIN)) { if (value & BIT(SWDP_nRESET_PIN)) { gpio_pin_set_dt(&config->reset, 1); } else { gpio_pin_set_dt(&config->reset, 0); } } } return 0; } static int sw_get_pins(const struct device *dev, uint8_t *const state) { const struct sw_config *config = dev->config; uint32_t val; if (config->reset.port) { val = gpio_pin_get_dt(&config->reset); *state = val ? 
BIT(SWDP_nRESET_PIN) : 0; } val = gpio_pin_get_dt(&config->dio); *state |= val ? BIT(SWDP_SWDIO_PIN) : 0; val = gpio_pin_get_dt(&config->clk); *state |= val ? BIT(SWDP_SWCLK_PIN) : 0; LOG_DBG("pins state 0x%02x", *state); return 0; } static int sw_set_clock(const struct device *dev, const uint32_t clock) { const struct sw_config *config = dev->config; struct sw_cfg_data *sw_data = dev->data; uint32_t delay; sw_data->fast_clock = false; delay = ((CPU_CLOCK / 2U) + (clock - 1U)) / clock; if (delay > config->port_write_cycles) { delay -= config->port_write_cycles; delay = (delay + (DELAY_SLOW_CYCLES - 1U)) / DELAY_SLOW_CYCLES; } else { delay = 1U; } sw_data->clock_delay = delay; LOG_WRN("cpu_clock %d, delay %d", CPU_CLOCK, sw_data->clock_delay); return 0; } static int sw_configure(const struct device *dev, const uint8_t turnaround, const bool data_phase) { struct sw_cfg_data *sw_data = dev->data; sw_data->turnaround = turnaround; sw_data->data_phase = data_phase; LOG_INF("turnaround %d, data_phase %d", sw_data->turnaround, sw_data->data_phase); return 0; } static int sw_port_on(const struct device *dev) { const struct sw_config *config = dev->config; gpio_pin_set_dt(&config->clk, 1); if (config->dnoe.port) { gpio_pin_set_dt(&config->dnoe, 1); } if (config->dout.port) { gpio_pin_set_dt(&config->dout, 1); } else { int ret; ret = gpio_pin_configure_dt(&config->dio, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } } if (config->noe.port) { gpio_pin_set_dt(&config->noe, 1); } if (config->reset.port) { gpio_pin_set_dt(&config->reset, 1); } return 0; } static int sw_port_off(const struct device *dev) { const struct sw_config *config = dev->config; if (config->dnoe.port) { gpio_pin_set_dt(&config->dnoe, 0); } if (config->dout.port) { gpio_pin_set_dt(&config->dout, 0); } else { int ret; ret = gpio_pin_configure_dt(&config->dio, GPIO_INPUT); if (ret) { return ret; } } if (config->noe.port) { gpio_pin_set_dt(&config->noe, 0); } if (config->reset.port) { 
gpio_pin_set_dt(&config->reset, 1); } return 0; } static int sw_gpio_init(const struct device *dev) { const struct sw_config *config = dev->config; struct sw_cfg_data *sw_data = dev->data; int ret; ret = gpio_pin_configure_dt(&config->clk, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } ret = gpio_pin_configure_dt(&config->dio, GPIO_INPUT); if (ret) { return ret; } if (config->dout.port) { ret = gpio_pin_configure_dt(&config->dout, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } } if (config->dnoe.port) { ret = gpio_pin_configure_dt(&config->dnoe, GPIO_OUTPUT_INACTIVE); if (ret) { return ret; } } if (config->noe.port) { ret = gpio_pin_configure_dt(&config->noe, GPIO_OUTPUT_INACTIVE); if (ret) { return ret; } } if (config->reset.port) { ret = gpio_pin_configure_dt(&config->reset, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } } sw_data->turnaround = 1U; sw_data->data_phase = false; sw_data->fast_clock = false; sw_data->clock_delay = CLOCK_DELAY(SWDP_DEFAULT_SWCLK_FREQUENCY, config->port_write_cycles); return 0; } static struct swdp_api swdp_bitbang_api = { .swdp_output_sequence = sw_output_sequence, .swdp_input_sequence = sw_input_sequence, .swdp_transfer = sw_transfer, .swdp_set_pins = sw_set_pins, .swdp_get_pins = sw_get_pins, .swdp_set_clock = sw_set_clock, .swdp_configure = sw_configure, .swdp_port_on = sw_port_on, .swdp_port_off = sw_port_off, }; #define SW_GPIOS_GET_REG(n, gpios) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(n, gpios), \ (INT_TO_POINTER(DT_REG_ADDR(DT_PHANDLE(DT_DRV_INST(n), gpios)))), \ (NULL)) #define SW_DEVICE_DEFINE(n) \ BUILD_ASSERT((DT_INST_NODE_HAS_PROP(n, dout_gpios)) == \ (DT_INST_NODE_HAS_PROP(n, dnoe_gpios)), \ "Either the dout-gpios or dnoe-gpios property is missing."); \ \ static const struct sw_config sw_cfg_##n = { \ .clk = GPIO_DT_SPEC_INST_GET(n, clk_gpios), \ .clk_reg = SW_GPIOS_GET_REG(n, clk_gpios), \ .dio = GPIO_DT_SPEC_INST_GET(n, dio_gpios), \ .dio_reg = SW_GPIOS_GET_REG(n, dio_gpios), \ .dout = GPIO_DT_SPEC_INST_GET_OR(n, dout_gpios, 
{0}), \ .dout_reg = SW_GPIOS_GET_REG(n, dout_gpios), \ .dnoe = GPIO_DT_SPEC_INST_GET_OR(n, dnoe_gpios, {0}), \ .dnoe_reg = SW_GPIOS_GET_REG(n, dnoe_gpios), \ .noe = GPIO_DT_SPEC_INST_GET_OR(n, noe_gpios, {0}), \ .reset = GPIO_DT_SPEC_INST_GET_OR(n, reset_gpios, {0}), \ .port_write_cycles = DT_INST_PROP(n, port_write_cycles), \ }; \ \ static struct sw_cfg_data sw_data_##n; \ \ DEVICE_DT_INST_DEFINE(n, sw_gpio_init, NULL, \ &sw_data_##n, &sw_cfg_##n, \ POST_KERNEL, CONFIG_DP_DRIVER_INIT_PRIO, \ &swdp_bitbang_api); DT_INST_FOREACH_STATUS_OKAY(SW_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/dp/swdp_bitbang.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,232
```c /* * */ #define DT_DRV_COMPAT ti_bq24190 #include <errno.h> #include "bq24190.h" #include "zephyr/device.h" #include "zephyr/drivers/charger.h" #include "zephyr/drivers/i2c.h" #include "zephyr/kernel.h" #include "zephyr/sys/util.h" #include "zephyr/logging/log.h" #include <zephyr/drivers/gpio.h> LOG_MODULE_REGISTER(ti_bq24190); struct bq24190_config { struct i2c_dt_spec i2c; struct gpio_dt_spec ce_gpio; }; struct bq24190_data { uint8_t ss_reg; unsigned int ichg_ua; unsigned int vreg_uv; enum charger_status state; enum charger_online online; }; static int bq24190_register_reset(const struct device *dev) { const struct bq24190_config *const config = dev->config; int ret, limit = BQ24190_RESET_MAX_TRIES; uint8_t val; ret = i2c_reg_update_byte_dt(&config->i2c, BQ24190_REG_POC, BQ24190_REG_POC_RESET_MASK, BQ24190_REG_POC_RESET_MASK); if (ret) { return ret; } /* * No explicit reset timing characteristcs are provided in the datasheet. * Instead, poll every 100s for 100 attempts to see if the reset request * bit has cleared. 
*/ do { ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_POC, &val); if (ret) { return ret; } if (!(val & BQ24190_REG_POC_RESET_MASK)) { return 0; } k_usleep(100); } while (--limit); return -EIO; } static int bq24190_charger_get_charge_type(const struct device *dev, enum charger_charge_type *charge_type) { const struct bq24190_config *const config = dev->config; uint8_t v; int ret; *charge_type = CHARGER_CHARGE_TYPE_UNKNOWN; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_POC, &v); if (ret) { return ret; } v = FIELD_GET(BQ24190_REG_POC_CHG_CONFIG_MASK, v); if (!v) { *charge_type = CHARGER_CHARGE_TYPE_NONE; } else { ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_CCC, &v); if (ret) { return ret; } v = FIELD_GET(BQ24190_REG_CCC_FORCE_20PCT_MASK, v); if (v) { *charge_type = CHARGER_CHARGE_TYPE_TRICKLE; } else { *charge_type = CHARGER_CHARGE_TYPE_FAST; } } return 0; } static int bq24190_charger_get_health(const struct device *dev, enum charger_health *health) { const struct bq24190_config *const config = dev->config; uint8_t v; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_F, &v); if (ret) { return ret; } if (v & BQ24190_REG_F_NTC_FAULT_MASK) { switch (v >> BQ24190_REG_F_NTC_FAULT_SHIFT & 0x7) { case BQ24190_NTC_FAULT_TS1_COLD: case BQ24190_NTC_FAULT_TS2_COLD: case BQ24190_NTC_FAULT_TS1_TS2_COLD: *health = CHARGER_HEALTH_COLD; break; case BQ24190_NTC_FAULT_TS1_HOT: case BQ24190_NTC_FAULT_TS2_HOT: case BQ24190_NTC_FAULT_TS1_TS2_HOT: *health = CHARGER_HEALTH_HOT; break; default: *health = CHARGER_HEALTH_UNKNOWN; } } else if (v & BQ24190_REG_F_BAT_FAULT_MASK) { *health = CHARGER_HEALTH_OVERVOLTAGE; } else if (v & BQ24190_REG_F_CHRG_FAULT_MASK) { switch (v >> BQ24190_REG_F_CHRG_FAULT_SHIFT & 0x3) { case BQ24190_CHRG_FAULT_INPUT_FAULT: /* * This could be over-voltage or under-voltage * and there's no way to tell which. Instead * of looking foolish and returning 'OVERVOLTAGE' * when its really under-voltage, just return * 'UNSPEC_FAILURE'. 
*/ *health = CHARGER_HEALTH_UNSPEC_FAILURE; break; case BQ24190_CHRG_FAULT_TSHUT: *health = CHARGER_HEALTH_OVERHEAT; break; case BQ24190_CHRG_SAFETY_TIMER: *health = CHARGER_HEALTH_SAFETY_TIMER_EXPIRE; break; default: /* prevent compiler warning */ *health = CHARGER_HEALTH_UNKNOWN; } } else if (v & BQ24190_REG_F_BOOST_FAULT_MASK) { /* * This could be over-current or over-voltage but there's * no way to tell which. Return 'OVERVOLTAGE' since there * isn't an 'OVERCURRENT' value defined that we can return * even if it was over-current. */ *health = CHARGER_HEALTH_OVERVOLTAGE; } else { *health = CHARGER_HEALTH_GOOD; } return 0; } static int bq24190_charger_get_online(const struct device *dev, enum charger_online *online) { const struct bq24190_config *const config = dev->config; uint8_t pg_stat, batfet_disable; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_SS, &pg_stat); if (ret) { return ret; } pg_stat = FIELD_GET(BQ24190_REG_SS_PG_STAT_MASK, pg_stat); ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_MOC, &batfet_disable); if (ret) { return ret; } batfet_disable = FIELD_GET(BQ24190_REG_MOC_BATFET_DISABLE_MASK, batfet_disable); if (pg_stat && !batfet_disable) { *online = CHARGER_ONLINE_FIXED; } else { *online = CHARGER_ONLINE_OFFLINE; } return 0; } static int bq24190_charger_get_status(const struct device *dev, enum charger_status *status) { const struct bq24190_config *const config = dev->config; uint8_t ss_reg, chrg_fault; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_F, &chrg_fault); if (ret) { return ret; } chrg_fault = FIELD_GET(BQ24190_REG_F_CHRG_FAULT_MASK, chrg_fault); ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_SS, &ss_reg); if (ret) { return ret; } /* * The battery must be discharging when any of these are true: * - there is no good power source; * - there is a charge fault. * Could also be discharging when in "supplement mode" but * there is no way to tell when its in that mode. 
*/ if (!(ss_reg & BQ24190_REG_SS_PG_STAT_MASK) || chrg_fault) { *status = CHARGER_STATUS_DISCHARGING; } else { ss_reg = FIELD_GET(BQ24190_REG_SS_CHRG_STAT_MASK, ss_reg); switch (ss_reg) { case BQ24190_CHRG_STAT_NOT_CHRGING: *status = CHARGER_STATUS_NOT_CHARGING; break; case BQ24190_CHRG_STAT_PRECHRG: case BQ24190_CHRG_STAT_FAST_CHRG: *status = CHARGER_STATUS_CHARGING; break; case BQ24190_CHRG_STAT_CHRG_TERM: *status = CHARGER_STATUS_FULL; break; default: return -EIO; } } return 0; } static int bq24190_charger_get_constant_charge_current(const struct device *dev, uint32_t *current_ua) { const struct bq24190_config *const config = dev->config; bool frc_20pct; uint8_t v; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_CCC, &v); if (ret) { return ret; } frc_20pct = v & BQ24190_REG_CCC_FORCE_20PCT_MASK; v = FIELD_GET(BQ24190_REG_CCC_ICHG_MASK, v); *current_ua = (v * BQ24190_REG_CCC_ICHG_STEP_UA) + BQ24190_REG_CCC_ICHG_OFFSET_UA; if (frc_20pct) { *current_ua /= 5; } return 0; } static int bq24190_charger_get_precharge_current(const struct device *dev, uint32_t *current_ua) { const struct bq24190_config *const config = dev->config; bool frc_20pct; uint8_t v; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_CCC, &v); if (ret) { return ret; } frc_20pct = v & BQ24190_REG_CCC_FORCE_20PCT_MASK; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_PCTCC, &v); if (ret) { return ret; } v = FIELD_GET(BQ24190_REG_PCTCC_IPRECHG_MASK, v); *current_ua = (v * BQ24190_REG_PCTCC_IPRECHG_STEP_UA) + BQ24190_REG_PCTCC_IPRECHG_OFFSET_UA; if (frc_20pct) { *current_ua /= 2; } return 0; } static int bq24190_charger_get_charge_term_current(const struct device *dev, uint32_t *current_ua) { const struct bq24190_config *const config = dev->config; uint8_t v; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_PCTCC, &v); if (ret) { return ret; } v = FIELD_GET(BQ24190_REG_PCTCC_ITERM_MASK, v); *current_ua = (v * BQ24190_REG_PCTCC_ITERM_STEP_UA) + 
BQ24190_REG_PCTCC_ITERM_OFFSET_UA; return 0; } static int bq24190_get_constant_charge_voltage(const struct device *dev, uint32_t *voltage_uv) { const struct bq24190_config *const config = dev->config; uint8_t v; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_CVC, &v); if (ret < 0) { return ret; } v = FIELD_GET(BQ24190_REG_CVC_VREG_MASK, v); *voltage_uv = (v * BQ24190_REG_CVC_VREG_STEP_UV) + BQ24190_REG_CVC_VREG_OFFSET_UV; return 0; } static int bq24190_set_constant_charge_current(const struct device *dev, uint32_t current_ua) { const struct bq24190_config *const config = dev->config; uint8_t v; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_CCC, &v); if (ret < 0) { return ret; } v &= BQ24190_REG_CCC_FORCE_20PCT_MASK; if (v) { current_ua *= 5; } current_ua = CLAMP(current_ua, BQ24190_REG_CCC_ICHG_MIN_UA, BQ24190_REG_CCC_ICHG_MAX_UA); v = (current_ua - BQ24190_REG_CCC_ICHG_OFFSET_UA) / BQ24190_REG_CCC_ICHG_STEP_UA; v = FIELD_PREP(BQ24190_REG_CCC_ICHG_MASK, v); return i2c_reg_update_byte_dt(&config->i2c, BQ24190_REG_CCC, BQ24190_REG_CCC_ICHG_MASK, v); } static int bq24190_set_constant_charge_voltage(const struct device *dev, uint32_t voltage_uv) { const struct bq24190_config *const config = dev->config; uint8_t v; voltage_uv = CLAMP(voltage_uv, BQ24190_REG_CVC_VREG_MIN_UV, BQ24190_REG_CVC_VREG_MAX_UV); v = (voltage_uv - BQ24190_REG_CVC_VREG_OFFSET_UV) / BQ24190_REG_CVC_VREG_STEP_UV; v = FIELD_PREP(BQ24190_REG_CVC_VREG_MASK, v); return i2c_reg_update_byte_dt(&config->i2c, BQ24190_REG_CVC, BQ24190_REG_CVC_VREG_MASK, v); } static int bq24190_set_config(const struct device *dev) { struct bq24190_data *data = dev->data; union charger_propval val; int ret; val.const_charge_current_ua = data->ichg_ua; ret = bq24190_set_constant_charge_current(dev, val.const_charge_current_ua); if (ret < 0) { return ret; } val.const_charge_voltage_uv = data->vreg_uv; return bq24190_set_constant_charge_voltage(dev, val.const_charge_voltage_uv); } static int 
bq24190_get_prop(const struct device *dev, charger_prop_t prop, union charger_propval *val) { switch (prop) { case CHARGER_PROP_ONLINE: return bq24190_charger_get_online(dev, &val->online); case CHARGER_PROP_CHARGE_TYPE: return bq24190_charger_get_charge_type(dev, &val->charge_type); case CHARGER_PROP_HEALTH: return bq24190_charger_get_health(dev, &val->health); case CHARGER_PROP_STATUS: return bq24190_charger_get_status(dev, &val->status); case CHARGER_PROP_CONSTANT_CHARGE_CURRENT_UA: return bq24190_charger_get_constant_charge_current(dev, &val->const_charge_current_ua); case CHARGER_PROP_CONSTANT_CHARGE_VOLTAGE_UV: return bq24190_get_constant_charge_voltage(dev, &val->const_charge_voltage_uv); case CHARGER_PROP_PRECHARGE_CURRENT_UA: return bq24190_charger_get_precharge_current(dev, &val->precharge_current_ua); case CHARGER_PROP_CHARGE_TERM_CURRENT_UA: return bq24190_charger_get_charge_term_current(dev, &val->charge_term_current_ua); default: return -ENOTSUP; } } static int bq24190_set_prop(const struct device *dev, charger_prop_t prop, const union charger_propval *val) { switch (prop) { case CHARGER_PROP_CONSTANT_CHARGE_CURRENT_UA: return bq24190_set_constant_charge_current(dev, val->const_charge_current_ua); case CHARGER_PROP_CONSTANT_CHARGE_VOLTAGE_UV: return bq24190_set_constant_charge_voltage(dev, val->const_charge_voltage_uv); default: return -ENOTSUP; } } static int bq24190_init(const struct device *dev) { const struct bq24190_config *const config = dev->config; struct bq24190_data *data = dev->data; uint8_t val; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_VPRS, &val); if (ret) { return ret; } val = FIELD_GET(BQ24190_REG_VPRS_PN_MASK, val); switch (val) { case BQ24190_REG_VPRS_PN_24190: case BQ24190_REG_VPRS_PN_24192: case BQ24190_REG_VPRS_PN_24192I: break; default: LOG_ERR("Error unknown model: 0x%02x\n", val); return -ENODEV; } ret = bq24190_register_reset(dev); if (ret) { return ret; } ret = bq24190_set_config(dev); if (ret) { return 
ret; } return i2c_reg_read_byte_dt(&config->i2c, BQ24190_REG_SS, &data->ss_reg); } static const struct charger_driver_api bq24190_driver_api = { .get_property = bq24190_get_prop, .set_property = bq24190_set_prop, }; #define BQ24190_INIT(inst) \ \ static const struct bq24190_config bq24190_config_##inst = { \ .i2c = I2C_DT_SPEC_INST_GET(inst), \ }; \ \ static struct bq24190_data bq24190_data_##inst = { \ .ichg_ua = DT_INST_PROP(inst, constant_charge_current_max_microamp), \ .vreg_uv = DT_INST_PROP(inst, constant_charge_voltage_max_microvolt), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, bq24190_init, NULL, &bq24190_data_##inst, \ &bq24190_config_##inst, POST_KERNEL, CONFIG_CHARGER_INIT_PRIORITY, \ &bq24190_driver_api); DT_INST_FOREACH_STATUS_OKAY(BQ24190_INIT) ```
/content/code_sandbox/drivers/charger/charger_bq24190.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,963
```unknown config SBS_CHARGER bool "Smart Battery Charger" default y depends on DT_HAS_SBS_SBS_CHARGER_ENABLED select I2C help Enable I2C-based/SMBus-based driver for a Smart Battery Charger. config EMUL_SBS_CHARGER bool "Emulate an SBS 1.1 compliant smart battery charger" default y depends on EMUL depends on SBS_CHARGER help It provides reading which follow a simple sequence, thus allowing test code to check that things are working as expected. ```
/content/code_sandbox/drivers/charger/Kconfig.sbs_charger
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
122
```unknown config CHARGER_MAX20335 bool "MAX20335 battery charger driver" default y depends on DT_HAS_MAXIM_MAX20335_CHARGER_ENABLED select GPIO select I2C select MFD help Enable the MAX20335 battery charger driver. ```
/content/code_sandbox/drivers/charger/Kconfig.max20335
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
59
```c /* * * * BQ25180 Datasheet: path_to_url */ #define DT_DRV_COMPAT ti_bq25180 #include <zephyr/device.h> #include <zephyr/drivers/charger.h> #include <zephyr/drivers/i2c.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(bq25180, CONFIG_CHARGER_LOG_LEVEL); #define BQ25180_STAT0 0x00 #define BQ25180_STAT1 0x01 #define BQ25180_FLAG0 0x02 #define BQ25180_VBAT_CTRL 0x03 #define BQ25180_ICHG_CTRL 0x04 #define BQ25180_IC_CTRL 0x07 #define BQ25180_SHIP_RST 0x09 #define BQ25180_MASK_ID 0x0c #define BQ25180_STAT0_CHG_STAT_MASK GENMASK(6, 5) #define BQ25180_STAT0_CHG_STAT_NOT_CHARGING 0x00 #define BQ25180_STAT0_CHG_STAT_CONSTANT_CURRENT 0x01 #define BQ25180_STAT0_CHG_STAT_CONSTANT_VOLTAGE 0x02 #define BQ25180_STAT0_CHG_STAT_DONE 0x03 #define BQ25180_STAT0_VIN_PGOOD_STAT BIT(0) #define BQ25180_ICHG_CHG_DIS BIT(7) #define BQ25180_ICHG_MSK GENMASK(6, 0) #define BQ25180_WATCHDOG_SEL_1_MSK GENMASK(1, 0) #define BQ25180_WATCHDOG_DISABLE 0x03 #define BQ25180_DEVICE_ID_MSK GENMASK(3, 0) #define BQ25180_DEVICE_ID 0x00 #define BQ25180_SHIP_RST_EN_RST_SHIP_MSK GENMASK(6, 5) #define BQ25180_SHIP_RST_EN_RST_SHIP_ADAPTER 0x20 #define BQ25180_SHIP_RST_EN_RST_SHIP_BUTTON 0x40 /* Charging current limits */ #define BQ25180_CURRENT_MIN_MA 5 #define BQ25180_CURRENT_MAX_MA 1000 struct bq25180_config { struct i2c_dt_spec i2c; uint32_t initial_current_microamp; }; /* * For ICHG <= 35mA = ICHGCODE + 5mA * For ICHG > 35mA = 40 + ((ICHGCODE-31)*10)mA. * Maximum programmable current = 1000mA * * Return: value between 0 and 127, negative on error. 
*/ static int bq25180_ma_to_ichg(uint32_t current_ma, uint8_t *ichg) { if (!IN_RANGE(current_ma, BQ25180_CURRENT_MIN_MA, BQ25180_CURRENT_MAX_MA)) { LOG_WRN("charging current out of range: %dmA, " "clamping to the nearest limit", current_ma); } current_ma = CLAMP(current_ma, BQ25180_CURRENT_MIN_MA, BQ25180_CURRENT_MAX_MA); if (current_ma <= 35) { *ichg = current_ma - 5; return 0; } *ichg = (current_ma - 40) / 10 + 31; return 0; } static uint32_t bq25180_ichg_to_ma(uint8_t ichg) { ichg &= BQ25180_ICHG_MSK; if (ichg <= 30) { return (ichg + 5); } return (ichg - 31) * 10 + 40; } static int bq25183_charge_enable(const struct device *dev, const bool enable) { const struct bq25180_config *cfg = dev->config; uint8_t value = enable ? 0 : BQ25180_ICHG_CHG_DIS; int ret; ret = i2c_reg_update_byte_dt(&cfg->i2c, BQ25180_ICHG_CTRL, BQ25180_ICHG_CHG_DIS, value); if (ret < 0) { return ret; } return 0; } static int bq25180_set_charge_current(const struct device *dev, uint32_t const_charge_current_ua) { const struct bq25180_config *cfg = dev->config; uint8_t val; int ret; ret = bq25180_ma_to_ichg(const_charge_current_ua / 1000, &val); if (ret < 0) { return ret; } ret = i2c_reg_update_byte_dt(&cfg->i2c, BQ25180_ICHG_CTRL, BQ25180_ICHG_MSK, val); if (ret < 0) { return ret; } return 0; } static int bq25180_get_charge_current(const struct device *dev, uint32_t *const_charge_current_ua) { const struct bq25180_config *cfg = dev->config; uint8_t val; int ret; ret = i2c_reg_read_byte_dt(&cfg->i2c, BQ25180_ICHG_CTRL, &val); if (ret < 0) { return ret; } *const_charge_current_ua = bq25180_ichg_to_ma(val) * 1000; return 0; } static int bq25180_get_online(const struct device *dev, enum charger_online *online) { const struct bq25180_config *cfg = dev->config; uint8_t val; int ret; ret = i2c_reg_read_byte_dt(&cfg->i2c, BQ25180_STAT0, &val); if (ret < 0) { return ret; } if ((val & BQ25180_STAT0_VIN_PGOOD_STAT) != 0x00) { *online = CHARGER_ONLINE_FIXED; } else { *online = CHARGER_ONLINE_OFFLINE; } 
return 0; } static int bq25180_get_status(const struct device *dev, enum charger_status *status) { const struct bq25180_config *cfg = dev->config; uint8_t stat0; uint8_t ichg_ctrl; int ret; ret = i2c_reg_read_byte_dt(&cfg->i2c, BQ25180_STAT0, &stat0); if (ret < 0) { return ret; } if ((stat0 & BQ25180_STAT0_VIN_PGOOD_STAT) == 0x00) { *status = CHARGER_STATUS_DISCHARGING; return 0; } ret = i2c_reg_read_byte_dt(&cfg->i2c, BQ25180_ICHG_CTRL, &ichg_ctrl); if (ret < 0) { return ret; } if ((ichg_ctrl & BQ25180_ICHG_CHG_DIS) != 0x00) { *status = CHARGER_STATUS_NOT_CHARGING; return 0; } switch (FIELD_GET(BQ25180_STAT0_CHG_STAT_MASK, stat0)) { case BQ25180_STAT0_CHG_STAT_NOT_CHARGING: *status = CHARGER_STATUS_NOT_CHARGING; break; case BQ25180_STAT0_CHG_STAT_CONSTANT_CURRENT: case BQ25180_STAT0_CHG_STAT_CONSTANT_VOLTAGE: *status = CHARGER_STATUS_CHARGING; break; case BQ25180_STAT0_CHG_STAT_DONE: *status = CHARGER_STATUS_FULL; break; } return 0; } static int bq25180_get_prop(const struct device *dev, charger_prop_t prop, union charger_propval *val) { switch (prop) { case CHARGER_PROP_ONLINE: return bq25180_get_online(dev, &val->online); case CHARGER_PROP_STATUS: return bq25180_get_status(dev, &val->status); case CHARGER_PROP_CONSTANT_CHARGE_CURRENT_UA: return bq25180_get_charge_current(dev, &val->const_charge_current_ua); default: return -ENOTSUP; } } static int bq25180_set_prop(const struct device *dev, charger_prop_t prop, const union charger_propval *val) { switch (prop) { case CHARGER_PROP_CONSTANT_CHARGE_CURRENT_UA: return bq25180_set_charge_current(dev, val->const_charge_current_ua); default: return -ENOTSUP; } } static const struct charger_driver_api bq25180_api = { .get_property = bq25180_get_prop, .set_property = bq25180_set_prop, .charge_enable = bq25183_charge_enable, }; static int bq25180_init(const struct device *dev) { const struct bq25180_config *cfg = dev->config; uint8_t val; int ret; ret = i2c_reg_read_byte_dt(&cfg->i2c, BQ25180_MASK_ID, &val); if (ret < 0) { 
return ret; } val &= BQ25180_DEVICE_ID_MSK; if (val != BQ25180_DEVICE_ID) { LOG_ERR("Invalid device id: %02x", val); return -EINVAL; } /* Disable the watchdog */ ret = i2c_reg_update_byte_dt(&cfg->i2c, BQ25180_IC_CTRL, BQ25180_WATCHDOG_SEL_1_MSK, BQ25180_WATCHDOG_DISABLE); if (ret < 0) { return ret; } if (cfg->initial_current_microamp > 0) { ret = bq25180_set_charge_current(dev, cfg->initial_current_microamp); if (ret < 0) { return ret; } } return 0; } #define CHARGER_BQ25180_INIT(inst) \ static const struct bq25180_config bq25180_config_##inst = { \ .i2c = I2C_DT_SPEC_INST_GET(inst), \ .initial_current_microamp = DT_INST_PROP( \ inst, constant_charge_current_max_microamp), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, bq25180_init, NULL, NULL, \ &bq25180_config_##inst, POST_KERNEL, \ CONFIG_CHARGER_INIT_PRIORITY, \ &bq25180_api); DT_INST_FOREACH_STATUS_OKAY(CHARGER_BQ25180_INIT) ```
/content/code_sandbox/drivers/charger/charger_bq25180.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,262
```c /*
 * Syscall verification handlers for the charger driver API.
 *
 * Each z_vrfy_* wrapper validates the user-mode arguments (driver
 * capability check via K_OOPS, user<->kernel buffer copies) before
 * forwarding to the z_impl_* implementation.
 */

#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/charger.h>

static inline int z_vrfy_charger_get_prop(const struct device *dev, const charger_prop_t prop,
					  union charger_propval *val)
{
	/* Kernel-side scratch value; the result is copied out only after the
	 * implementation has filled it in.
	 */
	union charger_propval k_val;

	K_OOPS(K_SYSCALL_DRIVER_CHARGER(dev, get_property));

	int ret = z_impl_charger_get_prop(dev, prop, &k_val);

	K_OOPS(k_usermode_to_copy(val, &k_val, sizeof(union charger_propval)));

	return ret;
}

#include <zephyr/syscalls/charger_get_prop_mrsh.c>

static inline int z_vrfy_charger_set_prop(const struct device *dev, const charger_prop_t prop,
					  const union charger_propval *val)
{
	/* Copy the user-supplied value in before touching it. */
	union charger_propval k_val;

	K_OOPS(K_SYSCALL_DRIVER_CHARGER(dev, set_property));

	K_OOPS(k_usermode_from_copy(&k_val, val, sizeof(union charger_propval)));

	return z_impl_charger_set_prop(dev, prop, &k_val);
}

#include <zephyr/syscalls/charger_set_prop_mrsh.c>

static inline int z_vrfy_charger_charge_enable(const struct device *dev, const bool enable)
{
	/* No user pointers involved; only the capability check is needed. */
	K_OOPS(K_SYSCALL_DRIVER_CHARGER(dev, charge_enable));

	return z_impl_charger_charge_enable(dev, enable);
}

#include <zephyr/syscalls/charger_charge_enable_mrsh.c> ```
/content/code_sandbox/drivers/charger/charger_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
304
```c /* * * * Emulator for SBS 1.1 compliant smart battery charger. */ #define DT_DRV_COMPAT sbs_sbs_charger #include <zephyr/device.h> #include <zephyr/drivers/emul.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/i2c_emul.h> #include <zephyr/logging/log.h> #include <zephyr/sys/byteorder.h> #include "sbs_charger.h" LOG_MODULE_REGISTER(sbs_sbs_charger); /** Static configuration for the emulator */ struct sbs_charger_emul_cfg { /** I2C address of emulator */ uint16_t addr; }; /** Run-time data used by the emulator */ struct sbs_charger_emul_data { uint16_t reg_charger_mode; }; static int emul_sbs_charger_reg_write(const struct emul *target, int reg, int val) { struct sbs_charger_emul_data *data = target->data; LOG_INF("write %x = %x", reg, val); switch (reg) { case SBS_CHARGER_REG_CHARGER_MODE: data->reg_charger_mode = val; break; default: LOG_ERR("Unknown write %x", reg); return -EIO; } return 0; } static int emul_sbs_charger_reg_read(const struct emul *target, int reg, int *val) { switch (reg) { case SBS_CHARGER_REG_SPEC_INFO: case SBS_CHARGER_REG_CHARGER_MODE: case SBS_CHARGER_REG_STATUS: case SBS_CHARGER_REG_ALARM_WARNING: /* Arbitrary stub value. 
*/ *val = 1; break; default: LOG_ERR("Unknown register 0x%x read", reg); return -EIO; } LOG_INF("read 0x%x = 0x%x", reg, *val); return 0; } static int sbs_charger_emul_transfer_i2c(const struct emul *target, struct i2c_msg *msgs, int num_msgs, int addr) { /* Largely copied from emul_sbs_gauge.c */ struct sbs_charger_emul_data *data; unsigned int val; int reg; int rc; data = target->data; i2c_dump_msgs_rw(target->dev, msgs, num_msgs, addr, false); switch (num_msgs) { case 2: if (msgs->flags & I2C_MSG_READ) { LOG_ERR("Unexpected read"); return -EIO; } if (msgs->len != 1) { LOG_ERR("Unexpected msg0 length %d", msgs->len); return -EIO; } reg = msgs->buf[0]; /* Now process the 'read' part of the message */ msgs++; if (msgs->flags & I2C_MSG_READ) { switch (msgs->len - 1) { case 1: rc = emul_sbs_charger_reg_read(target, reg, &val); if (rc) { /* Return before writing bad value to message buffer */ return rc; } /* SBS uses SMBus, which sends data in little-endian format. */ sys_put_le16(val, msgs->buf); break; default: LOG_ERR("Unexpected msg1 length %d", msgs->len); return -EIO; } } else { /* We write a word (2 bytes by the SBS spec) */ if (msgs->len != 2) { LOG_ERR("Unexpected msg1 length %d", msgs->len); } uint16_t value = sys_get_le16(msgs->buf); rc = emul_sbs_charger_reg_write(target, reg, value); } break; default: LOG_ERR("Invalid number of messages: %d", num_msgs); return -EIO; } return rc; } static const struct i2c_emul_api sbs_charger_emul_api_i2c = { .transfer = sbs_charger_emul_transfer_i2c, }; static int emul_sbs_sbs_charger_init(const struct emul *target, const struct device *parent) { ARG_UNUSED(target); ARG_UNUSED(parent); return 0; } /* * Main instantiation macro. 
SBS Charger Emulator only implemented for I2C */ #define SBS_CHARGER_EMUL(n) \ static struct sbs_charger_emul_data sbs_charger_emul_data_##n; \ \ static const struct sbs_charger_emul_cfg sbs_charger_emul_cfg_##n = { \ .addr = DT_INST_REG_ADDR(n), \ }; \ EMUL_DT_INST_DEFINE(n, emul_sbs_sbs_charger_init, &sbs_charger_emul_data_##n, \ &sbs_charger_emul_cfg_##n, &sbs_charger_emul_api_i2c, NULL) DT_INST_FOREACH_STATUS_OKAY(SBS_CHARGER_EMUL) ```
/content/code_sandbox/drivers/charger/emul_sbs_charger.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,099
```objective-c /*
 * Register and bit definitions shared by the SBS (Smart Battery System)
 * charger driver and its emulator.
 */

/* SBS charger command codes (register addresses) */
#define SBS_CHARGER_REG_SPEC_INFO     0x11
#define SBS_CHARGER_REG_CHARGER_MODE  0x12
#define SBS_CHARGER_REG_STATUS        0x13
#define SBS_CHARGER_REG_ALARM_WARNING 0x16

/* ChargerMode register bits */
#define SBS_CHARGER_MODE_INHIBIT_CHARGE BIT(0)

/* ChargerStatus register bits */
#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(0)
#define SBS_CHARGER_STATUS_RES_COLD         BIT(9)
#define SBS_CHARGER_STATUS_RES_HOT          BIT(10)
#define SBS_CHARGER_STATUS_BATTERY_PRESENT  BIT(14)
#define SBS_CHARGER_STATUS_AC_PRESENT       BIT(15)

/* Polling interval — presumably milliseconds; TODO confirm against the
 * driver's use of this constant.
 */
#define SBS_CHARGER_POLL_TIME 500 ```
/content/code_sandbox/drivers/charger/sbs_charger.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
155
```unknown # config CHARGER_BQ25180 bool "BQ25180 Battery Charger" default y depends on DT_HAS_TI_BQ25180_ENABLED select I2C help Enable BQ25180 battery charger driver. ```
/content/code_sandbox/drivers/charger/Kconfig.bq25180
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
51
```objective-c /*
 * Register map and field definitions for the TI BQ24190 battery charger.
 * Each register section lists its address followed by MASK/SHIFT pairs
 * for the fields it contains, plus field value constants and the
 * uA/uV step/offset factors used to encode physical quantities.
 */

#ifndef ZEPHYR_DRIVERS_CHARGER_BQ24190_H_
#define ZEPHYR_DRIVERS_CHARGER_BQ24190_H_

/* Input Source Control */
#define BQ24190_REG_ISC 0x00
#define BQ24190_REG_ISC_EN_HIZ_MASK BIT(7)
#define BQ24190_REG_ISC_EN_HIZ_SHIFT 7
#define BQ24190_REG_ISC_VINDPM_MASK GENMASK(6, 3)
#define BQ24190_REG_ISC_VINDPM_SHIFT 3
#define BQ24190_REG_ISC_IINLIM_MASK GENMASK(2, 0)

/* Power-On Configuration */
#define BQ24190_REG_POC 0x01
#define BQ24190_REG_POC_RESET_MASK BIT(7)
#define BQ24190_REG_POC_RESET_SHIFT 7
#define BQ24190_RESET_MAX_TRIES 100
#define BQ24190_REG_POC_WDT_RESET_MASK BIT(6)
#define BQ24190_REG_POC_WDT_RESET_SHIFT 6
#define BQ24190_REG_POC_CHG_CONFIG_MASK GENMASK(5, 4)
#define BQ24190_REG_POC_CHG_CONFIG_SHIFT 4
#define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0
#define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1
#define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2
#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3
#define BQ24190_REG_POC_SYS_MIN_MASK GENMASK(3, 1)
#define BQ24190_REG_POC_SYS_MIN_SHIFT 1
#define BQ24190_REG_POC_SYS_MIN_MIN_UV 3000000
#define BQ24190_REG_POC_SYS_MIN_MAX_UV 3700000
#define BQ24190_REG_POC_BOOST_LIM_MASK BIT(0)
#define BQ24190_REG_POC_BOOST_LIM_SHIFT 0

/* Charge Current Control */
#define BQ24190_REG_CCC 0x02
#define BQ24190_REG_CCC_ICHG_MASK GENMASK(7, 2)
#define BQ24190_REG_CCC_ICHG_SHIFT 2
#define BQ24190_REG_CCC_ICHG_STEP_UA 64000
#define BQ24190_REG_CCC_ICHG_OFFSET_UA 512000
#define BQ24190_REG_CCC_ICHG_MIN_UA BQ24190_REG_CCC_ICHG_OFFSET_UA
#define BQ24190_REG_CCC_ICHG_MAX_UA 4544000
#define BQ24190_REG_CCC_FORCE_20PCT_MASK BIT(0)
#define BQ24190_REG_CCC_FORCE_20PCT_SHIFT 0

/* Pre-charge/Termination Current Cntl */
#define BQ24190_REG_PCTCC 0x03
#define BQ24190_REG_PCTCC_IPRECHG_MASK GENMASK(7, 4)
#define BQ24190_REG_PCTCC_IPRECHG_SHIFT 4
#define BQ24190_REG_PCTCC_IPRECHG_STEP_UA 128000
#define BQ24190_REG_PCTCC_IPRECHG_OFFSET_UA 128000
#define BQ24190_REG_PCTCC_IPRECHG_MIN_UA BQ24190_REG_PCTCC_IPRECHG_OFFSET_UA
#define BQ24190_REG_PCTCC_IPRECHG_MAX_UA 2048000
#define BQ24190_REG_PCTCC_ITERM_MASK GENMASK(3, 0)
#define BQ24190_REG_PCTCC_ITERM_SHIFT 0
#define BQ24190_REG_PCTCC_ITERM_STEP_UA 128000
#define BQ24190_REG_PCTCC_ITERM_OFFSET_UA 128000
#define BQ24190_REG_PCTCC_ITERM_MIN_UA BQ24190_REG_PCTCC_ITERM_OFFSET_UA
#define BQ24190_REG_PCTCC_ITERM_MAX_UA 2048000

/* Charge Voltage Control */
#define BQ24190_REG_CVC 0x04
#define BQ24190_REG_CVC_VREG_MASK GENMASK(7, 2)
#define BQ24190_REG_CVC_VREG_SHIFT 2
#define BQ24190_REG_CVC_VREG_STEP_UV 16000
#define BQ24190_REG_CVC_VREG_OFFSET_UV 3504000
#define BQ24190_REG_CVC_VREG_MIN_UV BQ24190_REG_CVC_VREG_OFFSET_UV
#define BQ24190_REG_CVC_VREG_MAX_UV 4400000
#define BQ24190_REG_CVC_BATLOWV_MASK BIT(1)
#define BQ24190_REG_CVC_BATLOWV_SHIFT 1
#define BQ24190_REG_CVC_VRECHG_MASK BIT(0)
#define BQ24190_REG_CVC_VRECHG_SHIFT 0

/* Charge Term/Timer Control */
#define BQ24190_REG_CTTC 0x05
#define BQ24190_REG_CTTC_EN_TERM_MASK BIT(7)
#define BQ24190_REG_CTTC_EN_TERM_SHIFT 7
#define BQ24190_REG_CTTC_TERM_STAT_MASK BIT(6)
#define BQ24190_REG_CTTC_TERM_STAT_SHIFT 6
#define BQ24190_REG_CTTC_WATCHDOG_MASK GENMASK(5, 4)
#define BQ24190_REG_CTTC_WATCHDOG_SHIFT 4
#define BQ24190_REG_CTTC_EN_TIMER_MASK BIT(3)
#define BQ24190_REG_CTTC_EN_TIMER_SHIFT 3
#define BQ24190_REG_CTTC_CHG_TIMER_MASK GENMASK(2, 1)
#define BQ24190_REG_CTTC_CHG_TIMER_SHIFT 1
#define BQ24190_REG_CTTC_JEITA_ISET_MASK BIT(0)
#define BQ24190_REG_CTTC_JEITA_ISET_SHIFT 0

/* IR Comp/Thermal Regulation Control */
#define BQ24190_REG_ICTRC 0x06
#define BQ24190_REG_ICTRC_BAT_COMP_MASK GENMASK(7, 5)
#define BQ24190_REG_ICTRC_BAT_COMP_SHIFT 5
#define BQ24190_REG_ICTRC_VCLAMP_MASK GENMASK(4, 2)
#define BQ24190_REG_ICTRC_VCLAMP_SHIFT 2
#define BQ24190_REG_ICTRC_TREG_MASK GENMASK(1, 0)
#define BQ24190_REG_ICTRC_TREG_SHIFT 0

/* Misc. Operation Control */
#define BQ24190_REG_MOC 0x07
#define BQ24190_REG_MOC_DPDM_EN_MASK BIT(7)
#define BQ24190_REG_MOC_DPDM_EN_SHIFT 7
#define BQ24190_REG_MOC_TMR2X_EN_MASK BIT(6)
#define BQ24190_REG_MOC_TMR2X_EN_SHIFT 6
#define BQ24190_REG_MOC_BATFET_DISABLE_MASK BIT(5)
#define BQ24190_REG_MOC_BATFET_DISABLE_SHIFT 5
#define BQ24190_REG_MOC_JEITA_VSET_MASK BIT(4)
#define BQ24190_REG_MOC_JEITA_VSET_SHIFT 4
#define BQ24190_REG_MOC_INT_MASK_MASK GENMASK(1, 0)
#define BQ24190_REG_MOC_INT_MASK_SHIFT 0

/* System Status */
#define BQ24190_REG_SS 0x08
#define BQ24190_REG_SS_VBUS_STAT_MASK GENMASK(7, 6)
#define BQ24190_REG_SS_VBUS_STAT_SHIFT 6
#define BQ24190_REG_SS_CHRG_STAT_MASK GENMASK(5, 4)
#define BQ24190_REG_SS_CHRG_STAT_SHIFT 4
#define BQ24190_CHRG_STAT_NOT_CHRGING 0x0
#define BQ24190_CHRG_STAT_PRECHRG 0x1
#define BQ24190_CHRG_STAT_FAST_CHRG 0x2
#define BQ24190_CHRG_STAT_CHRG_TERM 0x3
#define BQ24190_REG_SS_DPM_STAT_MASK BIT(3)
#define BQ24190_REG_SS_DPM_STAT_SHIFT 3
#define BQ24190_REG_SS_PG_STAT_MASK BIT(2)
#define BQ24190_REG_SS_PG_STAT_SHIFT 2
#define BQ24190_REG_SS_THERM_STAT_MASK BIT(1)
#define BQ24190_REG_SS_THERM_STAT_SHIFT 1
#define BQ24190_REG_SS_VSYS_STAT_MASK BIT(0)
#define BQ24190_REG_SS_VSYS_STAT_SHIFT 0

/* Fault */
#define BQ24190_REG_F 0x09
#define BQ24190_REG_F_WATCHDOG_FAULT_MASK BIT(7)
#define BQ24190_REG_F_WATCHDOG_FAULT_SHIFT 7
#define BQ24190_REG_F_BOOST_FAULT_MASK BIT(6)
#define BQ24190_REG_F_BOOST_FAULT_SHIFT 6
#define BQ24190_REG_F_CHRG_FAULT_MASK GENMASK(5, 4)
#define BQ24190_REG_F_CHRG_FAULT_SHIFT 4
#define BQ24190_CHRG_FAULT_INPUT_FAULT 0x1
#define BQ24190_CHRG_FAULT_TSHUT 0x2
#define BQ24190_CHRG_SAFETY_TIMER 0x3
#define BQ24190_REG_F_BAT_FAULT_MASK BIT(3)
#define BQ24190_REG_F_BAT_FAULT_SHIFT 3
#define BQ24190_REG_F_NTC_FAULT_MASK GENMASK(2, 0)
#define BQ24190_REG_F_NTC_FAULT_SHIFT 0
#define BQ24190_NTC_FAULT_TS1_COLD 0x1
#define BQ24190_NTC_FAULT_TS1_HOT 0x2
#define BQ24190_NTC_FAULT_TS2_COLD 0x3
#define BQ24190_NTC_FAULT_TS2_HOT 0x4
#define BQ24190_NTC_FAULT_TS1_TS2_COLD 0x5
#define BQ24190_NTC_FAULT_TS1_TS2_HOT 0x6

/* Vendor/Part/Revision Status */
#define BQ24190_REG_VPRS 0x0A
#define BQ24190_REG_VPRS_PN_MASK GENMASK(5, 3)
#define BQ24190_REG_VPRS_PN_SHIFT 3
#define BQ24190_REG_VPRS_PN_24190 0x4
#define BQ24190_REG_VPRS_PN_24192 0x5 /* Also 24193, 24196 */
#define BQ24190_REG_VPRS_PN_24192I 0x3
#define BQ24190_REG_VPRS_TS_PROFILE_MASK BIT(2)
#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT 2
#define BQ24190_REG_VPRS_DEV_REG_MASK GENMASK(1, 0)
#define BQ24190_REG_VPRS_DEV_REG_SHIFT 0

#endif ```
/content/code_sandbox/drivers/charger/bq24190.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,487
```c /* * */ #define DT_DRV_COMPAT maxim_max20335_charger #include <zephyr/device.h> #include <zephyr/drivers/charger.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/i2c.h> #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include <zephyr/sys/linear_range.h> #include "zephyr/logging/log.h" LOG_MODULE_REGISTER(max20335_charger); #define MAX20335_REG_STATUSA 0x02 #define MAX20335_REG_STATUSB 0x03 #define MAX20335_REG_INTA 0x05 #define MAX20335_REG_INTB 0x06 #define MAX20335_REG_INTMASKA 0x07 #define MAX20335_REG_INTMASKB 0x08 #define MAX20335_REG_ILIMCNTL 0x09 #define MAX20335_REG_CHGCNTLA 0x0A #define MAX20335_REG_THRMCFG 0x18 #define MAX20335_INTA_USBOK_MASK BIT(3) #define MAX20335_INTA_CHGSTAT_MASK BIT(6) #define MAX20335_ILIMCNTL_ILIMCNTL_MASK GENMASK(1, 0) #define MAX20335_ILIMCNTL_SYSMIN_MASK GENMASK(7, 5) #define MAX20335_STATUSA_CHGSTAT_MASK GENMASK(2, 0) #define MAX20335_STATUSB_USBOK_MASK BIT(3) #define MAX20335_CHGCNTLA_BATRECHG_MASK GENMASK(6, 5) #define MAX20335_CHGCNTLA_BATREG_MASK GENMASK(4, 1) #define MAX20335_CHGCNTLA_CHRGEN_MASK BIT(0) #define MAX20335_CHGCNTLA_CHRGEN BIT(0) #define MAX20335_THRMCFG_THERMEN_MASK GENMASK(1, 0) #define MAX20335_REG_CVC_VREG_MIN_UV 4050000U #define MAX20335_REG_CVC_VREG_STEP_UV 50000U #define MAX20335_REG_CVC_VREG_MIN_IDX 0x0U #define MAX20335_REG_CVC_VREG_MAX_IDX 0x0BU #define MAX20335_ILIMCNTL_SYSMIN_MIN_UV 3600000U #define MAX20335_ILIMCNTL_SYSMIN_STEP_UV 100000U #define MAX20335_ILIMCNTL_SYSMIN_MIN_IDX 0x0U #define MAX20335_ILIMCNTL_SYSMIN_MAX_IDX 0x7U #define INT_ENABLE_DELAY K_MSEC(500) enum charger_max20335_therm_mode { MAX20335_THERM_MODE_DISABLED, MAX20335_THERM_MODE_THERMISTOR, MAX20335_THERM_MODE_JEITA_1, MAX20335_THERM_MODE_JEITA_2, MAX20335_THERM_MODE_UNKNOWN, }; struct charger_max20335_config { struct i2c_dt_spec bus; struct gpio_dt_spec int_gpio; uint32_t max_vreg_uv; uint32_t max_ichgin_to_sys_ua; uint32_t min_vsys_uv; uint32_t recharge_threshold_uv; char *therm_mon_mode; }; struct 
charger_max20335_data { const struct device *dev; struct gpio_callback gpio_cb; struct k_work int_routine_work; struct k_work_delayable int_enable_work; enum charger_status charger_status; enum charger_online charger_online; charger_status_notifier_t charger_status_notifier; charger_online_notifier_t charger_online_notifier; bool charger_enabled; uint32_t charge_voltage_uv; }; static const struct linear_range charger_uv_range = LINEAR_RANGE_INIT(MAX20335_REG_CVC_VREG_MIN_UV, MAX20335_REG_CVC_VREG_STEP_UV, MAX20335_REG_CVC_VREG_MIN_IDX, MAX20335_REG_CVC_VREG_MAX_IDX); static const struct linear_range system_uv_range = LINEAR_RANGE_INIT(MAX20335_ILIMCNTL_SYSMIN_MIN_UV, MAX20335_ILIMCNTL_SYSMIN_STEP_UV, MAX20335_ILIMCNTL_SYSMIN_MIN_IDX, MAX20335_ILIMCNTL_SYSMIN_MAX_IDX); static int max20335_get_charger_status(const struct device *dev, enum charger_status *status) { enum { MAX20335_CHARGER_OFF, MAX20335_CHARGING_SUSPENDED_DUE_TO_TEMPERATURE, MAX20335_PRE_CHARGE_IN_PROGRESS, MAX20335_FAST_CHARGE_IN_PROGRESS_1, MAX20335_FAST_CHARGE_IN_PROGRESS_2, MAX20335_MAINTAIN_CHARGE_IN_PROGRESS, MAX20335_MAIN_CHARGER_TIMER_DONE, MAX20335_CHARGER_FAULT_CONDITION, }; const struct charger_max20335_config *const config = dev->config; uint8_t val; int ret; ret = i2c_reg_read_byte_dt(&config->bus, MAX20335_REG_STATUSA, &val); if (ret) { return ret; } val = FIELD_GET(MAX20335_STATUSA_CHGSTAT_MASK, val); switch (val) { case MAX20335_CHARGER_OFF: __fallthrough; case MAX20335_CHARGING_SUSPENDED_DUE_TO_TEMPERATURE: __fallthrough; case MAX20335_CHARGER_FAULT_CONDITION: *status = CHARGER_STATUS_NOT_CHARGING; break; case MAX20335_PRE_CHARGE_IN_PROGRESS: __fallthrough; case MAX20335_FAST_CHARGE_IN_PROGRESS_1: __fallthrough; case MAX20335_FAST_CHARGE_IN_PROGRESS_2: __fallthrough; case MAX20335_MAINTAIN_CHARGE_IN_PROGRESS: *status = CHARGER_STATUS_CHARGING; break; case MAX20335_MAIN_CHARGER_TIMER_DONE: *status = CHARGER_STATUS_FULL; break; default: *status = CHARGER_STATUS_UNKNOWN; break; }; return 
0; } static int max20335_get_charger_online(const struct device *dev, enum charger_online *online) { enum { MAX20335_CHGIN_IN_NOT_PRESENT_OR_INVALID, MAX20335_CHGIN_IN_PRESENT_AND_VALID, }; const struct charger_max20335_config *const config = dev->config; uint8_t val; int ret; ret = i2c_reg_read_byte_dt(&config->bus, MAX20335_REG_STATUSB, &val); if (ret) { return ret; } val = FIELD_GET(MAX20335_STATUSB_USBOK_MASK, val); switch (val) { case MAX20335_CHGIN_IN_PRESENT_AND_VALID: *online = CHARGER_ONLINE_FIXED; break; default: *online = CHARGER_ONLINE_OFFLINE; break; }; return 0; } static int max20335_set_recharge_threshold(const struct device *dev, uint32_t voltage_uv) { const struct charger_max20335_config *const config = dev->config; uint8_t val; switch (voltage_uv) { case 70000: val = 0x00; break; case 120000: val = 0x01; break; case 170000: val = 0x02; break; case 220000: val = 0x03; break; default: return -ENOTSUP; }; val = FIELD_PREP(MAX20335_CHGCNTLA_BATRECHG_MASK, val); return i2c_reg_update_byte_dt(&config->bus, MAX20335_REG_CHGCNTLA, MAX20335_CHGCNTLA_BATRECHG_MASK, val); } static int max20335_set_constant_charge_voltage(const struct device *dev, uint32_t voltage_uv) { const struct charger_max20335_config *const config = dev->config; uint16_t idx; uint8_t val; int ret; ret = linear_range_get_index(&charger_uv_range, voltage_uv, &idx); if (ret < 0) { return ret; } val = FIELD_PREP(MAX20335_CHGCNTLA_BATREG_MASK, idx); return i2c_reg_update_byte_dt(&config->bus, MAX20335_REG_CHGCNTLA, MAX20335_CHGCNTLA_BATREG_MASK, val); } static int max20335_set_chgin_to_sys_current_limit(const struct device *dev, uint32_t current_ua) { const struct charger_max20335_config *const config = dev->config; uint8_t val; switch (current_ua) { case 0: val = 0x00; break; case 100000: val = 0x01; break; case 500000: val = 0x02; break; case 1000000: val = 0x03; break; default: return -ENOTSUP; }; val = FIELD_PREP(MAX20335_ILIMCNTL_ILIMCNTL_MASK, val); return 
i2c_reg_update_byte_dt(&config->bus, MAX20335_REG_ILIMCNTL, MAX20335_ILIMCNTL_ILIMCNTL_MASK, val); } static int max20335_set_sys_voltage_min_threshold(const struct device *dev, uint32_t voltage_uv) { const struct charger_max20335_config *const config = dev->config; uint16_t idx; uint8_t val; int ret; ret = linear_range_get_index(&system_uv_range, voltage_uv, &idx); if (ret < 0) { return ret; } val = FIELD_PREP(MAX20335_ILIMCNTL_SYSMIN_MASK, idx); return i2c_reg_update_byte_dt(&config->bus, MAX20335_REG_ILIMCNTL, MAX20335_ILIMCNTL_SYSMIN_MASK, val); } static int max20335_set_thermistor_mode(const struct device *dev, enum charger_max20335_therm_mode mode) { const struct charger_max20335_config *const config = dev->config; uint8_t val; switch (mode) { case MAX20335_THERM_MODE_DISABLED: val = 0x00; break; case MAX20335_THERM_MODE_THERMISTOR: val = 0x01; break; case MAX20335_THERM_MODE_JEITA_1: val = 0x02; break; case MAX20335_THERM_MODE_JEITA_2: val = 0x03; break; default: return -ENOTSUP; }; val = FIELD_PREP(MAX20335_THRMCFG_THERMEN_MASK, val); return i2c_reg_update_byte_dt(&config->bus, MAX20335_REG_THRMCFG, MAX20335_THRMCFG_THERMEN_MASK, val); } static int max20335_set_enabled(const struct device *dev, bool enable) { struct charger_max20335_data *data = dev->data; const struct charger_max20335_config *const config = dev->config; data->charger_enabled = enable; return i2c_reg_update_byte_dt(&config->bus, MAX20335_REG_CHGCNTLA, MAX20335_CHGCNTLA_CHRGEN_MASK, enable ? MAX20335_CHGCNTLA_CHRGEN : 0); } static int max20335_get_interrupt_source(const struct device *dev, uint8_t *int_a, uint8_t *int_b) { const struct charger_max20335_config *config = dev->config; uint8_t dummy; uint8_t *int_src; int ret; /* Both INT_A and INT_B registers need to be read to clear all int flags */ int_src = (int_a != NULL) ? int_a : &dummy; ret = i2c_reg_read_byte_dt(&config->bus, MAX20335_REG_INTA, int_src); if (ret < 0) { return ret; } int_src = (int_b != NULL) ? 
int_b : &dummy; return i2c_reg_read_byte_dt(&config->bus, MAX20335_REG_INTB, int_src); } static int max20335_enable_interrupts(const struct device *dev) { enum {MASKA_VAL_ENABLE = 0xFF}; const struct charger_max20335_config *config = dev->config; int ret; ret = max20335_get_interrupt_source(dev, NULL, NULL); if (ret < 0) { LOG_WRN("Failed to clear pending interrupts: %d", ret); return ret; } ret = i2c_reg_write_byte_dt(&config->bus, MAX20335_REG_INTMASKA, MASKA_VAL_ENABLE); if (ret < 0) { return ret; } return i2c_reg_write_byte_dt(&config->bus, MAX20335_REG_INTMASKB, 0); } static int max20335_init_properties(const struct device *dev) { struct charger_max20335_data *data = dev->data; const struct charger_max20335_config *config = dev->config; int ret; data->charge_voltage_uv = config->max_vreg_uv; data->charger_enabled = true; ret = max20335_get_charger_status(dev, &data->charger_status); if (ret < 0) { LOG_ERR("Failed to read charger status: %d", ret); return ret; } ret = max20335_get_charger_online(dev, &data->charger_online); if (ret < 0) { LOG_ERR("Failed to read charger online: %d", ret); return ret; } return 0; } enum charger_max20335_therm_mode max20335_string_to_therm_mode(const char *mode_string) { static const char * const modes[] = { [MAX20335_THERM_MODE_DISABLED] = "disabled", [MAX20335_THERM_MODE_THERMISTOR] = "thermistor", [MAX20335_THERM_MODE_JEITA_1] = "JEITA-1", [MAX20335_THERM_MODE_JEITA_2] = "JEITA-2", }; enum charger_max20335_therm_mode i; for (i = MAX20335_THERM_MODE_DISABLED; i < ARRAY_SIZE(modes); i++) { if (strncmp(mode_string, modes[i], strlen(modes[i])) == 0) { return i; } } return MAX20335_THERM_MODE_UNKNOWN; } static int max20335_update_properties(const struct device *dev) { struct charger_max20335_data *data = dev->data; const struct charger_max20335_config *config = dev->config; enum charger_max20335_therm_mode therm_mode; int ret; ret = max20335_set_chgin_to_sys_current_limit(dev, config->max_ichgin_to_sys_ua); if (ret < 0) { 
LOG_ERR("Failed to set chgin-to-sys current limit: %d", ret); return ret; } ret = max20335_set_sys_voltage_min_threshold(dev, config->min_vsys_uv); if (ret < 0) { LOG_ERR("Failed to set minimum system voltage threshold: %d", ret); return ret; } ret = max20335_set_recharge_threshold(dev, config->recharge_threshold_uv); if (ret < 0) { LOG_ERR("Failed to set recharge threshold: %d", ret); return ret; } therm_mode = max20335_string_to_therm_mode(config->therm_mon_mode); ret = max20335_set_thermistor_mode(dev, therm_mode); if (ret < 0) { LOG_ERR("Failed to set thermistor mode: %d", ret); return ret; } ret = max20335_set_constant_charge_voltage(dev, data->charge_voltage_uv); if (ret < 0) { LOG_ERR("Failed to set charge voltage: %d", ret); return ret; } ret = max20335_set_enabled(dev, data->charger_enabled); if (ret < 0) { LOG_ERR("Failed to set enabled: %d", ret); return ret; } return 0; } static int max20335_get_prop(const struct device *dev, charger_prop_t prop, union charger_propval *val) { struct charger_max20335_data *data = dev->data; switch (prop) { case CHARGER_PROP_ONLINE: val->online = data->charger_online; return 0; case CHARGER_PROP_STATUS: val->status = data->charger_status; return 0; case CHARGER_PROP_CONSTANT_CHARGE_VOLTAGE_UV: val->const_charge_voltage_uv = data->charge_voltage_uv; return 0; default: return -ENOTSUP; } } static int max20335_set_prop(const struct device *dev, charger_prop_t prop, const union charger_propval *val) { struct charger_max20335_data *data = dev->data; int ret; switch (prop) { case CHARGER_PROP_CONSTANT_CHARGE_VOLTAGE_UV: ret = max20335_set_constant_charge_voltage(dev, val->const_charge_voltage_uv); if (ret == 0) { data->charge_voltage_uv = val->const_charge_voltage_uv; } return ret; case CHARGER_PROP_STATUS_NOTIFICATION: data->charger_status_notifier = val->status_notification; return 0; case CHARGER_PROP_ONLINE_NOTIFICATION: data->charger_online_notifier = val->online_notification; return 0; default: return -ENOTSUP; } } static 
int max20335_enable_interrupt_pin(const struct device *dev, bool enabled) { const struct charger_max20335_config *const config = dev->config; gpio_flags_t flags; int ret; flags = enabled ? GPIO_INT_LEVEL_ACTIVE : GPIO_INT_DISABLE; ret = gpio_pin_interrupt_configure_dt(&config->int_gpio, flags); if (ret < 0) { LOG_ERR("Could not %s interrupt GPIO callback: %d", enabled ? "enable" : "disable", ret); } return ret; } static void max20335_gpio_callback(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { struct charger_max20335_data *data = CONTAINER_OF(cb, struct charger_max20335_data, gpio_cb); int ret; (void) max20335_enable_interrupt_pin(data->dev, false); ret = k_work_submit(&data->int_routine_work); if (ret < 0) { LOG_WRN("Could not submit int work: %d", ret); } } static void max20335_int_routine_work_handler(struct k_work *work) { struct charger_max20335_data *data = CONTAINER_OF(work, struct charger_max20335_data, int_routine_work); uint8_t int_src_a; int ret; ret = max20335_get_interrupt_source(data->dev, &int_src_a, NULL); if (ret < 0) { LOG_WRN("Failed to read interrupt source"); return; } if ((int_src_a & MAX20335_INTA_CHGSTAT_MASK) != 0) { ret = max20335_get_charger_status(data->dev, &data->charger_status); if (ret < 0) { LOG_WRN("Failed to read charger status: %d", ret); } else { if (data->charger_status_notifier != NULL) { data->charger_status_notifier(data->charger_status); } } } if ((int_src_a & MAX20335_INTA_USBOK_MASK) != 0) { ret = max20335_get_charger_online(data->dev, &data->charger_online); if (ret < 0) { LOG_WRN("Failed to read charger online %d", ret); } else { if (data->charger_online_notifier != NULL) { data->charger_online_notifier(data->charger_online); } } if (data->charger_online != CHARGER_ONLINE_OFFLINE) { (void) max20335_update_properties(data->dev); } } ret = k_work_reschedule(&data->int_enable_work, INT_ENABLE_DELAY); if (ret < 0) { LOG_WRN("Could not reschedule int_enable_work: %d", ret); } } static void 
max20335_int_enable_work_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct charger_max20335_data *data = CONTAINER_OF(dwork, struct charger_max20335_data, int_enable_work); (void) max20335_enable_interrupt_pin(data->dev, true); } static int max20335_configure_interrupt_pin(const struct device *dev) { struct charger_max20335_data *data = dev->data; const struct charger_max20335_config *config = dev->config; int ret; if (!gpio_is_ready_dt(&config->int_gpio)) { LOG_ERR("Interrupt GPIO device not ready"); return -ENODEV; } ret = gpio_pin_configure_dt(&config->int_gpio, GPIO_INPUT); if (ret < 0) { LOG_ERR("Could not configure interrupt GPIO"); return ret; } gpio_init_callback(&data->gpio_cb, max20335_gpio_callback, BIT(config->int_gpio.pin)); ret = gpio_add_callback_dt(&config->int_gpio, &data->gpio_cb); if (ret < 0) { LOG_ERR("Could not add interrupt GPIO callback"); return ret; } return 0; } static int max20335_init(const struct device *dev) { struct charger_max20335_data *data = dev->data; const struct charger_max20335_config *config = dev->config; int ret; if (!i2c_is_ready_dt(&config->bus)) { return -ENODEV; } data->dev = dev; ret = max20335_init_properties(dev); if (ret < 0) { return ret; } k_work_init(&data->int_routine_work, max20335_int_routine_work_handler); k_work_init_delayable(&data->int_enable_work, max20335_int_enable_work_handler); ret = max20335_configure_interrupt_pin(dev); if (ret < 0) { return ret; } ret = max20335_enable_interrupt_pin(dev, true); if (ret < 0) { return ret; } ret = max20335_enable_interrupts(dev); if (ret < 0) { LOG_ERR("Failed to enable interrupts"); return ret; } return 0; } static const struct charger_driver_api max20335_driver_api = { .get_property = max20335_get_prop, .set_property = max20335_set_prop, .charge_enable = max20335_set_enabled, }; #define MAX20335_DEFINE(inst) \ static struct charger_max20335_data charger_max20335_data_##inst; \ static const struct 
charger_max20335_config charger_max20335_config_##inst = { \ .bus = I2C_DT_SPEC_GET(DT_INST_PARENT(inst)), \ .int_gpio = GPIO_DT_SPEC_INST_GET(inst, int_gpios), \ .max_vreg_uv = DT_INST_PROP(inst, constant_charge_voltage_max_microvolt), \ .max_ichgin_to_sys_ua = DT_INST_PROP(inst, chgin_to_sys_current_limit_microamp),\ .min_vsys_uv = DT_INST_PROP(inst, system_voltage_min_threshold_microvolt), \ .recharge_threshold_uv = DT_INST_PROP(inst, re_charge_threshold_microvolt), \ .therm_mon_mode = DT_INST_PROP(inst, thermistor_monitoring_mode), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, &max20335_init, NULL, &charger_max20335_data_##inst, \ &charger_max20335_config_##inst, \ POST_KERNEL, CONFIG_MFD_INIT_PRIORITY, \ &max20335_driver_api); DT_INST_FOREACH_STATUS_OKAY(MAX20335_DEFINE) ```
/content/code_sandbox/drivers/charger/charger_max20335.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,200
```unknown
#

menuconfig CHARGER
	bool "Battery charger drivers"
	help
	  Enable battery charger driver configuration.

if CHARGER

module = CHARGER
module-str = charger
source "subsys/logging/Kconfig.template.log_config"

config CHARGER_INIT_PRIORITY
	int "Battery charger init priority"
	default 90
	help
	  Battery charger initialization priority.

config CHARGER_HAS_DISCHARGE_CURRENT_NOTIFICATIONS
	bool
	help
	  This symbol is y if the charger supports discharge
	  current notifications.

config CHARGER_HAS_SYSTEM_VOLTAGE_NOTIFICATIONS
	bool
	help
	  This symbol is y if the charger supports system
	  voltage notifications.

menu "Discharge current notifications"

config CHARGER_DISCHARGE_CURRENT_NOTIFICATIONS
	bool "Support discharge current notifications"
	depends on CHARGER_HAS_DISCHARGE_CURRENT_NOTIFICATIONS
	help
	  Enables support for discharge current notifications

endmenu

menu "System voltage notification"

config CHARGER_SYSTEM_VOLTAGE_NOTIFICATIONS
	bool "Support system voltage notifications"
	depends on CHARGER_HAS_SYSTEM_VOLTAGE_NOTIFICATIONS
	help
	  Enables support for system voltage notifications

endmenu

source "drivers/charger/Kconfig.sbs_charger"
source "drivers/charger/Kconfig.bq24190"
source "drivers/charger/Kconfig.bq25180"
source "drivers/charger/Kconfig.max20335"

endif # CHARGER
```
/content/code_sandbox/drivers/charger/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
280
```unknown
config CHARGER_BQ24190
	bool "BQ24190 Battery Charger"
	default y
	depends on DT_HAS_TI_BQ24190_ENABLED
	select I2C
	help
	  Enable I2C-based driver for the TI BQ24190 Battery Charger.
```
/content/code_sandbox/drivers/charger/Kconfig.bq24190
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
58
```unknown
menu "Firmware drivers"

config ARM_SCMI
	bool "Support for ARM's SCMI"
	depends on ARM || ARM64
	help
	  Enable support for ARM's System Configuration and
	  Management Interface (SCMI).

source "drivers/firmware/scmi/Kconfig"

endmenu
```
/content/code_sandbox/drivers/firmware/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
62
```c /* * */ #define DT_DRV_COMPAT sbs_sbs_charger #include <zephyr/drivers/charger.h> #include <zephyr/drivers/i2c.h> #include <zephyr/logging/log.h> #include <zephyr/sys/byteorder.h> #include "sbs_charger.h" struct sbs_charger_config { struct i2c_dt_spec i2c; }; LOG_MODULE_REGISTER(sbs_charger); static int sbs_cmd_reg_read(const struct device *dev, uint8_t reg_addr, uint16_t *val) { const struct sbs_charger_config *cfg; uint8_t i2c_data[2]; int status; cfg = dev->config; status = i2c_burst_read_dt(&cfg->i2c, reg_addr, i2c_data, sizeof(i2c_data)); if (status < 0) { LOG_ERR("Unable to read register"); return status; } *val = sys_get_le16(i2c_data); return 0; } static int sbs_cmd_reg_write(const struct device *dev, uint8_t reg_addr, uint16_t val) { const struct sbs_charger_config *config = dev->config; uint8_t buf[2]; sys_put_le16(val, buf); return i2c_burst_write_dt(&config->i2c, reg_addr, buf, sizeof(buf)); } static int sbs_cmd_reg_update(const struct device *dev, uint8_t reg_addr, uint16_t mask, uint16_t val) { uint16_t old_val, new_val; int ret; ret = sbs_cmd_reg_read(dev, SBS_CHARGER_REG_STATUS, &old_val); if (ret < 0) { return ret; } new_val = (old_val & ~mask) | (val & mask); if (new_val == old_val) { return 0; } return sbs_cmd_reg_write(dev, reg_addr, new_val); } static int sbs_charger_charge_enable(const struct device *dev, const bool enable) { uint16_t reg_val; if (!enable) { reg_val = SBS_CHARGER_MODE_INHIBIT_CHARGE; } else { reg_val = 0; } return sbs_cmd_reg_update(dev, SBS_CHARGER_REG_CHARGER_MODE, SBS_CHARGER_MODE_INHIBIT_CHARGE, reg_val); } static int sbs_charger_get_prop(const struct device *dev, const charger_prop_t prop, union charger_propval *val) { uint16_t reg_val; int ret; switch (prop) { case CHARGER_PROP_ONLINE: ret = sbs_cmd_reg_read(dev, SBS_CHARGER_REG_STATUS, &reg_val); if (ret < 0) { return ret; } if (reg_val & SBS_CHARGER_STATUS_AC_PRESENT) { val->online = CHARGER_ONLINE_FIXED; } else { val->online = CHARGER_ONLINE_OFFLINE; } return 0; 
case CHARGER_PROP_PRESENT: ret = sbs_cmd_reg_read(dev, SBS_CHARGER_REG_STATUS, &reg_val); if (ret < 0) { return ret; } if (reg_val & SBS_CHARGER_STATUS_BATTERY_PRESENT) { val->present = true; } else { val->present = false; } return 0; case CHARGER_PROP_STATUS: ret = sbs_cmd_reg_read(dev, SBS_CHARGER_REG_STATUS, &reg_val); if (ret < 0) { return ret; } if (!(reg_val & SBS_CHARGER_STATUS_BATTERY_PRESENT)) { val->status = CHARGER_STATUS_NOT_CHARGING; } else if (reg_val & SBS_CHARGER_STATUS_AC_PRESENT && !(reg_val & SBS_CHARGER_STATUS_CHARGE_INHIBITED)) { val->status = CHARGER_STATUS_CHARGING; } else { val->status = CHARGER_STATUS_DISCHARGING; } return 0; default: return -ENOTSUP; } } static int sbs_charger_set_prop(const struct device *dev, const charger_prop_t prop, const union charger_propval *val) { return -ENOTSUP; } /** * @brief initialize the charger * * @return 0 for success */ static int sbs_charger_init(const struct device *dev) { const struct sbs_charger_config *cfg = dev->config; if (!i2c_is_ready_dt(&cfg->i2c)) { LOG_ERR("Bus device is not ready"); return -ENODEV; } return 0; } static const struct charger_driver_api sbs_charger_driver_api = { .get_property = &sbs_charger_get_prop, .set_property = &sbs_charger_set_prop, .charge_enable = &sbs_charger_charge_enable, }; #define SBS_CHARGER_INIT(inst) \ \ static const struct sbs_charger_config sbs_charger_config_##inst = { \ .i2c = I2C_DT_SPEC_INST_GET(inst), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, &sbs_charger_init, NULL, NULL, &sbs_charger_config_##inst, \ POST_KERNEL, CONFIG_CHARGER_INIT_PRIORITY, &sbs_charger_driver_api); DT_INST_FOREACH_STATUS_OKAY(SBS_CHARGER_INIT) ```
/content/code_sandbox/drivers/charger/sbs_charger.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,182
```c /* * */ #include <zephyr/drivers/firmware/scmi/shmem.h> #include <zephyr/drivers/firmware/scmi/protocol.h> #include <zephyr/logging/log.h> #include <string.h> LOG_MODULE_REGISTER(arm_scmi_shmem); #define DT_DRV_COMPAT arm_scmi_shmem #ifndef DEVICE_MMIO_IS_IN_RAM #define device_map(virt, phys, size, flags) *(virt) = (phys) #endif /* DEVICE_MMIO_IS_IN_RAM */ struct scmi_shmem_config { uintptr_t phys_addr; uint32_t size; }; struct scmi_shmem_data { mm_reg_t regmap; }; struct scmi_shmem_layout { volatile uint32_t res0; volatile uint32_t chan_status; volatile uint32_t res1[2]; volatile uint32_t chan_flags; volatile uint32_t len; volatile uint32_t msg_hdr; }; int scmi_shmem_get_channel_status(const struct device *dev, uint32_t *status) { struct scmi_shmem_data *data; struct scmi_shmem_layout *layout; data = dev->data; layout = (struct scmi_shmem_layout *)data->regmap; *status = layout->chan_status; return 0; } static void scmi_shmem_memcpy(mm_reg_t dst, mm_reg_t src, uint32_t bytes) { int i; for (i = 0; i < bytes; i++) { sys_write8(*(uint8_t *)(src + i), dst + i); } } int scmi_shmem_read_message(const struct device *shmem, struct scmi_message *msg) { struct scmi_shmem_layout *layout; struct scmi_shmem_data *data; const struct scmi_shmem_config *cfg; data = shmem->data; cfg = shmem->config; layout = (struct scmi_shmem_layout *)data->regmap; /* some sanity checks first */ if (!msg) { return -EINVAL; } if (!msg->content && msg->len) { return -EINVAL; } if (cfg->size < (sizeof(*layout) + msg->len)) { LOG_ERR("message doesn't fit in shmem area"); return -EINVAL; } /* mismatch between expected reply size and actual size? */ if (msg->len != (layout->len - sizeof(layout->msg_hdr))) { LOG_ERR("bad message len. Expected 0x%x, got 0x%x", msg->len, (uint32_t)(layout->len - sizeof(layout->msg_hdr))); return -EINVAL; } /* header match? */ if (layout->msg_hdr != msg->hdr) { LOG_ERR("bad message header. 
Expected 0x%x, got 0x%x", msg->hdr, layout->msg_hdr); return -EINVAL; } if (msg->content) { scmi_shmem_memcpy(POINTER_TO_UINT(msg->content), data->regmap + sizeof(*layout), msg->len); } return 0; } int scmi_shmem_write_message(const struct device *shmem, struct scmi_message *msg) { struct scmi_shmem_layout *layout; struct scmi_shmem_data *data; const struct scmi_shmem_config *cfg; data = shmem->data; cfg = shmem->config; layout = (struct scmi_shmem_layout *)data->regmap; /* some sanity checks first */ if (!msg) { return -EINVAL; } if (!msg->content && msg->len) { return -EINVAL; } if (cfg->size < (sizeof(*layout) + msg->len)) { return -EINVAL; } if (!(layout->chan_status & SCMI_SHMEM_CHAN_STATUS_BUSY_BIT)) { return -EBUSY; } layout->len = sizeof(layout->msg_hdr) + msg->len; layout->msg_hdr = msg->hdr; if (msg->content) { scmi_shmem_memcpy(data->regmap + sizeof(*layout), POINTER_TO_UINT(msg->content), msg->len); } /* done, mark channel as busy and proceed */ layout->chan_status &= ~SCMI_SHMEM_CHAN_STATUS_BUSY_BIT; return 0; } uint32_t scmi_shmem_channel_status(const struct device *shmem) { struct scmi_shmem_layout *layout; struct scmi_shmem_data *data; data = shmem->data; layout = (struct scmi_shmem_layout *)data->regmap; return layout->chan_status; } void scmi_shmem_update_flags(const struct device *shmem, uint32_t mask, uint32_t val) { struct scmi_shmem_layout *layout; struct scmi_shmem_data *data; data = shmem->data; layout = (struct scmi_shmem_layout *)data->regmap; layout->chan_flags = (layout->chan_flags & ~mask) | (val & mask); } static int scmi_shmem_init(const struct device *dev) { const struct scmi_shmem_config *cfg; struct scmi_shmem_data *data; cfg = dev->config; data = dev->data; if (cfg->size < sizeof(struct scmi_shmem_layout)) { return -EINVAL; } device_map(&data->regmap, cfg->phys_addr, cfg->size, K_MEM_CACHE_NONE); return 0; } #define SCMI_SHMEM_INIT(inst) \ static const struct scmi_shmem_config config_##inst = { \ .phys_addr = 
DT_INST_REG_ADDR(inst), \ .size = DT_INST_REG_SIZE(inst), \ }; \ \ static struct scmi_shmem_data data_##inst; \ \ DEVICE_DT_INST_DEFINE(inst, &scmi_shmem_init, NULL, \ &data_##inst, &config_##inst, \ PRE_KERNEL_1, \ CONFIG_ARM_SCMI_SHMEM_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(SCMI_SHMEM_INIT); ```
/content/code_sandbox/drivers/firmware/scmi/shmem.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,300
```c /* * */ #include <zephyr/drivers/firmware/scmi/clk.h> #include <string.h> /* TODO: if extended attributes are supported this should be moved * to the header file so that users will have access to it. */ #define SCMI_CLK_CONFIG_EA_MASK GENMASK(23, 16) struct scmi_clock_attributes_reply { int32_t status; uint32_t attributes; }; struct scmi_clock_rate_set_reply { int32_t status; uint32_t rate[2]; }; int scmi_clock_rate_get(struct scmi_protocol *proto, uint32_t clk_id, uint32_t *rate) { struct scmi_message msg, reply; int ret; struct scmi_clock_rate_set_reply reply_buffer; /* sanity checks */ if (!proto || !rate) { return -EINVAL; } if (proto->id != SCMI_PROTOCOL_CLOCK) { return -EINVAL; } msg.hdr = SCMI_MESSAGE_HDR_MAKE(SCMI_CLK_MSG_CLOCK_RATE_GET, SCMI_COMMAND, proto->id, 0x0); msg.len = sizeof(clk_id); msg.content = &clk_id; reply.hdr = msg.hdr; reply.len = sizeof(reply_buffer); reply.content = &reply_buffer; ret = scmi_send_message(proto, &msg, &reply); if (ret < 0) { return ret; } if (reply_buffer.status != SCMI_SUCCESS) { return scmi_status_to_errno(reply_buffer.status); } *rate = reply_buffer.rate[0]; return 0; } int scmi_clock_config_set(struct scmi_protocol *proto, struct scmi_clock_config *cfg) { struct scmi_message msg, reply; int status, ret; /* sanity checks */ if (!proto || !cfg) { return -EINVAL; } if (proto->id != SCMI_PROTOCOL_CLOCK) { return -EINVAL; } /* extended attributes currently not supported */ if (cfg->attributes & SCMI_CLK_CONFIG_EA_MASK) { return -ENOTSUP; } /* invalid because extended attributes are not supported */ if (SCMI_CLK_CONFIG_ENABLE_DISABLE(cfg->attributes) == 3) { return -ENOTSUP; } /* this is a reserved value */ if (SCMI_CLK_CONFIG_ENABLE_DISABLE(cfg->attributes) == 2) { return -EINVAL; } msg.hdr = SCMI_MESSAGE_HDR_MAKE(SCMI_CLK_MSG_CLOCK_CONFIG_SET, SCMI_COMMAND, proto->id, 0x0); msg.len = sizeof(*cfg); msg.content = cfg; reply.hdr = msg.hdr; reply.len = sizeof(status); reply.content = &status; ret = 
scmi_send_message(proto, &msg, &reply); if (ret < 0) { return ret; } if (status != SCMI_SUCCESS) { return scmi_status_to_errno(status); } return 0; } int scmi_clock_protocol_attributes(struct scmi_protocol *proto, uint32_t *attributes) { struct scmi_message msg, reply; struct scmi_clock_attributes_reply reply_buffer; int ret; /* sanity checks */ if (!proto || !attributes) { return -EINVAL; } if (proto->id != SCMI_PROTOCOL_CLOCK) { return -EINVAL; } msg.hdr = SCMI_MESSAGE_HDR_MAKE(SCMI_CLK_MSG_PROTOCOL_ATTRIBUTES, SCMI_COMMAND, proto->id, 0x0); /* command has no parameters */ msg.len = 0x0; msg.content = NULL; reply.hdr = msg.hdr; reply.len = sizeof(reply_buffer); reply.content = &reply_buffer; ret = scmi_send_message(proto, &msg, &reply); if (ret < 0) { return ret; } if (reply_buffer.status != 0) { return scmi_status_to_errno(reply_buffer.status); } *attributes = reply_buffer.attributes; return 0; } ```
/content/code_sandbox/drivers/firmware/scmi/clk.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
834
```c /* * */ #include <zephyr/drivers/firmware/scmi/protocol.h> #include <zephyr/drivers/firmware/scmi/transport.h> #include <zephyr/logging/log.h> #include <zephyr/device.h> LOG_MODULE_REGISTER(scmi_core); #define SCMI_CHAN_LOCK_TIMEOUT_USEC 500 #define SCMI_CHAN_SEM_TIMEOUT_USEC 500 int scmi_status_to_errno(int scmi_status) { switch (scmi_status) { case SCMI_SUCCESS: return 0; case SCMI_NOT_SUPPORTED: return -EOPNOTSUPP; case SCMI_INVALID_PARAMETERS: return -EINVAL; case SCMI_DENIED: return -EACCES; case SCMI_NOT_FOUND: return -ENOENT; case SCMI_OUT_OF_RANGE: return -ERANGE; case SCMI_IN_USE: case SCMI_BUSY: return -EBUSY; case SCMI_PROTOCOL_ERROR: return -EPROTO; case SCMI_COMMS_ERROR: case SCMI_GENERIC_ERROR: case SCMI_HARDWARE_ERROR: default: return -EIO; } } static void scmi_core_reply_cb(struct scmi_channel *chan) { if (!k_is_pre_kernel()) { k_sem_give(&chan->sem); } } static int scmi_core_setup_chan(const struct device *transport, struct scmi_channel *chan, bool tx) { int ret; if (!chan) { return -EINVAL; } if (chan->ready) { return 0; } /* no support for RX channels ATM */ if (!tx) { return -ENOTSUP; } k_mutex_init(&chan->lock); k_sem_init(&chan->sem, 0, 1); chan->cb = scmi_core_reply_cb; /* setup transport-related channel data */ ret = scmi_transport_setup_chan(transport, chan, tx); if (ret < 0) { LOG_ERR("failed to setup channel"); return ret; } /* protocols might share a channel. In such cases, this * will stop them from being initialized again. */ chan->ready = true; return 0; } static int scmi_send_message_pre_kernel(struct scmi_protocol *proto, struct scmi_message *msg, struct scmi_message *reply) { int ret; ret = scmi_transport_send_message(proto->transport, proto->tx, msg); if (ret < 0) { return ret; } /* no kernel primitives, we're forced to poll here. * * Cortex-M quirk: no interrupts at this point => no timer => * no timeout mechanism => this can block the whole system. * * TODO: is there a better way to handle this? 
*/ while (!scmi_transport_channel_is_free(proto->transport, proto->tx)) { } ret = scmi_transport_read_message(proto->transport, proto->tx, reply); if (ret < 0) { return ret; } return ret; } static int scmi_send_message_post_kernel(struct scmi_protocol *proto, struct scmi_message *msg, struct scmi_message *reply) { int ret = 0; if (!proto->tx) { return -ENODEV; } /* wait for channel to be free */ ret = k_mutex_lock(&proto->tx->lock, K_USEC(SCMI_CHAN_LOCK_TIMEOUT_USEC)); if (ret < 0) { LOG_ERR("failed to acquire chan lock"); return ret; } ret = scmi_transport_send_message(proto->transport, proto->tx, msg); if (ret < 0) { LOG_ERR("failed to send message"); goto out_release_mutex; } /* only one protocol instance can wait for a message reply at a time */ ret = k_sem_take(&proto->tx->sem, K_USEC(SCMI_CHAN_SEM_TIMEOUT_USEC)); if (ret < 0) { LOG_ERR("failed to wait for msg reply"); goto out_release_mutex; } ret = scmi_transport_read_message(proto->transport, proto->tx, reply); if (ret < 0) { LOG_ERR("failed to read reply"); goto out_release_mutex; } out_release_mutex: k_mutex_unlock(&proto->tx->lock); return ret; } int scmi_send_message(struct scmi_protocol *proto, struct scmi_message *msg, struct scmi_message *reply) { if (!proto->tx) { return -ENODEV; } if (!proto->tx->ready) { return -EINVAL; } if (k_is_pre_kernel()) { return scmi_send_message_pre_kernel(proto, msg, reply); } else { return scmi_send_message_post_kernel(proto, msg, reply); } } static int scmi_core_protocol_setup(const struct device *transport) { int ret; STRUCT_SECTION_FOREACH(scmi_protocol, it) { it->transport = transport; #ifndef CONFIG_ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS /* no static channel allocation, attempt dynamic binding */ it->tx = scmi_transport_request_channel(transport, it->id, true); #endif /* CONFIG_ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS */ if (!it->tx) { return -ENODEV; } ret = scmi_core_setup_chan(transport, it->tx, true); if (ret < 0) { return ret; } } return 0; } int 
scmi_core_transport_init(const struct device *transport) { int ret; ret = scmi_transport_init(transport); if (ret < 0) { return ret; } return scmi_core_protocol_setup(transport); } ```
/content/code_sandbox/drivers/firmware/scmi/core.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,219
```unknown
if ARM_SCMI

config ARM_SCMI_CLK_HELPERS
	bool "Helper functions for SCMI clock protocol"
	default y
	depends on DT_HAS_ARM_SCMI_CLOCK_ENABLED
	help
	  Enable support for SCMI clock protocol helper functions.

config ARM_SCMI_MAILBOX_TRANSPORT
	bool "SCMI transport based on shared memory and doorbells"
	default y
	depends on DT_HAS_ARM_SCMI_ENABLED
	depends on ARM_SCMI_SHMEM
	select ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS
	help
	  Enable support for SCMI transport based on shared memory
	  and doorbells.

config ARM_SCMI_PINCTRL_HELPERS
	bool "Helper functions for SCMI pinctrl protocol"
	default y
	depends on DT_HAS_ARM_SCMI_PINCTRL_ENABLED
	help
	  Enable support for SCMI pinctrl protocol helper functions.

config ARM_SCMI_SHMEM
	bool "SCMI shared memory (SHMEM) driver"
	default y
	depends on DT_HAS_ARM_SCMI_SHMEM_ENABLED
	help
	  Enable support for SCMI shared memory (SHMEM) driver.

config ARM_SCMI_SHMEM_INIT_PRIORITY
	int "SCMI shared memory (SHMEM) initialization priority"
	default 15
	help
	  SCMI SHMEM driver device initialization priority.

config ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS
	bool "Transport layer has static channels"
	help
	  Enable this if the SCMI transport layer uses static channels. What
	  this means is that each protocol will have its channels assigned at
	  compile time. This option is recommended for transport layer
	  drivers which can use the default channel allocation scheme
	  (i.e: use protocol-specific channels if they exist, otherwise use
	  base protocol channels).

config ARM_SCMI_TRANSPORT_INIT_PRIORITY
	int "SCMI transport layer initialization priority"
	default 20
	help
	  SCMI transport driver device initialization priority.

endif # ARM_SCMI
```
/content/code_sandbox/drivers/firmware/scmi/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
397