text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /* * */ #define DT_DRV_COMPAT raspberrypi_pico_spi_pio #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_pico_pio); #include <zephyr/sys/util.h> #include <zephyr/sys/sys_io.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/pinctrl.h> #include "spi_context.h" #include <zephyr/drivers/misc/pio_rpi_pico/pio_rpi_pico.h> #include <hardware/pio.h> #include "hardware/clocks.h" #define SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED DT_ANY_INST_HAS_PROP_STATUS_OKAY(sio_gpios) #define PIO_CYCLES (4) #define PIO_FIFO_DEPTH (4) struct spi_pico_pio_config { const struct device *piodev; const struct pinctrl_dev_config *pin_cfg; struct gpio_dt_spec clk_gpio; struct gpio_dt_spec mosi_gpio; struct gpio_dt_spec miso_gpio; struct gpio_dt_spec sio_gpio; const struct device *clk_dev; clock_control_subsys_t clk_id; }; struct spi_pico_pio_data { struct spi_context spi_ctx; uint32_t tx_count; uint32_t rx_count; PIO pio; size_t pio_sm; uint32_t pio_tx_offset; uint32_t pio_rx_offset; uint32_t pio_rx_wrap_target; uint32_t pio_rx_wrap; uint32_t tx_period_ticks; uint32_t bits; uint32_t dfs; }; /* ------------ */ /* spi_mode_0_0 */ /* ------------ */ #define SPI_MODE_0_0_WRAP_TARGET 0 #define SPI_MODE_0_0_WRAP 1 #define SPI_MODE_0_0_CYCLES 4 RPI_PICO_PIO_DEFINE_PROGRAM(spi_mode_0_0, SPI_MODE_0_0_WRAP_TARGET, SPI_MODE_0_0_WRAP, /* .wrap_target */ 0x6101, /* 0: out pins, 1 side 0 [1] */ 0x5101, /* 1: in pins, 1 side 1 [1] */ /* .wrap */ ); /* ------------ */ /* spi_mode_1_1 */ /* ------------ */ #define SPI_MODE_1_1_WRAP_TARGET 0 #define SPI_MODE_1_1_WRAP 2 #define SPI_MODE_1_1_CYCLES 4 RPI_PICO_PIO_DEFINE_PROGRAM(spi_mode_1_1, SPI_MODE_1_1_WRAP_TARGET, SPI_MODE_1_1_WRAP, /* .wrap_target */ 0x7021, /* 0: out x, 1 side 1 */ 0xa101, /* 1: mov pins, x side 0 [1] */ 0x5001, /* 2: in pins, 1 side 1 */ /* .wrap */ ); #if SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED /* ------------------- */ /* spi_sio_mode_0_0_tx */ /* 
------------------- */ #define SPI_SIO_MODE_0_0_TX_WRAP_TARGET 0 #define SPI_SIO_MODE_0_0_TX_WRAP 2 #define SPI_SIO_MODE_0_0_TX_CYCLES 2 RPI_PICO_PIO_DEFINE_PROGRAM(spi_sio_mode_0_0_tx, SPI_SIO_MODE_0_0_TX_WRAP_TARGET, SPI_SIO_MODE_0_0_TX_WRAP, /* .wrap_target */ 0x80a0, /* 0: pull block side 0 */ 0x6001, /* 1: out pins, 1 side 0 */ 0x10e1, /* 2: jmp !osre, 1 side 1 */ /* .wrap */ ); /* ------------------------- */ /* spi_sio_mode_0_0_8_bit_rx */ /* ------------------------- */ #define SPI_SIO_MODE_0_0_8_BIT_RX_WRAP_TARGET 0 #define SPI_SIO_MODE_0_0_8_BIT_RX_WRAP 6 #define SPI_SIO_MODE_0_0_8_BIT_RX_CYCLES 2 RPI_PICO_PIO_DEFINE_PROGRAM(spi_sio_mode_0_0_8_bit_rx, SPI_SIO_MODE_0_0_8_BIT_RX_WRAP_TARGET, SPI_SIO_MODE_0_0_8_BIT_RX_WRAP, /* .wrap_target */ 0x80a0, /* 0: pull block side 0 */ 0x6020, /* 1: out x, 32 side 0 */ 0xe047, /* 2: set y, 7 side 0 */ 0x5001, /* 3: in pins, 1 side 1 */ 0x0083, /* 4: jmp y--, 3 side 0 */ 0x8020, /* 5: push block side 0 */ 0x0042, /* 6: jmp x--, 2 side 0 */ /* .wrap */ ); /* -------------------------- */ /* spi_sio_mode_0_0_16_bit_rx */ /* -------------------------- */ #define SPI_SIO_MODE_0_0_16_BIT_RX_WRAP_TARGET 0 #define SPI_SIO_MODE_0_0_16_BIT_RX_WRAP 6 #define SPI_SIO_MODE_0_0_16_BIT_RX_CYCLES 2 RPI_PICO_PIO_DEFINE_PROGRAM(spi_sio_mode_0_0_16_bit_rx, SPI_SIO_MODE_0_0_16_BIT_RX_WRAP_TARGET, SPI_SIO_MODE_0_0_16_BIT_RX_WRAP, /* .wrap_target */ 0x80a0, /* 0: pull block side 0 */ 0x6020, /* 1: out x, 32 side 0 */ 0xe04f, /* 2: set y, 15 side 0 */ 0x5001, /* 3: in pins, 1 side 1 */ 0x0083, /* 4: jmp y--, 3 side 0 */ 0x8020, /* 5: push block side 0 */ 0x0042, /* 6: jmp x--, 2 side 0 */ /* .wrap */ ); /* -------------------------- */ /* spi_sio_mode_0_0_32_bit_rx */ /* -------------------------- */ #define SPI_SIO_MODE_0_0_32_BIT_RX_WRAP_TARGET 0 #define SPI_SIO_MODE_0_0_32_BIT_RX_WRAP 6 #define SPI_SIO_MODE_0_0_32_BIT_RX_CYCLES 2 RPI_PICO_PIO_DEFINE_PROGRAM(spi_sio_mode_0_0_32_bit_rx, SPI_SIO_MODE_0_0_32_BIT_RX_WRAP_TARGET, 
SPI_SIO_MODE_0_0_32_BIT_RX_WRAP, /* .wrap_target */ 0x80a0, /* 0: pull block side 0 */ 0x6020, /* 1: out x, 32 side 0 */ 0xe05f, /* 2: set y, 31 side 0 */ 0x5001, /* 3: in pins, 1 side 1 */ 0x0083, /* 4: jmp y--, 3 side 0 */ 0x8020, /* 5: push block side 0 */ 0x0042, /* 6: jmp x--, 2 side 0 */ /* .wrap */ ); #endif /* SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED */ static float spi_pico_pio_clock_divisor(const uint32_t clock_freq, int cycles, uint32_t spi_frequency) { return (float)clock_freq / (float)(cycles * spi_frequency); } static uint32_t spi_pico_pio_maximum_clock_frequency(const uint32_t clock_freq, int cycles) { return clock_freq / cycles; } static uint32_t spi_pico_pio_minimum_clock_frequency(const uint32_t clock_freq, int cycles) { return clock_freq / (cycles * 65536); } static inline bool spi_pico_pio_transfer_ongoing(struct spi_pico_pio_data *data) { return spi_context_tx_on(&data->spi_ctx) || spi_context_rx_on(&data->spi_ctx); } static inline void spi_pico_pio_sm_put8(PIO pio, uint sm, uint8_t data) { /* Do 8 bit accesses on FIFO, so that write data is byte-replicated. This */ /* gets us the left-justification for free (for MSB-first shift-out) */ io_rw_8 *txfifo = (io_rw_8 *)&pio->txf[sm]; *txfifo = data; } static inline uint8_t spi_pico_pio_sm_get8(PIO pio, uint sm) { /* Do 8 bit accesses on FIFO, so that write data is byte-replicated. This */ /* gets us the left-justification for free (for MSB-first shift-out) */ io_rw_8 *rxfifo = (io_rw_8 *)&pio->rxf[sm]; return *rxfifo; } static inline void spi_pico_pio_sm_put16(PIO pio, uint sm, uint16_t data) { /* Do 16 bit accesses on FIFO, so that write data is halfword-replicated. 
This */ /* gets us the left-justification for free (for MSB-first shift-out) */ io_rw_16 *txfifo = (io_rw_16 *)&pio->txf[sm]; *txfifo = data; } static inline uint16_t spi_pico_pio_sm_get16(PIO pio, uint sm) { io_rw_16 *rxfifo = (io_rw_16 *)&pio->rxf[sm]; return *rxfifo; } static inline void spi_pico_pio_sm_put32(PIO pio, uint sm, uint32_t data) { io_rw_32 *txfifo = (io_rw_32 *)&pio->txf[sm]; *txfifo = data; } static inline uint32_t spi_pico_pio_sm_get32(PIO pio, uint sm) { io_rw_32 *rxfifo = (io_rw_32 *)&pio->rxf[sm]; return *rxfifo; } static inline int spi_pico_pio_sm_complete(struct spi_pico_pio_data *data) { return (data->pio->sm[data->pio_sm].addr == data->pio_tx_offset); } static int spi_pico_pio_configure(const struct spi_pico_pio_config *dev_cfg, struct spi_pico_pio_data *data, const struct spi_config *spi_cfg) { const struct gpio_dt_spec *clk = NULL; pio_sm_config sm_config; bool lsb = false; uint32_t cpol = 0; uint32_t cpha = 0; uint32_t rc = 0; uint32_t clock_freq; rc = clock_control_on(dev_cfg->clk_dev, dev_cfg->clk_id); if (rc < 0) { LOG_ERR("Failed to enable the clock"); return rc; } rc = clock_control_get_rate(dev_cfg->clk_dev, dev_cfg->clk_id, &clock_freq); if (rc < 0) { LOG_ERR("Failed to get clock frequency"); return rc; } if (spi_context_configured(&data->spi_ctx, spi_cfg)) { return 0; } if (spi_cfg->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } /* Note that SPI_TRANSFER_LSB controls the direction of shift, not the */ /* "endianness" of the data. In MSB mode, the high-order bit of the */ /* most significant byte is sent first; in LSB mode, the low-order */ /* bit of the least-significant byte is sent first. 
*/ if (spi_cfg->operation & SPI_TRANSFER_LSB) { lsb = true; } #if defined(CONFIG_SPI_EXTENDED_MODES) if (spi_cfg->operation & (SPI_LINES_DUAL | SPI_LINES_QUAD | SPI_LINES_OCTAL)) { LOG_ERR("Unsupported configuration"); return -ENOTSUP; } #endif /* CONFIG_SPI_EXTENDED_MODES */ data->bits = SPI_WORD_SIZE_GET(spi_cfg->operation); if ((data->bits != 8) && (data->bits != 16) && (data->bits != 32)) { LOG_ERR("Only 8, 16, and 32 bit word sizes are supported"); return -ENOTSUP; } data->dfs = ((data->bits - 1) / 8) + 1; if (spi_cfg->operation & SPI_CS_ACTIVE_HIGH) { gpio_set_outover(data->spi_ctx.config->cs.gpio.pin, GPIO_OVERRIDE_INVERT); } if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) { cpol = 1; } if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) { cpha = 1; } if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_LOOP) { LOG_ERR("Loopback not supported"); return -ENOTSUP; } #if SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED if (spi_cfg->operation & SPI_HALF_DUPLEX) { if ((cpol != 0) || (cpha != 0)) { LOG_ERR("Only mode (0, 0) supported in 3-wire SIO"); return -ENOTSUP; } if ((spi_cfg->frequency > spi_pico_pio_maximum_clock_frequency( clock_freq, SPI_SIO_MODE_0_0_TX_CYCLES)) || (spi_cfg->frequency < spi_pico_pio_minimum_clock_frequency( clock_freq, SPI_SIO_MODE_0_0_TX_CYCLES))) { LOG_ERR("clock-frequency out of range"); return -EINVAL; } } else if (dev_cfg->sio_gpio.port) { LOG_ERR("SPI_HALF_DUPLEX operation needed for sio-gpios"); return -EINVAL; } #else if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("No sio-gpios defined, half-duplex not enabled"); return -EINVAL; } #endif /* SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED */ clk = &dev_cfg->clk_gpio; data->pio = pio_rpi_pico_get_pio(dev_cfg->piodev); rc = pio_rpi_pico_allocate_sm(dev_cfg->piodev, &data->pio_sm); if (rc < 0) { return rc; } if (dev_cfg->sio_gpio.port) { #if SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED const struct gpio_dt_spec *sio = &dev_cfg->sio_gpio; float clock_div = spi_pico_pio_clock_divisor(clock_freq, 
SPI_SIO_MODE_0_0_TX_CYCLES, spi_cfg->frequency); data->tx_period_ticks = DIV_ROUND_UP((data->bits * CONFIG_SYS_CLOCK_TICKS_PER_SEC), spi_cfg->frequency); data->pio_tx_offset = pio_add_program(data->pio, RPI_PICO_PIO_GET_PROGRAM(spi_sio_mode_0_0_tx)); switch (data->dfs) { case 4: data->pio_rx_offset = pio_add_program( data->pio, RPI_PICO_PIO_GET_PROGRAM(spi_sio_mode_0_0_32_bit_rx)); data->pio_rx_wrap_target = data->pio_rx_offset + RPI_PICO_PIO_GET_WRAP_TARGET(spi_sio_mode_0_0_32_bit_rx); data->pio_rx_wrap = data->pio_rx_offset + RPI_PICO_PIO_GET_WRAP(spi_sio_mode_0_0_32_bit_rx); break; case 2: data->pio_rx_offset = pio_add_program( data->pio, RPI_PICO_PIO_GET_PROGRAM(spi_sio_mode_0_0_16_bit_rx)); data->pio_rx_wrap_target = data->pio_rx_offset + RPI_PICO_PIO_GET_WRAP_TARGET(spi_sio_mode_0_0_16_bit_rx); data->pio_rx_wrap = data->pio_rx_offset + RPI_PICO_PIO_GET_WRAP(spi_sio_mode_0_0_16_bit_rx); break; case 1: data->pio_rx_offset = pio_add_program( data->pio, RPI_PICO_PIO_GET_PROGRAM(spi_sio_mode_0_0_8_bit_rx)); data->pio_rx_wrap_target = data->pio_rx_offset + RPI_PICO_PIO_GET_WRAP_TARGET(spi_sio_mode_0_0_8_bit_rx); data->pio_rx_wrap = data->pio_rx_offset + RPI_PICO_PIO_GET_WRAP(spi_sio_mode_0_0_8_bit_rx); break; default: LOG_ERR("Support for %d transfer size not enabled", (data->dfs * 8)); return -EINVAL; } sm_config = pio_get_default_sm_config(); sm_config_set_clkdiv(&sm_config, clock_div); sm_config_set_in_pins(&sm_config, sio->pin); sm_config_set_in_shift(&sm_config, lsb, false, data->bits); sm_config_set_out_pins(&sm_config, sio->pin, 1); sm_config_set_out_shift(&sm_config, lsb, false, data->bits); hw_set_bits(&data->pio->input_sync_bypass, 1u << sio->pin); sm_config_set_sideset_pins(&sm_config, clk->pin); sm_config_set_sideset(&sm_config, 1, false, false); sm_config_set_wrap( &sm_config, data->pio_tx_offset + RPI_PICO_PIO_GET_WRAP_TARGET(spi_sio_mode_0_0_tx), data->pio_tx_offset + RPI_PICO_PIO_GET_WRAP(spi_sio_mode_0_0_tx)); 
pio_sm_set_pindirs_with_mask(data->pio, data->pio_sm, (BIT(clk->pin) | BIT(sio->pin)), (BIT(clk->pin) | BIT(sio->pin))); pio_sm_set_pins_with_mask(data->pio, data->pio_sm, 0, BIT(clk->pin) | BIT(sio->pin)); pio_gpio_init(data->pio, sio->pin); pio_gpio_init(data->pio, clk->pin); pio_sm_init(data->pio, data->pio_sm, data->pio_tx_offset, &sm_config); pio_sm_set_enabled(data->pio, data->pio_sm, true); #else LOG_ERR("SIO pin requires half-duplex support"); return -EINVAL; #endif /* SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED */ } else { /* 4-wire mode */ const struct gpio_dt_spec *miso = miso = &dev_cfg->miso_gpio; const struct gpio_dt_spec *mosi = &dev_cfg->mosi_gpio; const pio_program_t *program; uint32_t wrap_target; uint32_t wrap; int cycles; if ((cpol == 0) && (cpha == 0)) { program = RPI_PICO_PIO_GET_PROGRAM(spi_mode_0_0); wrap_target = RPI_PICO_PIO_GET_WRAP_TARGET(spi_mode_0_0); wrap = RPI_PICO_PIO_GET_WRAP(spi_mode_0_0); cycles = SPI_MODE_0_0_CYCLES; } else if ((cpol == 1) && (cpha == 1)) { program = RPI_PICO_PIO_GET_PROGRAM(spi_mode_1_1); wrap_target = RPI_PICO_PIO_GET_WRAP_TARGET(spi_mode_1_1); wrap = RPI_PICO_PIO_GET_WRAP(spi_mode_1_1); cycles = SPI_MODE_1_1_CYCLES; } else { LOG_ERR("Not supported: cpol=%d, cpha=%d\n", cpol, cpha); return -ENOTSUP; } if ((spi_cfg->frequency > spi_pico_pio_maximum_clock_frequency(clock_freq, cycles)) || (spi_cfg->frequency < spi_pico_pio_minimum_clock_frequency(clock_freq, cycles))) { LOG_ERR("clock-frequency out of range"); return -EINVAL; } float clock_div = spi_pico_pio_clock_divisor(clock_freq, cycles, spi_cfg->frequency); if (!pio_can_add_program(data->pio, program)) { return -EBUSY; } data->pio_tx_offset = pio_add_program(data->pio, program); sm_config = pio_get_default_sm_config(); sm_config_set_clkdiv(&sm_config, clock_div); sm_config_set_in_pins(&sm_config, miso->pin); sm_config_set_in_shift(&sm_config, lsb, true, data->bits); sm_config_set_out_pins(&sm_config, mosi->pin, 1); sm_config_set_out_shift(&sm_config, lsb, true, 
data->bits); sm_config_set_sideset_pins(&sm_config, clk->pin); sm_config_set_sideset(&sm_config, 1, false, false); sm_config_set_wrap(&sm_config, data->pio_tx_offset + wrap_target, data->pio_tx_offset + wrap); pio_sm_set_consecutive_pindirs(data->pio, data->pio_sm, miso->pin, 1, false); pio_sm_set_pindirs_with_mask(data->pio, data->pio_sm, (BIT(clk->pin) | BIT(mosi->pin)), (BIT(clk->pin) | BIT(mosi->pin))); pio_sm_set_pins_with_mask(data->pio, data->pio_sm, (cpol << clk->pin), BIT(clk->pin) | BIT(mosi->pin)); pio_gpio_init(data->pio, mosi->pin); pio_gpio_init(data->pio, miso->pin); pio_gpio_init(data->pio, clk->pin); pio_sm_init(data->pio, data->pio_sm, data->pio_tx_offset, &sm_config); pio_sm_set_enabled(data->pio, data->pio_sm, true); } data->spi_ctx.config = spi_cfg; return 0; } static void spi_pico_pio_txrx_4_wire(const struct device *dev) { struct spi_pico_pio_data *data = dev->data; const size_t chunk_len = spi_context_max_continuous_chunk(&data->spi_ctx); const void *txbuf = data->spi_ctx.tx_buf; void *rxbuf = data->spi_ctx.rx_buf; uint32_t txrx; size_t fifo_cnt = 0; data->tx_count = 0; data->rx_count = 0; pio_sm_clear_fifos(data->pio, data->pio_sm); while (data->rx_count < chunk_len || data->tx_count < chunk_len) { /* Fill up fifo with available TX data */ while ((!pio_sm_is_tx_fifo_full(data->pio, data->pio_sm)) && data->tx_count < chunk_len && fifo_cnt < PIO_FIFO_DEPTH) { /* Send 0 in the case of read only operation */ txrx = 0; switch (data->dfs) { case 4: { if (txbuf) { txrx = ((uint32_t *)txbuf)[data->tx_count]; } spi_pico_pio_sm_put32(data->pio, data->pio_sm, txrx); data->tx_count += 4; } break; case 2: { if (txbuf) { txrx = ((uint16_t *)txbuf)[data->tx_count]; } spi_pico_pio_sm_put16(data->pio, data->pio_sm, txrx); data->tx_count += 2; } break; case 1: { if (txbuf) { txrx = ((uint8_t *)txbuf)[data->tx_count]; } spi_pico_pio_sm_put8(data->pio, data->pio_sm, txrx); data->tx_count++; } break; default: LOG_ERR("Support fot %d bits not enabled", 
(data->dfs * 8)); break; } fifo_cnt++; } while ((!pio_sm_is_rx_fifo_empty(data->pio, data->pio_sm)) && data->rx_count < chunk_len && fifo_cnt > 0) { switch (data->dfs) { case 4: { txrx = spi_pico_pio_sm_get32(data->pio, data->pio_sm); /* Discard received data if rx buffer not assigned */ if (rxbuf) { ((uint32_t *)rxbuf)[data->rx_count] = (uint32_t)txrx; } data->rx_count += 4; } break; case 2: { txrx = spi_pico_pio_sm_get16(data->pio, data->pio_sm); /* Discard received data if rx buffer not assigned */ if (rxbuf) { ((uint16_t *)rxbuf)[data->rx_count] = (uint16_t)txrx; } data->rx_count += 2; } break; case 1: { txrx = spi_pico_pio_sm_get8(data->pio, data->pio_sm); /* Discard received data if rx buffer not assigned */ if (rxbuf) { ((uint8_t *)rxbuf)[data->rx_count] = (uint8_t)txrx; } data->rx_count++; } break; default: LOG_ERR("Support fot %d bits not enabled", (data->dfs * 8)); break; } fifo_cnt--; } } } static void spi_pico_pio_txrx_3_wire(const struct device *dev) { #if SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED struct spi_pico_pio_data *data = dev->data; const struct spi_pico_pio_config *dev_cfg = dev->config; const void *txbuf = data->spi_ctx.tx_buf; void *rxbuf = data->spi_ctx.rx_buf; uint32_t txrx; int sio_pin = dev_cfg->sio_gpio.pin; uint32_t tx_size = data->spi_ctx.tx_len; /* Number of WORDS to send */ uint32_t rx_size = data->spi_ctx.rx_len; /* Number of WORDS to receive */ data->tx_count = 0; data->rx_count = 0; if (txbuf) { pio_sm_set_enabled(data->pio, data->pio_sm, false); pio_sm_set_wrap(data->pio, data->pio_sm, data->pio_tx_offset + RPI_PICO_PIO_GET_WRAP_TARGET(spi_sio_mode_0_0_tx), data->pio_tx_offset + RPI_PICO_PIO_GET_WRAP(spi_sio_mode_0_0_tx)); pio_sm_clear_fifos(data->pio, data->pio_sm); pio_sm_set_pindirs_with_mask(data->pio, data->pio_sm, BIT(sio_pin), BIT(sio_pin)); pio_sm_restart(data->pio, data->pio_sm); pio_sm_clkdiv_restart(data->pio, data->pio_sm); pio_sm_exec(data->pio, data->pio_sm, pio_encode_jmp(data->pio_tx_offset)); 
pio_sm_set_enabled(data->pio, data->pio_sm, true); while (data->tx_count < tx_size) { /* Fill up fifo with available TX data */ while ((!pio_sm_is_tx_fifo_full(data->pio, data->pio_sm)) && data->tx_count < tx_size) { switch (data->dfs) { case 4: { txrx = ((uint32_t *)txbuf)[data->tx_count]; spi_pico_pio_sm_put32(data->pio, data->pio_sm, txrx); data->tx_count += 4; } break; case 2: { txrx = ((uint16_t *)txbuf)[data->tx_count]; spi_pico_pio_sm_put16(data->pio, data->pio_sm, txrx); data->tx_count += 2; } break; case 1: { txrx = ((uint8_t *)txbuf)[data->tx_count]; spi_pico_pio_sm_put8(data->pio, data->pio_sm, txrx); data->tx_count++; } break; default: LOG_ERR("Support fot %d bits not enabled", (data->dfs * 8)); break; } } } while ((!pio_sm_is_tx_fifo_empty(data->pio, data->pio_sm)) || (!spi_pico_pio_sm_complete(data))) { k_sleep(K_TICKS(data->tx_period_ticks)); } } if (rxbuf) { pio_sm_set_enabled(data->pio, data->pio_sm, false); pio_sm_set_wrap(data->pio, data->pio_sm, data->pio_rx_wrap_target, data->pio_rx_wrap); pio_sm_clear_fifos(data->pio, data->pio_sm); pio_sm_set_pindirs_with_mask(data->pio, data->pio_sm, 0, BIT(sio_pin)); pio_sm_restart(data->pio, data->pio_sm); pio_sm_clkdiv_restart(data->pio, data->pio_sm); pio_sm_put(data->pio, data->pio_sm, rx_size - 1); pio_sm_exec(data->pio, data->pio_sm, pio_encode_out(pio_x, 32)); pio_sm_exec(data->pio, data->pio_sm, pio_encode_jmp(data->pio_rx_offset)); pio_sm_set_enabled(data->pio, data->pio_sm, true); while (data->rx_count < rx_size) { while ((!pio_sm_is_rx_fifo_empty(data->pio, data->pio_sm)) && data->rx_count < rx_size) { switch (data->dfs) { case 4: { txrx = spi_pico_pio_sm_get32(data->pio, data->pio_sm); ((uint32_t *)rxbuf)[data->rx_count] = (uint32_t)txrx; data->rx_count += 4; } break; case 2: { txrx = spi_pico_pio_sm_get16(data->pio, data->pio_sm); ((uint16_t *)rxbuf)[data->rx_count] = (uint16_t)txrx; data->rx_count += 2; } break; case 1: { txrx = spi_pico_pio_sm_get8(data->pio, data->pio_sm); ((uint8_t 
*)rxbuf)[data->rx_count] = (uint8_t)txrx; data->rx_count++; } break; default: LOG_ERR("Support fot %d bits not enabled", (data->dfs * 8)); break; } } } } #else LOG_ERR("SIO pin requires half-duplex support"); #endif /* SPI_RPI_PICO_PIO_HALF_DUPLEX_ENABLED */ } static void spi_pico_pio_txrx(const struct device *dev) { const struct spi_pico_pio_config *dev_cfg = dev->config; /* 3-wire or 4-wire mode? */ if (dev_cfg->sio_gpio.port) { spi_pico_pio_txrx_3_wire(dev); } else { spi_pico_pio_txrx_4_wire(dev); } } static int spi_pico_pio_transceive_impl(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { const struct spi_pico_pio_config *dev_cfg = dev->config; struct spi_pico_pio_data *data = dev->data; struct spi_context *spi_ctx = &data->spi_ctx; int rc = 0; spi_context_lock(spi_ctx, asynchronous, cb, userdata, spi_cfg); rc = spi_pico_pio_configure(dev_cfg, data, spi_cfg); if (rc < 0) { goto error; } spi_context_buffers_setup(spi_ctx, tx_bufs, rx_bufs, data->dfs); spi_context_cs_control(spi_ctx, true); do { spi_pico_pio_txrx(dev); spi_context_update_tx(spi_ctx, 1, data->tx_count); spi_context_update_rx(spi_ctx, 1, data->rx_count); } while (spi_pico_pio_transfer_ongoing(data)); spi_context_cs_control(spi_ctx, false); error: spi_context_release(spi_ctx, rc); return rc; } static int spi_pico_pio_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return spi_pico_pio_transceive_impl(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } int spi_pico_pio_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_pico_pio_data *data = dev->data; spi_context_unlock_unconditionally(&data->spi_ctx); return 0; } static const struct spi_driver_api spi_pico_pio_api = { .transceive = spi_pico_pio_transceive, .release = spi_pico_pio_release, }; 
/*
 * Configure a single devicetree GPIO and log on failure.
 * @tag names the pin ("clk", "mosi", ...) purely for log messages.
 * Returns 0 on success, -ENODEV if the port is not ready, or the
 * gpio_pin_configure_dt() error code.
 */
static int config_gpio(const struct gpio_dt_spec *gpio, const char *tag, int mode)
{
	int rc = 0;

	if (!device_is_ready(gpio->port)) {
		LOG_ERR("GPIO port for %s pin is not ready", tag);
		return -ENODEV;
	}
	rc = gpio_pin_configure_dt(gpio, mode);
	if (rc < 0) {
		LOG_ERR("Couldn't configure %s pin; (%d)", tag, rc);
		return rc;
	}

	return 0;
}

/*
 * Driver init: apply the pinctrl state, configure the clk GPIO plus the
 * optional mosi/miso GPIOs, configure all CS lines, then release the
 * context lock.  Note that any sio-gpios pin (3-wire mode) is set up
 * later, in spi_pico_pio_configure(), not here.
 */
int spi_pico_pio_init(const struct device *dev)
{
	const struct spi_pico_pio_config *dev_cfg = dev->config;
	struct spi_pico_pio_data *data = dev->data;
	int rc;

	rc = pinctrl_apply_state(dev_cfg->pin_cfg, PINCTRL_STATE_DEFAULT);
	if (rc) {
		LOG_ERR("Failed to apply pinctrl state");
		return rc;
	}

	rc = config_gpio(&dev_cfg->clk_gpio, "clk", GPIO_OUTPUT_ACTIVE);
	if (rc < 0) {
		return rc;
	}

	/* mosi/miso are optional: absent in 3-wire (sio-gpios) setups. */
	if (dev_cfg->mosi_gpio.port != NULL) {
		rc = config_gpio(&dev_cfg->mosi_gpio, "mosi", GPIO_OUTPUT);
		if (rc < 0) {
			return rc;
		}
	}

	if (dev_cfg->miso_gpio.port != NULL) {
		rc = config_gpio(&dev_cfg->miso_gpio, "miso", GPIO_INPUT);
		if (rc < 0) {
			return rc;
		}
	}

	rc = spi_context_cs_configure_all(&data->spi_ctx);
	if (rc < 0) {
		LOG_ERR("Failed to configure CS pins: %d", rc);
		return rc;
	}

	spi_context_unlock_unconditionally(&data->spi_ctx);

	return 0;
}

/*
 * Per-instance config/data definitions and device registration.  The
 * BUILD_ASSERTs enforce that each instance has a clock GPIO and either
 * mosi/miso pins (4-wire) or a sio pin (3-wire), but never both.
 */
#define SPI_PICO_PIO_INIT(inst)                                                                    \
	PINCTRL_DT_INST_DEFINE(inst);                                                              \
	static struct spi_pico_pio_config spi_pico_pio_config_##inst = {                           \
		.piodev = DEVICE_DT_GET(DT_INST_PARENT(inst)),                                     \
		.pin_cfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),                                   \
		.clk_gpio = GPIO_DT_SPEC_INST_GET(inst, clk_gpios),                                \
		.mosi_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, mosi_gpios, {0}),                      \
		.miso_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, miso_gpios, {0}),                      \
		.sio_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, sio_gpios, {0}),                        \
		.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(inst)),                               \
		.clk_id = (clock_control_subsys_t)DT_INST_PHA_BY_IDX(inst, clocks, 0, clk_id),     \
	};                                                                                         \
	static struct spi_pico_pio_data spi_pico_pio_data_##inst = {                               \
		SPI_CONTEXT_INIT_LOCK(spi_pico_pio_data_##inst, spi_ctx),                          \
		SPI_CONTEXT_INIT_SYNC(spi_pico_pio_data_##inst, spi_ctx),                          \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), spi_ctx)};                      \
	DEVICE_DT_INST_DEFINE(inst, spi_pico_pio_init, NULL, &spi_pico_pio_data_##inst,            \
			      &spi_pico_pio_config_##inst, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,  \
			      &spi_pico_pio_api);                                                  \
	BUILD_ASSERT(DT_INST_NODE_HAS_PROP(inst, clk_gpios), "Missing clock GPIO");                \
	BUILD_ASSERT(((DT_INST_NODE_HAS_PROP(inst, mosi_gpios)                                     \
		       || DT_INST_NODE_HAS_PROP(inst, miso_gpios))                                 \
		      && (!DT_INST_NODE_HAS_PROP(inst, sio_gpios)))                                \
		     || (DT_INST_NODE_HAS_PROP(inst, sio_gpios)                                    \
			 && !(DT_INST_NODE_HAS_PROP(inst, mosi_gpios)                              \
			      || DT_INST_NODE_HAS_PROP(inst, miso_gpios))),                        \
		     "Invalid GPIO Configuration");

DT_INST_FOREACH_STATUS_OKAY(SPI_PICO_PIO_INIT)
```
/content/code_sandbox/drivers/spi/spi_rpi_pico_pio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,553
```unknown # Gecko SPI configuration option config SPI_GECKO bool "Gecko SPI controller driver" default y depends on DT_HAS_SILABS_GECKO_SPI_USART_ENABLED depends on GPIO select SOC_GECKO_USART help Enable the SPI peripherals on Gecko ```
/content/code_sandbox/drivers/spi/Kconfig.gecko
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
61
```c /* * */ /* * This is not a real SPI driver. It is used to instantiate struct * devices for the "vnd,spi" devicetree compatible used in test code. */ #include <zephyr/kernel.h> #include <zephyr/drivers/spi.h> #define DT_DRV_COMPAT vnd_spi static int vnd_spi_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return -ENOTSUP; } #ifdef CONFIG_SPI_ASYNC static int vnd_spi_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return -ENOTSUP; } #endif static int vnd_spi_release(const struct device *dev, const struct spi_config *spi_cfg) { return -ENOTSUP; } static const struct spi_driver_api vnd_spi_api = { .transceive = vnd_spi_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = vnd_spi_transceive_async, #endif .release = vnd_spi_release, }; #define VND_SPI_INIT(n) \ DEVICE_DT_INST_DEFINE(n, NULL, NULL, NULL, NULL, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &vnd_spi_api); DT_INST_FOREACH_STATUS_OKAY(VND_SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_test.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
327
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_SPI_SPI_LL_STM32_H_ #define ZEPHYR_DRIVERS_SPI_SPI_LL_STM32_H_ #include "spi_context.h" typedef void (*irq_config_func_t)(const struct device *port); /* This symbol takes the value 1 if one of the device instances */ /* is configured in dts with a domain clock */ #if STM32_DT_INST_DEV_DOMAIN_CLOCK_SUPPORT #define STM32_SPI_DOMAIN_CLOCK_SUPPORT 1 #else #define STM32_SPI_DOMAIN_CLOCK_SUPPORT 0 #endif struct spi_stm32_config { SPI_TypeDef *spi; const struct pinctrl_dev_config *pcfg; #ifdef CONFIG_SPI_STM32_INTERRUPT irq_config_func_t irq_config; #endif #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz) bool use_subghzspi_nss; #endif #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) int midi_clocks; int mssi_clocks; #endif size_t pclk_len; const struct stm32_pclken *pclken; bool fifo_enabled; }; #ifdef CONFIG_SPI_STM32_DMA #define SPI_STM32_DMA_ERROR_FLAG 0x01 #define SPI_STM32_DMA_RX_DONE_FLAG 0x02 #define SPI_STM32_DMA_TX_DONE_FLAG 0x04 #define SPI_STM32_DMA_DONE_FLAG \ (SPI_STM32_DMA_RX_DONE_FLAG | SPI_STM32_DMA_TX_DONE_FLAG) #define SPI_STM32_DMA_TX 0x01 #define SPI_STM32_DMA_RX 0x02 struct stream { const struct device *dma_dev; uint32_t channel; /* stores the channel for dma or mux */ struct dma_config dma_cfg; struct dma_block_config dma_blk_cfg; uint8_t priority; bool src_addr_increment; bool dst_addr_increment; int fifo_threshold; }; #endif struct spi_stm32_data { struct spi_context ctx; #ifdef CONFIG_SPI_STM32_DMA struct k_sem status_sem; volatile uint32_t status_flags; struct stream dma_rx; struct stream dma_tx; #endif /* CONFIG_SPI_STM32_DMA */ bool pm_policy_state_on; }; #ifdef CONFIG_SPI_STM32_DMA static inline uint32_t ll_func_dma_get_reg_addr(SPI_TypeDef *spi, uint32_t location) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) if (location == SPI_STM32_DMA_TX) { /* use direct register location until the LL_SPI_DMA_GetTxRegAddr exists */ return (uint32_t)&(spi->TXDR); } /* use direct register location until the 
LL_SPI_DMA_GetRxRegAddr exists */ return (uint32_t)&(spi->RXDR); #else ARG_UNUSED(location); return (uint32_t)LL_SPI_DMA_GetRegAddr(spi); #endif /* st_stm32h7_spi */ } /* checks that DMA Tx packet is fully transmitted over the SPI */ static inline uint32_t ll_func_spi_dma_busy(SPI_TypeDef *spi) { #ifdef LL_SPI_SR_TXC return LL_SPI_IsActiveFlag_TXC(spi); #else /* the SPI Tx empty and busy flags are needed */ return (LL_SPI_IsActiveFlag_TXE(spi) && !LL_SPI_IsActiveFlag_BSY(spi)); #endif /* LL_SPI_SR_TXC */ } #endif /* CONFIG_SPI_STM32_DMA */ static inline uint32_t ll_func_tx_is_not_full(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) return LL_SPI_IsActiveFlag_TXP(spi); #else return LL_SPI_IsActiveFlag_TXE(spi); #endif /* st_stm32h7_spi */ } static inline uint32_t ll_func_rx_is_not_empty(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) return LL_SPI_IsActiveFlag_RXP(spi); #else return LL_SPI_IsActiveFlag_RXNE(spi); #endif /* st_stm32h7_spi */ } static inline void ll_func_enable_int_tx_empty(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) LL_SPI_EnableIT_TXP(spi); #else LL_SPI_EnableIT_TXE(spi); #endif /* st_stm32h7_spi */ } static inline void ll_func_enable_int_rx_not_empty(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) LL_SPI_EnableIT_RXP(spi); #else LL_SPI_EnableIT_RXNE(spi); #endif /* st_stm32h7_spi */ } static inline void ll_func_enable_int_errors(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) LL_SPI_EnableIT_UDR(spi); LL_SPI_EnableIT_OVR(spi); LL_SPI_EnableIT_CRCERR(spi); LL_SPI_EnableIT_FRE(spi); LL_SPI_EnableIT_MODF(spi); #else LL_SPI_EnableIT_ERR(spi); #endif /* st_stm32h7_spi */ } static inline void ll_func_disable_int_tx_empty(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) LL_SPI_DisableIT_TXP(spi); #else LL_SPI_DisableIT_TXE(spi); #endif /* st_stm32h7_spi */ } static inline void ll_func_disable_int_rx_not_empty(SPI_TypeDef *spi) { #if 
DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) LL_SPI_DisableIT_RXP(spi); #else LL_SPI_DisableIT_RXNE(spi); #endif /* st_stm32h7_spi */ } static inline void ll_func_disable_int_errors(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) LL_SPI_DisableIT_UDR(spi); LL_SPI_DisableIT_OVR(spi); LL_SPI_DisableIT_CRCERR(spi); LL_SPI_DisableIT_FRE(spi); LL_SPI_DisableIT_MODF(spi); #else LL_SPI_DisableIT_ERR(spi); #endif /* st_stm32h7_spi */ } static inline uint32_t ll_func_spi_is_busy(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) if (LL_SPI_GetTransferSize(spi) == 0) { return LL_SPI_IsActiveFlag_TXC(spi) == 0; } else { return LL_SPI_IsActiveFlag_EOT(spi) == 0; } #else return LL_SPI_IsActiveFlag_BSY(spi); #endif /* st_stm32h7_spi */ } /* Header is compiled first, this switch avoid the compiler to lookup for * non-existing LL FIFO functions for SoC without SPI FIFO */ #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo) static inline void ll_func_set_fifo_threshold_8bit(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) LL_SPI_SetFIFOThreshold(spi, LL_SPI_FIFO_TH_01DATA); #else LL_SPI_SetRxFIFOThreshold(spi, LL_SPI_RX_FIFO_TH_QUARTER); #endif /* st_stm32h7_spi */ } static inline void ll_func_set_fifo_threshold_16bit(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) LL_SPI_SetFIFOThreshold(spi, LL_SPI_FIFO_TH_02DATA); #else LL_SPI_SetRxFIFOThreshold(spi, LL_SPI_RX_FIFO_TH_HALF); #endif /* st_stm32h7_spi */ } #endif /* st_stm32_spi_fifo */ static inline void ll_func_disable_spi(SPI_TypeDef *spi) { #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) if (LL_SPI_IsActiveMasterTransfer(spi)) { LL_SPI_SuspendMasterTransfer(spi); while (LL_SPI_IsActiveMasterTransfer(spi)) { /* NOP */ } } LL_SPI_Disable(spi); while (LL_SPI_IsEnabled(spi)) { /* NOP */ } /* Flush RX buffer */ while (LL_SPI_IsActiveFlag_RXP(spi)) { (void)LL_SPI_ReceiveData8(spi); } LL_SPI_ClearFlag_SUSP(spi); #else LL_SPI_Disable(spi); #endif /* st_stm32h7_spi */ } #endif 
/* ZEPHYR_DRIVERS_SPI_SPI_LL_STM32_H_ */ ```
/content/code_sandbox/drivers/spi/spi_ll_stm32.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,802
```unknown # TI CC13xx / CC26xx SPI configuration options config SPI_CC13XX_CC26XX bool "TI SimpleLink CC13xx / CC26xx SPI driver" default y depends on DT_HAS_TI_CC13XX_CC26XX_SPI_ENABLED help Enable support for the TI SimpleLink CC13xx / CC26xx SPI peripheral ```
/content/code_sandbox/drivers/spi/Kconfig.cc13xx_cc26xx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
76
```c /* * */ #define DT_DRV_COMPAT zephyr_spi_bitbang #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_bitbang); #include <zephyr/sys/sys_io.h> #include <zephyr/drivers/spi.h> #include "spi_context.h" struct spi_bitbang_data { struct spi_context ctx; int bits; int wait_us; int dfs; }; struct spi_bitbang_config { struct gpio_dt_spec clk_gpio; struct gpio_dt_spec mosi_gpio; struct gpio_dt_spec miso_gpio; }; static int spi_bitbang_configure(const struct spi_bitbang_config *info, struct spi_bitbang_data *data, const struct spi_config *config) { if (config->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } if (config->operation & (SPI_TRANSFER_LSB | SPI_LINES_DUAL | SPI_LINES_QUAD)) { LOG_ERR("Unsupported configuration"); return -ENOTSUP; } const int bits = SPI_WORD_SIZE_GET(config->operation); if (bits > 16) { LOG_ERR("Word sizes > 16 bits not supported"); return -ENOTSUP; } data->bits = bits; data->dfs = ((data->bits - 1) / 8) + 1; if (config->frequency > 0) { /* convert freq to period, the extra /2 is due to waiting * twice in each clock cycle. The '2000' is an upscale factor. 
*/ data->wait_us = (1000000ul * 2000ul / config->frequency) / 2000ul; data->wait_us /= 2; } else { data->wait_us = 8 / 2; /* 125 kHz */ } data->ctx.config = config; return 0; } static int spi_bitbang_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_bitbang_config *info = dev->config; struct spi_bitbang_data *data = dev->data; struct spi_context *ctx = &data->ctx; int rc; const struct gpio_dt_spec *miso = NULL; const struct gpio_dt_spec *mosi = NULL; gpio_flags_t mosi_flags = GPIO_OUTPUT_INACTIVE; rc = spi_bitbang_configure(info, data, spi_cfg); if (rc < 0) { return rc; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { if (!info->mosi_gpio.port) { LOG_ERR("No MOSI pin specified in half duplex mode"); return -EINVAL; } if (tx_bufs && rx_bufs) { LOG_ERR("Both RX and TX specified in half duplex mode"); return -EINVAL; } else if (tx_bufs && !rx_bufs) { /* TX mode */ mosi = &info->mosi_gpio; } else if (!tx_bufs && rx_bufs) { /* RX mode */ mosi_flags = GPIO_INPUT; miso = &info->mosi_gpio; } } else { if (info->mosi_gpio.port) { mosi = &info->mosi_gpio; } if (info->miso_gpio.port) { miso = &info->miso_gpio; } } if (info->mosi_gpio.port) { rc = gpio_pin_configure_dt(&info->mosi_gpio, mosi_flags); if (rc < 0) { LOG_ERR("Couldn't configure MOSI pin: %d", rc); return rc; } } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs); int clock_state = 0; int cpha = 0; bool loop = false; if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) { clock_state = 1; } if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) { cpha = 1; } if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_LOOP) { loop = true; } /* set the initial clock state before CS */ gpio_pin_set_dt(&info->clk_gpio, clock_state); spi_context_cs_control(ctx, true); const uint32_t wait_us = data->wait_us; while (spi_context_tx_buf_on(ctx) || spi_context_rx_buf_on(ctx)) { uint16_t w = 0; if (ctx->tx_len) { switch 
(data->dfs) { case 2: w = *(uint16_t *)(ctx->tx_buf); break; case 1: w = *(uint8_t *)(ctx->tx_buf); break; } } int shift = data->bits - 1; uint16_t r = 0; int b = 0; bool do_read = false; if (miso && spi_context_rx_buf_on(ctx)) { do_read = true; } while (shift >= 0) { const int d = (w >> shift) & 0x1; b = 0; /* setup data out first thing */ if (mosi) { gpio_pin_set_dt(mosi, d); } k_busy_wait(wait_us); if (!loop && do_read && !cpha) { b = gpio_pin_get_dt(miso); } /* first (leading) clock edge */ gpio_pin_set_dt(&info->clk_gpio, !clock_state); k_busy_wait(wait_us); if (!loop && do_read && cpha) { b = gpio_pin_get_dt(miso); } /* second (trailing) clock edge */ gpio_pin_set_dt(&info->clk_gpio, clock_state); if (loop) { b = d; } r = (r << 1) | (b ? 0x1 : 0x0); --shift; } if (spi_context_rx_buf_on(ctx)) { switch (data->dfs) { case 2: *(uint16_t *)(ctx->rx_buf) = r; break; case 1: *(uint8_t *)(ctx->rx_buf) = r; break; } } LOG_DBG(" w: %04x, r: %04x , do_read: %d", w, r, do_read); spi_context_update_tx(ctx, data->dfs, 1); spi_context_update_rx(ctx, data->dfs, 1); } spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, 0); return 0; } #ifdef CONFIG_SPI_ASYNC static int spi_bitbang_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return -ENOTSUP; } #endif int spi_bitbang_release(const struct device *dev, const struct spi_config *config) { struct spi_bitbang_data *data = dev->data; struct spi_context *ctx = &data->ctx; spi_context_unlock_unconditionally(ctx); return 0; } static const struct spi_driver_api spi_bitbang_api = { .transceive = spi_bitbang_transceive, .release = spi_bitbang_release, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_bitbang_transceive_async, #endif /* CONFIG_SPI_ASYNC */ }; int spi_bitbang_init(const struct device *dev) { const struct spi_bitbang_config *config = dev->config; struct spi_bitbang_data *data 
= dev->data; int rc; if (!gpio_is_ready_dt(&config->clk_gpio)) { LOG_ERR("GPIO port for clk pin is not ready"); return -ENODEV; } rc = gpio_pin_configure_dt(&config->clk_gpio, GPIO_OUTPUT_INACTIVE); if (rc < 0) { LOG_ERR("Couldn't configure clk pin; (%d)", rc); return rc; } if (config->mosi_gpio.port != NULL) { if (!gpio_is_ready_dt(&config->mosi_gpio)) { LOG_ERR("GPIO port for mosi pin is not ready"); return -ENODEV; } rc = gpio_pin_configure_dt(&config->mosi_gpio, GPIO_OUTPUT_INACTIVE); if (rc < 0) { LOG_ERR("Couldn't configure mosi pin; (%d)", rc); return rc; } } if (config->miso_gpio.port != NULL) { if (!gpio_is_ready_dt(&config->miso_gpio)) { LOG_ERR("GPIO port for miso pin is not ready"); return -ENODEV; } rc = gpio_pin_configure_dt(&config->miso_gpio, GPIO_INPUT); if (rc < 0) { LOG_ERR("Couldn't configure miso pin; (%d)", rc); return rc; } } rc = spi_context_cs_configure_all(&data->ctx); if (rc < 0) { LOG_ERR("Failed to configure CS pins: %d", rc); return rc; } return 0; } #define SPI_BITBANG_INIT(inst) \ static struct spi_bitbang_config spi_bitbang_config_##inst = { \ .clk_gpio = GPIO_DT_SPEC_INST_GET(inst, clk_gpios), \ .mosi_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, mosi_gpios, {0}), \ .miso_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, miso_gpios, {0}), \ }; \ \ static struct spi_bitbang_data spi_bitbang_data_##inst = { \ SPI_CONTEXT_INIT_LOCK(spi_bitbang_data_##inst, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_bitbang_data_##inst, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), ctx) \ }; \ \ DEVICE_DT_INST_DEFINE(inst, \ spi_bitbang_init, \ NULL, \ &spi_bitbang_data_##inst, \ &spi_bitbang_config_##inst, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_bitbang_api); DT_INST_FOREACH_STATUS_OKAY(SPI_BITBANG_INIT) ```
/content/code_sandbox/drivers/spi/spi_bitbang.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,330
```unknown # STM32 SPI driver configuration options menuconfig SPI_STM32 bool "STM32 MCU SPI controller driver" default y depends on DT_HAS_ST_STM32_SPI_ENABLED select USE_STM32_LL_SPI help Enable SPI support on the STM32 family of processors. if SPI_STM32 config SPI_STM32_INTERRUPT bool "STM32 MCU SPI Interrupt Support" help Enable Interrupt support for the SPI Driver of STM32 family. config SPI_STM32_DMA bool "STM32 MCU SPI DMA Support" select DMA select CACHE_MANAGEMENT if CPU_HAS_DCACHE help Enable the SPI DMA mode for SPI instances that enable dma channels in their device tree node. config SPI_STM32_USE_HW_SS bool "STM32 Hardware Slave Select support" default y help Use Slave Select pin instead of software Slave Select. config SPI_STM32_ERRATA_BUSY bool default y depends on SOC_SERIES_STM32F7X || SOC_SERIES_STM32L4X help Handles erratum "BSY bit may stay high at the end of a data transfer in slave mode". Seen for instance in Errata Sheet 0290 2.11.2 if SPI_STM32_ERRATA_BUSY config SPI_STM32_BUSY_FLAG_TIMEOUT int "timeout in us for the STM32 busy flag workaround" default 10000 endif # SPI_STM32_ERRATA_BUSY endif # SPI_STM32 ```
/content/code_sandbox/drivers/spi/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
319
```c /* * */ #define DT_DRV_COMPAT ambiq_spi #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_ambiq); #include <zephyr/drivers/spi.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/pm/device_runtime.h> #include <stdlib.h> #include <errno.h> #include "spi_context.h" #include <am_mcu_apollo.h> #define PWRCTRL_MAX_WAIT_US 5 typedef int (*ambiq_spi_pwr_func_t)(void); struct spi_ambiq_config { uint32_t base; int size; uint32_t clock_freq; const struct pinctrl_dev_config *pcfg; ambiq_spi_pwr_func_t pwr_func; void (*irq_config_func)(void); }; struct spi_ambiq_data { struct spi_context ctx; am_hal_iom_config_t iom_cfg; void *iom_handler; int inst_idx; bool cont; }; typedef void (*spi_context_update_trx)(struct spi_context *ctx, uint8_t dfs, uint32_t len); #define SPI_WORD_SIZE 8 #define SPI_CS_INDEX 3 #ifdef CONFIG_SPI_AMBIQ_DMA static __aligned(32) struct { __aligned(32) uint32_t buf[CONFIG_SPI_DMA_TCB_BUFFER_SIZE]; } spi_dma_tcb_buf[DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT)] __attribute__((__section__(".nocache"))); static void spi_ambiq_callback(void *callback_ctxt, uint32_t status) { const struct device *dev = callback_ctxt; struct spi_ambiq_data *data = dev->data; struct spi_context *ctx = &data->ctx; /* de-assert cs until transfer finished and no need to hold cs */ if (!data->cont) { spi_context_cs_control(ctx, false); } spi_context_complete(ctx, dev, (status == AM_HAL_STATUS_SUCCESS) ? 
0 : -EIO); } #endif static void spi_ambiq_reset(const struct device *dev) { struct spi_ambiq_data *data = dev->data; struct spi_context *ctx = &data->ctx; /* cancel timed out transaction */ am_hal_iom_disable(data->iom_handler); /* NULL config to trigger reconfigure on next xfer */ ctx->config = NULL; spi_context_cs_control(ctx, false); /* signal any thread waiting on sync semaphore */ spi_context_complete(ctx, dev, -ETIMEDOUT); /* clean up for next xfer */ k_sem_reset(&ctx->sync); } static void spi_ambiq_isr(const struct device *dev) { uint32_t ui32Status; struct spi_ambiq_data *data = dev->data; am_hal_iom_interrupt_status_get(data->iom_handler, false, &ui32Status); am_hal_iom_interrupt_clear(data->iom_handler, ui32Status); am_hal_iom_interrupt_service(data->iom_handler, ui32Status); } static int spi_config(const struct device *dev, const struct spi_config *config) { struct spi_ambiq_data *data = dev->data; const struct spi_ambiq_config *cfg = dev->config; struct spi_context *ctx = &(data->ctx); data->iom_cfg.eInterfaceMode = AM_HAL_IOM_SPI_MODE; int ret = 0; if (spi_context_configured(ctx, config)) { /* Already configured. No need to do it again. 
*/ return 0; } if (SPI_WORD_SIZE_GET(config->operation) != SPI_WORD_SIZE) { LOG_ERR("Word size must be %d", SPI_WORD_SIZE); return -ENOTSUP; } if ((config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only supports single mode"); return -ENOTSUP; } if (config->operation & SPI_LOCK_ON) { LOG_ERR("Lock On not supported"); return -ENOTSUP; } if (config->operation & SPI_TRANSFER_LSB) { LOG_ERR("LSB first not supported"); return -ENOTSUP; } if (config->operation & SPI_MODE_CPOL) { if (config->operation & SPI_MODE_CPHA) { data->iom_cfg.eSpiMode = AM_HAL_IOM_SPI_MODE_3; } else { data->iom_cfg.eSpiMode = AM_HAL_IOM_SPI_MODE_2; } } else { if (config->operation & SPI_MODE_CPHA) { data->iom_cfg.eSpiMode = AM_HAL_IOM_SPI_MODE_1; } else { data->iom_cfg.eSpiMode = AM_HAL_IOM_SPI_MODE_0; } } if (config->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } if (config->operation & SPI_MODE_LOOP) { LOG_ERR("Loopback mode not supported"); return -ENOTSUP; } if (cfg->clock_freq > AM_HAL_IOM_MAX_FREQ) { LOG_ERR("Clock frequency too high"); return -ENOTSUP; } /* Select slower of two: SPI bus frequency for SPI device or SPI master clock frequency */ data->iom_cfg.ui32ClockFreq = (config->frequency ? 
MIN(config->frequency, cfg->clock_freq) : cfg->clock_freq); ctx->config = config; #ifdef CONFIG_SPI_AMBIQ_DMA data->iom_cfg.pNBTxnBuf = spi_dma_tcb_buf[data->inst_idx].buf; data->iom_cfg.ui32NBTxnBufLength = CONFIG_SPI_DMA_TCB_BUFFER_SIZE; #endif /* Disable IOM instance as it cannot be configured when enabled*/ ret = am_hal_iom_disable(data->iom_handler); ret = am_hal_iom_configure(data->iom_handler, &data->iom_cfg); ret = am_hal_iom_enable(data->iom_handler); return ret; } static int spi_ambiq_xfer_half_duplex(const struct device *dev, am_hal_iom_dir_e dir) { am_hal_iom_transfer_t trans = {0}; struct spi_ambiq_data *data = dev->data; struct spi_context *ctx = &data->ctx; bool is_last = false; uint32_t rem_num, cur_num = 0; int ret = 0; spi_context_update_trx ctx_update; if (dir == AM_HAL_IOM_FULLDUPLEX) { return -EINVAL; } else if (dir == AM_HAL_IOM_RX) { trans.eDirection = AM_HAL_IOM_RX; ctx_update = spi_context_update_rx; } else { trans.eDirection = AM_HAL_IOM_TX; ctx_update = spi_context_update_tx; } if (dir == AM_HAL_IOM_RX) { rem_num = ctx->rx_len; } else { rem_num = ctx->tx_len; } while (rem_num) { cur_num = (rem_num > AM_HAL_IOM_MAX_TXNSIZE_SPI) ? AM_HAL_IOM_MAX_TXNSIZE_SPI : rem_num; trans.ui32NumBytes = cur_num; trans.pui32TxBuffer = (uint32_t *)ctx->tx_buf; trans.pui32RxBuffer = (uint32_t *)ctx->rx_buf; ctx_update(ctx, 1, cur_num); if ((!spi_context_tx_buf_on(ctx)) && (!spi_context_rx_buf_on(ctx))) { is_last = true; } #ifdef CONFIG_SPI_AMBIQ_DMA if (AM_HAL_STATUS_SUCCESS != am_hal_iom_nonblocking_transfer(data->iom_handler, &trans, ((is_last == true) ? 
spi_ambiq_callback : NULL), (void *)dev)) { return -EIO; } if (is_last) { ret = spi_context_wait_for_completion(ctx); } #else ret = am_hal_iom_blocking_transfer(data->iom_handler, &trans); #endif rem_num -= cur_num; if (ret != 0) { return -EIO; } } return 0; } static int spi_ambiq_xfer_full_duplex(const struct device *dev) { am_hal_iom_transfer_t trans = {0}; struct spi_ambiq_data *data = dev->data; struct spi_context *ctx = &data->ctx; bool trx_once = (ctx->tx_len == ctx->rx_len); int ret = 0; /* Tx and Rx length must be the same for am_hal_iom_spi_blocking_fullduplex */ trans.eDirection = AM_HAL_IOM_FULLDUPLEX; trans.ui32NumBytes = MIN(ctx->rx_len, ctx->tx_len); trans.pui32RxBuffer = (uint32_t *)ctx->rx_buf; trans.pui32TxBuffer = (uint32_t *)ctx->tx_buf; spi_context_update_tx(ctx, 1, trans.ui32NumBytes); spi_context_update_rx(ctx, 1, trans.ui32NumBytes); ret = am_hal_iom_spi_blocking_fullduplex(data->iom_handler, &trans); if (ret != 0) { return -EIO; } /* Transfer the remaining bytes */ if (!trx_once) { spi_context_update_trx ctx_update; if (ctx->tx_len) { trans.eDirection = AM_HAL_IOM_TX; trans.ui32NumBytes = ctx->tx_len; trans.pui32TxBuffer = (uint32_t *)ctx->tx_buf; ctx_update = spi_context_update_tx; } else { trans.eDirection = AM_HAL_IOM_RX; trans.ui32NumBytes = ctx->rx_len; trans.pui32RxBuffer = (uint32_t *)ctx->rx_buf; ctx_update = spi_context_update_rx; } ret = am_hal_iom_blocking_transfer(data->iom_handler, &trans); ctx_update(ctx, 1, trans.ui32NumBytes); if (ret != 0) { return -EIO; } } return 0; } static int spi_ambiq_xfer(const struct device *dev, const struct spi_config *config) { struct spi_ambiq_data *data = dev->data; struct spi_context *ctx = &data->ctx; int ret = 0; data->cont = (config->operation & SPI_HOLD_ON_CS) ? 
true : false; spi_context_cs_control(ctx, true); while (1) { if (spi_context_tx_buf_on(ctx) && spi_context_rx_buf_on(ctx)) { if (ctx->rx_buf == ctx->tx_buf) { spi_context_update_rx(ctx, 1, ctx->rx_len); } else if (!(config->operation & SPI_HALF_DUPLEX)) { ret = spi_ambiq_xfer_full_duplex(dev); if (ret != 0) { spi_ambiq_reset(dev); LOG_ERR("SPI full-duplex comm error: %d", ret); return ret; } } } if (spi_context_tx_on(ctx)) { if (ctx->tx_buf == NULL) { spi_context_update_tx(ctx, 1, ctx->tx_len); } else { ret = spi_ambiq_xfer_half_duplex(dev, AM_HAL_IOM_TX); if (ret != 0) { spi_ambiq_reset(dev); LOG_ERR("SPI TX comm error: %d", ret); return ret; } } } else if (spi_context_rx_on(ctx)) { if (ctx->rx_buf == NULL) { spi_context_update_rx(ctx, 1, ctx->rx_len); } else { ret = spi_ambiq_xfer_half_duplex(dev, AM_HAL_IOM_RX); if (ret != 0) { spi_ambiq_reset(dev); LOG_ERR("SPI Rx comm error: %d", ret); return ret; } } } else { break; } } #ifndef CONFIG_SPI_AMBIQ_DMA if (!data->cont) { spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, ret); } #endif return ret; } static int spi_ambiq_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_ambiq_data *data = dev->data; int ret; if (!tx_bufs && !rx_bufs) { return 0; } ret = pm_device_runtime_get(dev); if (ret < 0) { LOG_ERR("pm_device_runtime_get failed: %d", ret); } /* context setup */ spi_context_lock(&data->ctx, false, NULL, NULL, config); ret = spi_config(dev, config); if (ret) { spi_context_release(&data->ctx, ret); return ret; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); ret = spi_ambiq_xfer(dev, config); spi_context_release(&data->ctx, ret); /* Use async put to avoid useless device suspension/resumption * when doing consecutive transmission. 
*/ ret = pm_device_runtime_put_async(dev, K_MSEC(2)); if (ret < 0) { LOG_ERR("pm_device_runtime_put failed: %d", ret); } return ret; } static int spi_ambiq_release(const struct device *dev, const struct spi_config *config) { struct spi_ambiq_data *data = dev->data; am_hal_iom_status_t iom_status; am_hal_iom_status_get(data->iom_handler, &iom_status); if ((iom_status.bStatIdle != IOM0_STATUS_IDLEST_IDLE) || (iom_status.bStatCmdAct == IOM0_STATUS_CMDACT_ACTIVE) || (iom_status.ui32NumPendTransactions)) { return -EBUSY; } spi_context_unlock_unconditionally(&data->ctx); return 0; } static const struct spi_driver_api spi_ambiq_driver_api = { .transceive = spi_ambiq_transceive, .release = spi_ambiq_release, }; static int spi_ambiq_init(const struct device *dev) { struct spi_ambiq_data *data = dev->data; const struct spi_ambiq_config *cfg = dev->config; int ret = 0; if (AM_HAL_STATUS_SUCCESS != am_hal_iom_initialize((cfg->base - IOM0_BASE) / cfg->size, &data->iom_handler)) { LOG_ERR("Fail to initialize SPI\n"); return -ENXIO; } ret = cfg->pwr_func(); ret |= pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); ret |= spi_context_cs_configure_all(&data->ctx); if (ret < 0) { LOG_ERR("Fail to config SPI pins\n"); goto end; } #ifdef CONFIG_SPI_AMBIQ_DMA am_hal_iom_interrupt_clear(data->iom_handler, AM_HAL_IOM_INT_CQUPD | AM_HAL_IOM_INT_ERR); am_hal_iom_interrupt_enable(data->iom_handler, AM_HAL_IOM_INT_CQUPD | AM_HAL_IOM_INT_ERR); cfg->irq_config_func(); #endif end: if (ret < 0) { am_hal_iom_uninitialize(data->iom_handler); } else { spi_context_unlock_unconditionally(&data->ctx); } return ret; } #ifdef CONFIG_PM_DEVICE static int spi_ambiq_pm_action(const struct device *dev, enum pm_device_action action) { struct spi_ambiq_data *data = dev->data; uint32_t ret; am_hal_sysctrl_power_state_e status; switch (action) { case PM_DEVICE_ACTION_RESUME: status = AM_HAL_SYSCTRL_WAKE; break; case PM_DEVICE_ACTION_SUSPEND: status = AM_HAL_SYSCTRL_DEEPSLEEP; break; default: return 
-ENOTSUP; } ret = am_hal_iom_power_ctrl(data->iom_handler, status, true); if (ret != AM_HAL_STATUS_SUCCESS) { LOG_ERR("am_hal_iom_power_ctrl failed: %d", ret); return -EPERM; } else { return 0; } } #endif /* CONFIG_PM_DEVICE */ #define AMBIQ_SPI_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static int pwr_on_ambiq_spi_##n(void) \ { \ uint32_t addr = DT_REG_ADDR(DT_INST_PHANDLE(n, ambiq_pwrcfg)) + \ DT_INST_PHA(n, ambiq_pwrcfg, offset); \ sys_write32((sys_read32(addr) | DT_INST_PHA(n, ambiq_pwrcfg, mask)), addr); \ k_busy_wait(PWRCTRL_MAX_WAIT_US); \ return 0; \ } \ static void spi_irq_config_func_##n(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_ambiq_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ }; \ static struct spi_ambiq_data spi_ambiq_data##n = { \ SPI_CONTEXT_INIT_LOCK(spi_ambiq_data##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_ambiq_data##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx).inst_idx = n}; \ static const struct spi_ambiq_config spi_ambiq_config##n = { \ .base = DT_INST_REG_ADDR(n), \ .size = DT_INST_REG_SIZE(n), \ .clock_freq = DT_INST_PROP(n, clock_frequency), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .irq_config_func = spi_irq_config_func_##n, \ .pwr_func = pwr_on_ambiq_spi_##n}; \ PM_DEVICE_DT_INST_DEFINE(n, spi_ambiq_pm_action); \ DEVICE_DT_INST_DEFINE(n, spi_ambiq_init, PM_DEVICE_DT_INST_GET(n), &spi_ambiq_data##n, \ &spi_ambiq_config##n, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ &spi_ambiq_driver_api); DT_INST_FOREACH_STATUS_OKAY(AMBIQ_SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_ambiq.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,076
```objective-c /* spi_dw.h - Designware SPI driver private definitions */ /* * */ #ifndef ZEPHYR_DRIVERS_SPI_SPI_DW_H_ #define ZEPHYR_DRIVERS_SPI_SPI_DW_H_ #include <string.h> #include <zephyr/device.h> #include <zephyr/drivers/spi.h> #include "spi_context.h" #ifdef __cplusplus extern "C" { #endif typedef void (*spi_dw_config_t)(void); typedef uint32_t (*spi_dw_read_t)(uint8_t size, mm_reg_t addr, uint32_t off); typedef void (*spi_dw_write_t)(uint8_t size, uint32_t data, mm_reg_t addr, uint32_t off); typedef void (*spi_dw_set_bit_t)(uint8_t bit, mm_reg_t addr, uint32_t off); typedef void (*spi_dw_clear_bit_t)(uint8_t bit, mm_reg_t addr, uint32_t off); typedef int (*spi_dw_test_bit_t)(uint8_t bit, mm_reg_t addr, uint32_t off); /* Private structures */ struct spi_dw_config { DEVICE_MMIO_ROM; uint32_t clock_frequency; spi_dw_config_t config_func; bool serial_target; uint8_t fifo_depth; uint8_t max_xfer_size; #ifdef CONFIG_PINCTRL const struct pinctrl_dev_config *pcfg; #endif spi_dw_read_t read_func; spi_dw_write_t write_func; spi_dw_set_bit_t set_bit_func; spi_dw_clear_bit_t clear_bit_func; spi_dw_test_bit_t test_bit_func; }; struct spi_dw_data { DEVICE_MMIO_RAM; struct spi_context ctx; uint8_t dfs; /* dfs in bytes: 1,2 or 4 */ uint8_t fifo_diff; /* cannot be bigger than FIFO depth */ }; /* Register operation functions */ #define DT_INST_NODE_PROP_NOT_OR(inst, prop) \ !DT_INST_PROP(inst, prop) || #define DT_ANY_INST_NOT_PROP_STATUS_OKAY(prop) \ (DT_INST_FOREACH_STATUS_OKAY_VARGS(DT_INST_NODE_PROP_NOT_OR, prop) 0) #define DT_INST_NODE_PROP_AND_OR(inst, prop) \ DT_INST_PROP(inst, prop) || #define DT_ANY_INST_PROP_STATUS_OKAY(prop) \ (DT_INST_FOREACH_STATUS_OKAY_VARGS(DT_INST_NODE_PROP_AND_OR, prop) 0) #if DT_ANY_INST_PROP_STATUS_OKAY(aux_reg) static uint32_t aux_reg_read(uint8_t size, mm_reg_t addr, uint32_t off) { ARG_UNUSED(size); return sys_in32(addr + off/4); } static void aux_reg_write(uint8_t size, uint32_t data, mm_reg_t addr, uint32_t off) { ARG_UNUSED(size); 
sys_out32(data, addr + off/4); } static void aux_reg_set_bit(uint8_t bit, mm_reg_t addr, uint32_t off) { sys_io_set_bit(addr + off/4, bit); } static void aux_reg_clear_bit(uint8_t bit, mm_reg_t addr, uint32_t off) { sys_io_clear_bit(addr + off/4, bit); } static int aux_reg_test_bit(uint8_t bit, mm_reg_t addr, uint32_t off) { return sys_io_test_bit(addr + off/4, bit); } #endif #if DT_ANY_INST_NOT_PROP_STATUS_OKAY(aux_reg) static uint32_t reg_read(uint8_t size, mm_reg_t addr, uint32_t off) { switch (size) { case 8: return sys_read8(addr + off); case 16: return sys_read16(addr + off); case 32: return sys_read32(addr + off); default: return -EINVAL; } } static void reg_write(uint8_t size, uint32_t data, mm_reg_t addr, uint32_t off) { switch (size) { case 8: sys_write8(data, addr + off); break; case 16: sys_write16(data, addr + off); break; case 32: sys_write32(data, addr + off); break; default: break; } } static void reg_set_bit(uint8_t bit, mm_reg_t addr, uint32_t off) { sys_set_bit(addr + off, bit); } static void reg_clear_bit(uint8_t bit, mm_reg_t addr, uint32_t off) { sys_clear_bit(addr + off, bit); } static int reg_test_bit(uint8_t bit, mm_reg_t addr, uint32_t off) { return sys_test_bit(addr + off, bit); } #endif /* Helper macros */ #define SPI_DW_CLK_DIVIDER(clock_freq, ssi_clk_hz) \ ((clock_freq / ssi_clk_hz) & 0xFFFF) #define DEFINE_MM_REG_READ(__reg, __off, __sz) \ static inline uint32_t read_##__reg(const struct device *dev) \ { \ const struct spi_dw_config *info = dev->config; \ return info->read_func(__sz, (mm_reg_t)DEVICE_MMIO_GET(dev), __off); \ } #define DEFINE_MM_REG_WRITE(__reg, __off, __sz) \ static inline void write_##__reg(const struct device *dev, uint32_t data)\ { \ const struct spi_dw_config *info = dev->config; \ info->write_func(__sz, data, (mm_reg_t)DEVICE_MMIO_GET(dev), __off); \ } #define DEFINE_SET_BIT_OP(__reg_bit, __reg_off, __bit) \ static inline void set_bit_##__reg_bit(const struct device *dev) \ { \ const struct spi_dw_config *info = 
dev->config; \ info->set_bit_func(__bit, (mm_reg_t)DEVICE_MMIO_GET(dev), __reg_off); \ } #define DEFINE_CLEAR_BIT_OP(__reg_bit, __reg_off, __bit) \ static inline void clear_bit_##__reg_bit(const struct device *dev)\ { \ const struct spi_dw_config *info = dev->config; \ info->clear_bit_func(__bit, (mm_reg_t)DEVICE_MMIO_GET(dev), __reg_off); \ } #define DEFINE_TEST_BIT_OP(__reg_bit, __reg_off, __bit) \ static inline int test_bit_##__reg_bit(const struct device *dev)\ { \ const struct spi_dw_config *info = dev->config; \ return info->test_bit_func(__bit, (mm_reg_t)DEVICE_MMIO_GET(dev), __reg_off); \ } /* Common registers settings, bits etc... */ /* CTRLR0 settings */ #if !defined(CONFIG_SPI_DW_HSSI) #define DW_SPI_CTRLR0_SCPH_BIT (6) #define DW_SPI_CTRLR0_SCPOL_BIT (7) #define DW_SPI_CTRLR0_TMOD_SHIFT (8) #define DW_SPI_CTRLR0_SLV_OE_BIT (10) #define DW_SPI_CTRLR0_SRL_BIT (11) #else /* The register layout is different in the HSSI variant */ #define DW_SPI_CTRLR0_SCPH_BIT (8) #define DW_SPI_CTRLR0_SCPOL_BIT (9) #define DW_SPI_CTRLR0_TMOD_SHIFT (10) #define DW_SPI_CTRLR0_SLV_OE_BIT (12) #define DW_SPI_CTRLR0_SRL_BIT (13) #endif #define DW_SPI_CTRLR0_SCPH BIT(DW_SPI_CTRLR0_SCPH_BIT) #define DW_SPI_CTRLR0_SCPOL BIT(DW_SPI_CTRLR0_SCPOL_BIT) #define DW_SPI_CTRLR0_SRL BIT(DW_SPI_CTRLR0_SRL_BIT) #define DW_SPI_CTRLR0_SLV_OE BIT(DW_SPI_CTRLR0_SLV_OE_BIT) #define DW_SPI_CTRLR0_TMOD_TX_RX (0) #define DW_SPI_CTRLR0_TMOD_TX (1 << DW_SPI_CTRLR0_TMOD_SHIFT) #define DW_SPI_CTRLR0_TMOD_RX (2 << DW_SPI_CTRLR0_TMOD_SHIFT) #define DW_SPI_CTRLR0_TMOD_EEPROM (3 << DW_SPI_CTRLR0_TMOD_SHIFT) #define DW_SPI_CTRLR0_TMOD_RESET (3 << DW_SPI_CTRLR0_TMOD_SHIFT) #define DW_SPI_CTRLR0_DFS_16(__bpw) ((__bpw) - 1) #define DW_SPI_CTRLR0_DFS_32(__bpw) (((__bpw) - 1) << 16) /* 0x38 represents the bits 8, 16 and 32. Knowing that 24 is bits 8 and 16 * These are the bits were when you divide by 8, you keep the result as it is. * For all the other ones, 4 to 7, 9 to 15, etc... 
you need a +1, * since on such division it takes only the result above 0 */ #define SPI_WS_TO_DFS(__bpw) (((__bpw) & ~0x38) ? \ (((__bpw) / 8) + 1) : \ ((__bpw) / 8)) /* SSIENR bits */ #define DW_SPI_SSIENR_SSIEN_BIT (0) /* CLK_ENA bits */ #define DW_SPI_CLK_ENA_BIT (0) /* SR bits and values */ #define DW_SPI_SR_BUSY_BIT (0) #define DW_SPI_SR_TFNF_BIT (1) #define DW_SPI_SR_RFNE_BIT (3) /* IMR bits (ISR valid as well) */ #define DW_SPI_IMR_TXEIM_BIT (0) #define DW_SPI_IMR_TXOIM_BIT (1) #define DW_SPI_IMR_RXUIM_BIT (2) #define DW_SPI_IMR_RXOIM_BIT (3) #define DW_SPI_IMR_RXFIM_BIT (4) #define DW_SPI_IMR_MSTIM_BIT (5) /* IMR values */ #define DW_SPI_IMR_TXEIM BIT(DW_SPI_IMR_TXEIM_BIT) #define DW_SPI_IMR_TXOIM BIT(DW_SPI_IMR_TXOIM_BIT) #define DW_SPI_IMR_RXUIM BIT(DW_SPI_IMR_RXUIM_BIT) #define DW_SPI_IMR_RXOIM BIT(DW_SPI_IMR_RXOIM_BIT) #define DW_SPI_IMR_RXFIM BIT(DW_SPI_IMR_RXFIM_BIT) #define DW_SPI_IMR_MSTIM BIT(DW_SPI_IMR_MSTIM_BIT) /* ISR values (same as IMR) */ #define DW_SPI_ISR_TXEIS DW_SPI_IMR_TXEIM #define DW_SPI_ISR_TXOIS DW_SPI_IMR_TXOIM #define DW_SPI_ISR_RXUIS DW_SPI_IMR_RXUIM #define DW_SPI_ISR_RXOIS DW_SPI_IMR_RXOIM #define DW_SPI_ISR_RXFIS DW_SPI_IMR_RXFIM #define DW_SPI_ISR_MSTIS DW_SPI_IMR_MSTIM /* Error interrupt */ #define DW_SPI_ISR_ERRORS_MASK (DW_SPI_ISR_TXOIS | \ DW_SPI_ISR_RXUIS | \ DW_SPI_ISR_RXOIS | \ DW_SPI_ISR_MSTIS) /* ICR Bit */ #define DW_SPI_SR_ICR_BIT (0) /* Interrupt mask (IMR) */ #define DW_SPI_IMR_MASK (0x0) #define DW_SPI_IMR_UNMASK (DW_SPI_IMR_TXEIM | \ DW_SPI_IMR_TXOIM | \ DW_SPI_IMR_RXUIM | \ DW_SPI_IMR_RXOIM | \ DW_SPI_IMR_RXFIM) #define DW_SPI_IMR_MASK_TX (~(DW_SPI_IMR_TXEIM | \ DW_SPI_IMR_TXOIM)) #define DW_SPI_IMR_MASK_RX (~(DW_SPI_IMR_RXUIM | \ DW_SPI_IMR_RXOIM | \ DW_SPI_IMR_RXFIM)) /* * Including the right register definition file * SoC SPECIFIC! * * The file included next uses the DEFINE_MM_REG macros above to * declare functions. In this situation we'll leave the containing * extern "C" active in C++ compilations. 
*/ #include "spi_dw_regs.h" #define z_extra_clock_on(...) #define z_extra_clock_off(...) /* Based on those macros above, here are common helpers for some registers */ DEFINE_MM_REG_READ(txflr, DW_SPI_REG_TXFLR, 32) DEFINE_MM_REG_READ(rxflr, DW_SPI_REG_RXFLR, 32) #ifdef CONFIG_SPI_DW_ACCESS_WORD_ONLY DEFINE_MM_REG_WRITE(baudr, DW_SPI_REG_BAUDR, 32) DEFINE_MM_REG_WRITE(imr, DW_SPI_REG_IMR, 32) DEFINE_MM_REG_READ(imr, DW_SPI_REG_IMR, 32) DEFINE_MM_REG_READ(isr, DW_SPI_REG_ISR, 32) #else DEFINE_MM_REG_WRITE(baudr, DW_SPI_REG_BAUDR, 16) DEFINE_MM_REG_WRITE(imr, DW_SPI_REG_IMR, 8) DEFINE_MM_REG_READ(imr, DW_SPI_REG_IMR, 8) DEFINE_MM_REG_READ(isr, DW_SPI_REG_ISR, 8) #endif DEFINE_SET_BIT_OP(ssienr, DW_SPI_REG_SSIENR, DW_SPI_SSIENR_SSIEN_BIT) DEFINE_CLEAR_BIT_OP(ssienr, DW_SPI_REG_SSIENR, DW_SPI_SSIENR_SSIEN_BIT) DEFINE_TEST_BIT_OP(ssienr, DW_SPI_REG_SSIENR, DW_SPI_SSIENR_SSIEN_BIT) DEFINE_TEST_BIT_OP(sr_busy, DW_SPI_REG_SR, DW_SPI_SR_BUSY_BIT) #ifdef __cplusplus } #endif #endif /* ZEPHYR_DRIVERS_SPI_SPI_DW_H_ */ ```
/content/code_sandbox/drivers/spi/spi_dw.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,012
```c /* * */ #define DT_DRV_COMPAT adi_max32_spi #include <string.h> #include <errno.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/clock_control/adi_max32_clock_control.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <wrap_max32_spi.h> LOG_MODULE_REGISTER(spi_max32, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" struct max32_spi_config { mxc_spi_regs_t *regs; const struct pinctrl_dev_config *pctrl; const struct device *clock; struct max32_perclk perclk; #ifdef CONFIG_SPI_MAX32_INTERRUPT void (*irq_config_func)(const struct device *dev); #endif /* CONFIG_SPI_MAX32_INTERRUPT */ }; /* Device run time data */ struct max32_spi_data { struct spi_context ctx; const struct device *dev; mxc_spi_req_t req; uint8_t dummy[2]; #ifdef CONFIG_SPI_ASYNC struct k_work async_work; #endif /* CONFIG_SPI_ASYNC */ }; #ifdef CONFIG_SPI_MAX32_INTERRUPT static void spi_max32_callback(mxc_spi_req_t *req, int error); #endif /* CONFIG_SPI_MAX32_INTERRUPT */ static int spi_configure(const struct device *dev, const struct spi_config *config) { int ret = 0; const struct max32_spi_config *cfg = dev->config; mxc_spi_regs_t *regs = cfg->regs; struct max32_spi_data *data = dev->data; if (spi_context_configured(&data->ctx, config)) { return 0; } if (SPI_OP_MODE_GET(config->operation) & SPI_OP_MODE_SLAVE) { return -ENOTSUP; } int master_mode = 1; int quad_mode = 0; int num_slaves = 1; int ss_polarity = (config->operation & SPI_CS_ACTIVE_HIGH) ? 1 : 0; unsigned int spi_speed = (unsigned int)config->frequency; ret = Wrap_MXC_SPI_Init(regs, master_mode, quad_mode, num_slaves, ss_polarity, spi_speed); if (ret) { return ret; } int cpol = (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) ? 1 : 0; int cpha = (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) ? 
1 : 0; if (cpol && cpha) { ret = MXC_SPI_SetMode(regs, SPI_MODE_3); } else if (cpha) { ret = MXC_SPI_SetMode(regs, SPI_MODE_2); } else if (cpol) { ret = MXC_SPI_SetMode(regs, SPI_MODE_1); } else { ret = MXC_SPI_SetMode(regs, SPI_MODE_0); } if (ret) { return ret; } ret = MXC_SPI_SetDataSize(regs, SPI_WORD_SIZE_GET(config->operation)); if (ret) { return ret; } #if defined(CONFIG_SPI_EXTENDED_MODES) switch (config->operation & SPI_LINES_MASK) { case SPI_LINES_QUAD: ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_QUAD); break; case SPI_LINES_DUAL: ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_DUAL); break; case SPI_LINES_OCTAL: ret = -ENOTSUP; break; case SPI_LINES_SINGLE: default: ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_STANDARD); break; } if (ret) { return ret; } #endif data->ctx.config = config; return ret; } static inline int spi_max32_get_dfs_shift(const struct spi_context *ctx) { if (SPI_WORD_SIZE_GET(ctx->config->operation) < 9) { return 0; } return 1; } static void spi_max32_setup(mxc_spi_regs_t *spi, mxc_spi_req_t *req) { req->rxCnt = 0; req->txCnt = 0; if (spi->ctrl0 & ADI_MAX32_SPI_CTRL_MASTER_MODE) { MXC_SPI_SetSlave(spi, req->ssIdx); } if (req->rxData && req->rxLen) { MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_RX_NUM_CHAR, req->rxLen << MXC_F_SPI_CTRL1_RX_NUM_CHAR_POS); spi->dma |= MXC_F_SPI_DMA_RX_FIFO_EN; } else { spi->ctrl1 &= ~MXC_F_SPI_CTRL1_RX_NUM_CHAR; spi->dma &= ~MXC_F_SPI_DMA_RX_FIFO_EN; } if (req->txLen) { MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_TX_NUM_CHAR, req->txLen << MXC_F_SPI_CTRL1_TX_NUM_CHAR_POS); spi->dma |= MXC_F_SPI_DMA_TX_FIFO_EN; } else { spi->ctrl1 &= ~MXC_F_SPI_CTRL1_TX_NUM_CHAR; spi->dma &= ~MXC_F_SPI_DMA_TX_FIFO_EN; } spi->dma |= (ADI_MAX32_SPI_DMA_TX_FIFO_CLEAR | ADI_MAX32_SPI_DMA_RX_FIFO_CLEAR); spi->ctrl0 |= MXC_F_SPI_CTRL0_EN; MXC_SPI_ClearFlags(spi); } #ifndef CONFIG_SPI_MAX32_INTERRUPT static int spi_max32_transceive_sync(mxc_spi_regs_t *spi, struct max32_spi_data *data, uint8_t dfs_shift) { int ret = 0; mxc_spi_req_t *req = &data->req; 
uint32_t remain, flags, tx_len, rx_len; MXC_SPI_ClearTXFIFO(spi); MXC_SPI_ClearRXFIFO(spi); tx_len = req->txLen << dfs_shift; rx_len = req->rxLen << dfs_shift; do { remain = tx_len - req->txCnt; if (remain > 0) { if (!data->req.txData) { req->txCnt += MXC_SPI_WriteTXFIFO(spi, data->dummy, MIN(remain, sizeof(data->dummy))); } else { req->txCnt += MXC_SPI_WriteTXFIFO(spi, &req->txData[req->txCnt], remain); } if (!(spi->ctrl0 & MXC_F_SPI_CTRL0_START)) { spi->ctrl0 |= MXC_F_SPI_CTRL0_START; } } if (req->rxCnt < rx_len) { req->rxCnt += MXC_SPI_ReadRXFIFO(spi, &req->rxData[req->rxCnt], rx_len - req->rxCnt); } } while ((req->txCnt < tx_len) || (req->rxCnt < rx_len)); do { flags = MXC_SPI_GetFlags(spi); } while (!(flags & ADI_MAX32_SPI_INT_FL_MST_DONE)); MXC_SPI_ClearFlags(spi); return ret; } #endif /* CONFIG_SPI_MAX32_INTERRUPT */ static int spi_max32_transceive(const struct device *dev) { int ret = 0; const struct max32_spi_config *cfg = dev->config; struct max32_spi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t len; uint8_t dfs_shift; MXC_SPI_ClearTXFIFO(cfg->regs); dfs_shift = spi_max32_get_dfs_shift(ctx); len = spi_context_max_continuous_chunk(ctx); data->req.txLen = len >> dfs_shift; data->req.txData = (uint8_t *)ctx->tx_buf; data->req.rxLen = len >> dfs_shift; data->req.rxData = ctx->rx_buf; data->req.rxData = ctx->rx_buf; data->req.rxLen = len >> dfs_shift; if (!data->req.rxData) { /* Pass a dummy buffer to HAL if receive buffer is NULL, otherwise * corrupt data is read during subsequent transactions. 
*/ data->req.rxData = data->dummy; data->req.rxLen = 0; } data->req.spi = cfg->regs; data->req.ssIdx = ctx->config->slave; data->req.ssDeassert = 0; data->req.txCnt = 0; data->req.rxCnt = 0; spi_max32_setup(cfg->regs, &data->req); #ifdef CONFIG_SPI_MAX32_INTERRUPT MXC_SPI_SetTXThreshold(cfg->regs, 1); if (data->req.rxLen) { MXC_SPI_SetRXThreshold(cfg->regs, 2); MXC_SPI_EnableInt(cfg->regs, ADI_MAX32_SPI_INT_EN_RX_THD); } MXC_SPI_EnableInt(cfg->regs, ADI_MAX32_SPI_INT_EN_TX_THD | ADI_MAX32_SPI_INT_EN_MST_DONE); if (!data->req.txData) { data->req.txCnt = MXC_SPI_WriteTXFIFO(cfg->regs, data->dummy, MIN(len, sizeof(data->dummy))); } else { data->req.txCnt = MXC_SPI_WriteTXFIFO(cfg->regs, data->req.txData, len); } MXC_SPI_StartTransmission(cfg->regs); #else ret = spi_max32_transceive_sync(cfg->regs, data, dfs_shift); if (ret) { ret = -EIO; } else { spi_context_update_tx(ctx, 1, len); spi_context_update_rx(ctx, 1, len); } #endif return ret; } static int transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool async, spi_callback_t cb, void *userdata) { int ret = 0; const struct max32_spi_config *cfg = dev->config; struct max32_spi_data *data = dev->data; struct spi_context *ctx = &data->ctx; bool hw_cs_ctrl = true; #ifndef CONFIG_SPI_MAX32_INTERRUPT if (async) { return -ENOTSUP; } #endif spi_context_lock(ctx, async, cb, userdata, config); ret = spi_configure(dev, config); if (ret != 0) { spi_context_release(ctx, ret); return -EIO; } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); /* Check if CS GPIO exists */ if (spi_cs_is_gpio(config)) { hw_cs_ctrl = false; } MXC_SPI_HWSSControl(cfg->regs, hw_cs_ctrl); /* Assert the CS line if HW control disabled */ if (!hw_cs_ctrl) { spi_context_cs_control(ctx, true); } else { cfg->regs->ctrl0 = (cfg->regs->ctrl0 & ~MXC_F_SPI_CTRL0_START) | MXC_F_SPI_CTRL0_SS_CTRL; } #ifdef CONFIG_SPI_MAX32_INTERRUPT do { ret = spi_max32_transceive(dev); if 
(!ret) { ret = spi_context_wait_for_completion(ctx); if (ret || async) { break; } } else { break; } } while ((spi_context_tx_on(ctx) || spi_context_rx_on(ctx))); #else do { ret = spi_max32_transceive(dev); if (ret) { break; } } while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)); #endif /* CONFIG_SPI_MAX32_INTERRUPT */ /* Deassert the CS line if hw control disabled */ if (!async) { if (!hw_cs_ctrl) { spi_context_cs_control(ctx, false); } else { cfg->regs->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL | MXC_F_SPI_CTRL0_EN); cfg->regs->ctrl0 |= MXC_F_SPI_CTRL0_EN; } } spi_context_release(ctx, ret); return ret; } static int api_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int api_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ #ifdef CONFIG_SPI_MAX32_INTERRUPT static void spi_max32_callback(mxc_spi_req_t *req, int error) { struct max32_spi_data *data = CONTAINER_OF(req, struct max32_spi_data, req); struct spi_context *ctx = &data->ctx; const struct device *dev = data->dev; uint32_t len; len = spi_context_max_continuous_chunk(ctx); spi_context_update_tx(ctx, 1, len); spi_context_update_rx(ctx, 1, len); #ifdef CONFIG_SPI_ASYNC if (ctx->asynchronous && ((spi_context_tx_on(ctx) || spi_context_rx_on(ctx)))) { k_work_submit(&data->async_work); } else { if (spi_cs_is_gpio(ctx->config)) { spi_context_cs_control(ctx, false); } else { req->spi->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL | MXC_F_SPI_CTRL0_EN); req->spi->ctrl0 |= MXC_F_SPI_CTRL0_EN; } spi_context_complete(ctx, dev, error == E_NO_ERROR ? 
0 : -EIO); } #else spi_context_complete(ctx, dev, error == E_NO_ERROR ? 0 : -EIO); #endif } #ifdef CONFIG_SPI_ASYNC void spi_max32_async_work_handler(struct k_work *work) { struct max32_spi_data *data = CONTAINER_OF(work, struct max32_spi_data, async_work); const struct device *dev = data->dev; int ret; ret = spi_max32_transceive(dev); if (ret) { spi_context_complete(&data->ctx, dev, -EIO); } } #endif /* CONFIG_SPI_ASYNC */ static void spi_max32_isr(const struct device *dev) { const struct max32_spi_config *cfg = dev->config; struct max32_spi_data *data = dev->data; mxc_spi_req_t *req = &data->req; mxc_spi_regs_t *spi = cfg->regs; uint32_t flags, remain; uint8_t dfs_shift = spi_max32_get_dfs_shift(&data->ctx); flags = MXC_SPI_GetFlags(spi); MXC_SPI_ClearFlags(spi); remain = (req->txLen << dfs_shift) - req->txCnt; if (flags & ADI_MAX32_SPI_INT_FL_TX_THD) { if (remain) { if (!data->req.txData) { req->txCnt += MXC_SPI_WriteTXFIFO(cfg->regs, data->dummy, MIN(remain, sizeof(data->dummy))); } else { req->txCnt += MXC_SPI_WriteTXFIFO(spi, &req->txData[req->txCnt], remain); } } else { MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_TX_THD); } } remain = (req->rxLen << dfs_shift) - req->rxCnt; if (remain) { req->rxCnt += MXC_SPI_ReadRXFIFO(spi, &req->rxData[req->rxCnt], remain); remain = (req->rxLen << dfs_shift) - req->rxCnt; if (remain >= MXC_SPI_FIFO_DEPTH) { MXC_SPI_SetRXThreshold(spi, 2); } else { MXC_SPI_SetRXThreshold(spi, remain); } } else { MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_RX_THD); } if ((req->txLen == req->txCnt) && (req->rxLen == req->rxCnt)) { MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_TX_THD | ADI_MAX32_SPI_INT_EN_RX_THD); if (flags & ADI_MAX32_SPI_INT_FL_MST_DONE) { MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_MST_DONE); spi_max32_callback(req, 0); } } } #endif /* CONFIG_SPI_MAX32_INTERRUPT */ static int api_release(const struct device *dev, const struct spi_config *config) { struct max32_spi_data *data = dev->data; if 
(!spi_context_configured(&data->ctx, config)) { return -EINVAL; } spi_context_unlock_unconditionally(&data->ctx); return 0; } static int spi_max32_init(const struct device *dev) { int ret = 0; const struct max32_spi_config *const cfg = dev->config; mxc_spi_regs_t *regs = cfg->regs; struct max32_spi_data *data = dev->data; if (!device_is_ready(cfg->clock)) { return -ENODEV; } MXC_SPI_Shutdown(regs); ret = clock_control_on(cfg->clock, (clock_control_subsys_t)&cfg->perclk); if (ret) { return ret; } ret = pinctrl_apply_state(cfg->pctrl, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } ret = spi_context_cs_configure_all(&data->ctx); if (ret < 0) { return ret; } data->dev = dev; #ifdef CONFIG_SPI_MAX32_INTERRUPT cfg->irq_config_func(dev); #ifdef CONFIG_SPI_ASYNC k_work_init(&data->async_work, spi_max32_async_work_handler); #endif #endif spi_context_unlock_unconditionally(&data->ctx); return ret; } /* SPI driver APIs structure */ static const struct spi_driver_api spi_max32_api = { .transceive = api_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = api_transceive_async, #endif /* CONFIG_SPI_ASYNC */ .release = api_release, }; /* SPI driver registration */ #ifdef CONFIG_SPI_MAX32_INTERRUPT #define SPI_MAX32_CONFIG_IRQ_FUNC(n) .irq_config_func = spi_max32_irq_config_func_##n, #define SPI_MAX32_IRQ_CONFIG_FUNC(n) \ static void spi_max32_irq_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_max32_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } #else #define SPI_MAX32_CONFIG_IRQ_FUNC(n) #define SPI_MAX32_IRQ_CONFIG_FUNC(n) #endif /* CONFIG_SPI_MAX32_INTERRUPT */ #define DEFINE_SPI_MAX32(_num) \ PINCTRL_DT_INST_DEFINE(_num); \ SPI_MAX32_IRQ_CONFIG_FUNC(_num) \ static const struct max32_spi_config max32_spi_config_##_num = { \ .regs = (mxc_spi_regs_t *)DT_INST_REG_ADDR(_num), \ .pctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(_num), \ .clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(_num)), \ .perclk.bus = 
DT_INST_CLOCKS_CELL(_num, offset), \ .perclk.bit = DT_INST_CLOCKS_CELL(_num, bit), \ SPI_MAX32_CONFIG_IRQ_FUNC(_num)}; \ static struct max32_spi_data max32_spi_data_##_num = { \ SPI_CONTEXT_INIT_LOCK(max32_spi_data_##_num, ctx), \ SPI_CONTEXT_INIT_SYNC(max32_spi_data_##_num, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(_num), ctx)}; \ DEVICE_DT_INST_DEFINE(_num, spi_max32_init, NULL, &max32_spi_data_##_num, \ &max32_spi_config_##_num, PRE_KERNEL_2, CONFIG_SPI_INIT_PRIORITY, \ &spi_max32_api); DT_INST_FOREACH_STATUS_OKAY(DEFINE_SPI_MAX32) ```
/content/code_sandbox/drivers/spi/spi_max32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,467
```unknown
# Kconfig options for the NXP S32 SPI driver.

config NXP_S32_SPI
	bool "NXP S32 SPI driver"
	default y
	depends on DT_HAS_NXP_S32_SPI_ENABLED
	select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT
	help
	  Enable support for NXP S32 SPI driver.

if NXP_S32_SPI

config NXP_S32_SPI_INTERRUPT
	bool "NXP S32 SPI Interrupt Support"
	default y
	help
	  Enable Interrupt support for SPI communication.

endif # NXP_S32_SPI
```
/content/code_sandbox/drivers/spi/Kconfig.nxp_s32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
105
```unknown
# Kconfig options for the Analog Devices MAX32 SPI driver.

config SPI_MAX32
	bool "MAX32 MCU SPI controller driver"
	default y
	depends on DT_HAS_ADI_MAX32_SPI_ENABLED
	select PINCTRL
	help
	  Enable SPI support on the MAX32 family of processors.

if SPI_MAX32

config SPI_MAX32_INTERRUPT
	bool "MAX32 MCU SPI Interrupt Support"
	help
	  Enable interrupt support for MAX32 MCU SPI driver.

endif # SPI_MAX32
```
/content/code_sandbox/drivers/spi/Kconfig.max32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
88
```unknown
# Kconfig options for the NXP i.MX (MCUX) ECSPI driver.

config SPI_MCUX_ECSPI
	bool "MCUX ECSPI driver"
	default y
	depends on DT_HAS_NXP_IMX_ECSPI_ENABLED
	depends on CLOCK_CONTROL
	select PINCTRL
	help
	  Enable support for NXP's mcux ecspi driver.
```
/content/code_sandbox/drivers/spi/Kconfig.mcux_ecspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
60
```c /* * */ #define DT_DRV_COMPAT infineon_xmc4xxx_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_xmc4xxx); #include "spi_context.h" #include <zephyr/drivers/dma.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/spi.h> #include <xmc_spi.h> #include <xmc_usic.h> #define USIC_IRQ_MIN 84 #define USIC_IRQ_MAX 101 #define IRQS_PER_USIC 6 #define SPI_XMC4XXX_DMA_ERROR_FLAG BIT(0) #define SPI_XMC4XXX_DMA_RX_DONE_FLAG BIT(1) #define SPI_XMC4XXX_DMA_TX_DONE_FLAG BIT(2) #ifdef CONFIG_SPI_XMC4XXX_DMA static const uint8_t __aligned(4) tx_dummy_data; #endif struct spi_xmc4xxx_config { XMC_USIC_CH_t *spi; const struct pinctrl_dev_config *pcfg; uint8_t miso_src; #if defined(CONFIG_SPI_XMC4XXX_INTERRUPT) void (*irq_config_func)(const struct device *dev); #endif #if defined(CONFIG_SPI_XMC4XXX_DMA) uint8_t irq_num_tx; uint8_t irq_num_rx; #endif }; #ifdef CONFIG_SPI_XMC4XXX_DMA struct spi_xmc4xxx_dma_stream { const struct device *dev_dma; uint32_t dma_channel; struct dma_config dma_cfg; struct dma_block_config blk_cfg; }; #endif struct spi_xmc4xxx_data { struct spi_context ctx; #if defined(CONFIG_SPI_XMC4XXX_DMA) struct spi_xmc4xxx_dma_stream dma_rx; struct spi_xmc4xxx_dma_stream dma_tx; struct k_sem status_sem; uint8_t dma_status_flags; uint8_t dma_completion_flags; uint8_t service_request_tx; uint8_t service_request_rx; #endif }; #if defined(CONFIG_SPI_XMC4XXX_DMA) static void spi_xmc4xxx_dma_callback(const struct device *dev_dma, void *arg, uint32_t dma_channel, int status) { struct spi_xmc4xxx_data *data = arg; if (status != 0) { LOG_ERR("DMA callback error on channel %d.", dma_channel); data->dma_status_flags |= SPI_XMC4XXX_DMA_ERROR_FLAG; } else { if (dev_dma == data->dma_tx.dev_dma && dma_channel == data->dma_tx.dma_channel) { data->dma_status_flags |= SPI_XMC4XXX_DMA_TX_DONE_FLAG; } else if (dev_dma == data->dma_rx.dev_dma && dma_channel == data->dma_rx.dma_channel) { data->dma_status_flags |= 
SPI_XMC4XXX_DMA_RX_DONE_FLAG; } else { LOG_ERR("DMA callback channel %d is not valid.", dma_channel); data->dma_status_flags |= SPI_XMC4XXX_DMA_ERROR_FLAG; } } k_sem_give(&data->status_sem); } #endif static void spi_xmc4xxx_flush_rx(XMC_USIC_CH_t *spi) { uint32_t recv_status; recv_status = XMC_USIC_CH_GetReceiveBufferStatus(spi); if (recv_status & USIC_CH_RBUFSR_RDV0_Msk) { XMC_SPI_CH_GetReceivedData(spi); } if (recv_status & USIC_CH_RBUFSR_RDV1_Msk) { XMC_SPI_CH_GetReceivedData(spi); } } static void spi_xmc4xxx_shift_frames(const struct device *dev) { struct spi_xmc4xxx_data *data = dev->data; const struct spi_xmc4xxx_config *config = dev->config; struct spi_context *ctx = &data->ctx; uint8_t tx_data = 0; uint8_t rx_data; uint32_t status; if (spi_context_tx_buf_on(ctx)) { tx_data = ctx->tx_buf[0]; } XMC_SPI_CH_ClearStatusFlag(config->spi, XMC_SPI_CH_STATUS_FLAG_TRANSMIT_SHIFT_INDICATION | XMC_SPI_CH_STATUS_FLAG_RECEIVE_INDICATION | XMC_SPI_CH_STATUS_FLAG_ALTERNATIVE_RECEIVE_INDICATION); spi_context_update_tx(ctx, 1, 1); XMC_SPI_CH_Transmit(config->spi, tx_data, XMC_SPI_CH_MODE_STANDARD); #if defined(CONFIG_SPI_XMC4XXX_INTERRUPT) return; #endif /* Wait to finish transmitting */ while (1) { status = XMC_SPI_CH_GetStatusFlag(config->spi); if (status & XMC_SPI_CH_STATUS_FLAG_TRANSMIT_SHIFT_INDICATION) { break; } } /* Wait to finish receiving */ while (1) { status = XMC_SPI_CH_GetStatusFlag(config->spi); if (status & (XMC_SPI_CH_STATUS_FLAG_RECEIVE_INDICATION | XMC_SPI_CH_STATUS_FLAG_ALTERNATIVE_RECEIVE_INDICATION)) { break; } } rx_data = XMC_SPI_CH_GetReceivedData(config->spi); if (spi_context_rx_buf_on(ctx)) { *ctx->rx_buf = rx_data; } spi_context_update_rx(ctx, 1, 1); } #if defined(CONFIG_SPI_XMC4XXX_INTERRUPT) static void spi_xmc4xxx_isr(const struct device *dev) { struct spi_xmc4xxx_data *data = dev->data; const struct spi_xmc4xxx_config *config = dev->config; struct spi_context *ctx = &data->ctx; uint8_t rx_data; rx_data = XMC_SPI_CH_GetReceivedData(config->spi); 
if (spi_context_rx_buf_on(ctx)) { *ctx->rx_buf = rx_data; } spi_context_update_rx(ctx, 1, 1); if (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)) { spi_xmc4xxx_shift_frames(dev); return; } if (!(ctx->config->operation & SPI_HOLD_ON_CS)) { spi_context_cs_control(ctx, false); } spi_context_complete(ctx, dev, 0); } #endif #define LOOPBACK_SRC 6 static int spi_xmc4xxx_configure(const struct device *dev, const struct spi_config *spi_cfg) { int ret; struct spi_xmc4xxx_data *data = dev->data; const struct spi_xmc4xxx_config *config = dev->config; struct spi_context *ctx = &data->ctx; uint16_t settings = spi_cfg->operation; bool CPOL = SPI_MODE_GET(settings) & SPI_MODE_CPOL; bool CPHA = SPI_MODE_GET(settings) & SPI_MODE_CPHA; XMC_SPI_CH_CONFIG_t usic_cfg = {.baudrate = spi_cfg->frequency}; XMC_SPI_CH_BRG_SHIFT_CLOCK_PASSIVE_LEVEL_t clock_settings = XMC_SPI_CH_BRG_SHIFT_CLOCK_PASSIVE_LEVEL_0_DELAY_ENABLED; if (spi_context_configured(ctx, spi_cfg)) { return 0; } ctx->config = spi_cfg; if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (spi_cfg->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) { LOG_ERR("Only 8 bit word size is supported"); return -ENOTSUP; } ret = XMC_SPI_CH_Stop(config->spi); if (ret != XMC_SPI_CH_STATUS_OK) { return -EBUSY; } XMC_SPI_CH_Init(config->spi, &usic_cfg); XMC_SPI_CH_Start(config->spi); if (SPI_MODE_GET(settings) & SPI_MODE_LOOP) { XMC_SPI_CH_SetInputSource(config->spi, XMC_SPI_CH_INPUT_DIN0, LOOPBACK_SRC); } else { XMC_SPI_CH_SetInputSource(config->spi, XMC_SPI_CH_INPUT_DIN0, config->miso_src); } if (!CPOL && !CPHA) { clock_settings = XMC_SPI_CH_BRG_SHIFT_CLOCK_PASSIVE_LEVEL_0_DELAY_ENABLED; } else if (!CPOL && CPHA) { clock_settings = XMC_SPI_CH_BRG_SHIFT_CLOCK_PASSIVE_LEVEL_0_DELAY_DISABLED; } else if (CPOL && !CPHA) { clock_settings = XMC_SPI_CH_BRG_SHIFT_CLOCK_PASSIVE_LEVEL_1_DELAY_ENABLED; } else if 
(CPOL && CPHA) { clock_settings = XMC_SPI_CH_BRG_SHIFT_CLOCK_PASSIVE_LEVEL_1_DELAY_DISABLED; } XMC_SPI_CH_ConfigureShiftClockOutput(config->spi, clock_settings, XMC_SPI_CH_BRG_SHIFT_CLOCK_OUTPUT_SCLK); if (settings & SPI_TRANSFER_LSB) { XMC_SPI_CH_SetBitOrderLsbFirst(config->spi); } else { XMC_SPI_CH_SetBitOrderMsbFirst(config->spi); } XMC_SPI_CH_SetWordLength(config->spi, 8); return 0; } static int spi_xmc4xxx_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { struct spi_xmc4xxx_data *data = dev->data; const struct spi_xmc4xxx_config *config = dev->config; struct spi_context *ctx = &data->ctx; int ret; if (!tx_bufs && !rx_bufs) { return 0; } #ifndef CONFIG_SPI_XMC4XXX_INTERRUPT if (asynchronous) { return -ENOTSUP; } #endif spi_context_lock(ctx, asynchronous, cb, userdata, spi_cfg); ret = spi_xmc4xxx_configure(dev, spi_cfg); if (ret) { LOG_DBG("SPI config on device %s failed", dev->name); spi_context_release(ctx, ret); return ret; } spi_xmc4xxx_flush_rx(config->spi); spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(ctx, true); #if defined(CONFIG_SPI_XMC4XXX_INTERRUPT) XMC_SPI_CH_EnableEvent(config->spi, XMC_SPI_CH_EVENT_STANDARD_RECEIVE | XMC_SPI_CH_EVENT_ALTERNATIVE_RECEIVE); spi_xmc4xxx_shift_frames(dev); ret = spi_context_wait_for_completion(ctx); /* cs released in isr */ #else while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)) { spi_xmc4xxx_shift_frames(dev); } if (!(spi_cfg->operation & SPI_HOLD_ON_CS)) { spi_context_cs_control(ctx, false); } #endif spi_context_release(ctx, ret); return ret; } #if defined(CONFIG_SPI_ASYNC) static int spi_xmc4xxx_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return spi_xmc4xxx_transceive(dev, spi_cfg, tx_bufs, 
rx_bufs, true, cb, userdata); } #endif #if defined(CONFIG_SPI_XMC4XXX_DMA) static int spi_xmc4xxx_dma_rx_tx_done(struct spi_xmc4xxx_data *data) { for (;;) { int ret; ret = k_sem_take(&data->status_sem, K_MSEC(CONFIG_SPI_XMC4XXX_DMA_TIMEOUT_MSEC)); if (ret != 0) { LOG_ERR("Sem take error %d", ret); return ret; } if (data->dma_status_flags & SPI_XMC4XXX_DMA_ERROR_FLAG) { return -EIO; } if (data->dma_status_flags == data->dma_completion_flags) { return 0; } } } static int spi_xmc4xxx_transceive_dma(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { struct spi_xmc4xxx_data *data = dev->data; const struct spi_xmc4xxx_config *config = dev->config; struct spi_context *ctx = &data->ctx; struct spi_xmc4xxx_dma_stream *dma_tx = &data->dma_tx; struct spi_xmc4xxx_dma_stream *dma_rx = &data->dma_rx; int ret; if (!tx_bufs && !rx_bufs) { return 0; } if (asynchronous) { return -ENOTSUP; } spi_context_lock(ctx, asynchronous, cb, userdata, spi_cfg); k_sem_reset(&data->status_sem); ret = spi_xmc4xxx_configure(dev, spi_cfg); if (ret) { LOG_ERR("SPI config on device %s failed", dev->name); spi_context_release(ctx, ret); return ret; } /* stop async isr from triggering */ irq_disable(config->irq_num_rx); spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(ctx, true); while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)) { int dma_len; uint8_t dma_completion_flags = SPI_XMC4XXX_DMA_TX_DONE_FLAG; /* make sure the tx is not transmitting */ while (XMC_USIC_CH_GetTransmitBufferStatus(config->spi) == XMC_USIC_CH_TBUF_STATUS_BUSY) { }; if (data->ctx.rx_len == 0) { dma_len = data->ctx.tx_len; } else if (data->ctx.tx_len == 0) { dma_len = data->ctx.rx_len; } else { dma_len = MIN(data->ctx.tx_len, data->ctx.rx_len); } if (ctx->rx_buf) { spi_xmc4xxx_flush_rx(config->spi); dma_rx->blk_cfg.dest_address = (uint32_t)ctx->rx_buf; 
dma_rx->blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; dma_rx->blk_cfg.block_size = dma_len; dma_rx->blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; ret = dma_config(dma_rx->dev_dma, dma_rx->dma_channel, &dma_rx->dma_cfg); if (ret < 0) { break; } XMC_SPI_CH_EnableEvent(config->spi, XMC_SPI_CH_EVENT_STANDARD_RECEIVE | XMC_SPI_CH_EVENT_ALTERNATIVE_RECEIVE); dma_completion_flags |= SPI_XMC4XXX_DMA_RX_DONE_FLAG; ret = dma_start(dma_rx->dev_dma, dma_rx->dma_channel); if (ret < 0) { break; } } else { XMC_SPI_CH_DisableEvent(config->spi, XMC_SPI_CH_EVENT_STANDARD_RECEIVE | XMC_SPI_CH_EVENT_ALTERNATIVE_RECEIVE); } if (ctx->tx_buf) { dma_tx->blk_cfg.source_address = (uint32_t)ctx->tx_buf; dma_tx->blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { dma_tx->blk_cfg.source_address = (uint32_t)&tx_dummy_data; dma_tx->blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } dma_tx->blk_cfg.block_size = dma_len; ret = dma_config(dma_tx->dev_dma, dma_tx->dma_channel, &dma_tx->dma_cfg); if (ret < 0) { break; } data->dma_status_flags = 0; data->dma_completion_flags = dma_completion_flags; XMC_SPI_CH_EnableEvent(config->spi, XMC_SPI_CH_EVENT_RECEIVE_START); XMC_USIC_CH_TriggerServiceRequest(config->spi, data->service_request_tx); ret = dma_start(dma_tx->dev_dma, dma_tx->dma_channel); if (ret < 0) { break; } ret = spi_xmc4xxx_dma_rx_tx_done(data); if (ret) { break; } spi_context_update_tx(ctx, 1, dma_len); spi_context_update_rx(ctx, 1, dma_len); } if (ret < 0) { dma_stop(dma_tx->dev_dma, dma_tx->dma_channel); dma_stop(dma_rx->dev_dma, dma_rx->dma_channel); } if (!(spi_cfg->operation & SPI_HOLD_ON_CS)) { spi_context_cs_control(ctx, false); } #if defined(CONFIG_SPI_XMC4XXX_INTERRUPT) irq_enable(config->irq_num_rx); #endif spi_context_release(ctx, ret); return ret; } #endif static int spi_xmc4xxx_transceive_sync(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { #if defined(CONFIG_SPI_XMC4XXX_DMA) 
    struct spi_xmc4xxx_data *data = dev->data;

    /* DMA path requires both a TX and an RX channel in devicetree. */
    if (data->dma_tx.dev_dma != NULL && data->dma_rx.dev_dma != NULL) {
        return spi_xmc4xxx_transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL,
                                          NULL);
    }
#endif
    return spi_xmc4xxx_transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

/*
 * Release the context lock (spi_driver_api.release).
 * -EINVAL if @p config is not the currently applied configuration.
 */
static int spi_xmc4xxx_release(const struct device *dev, const struct spi_config *config)
{
    struct spi_xmc4xxx_data *data = dev->data;

    if (!spi_context_configured(&data->ctx, config)) {
        return -EINVAL;
    }
    spi_context_unlock_unconditionally(&data->ctx);
    return 0;
}

#if defined(CONFIG_SPI_XMC4XXX_DMA)
/* Map the RX IRQ line to a USIC service request and route the receive
 * (standard + alternate) events to it.
 */
static void spi_xmc4xxx_configure_rx_service_requests(const struct device *dev)
{
    const struct spi_xmc4xxx_config *config = dev->config;
    struct spi_xmc4xxx_data *data = dev->data;

    __ASSERT(config->irq_num_rx >= USIC_IRQ_MIN && config->irq_num_rx <= USIC_IRQ_MAX,
             "Invalid irq number\n");

    data->service_request_rx = (config->irq_num_rx - USIC_IRQ_MIN) % IRQS_PER_USIC;

    XMC_SPI_CH_SelectInterruptNodePointer(config->spi,
                                          XMC_SPI_CH_INTERRUPT_NODE_POINTER_RECEIVE,
                                          data->service_request_rx);
    XMC_SPI_CH_SelectInterruptNodePointer(config->spi,
                                          XMC_SPI_CH_INTERRUPT_NODE_POINTER_ALTERNATE_RECEIVE,
                                          data->service_request_rx);
}

/* Map the TX IRQ line to a USIC service request for the transmit buffer event. */
static void spi_xmc4xxx_configure_tx_service_requests(const struct device *dev)
{
    const struct spi_xmc4xxx_config *config = dev->config;
    struct spi_xmc4xxx_data *data = dev->data;

    __ASSERT(config->irq_num_tx >= USIC_IRQ_MIN && config->irq_num_tx <= USIC_IRQ_MAX,
             "Invalid irq number\n");

    data->service_request_tx = (config->irq_num_tx - USIC_IRQ_MIN) % IRQS_PER_USIC;

    XMC_USIC_CH_SetInterruptNodePointer(config->spi,
                                        XMC_USIC_CH_INTERRUPT_NODE_POINTER_TRANSMIT_BUFFER,
                                        data->service_request_tx);
}
#endif

/*
 * Device init: enable the USIC channel, set up IRQ/DMA plumbing as
 * configured, apply pinctrl, and select the MISO input source.
 */
static int spi_xmc4xxx_init(const struct device *dev)
{
    struct spi_xmc4xxx_data *data = dev->data;
    const struct spi_xmc4xxx_config *config = dev->config;
    int ret;

    XMC_USIC_CH_Enable(config->spi);

    spi_context_unlock_unconditionally(&data->ctx);

#if \
defined(CONFIG_SPI_XMC4XXX_INTERRUPT)
    config->irq_config_func(dev);
#endif

#if defined(CONFIG_SPI_XMC4XXX_DMA)
    spi_xmc4xxx_configure_tx_service_requests(dev);
    spi_xmc4xxx_configure_rx_service_requests(dev);

    if (data->dma_rx.dev_dma != NULL) {
        if (!device_is_ready(data->dma_rx.dev_dma)) {
            return -ENODEV;
        }
        /* RX DMA always reads the USIC receive buffer register. */
        data->dma_rx.blk_cfg.source_address = (uint32_t)&config->spi->RBUF;
        data->dma_rx.dma_cfg.head_block = &data->dma_rx.blk_cfg;
        data->dma_rx.dma_cfg.user_data = (void *)data;
    }

    if (data->dma_tx.dev_dma != NULL) {
        if (!device_is_ready(data->dma_tx.dev_dma)) {
            return -ENODEV;
        }
        /* TX DMA always writes the standard-mode transmit buffer. */
        data->dma_tx.blk_cfg.dest_address =
            (uint32_t)&config->spi->TBUF[XMC_SPI_CH_MODE_STANDARD];
        data->dma_tx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
        data->dma_tx.dma_cfg.head_block = &data->dma_tx.blk_cfg;
        data->dma_tx.dma_cfg.user_data = (void *)data;
    }

    /* Limit 2: one give per DMA channel (TX and RX) may be pending. */
    k_sem_init(&data->status_sem, 0, 2);
#endif

    ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
    if (ret < 0) {
        return ret;
    }

    XMC_SPI_CH_SetInputSource(config->spi, XMC_SPI_CH_INPUT_DIN0, config->miso_src);

    spi_context_cs_configure_all(&data->ctx);

    return 0;
}

static const struct spi_driver_api spi_xmc4xxx_driver_api = {
    .transceive = spi_xmc4xxx_transceive_sync,
#if defined(CONFIG_SPI_ASYNC)
    .transceive_async = spi_xmc4xxx_transceive_async,
#endif
    .release = spi_xmc4xxx_release,
};

#if defined(CONFIG_SPI_XMC4XXX_DMA)
/* Initializer for one spi_xmc4xxx_dma_stream taken from the DT "dmas" entry named @p dir. */
#define SPI_DMA_CHANNEL_INIT(index, dir, ch_dir, src_burst, dst_burst) \
    .dev_dma = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir)), \
    .dma_channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel), \
    .dma_cfg = { \
        .dma_slot = DT_INST_DMAS_CELL_BY_NAME(index, dir, config), \
        .channel_direction = ch_dir, \
        .channel_priority = DT_INST_DMAS_CELL_BY_NAME(index, dir, priority), \
        .source_data_size = 1, \
        .dest_data_size = 1, \
        .source_burst_length = src_burst, \
        .dest_burst_length = dst_burst, \
        .block_count = 1, \
        .dma_callback = spi_xmc4xxx_dma_callback, \
        .complete_callback_en = true, \
    },

#define \
SPI_DMA_CHANNEL(index, dir, ch_dir, src_burst, dst_burst) \
    .dma_##dir = {COND_CODE_1( \
        DT_INST_DMAS_HAS_NAME(index, dir), \
        (SPI_DMA_CHANNEL_INIT(index, dir, ch_dir, src_burst, dst_burst)), (NULL))},
#else
#define SPI_DMA_CHANNEL(index, dir, ch_dir, src_burst, dst_burst)
#endif

#if defined(CONFIG_SPI_XMC4XXX_INTERRUPT)
/* Per-instance IRQ setup: route RX service requests, enable RX events,
 * connect spi_xmc4xxx_isr and enable the IRQ line.
 */
#define XMC4XXX_IRQ_HANDLER_INIT(index) \
    static void spi_xmc4xxx_irq_setup_##index(const struct device *dev) \
    { \
        const struct spi_xmc4xxx_config *config = dev->config; \
        uint8_t service_request; \
        uint8_t irq_num; \
 \
        irq_num = DT_INST_IRQ_BY_NAME(index, rx, irq); \
        service_request = (irq_num - USIC_IRQ_MIN) % IRQS_PER_USIC; \
 \
        XMC_SPI_CH_SelectInterruptNodePointer( \
            config->spi, XMC_SPI_CH_INTERRUPT_NODE_POINTER_RECEIVE, service_request); \
        XMC_SPI_CH_SelectInterruptNodePointer( \
            config->spi, XMC_SPI_CH_INTERRUPT_NODE_POINTER_ALTERNATE_RECEIVE, \
            service_request); \
 \
        XMC_SPI_CH_EnableEvent(config->spi, XMC_SPI_CH_EVENT_STANDARD_RECEIVE | \
                                            XMC_SPI_CH_EVENT_ALTERNATIVE_RECEIVE); \
 \
        IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, rx, irq), \
                    DT_INST_IRQ_BY_NAME(index, rx, priority), spi_xmc4xxx_isr, \
                    DEVICE_DT_INST_GET(index), 0); \
 \
        irq_enable(irq_num); \
    }

#define XMC4XXX_IRQ_HANDLER_STRUCT_INIT(index) .irq_config_func = spi_xmc4xxx_irq_setup_##index,
#else
#define XMC4XXX_IRQ_HANDLER_INIT(index)
#define XMC4XXX_IRQ_HANDLER_STRUCT_INIT(index)
#endif

#if defined(CONFIG_SPI_XMC4XXX_DMA)
/* Config-struct IRQ numbers used by the DMA path to gate the async ISR. */
#define XMC4XXX_IRQ_DMA_STRUCT_INIT(index) \
    .irq_num_rx = DT_INST_IRQ_BY_NAME(index, rx, irq), \
    .irq_num_tx = DT_INST_IRQ_BY_NAME(index, tx, irq),
#else
#define XMC4XXX_IRQ_DMA_STRUCT_INIT(index)
#endif

/* Instantiate data, config, and device objects for DT instance @p index. */
#define XMC4XXX_INIT(index) \
    PINCTRL_DT_INST_DEFINE(index); \
    XMC4XXX_IRQ_HANDLER_INIT(index) \
    static struct spi_xmc4xxx_data xmc4xxx_data_##index = { \
        SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(index), ctx) \
        SPI_CONTEXT_INIT_LOCK(xmc4xxx_data_##index, ctx), \
        SPI_CONTEXT_INIT_SYNC(xmc4xxx_data_##index, ctx), \
        SPI_DMA_CHANNEL(index, tx, MEMORY_TO_PERIPHERAL, \
                        8, 1) \
        SPI_DMA_CHANNEL(index, rx, PERIPHERAL_TO_MEMORY, 1, 8)}; \
 \
    static const struct spi_xmc4xxx_config xmc4xxx_config_##index = { \
        .spi = (XMC_USIC_CH_t *)DT_INST_REG_ADDR(index), \
        .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index), \
        .miso_src = DT_INST_ENUM_IDX(index, miso_src), \
        XMC4XXX_IRQ_HANDLER_STRUCT_INIT(index) \
        XMC4XXX_IRQ_DMA_STRUCT_INIT(index)}; \
 \
    DEVICE_DT_INST_DEFINE(index, spi_xmc4xxx_init, NULL, &xmc4xxx_data_##index, \
                          &xmc4xxx_config_##index, POST_KERNEL, \
                          CONFIG_SPI_INIT_PRIORITY, &spi_xmc4xxx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(XMC4XXX_INIT) ```
/content/code_sandbox/drivers/spi/spi_xmc4xxx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,799
```c /* * */ #define DT_DRV_COMPAT microchip_xec_qmspi_ldma #include <errno.h> #include <soc.h> #include <zephyr/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/spi.h> #include <zephyr/dt-bindings/clock/mchp_xec_pcr.h> #include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h> #include <zephyr/irq.h> #include <zephyr/pm/device.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_xec, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" /* #define MCHP_XEC_QMSPI_DEBUG 1 */ /* MEC172x QMSPI controller SPI Mode 3 signalling has an anomaly where * received data is shifted off the input line(s) improperly. Received * data bytes will be left shifted by 1. Work-around for SPI Mode 3 is * to sample input line(s) on same edge as output data is ready. 
*/ #define XEC_QMSPI_SPI_MODE_3_ANOMALY 1 /* common clock control device node for all Microchip XEC chips */ #define MCHP_XEC_CLOCK_CONTROL_NODE DT_NODELABEL(pcr) /* spin loops waiting for HW to clear soft reset bit */ #define XEC_QMSPI_SRST_LOOPS 16 /* microseconds for busy wait and total wait interval */ #define XEC_QMSPI_WAIT_INTERVAL 8 #define XEC_QMSPI_WAIT_COUNT 64 /* QSPI transfer and DMA done */ #define XEC_QSPI_HW_XFR_DMA_DONE (MCHP_QMSPI_STS_DONE | MCHP_QMSPI_STS_DMA_DONE) /* QSPI hardware error status * Misprogrammed control or descriptors (software error) * Overflow TX FIFO * Underflow RX FIFO */ #define XEC_QSPI_HW_ERRORS (MCHP_QMSPI_STS_PROG_ERR | \ MCHP_QMSPI_STS_TXB_ERR | \ MCHP_QMSPI_STS_RXB_ERR) #define XEC_QSPI_HW_ERRORS_LDMA (MCHP_QMSPI_STS_LDMA_RX_ERR | \ MCHP_QMSPI_STS_LDMA_TX_ERR) #define XEC_QSPI_HW_ERRORS_ALL (XEC_QSPI_HW_ERRORS | \ XEC_QSPI_HW_ERRORS_LDMA) #define XEC_QSPI_TIMEOUT_US (100 * 1000) /* 100 ms */ /* Device constant configuration parameters */ struct spi_qmspi_config { struct qmspi_regs *regs; const struct device *clk_dev; struct mchp_xec_pcr_clk_ctrl clksrc; uint32_t clock_freq; uint32_t cs1_freq; uint32_t cs_timing; uint16_t taps_adj; uint8_t girq; uint8_t girq_pos; uint8_t girq_nvic_aggr; uint8_t girq_nvic_direct; uint8_t irq_pri; uint8_t chip_sel; uint8_t width; /* 0(half) 1(single), 2(dual), 4(quad) */ uint8_t unused[1]; const struct pinctrl_dev_config *pcfg; void (*irq_config_func)(void); }; #define XEC_QMSPI_XFR_FLAG_TX BIT(0) #define XEC_QMSPI_XFR_FLAG_RX BIT(1) /* Device run time data */ struct spi_qmspi_data { struct spi_context ctx; uint32_t base_freq_hz; uint32_t spi_freq_hz; uint32_t qstatus; uint8_t np; /* number of data pins: 1, 2, or 4 */ #ifdef CONFIG_SPI_ASYNC spi_callback_t cb; void *userdata; size_t xfr_len; #endif uint32_t tempbuf[2]; #ifdef MCHP_XEC_QMSPI_DEBUG uint32_t bufcnt_status; uint32_t rx_ldma_ctrl0; uint32_t tx_ldma_ctrl0; uint32_t qunits; uint32_t qxfru; uint32_t xfrlen; #endif }; static int 
xec_qmspi_spin_yield(int *counter, int max_count)
{
    /* Bounded busy-wait helper: -ETIMEDOUT after max_count iterations. */
    *counter = *counter + 1;

    if (*counter > max_count) {
        return -ETIMEDOUT;
    }

    k_busy_wait(XEC_QMSPI_WAIT_INTERVAL);

    return 0;
}

/*
 * reset QMSPI controller with save/restore of timing registers.
 * Some QMSPI timing register may be modified by the Boot-ROM OTP
 * values.
 */
static void qmspi_reset(struct qmspi_regs *regs)
{
    uint32_t taps[3];
    uint32_t malt1;
    uint32_t cstm;
    uint32_t mode;
    uint32_t cnt = XEC_QMSPI_SRST_LOOPS;

    /* Save registers that the soft reset clears. */
    taps[0] = regs->TM_TAPS;
    taps[1] = regs->TM_TAPS_ADJ;
    taps[2] = regs->TM_TAPS_CTRL;
    malt1 = regs->MODE_ALT1;
    cstm = regs->CSTM;
    mode = regs->MODE;
    regs->MODE = MCHP_QMSPI_M_SRST;

    /* Bounded spin: hardware clears SRST when the reset completes. */
    while (regs->MODE & MCHP_QMSPI_M_SRST) {
        if (cnt == 0) {
            break;
        }
        cnt--;
    }

    /* Restore, but do not re-activate the controller yet. */
    regs->MODE = 0;
    regs->MODE = mode & ~MCHP_QMSPI_M_ACTIVATE;
    regs->CSTM = cstm;
    regs->MODE_ALT1 = malt1;
    regs->TM_TAPS = taps[0];
    regs->TM_TAPS_ADJ = taps[1];
    regs->TM_TAPS_CTRL = taps[2];
}

/* Encode @p freq_hz as a QMSPI clock-divider value (0 = maximum divider). */
static uint32_t qmspi_encoded_fdiv(const struct device *dev, uint32_t freq_hz)
{
    struct spi_qmspi_data *qdata = dev->data;

    if (freq_hz == 0u) {
        return 0u; /* maximum frequency divider */
    }

    return (qdata->base_freq_hz / freq_hz);
}

/* Program QMSPI frequency divider field in the mode register.
 * MEC172x QMSPI input clock source is the Fast Peripheral domain whose
 * clock is controlled by the PCR turbo clock. 96 MHz if turbo mode
 * enabled else 48 MHz. Query the clock control driver to get clock
 * rate of fast peripheral domain. MEC172x QMSPI clock divider has
 * been expanded to a 16-bit field encoded as:
 * 0 = divide by 0x10000
 * 1 to 0xffff = divide by this value.
*/ static int qmspi_set_frequency(struct spi_qmspi_data *qdata, struct qmspi_regs *regs, uint32_t freq_hz) { uint32_t clk = MCHP_QMSPI_INPUT_CLOCK_FREQ_HZ; uint32_t fdiv = 0u; /* maximum divider */ if (qdata->base_freq_hz) { clk = qdata->base_freq_hz; } if (freq_hz) { fdiv = 1u; if (freq_hz < clk) { fdiv = clk / freq_hz; } } regs->MODE = ((regs->MODE & ~(MCHP_QMSPI_M_FDIV_MASK)) | ((fdiv << MCHP_QMSPI_M_FDIV_POS) & MCHP_QMSPI_M_FDIV_MASK)); if (!fdiv) { fdiv = 0x10000u; } qdata->spi_freq_hz = clk / fdiv; return 0; } /* * SPI signalling mode: CPOL and CPHA * CPOL = 0 is clock idles low, 1 is clock idle high * CPHA = 0 Transmitter changes data on trailing of preceding clock cycle. * Receiver samples data on leading edge of clock cycle. * 1 Transmitter changes data on leading edge of current clock cycle. * Receiver samples data on the trailing edge of clock cycle. * SPI Mode nomenclature: * Mode CPOL CPHA * 0 0 0 * 1 0 1 * 2 1 0 * 3 1 1 * QMSPI has three controls, CPOL, CPHA for output and CPHA for input. * SPI frequency < 48MHz * Mode 0: CPOL=0 CHPA=0 (CHPA_MISO=0 and CHPA_MOSI=0) * Mode 3: CPOL=1 CHPA=1 (CHPA_MISO=1 and CHPA_MOSI=1) * Data sheet recommends when QMSPI set at max. SPI frequency (48MHz). * SPI frequency == 48MHz sample and change data on same edge. * Mode 0: CPOL=0 CHPA=0 (CHPA_MISO=1 and CHPA_MOSI=0) * Mode 3: CPOL=1 CHPA=1 (CHPA_MISO=0 and CHPA_MOSI=1) * * There is an anomaly in MEC172x for SPI signalling mode 3. We must * set CHPA_MISO=0 for SPI Mode 3 at all frequencies. 
*/ const uint8_t smode_tbl[4] = { 0x00u, 0x06u, 0x01u, #ifdef XEC_QMSPI_SPI_MODE_3_ANOMALY 0x03u, /* CPOL=1, CPHA_MOSI=1, CPHA_MISO=0 */ #else 0x07u, /* CPOL=1, CPHA_MOSI=1, CPHA_MISO=1 */ #endif }; const uint8_t smode48_tbl[4] = { 0x04u, 0x02u, 0x05u, 0x03u }; static void qmspi_set_signalling_mode(struct spi_qmspi_data *qdata, struct qmspi_regs *regs, uint32_t smode) { const uint8_t *ptbl; uint32_t m; ptbl = smode_tbl; if (qdata->spi_freq_hz >= MHZ(48)) { ptbl = smode48_tbl; } m = (uint32_t)ptbl[smode & 0x03]; regs->MODE = (regs->MODE & ~(MCHP_QMSPI_M_SIG_MASK)) | (m << MCHP_QMSPI_M_SIG_POS); } #ifdef CONFIG_SPI_EXTENDED_MODES /* * QMSPI HW support single, dual, and quad. * Return QMSPI Control/Descriptor register encoded value. */ static uint32_t encode_lines(const struct spi_config *config) { uint32_t qlines; switch (config->operation & SPI_LINES_MASK) { case SPI_LINES_SINGLE: qlines = MCHP_QMSPI_C_IFM_1X; break; #if DT_INST_PROP(0, lines) > 1 case SPI_LINES_DUAL: qlines = MCHP_QMSPI_C_IFM_2X; break; #endif #if DT_INST_PROP(0, lines) > 2 case SPI_LINES_QUAD: qlines = MCHP_QMSPI_C_IFM_4X; break; #endif default: qlines = 0xffu; } return qlines; } static uint8_t npins_from_spi_config(const struct spi_config *config) { switch (config->operation & SPI_LINES_MASK) { case SPI_LINES_DUAL: return 2u; case SPI_LINES_QUAD: return 4u; default: return 1u; } } #endif /* CONFIG_SPI_EXTENDED_MODES */ static int spi_feature_support(const struct spi_config *config) { if (config->operation & (SPI_TRANSFER_LSB | SPI_OP_MODE_SLAVE | SPI_MODE_LOOP)) { LOG_ERR("Driver does not support LSB first, slave, or loop back"); return -ENOTSUP; } if (config->operation & SPI_CS_ACTIVE_HIGH) { LOG_ERR("CS active high not supported"); return -ENOTSUP; } if (config->operation & SPI_LOCK_ON) { LOG_ERR("Lock On not supported"); return -ENOTSUP; } if (SPI_WORD_SIZE_GET(config->operation) != 8) { LOG_ERR("Word size != 8 not supported"); return -ENOTSUP; } return 0; } /* Configure QMSPI. 
* NOTE: QMSPI Shared SPI port has two chip selects. * Private SPI and internal SPI ports support one chip select. * Hardware supports dual and quad I/O. Dual and quad are allowed * if SPI extended mode is enabled at build time. User must * provide pin configuration via DTS. */ static int qmspi_configure(const struct device *dev, const struct spi_config *config) { const struct spi_qmspi_config *cfg = dev->config; struct spi_qmspi_data *qdata = dev->data; struct qmspi_regs *regs = cfg->regs; uint32_t smode; int ret; if (!config) { return -EINVAL; } if (spi_context_configured(&qdata->ctx, config)) { return 0; } qmspi_set_frequency(qdata, regs, config->frequency); /* check new configuration */ ret = spi_feature_support(config); if (ret) { return ret; } #ifdef CONFIG_SPI_EXTENDED_MODES smode = encode_lines(config); if (smode == 0xff) { LOG_ERR("Requested lines mode not supported"); return -ENOTSUP; } qdata->np = npins_from_spi_config(config); #else smode = MCHP_QMSPI_C_IFM_1X; qdata->np = 1u; #endif regs->CTRL = smode; smode = 0; if ((config->operation & SPI_MODE_CPHA) != 0U) { smode |= BIT(0); } if ((config->operation & SPI_MODE_CPOL) != 0U) { smode |= BIT(1); } qmspi_set_signalling_mode(qdata, regs, smode); /* chip select */ smode = regs->MODE & ~(MCHP_QMSPI_M_CS_MASK); if (cfg->chip_sel == 0) { smode |= MCHP_QMSPI_M_CS0; } else { smode |= MCHP_QMSPI_M_CS1; } regs->MODE = smode; /* chip select timing and TAPS adjust */ regs->CSTM = cfg->cs_timing; regs->TM_TAPS_ADJ = cfg->taps_adj; /* CS1 alternate mode (frequency) */ regs->MODE_ALT1 = 0; if (cfg->cs1_freq) { uint32_t fdiv = qmspi_encoded_fdiv(dev, cfg->cs1_freq); regs->MODE_ALT1 = (fdiv << MCHP_QMSPI_MA1_CS1_CDIV_POS) & MCHP_QMSPI_MA1_CS1_CDIV_MSK; regs->MODE_ALT1 |= MCHP_QMSPI_MA1_CS1_CDIV_EN; } qdata->ctx.config = config; regs->MODE |= MCHP_QMSPI_M_ACTIVATE; return 0; } static uint32_t encode_npins(uint8_t npins) { if (npins == 4) { return MCHP_QMSPI_C_IFM_4X; } else if (npins == 2) { return MCHP_QMSPI_C_IFM_2X; } 
else { return MCHP_QMSPI_C_IFM_1X; } } /* Common controller transfer initialziation using Local-DMA. * Full-duplex: controller configured to transmit and receive simultaneouly. * Half-duplex(dual/quad): User may only specify TX or RX buffer sets. * Passing both buffers sets is reported as an error. */ static inline int qmspi_xfr_cm_init(const struct device *dev, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_qmspi_config *devcfg = dev->config; struct spi_qmspi_data *qdata = dev->data; struct qmspi_regs *regs = devcfg->regs; regs->IEN = 0; regs->EXE = MCHP_QMSPI_EXE_CLR_FIFOS; regs->LDMA_RX_DESCR_BM = 0; regs->LDMA_TX_DESCR_BM = 0; regs->MODE &= ~(MCHP_QMSPI_M_LDMA_TX_EN | MCHP_QMSPI_M_LDMA_RX_EN); regs->STS = 0xffffffffu; regs->CTRL = encode_npins(qdata->np); qdata->qstatus = 0; #ifdef CONFIG_SPI_EXTENDED_MODES if (qdata->np != 1) { if (tx_bufs && rx_bufs) { LOG_ERR("Cannot specify both TX and RX buffers in half-duplex(dual/quad)"); return -EPROTONOSUPPORT; } } #endif return 0; } /* QMSPI Local-DMA transfer configuration: * Support full and half(dual/quad) duplex transfers. * Requires caller to have checked that only one direction was setup * in the SPI context: TX or RX not both. (refer to qmspi_xfr_cm_init) * Supports spi_buf's where data pointer is NULL and length non-zero. * These buffers are used as TX tri-state I/O clock only generation or * RX data discard for certain SPI command protocols using dual/quad I/O. * 1. Get largest contiguous data size from SPI context. * 2. If the SPI TX context has a non-zero length configure Local-DMA TX * channel 1 for contiguous data size. If TX context has valid buffer * configure channel to use context buffer with address increment. * If the TX buffer pointer is NULL interpret byte length as the number * of clocks to generate with output line(s) tri-stated. NOTE: The controller * must be configured with TX disabled to not drive output line(s) during * clock generation. 
Also, no data should be written to TX FIFO. The unit * size can be set to bits. The number of units to transfer must be computed * based upon the number of output pins in the IOM field: full-duplex is one * bit per clock, dual is 2 bits per clock, and quad is 4 bits per clock. * For example, if I/O lines is 4 (quad) meaning 4 bits per clock and the * user wants 7 clocks then the number of bit units is 4 * 7 = 28. * 3. If instead, the SPI RX context has a non-zero length configure Local-DMA * RX channel 1 for the contiguous data size. If RX context has a valid * buffer configure channel to use buffer with address increment else * configure channel for driver data temporary buffer without address * increment. * 4. Update QMSPI Control register. */ static uint32_t qmspi_ldma_encode_unit_size(uint32_t maddr, size_t len) { uint8_t temp = (maddr | (uint32_t)len) & 0x3u; if (temp == 0) { return MCHP_QMSPI_LDC_ASZ_4; } else if (temp == 2) { return MCHP_QMSPI_LDC_ASZ_2; } else { return MCHP_QMSPI_LDC_ASZ_1; } } static uint32_t qmspi_unit_size(size_t xfrlen) { if ((xfrlen & 0xfu) == 0u) { return 16u; } else if ((xfrlen & 0x3u) == 0u) { return 4u; } else { return 1u; } } static uint32_t qmspi_encode_unit_size(uint32_t units_in_bytes) { if (units_in_bytes == 16u) { return MCHP_QMSPI_C_XFR_UNITS_16; } else if (units_in_bytes == 4u) { return MCHP_QMSPI_C_XFR_UNITS_4; } else { return MCHP_QMSPI_C_XFR_UNITS_1; } } static size_t q_ldma_cfg(const struct device *dev) { const struct spi_qmspi_config *devcfg = dev->config; struct spi_qmspi_data *qdata = dev->data; struct spi_context *ctx = &qdata->ctx; struct qmspi_regs *regs = devcfg->regs; size_t ctx_xfr_len = spi_context_max_continuous_chunk(ctx); uint32_t ctrl, ldctrl, mstart, qunits, qxfru, xfrlen; regs->EXE = MCHP_QMSPI_EXE_CLR_FIFOS; regs->MODE &= ~(MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN); regs->LDRX[0].CTRL = 0; regs->LDRX[0].MSTART = 0; regs->LDRX[0].LEN = 0; regs->LDTX[0].CTRL = 0; regs->LDTX[0].MSTART = 0; 
regs->LDTX[0].LEN = 0; if (ctx_xfr_len == 0) { return 0; } qunits = qmspi_unit_size(ctx_xfr_len); ctrl = qmspi_encode_unit_size(qunits); qxfru = ctx_xfr_len / qunits; if (qxfru > 0x7fffu) { qxfru = 0x7fffu; } ctrl |= (qxfru << MCHP_QMSPI_C_XFR_NUNITS_POS); xfrlen = qxfru * qunits; #ifdef MCHP_XEC_QMSPI_DEBUG qdata->qunits = qunits; qdata->qxfru = qxfru; qdata->xfrlen = xfrlen; #endif if (spi_context_tx_buf_on(ctx)) { mstart = (uint32_t)ctx->tx_buf; ctrl |= MCHP_QMSPI_C_TX_DATA | MCHP_QMSPI_C_TX_LDMA_CH0; ldctrl = qmspi_ldma_encode_unit_size(mstart, xfrlen); ldctrl |= MCHP_QMSPI_LDC_INCR_EN | MCHP_QMSPI_LDC_EN; regs->MODE |= MCHP_QMSPI_M_LDMA_TX_EN; regs->LDTX[0].LEN = xfrlen; regs->LDTX[0].MSTART = mstart; regs->LDTX[0].CTRL = ldctrl; } if (spi_context_rx_buf_on(ctx)) { mstart = (uint32_t)ctx->rx_buf; ctrl |= MCHP_QMSPI_C_RX_LDMA_CH0 | MCHP_QMSPI_C_RX_EN; ldctrl = MCHP_QMSPI_LDC_EN | MCHP_QMSPI_LDC_INCR_EN; ldctrl |= qmspi_ldma_encode_unit_size(mstart, xfrlen); regs->MODE |= MCHP_QMSPI_M_LDMA_RX_EN; regs->LDRX[0].LEN = xfrlen; regs->LDRX[0].MSTART = mstart; regs->LDRX[0].CTRL = ldctrl; } regs->CTRL = (regs->CTRL & 0x3u) | ctrl; return xfrlen; } /* Start and wait for QMSPI synchronous transfer(s) to complete. * Initialize QMSPI controller for Local-DMA operation. * Iterate over SPI context with non-zero TX or RX data lengths. * 1. Configure QMSPI Control register and Local-DMA channel(s) * 2. Clear QMSPI status * 3. Start QMSPI transfer * 4. Poll QMSPI status for transfer done and DMA done with timeout. * 5. Hardware anomaly work-around: Poll with timeout QMSPI Local-DMA * TX and RX channels until hardware clears both channel enables. * This indicates hardware is really done with transfer to/from memory. * 6. Update SPI context with amount of data transmitted and received. * If SPI configuration hold chip select on flag is not set then instruct * QMSPI to de-assert chip select. 
 * Set SPI context as complete
 */
static int qmspi_xfr_sync(const struct device *dev, const struct spi_config *spi_cfg,
                          const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs)
{
    const struct spi_qmspi_config *devcfg = dev->config;
    struct spi_qmspi_data *qdata = dev->data;
    struct spi_context *ctx = &qdata->ctx;
    struct qmspi_regs *regs = devcfg->regs;
    size_t xfr_len;
    int ret = qmspi_xfr_cm_init(dev, tx_bufs, rx_bufs);

    if (ret) {
        return ret;
    }

    /* One iteration per contiguous chunk of the SPI context. */
    while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)) {
        xfr_len = q_ldma_cfg(dev);
        regs->STS = 0xffffffffu;
        regs->EXE = MCHP_QMSPI_EXE_START;

#ifdef MCHP_XEC_QMSPI_DEBUG
        /* Debug build: unbounded poll, then snapshot HW state. */
        uint32_t temp = regs->STS;

        while (!(temp & MCHP_QMSPI_STS_DONE)) {
            temp = regs->STS;
        }
        qdata->qstatus = temp;
        qdata->bufcnt_status = regs->BCNT_STS;
        qdata->rx_ldma_ctrl0 = regs->LDRX[0].CTRL;
        qdata->tx_ldma_ctrl0 = regs->LDTX[0].CTRL;
#else
        /* Poll DONE with a microsecond timeout; stop HW on timeout. */
        uint32_t wcnt = 0;

        qdata->qstatus = regs->STS;
        while (!(qdata->qstatus & MCHP_QMSPI_STS_DONE)) {
            k_busy_wait(1u);
            if (++wcnt > XEC_QSPI_TIMEOUT_US) {
                regs->EXE = MCHP_QMSPI_EXE_STOP;
                return -ETIMEDOUT;
            }
            qdata->qstatus = regs->STS;
        }
#endif
        spi_context_update_tx(ctx, 1, xfr_len);
        spi_context_update_rx(ctx, 1, xfr_len);
    }

    if (!(spi_cfg->operation & SPI_HOLD_ON_CS)) {
        regs->EXE = MCHP_QMSPI_EXE_STOP;
    }

    spi_context_complete(ctx, dev, 0);

    return 0;
}

#ifdef CONFIG_SPI_ASYNC
/* Configure QMSPI such that QMSPI transfer FSM and LDMA FSM are synchronized.
 * Transfer length must be programmed into control/descriptor register(s) and
 * LDMA register(s). LDMA override length bit must NOT be set.
*/ static int qmspi_xfr_start_async(const struct device *dev, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_qmspi_config *devcfg = dev->config; struct spi_qmspi_data *qdata = dev->data; struct qmspi_regs *regs = devcfg->regs; int ret; ret = qmspi_xfr_cm_init(dev, tx_bufs, rx_bufs); if (ret) { return ret; } qdata->xfr_len = q_ldma_cfg(dev); if (!qdata->xfr_len) { return 0; /* nothing to do */ } regs->STS = 0xffffffffu; regs->EXE = MCHP_QMSPI_EXE_START; regs->IEN = MCHP_QMSPI_IEN_XFR_DONE | MCHP_QMSPI_IEN_PROG_ERR | MCHP_QMSPI_IEN_LDMA_RX_ERR | MCHP_QMSPI_IEN_LDMA_TX_ERR; return 0; } /* Wrapper to start asynchronous (interrupts enabled) SPI transaction */ static int qmspi_xfr_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_qmspi_data *qdata = dev->data; int err = 0; qdata->qstatus = 0; qdata->xfr_len = 0; err = qmspi_xfr_start_async(dev, tx_bufs, rx_bufs); return err; } #endif /* CONFIG_SPI_ASYNC */ /* Start (a)synchronous transaction using QMSPI Local-DMA */ static int qmspi_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *user_data) { struct spi_qmspi_data *qdata = dev->data; struct spi_context *ctx = &qdata->ctx; int err = 0; if (!config) { return -EINVAL; } if (!tx_bufs && !rx_bufs) { return 0; } spi_context_lock(&qdata->ctx, asynchronous, cb, user_data, config); err = qmspi_configure(dev, config); if (err != 0) { spi_context_release(ctx, err); return err; } spi_context_cs_control(ctx, true); spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); #ifdef CONFIG_SPI_ASYNC if (asynchronous) { qdata->cb = cb; qdata->userdata = user_data; err = qmspi_xfr_async(dev, config, tx_bufs, rx_bufs); } else { err = qmspi_xfr_sync(dev, config, tx_bufs, rx_bufs); } #else err = qmspi_xfr_sync(dev, config, 
tx_bufs, rx_bufs); #endif if (err) { /* de-assert CS# and give semaphore */ spi_context_unlock_unconditionally(ctx); return err; } if (asynchronous) { return err; } err = spi_context_wait_for_completion(ctx); if (!(config->operation & SPI_HOLD_ON_CS)) { spi_context_cs_control(ctx, false); } spi_context_release(ctx, err); return err; } static int qmspi_transceive_sync(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return qmspi_transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int qmspi_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return qmspi_transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int qmspi_release(const struct device *dev, const struct spi_config *config) { struct spi_qmspi_data *data = dev->data; const struct spi_qmspi_config *cfg = dev->config; struct qmspi_regs *regs = cfg->regs; int ret = 0; int counter = 0; if (regs->STS & MCHP_QMSPI_STS_ACTIVE_RO) { /* Force CS# to de-assert on next unit boundary */ regs->EXE = MCHP_QMSPI_EXE_STOP; while (regs->STS & MCHP_QMSPI_STS_ACTIVE_RO) { ret = xec_qmspi_spin_yield(&counter, XEC_QMSPI_WAIT_COUNT); if (ret != 0) { break; } } } spi_context_unlock_unconditionally(&data->ctx); return ret; } /* QMSPI interrupt handler called by Zephyr ISR * All transfers use QMSPI Local-DMA specified by the Control register. * QMSPI descriptor mode not used. 
* Full-duplex always uses LDMA TX channel 0 and RX channel 0 * Half-duplex(dual/quad) use one of TX channel 0 or RX channel 0 */ void qmspi_xec_isr(const struct device *dev) { const struct spi_qmspi_config *cfg = dev->config; struct spi_qmspi_data *data = dev->data; struct qmspi_regs *regs = cfg->regs; uint32_t qstatus = regs->STS; #ifdef CONFIG_SPI_ASYNC struct spi_context *ctx = &data->ctx; int xstatus = 0; #endif regs->IEN = 0; data->qstatus = qstatus; regs->STS = MCHP_QMSPI_STS_RW1C_MASK; mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos); #ifdef CONFIG_SPI_ASYNC if (qstatus & XEC_QSPI_HW_ERRORS_ALL) { xstatus = -EIO; data->qstatus |= BIT(7); regs->EXE = MCHP_QMSPI_EXE_STOP; spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, xstatus); if (data->cb) { data->cb(dev, xstatus, data->userdata); } return; } /* Clear Local-DMA enables in Mode and Control registers */ regs->MODE &= ~(MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN); regs->CTRL &= MCHP_QMSPI_C_IFM_MASK; spi_context_update_tx(ctx, 1, data->xfr_len); spi_context_update_rx(ctx, 1, data->xfr_len); data->xfr_len = q_ldma_cfg(dev); if (data->xfr_len) { regs->STS = 0xffffffffu; regs->EXE = MCHP_QMSPI_EXE_START; regs->IEN = MCHP_QMSPI_IEN_XFR_DONE | MCHP_QMSPI_IEN_PROG_ERR | MCHP_QMSPI_IEN_LDMA_RX_ERR | MCHP_QMSPI_IEN_LDMA_TX_ERR; return; } if (!(ctx->owner->operation & SPI_HOLD_ON_CS)) { regs->EXE = MCHP_QMSPI_EXE_STOP; spi_context_cs_control(&data->ctx, false); } spi_context_complete(&data->ctx, dev, xstatus); if (data->cb) { data->cb(dev, xstatus, data->userdata); } #endif /* CONFIG_SPI_ASYNC */ } #ifdef CONFIG_PM_DEVICE /* If the application wants the QMSPI pins to be disabled in suspend it must * define pinctr-1 values for each pin in the app/project DT overlay. 
*/ static int qmspi_xec_pm_action(const struct device *dev, enum pm_device_action action) { const struct spi_qmspi_config *devcfg = dev->config; int ret; switch (action) { case PM_DEVICE_ACTION_RESUME: ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT); break; case PM_DEVICE_ACTION_SUSPEND: ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP); if (ret == -ENOENT) { /* pinctrl-1 does not exist */ ret = 0; } break; default: ret = -ENOTSUP; } return ret; } #endif /* CONFIG_PM_DEVICE */ /* * Called for each QMSPI controller instance * Initialize QMSPI controller. * Disable sleep control. * Disable and clear interrupt status. * Initialize SPI context. * QMSPI will be fully configured and enabled when the transceive API * is called. */ static int qmspi_xec_init(const struct device *dev) { const struct spi_qmspi_config *cfg = dev->config; struct spi_qmspi_data *qdata = dev->data; struct qmspi_regs *regs = cfg->regs; clock_control_subsys_t clkss = (clock_control_subsys_t)MCHP_XEC_PCR_CLK_PERIPH_FAST; int ret = 0; qdata->base_freq_hz = 0u; qdata->qstatus = 0; qdata->np = cfg->width; #ifdef CONFIG_SPI_ASYNC qdata->xfr_len = 0; #endif if (!cfg->clk_dev) { LOG_ERR("XEC QMSPI-LDMA clock device not configured"); return -EINVAL; } ret = clock_control_on(cfg->clk_dev, (clock_control_subsys_t)&cfg->clksrc); if (ret < 0) { LOG_ERR("XEC QMSPI-LDMA enable clock source error %d", ret); return ret; } ret = clock_control_get_rate(cfg->clk_dev, clkss, &qdata->base_freq_hz); if (ret) { LOG_ERR("XEC QMSPI-LDMA clock get rate error %d", ret); return ret; } /* controller in known state before enabling pins */ qmspi_reset(regs); mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos); ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("XEC QMSPI-LDMA pinctrl setup failed (%d)", ret); return ret; } /* default SPI Mode 0 signalling */ const struct spi_config spi_cfg = { .frequency = cfg->clock_freq, .operation = SPI_LINES_SINGLE | 
SPI_WORD_SET(8), }; ret = qmspi_configure(dev, &spi_cfg); if (ret) { LOG_ERR("XEC QMSPI-LDMA init configure failed (%d)", ret); return ret; } #ifdef CONFIG_SPI_ASYNC cfg->irq_config_func(); mchp_xec_ecia_enable(cfg->girq, cfg->girq_pos); #endif spi_context_unlock_unconditionally(&qdata->ctx); return 0; } static const struct spi_driver_api spi_qmspi_xec_driver_api = { .transceive = qmspi_transceive_sync, #ifdef CONFIG_SPI_ASYNC .transceive_async = qmspi_transceive_async, #endif .release = qmspi_release, }; #define XEC_QMSPI_CS_TIMING_VAL(a, b, c, d) (((a) & 0xFu) \ | (((b) & 0xFu) << 8) \ | (((c) & 0xFu) << 16) \ | (((d) & 0xFu) << 24)) #define XEC_QMSPI_TAPS_ADJ_VAL(a, b) (((a) & 0xffu) | (((b) & 0xffu) << 8)) #define XEC_QMSPI_CS_TIMING(i) XEC_QMSPI_CS_TIMING_VAL( \ DT_INST_PROP_OR(i, dcsckon, 6), \ DT_INST_PROP_OR(i, dckcsoff, 4), \ DT_INST_PROP_OR(i, dldh, 6), \ DT_INST_PROP_OR(i, dcsda, 6)) #define XEC_QMSPI_TAPS_ADJ(i) XEC_QMSPI_TAPS_ADJ_VAL( \ DT_INST_PROP_OR(i, tctradj, 0), \ DT_INST_PROP_OR(i, tsckadj, 0)) #define XEC_QMSPI_GIRQ(i) \ MCHP_XEC_ECIA_GIRQ(DT_INST_PROP_BY_IDX(i, girqs, 0)) #define XEC_QMSPI_GIRQ_POS(i) \ MCHP_XEC_ECIA_GIRQ_POS(DT_INST_PROP_BY_IDX(i, girqs, 0)) #define XEC_QMSPI_NVIC_AGGR(i) \ MCHP_XEC_ECIA_NVIC_AGGR(DT_INST_PROP_BY_IDX(i, girqs, 0)) #define XEC_QMSPI_NVIC_DIRECT(i) \ MCHP_XEC_ECIA_NVIC_DIRECT(DT_INST_PROP_BY_IDX(i, girqs, 0)) #define XEC_QMSPI_PCR_INFO(i) \ MCHP_XEC_PCR_SCR_ENCODE(DT_INST_CLOCKS_CELL(i, regidx), \ DT_INST_CLOCKS_CELL(i, bitpos), \ DT_INST_CLOCKS_CELL(i, domain)) /* * The instance number, i is not related to block ID's rather the * order the DT tools process all DT files in a build. 
*/ #define QMSPI_XEC_DEVICE(i) \ \ PINCTRL_DT_INST_DEFINE(i); \ \ static void qmspi_xec_irq_config_func_##i(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(i), \ DT_INST_IRQ(i, priority), \ qmspi_xec_isr, \ DEVICE_DT_INST_GET(i), 0); \ irq_enable(DT_INST_IRQN(i)); \ } \ \ static struct spi_qmspi_data qmspi_xec_data_##i = { \ SPI_CONTEXT_INIT_LOCK(qmspi_xec_data_##i, ctx), \ SPI_CONTEXT_INIT_SYNC(qmspi_xec_data_##i, ctx), \ }; \ static const struct spi_qmspi_config qmspi_xec_config_##i = { \ .regs = (struct qmspi_regs *) DT_INST_REG_ADDR(i), \ .clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(i)), \ .clksrc = { .pcr_info = XEC_QMSPI_PCR_INFO(i), }, \ .clock_freq = DT_INST_PROP_OR(i, clock_frequency, MHZ(12)), \ .cs1_freq = DT_INST_PROP_OR(i, cs1_freq, 0), \ .cs_timing = XEC_QMSPI_CS_TIMING(i), \ .taps_adj = XEC_QMSPI_TAPS_ADJ(i), \ .girq = XEC_QMSPI_GIRQ(i), \ .girq_pos = XEC_QMSPI_GIRQ_POS(i), \ .girq_nvic_aggr = XEC_QMSPI_NVIC_AGGR(i), \ .girq_nvic_direct = XEC_QMSPI_NVIC_DIRECT(i), \ .irq_pri = DT_INST_IRQ(i, priority), \ .chip_sel = DT_INST_PROP_OR(i, chip_select, 0), \ .width = DT_INST_PROP_OR(0, lines, 1), \ .irq_config_func = qmspi_xec_irq_config_func_##i, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(i), \ }; \ PM_DEVICE_DT_INST_DEFINE(i, qmspi_xec_pm_action); \ DEVICE_DT_INST_DEFINE(i, qmspi_xec_init, \ PM_DEVICE_DT_INST_GET(i), \ &qmspi_xec_data_##i, &qmspi_xec_config_##i, \ POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ &spi_qmspi_xec_driver_api); DT_INST_FOREACH_STATUS_OKAY(QMSPI_XEC_DEVICE) ```
/content/code_sandbox/drivers/spi/spi_xec_qmspi_ldma.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,770
```unknown # ESP32 SPI configuration menuconfig ESP32_SPIM bool "ESP32 SPI Master driver" default y depends on DT_HAS_ESPRESSIF_ESP32_SPI_ENABLED help Enables support for ESP32 SPI Master driver. if ESP32_SPIM config SPI_ESP32_INTERRUPT bool "ESP32 SPI interrupt mode" help Enables interrupt support for ESP32 SPI driver. endif # ESP32_SPIM ```
/content/code_sandbox/drivers/spi/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
91
```c /* * */ #include <zephyr/drivers/spi.h> #include <zephyr/internal/syscall_handler.h> #include <string.h> /* This assumes that bufs and buf_copy are copies from the values passed * as syscall arguments. */ static struct spi_buf_set *copy_and_check(struct spi_buf_set *bufs, struct spi_buf *buf_copy, int writable) { size_t i; if (bufs->count == 0) { bufs->buffers = NULL; return NULL; } /* Validate the array of struct spi_buf instances */ K_OOPS(K_SYSCALL_MEMORY_ARRAY_READ(bufs->buffers, bufs->count, sizeof(struct spi_buf))); /* Not worried about overflow here: _SYSCALL_MEMORY_ARRAY_READ() * takes care of it. */ bufs->buffers = memcpy(buf_copy, bufs->buffers, bufs->count * sizeof(struct spi_buf)); for (i = 0; i < bufs->count; i++) { /* Now for each array element, validate the memory buffers * that they point to */ const struct spi_buf *buf = &bufs->buffers[i]; K_OOPS(K_SYSCALL_MEMORY(buf->buf, buf->len, writable)); } return bufs; } /* This function is only here so tx_buf_copy and rx_buf_copy can be allocated * using VLA. It assumes that both tx_bufs and rx_bufs will receive a copy of * the values passed to the syscall as arguments. It also assumes that the * count member has been verified and is a value that won't lead to stack * overflow. */ static uint32_t copy_bufs_and_transceive(const struct device *dev, const struct spi_config *config, struct spi_buf_set *tx_bufs, struct spi_buf_set *rx_bufs) { struct spi_buf tx_buf_copy[tx_bufs->count ? tx_bufs->count : 1]; struct spi_buf rx_buf_copy[rx_bufs->count ? 
rx_bufs->count : 1]; tx_bufs = copy_and_check(tx_bufs, tx_buf_copy, 0); rx_bufs = copy_and_check(rx_bufs, rx_buf_copy, 1); return z_impl_spi_transceive((const struct device *)dev, config, tx_bufs, rx_bufs); } static inline int z_vrfy_spi_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_buf_set tx_bufs_copy; struct spi_buf_set rx_bufs_copy; struct spi_config config_copy; K_OOPS(K_SYSCALL_MEMORY_READ(config, sizeof(*config))); K_OOPS(K_SYSCALL_DRIVER_SPI(dev, transceive)); if (tx_bufs) { const struct spi_buf_set *tx = (const struct spi_buf_set *)tx_bufs; K_OOPS(K_SYSCALL_MEMORY_READ(tx_bufs, sizeof(struct spi_buf_set))); memcpy(&tx_bufs_copy, tx, sizeof(tx_bufs_copy)); K_OOPS(K_SYSCALL_VERIFY(tx_bufs_copy.count < 32)); } else { memset(&tx_bufs_copy, 0, sizeof(tx_bufs_copy)); } if (rx_bufs) { const struct spi_buf_set *rx = (const struct spi_buf_set *)rx_bufs; K_OOPS(K_SYSCALL_MEMORY_READ(rx_bufs, sizeof(struct spi_buf_set))); memcpy(&rx_bufs_copy, rx, sizeof(rx_bufs_copy)); K_OOPS(K_SYSCALL_VERIFY(rx_bufs_copy.count < 32)); } else { memset(&rx_bufs_copy, 0, sizeof(rx_bufs_copy)); } memcpy(&config_copy, config, sizeof(*config)); if (spi_cs_is_gpio(&config_copy)) { K_OOPS(K_SYSCALL_OBJ(config_copy.cs.gpio.port, K_OBJ_DRIVER_GPIO)); } return copy_bufs_and_transceive((const struct device *)dev, &config_copy, &tx_bufs_copy, &rx_bufs_copy); } #include <zephyr/syscalls/spi_transceive_mrsh.c> static inline int z_vrfy_spi_release(const struct device *dev, const struct spi_config *config) { K_OOPS(K_SYSCALL_MEMORY_READ(config, sizeof(*config))); K_OOPS(K_SYSCALL_DRIVER_SPI(dev, release)); return z_impl_spi_release((const struct device *)dev, config); } #include <zephyr/syscalls/spi_release_mrsh.c> ```
/content/code_sandbox/drivers/spi/spi_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
990
```c /* * */ #include <errno.h> #include <stdbool.h> #include <stdint.h> #include <stdlib.h> #include <zephyr/drivers/spi.h> #include <zephyr/shell/shell.h> #include <zephyr/sys/util.h> #define TXRX_ARGV_BYTES (1) #define CONF_ARGV_DEV (1) #define CONF_ARGV_FREQUENCY (2) #define CONF_ARGV_SETTINGS (3) /* Maximum bytes we can write and read at once */ #define MAX_SPI_BYTES MIN((CONFIG_SHELL_ARGC_MAX - TXRX_ARGV_BYTES), 32) static struct device *spi_device; static struct spi_config config = {.frequency = 1000000, .operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8)}; static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, "spi"); entry->syntax = (dev != NULL) ? dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get); static int cmd_spi_transceive(const struct shell *ctx, size_t argc, char **argv) { uint8_t rx_buffer[MAX_SPI_BYTES] = {0}; uint8_t tx_buffer[MAX_SPI_BYTES] = {0}; if (spi_device == NULL) { shell_error(ctx, "SPI device isn't configured. 
Use `spi conf`"); return -ENODEV; } int bytes_to_send = argc - TXRX_ARGV_BYTES; for (int i = 0; i < bytes_to_send; i++) { tx_buffer[i] = strtol(argv[TXRX_ARGV_BYTES + i], NULL, 16); } const struct spi_buf tx_buffers = {.buf = tx_buffer, .len = bytes_to_send}; const struct spi_buf rx_buffers = {.buf = rx_buffer, .len = bytes_to_send}; const struct spi_buf_set tx_buf_set = {.buffers = &tx_buffers, .count = 1}; const struct spi_buf_set rx_buf_set = {.buffers = &rx_buffers, .count = 1}; int ret = spi_transceive(spi_device, &config, &tx_buf_set, &rx_buf_set); if (ret < 0) { shell_error(ctx, "spi_transceive returned %d", ret); return ret; } shell_print(ctx, "TX:"); shell_hexdump(ctx, tx_buffer, bytes_to_send); shell_print(ctx, "RX:"); shell_hexdump(ctx, rx_buffer, bytes_to_send); return ret; } static int cmd_spi_conf(const struct shell *ctx, size_t argc, char **argv) { spi_operation_t operation = SPI_WORD_SET(8) | SPI_OP_MODE_MASTER; /* warning: initialization discards 'const' qualifier from pointer */ /* target type */ struct device *dev = (struct device *)device_get_binding(argv[CONF_ARGV_DEV]); if (dev == NULL) { shell_error(ctx, "device %s not found.", argv[CONF_ARGV_DEV]); return -ENODEV; } uint32_t frequency = strtol(argv[CONF_ARGV_FREQUENCY], NULL, 10); if (!IN_RANGE(frequency, 100 * 1000, 80 * 1000 * 1000)) { shell_error(ctx, "frequency must be between 100000 and 80000000"); return -EINVAL; } /* no settings */ if (argc == (CONF_ARGV_FREQUENCY + 1)) { goto out; } char *opts = argv[CONF_ARGV_SETTINGS]; bool all_opts_is_valid = true; while (*opts != '\0') { switch (*opts) { case 'o': operation |= SPI_MODE_CPOL; break; case 'h': operation |= SPI_MODE_CPHA; break; case 'l': operation |= SPI_TRANSFER_LSB; break; case 'T': operation |= SPI_FRAME_FORMAT_TI; break; default: all_opts_is_valid = false; shell_error(ctx, "invalid setting %c", *opts); } opts++; } if (!all_opts_is_valid) { return -EINVAL; } out: config.frequency = frequency; config.operation = operation; 
spi_device = dev; return 0; } SHELL_STATIC_SUBCMD_SET_CREATE(sub_spi_cmds, SHELL_CMD_ARG(conf, &dsub_device_name, "Configure SPI\n" "Usage: spi conf <device> <frequency> [<settings>]\n" "<settings> - any sequence of letters:\n" "o - SPI_MODE_CPOL\n" "h - SPI_MODE_CPHA\n" "l - SPI_TRANSFER_LSB\n" "T - SPI_FRAME_FORMAT_TI\n" "example: spi conf spi1 1000000 ol", cmd_spi_conf, 3, 1), SHELL_CMD_ARG(transceive, NULL, "Transceive data to and from an SPI device\n" "Usage: spi transceive <TX byte 1> [<TX byte 2> ...]", cmd_spi_transceive, 2, MAX_SPI_BYTES - 1), SHELL_SUBCMD_SET_END); SHELL_CMD_REGISTER(spi, &sub_spi_cmds, "SPI commands", NULL); ```
/content/code_sandbox/drivers/spi/spi_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,150
```unknown # Microchip Polarfire SOC QSPI config SPI_MCHP_QSPI bool "Microchip Polarfire SOC QSPI driver" default y depends on DT_HAS_MICROCHIP_MPFS_QSPI_ENABLED help Enable support for the Polarfire SOC QSPI driver. ```
/content/code_sandbox/drivers/spi/Kconfig.mchp_mss_qspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
60
```unknown # Kconfig Andes ATCSPI200 SPI configuration options # # # config SPI_ANDES_ATCSPI200 bool "Andes ATCSPI200 SPI driver" default y depends on DT_HAS_ANDESTECH_ATCSPI200_ENABLED help Enable driver for Andes ATCSPI200 SPI controller if SPI_ANDES_ATCSPI200 config ANDES_SPI_DMA_MODE bool "Using DMA mode for spi" default y depends on DMA endif # SPI_ANDES_ATCSPI200 ```
/content/code_sandbox/drivers/spi/Kconfig.andes_atcspi200
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
112
```unknown # NUMAKER SPI Driver configuration options config SPI_NUMAKER bool "Nuvoton NuMaker MCU SPI driver" default y select HAS_NUMAKER_SPI depends on DT_HAS_NUVOTON_NUMAKER_SPI_ENABLED help This option enables the SPI driver for Nuvoton NuMaker family of processors. Say y if you wish to enable NuMaker SPI. ```
/content/code_sandbox/drivers/spi/Kconfig.numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
81
```unknown # SiFive SPI Driver configuration options config SPI_SIFIVE bool "SiFive SPI controller driver" default y depends on DT_HAS_SIFIVE_SPI0_ENABLED help Enable the SPI peripherals on SiFive Freedom processors ```
/content/code_sandbox/drivers/spi/Kconfig.sifive
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
51
```c /* * */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_opentitan); #include "spi_context.h" #include <zephyr/device.h> #include <zephyr/drivers/spi.h> #include <soc.h> #include <stdbool.h> /* Register offsets within the SPI host register space. */ #define SPI_HOST_INTR_STATE_REG_OFFSET 0x00 #define SPI_HOST_INTR_ENABLE_REG_OFFSET 0x04 #define SPI_HOST_INTR_TEST_REG_OFFSET 0x08 #define SPI_HOST_ALERT_TEST_REG_OFFSET 0x0c #define SPI_HOST_CONTROL_REG_OFFSET 0x10 #define SPI_HOST_STATUS_REG_OFFSET 0x14 #define SPI_HOST_CONFIGOPTS_REG_OFFSET 0x18 #define SPI_HOST_CSID_REG_OFFSET 0x1c #define SPI_HOST_COMMAND_REG_OFFSET 0x20 #define SPI_HOST_RXDATA_REG_OFFSET 0x24 #define SPI_HOST_TXDATA_REG_OFFSET 0x28 #define SPI_HOST_ERROR_ENABLE_REG_OFFSET 0x2c #define SPI_HOST_ERROR_STATUS_REG_OFFSET 0x30 #define SPI_HOST_EVENT_ENABLE_REG_OFFSET 0x34 /* Control register fields. */ #define SPI_HOST_CONTROL_OUTPUT_EN_BIT BIT(29) #define SPI_HOST_CONTROL_SW_RST_BIT BIT(30) #define SPI_HOST_CONTROL_SPIEN_BIT BIT(31) /* Status register fields. */ #define SPI_HOST_STATUS_TXQD_MASK GENMASK(7, 0) #define SPI_HOST_STATUS_RXQD_MASK GENMASK(15, 8) #define SPI_HOST_STATUS_BYTEORDER_BIT BIT(22) #define SPI_HOST_STATUS_RXEMPTY_BIT BIT(24) #define SPI_HOST_STATUS_ACTIVE_BIT BIT(30) #define SPI_HOST_STATUS_READY_BIT BIT(31) /* Command register fields. */ #define SPI_HOST_COMMAND_LEN_MASK GENMASK(8, 0) /* "Chip select active after transaction" */ #define SPI_HOST_COMMAND_CSAAT_BIT BIT(9) #define SPI_HOST_COMMAND_SPEED_MASK GENMASK(11, 10) #define SPI_HOST_COMMAND_SPEED_STANDARD (0 << 10) #define SPI_HOST_COMMAND_SPEED_DUAL (1 << 10) #define SPI_HOST_COMMAND_SPEED_QUAD (2 << 10) #define SPI_HOST_COMMAND_DIRECTION_MASK GENMASK(13, 12) #define SPI_HOST_COMMAND_DIRECTION_RX (0x1 << 12) #define SPI_HOST_COMMAND_DIRECTION_TX (0x2 << 12) #define SPI_HOST_COMMAND_DIRECTION_BOTH (0x3 << 12) /* Configopts register fields. 
*/ #define SPI_HOST_CONFIGOPTS_CPHA0_BIT BIT(30) #define SPI_HOST_CONFIGOPTS_CPOL0_BIT BIT(31) #define DT_DRV_COMPAT lowrisc_opentitan_spi struct spi_opentitan_data { struct spi_context ctx; }; struct spi_opentitan_cfg { uint32_t base; uint32_t f_input; }; static int spi_config(const struct device *dev, uint32_t frequency, uint16_t operation) { const struct spi_opentitan_cfg *cfg = dev->config; uint32_t reg; if (operation & SPI_HALF_DUPLEX) { return -ENOTSUP; } if (SPI_OP_MODE_GET(operation) != SPI_OP_MODE_MASTER) { return -ENOTSUP; } if (operation & SPI_MODE_LOOP) { return -ENOTSUP; } if (SPI_WORD_SIZE_GET(operation) != 8) { return -ENOTSUP; } if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { return -ENOTSUP; } /* Most significant bit always transferred first. */ if (operation & SPI_TRANSFER_LSB) { return -ENOTSUP; } /* Set the SPI frequency, polarity, and clock phase in CONFIGOPTS register. * Applied divider (divides f_in / 2) is CLKDIV register (16 bit) + 1. */ reg = cfg->f_input / 2 / frequency; if (reg > 0xffffu) { reg = 0xffffu; } else if (reg > 0) { reg--; } /* Setup phase */ if (operation & SPI_MODE_CPHA) { reg |= SPI_HOST_CONFIGOPTS_CPHA0_BIT; } /* Setup polarity. */ if (operation & SPI_MODE_CPOL) { reg |= SPI_HOST_CONFIGOPTS_CPOL0_BIT; } sys_write32(reg, cfg->base + SPI_HOST_CONFIGOPTS_REG_OFFSET); return 0; } static bool spi_opentitan_rx_available(const struct spi_opentitan_cfg *cfg) { /* Rx bytes are available if Tx FIFO is non-empty. 
*/ return !(sys_read32(cfg->base + SPI_HOST_STATUS_REG_OFFSET) & SPI_HOST_STATUS_RXEMPTY_BIT); } static void spi_opentitan_xfer(const struct device *dev, const bool gpio_cs_control) { const struct spi_opentitan_cfg *cfg = dev->config; struct spi_opentitan_data *data = dev->data; struct spi_context *ctx = &data->ctx; while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)) { const size_t segment_len = MAX(ctx->tx_len, ctx->rx_len); uint32_t host_command_reg; /* Setup transaction duplex. */ if (!spi_context_tx_on(ctx)) { host_command_reg = SPI_HOST_COMMAND_DIRECTION_RX; } else if (!spi_context_rx_on(ctx)) { host_command_reg = SPI_HOST_COMMAND_DIRECTION_TX; } else { host_command_reg = SPI_HOST_COMMAND_DIRECTION_BOTH; } size_t tx_bytes_to_queue = spi_context_tx_buf_on(ctx) ? ctx->tx_len : 0; /* First place Tx bytes in FIFO, packed four to a word. */ while (tx_bytes_to_queue > 0) { uint32_t fifo_word = 0; for (int byte = 0; byte < 4; ++byte) { if (tx_bytes_to_queue == 0) { break; } fifo_word |= *ctx->tx_buf << (8 * byte); spi_context_update_tx(ctx, 1, 1); tx_bytes_to_queue--; } sys_write32(fifo_word, cfg->base + SPI_HOST_TXDATA_REG_OFFSET); } /* Keep CS asserted if another Tx segment remains or if two more Rx * segments remain (because we will handle one Rx segment after the * forthcoming transaction). */ if (ctx->tx_count > 0 || ctx->rx_count > 1) { host_command_reg |= SPI_HOST_COMMAND_CSAAT_BIT; } /* Segment length field holds COMMAND.LEN + 1. */ host_command_reg |= segment_len - 1; /* Issue transaction. */ sys_write32(host_command_reg, cfg->base + SPI_HOST_COMMAND_REG_OFFSET); size_t rx_bytes_to_read = spi_context_rx_buf_on(ctx) ? ctx->rx_len : 0; /* Read from Rx FIFO as required. 
*/ while (rx_bytes_to_read > 0) { while (!spi_opentitan_rx_available(cfg)) { ; } uint32_t rx_word = sys_read32(cfg->base + SPI_HOST_RXDATA_REG_OFFSET); for (int byte = 0; byte < 4; ++byte) { if (rx_bytes_to_read == 0) { break; } *ctx->rx_buf = (rx_word >> (8 * byte)) & 0xff; spi_context_update_rx(ctx, 1, 1); rx_bytes_to_read--; } } } /* Deassert the CS line if required. */ if (gpio_cs_control) { spi_context_cs_control(ctx, false); } spi_context_complete(ctx, dev, 0); } static int spi_opentitan_init(const struct device *dev) { const struct spi_opentitan_cfg *cfg = dev->config; struct spi_opentitan_data *data = dev->data; int err; /* Place SPI host peripheral in reset and wait for reset to complete. */ sys_write32(SPI_HOST_CONTROL_SW_RST_BIT, cfg->base + SPI_HOST_CONTROL_REG_OFFSET); while (sys_read32(cfg->base + SPI_HOST_STATUS_REG_OFFSET) & (SPI_HOST_STATUS_ACTIVE_BIT | SPI_HOST_STATUS_TXQD_MASK | SPI_HOST_STATUS_RXQD_MASK)) { ; } /* Clear reset and enable SPI host peripheral. */ sys_write32(SPI_HOST_CONTROL_OUTPUT_EN_BIT | SPI_HOST_CONTROL_SPIEN_BIT, cfg->base + SPI_HOST_CONTROL_REG_OFFSET); err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } /* Make sure the context is unlocked */ spi_context_unlock_unconditionally(&data->ctx); return 0; } static int spi_opentitan_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { int rc = 0; bool gpio_cs_control = false; struct spi_opentitan_data *data = dev->data; /* Lock the SPI Context */ spi_context_lock(&data->ctx, false, NULL, NULL, config); /* Configure the SPI bus */ data->ctx.config = config; rc = spi_config(dev, config->frequency, config->operation); if (rc < 0) { spi_context_release(&data->ctx, rc); return rc; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); /* Assert the CS line. 
HW will always assert the CS pin identified by CSID * (default CSID: 0), so GPIO CS control will work in addition to HW * asserted (and presumably ignored) CS. */ if (config->cs) { gpio_cs_control = true; spi_context_cs_control(&data->ctx, true); } /* Perform transfer */ spi_opentitan_xfer(dev, gpio_cs_control); rc = spi_context_wait_for_completion(&data->ctx); spi_context_release(&data->ctx, rc); return rc; } #ifdef CONFIG_SPI_ASYNC static int spi_opentitan_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return -ENOTSUP; } #endif static int spi_opentitan_release(const struct device *dev, const struct spi_config *config) { struct spi_opentitan_data *data = dev->data; spi_context_unlock_unconditionally(&data->ctx); return 0; } /* Device Instantiation */ static const struct spi_driver_api spi_opentitan_api = { .transceive = spi_opentitan_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_opentitan_transceive_async, #endif .release = spi_opentitan_release, }; #define SPI_INIT(n) \ static struct spi_opentitan_data spi_opentitan_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_opentitan_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_opentitan_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ }; \ static struct spi_opentitan_cfg spi_opentitan_cfg_##n = { \ .base = DT_INST_REG_ADDR(n), \ .f_input = DT_INST_PROP(n, clock_frequency), \ }; \ DEVICE_DT_INST_DEFINE(n, \ spi_opentitan_init, \ NULL, \ &spi_opentitan_data_##n, \ &spi_opentitan_cfg_##n, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_opentitan_api); DT_INST_FOREACH_STATUS_OKAY(SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_opentitan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,584
```c /* * */ #include <zephyr/drivers/spi.h> const struct rtio_iodev_api spi_iodev_api = { .submit = spi_iodev_submit, }; ```
/content/code_sandbox/drivers/spi/spi_rtio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
39
```unknown # RV32M1 SPI config SPI_RV32M1_LPSPI bool "RV32M1 LPSPI driver" default y depends on DT_HAS_OPENISA_RV32M1_LPSPI_ENABLED depends on CLOCK_CONTROL select PINCTRL help Enable the RV32M1 LPSPI driver. ```
/content/code_sandbox/drivers/spi/Kconfig.rv32m1_lpspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
74
```c /* * */ #define DT_DRV_COMPAT nxp_lpc_spi #include <errno.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/clock_control.h> #include <fsl_spi.h> #include <zephyr/logging/log.h> #ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA #include <zephyr/drivers/dma.h> #endif #include <zephyr/drivers/pinctrl.h> #include <zephyr/sys_clock.h> #include <zephyr/irq.h> #include <zephyr/drivers/reset.h> LOG_MODULE_REGISTER(spi_mcux_flexcomm, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" #define SPI_CHIP_SELECT_COUNT 4 #define SPI_MAX_DATA_WIDTH 16 struct spi_mcux_config { SPI_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; void (*irq_config_func)(const struct device *dev); uint32_t pre_delay; uint32_t post_delay; uint32_t frame_delay; uint32_t transfer_delay; uint32_t def_char; const struct pinctrl_dev_config *pincfg; const struct reset_dt_spec reset; }; #ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA #define SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG 0x01 #define SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG 0x02 #define SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG 0x04 #define SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG \ (SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG | SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG) struct stream { const struct device *dma_dev; uint32_t channel; /* stores the channel for dma */ struct dma_config dma_cfg; struct dma_block_config dma_blk_cfg[2]; }; #endif struct spi_mcux_data { const struct device *dev; spi_master_handle_t handle; struct spi_context ctx; size_t transfer_len; #ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA volatile uint32_t status_flags; struct stream dma_rx; struct stream dma_tx; /* dummy value used for transferring NOP when tx buf is null */ uint32_t dummy_tx_buffer; /* Used to send the last word */ uint32_t last_word; #endif }; static void spi_mcux_transfer_next_packet(const struct device *dev) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; SPI_Type *base = config->base; struct spi_context *ctx = &data->ctx; spi_transfer_t transfer; 
status_t status; if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) { /* nothing left to rx or tx, we're done! */ spi_context_cs_control(&data->ctx, false); spi_context_complete(&data->ctx, dev, 0); return; } transfer.configFlags = 0; if (ctx->tx_len == 0) { /* rx only, nothing to tx */ transfer.txData = NULL; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->rx_len; } else if (ctx->rx_len == 0) { /* tx only, nothing to rx */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = NULL; transfer.dataSize = ctx->tx_len; } else if (ctx->tx_len == ctx->rx_len) { /* rx and tx are the same length */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->tx_len; } else if (ctx->tx_len > ctx->rx_len) { /* Break up the tx into multiple transfers so we don't have to * rx into a longer intermediate buffer. Leave chip select * active between transfers. */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->rx_len; } else { /* Break up the rx into multiple transfers so we don't have to * tx from a longer intermediate buffer. Leave chip select * active between transfers. 
*/ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->tx_len; } if (ctx->tx_count <= 1 && ctx->rx_count <= 1) { transfer.configFlags = kSPI_FrameAssert; } data->transfer_len = transfer.dataSize; status = SPI_MasterTransferNonBlocking(base, &data->handle, &transfer); if (status != kStatus_Success) { LOG_ERR("Transfer could not start"); } } static void spi_mcux_isr(const struct device *dev) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; SPI_Type *base = config->base; SPI_MasterTransferHandleIRQ(base, &data->handle); } static void spi_mcux_transfer_callback(SPI_Type *base, spi_master_handle_t *handle, status_t status, void *userData) { struct spi_mcux_data *data = userData; spi_context_update_tx(&data->ctx, 1, data->transfer_len); spi_context_update_rx(&data->ctx, 1, data->transfer_len); spi_mcux_transfer_next_packet(data->dev); } static uint8_t spi_clock_cycles(uint32_t delay_ns, uint32_t sck_frequency_hz) { /* Convert delay_ns to an integer number of clock cycles of frequency * sck_frequency_hz. The maximum delay is 15 clock cycles. 
*/ uint8_t delay_cycles = (uint64_t)delay_ns * sck_frequency_hz / NSEC_PER_SEC; delay_cycles = MIN(delay_cycles, 15); return delay_cycles; } static int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; SPI_Type *base = config->base; uint32_t clock_freq; uint32_t word_size; if (spi_context_configured(&data->ctx, spi_cfg)) { /* This configuration is already in use */ return 0; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (word_size > SPI_MAX_DATA_WIDTH) { LOG_ERR("Word size %d is greater than %d", word_size, SPI_MAX_DATA_WIDTH); return -EINVAL; } /* * Do master or slave initialisation, depending on the * mode requested. */ if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) { spi_master_config_t master_config; SPI_MasterGetDefaultConfig(&master_config); if (!device_is_ready(config->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* Get the clock frequency */ if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } if (spi_cfg->slave > SPI_CHIP_SELECT_COUNT) { LOG_ERR("Slave %d is greater than %d", spi_cfg->slave, SPI_CHIP_SELECT_COUNT); return -EINVAL; } master_config.sselNum = spi_cfg->slave; master_config.sselPol = kSPI_SpolActiveAllLow; master_config.dataWidth = word_size - 1; master_config.polarity = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) ? kSPI_ClockPolarityActiveLow : kSPI_ClockPolarityActiveHigh; master_config.phase = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) ? kSPI_ClockPhaseSecondEdge : kSPI_ClockPhaseFirstEdge; master_config.direction = (spi_cfg->operation & SPI_TRANSFER_LSB) ? 
kSPI_LsbFirst : kSPI_MsbFirst; master_config.baudRate_Bps = spi_cfg->frequency; spi_delay_config_t *delayConfig = &master_config.delayConfig; delayConfig->preDelay = spi_clock_cycles(config->pre_delay, spi_cfg->frequency); delayConfig->postDelay = spi_clock_cycles(config->post_delay, spi_cfg->frequency); delayConfig->frameDelay = spi_clock_cycles(config->frame_delay, spi_cfg->frequency); delayConfig->transferDelay = spi_clock_cycles(config->transfer_delay, spi_cfg->frequency); SPI_MasterInit(base, &master_config, clock_freq); SPI_SetDummyData(base, (uint8_t)config->def_char); SPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_transfer_callback, data); data->ctx.config = spi_cfg; } else { spi_slave_config_t slave_config; SPI_SlaveGetDefaultConfig(&slave_config); slave_config.polarity = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) ? kSPI_ClockPolarityActiveLow : kSPI_ClockPolarityActiveHigh; slave_config.phase = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) ? kSPI_ClockPhaseSecondEdge : kSPI_ClockPhaseFirstEdge; slave_config.direction = (spi_cfg->operation & SPI_TRANSFER_LSB) ? 
kSPI_LsbFirst : kSPI_MsbFirst; /* SS pin active low */ slave_config.sselPol = kSPI_SpolActiveAllLow; slave_config.dataWidth = word_size - 1; SPI_SlaveInit(base, &slave_config); SPI_SetDummyData(base, (uint8_t)config->def_char); SPI_SlaveTransferCreateHandle(base, &data->handle, spi_mcux_transfer_callback, data); data->ctx.config = spi_cfg; } return 0; } #ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA /* Dummy buffer used as a sink when rc buf is null */ uint32_t dummy_rx_buffer; /* This function is executed in the interrupt context */ static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status) { /* arg directly holds the spi device */ const struct device *spi_dev = arg; struct spi_mcux_data *data = spi_dev->data; if (status < 0) { LOG_ERR("DMA callback error with channel %d.", channel); data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG; } else { /* identify the origin of this callback */ if (channel == data->dma_tx.channel) { /* this part of the transfer ends */ data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG; } else if (channel == data->dma_rx.channel) { /* this part of the transfer ends */ data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG; } else { LOG_ERR("DMA callback channel %d is not valid.", channel); data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG; } } spi_context_complete(&data->ctx, spi_dev, 0); } static void spi_mcux_prepare_txlastword(uint32_t *txLastWord, const uint8_t *buf, const struct spi_config *spi_cfg, size_t len) { uint32_t word_size; word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (word_size > 8) { *txLastWord = (((uint32_t)buf[len - 1U] << 8U) | (buf[len - 2U])); } else { *txLastWord = buf[len - 1U]; } *txLastWord |= (uint32_t)SPI_FIFOWR_EOT_MASK; *txLastWord |= ((uint32_t)SPI_DEASSERT_ALL & (~(uint32_t)SPI_DEASSERTNUM_SSEL((uint32_t)spi_cfg->slave))); /* set width of data - range asserted at entry */ *txLastWord |= SPI_FIFOWR_LEN(word_size - 1); } static void 
spi_mcux_prepare_txdummy(uint32_t *dummy, bool last_packet, const struct spi_config *spi_cfg) { uint32_t word_size; word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (last_packet) { *dummy |= (uint32_t)SPI_FIFOWR_EOT_MASK; } *dummy |= ((uint32_t)SPI_DEASSERT_ALL & (~(uint32_t)SPI_DEASSERTNUM_SSEL((uint32_t)spi_cfg->slave))); /* set width of data - range asserted at entry */ *dummy |= SPI_FIFOWR_LEN(word_size - 1); } static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, const struct spi_config *spi_cfg, size_t len, bool last_packet) { const struct spi_mcux_config *cfg = dev->config; struct spi_mcux_data *data = dev->data; struct dma_block_config *blk_cfg; int ret; SPI_Type *base = cfg->base; uint32_t word_size; word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); /* remember active TX DMA channel (used in callback) */ struct stream *stream = &data->dma_tx; blk_cfg = &stream->dma_blk_cfg[0]; /* prepare the block for this TX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); /* tx direction has memory as source and periph as dest. */ if (buf == NULL) { data->dummy_tx_buffer = 0; data->last_word = 0; spi_mcux_prepare_txdummy(&data->dummy_tx_buffer, last_packet, spi_cfg); if (last_packet && ((word_size > 8) ? (len > 2U) : (len > 1U))) { spi_mcux_prepare_txdummy(&data->last_word, last_packet, spi_cfg); blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer; blk_cfg->dest_address = (uint32_t)&base->FIFOWR; blk_cfg->block_size = (word_size > 8) ? 
(len - 2U) : (len - 1U); blk_cfg->next_block = &stream->dma_blk_cfg[1]; blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; blk_cfg = &stream->dma_blk_cfg[1]; /* prepare the block for this TX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); blk_cfg->source_address = (uint32_t)&data->last_word; blk_cfg->dest_address = (uint32_t)&base->FIFOWR; blk_cfg->block_size = sizeof(uint32_t); blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } else { blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer; blk_cfg->dest_address = (uint32_t)&base->FIFOWR; blk_cfg->block_size = len; blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } } else { if (last_packet) { spi_mcux_prepare_txlastword(&data->last_word, buf, spi_cfg, len); } /* If last packet and data transfer frame is bigger then 1, * use dma descriptor to send the last data. */ if (last_packet && ((word_size > 8) ? (len > 2U) : (len > 1U))) { blk_cfg->source_address = (uint32_t)buf; blk_cfg->dest_address = (uint32_t)&base->FIFOWR; blk_cfg->block_size = (word_size > 8) ? 
(len - 2U) : (len - 1U); blk_cfg->next_block = &stream->dma_blk_cfg[1]; blk_cfg = &stream->dma_blk_cfg[1]; /* prepare the block for this TX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); blk_cfg->source_address = (uint32_t)&data->last_word; blk_cfg->dest_address = (uint32_t)&base->FIFOWR; blk_cfg->block_size = sizeof(uint32_t); blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } else { blk_cfg->source_address = (uint32_t)buf; blk_cfg->dest_address = (uint32_t)&base->FIFOWR; blk_cfg->block_size = len; } } /* Enables the DMA request from SPI txFIFO */ base->FIFOCFG |= SPI_FIFOCFG_DMATX_MASK; /* direction is given by the DT */ stream->dma_cfg.head_block = &stream->dma_blk_cfg[0]; /* give the client dev as arg, as the callback comes from the dma */ stream->dma_cfg.user_data = (struct device *)dev; /* pass our client origin to the dma: data->dma_tx.dma_channel */ ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel, &stream->dma_cfg); /* the channel is the actual stream from 0 */ if (ret != 0) { return ret; } uint32_t tmpData = 0U; spi_mcux_prepare_txdummy(&tmpData, last_packet, spi_cfg); /* Setup the control info. * Halfword writes to just the control bits (offset 0xE22) doesn't push * anything into the FIFO. And the data access type of control bits must * be uint16_t, byte writes or halfword writes to FIFOWR will push the * data and the current control bits into the FIFO. */ if ((last_packet) && ((word_size > 8) ? 
(len == 2U) : (len == 1U))) { *((uint16_t *)((uint32_t)&base->FIFOWR) + 1) = (uint16_t)(tmpData >> 16U); } else { /* Clear the SPI_FIFOWR_EOT_MASK bit when data is not the last */ tmpData &= (~(uint32_t)SPI_FIFOWR_EOT_MASK); *((uint16_t *)((uint32_t)&base->FIFOWR) + 1) = (uint16_t)(tmpData >> 16U); } /* gives the request ID */ return dma_start(data->dma_tx.dma_dev, data->dma_tx.channel); } static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len) { const struct spi_mcux_config *cfg = dev->config; struct spi_mcux_data *data = dev->data; struct dma_block_config *blk_cfg; int ret; SPI_Type *base = cfg->base; /* retrieve active RX DMA channel (used in callback) */ struct stream *stream = &data->dma_rx; blk_cfg = &stream->dma_blk_cfg[0]; /* prepare the block for this RX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); blk_cfg->block_size = len; /* rx direction has periph as source and mem as dest. */ if (buf == NULL) { /* if rx buff is null, then write data to dummy address. 
*/ blk_cfg->dest_address = (uint32_t)&dummy_rx_buffer; blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } else { blk_cfg->dest_address = (uint32_t)buf; } blk_cfg->source_address = (uint32_t)&base->FIFORD; /* direction is given by the DT */ stream->dma_cfg.head_block = blk_cfg; stream->dma_cfg.user_data = (struct device *)dev; /* Enables the DMA request from SPI rxFIFO */ base->FIFOCFG |= SPI_FIFOCFG_DMARX_MASK; /* pass our client origin to the dma: data->dma_rx.channel */ ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel, &stream->dma_cfg); /* the channel is the actual stream from 0 */ if (ret != 0) { return ret; } /* gives the request ID */ return dma_start(data->dma_rx.dma_dev, data->dma_rx.channel); } static int spi_mcux_dma_move_buffers(const struct device *dev, size_t len, const struct spi_config *spi_cfg, bool last_packet) { struct spi_mcux_data *data = dev->data; int ret; ret = spi_mcux_dma_rx_load(dev, data->ctx.rx_buf, len); if (ret != 0) { return ret; } ret = spi_mcux_dma_tx_load(dev, data->ctx.tx_buf, spi_cfg, len, last_packet); return ret; } static int wait_dma_rx_tx_done(const struct device *dev) { struct spi_mcux_data *data = dev->data; int ret = -1; while (1) { ret = spi_context_wait_for_completion(&data->ctx); if (data->status_flags & SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG) { return -EIO; } if ((data->status_flags & SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG) == SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG) { return 0; } } } static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; SPI_Type *base = config->base; int ret; uint32_t word_size; uint16_t data_size; spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); ret = spi_mcux_configure(dev, spi_cfg); if (ret) { goto out; } spi_context_buffers_setup(&data->ctx, 
tx_bufs, rx_bufs, 1); spi_context_cs_control(&data->ctx, true); word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); data_size = (word_size > 8) ? (sizeof(uint16_t)) : (sizeof(uint8_t)); data->dma_rx.dma_cfg.source_data_size = data_size; data->dma_rx.dma_cfg.dest_data_size = data_size; data->dma_tx.dma_cfg.source_data_size = data_size; data->dma_tx.dma_cfg.dest_data_size = data_size; while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) { size_t dma_len; /* last is used to deassert chip select if this * is the last transfer in the set. */ bool last = false; if (data->ctx.rx_len == 0) { dma_len = data->ctx.tx_len; last = true; } else if (data->ctx.tx_len == 0) { dma_len = data->ctx.rx_len; last = true; } else if (data->ctx.tx_len == data->ctx.rx_len) { dma_len = data->ctx.rx_len; last = true; } else { dma_len = MIN(data->ctx.tx_len, data->ctx.rx_len); last = false; } /* at this point, last just means whether or not * this transfer will completely cover * the current tx/rx buffer in data->ctx * or require additional transfers because * the two buffers are not the same size. * * if it covers the current ctx tx/rx buffers, then * we'll move to the next pair of buffers (if any) * after the transfer, but if there are * no more buffer pairs, then this is the last * transfer in the set and we need to deassert CS. */ if (last) { /* this dma transfer should cover * the entire current data->ctx set * of buffers. if there are more * buffers in the set, then we don't * want to deassert CS. 
*/ if ((data->ctx.tx_count > 1) || (data->ctx.rx_count > 1)) { /* more buffers to transfer so * this isn't last */ last = false; } } data->status_flags = 0; ret = spi_mcux_dma_move_buffers(dev, dma_len, spi_cfg, last); if (ret != 0) { break; } ret = wait_dma_rx_tx_done(dev); if (ret != 0) { break; } /* wait until TX FIFO is really empty */ while (0U == (base->FIFOSTAT & SPI_FIFOSTAT_TXEMPTY_MASK)) { } spi_context_update_tx(&data->ctx, 1, dma_len); spi_context_update_rx(&data->ctx, 1, dma_len); } base->FIFOCFG &= ~SPI_FIFOCFG_DMATX_MASK; base->FIFOCFG &= ~SPI_FIFOCFG_DMARX_MASK; spi_context_cs_control(&data->ctx, false); out: spi_context_release(&data->ctx, ret); return ret; } #endif static int transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { struct spi_mcux_data *data = dev->data; int ret; spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); ret = spi_mcux_configure(dev, spi_cfg); if (ret) { goto out; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(&data->ctx, true); spi_mcux_transfer_next_packet(dev); ret = spi_context_wait_for_completion(&data->ctx); out: spi_context_release(&data->ctx, ret); return ret; } static int spi_mcux_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { #ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); #endif return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_mcux_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { #ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, 
true, cb, userdata); #endif return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_mcux_data *data = dev->data; spi_context_unlock_unconditionally(&data->ctx); return 0; } static int spi_mcux_init(const struct device *dev) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; int err = 0; if (!device_is_ready(config->reset.dev)) { LOG_ERR("Reset device not ready"); return -ENODEV; } err = reset_line_toggle(config->reset.dev, config->reset.id); if (err) { return err; } config->irq_config_func(dev); data->dev = dev; err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } #ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA if (!device_is_ready(data->dma_tx.dma_dev)) { LOG_ERR("%s device is not ready", data->dma_tx.dma_dev->name); return -ENODEV; } if (!device_is_ready(data->dma_rx.dma_dev)) { LOG_ERR("%s device is not ready", data->dma_rx.dma_dev->name); return -ENODEV; } #endif /* CONFIG_SPI_MCUX_FLEXCOMM_DMA */ err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } spi_context_unlock_unconditionally(&data->ctx); return 0; } static const struct spi_driver_api spi_mcux_driver_api = { .transceive = spi_mcux_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_mcux_transceive_async, #endif .release = spi_mcux_release, }; #define SPI_MCUX_FLEXCOMM_IRQ_HANDLER_DECL(id) \ static void spi_mcux_config_func_##id(const struct device *dev) #define SPI_MCUX_FLEXCOMM_IRQ_HANDLER_FUNC(id) \ .irq_config_func = spi_mcux_config_func_##id, #define SPI_MCUX_FLEXCOMM_IRQ_HANDLER(id) \ static void spi_mcux_config_func_##id(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(id), \ DT_INST_IRQ(id, priority), \ spi_mcux_isr, DEVICE_DT_INST_GET(id), \ 0); \ irq_enable(DT_INST_IRQN(id)); \ } #ifndef CONFIG_SPI_MCUX_FLEXCOMM_DMA #define 
SPI_DMA_CHANNELS(id) #else #define SPI_DMA_CHANNELS(id) \ .dma_tx = { \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \ .channel = \ DT_INST_DMAS_CELL_BY_NAME(id, tx, channel), \ .dma_cfg = { \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_callback = spi_mcux_dma_callback, \ .complete_callback_en = true, \ .block_count = 2, \ } \ }, \ .dma_rx = { \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \ .channel = \ DT_INST_DMAS_CELL_BY_NAME(id, rx, channel), \ .dma_cfg = { \ .channel_direction = PERIPHERAL_TO_MEMORY, \ .dma_callback = spi_mcux_dma_callback, \ .block_count = 1, \ } \ } #endif #define SPI_MCUX_FLEXCOMM_DEVICE(id) \ SPI_MCUX_FLEXCOMM_IRQ_HANDLER_DECL(id); \ PINCTRL_DT_INST_DEFINE(id); \ static const struct spi_mcux_config spi_mcux_config_##id = { \ .base = \ (SPI_Type *)DT_INST_REG_ADDR(id), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name),\ SPI_MCUX_FLEXCOMM_IRQ_HANDLER_FUNC(id) \ .pre_delay = DT_INST_PROP_OR(id, pre_delay, 0), \ .post_delay = DT_INST_PROP_OR(id, post_delay, 0), \ .frame_delay = DT_INST_PROP_OR(id, frame_delay, 0), \ .transfer_delay = DT_INST_PROP_OR(id, transfer_delay, 0), \ .def_char = DT_INST_PROP_OR(id, def_char, 0), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ .reset = RESET_DT_SPEC_INST_GET(id), \ }; \ static struct spi_mcux_data spi_mcux_data_##id = { \ SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##id, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##id, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx) \ SPI_DMA_CHANNELS(id) \ }; \ DEVICE_DT_INST_DEFINE(id, \ spi_mcux_init, \ NULL, \ &spi_mcux_data_##id, \ &spi_mcux_config_##id, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_mcux_driver_api); \ \ SPI_MCUX_FLEXCOMM_IRQ_HANDLER(id) DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_FLEXCOMM_DEVICE) ```
/content/code_sandbox/drivers/spi/spi_mcux_flexcomm.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,574
```unknown # # # config SPI_SEDI bool "Intel SEDI SPI driver" default y depends on DT_HAS_INTEL_SEDI_SPI_ENABLED help This option enables the Intel SEDI SPI driver. This driver is simply a shim driver built upon the SEDI bare metal SPI driver in the hal-intel module ```
/content/code_sandbox/drivers/spi/Kconfig.sedi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
71
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_SPI_NRFX_COMMON_H_ #define ZEPHYR_DRIVERS_SPI_NRFX_COMMON_H_ #include <stdint.h> #include <nrfx_gpiote.h> #define WAKE_PIN_NOT_USED UINT32_MAX #define WAKE_GPIOTE_INSTANCE(node_id) \ COND_CODE_1(DT_NODE_HAS_PROP(node_id, wake_gpios), \ (NRFX_GPIOTE_INSTANCE( \ NRF_DT_GPIOTE_INST(node_id, wake_gpios))), \ ({0})) int spi_nrfx_wake_init(const nrfx_gpiote_t *gpiote, uint32_t wake_pin); int spi_nrfx_wake_request(const nrfx_gpiote_t *gpiote, uint32_t wake_pin); #endif /* ZEPHYR_DRIVERS_SPI_NRFX_COMMON_H_ */ ```
/content/code_sandbox/drivers/spi/spi_nrfx_common.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
192
```unknown config SPI_PL022 default y depends on DT_HAS_ARM_PL022_ENABLED bool "ARM PL022 SPI driver" if SPI_PL022 config SPI_PL022_INTERRUPT bool "PL022 interrupt mode" help Enables interrupt support for PL022 SPI driver. config SPI_PL022_DMA bool "PL022 DMA mode" select DMA help Enables DMA support for PL022 SPI driver. endif ```
/content/code_sandbox/drivers/spi/Kconfig.pl022
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
88
```unknown # Microchip XEC QMSPI config SPI_XEC_QMSPI bool "Microchip MEC15xx XEC QMSPI driver" default y depends on DT_HAS_MICROCHIP_XEC_QMSPI_ENABLED select DMA if SPI_ASYNC help Enable support for Microchip MEC15xx XEC QMSPI driver. config SPI_XEC_QMSPI_LDMA bool "Microchip XEC MEC17xx QMSPI LDMA driver" default y depends on DT_HAS_MICROCHIP_XEC_QMSPI_LDMA_ENABLED help Enable support for Microchip MEC17xx QMSPI with local DMA driver. ```
/content/code_sandbox/drivers/spi/Kconfig.xec_qmspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
143
```c /* * * * Some Ambiq Apollox Blue SOC (e.g. Apollo3 Blue) uses internal designed BLEIF module which is * different from the general IOM module for SPI transceiver. The called HAL API will also be * independent. This driver is implemented for the BLEIF module usage scenarios. */ #define DT_DRV_COMPAT ambiq_spi_bleif #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_ambiq_bleif); #include <zephyr/drivers/spi.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <stdlib.h> #include <errno.h> #include "spi_context.h" #include <am_mcu_apollo.h> #define PWRCTRL_MAX_WAIT_US 5 typedef int (*ambiq_spi_pwr_func_t)(void); struct spi_ambiq_config { uint32_t base; int size; const struct pinctrl_dev_config *pcfg; ambiq_spi_pwr_func_t pwr_func; }; struct spi_ambiq_data { struct spi_context ctx; am_hal_ble_config_t ble_cfg; void *BLEhandle; }; #define SPI_BASE (((const struct spi_ambiq_config *)(dev)->config)->base) #define REG_STAT 0x268 #define SPI_STAT(dev) (SPI_BASE + REG_STAT) #define SPI_WORD_SIZE 8 static int spi_config(const struct device *dev, const struct spi_config *config) { struct spi_ambiq_data *data = dev->data; struct spi_context *ctx = &(data->ctx); int ret = 0; if (spi_context_configured(ctx, config)) { /* Already configured. No need to do it again. 
*/ return 0; } if (SPI_WORD_SIZE_GET(config->operation) != SPI_WORD_SIZE) { LOG_ERR("Word size must be %d", SPI_WORD_SIZE); return -ENOTSUP; } if ((config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only supports single mode"); return -ENOTSUP; } if (config->operation & SPI_LOCK_ON) { LOG_ERR("Lock On not supported"); return -ENOTSUP; } if (config->operation & SPI_TRANSFER_LSB) { LOG_ERR("LSB first not supported"); return -ENOTSUP; } if (config->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } if (config->operation & SPI_MODE_LOOP) { LOG_ERR("Loopback mode not supported"); return -ENOTSUP; } /* We consider only the default configuration defined in HAL is tested and stable. */ data->ble_cfg = am_hal_ble_default_config; ctx->config = config; ret = am_hal_ble_config(data->BLEhandle, &data->ble_cfg); return ret; } static int spi_ambiq_xfer(const struct device *dev, const struct spi_config *config) { struct spi_ambiq_data *data = dev->data; struct spi_context *ctx = &data->ctx; int ret = 0; am_hal_ble_transfer_t trans = {0}; if (ctx->tx_len) { trans.ui8Command = AM_HAL_BLE_WRITE; trans.pui32Data = (uint32_t *)ctx->tx_buf; trans.ui16Length = ctx->tx_len; trans.bContinue = false; } else { trans.ui8Command = AM_HAL_BLE_READ; trans.pui32Data = (uint32_t *)ctx->rx_buf; trans.ui16Length = ctx->rx_len; trans.bContinue = false; } ret = am_hal_ble_blocking_transfer(data->BLEhandle, &trans); spi_context_complete(ctx, dev, 0); return ret; } static int spi_ambiq_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_ambiq_data *data = dev->data; int ret; ret = spi_config(dev, config); if (ret) { return ret; } if (!tx_bufs && !rx_bufs) { return 0; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); ret = spi_ambiq_xfer(dev, config); return ret; } static int spi_ambiq_release(const struct device *dev, const struct 
spi_config *config) { struct spi_ambiq_data *data = dev->data; if (!sys_read32(SPI_STAT(dev))) { return -EBUSY; } spi_context_unlock_unconditionally(&data->ctx); return 0; } static struct spi_driver_api spi_ambiq_driver_api = { .transceive = spi_ambiq_transceive, .release = spi_ambiq_release, }; static int spi_ambiq_init(const struct device *dev) { struct spi_ambiq_data *data = dev->data; const struct spi_ambiq_config *cfg = dev->config; int ret; #if defined(CONFIG_SPI_AMBIQ_BLEIF_TIMING_TRACE) ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } #endif /* CONFIG_SPI_AMBIQ_BLEIF_TIMING_TRACE */ ret = am_hal_ble_initialize((cfg->base - BLEIF_BASE) / cfg->size, &data->BLEhandle); if (ret) { return ret; } ret = am_hal_ble_power_control(data->BLEhandle, AM_HAL_BLE_POWER_ACTIVE); if (ret) { return ret; } ret = cfg->pwr_func(); return ret; } #define AMBIQ_SPI_BLEIF_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static int pwr_on_ambiq_spi_##n(void) \ { \ uint32_t addr = DT_REG_ADDR(DT_INST_PHANDLE(n, ambiq_pwrcfg)) + \ DT_INST_PHA(n, ambiq_pwrcfg, offset); \ sys_write32((sys_read32(addr) | DT_INST_PHA(n, ambiq_pwrcfg, mask)), addr); \ k_busy_wait(PWRCTRL_MAX_WAIT_US); \ return 0; \ } \ static struct spi_ambiq_data spi_ambiq_data##n = { \ SPI_CONTEXT_INIT_LOCK(spi_ambiq_data##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_ambiq_data##n, ctx)}; \ static const struct spi_ambiq_config spi_ambiq_config##n = { \ .base = DT_INST_REG_ADDR(n), \ .size = DT_INST_REG_SIZE(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .pwr_func = pwr_on_ambiq_spi_##n}; \ DEVICE_DT_INST_DEFINE(n, spi_ambiq_init, NULL, &spi_ambiq_data##n, &spi_ambiq_config##n, \ POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, &spi_ambiq_driver_api); DT_INST_FOREACH_STATUS_OKAY(AMBIQ_SPI_BLEIF_INIT) ```
/content/code_sandbox/drivers/spi/spi_ambiq_bleif.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,583
```c /* * */ #define DT_DRV_COMPAT st_stm32_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_ll_stm32); #include <zephyr/sys/util.h> #include <zephyr/kernel.h> #include <soc.h> #include <stm32_ll_spi.h> #include <errno.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/toolchain.h> #include <zephyr/pm/policy.h> #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> #ifdef CONFIG_SPI_STM32_DMA #include <zephyr/drivers/dma/dma_stm32.h> #include <zephyr/drivers/dma.h> #endif #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/irq.h> #include <zephyr/mem_mgmt/mem_attr.h> #ifdef CONFIG_DCACHE #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h> #endif /* CONFIG_DCACHE */ #ifdef CONFIG_NOCACHE_MEMORY #include <zephyr/linker/linker-defs.h> #elif defined(CONFIG_CACHE_MANAGEMENT) #include <zephyr/arch/cache.h> #endif /* CONFIG_NOCACHE_MEMORY */ #include "spi_ll_stm32.h" #if defined(CONFIG_DCACHE) && \ !defined(CONFIG_NOCACHE_MEMORY) /* currently, manual cache coherency management is only done on dummy_rx_tx_buffer */ #define SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED 1 #else #define SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED 0 #endif /* defined(CONFIG_DCACHE) && !defined(CONFIG_NOCACHE_MEMORY) */ #define WAIT_1US 1U /* * Check for SPI_SR_FRE to determine support for TI mode frame format * error flag, because STM32F1 SoCs do not support it and STM32CUBE * for F1 family defines an unused LL_SPI_SR_FRE. 
*/ #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) #define SPI_STM32_ERR_MSK (LL_SPI_SR_UDR | LL_SPI_SR_CRCE | LL_SPI_SR_MODF | \ LL_SPI_SR_OVR | LL_SPI_SR_TIFRE) #else #if defined(LL_SPI_SR_UDR) #define SPI_STM32_ERR_MSK (LL_SPI_SR_UDR | LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | \ LL_SPI_SR_OVR | LL_SPI_SR_FRE) #elif defined(SPI_SR_FRE) #define SPI_STM32_ERR_MSK (LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | \ LL_SPI_SR_OVR | LL_SPI_SR_FRE) #else #define SPI_STM32_ERR_MSK (LL_SPI_SR_CRCERR | LL_SPI_SR_MODF | LL_SPI_SR_OVR) #endif #endif /* CONFIG_SOC_SERIES_STM32MP1X */ static void spi_stm32_pm_policy_state_lock_get(const struct device *dev) { if (IS_ENABLED(CONFIG_PM)) { struct spi_stm32_data *data = dev->data; if (!data->pm_policy_state_on) { data->pm_policy_state_on = true; pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); if (IS_ENABLED(CONFIG_PM_S2RAM)) { pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES); } pm_device_runtime_get(dev); } } } static void spi_stm32_pm_policy_state_lock_put(const struct device *dev) { if (IS_ENABLED(CONFIG_PM)) { struct spi_stm32_data *data = dev->data; if (data->pm_policy_state_on) { data->pm_policy_state_on = false; pm_device_runtime_put(dev); pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); if (IS_ENABLED(CONFIG_PM_S2RAM)) { pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES); } } } } #ifdef CONFIG_SPI_STM32_DMA static uint32_t bits2bytes(uint32_t bits) { return bits / 8; } /* dummy buffer is used for transferring NOP when tx buf is null * and used as a dummy sink for when rx buf is null. */ /* * If Nocache Memory is supported, buffer will be placed in nocache region by * the linker to avoid potential DMA cache-coherency problems. * If Nocache Memory is not supported, cache coherency might need to be kept * manually. See SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED. 
*/ static __aligned(32) uint32_t dummy_rx_tx_buffer __nocache; /* This function is executed in the interrupt context */ static void dma_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status) { ARG_UNUSED(dma_dev); /* arg holds SPI DMA data * Passed in spi_stm32_dma_tx/rx_load() */ struct spi_stm32_data *spi_dma_data = arg; if (status < 0) { LOG_ERR("DMA callback error with channel %d.", channel); spi_dma_data->status_flags |= SPI_STM32_DMA_ERROR_FLAG; } else { /* identify the origin of this callback */ if (channel == spi_dma_data->dma_tx.channel) { /* this part of the transfer ends */ spi_dma_data->status_flags |= SPI_STM32_DMA_TX_DONE_FLAG; } else if (channel == spi_dma_data->dma_rx.channel) { /* this part of the transfer ends */ spi_dma_data->status_flags |= SPI_STM32_DMA_RX_DONE_FLAG; } else { LOG_ERR("DMA callback channel %d is not valid.", channel); spi_dma_data->status_flags |= SPI_STM32_DMA_ERROR_FLAG; } } k_sem_give(&spi_dma_data->status_sem); } static int spi_stm32_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len) { const struct spi_stm32_config *cfg = dev->config; struct spi_stm32_data *data = dev->data; struct dma_block_config *blk_cfg; int ret; /* remember active TX DMA channel (used in callback) */ struct stream *stream = &data->dma_tx; blk_cfg = &stream->dma_blk_cfg; /* prepare the block for this TX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); blk_cfg->block_size = len; /* tx direction has memory as source and periph as dest. */ if (buf == NULL) { /* if tx buff is null, then sends NOP on the line. 
*/ dummy_rx_tx_buffer = 0; #if SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED arch_dcache_flush_range((void *)&dummy_rx_tx_buffer, sizeof(uint32_t)); #endif /* SPI_STM32_MANUAL_CACHE_COHERENCY_REQUIRED */ blk_cfg->source_address = (uint32_t)&dummy_rx_tx_buffer; blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } else { blk_cfg->source_address = (uint32_t)buf; if (data->dma_tx.src_addr_increment) { blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } } blk_cfg->dest_address = ll_func_dma_get_reg_addr(cfg->spi, SPI_STM32_DMA_TX); /* fifo mode NOT USED there */ if (data->dma_tx.dst_addr_increment) { blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } /* give the fifo mode from the DT */ blk_cfg->fifo_mode_control = data->dma_tx.fifo_threshold; /* direction is given by the DT */ stream->dma_cfg.head_block = blk_cfg; /* give the dma channel data as arg, as the callback comes from the dma */ stream->dma_cfg.user_data = data; /* pass our client origin to the dma: data->dma_tx.dma_channel */ ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel, &stream->dma_cfg); /* the channel is the actual stream from 0 */ if (ret != 0) { return ret; } /* gives the request ID to the dma mux */ return dma_start(data->dma_tx.dma_dev, data->dma_tx.channel); } static int spi_stm32_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len) { const struct spi_stm32_config *cfg = dev->config; struct spi_stm32_data *data = dev->data; struct dma_block_config *blk_cfg; int ret; /* retrieve active RX DMA channel (used in callback) */ struct stream *stream = &data->dma_rx; blk_cfg = &stream->dma_blk_cfg; /* prepare the block for this RX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); blk_cfg->block_size = len; /* rx direction has periph as source and mem as dest. */ if (buf == NULL) { /* if rx buff is null, then write data to dummy address. 
*/ blk_cfg->dest_address = (uint32_t)&dummy_rx_tx_buffer; blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } else { blk_cfg->dest_address = (uint32_t)buf; if (data->dma_rx.dst_addr_increment) { blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } } blk_cfg->source_address = ll_func_dma_get_reg_addr(cfg->spi, SPI_STM32_DMA_RX); if (data->dma_rx.src_addr_increment) { blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } /* give the fifo mode from the DT */ blk_cfg->fifo_mode_control = data->dma_rx.fifo_threshold; /* direction is given by the DT */ stream->dma_cfg.head_block = blk_cfg; stream->dma_cfg.user_data = data; /* pass our client origin to the dma: data->dma_rx.channel */ ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel, &stream->dma_cfg); /* the channel is the actual stream from 0 */ if (ret != 0) { return ret; } /* gives the request ID to the dma mux */ return dma_start(data->dma_rx.dma_dev, data->dma_rx.channel); } static int spi_dma_move_buffers(const struct device *dev, size_t len) { struct spi_stm32_data *data = dev->data; int ret; size_t dma_segment_len; dma_segment_len = len * data->dma_rx.dma_cfg.dest_data_size; ret = spi_stm32_dma_rx_load(dev, data->ctx.rx_buf, dma_segment_len); if (ret != 0) { return ret; } dma_segment_len = len * data->dma_tx.dma_cfg.source_data_size; ret = spi_stm32_dma_tx_load(dev, data->ctx.tx_buf, dma_segment_len); return ret; } #endif /* CONFIG_SPI_STM32_DMA */ /* Value to shift out when no application data needs transmitting. 
*/ #define SPI_STM32_TX_NOP 0x00 static void spi_stm32_send_next_frame(SPI_TypeDef *spi, struct spi_stm32_data *data) { const uint8_t frame_size = SPI_WORD_SIZE_GET(data->ctx.config->operation); uint32_t tx_frame = SPI_STM32_TX_NOP; if (frame_size == 8) { if (spi_context_tx_buf_on(&data->ctx)) { tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf)); } LL_SPI_TransmitData8(spi, tx_frame); spi_context_update_tx(&data->ctx, 1, 1); } else { if (spi_context_tx_buf_on(&data->ctx)) { tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf)); } LL_SPI_TransmitData16(spi, tx_frame); spi_context_update_tx(&data->ctx, 2, 1); } } static void spi_stm32_read_next_frame(SPI_TypeDef *spi, struct spi_stm32_data *data) { const uint8_t frame_size = SPI_WORD_SIZE_GET(data->ctx.config->operation); uint32_t rx_frame = 0; if (frame_size == 8) { rx_frame = LL_SPI_ReceiveData8(spi); if (spi_context_rx_buf_on(&data->ctx)) { UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf); } spi_context_update_rx(&data->ctx, 1, 1); } else { rx_frame = LL_SPI_ReceiveData16(spi); if (spi_context_rx_buf_on(&data->ctx)) { UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf); } spi_context_update_rx(&data->ctx, 2, 1); } } static bool spi_stm32_transfer_ongoing(struct spi_stm32_data *data) { return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx); } static int spi_stm32_get_err(SPI_TypeDef *spi) { uint32_t sr = LL_SPI_ReadReg(spi, SR); if (sr & SPI_STM32_ERR_MSK) { LOG_ERR("%s: err=%d", __func__, sr & (uint32_t)SPI_STM32_ERR_MSK); /* OVR error must be explicitly cleared */ if (LL_SPI_IsActiveFlag_OVR(spi)) { LL_SPI_ClearFlag_OVR(spi); } return -EIO; } return 0; } static void spi_stm32_shift_fifo(SPI_TypeDef *spi, struct spi_stm32_data *data) { if (ll_func_rx_is_not_empty(spi)) { spi_stm32_read_next_frame(spi, data); } if (ll_func_tx_is_not_full(spi)) { spi_stm32_send_next_frame(spi, data); } } /* Shift a SPI frame as master. 
*/ static void spi_stm32_shift_m(const struct spi_stm32_config *cfg, struct spi_stm32_data *data) { if (cfg->fifo_enabled) { spi_stm32_shift_fifo(cfg->spi, data); } else { while (!ll_func_tx_is_not_full(cfg->spi)) { /* NOP */ } spi_stm32_send_next_frame(cfg->spi, data); while (!ll_func_rx_is_not_empty(cfg->spi)) { /* NOP */ } spi_stm32_read_next_frame(cfg->spi, data); } } /* Shift a SPI frame as slave. */ static void spi_stm32_shift_s(SPI_TypeDef *spi, struct spi_stm32_data *data) { if (ll_func_tx_is_not_full(spi) && spi_context_tx_on(&data->ctx)) { uint16_t tx_frame; if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) { tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf)); LL_SPI_TransmitData8(spi, tx_frame); spi_context_update_tx(&data->ctx, 1, 1); } else { tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf)); LL_SPI_TransmitData16(spi, tx_frame); spi_context_update_tx(&data->ctx, 2, 1); } } else { ll_func_disable_int_tx_empty(spi); } if (ll_func_rx_is_not_empty(spi) && spi_context_rx_buf_on(&data->ctx)) { uint16_t rx_frame; if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) { rx_frame = LL_SPI_ReceiveData8(spi); UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf); spi_context_update_rx(&data->ctx, 1, 1); } else { rx_frame = LL_SPI_ReceiveData16(spi); UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf); spi_context_update_rx(&data->ctx, 2, 1); } } } /* * Without a FIFO, we can only shift out one frame's worth of SPI * data, and read the response back. * * TODO: support 16-bit data frames. 
*/ static int spi_stm32_shift_frames(const struct spi_stm32_config *cfg, struct spi_stm32_data *data) { uint16_t operation = data->ctx.config->operation; if (SPI_OP_MODE_GET(operation) == SPI_OP_MODE_MASTER) { spi_stm32_shift_m(cfg, data); } else { spi_stm32_shift_s(cfg->spi, data); } return spi_stm32_get_err(cfg->spi); } static void spi_stm32_cs_control(const struct device *dev, bool on) { struct spi_stm32_data *data = dev->data; spi_context_cs_control(&data->ctx, on); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz) const struct spi_stm32_config *cfg = dev->config; if (cfg->use_subghzspi_nss) { if (on) { LL_PWR_SelectSUBGHZSPI_NSS(); } else { LL_PWR_UnselectSUBGHZSPI_NSS(); } } #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz) */ } static void spi_stm32_complete(const struct device *dev, int status) { const struct spi_stm32_config *cfg = dev->config; SPI_TypeDef *spi = cfg->spi; struct spi_stm32_data *data = dev->data; #ifdef CONFIG_SPI_STM32_INTERRUPT ll_func_disable_int_tx_empty(spi); ll_func_disable_int_rx_not_empty(spi); ll_func_disable_int_errors(spi); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) if (cfg->fifo_enabled) { LL_SPI_DisableIT_EOT(spi); } #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */ #endif /* CONFIG_SPI_STM32_INTERRUPT */ #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo) /* Flush RX buffer */ while (ll_func_rx_is_not_empty(spi)) { (void) LL_SPI_ReceiveData8(spi); } #endif /* compat st_stm32_spi_fifo*/ if (LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) { while (ll_func_spi_is_busy(spi)) { /* NOP */ } spi_stm32_cs_control(dev, false); } /* BSY flag is cleared when MODF flag is raised */ if (LL_SPI_IsActiveFlag_MODF(spi)) { LL_SPI_ClearFlag_MODF(spi); } #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) if (cfg->fifo_enabled) { LL_SPI_ClearFlag_TXTF(spi); LL_SPI_ClearFlag_OVR(spi); LL_SPI_ClearFlag_EOT(spi); LL_SPI_SetTransferSize(spi, 0); } #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */ if (!(data->ctx.config->operation & 
	      SPI_HOLD_ON_CS)) {
		ll_func_disable_spi(spi);
	}

#ifdef CONFIG_SPI_STM32_INTERRUPT
	spi_context_complete(&data->ctx, dev, status);
#endif

	spi_stm32_pm_policy_state_lock_put(dev);
}

#ifdef CONFIG_SPI_STM32_INTERRUPT
/* Interrupt service routine: shift frames while data remains, complete the
 * transfer on error or when both directions are exhausted.
 */
static void spi_stm32_isr(const struct device *dev)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	SPI_TypeDef *spi = cfg->spi;
	int err;

	/* Some spurious interrupts are triggered when SPI is not enabled; ignore them.
	 * Do it only when fifo is enabled to leave non-fifo functionality untouched for now
	 */
	if (cfg->fifo_enabled) {
		if (!LL_SPI_IsEnabled(spi)) {
			return;
		}
	}

	err = spi_stm32_get_err(spi);
	if (err) {
		spi_stm32_complete(dev, err);
		return;
	}

	if (spi_stm32_transfer_ongoing(data)) {
		err = spi_stm32_shift_frames(cfg, data);
	}

	if (err || !spi_stm32_transfer_ongoing(data)) {
		spi_stm32_complete(dev, err);
	}
}
#endif /* CONFIG_SPI_STM32_INTERRUPT */

/* Apply an spi_config to the peripheral: word size, frame format, baud rate
 * prescaler (from the bus clock), polarity/phase, bit order, NSS handling and
 * master/slave mode. No-op if the context already holds this config.
 */
static int spi_stm32_configure(const struct device *dev,
			       const struct spi_config *config)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	/* Prescaler table indexed by power of two: entry i divides by 2^(i+1) */
	const uint32_t scaler[] = {
		LL_SPI_BAUDRATEPRESCALER_DIV2,
		LL_SPI_BAUDRATEPRESCALER_DIV4,
		LL_SPI_BAUDRATEPRESCALER_DIV8,
		LL_SPI_BAUDRATEPRESCALER_DIV16,
		LL_SPI_BAUDRATEPRESCALER_DIV32,
		LL_SPI_BAUDRATEPRESCALER_DIV64,
		LL_SPI_BAUDRATEPRESCALER_DIV128,
		LL_SPI_BAUDRATEPRESCALER_DIV256
	};
	SPI_TypeDef *spi = cfg->spi;
	uint32_t clock;
	int br;

	if (spi_context_configured(&data->ctx, config)) {
		/* Nothing to do */
		return 0;
	}

	/* Only 8- and 16-bit frames are supported by this driver */
	if ((SPI_WORD_SIZE_GET(config->operation) != 8)
	    && (SPI_WORD_SIZE_GET(config->operation) != 16)) {
		return -ENOTSUP;
	}

	/* configure the frame format Motorola (default) or TI */
	if ((config->operation & SPI_FRAME_FORMAT_TI) == SPI_FRAME_FORMAT_TI) {
#ifdef LL_SPI_PROTOCOL_TI
		LL_SPI_SetStandard(spi, LL_SPI_PROTOCOL_TI);
#else
		LOG_ERR("Frame Format TI not supported");
		/* on stm32F1 or some stm32L1 (cat1,2) without SPI_CR2_FRF */
		return -ENOTSUP;
#endif
#if 
defined(LL_SPI_PROTOCOL_MOTOROLA) && defined(SPI_CR2_FRF)
	} else {
		LL_SPI_SetStandard(spi, LL_SPI_PROTOCOL_MOTOROLA);
#endif
	}

	/* Pick the kernel (domain) clock when one is provided, else the bus clock */
	if (IS_ENABLED(STM32_SPI_DOMAIN_CLOCK_SUPPORT) && (cfg->pclk_len > 1)) {
		if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					   (clock_control_subsys_t) &cfg->pclken[1], &clock) < 0) {
			LOG_ERR("Failed call clock_control_get_rate(pclk[1])");
			return -EIO;
		}
	} else {
		if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					   (clock_control_subsys_t) &cfg->pclken[0], &clock) < 0) {
			LOG_ERR("Failed call clock_control_get_rate(pclk[0])");
			return -EIO;
		}
	}

	/* Find the smallest divider (2^br) that brings the clock at or below
	 * the requested frequency.
	 */
	for (br = 1 ; br <= ARRAY_SIZE(scaler) ; ++br) {
		uint32_t clk = clock >> br;

		if (clk <= config->frequency) {
			break;
		}
	}

	if (br > ARRAY_SIZE(scaler)) {
		LOG_ERR("Unsupported frequency %uHz, max %uHz, min %uHz",
			config->frequency,
			clock >> 1,
			clock >> ARRAY_SIZE(scaler));
		return -EINVAL;
	}

	/* Peripheral must be disabled while changing its configuration */
	LL_SPI_Disable(spi);
	LL_SPI_SetBaudRatePrescaler(spi, scaler[br - 1]);

	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) {
		LL_SPI_SetClockPolarity(spi, LL_SPI_POLARITY_HIGH);
	} else {
		LL_SPI_SetClockPolarity(spi, LL_SPI_POLARITY_LOW);
	}

	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) {
		LL_SPI_SetClockPhase(spi, LL_SPI_PHASE_2EDGE);
	} else {
		LL_SPI_SetClockPhase(spi, LL_SPI_PHASE_1EDGE);
	}

	LL_SPI_SetTransferDirection(spi, LL_SPI_FULL_DUPLEX);

	if (config->operation & SPI_TRANSFER_LSB) {
		LL_SPI_SetTransferBitOrder(spi, LL_SPI_LSB_FIRST);
	} else {
		LL_SPI_SetTransferBitOrder(spi, LL_SPI_MSB_FIRST);
	}

	LL_SPI_DisableCRC(spi);

	/* Software NSS when CS is a GPIO or hardware SS is disabled;
	 * otherwise hardware NSS input (slave) / output (master).
	 */
	if (spi_cs_is_gpio(config) || !IS_ENABLED(CONFIG_SPI_STM32_USE_HW_SS)) {
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
		if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_MASTER) {
			if (LL_SPI_GetNSSPolarity(spi) == LL_SPI_NSS_POLARITY_LOW)
				LL_SPI_SetInternalSSLevel(spi, LL_SPI_SS_LEVEL_HIGH);
		}
#endif
		LL_SPI_SetNSSMode(spi, LL_SPI_NSS_SOFT);
	} else {
		if (config->operation & SPI_OP_MODE_SLAVE) {
			LL_SPI_SetNSSMode(spi, LL_SPI_NSS_HARD_INPUT);
		} else {
			LL_SPI_SetNSSMode(spi, LL_SPI_NSS_HARD_OUTPUT);
		}
	}

	if (config->operation & SPI_OP_MODE_SLAVE) {
		LL_SPI_SetMode(spi, LL_SPI_MODE_SLAVE);
	} else {
		LL_SPI_SetMode(spi, LL_SPI_MODE_MASTER);
	}

	if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		LL_SPI_SetDataWidth(spi, LL_SPI_DATAWIDTH_8BIT);
	} else {
		LL_SPI_SetDataWidth(spi, LL_SPI_DATAWIDTH_16BIT);
	}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	/* Inter-slave-select and inter-data idle cycles from devicetree */
	LL_SPI_SetMasterSSIdleness(spi, cfg->mssi_clocks);
	LL_SPI_SetInterDataIdleness(spi, (cfg->midi_clocks << SPI_CFG2_MIDI_Pos));
#endif

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo)
	ll_func_set_fifo_threshold_8bit(spi);
#endif

	/* At this point, it's mandatory to set this on the context! */
	data->ctx.config = config;

	LOG_DBG("Installed config %p: freq %uHz (div = %u),"
		" mode %u/%u/%u, slave %u",
		config, clock >> br, 1 << br,
		(SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) ? 1 : 0,
		(SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) ? 1 : 0,
		(SPI_MODE_GET(config->operation) & SPI_MODE_LOOP) ? 1 : 0,
		config->slave);

	return 0;
}

/* spi_driver_api.release: unlock the context unconditionally and disable the
 * peripheral (e.g. after a SPI_HOLD_ON_CS transaction).
 */
static int spi_stm32_release(const struct device *dev,
			     const struct spi_config *config)
{
	struct spi_stm32_data *data = dev->data;
	const struct spi_stm32_config *cfg = dev->config;

	spi_context_unlock_unconditionally(&data->ctx);
	ll_func_disable_spi(cfg->spi);

	return 0;
}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
/* Sum a buffer set's bytes and convert to frame count.
 * Returns -EINVAL if the total is not a whole number of frames; 0 for NULL.
 */
static int32_t spi_stm32_count_bufset_frames(const struct spi_config *config,
					     const struct spi_buf_set *bufs)
{
	if (bufs == NULL) {
		return 0;
	}

	uint32_t num_bytes = 0;

	for (size_t i = 0; i < bufs->count; i++) {
		num_bytes += bufs->buffers[i].len;
	}

	uint8_t bytes_per_frame = SPI_WORD_SIZE_GET(config->operation) / 8;

	if ((num_bytes % bytes_per_frame) != 0) {
		return -EINVAL;
	}
	return num_bytes / bytes_per_frame;
}

/* Total frames for a transceive = max(tx, rx); errors and the TSIZE register
 * limit (16 bits) are propagated as negative values.
 */
static int32_t spi_stm32_count_total_frames(const struct spi_config *config,
					    const struct spi_buf_set *tx_bufs,
					    const struct spi_buf_set *rx_bufs)
{
	int tx_frames = spi_stm32_count_bufset_frames(config, tx_bufs);

	if (tx_frames < 0) {
		return tx_frames;
	}

	int rx_frames = spi_stm32_count_bufset_frames(config, rx_bufs);

	if (rx_frames < 0) {
		return rx_frames;
	}

	/* TSIZE is a 16-bit field on stm32h7-class SPI */
	if (tx_frames > UINT16_MAX || rx_frames > UINT16_MAX) {
		return -EMSGSIZE;
	}

	return MAX(rx_frames, tx_frames);
}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

/* Common non-DMA transfer path (interrupt-driven or polled).
 * Locks the context, applies config, programs buffers/transfer size, enables
 * the peripheral and CS, then either waits on the ISR or polls shift_frames.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	SPI_TypeDef *spi = cfg->spi;
	int ret;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_SPI_STM32_INTERRUPT
	/* Async completion needs the ISR; polled builds cannot support it */
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif /* CONFIG_SPI_STM32_INTERRUPT */

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, config);

	spi_stm32_pm_policy_state_lock_get(dev);

	ret = spi_stm32_configure(dev, config);
	if (ret) {
		goto end;
	}

	/* Set buffers info */
	if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
	} else {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 2);
	}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	/* FIFO master mode needs the total frame count programmed up front */
	if (cfg->fifo_enabled && SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_MASTER) {
		int total_frames = spi_stm32_count_total_frames(
			config, tx_bufs, rx_bufs);
		if (total_frames < 0) {
			ret = total_frames;
			goto end;
		}
		LL_SPI_SetTransferSize(spi, (uint32_t)total_frames);
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo)
	/* Flush RX buffer */
	while (ll_func_rx_is_not_empty(spi)) {
		(void) LL_SPI_ReceiveData8(spi);
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_fifo) */

	LL_SPI_Enable(spi);

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	/* With the STM32MP1, STM32U5 and the STM32H7,
	 * if the device is the SPI master,
	 * we need to enable the start of the transfer with
	 * LL_SPI_StartMasterTransfer(spi)
	 */
	if (LL_SPI_GetMode(spi) ==
	    LL_SPI_MODE_MASTER) {
		LL_SPI_StartMasterTransfer(spi);
		while (!LL_SPI_IsActiveMasterTransfer(spi)) {
			/* NOP */
		}
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

#ifdef CONFIG_SOC_SERIES_STM32H7X
	/*
	 * Add a small delay after enabling to prevent transfer stalling at high
	 * system clock frequency (see errata sheet ES0392).
	 */
	k_busy_wait(WAIT_1US);
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	/* This is turned off in spi_stm32_complete(). */
	spi_stm32_cs_control(dev, true);

#ifdef CONFIG_SPI_STM32_INTERRUPT

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	if (cfg->fifo_enabled) {
		LL_SPI_EnableIT_EOT(spi);
	}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi) */

	ll_func_enable_int_errors(spi);

	if (rx_bufs) {
		ll_func_enable_int_rx_not_empty(spi);
	}

	ll_func_enable_int_tx_empty(spi);

	/* The ISR drives the transfer; block (or register callback) here */
	ret = spi_context_wait_for_completion(&data->ctx);
#else /* CONFIG_SPI_STM32_INTERRUPT */
	/* Polled mode: shift frames inline until done or error */
	do {
		ret = spi_stm32_shift_frames(cfg, data);
	} while (!ret && spi_stm32_transfer_ongoing(data));

	spi_stm32_complete(dev, ret);

#ifdef CONFIG_SPI_SLAVE
	/* Slave transfers report the number of received frames on success */
	if (spi_context_is_slave(&data->ctx) && !ret) {
		ret = data->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

#endif /* CONFIG_SPI_STM32_INTERRUPT */

end:
	spi_context_release(&data->ctx, ret);

	return ret;
}

#ifdef CONFIG_SPI_STM32_DMA
/* Block until the DMA callback signals both directions done (DONE flag) or
 * an error (ERROR flag). Returns 0, -EIO, or the k_sem_take() error.
 */
static int wait_dma_rx_tx_done(const struct device *dev)
{
	struct spi_stm32_data *data = dev->data;
	int res = -1;
	k_timeout_t timeout;

	/*
	 * In slave mode we do not know when the transaction will start. Hence,
	 * it doesn't make sense to have timeout in this case.
	 */
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(&data->ctx)) {
		timeout = K_FOREVER;
	} else {
		timeout = K_MSEC(1000);
	}

	while (1) {
		res = k_sem_take(&data->status_sem, timeout);
		if (res != 0) {
			return res;
		}

		if (data->status_flags & SPI_STM32_DMA_ERROR_FLAG) {
			return -EIO;
		}

		if (data->status_flags & SPI_STM32_DMA_DONE_FLAG) {
			return 0;
		}
	}

	return res;
}

#ifdef CONFIG_DCACHE
/* True if [buf, buf+len) lies entirely in non-cacheable memory (linker
 * nocache region or a DT-defined MPU nocache region). DMA buffers must be
 * non-cacheable when the data cache is enabled.
 */
static bool buf_in_nocache(uintptr_t buf, size_t len_bytes)
{
	bool buf_within_nocache = false;

#ifdef CONFIG_NOCACHE_MEMORY
	/* Check if buffer is in nocache region defined by the linker */
	buf_within_nocache = (buf >= ((uintptr_t)_nocache_ram_start)) &&
		((buf + len_bytes - 1) <= ((uintptr_t)_nocache_ram_end));
	if (buf_within_nocache) {
		return true;
	}
#endif /* CONFIG_NOCACHE_MEMORY */

	/* Check if buffer is in nocache memory region defined in DT */
	buf_within_nocache = mem_attr_check_buf(
		(void *)buf, len_bytes, DT_MEM_ARM(ATTR_MPU_RAM_NOCACHE)) == 0;

	return buf_within_nocache;
}

/* NULL-buffer entries are "dummy" placeholders (discard RX / send zeros) */
static bool is_dummy_buffer(const struct spi_buf *buf)
{
	return buf->buf == NULL;
}

/* All non-dummy buffers of the set must be DMA-safe (nocache) */
static bool spi_buf_set_in_nocache(const struct spi_buf_set *bufs)
{
	for (size_t i = 0; i < bufs->count; i++) {
		const struct spi_buf *buf = &bufs->buffers[i];

		if (!is_dummy_buffer(buf) &&
				!buf_in_nocache((uintptr_t)buf->buf, buf->len)) {
			return false;
		}
	}
	return true;
}
#endif /* CONFIG_DCACHE */

/* DMA transfer path: synchronous only. Buffers are moved chunk by chunk
 * (min of tx/rx remaining) with the DMA engine; completion is signalled via
 * the status semaphore from the DMA callback.
 */
static int transceive_dma(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_stm32_config *cfg = dev->config;
	struct spi_stm32_data *data = dev->data;
	SPI_TypeDef *spi = cfg->spi;
	int ret;
	int err;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

	if (asynchronous) {
		return -ENOTSUP;
	}

#ifdef CONFIG_DCACHE
	/* Reject cacheable buffers: DMA would race the data cache */
	if ((tx_bufs != NULL && !spi_buf_set_in_nocache(tx_bufs)) ||
		(rx_bufs != NULL && !spi_buf_set_in_nocache(rx_bufs))) {
		return -EFAULT;
	}
#endif /* CONFIG_DCACHE */

	spi_context_lock(&data->ctx,
			 asynchronous, cb, userdata, config);

	spi_stm32_pm_policy_state_lock_get(dev);

	k_sem_reset(&data->status_sem);

	ret = spi_stm32_configure(dev, config);
	if (ret) {
		goto end;
	}

	/* Set buffers info */
	if (SPI_WORD_SIZE_GET(config->operation) == 8) {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
	} else {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 2);
	}

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
	/* set request before enabling (else SPI CFG1 reg is write protected) */
	LL_SPI_EnableDMAReq_RX(spi);
	LL_SPI_EnableDMAReq_TX(spi);

	LL_SPI_Enable(spi);
	if (LL_SPI_GetMode(spi) == LL_SPI_MODE_MASTER) {
		LL_SPI_StartMasterTransfer(spi);
	}
#else
	LL_SPI_Enable(spi);
#endif /* st_stm32h7_spi */

	/* This is turned off in spi_stm32_complete(). */
	spi_stm32_cs_control(dev, true);

	/* One DMA round per loop iteration; chunk size is the overlap of the
	 * current tx and rx segments (or whichever direction remains).
	 */
	while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
		size_t dma_len;

		if (data->ctx.rx_len == 0) {
			dma_len = data->ctx.tx_len;
		} else if (data->ctx.tx_len == 0) {
			dma_len = data->ctx.rx_len;
		} else {
			dma_len = MIN(data->ctx.tx_len, data->ctx.rx_len);
		}

		data->status_flags = 0;

		ret = spi_dma_move_buffers(dev, dma_len);
		if (ret != 0) {
			break;
		}

#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
		/* toggle the DMA request to restart the transfer */
		LL_SPI_EnableDMAReq_RX(spi);
		LL_SPI_EnableDMAReq_TX(spi);
#endif /* ! st_stm32h7_spi */

		ret = wait_dma_rx_tx_done(dev);
		if (ret != 0) {
			break;
		}

#ifdef SPI_SR_FTLVL
		/* Drain: wait for the TX FIFO to empty before the next chunk */
		while (LL_SPI_GetTxFIFOLevel(spi) > 0) {
		}
#endif /* SPI_SR_FTLVL */

#ifdef CONFIG_SPI_STM32_ERRATA_BUSY
		WAIT_FOR(ll_func_spi_dma_busy(spi) != 0,
			 CONFIG_SPI_STM32_BUSY_FLAG_TIMEOUT,
			 k_yield());
#else
		/* wait until spi is no more busy (spi TX fifo is really empty) */
		while (ll_func_spi_dma_busy(spi) == 0) {
		}
#endif /* CONFIG_SPI_STM32_ERRATA_BUSY */

#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi)
		/* toggle the DMA transfer request */
		LL_SPI_DisableDMAReq_TX(spi);
		LL_SPI_DisableDMAReq_RX(spi);
#endif /* !
st_stm32h7_spi */

		uint8_t frame_size_bytes = bits2bytes(
			SPI_WORD_SIZE_GET(config->operation));

		spi_context_update_tx(&data->ctx, frame_size_bytes, dma_len);
		spi_context_update_rx(&data->ctx, frame_size_bytes, dma_len);
	}

	/* spi complete relies on SPI Status Reg which cannot be disabled */
	spi_stm32_complete(dev, ret);
	/* disable spi instance after completion */
	LL_SPI_Disable(spi);
	/* The Config. Reg. on some mcus is write un-protected when SPI is disabled */
	LL_SPI_DisableDMAReq_TX(spi);
	LL_SPI_DisableDMAReq_RX(spi);

	/* Best-effort channel teardown; failures are only logged */
	err = dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
	if (err) {
		LOG_DBG("Rx dma_stop failed with error %d", err);
	}
	err = dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
	if (err) {
		LOG_DBG("Tx dma_stop failed with error %d", err);
	}

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(&data->ctx) && !ret) {
		ret = data->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

end:
	spi_context_release(&data->ctx, ret);

	spi_stm32_pm_policy_state_lock_put(dev);

	return ret;
}
#endif /* CONFIG_SPI_STM32_DMA */

/* spi_driver_api.transceive: use the DMA path only when both rx and tx DMA
 * channels were provided in devicetree, otherwise the IRQ/polled path.
 */
static int spi_stm32_transceive(const struct device *dev,
				const struct spi_config *config,
				const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs)
{
#ifdef CONFIG_SPI_STM32_DMA
	struct spi_stm32_data *data = dev->data;

	if ((data->dma_tx.dma_dev != NULL)
	 && (data->dma_rx.dma_dev != NULL)) {
		return transceive_dma(dev, config, tx_bufs, rx_bufs,
				      false, NULL, NULL);
	}
#endif /* CONFIG_SPI_STM32_DMA */
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
/* spi_driver_api.transceive_async: interrupt path with completion callback */
static int spi_stm32_transceive_async(const struct device *dev,
				      const struct spi_config *config,
				      const struct spi_buf_set *tx_bufs,
				      const struct spi_buf_set *rx_bufs,
				      spi_callback_t cb,
				      void *userdata)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static const struct spi_driver_api api_funcs = {
	.transceive = spi_stm32_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async =
	spi_stm32_transceive_async,
#endif
	.release = spi_stm32_release,
};

/* True when this instance is the STM32WL SubGHz radio SPI (no external pins) */
static inline bool spi_stm32_is_subghzspi(const struct device *dev)
{
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz)
	const struct spi_stm32_config *cfg = dev->config;

	return cfg->use_subghzspi_nss;
#else
	ARG_UNUSED(dev);
	return false;
#endif /* st_stm32_spi_subghz */
}

/* Device init: enable bus clock (and optional domain clock), apply pinctrl,
 * hook the IRQ, sanity-check DMA devices, configure CS GPIOs and unlock the
 * context. Finishes by enabling PM device runtime.
 */
static int spi_stm32_init(const struct device *dev)
{
	struct spi_stm32_data *data __attribute__((unused)) = dev->data;
	const struct spi_stm32_config *cfg = dev->config;
	int err;

	if (!device_is_ready(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE))) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	err = clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
			       (clock_control_subsys_t) &cfg->pclken[0]);
	if (err < 0) {
		LOG_ERR("Could not enable SPI clock");
		return err;
	}

	if (IS_ENABLED(STM32_SPI_DOMAIN_CLOCK_SUPPORT) && (cfg->pclk_len > 1)) {
		err = clock_control_configure(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					      (clock_control_subsys_t) &cfg->pclken[1],
					      NULL);
		if (err < 0) {
			LOG_ERR("Could not select SPI domain clock");
			return err;
		}
	}

	/* SubGHz SPI is internal: it has no pinctrl state to apply */
	if (!spi_stm32_is_subghzspi(dev)) {
		/* Configure dt provided device signals when available */
		err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
		if (err < 0) {
			LOG_ERR("SPI pinctrl setup failed (%d)", err);
			return err;
		}
	}

#ifdef CONFIG_SPI_STM32_INTERRUPT
	cfg->irq_config(dev);
#endif /* CONFIG_SPI_STM32_INTERRUPT */

#ifdef CONFIG_SPI_STM32_DMA
	if ((data->dma_rx.dma_dev != NULL) &&
				!device_is_ready(data->dma_rx.dma_dev)) {
		LOG_ERR("%s device not ready", data->dma_rx.dma_dev->name);
		return -ENODEV;
	}

	if ((data->dma_tx.dma_dev != NULL) &&
				!device_is_ready(data->dma_tx.dma_dev)) {
		LOG_ERR("%s device not ready", data->dma_tx.dma_dev->name);
		return -ENODEV;
	}

	LOG_DBG("SPI with DMA transfer");

#endif /* CONFIG_SPI_STM32_DMA */

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return pm_device_runtime_enable(dev);
}
#ifdef CONFIG_PM_DEVICE
/* PM hook: RESUME re-applies default pinctrl and gates the clock on;
 * SUSPEND gates the clock off and moves pins to their sleep state.
 */
static int spi_stm32_pm_action(const struct device *dev,
			       enum pm_device_action action)
{
	const struct spi_stm32_config *config = dev->config;
	const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
	int err;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		if (!spi_stm32_is_subghzspi(dev)) {
			/* Set pins to active state */
			err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
			if (err < 0) {
				return err;
			}
		}

		/* enable clock */
		err = clock_control_on(clk, (clock_control_subsys_t)&config->pclken[0]);
		if (err != 0) {
			LOG_ERR("Could not enable SPI clock");
			return err;
		}
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		/* Stop device clock. */
		err = clock_control_off(clk, (clock_control_subsys_t)&config->pclken[0]);
		if (err != 0) {
			LOG_ERR("Could not disable SPI clock");
			return err;
		}

		if (!spi_stm32_is_subghzspi(dev)) {
			/* Move pins to sleep state */
			err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
			if ((err < 0) && (err != -ENOENT)) {
				/*
				 * If returning -ENOENT, no pins where defined for sleep mode :
				 * Do not output on console (might sleep already) when going to
				 * sleep,
				 * "SPI pinctrl sleep state not available"
				 * and don't block PM suspend.
				 * Else return the error.
				 */
				return err;
			}
		}
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif /* CONFIG_PM_DEVICE */

/* Per-instance IRQ plumbing: declaration, config-struct initializer entry,
 * and the IRQ_CONNECT function body. All empty when interrupts are off.
 */
#ifdef CONFIG_SPI_STM32_INTERRUPT
#define STM32_SPI_IRQ_HANDLER_DECL(id) \
	static void spi_stm32_irq_config_func_##id(const struct device *dev)
#define STM32_SPI_IRQ_HANDLER_FUNC(id) \
	.irq_config = spi_stm32_irq_config_func_##id,
#define STM32_SPI_IRQ_HANDLER(id) \
	static void spi_stm32_irq_config_func_##id(const struct device *dev) \
	{ \
		IRQ_CONNECT(DT_INST_IRQN(id), \
			    DT_INST_IRQ(id, priority), \
			    spi_stm32_isr, DEVICE_DT_INST_GET(id), 0); \
		irq_enable(DT_INST_IRQN(id)); \
	}
#else
#define STM32_SPI_IRQ_HANDLER_DECL(id)
#define STM32_SPI_IRQ_HANDLER_FUNC(id)
#define STM32_SPI_IRQ_HANDLER(id)
#endif /* CONFIG_SPI_STM32_INTERRUPT */

/* Initializer fragment for one spi_stm32_dma_data member, pulling channel,
 * slot, direction, data sizes and features from the instance's DT dmas entry.
 */
#define SPI_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev) \
	.dma_dev = DEVICE_DT_GET(STM32_DMA_CTLR(index, dir)), \
	.channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel), \
	.dma_cfg = { \
		.dma_slot = STM32_DMA_SLOT(index, dir, slot), \
		.channel_direction = STM32_DMA_CONFIG_DIRECTION( \
					STM32_DMA_CHANNEL_CONFIG(index, dir)), \
		.source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE( \
					STM32_DMA_CHANNEL_CONFIG(index, dir)), \
		.dest_data_size = STM32_DMA_CONFIG_##dest_dev##_DATA_SIZE( \
					STM32_DMA_CHANNEL_CONFIG(index, dir)), \
		.source_burst_length = 1, /* SINGLE transfer */ \
		.dest_burst_length = 1, /* SINGLE transfer */ \
		.channel_priority = STM32_DMA_CONFIG_PRIORITY( \
					STM32_DMA_CHANNEL_CONFIG(index, dir)), \
		.dma_callback = dma_callback, \
		.block_count = 2, \
	}, \
	.src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC( \
				STM32_DMA_CHANNEL_CONFIG(index, dir)), \
	.dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC( \
				STM32_DMA_CHANNEL_CONFIG(index, dir)), \
	.fifo_threshold = STM32_DMA_FEATURES_FIFO_THRESHOLD( \
				STM32_DMA_FEATURES(index, dir)), \

#ifdef CONFIG_SPI_STM32_DMA
/* Emit the channel initializer only when the named dmas property exists */
#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest) \
	.dma_##dir = { \
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, dir), \
			    (SPI_DMA_CHANNEL_INIT(id, dir, 
DIR, src, dest)), \
			    (NULL)) \
		},
/* Semaphore signalled by the DMA callback; created locked (count 0) */
#define SPI_DMA_STATUS_SEM(id) \
	.status_sem = Z_SEM_INITIALIZER( \
		spi_stm32_dev_data_##id.status_sem, 0, 1),
#else
#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)
#define SPI_DMA_STATUS_SEM(id)
#endif /* CONFIG_SPI_STM32_DMA */

/* fifo-enable is an optional DT property; default to 0 when absent */
#define SPI_SUPPORTS_FIFO(id) DT_INST_NODE_HAS_PROP(id, fifo_enable)
#define SPI_GET_FIFO_PROP(id) DT_INST_PROP(id, fifo_enable)
#define SPI_FIFO_ENABLED(id) COND_CODE_1(SPI_SUPPORTS_FIFO(id), (SPI_GET_FIFO_PROP(id)), (0))

/* Instantiate one SPI controller: config, data, PM hooks, device and IRQ glue */
#define STM32_SPI_INIT(id) \
STM32_SPI_IRQ_HANDLER_DECL(id); \
 \
PINCTRL_DT_INST_DEFINE(id); \
 \
static const struct stm32_pclken pclken_##id[] = \
					       STM32_DT_INST_CLOCKS(id); \
 \
static const struct spi_stm32_config spi_stm32_cfg_##id = { \
	.spi = (SPI_TypeDef *) DT_INST_REG_ADDR(id), \
	.pclken = pclken_##id, \
	.pclk_len = DT_INST_NUM_CLOCKS(id), \
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \
	.fifo_enabled = SPI_FIFO_ENABLED(id), \
	STM32_SPI_IRQ_HANDLER_FUNC(id) \
	IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32_spi_subghz), \
		(.use_subghzspi_nss = \
			DT_INST_PROP_OR(id, use_subghzspi_nss, false),)) \
	IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi), \
		(.midi_clocks = \
			DT_INST_PROP(id, midi_clock),)) \
	IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_spi), \
		(.mssi_clocks = \
			DT_INST_PROP(id, mssi_clock),)) \
}; \
 \
static struct spi_stm32_data spi_stm32_dev_data_##id = { \
	SPI_CONTEXT_INIT_LOCK(spi_stm32_dev_data_##id, ctx), \
	SPI_CONTEXT_INIT_SYNC(spi_stm32_dev_data_##id, ctx), \
	SPI_DMA_CHANNEL(id, rx, RX, PERIPHERAL, MEMORY) \
	SPI_DMA_CHANNEL(id, tx, TX, MEMORY, PERIPHERAL) \
	SPI_DMA_STATUS_SEM(id) \
	SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx) \
}; \
 \
PM_DEVICE_DT_INST_DEFINE(id, spi_stm32_pm_action); \
 \
DEVICE_DT_INST_DEFINE(id, spi_stm32_init, PM_DEVICE_DT_INST_GET(id), \
		    &spi_stm32_dev_data_##id, &spi_stm32_cfg_##id, \
		    POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
		    &api_funcs); \
 \
STM32_SPI_IRQ_HANDLER(id)

DT_INST_FOREACH_STATUS_OKAY(STM32_SPI_INIT)
```
/content/code_sandbox/drivers/spi/spi_ll_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,615
```c
/*
 *
 */

#define DT_DRV_COMPAT espressif_esp32_spi

/* Include esp-idf headers first to avoid redefining BIT() macro */
#include <hal/spi_hal.h>
#include <esp_attr.h>
#include <esp_clk_tree.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(esp32_spi, CONFIG_SPI_LOG_LEVEL);

#include <soc.h>
#include <esp_memory_utils.h>
#include <zephyr/drivers/spi.h>
#if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6)
#include <zephyr/drivers/interrupt_controller/intc_esp32c3.h>
#else
#include <zephyr/drivers/interrupt_controller/intc_esp32.h>
#endif
#ifdef SOC_GDMA_SUPPORTED
#include <hal/gdma_hal.h>
#include <hal/gdma_ll.h>
#endif
#include <zephyr/drivers/clock_control.h>
#include "spi_context.h"
#include "spi_esp32_spim.h"

/* The C3/C6 interrupt allocator uses a different handler typedef */
#if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6)
#define ISR_HANDLER isr_handler_t
#else
#define ISR_HANDLER intr_handler_t
#endif

#define SPI_DMA_MAX_BUFFER_SIZE 4092

/* True while either direction of the current context still has data */
static bool spi_esp32_transfer_ongoing(struct spi_esp32_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}

/* Common transfer teardown: mask/clear interrupts (IRQ builds), release CS,
 * and signal completion to any waiter.
 */
static inline void spi_esp32_complete(const struct device *dev,
				      struct spi_esp32_data *data,
				      spi_dev_t *spi, int status)
{
#ifdef CONFIG_SPI_ESP32_INTERRUPT
	spi_ll_disable_int(spi);
	spi_ll_clear_int_stat(spi);
#endif

	spi_context_cs_control(&data->ctx, false);

#ifdef CONFIG_SPI_ESP32_INTERRUPT
	spi_context_complete(&data->ctx, dev, status);
#endif
}

/* Move one chunk (up to the DMA or CPU buffer limit) through the SPI HAL,
 * bouncing through temp heap buffers when the caller's buffers are not
 * DMA-capable. Polls the HAL until the user transaction finishes.
 */
static int IRAM_ATTR spi_esp32_transfer(const struct device *dev)
{
	struct spi_esp32_data *data = dev->data;
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_context *ctx = &data->ctx;
	spi_hal_context_t *hal = &data->hal;
	spi_hal_dev_config_t *hal_dev = &data->dev_config;
	spi_hal_trans_config_t *hal_trans = &data->trans_config;
	size_t chunk_len_bytes = spi_context_max_continuous_chunk(&data->ctx) * data->dfs;
	size_t max_buf_sz =
		cfg->dma_enabled ?
SPI_DMA_MAX_BUFFER_SIZE : SOC_SPI_MAXIMUM_BUFFER_SIZE; size_t transfer_len_bytes = MIN(chunk_len_bytes, max_buf_sz); size_t transfer_len_frames = transfer_len_bytes / data->dfs; size_t bit_len = transfer_len_bytes << 3; uint8_t *rx_temp = NULL; uint8_t *tx_temp = NULL; uint8_t dma_len_tx = MIN(ctx->tx_len * data->dfs, SPI_DMA_MAX_BUFFER_SIZE); uint8_t dma_len_rx = MIN(ctx->rx_len * data->dfs, SPI_DMA_MAX_BUFFER_SIZE); if (cfg->dma_enabled) { /* bit_len needs to be at least one byte long when using DMA */ bit_len = !bit_len ? 8 : bit_len; if (ctx->tx_buf && !esp_ptr_dma_capable((uint32_t *)&ctx->tx_buf[0])) { LOG_DBG("Tx buffer not DMA capable"); tx_temp = k_malloc(dma_len_tx); if (!tx_temp) { LOG_ERR("Error allocating temp buffer Tx"); return -ENOMEM; } memcpy(tx_temp, &ctx->tx_buf[0], dma_len_tx); } if (ctx->rx_buf && (!esp_ptr_dma_capable((uint32_t *)&ctx->rx_buf[0]) || ((int)&ctx->rx_buf[0] % 4 != 0) || (dma_len_rx % 4 != 0))) { /* The rx buffer need to be length of * multiples of 32 bits to avoid heap * corruption. */ LOG_DBG("Rx buffer not DMA capable"); rx_temp = k_calloc(((dma_len_rx << 3) + 31) / 8, sizeof(uint8_t)); if (!rx_temp) { LOG_ERR("Error allocating temp buffer Rx"); k_free(tx_temp); return -ENOMEM; } } } /* clean up and prepare SPI hal */ memset((uint32_t *)hal->hw->data_buf, 0, sizeof(hal->hw->data_buf)); hal_trans->send_buffer = tx_temp ? tx_temp : (uint8_t *)ctx->tx_buf; hal_trans->rcv_buffer = rx_temp ? 
				rx_temp : ctx->rx_buf;
	hal_trans->tx_bitlen = bit_len;
	hal_trans->rx_bitlen = bit_len;

	/* keep cs line active until last transmission */
	hal_trans->cs_keep_active =
		(!ctx->num_cs_gpios &&
		 (ctx->rx_count > 1 || ctx->tx_count > 1 ||
		  ctx->rx_len > transfer_len_frames || ctx->tx_len > transfer_len_frames));

	/* configure SPI */
	spi_hal_setup_trans(hal, hal_dev, hal_trans);
	spi_hal_prepare_data(hal, hal_dev, hal_trans);

	/* send data */
	spi_hal_user_start(hal);
	spi_context_update_tx(&data->ctx, data->dfs, transfer_len_frames);

	while (!spi_hal_usr_is_done(hal)) {
		/* nop */
	}

	/* read data */
	spi_hal_fetch_result(hal);

	/* Copy out of the bounce buffer if one was used */
	if (rx_temp) {
		memcpy(&ctx->rx_buf[0], rx_temp, transfer_len_bytes);
	}

	spi_context_update_rx(&data->ctx, data->dfs, transfer_len_frames);

	/* k_free(NULL) is a no-op, so unconditional frees are safe */
	k_free(tx_temp);
	k_free(rx_temp);

	return 0;
}

#ifdef CONFIG_SPI_ESP32_INTERRUPT
/* ISR: drain the whole transaction chunk by chunk, then complete it */
static void IRAM_ATTR spi_esp32_isr(void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;

	do {
		spi_esp32_transfer(dev);
	} while (spi_esp32_transfer_ongoing(data));

	spi_esp32_complete(dev, data, cfg->spi, 0);
}
#endif

/* Set up the (G)DMA engine for this SPI host and populate the HAL config
 * with one descriptor per direction.
 */
static int spi_esp32_init_dma(const struct device *dev)
{
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;
	uint8_t channel_offset;

	if (clock_control_on(cfg->clock_dev, (clock_control_subsys_t)cfg->dma_clk_src)) {
		LOG_ERR("Could not enable DMA clock");
		return -EIO;
	}

#ifdef SOC_GDMA_SUPPORTED
	gdma_hal_init(&data->hal_gdma, 0);
	gdma_ll_enable_clock(data->hal_gdma.dev, true);
	gdma_ll_tx_reset_channel(data->hal_gdma.dev, cfg->dma_host);
	gdma_ll_rx_reset_channel(data->hal_gdma.dev, cfg->dma_host);
	gdma_ll_tx_connect_to_periph(data->hal_gdma.dev, cfg->dma_host, GDMA_TRIG_PERIPH_SPI,
				     cfg->dma_host);
	gdma_ll_rx_connect_to_periph(data->hal_gdma.dev, cfg->dma_host, GDMA_TRIG_PERIPH_SPI,
				     cfg->dma_host);
	channel_offset = 0;
#else
	channel_offset = 1;
#endif /* SOC_GDMA_SUPPORTED */
#ifdef 
CONFIG_SOC_SERIES_ESP32
	/*Connect SPI and DMA*/
	DPORT_SET_PERI_REG_BITS(DPORT_SPI_DMA_CHAN_SEL_REG, 3, cfg->dma_host + 1,
				((cfg->dma_host + 1) * 2));
#endif /* CONFIG_SOC_SERIES_ESP32 */

	data->hal_config.dma_in = (spi_dma_dev_t *)cfg->spi;
	data->hal_config.dma_out = (spi_dma_dev_t *)cfg->spi;
	data->hal_config.dma_enabled = true;
	data->hal_config.tx_dma_chan = cfg->dma_host + channel_offset;
	data->hal_config.rx_dma_chan = cfg->dma_host + channel_offset;
	data->hal_config.dmadesc_n = 1;
	data->hal_config.dmadesc_rx = &data->dma_desc_rx;
	data->hal_config.dmadesc_tx = &data->dma_desc_tx;

	/* NOTE(review): these are addresses of struct members, never NULL,
	 * so this check (and the k_free of non-heap pointers) looks like dead
	 * code left from a heap-allocated-descriptor version — verify upstream.
	 */
	if (data->hal_config.dmadesc_tx == NULL || data->hal_config.dmadesc_rx == NULL) {
		k_free(data->hal_config.dmadesc_tx);
		k_free(data->hal_config.dmadesc_rx);
		return -ENOMEM;
	}

	spi_hal_init(&data->hal, cfg->dma_host + 1, &data->hal_config);
	return 0;
}

/* Device init: enable the SPI peripheral clock, init the HAL (and DMA when
 * enabled), hook the ISR, configure CS GPIOs, and cache the clock source
 * frequency for later baud calculations.
 */
static int spi_esp32_init(const struct device *dev)
{
	int err;
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;
	spi_hal_context_t *hal = &data->hal;

	if (!cfg->clock_dev) {
		return -EINVAL;
	}

	if (!device_is_ready(cfg->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	/* Enables SPI peripheral */
	err = clock_control_on(cfg->clock_dev, cfg->clock_subsys);
	if (err < 0) {
		LOG_ERR("Error enabling SPI clock");
		return err;
	}

	spi_ll_master_init(hal->hw);

	if (cfg->dma_enabled) {
		spi_esp32_init_dma(dev);
	}

#ifdef CONFIG_SPI_ESP32_INTERRUPT
	spi_ll_disable_int(cfg->spi);
	spi_ll_clear_int_stat(cfg->spi);

	esp_intr_alloc(cfg->irq_source, 0, (ISR_HANDLER)spi_esp32_isr, (void *)dev, NULL);
#endif

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	err = esp_clk_tree_src_get_freq_hz(
		cfg->clock_source, ESP_CLK_TREE_SRC_FREQ_PRECISION_APPROX, &data->clock_source_hz);
	if (err) {
		LOG_ERR("Could not get clock source frequency (%d)", err);
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

/* Map SPI_LINES_* operation bits to the HAL's data-line count (default 1) */
static inline uint8_t spi_esp32_get_line_mode(uint16_t operation)
{
	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES)) {
		switch (operation & SPI_LINES_MASK) {
		case SPI_LINES_SINGLE:
			return 1;
		case SPI_LINES_DUAL:
			return 2;
		case SPI_LINES_OCTAL:
			return 8;
		case SPI_LINES_QUAD:
			return 4;
		default:
			break;
		}
	}

	return 1;
}

/* Apply an spi_config to the HAL device config: rejects half-duplex, slave
 * and loopback modes; computes timing from the requested frequency; sets
 * bit order, SPI mode (CPOL/CPHA) and CS timings. No-op if unchanged.
 */
static int IRAM_ATTR spi_esp32_configure(const struct device *dev,
					 const struct spi_config *spi_cfg)
{
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	spi_hal_context_t *hal = &data->hal;
	spi_hal_dev_config_t *hal_dev = &data->dev_config;
	spi_dev_t *hw = hal->hw;
	int freq;

	if (spi_context_configured(ctx, spi_cfg)) {
		return 0;
	}

	ctx->config = spi_cfg;

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_OP_MODE_SLAVE) {
		LOG_ERR("Slave mode not supported");
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -ENOTSUP;
	}

	hal_dev->cs_pin_id = ctx->config->slave;
	int ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);

	/* BUGFIX: the pinctrl result was stored but never checked, silently
	 * ignoring pin mux failures; propagate the error to the caller.
	 */
	if (ret) {
		LOG_ERR("SPI pinctrl setup failed (%d)", ret);
		return ret;
	}

	/* input parameters to calculate timing configuration */
	spi_hal_timing_param_t timing_param = {
		.half_duplex = hal_dev->half_duplex,
		.no_compensate = hal_dev->no_compensate,
		.expected_freq = spi_cfg->frequency,
		.duty_cycle = cfg->duty_cycle == 0 ? 128 : cfg->duty_cycle,
		.input_delay_ns = cfg->input_delay_ns,
		.use_gpio = !cfg->use_iomux,
		.clk_src_hz = data->clock_source_hz,
	};

	spi_hal_cal_clock_conf(&timing_param, &freq, &hal_dev->timing_conf);

	data->trans_config.dummy_bits = hal_dev->timing_conf.timing_dummy;

	hal_dev->tx_lsbfirst = spi_cfg->operation & SPI_TRANSFER_LSB ? 1 : 0;
	hal_dev->rx_lsbfirst = spi_cfg->operation & SPI_TRANSFER_LSB ?
									1 : 0;

	data->trans_config.line_mode.data_lines = spi_esp32_get_line_mode(spi_cfg->operation);

	/* multiline for command and address not supported */
	data->trans_config.line_mode.addr_lines = 1;
	data->trans_config.line_mode.cmd_lines = 1;

	/* SPI mode */
	hal_dev->mode = 0;
	if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) {
		hal_dev->mode = BIT(0);
	}

	if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) {
		hal_dev->mode |= BIT(1);
	}

	/* Chip select setup and hold times */
	/* GPIO CS have their own delay parameter*/
	if (!spi_cs_is_gpio(spi_cfg)) {
		hal_dev->cs_hold = cfg->cs_hold;
		hal_dev->cs_setup = cfg->cs_setup;
	}

	spi_hal_setup_device(hal, hal_dev);

	/* Workaround to handle default state of MISO and MOSI lines */
#ifndef CONFIG_SOC_SERIES_ESP32
	if (cfg->line_idle_low) {
		hw->ctrl.d_pol = 0;
		hw->ctrl.q_pol = 0;
	} else {
		hw->ctrl.d_pol = 1;
		hw->ctrl.q_pol = 1;
	}
#endif

	/*
	 * Workaround for ESP32S3 and ESP32Cx SoC. This dummy transaction is needed to sync CLK and
	 * software controlled CS when SPI is in mode 3
	 */
#if defined(CONFIG_SOC_SERIES_ESP32S3) || defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32C6)
	if (ctx->num_cs_gpios && (hal_dev->mode & (SPI_MODE_CPOL | SPI_MODE_CPHA))) {
		spi_esp32_transfer(dev);
	}
#endif

	return 0;
}

/* Frame size in bytes from the operation word size; clamps unsupported
 * sizes (0 or > 4 bytes) to 1 with a warning.
 */
static inline uint8_t spi_esp32_get_frame_size(const struct spi_config *spi_cfg)
{
	uint8_t dfs = SPI_WORD_SIZE_GET(spi_cfg->operation);

	dfs /= 8;

	if ((dfs == 0) || (dfs > 4)) {
		LOG_WRN("Unsupported dfs, 1-byte size will be used");
		dfs = 1;
	}

	return dfs;
}

/* Common transceive path: lock the context, configure, set up buffers and CS,
 * then run interrupt-driven (enable ints, ISR completes) or polled inline.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs, bool asynchronous,
		      spi_callback_t cb, void *userdata)
{
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;
	int ret;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_SPI_ESP32_INTERRUPT
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif

	spi_context_lock(&data->ctx, asynchronous, cb,
userdata, spi_cfg); ret = spi_esp32_configure(dev, spi_cfg); if (ret) { goto done; } data->dfs = spi_esp32_get_frame_size(spi_cfg); spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, data->dfs); spi_context_cs_control(&data->ctx, true); #ifdef CONFIG_SPI_ESP32_INTERRUPT spi_ll_enable_int(cfg->spi); spi_ll_set_int_stat(cfg->spi); #else do { spi_esp32_transfer(dev); } while (spi_esp32_transfer_ongoing(data)); spi_esp32_complete(dev, data, cfg->spi, 0); #endif /* CONFIG_SPI_ESP32_INTERRUPT */ done: spi_context_release(&data->ctx, ret); return ret; } static int spi_esp32_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_esp32_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int spi_esp32_release(const struct device *dev, const struct spi_config *config) { struct spi_esp32_data *data = dev->data; spi_context_unlock_unconditionally(&data->ctx); return 0; } static const struct spi_driver_api spi_api = { .transceive = spi_esp32_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_esp32_transceive_async, #endif .release = spi_esp32_release }; #ifdef CONFIG_SOC_SERIES_ESP32 #define GET_AS_CS(idx) .as_cs = DT_INST_PROP(idx, clk_as_cs), #else #define GET_AS_CS(idx) #endif #define ESP32_SPI_INIT(idx) \ \ PINCTRL_DT_INST_DEFINE(idx); \ \ static struct spi_esp32_data spi_data_##idx = { \ SPI_CONTEXT_INIT_LOCK(spi_data_##idx, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_data_##idx, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(idx), ctx) \ .hal = { \ .hw = (spi_dev_t *)DT_INST_REG_ADDR(idx), \ }, \ .dev_config = { \ .half_duplex = 
DT_INST_PROP(idx, half_duplex), \ GET_AS_CS(idx) \ .positive_cs = DT_INST_PROP(idx, positive_cs), \ .no_compensate = DT_INST_PROP(idx, dummy_comp), \ .sio = DT_INST_PROP(idx, sio) \ } \ }; \ \ static const struct spi_esp32_config spi_config_##idx = { \ .spi = (spi_dev_t *)DT_INST_REG_ADDR(idx), \ \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)), \ .duty_cycle = 0, \ .input_delay_ns = 0, \ .irq_source = DT_INST_IRQN(idx), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(idx, offset), \ .use_iomux = DT_INST_PROP(idx, use_iomux), \ .dma_enabled = DT_INST_PROP(idx, dma_enabled), \ .dma_clk_src = DT_INST_PROP(idx, dma_clk), \ .dma_host = DT_INST_PROP(idx, dma_host), \ .cs_setup = DT_INST_PROP_OR(idx, cs_setup_time, 0), \ .cs_hold = DT_INST_PROP_OR(idx, cs_hold_time, 0), \ .line_idle_low = DT_INST_PROP(idx, line_idle_low), \ .clock_source = SPI_CLK_SRC_DEFAULT, \ }; \ \ DEVICE_DT_INST_DEFINE(idx, spi_esp32_init, \ NULL, &spi_data_##idx, \ &spi_config_##idx, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, &spi_api); DT_INST_FOREACH_STATUS_OKAY(ESP32_SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_esp32_spim.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,469
```c /* * */ #define DT_DRV_COMPAT atmel_sam_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_sam); #include "spi_context.h" #include <errno.h> #include <zephyr/spinlock.h> #include <zephyr/device.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include <zephyr/rtio/rtio.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/util.h> #include <soc.h> #define SAM_SPI_CHIP_SELECT_COUNT 4 /* Number of bytes in transfer before using DMA if available */ #define SAM_SPI_DMA_THRESHOLD 32 /* Device constant configuration parameters */ struct spi_sam_config { Spi *regs; const struct atmel_sam_pmc_config clock_cfg; const struct pinctrl_dev_config *pcfg; bool loopback; #ifdef CONFIG_SPI_SAM_DMA const struct device *dma_dev; const uint32_t dma_tx_channel; const uint32_t dma_tx_perid; const uint32_t dma_rx_channel; const uint32_t dma_rx_perid; #endif /* CONFIG_SPI_SAM_DMA */ }; /* Device run time data */ struct spi_sam_data { struct spi_context ctx; struct k_spinlock lock; #ifdef CONFIG_SPI_RTIO struct rtio *r; /* context for thread calls */ struct mpsc io_q; struct rtio_iodev iodev; struct rtio_iodev_sqe *txn_head; struct rtio_iodev_sqe *txn_curr; struct spi_dt_spec dt_spec; #endif #ifdef CONFIG_SPI_SAM_DMA struct k_sem dma_sem; #endif /* CONFIG_SPI_SAM_DMA */ }; static inline k_spinlock_key_t spi_spin_lock(const struct device *dev) { struct spi_sam_data *data = dev->data; return k_spin_lock(&data->lock); } static inline void spi_spin_unlock(const struct device *dev, k_spinlock_key_t key) { struct spi_sam_data *data = dev->data; k_spin_unlock(&data->lock, key); } static int spi_slave_to_mr_pcs(int slave) { int pcs[SAM_SPI_CHIP_SELECT_COUNT] = {0x0, 0x1, 0x3, 0x7}; /* SPI worked in fixed peripheral mode(SPI_MR.PS = 0) and disabled chip * select decode(SPI_MR.PCSDEC = 0), based on Atmel | SMART ARM-based * Flash MCU 
DATASHEET 40.8.2 SPI Mode Register: * PCS = xxx0 NPCS[3:0] = 1110 * PCS = xx01 NPCS[3:0] = 1101 * PCS = x011 NPCS[3:0] = 1011 * PCS = 0111 NPCS[3:0] = 0111 */ return pcs[slave]; } static int spi_sam_configure(const struct device *dev, const struct spi_config *config) { const struct spi_sam_config *cfg = dev->config; struct spi_sam_data *data = dev->data; Spi *regs = cfg->regs; uint32_t spi_mr = 0U, spi_csr = 0U; uint16_t spi_csr_idx = spi_cs_is_gpio(config) ? 0 : config->slave; int div; if (spi_context_configured(&data->ctx, config)) { return 0; } if (config->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) { /* Slave mode is not implemented. */ return -ENOTSUP; } if (config->slave > (SAM_SPI_CHIP_SELECT_COUNT - 1)) { LOG_ERR("Slave %d is greater than %d", config->slave, SAM_SPI_CHIP_SELECT_COUNT - 1); return -EINVAL; } /* Set master mode, disable mode fault detection, set fixed peripheral * select mode. 
*/ spi_mr |= (SPI_MR_MSTR | SPI_MR_MODFDIS); spi_mr |= SPI_MR_PCS(spi_slave_to_mr_pcs(spi_csr_idx)); if (cfg->loopback) { spi_mr |= SPI_MR_LLB; } if ((config->operation & SPI_MODE_CPOL) != 0U) { spi_csr |= SPI_CSR_CPOL; } if ((config->operation & SPI_MODE_CPHA) == 0U) { spi_csr |= SPI_CSR_NCPHA; } if (SPI_WORD_SIZE_GET(config->operation) != 8) { return -ENOTSUP; } else { spi_csr |= SPI_CSR_BITS(SPI_CSR_BITS_8_BIT); } /* Use the requested or next highest possible frequency */ div = SOC_ATMEL_SAM_MCK_FREQ_HZ / config->frequency; div = CLAMP(div, 1, UINT8_MAX); spi_csr |= SPI_CSR_SCBR(div); regs->SPI_CR = SPI_CR_SPIDIS; /* Disable SPI */ regs->SPI_MR = spi_mr; regs->SPI_CSR[spi_csr_idx] = spi_csr; regs->SPI_CR = SPI_CR_SPIEN; /* Enable SPI */ data->ctx.config = config; return 0; } /* Finish any ongoing writes and drop any remaining read data */ static void spi_sam_finish(Spi *regs) { while ((regs->SPI_SR & SPI_SR_TXEMPTY) == 0) { } while (regs->SPI_SR & SPI_SR_RDRF) { (void)regs->SPI_RDR; } } /* Fast path that transmits a buf */ static void spi_sam_fast_tx(Spi *regs, const uint8_t *tx_buf, const uint32_t tx_buf_len) { const uint8_t *p = tx_buf; const uint8_t *pend = (uint8_t *)tx_buf + tx_buf_len; uint8_t ch; while (p != pend) { ch = *p++; while ((regs->SPI_SR & SPI_SR_TDRE) == 0) { } regs->SPI_TDR = SPI_TDR_TD(ch); } } /* Fast path that reads into a buf */ static void spi_sam_fast_rx(Spi *regs, uint8_t *rx_buf, const uint32_t rx_buf_len) { uint8_t *rx = rx_buf; int len = rx_buf_len; if (len <= 0) { return; } /* Write the first byte */ regs->SPI_TDR = SPI_TDR_TD(0); len--; while (len) { while ((regs->SPI_SR & SPI_SR_TDRE) == 0) { } /* Read byte N+0 from the receive register */ while ((regs->SPI_SR & SPI_SR_RDRF) == 0) { } *rx = (uint8_t)regs->SPI_RDR; rx++; /* Load byte N+1 into the transmit register */ regs->SPI_TDR = SPI_TDR_TD(0); len--; } /* Read the final incoming byte */ while ((regs->SPI_SR & SPI_SR_RDRF) == 0) { } *rx = (uint8_t)regs->SPI_RDR; } /* Fast path 
that writes and reads bufs of the same length */ static void spi_sam_fast_txrx(Spi *regs, const uint8_t *tx_buf, const uint8_t *rx_buf, const uint32_t len) { const uint8_t *tx = tx_buf; const uint8_t *txend = tx_buf + len; uint8_t *rx = (uint8_t *)rx_buf; if (len == 0) { return; } /* * The code below interleaves the transmit writes with the * receive reads to keep the bus fully utilised. The code is * equivalent to: * * Transmit byte 0 * Loop: * - Transmit byte n+1 * - Receive byte n * Receive the final byte */ /* Write the first byte */ regs->SPI_TDR = SPI_TDR_TD(*tx++); while (tx != txend) { while ((regs->SPI_SR & SPI_SR_TDRE) == 0) { } /* Load byte N+1 into the transmit register. TX is * single buffered and we have at most one byte in * flight so skip the DRE check. */ regs->SPI_TDR = SPI_TDR_TD(*tx++); /* Read byte N+0 from the receive register */ while ((regs->SPI_SR & SPI_SR_RDRF) == 0) { } *rx++ = (uint8_t)regs->SPI_RDR; } /* Read the final incoming byte */ while ((regs->SPI_SR & SPI_SR_RDRF) == 0) { } *rx = (uint8_t)regs->SPI_RDR; } #ifdef CONFIG_SPI_SAM_DMA static __aligned(4) uint32_t tx_dummy; static __aligned(4) uint32_t rx_dummy; #ifdef CONFIG_SPI_RTIO static void spi_sam_iodev_complete(const struct device *dev, int status); #endif static void dma_callback(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { ARG_UNUSED(dma_dev); ARG_UNUSED(channel); ARG_UNUSED(status); const struct device *dev = user_data; struct spi_sam_data *drv_data = dev->data; #ifdef CONFIG_SPI_RTIO if (drv_data->txn_head != NULL) { spi_sam_iodev_complete(dev, status); return; } #endif k_sem_give(&drv_data->dma_sem); } /* DMA transceive path */ static int spi_sam_dma_txrx(const struct device *dev, Spi *regs, const uint8_t *tx_buf, const uint8_t *rx_buf, const uint32_t len) { const struct spi_sam_config *drv_cfg = dev->config; struct spi_sam_data *drv_data = dev->data; #ifdef CONFIG_SPI_RTIO bool blocking = drv_data->txn_head == NULL; #else bool blocking = 
true; #endif int res = 0; __ASSERT_NO_MSG(rx_buf != NULL || tx_buf != NULL); struct dma_config rx_dma_cfg = { .source_data_size = 1, .dest_data_size = 1, .block_count = 1, .dma_slot = drv_cfg->dma_rx_perid, .channel_direction = PERIPHERAL_TO_MEMORY, .source_burst_length = 1, .dest_burst_length = 1, .complete_callback_en = true, .dma_callback = NULL, .user_data = (void *)dev, }; uint32_t dest_address, dest_addr_adjust; if (rx_buf != NULL) { dest_address = (uint32_t)rx_buf; dest_addr_adjust = DMA_ADDR_ADJ_INCREMENT; } else { dest_address = (uint32_t)&rx_dummy; dest_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE; } struct dma_block_config rx_block_cfg = { .dest_addr_adj = dest_addr_adjust, .block_size = len, .source_address = (uint32_t)&regs->SPI_RDR, .dest_address = dest_address }; rx_dma_cfg.head_block = &rx_block_cfg; struct dma_config tx_dma_cfg = { .source_data_size = 1, .dest_data_size = 1, .block_count = 1, .dma_slot = drv_cfg->dma_tx_perid, .channel_direction = MEMORY_TO_PERIPHERAL, .source_burst_length = 1, .dest_burst_length = 1, .complete_callback_en = true, .dma_callback = dma_callback, .user_data = (void *)dev, }; uint32_t source_address, source_addr_adjust; if (tx_buf != NULL) { source_address = (uint32_t)tx_buf; source_addr_adjust = DMA_ADDR_ADJ_INCREMENT; } else { source_address = (uint32_t)&tx_dummy; source_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE; } struct dma_block_config tx_block_cfg = { .source_addr_adj = source_addr_adjust, .block_size = len, .source_address = source_address, .dest_address = (uint32_t)&regs->SPI_TDR }; tx_dma_cfg.head_block = &tx_block_cfg; res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_rx_channel, &rx_dma_cfg); if (res != 0) { LOG_ERR("failed to configure SPI DMA RX"); goto out; } res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_tx_channel, &tx_dma_cfg); if (res != 0) { LOG_ERR("failed to configure SPI DMA TX"); goto out; } /* Clocking begins on tx, so start rx first */ res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_rx_channel); if (res != 0) 
{ LOG_ERR("failed to start SPI DMA RX"); goto out; } res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_tx_channel); if (res != 0) { LOG_ERR("failed to start SPI DMA TX"); dma_stop(drv_cfg->dma_dev, drv_cfg->dma_rx_channel); } /* Move up a level or wrap in branch when blocking */ if (blocking) { k_sem_take(&drv_data->dma_sem, K_FOREVER); spi_sam_finish(regs); } else { res = -EWOULDBLOCK; } out: return res; } #endif /* CONFIG_SPI_SAM_DMA */ static inline int spi_sam_rx(const struct device *dev, Spi *regs, uint8_t *rx_buf, uint32_t rx_buf_len) { k_spinlock_key_t key; #ifdef CONFIG_SPI_SAM_DMA const struct spi_sam_config *cfg = dev->config; if (rx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) { key = spi_spin_lock(dev); spi_sam_fast_rx(regs, rx_buf, rx_buf_len); } else { return spi_sam_dma_txrx(dev, regs, NULL, rx_buf, rx_buf_len); } #else key = spi_spin_lock(dev); spi_sam_fast_rx(regs, rx_buf, rx_buf_len); #endif spi_sam_finish(regs); spi_spin_unlock(dev, key); return 0; } static inline int spi_sam_tx(const struct device *dev, Spi *regs, const uint8_t *tx_buf, uint32_t tx_buf_len) { k_spinlock_key_t key; #ifdef CONFIG_SPI_SAM_DMA const struct spi_sam_config *cfg = dev->config; if (tx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) { key = spi_spin_lock(dev); spi_sam_fast_tx(regs, tx_buf, tx_buf_len); } else { return spi_sam_dma_txrx(dev, regs, tx_buf, NULL, tx_buf_len); } #else key = spi_spin_lock(dev); spi_sam_fast_tx(regs, tx_buf, tx_buf_len); #endif spi_sam_finish(regs); spi_spin_unlock(dev, key); return 0; } static inline int spi_sam_txrx(const struct device *dev, Spi *regs, const uint8_t *tx_buf, const uint8_t *rx_buf, uint32_t buf_len) { k_spinlock_key_t key; #ifdef CONFIG_SPI_SAM_DMA const struct spi_sam_config *cfg = dev->config; if (buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) { key = spi_spin_lock(dev); spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len); } else { return spi_sam_dma_txrx(dev, regs, tx_buf, rx_buf, buf_len); } #else 
key = spi_spin_lock(dev); spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len); #endif spi_sam_finish(regs); spi_spin_unlock(dev, key); return 0; } #ifndef CONFIG_SPI_RTIO /* Fast path where every overlapping tx and rx buffer is the same length */ static void spi_sam_fast_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_sam_config *cfg = dev->config; size_t tx_count = 0; size_t rx_count = 0; Spi *regs = cfg->regs; const struct spi_buf *tx = NULL; const struct spi_buf *rx = NULL; if (tx_bufs) { tx = tx_bufs->buffers; tx_count = tx_bufs->count; } if (rx_bufs) { rx = rx_bufs->buffers; rx_count = rx_bufs->count; } while (tx_count != 0 && rx_count != 0) { if (tx->buf == NULL) { spi_sam_rx(dev, regs, rx->buf, rx->len); } else if (rx->buf == NULL) { spi_sam_tx(dev, regs, tx->buf, tx->len); } else if (rx->len == tx->len) { spi_sam_txrx(dev, regs, tx->buf, rx->buf, rx->len); } else { __ASSERT_NO_MSG("Invalid fast transceive configuration"); } tx++; tx_count--; rx++; rx_count--; } for (; tx_count != 0; tx_count--) { spi_sam_tx(dev, regs, tx->buf, tx->len); tx++; } for (; rx_count != 0; rx_count--) { spi_sam_rx(dev, regs, rx->buf, rx->len); rx++; } } static bool spi_sam_transfer_ongoing(struct spi_sam_data *data) { return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx); } static void spi_sam_shift_master(Spi *regs, struct spi_sam_data *data) { uint8_t tx; uint8_t rx; if (spi_context_tx_buf_on(&data->ctx)) { tx = *(uint8_t *)(data->ctx.tx_buf); } else { tx = 0U; } while ((regs->SPI_SR & SPI_SR_TDRE) == 0) { } regs->SPI_TDR = SPI_TDR_TD(tx); spi_context_update_tx(&data->ctx, 1, 1); while ((regs->SPI_SR & SPI_SR_RDRF) == 0) { } rx = (uint8_t)regs->SPI_RDR; if (spi_context_rx_buf_on(&data->ctx)) { *data->ctx.rx_buf = rx; } spi_context_update_rx(&data->ctx, 1, 1); } /* Returns true if the request is suitable for the fast * path. 
Specifically, the bufs are a sequence of: * * - Zero or more RX and TX buf pairs where each is the same length. * - Zero or more trailing RX only bufs * - Zero or more trailing TX only bufs */ static bool spi_sam_is_regular(const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_buf *tx = NULL; const struct spi_buf *rx = NULL; size_t tx_count = 0; size_t rx_count = 0; if (tx_bufs) { tx = tx_bufs->buffers; tx_count = tx_bufs->count; } if (rx_bufs) { rx = rx_bufs->buffers; rx_count = rx_bufs->count; } if (!tx || !rx) { return true; } while (tx_count != 0 && rx_count != 0) { if (tx->len != rx->len) { return false; } tx++; tx_count--; rx++; rx_count--; } return true; } #else static void spi_sam_iodev_complete(const struct device *dev, int status); static void spi_sam_iodev_next(const struct device *dev, bool completion); static void spi_sam_iodev_start(const struct device *dev) { const struct spi_sam_config *cfg = dev->config; struct spi_sam_data *data = dev->data; struct rtio_sqe *sqe = &data->txn_curr->sqe; int ret = 0; switch (sqe->op) { case RTIO_OP_RX: ret = spi_sam_rx(dev, cfg->regs, sqe->rx.buf, sqe->rx.buf_len); break; case RTIO_OP_TX: ret = spi_sam_tx(dev, cfg->regs, sqe->tx.buf, sqe->tx.buf_len); break; case RTIO_OP_TINY_TX: ret = spi_sam_tx(dev, cfg->regs, sqe->tiny_tx.buf, sqe->tiny_tx.buf_len); break; case RTIO_OP_TXRX: ret = spi_sam_txrx(dev, cfg->regs, sqe->txrx.tx_buf, sqe->txrx.rx_buf, sqe->txrx.buf_len); break; default: LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe); struct rtio_iodev_sqe *txn_head = data->txn_head; spi_sam_iodev_next(dev, true); rtio_iodev_sqe_err(txn_head, -EINVAL); ret = 0; } if (ret == 0) { spi_sam_iodev_complete(dev, 0); } } static void spi_sam_iodev_next(const struct device *dev, bool completion) { struct spi_sam_data *data = dev->data; k_spinlock_key_t key = spi_spin_lock(dev); if (!completion && data->txn_curr != NULL) { spi_spin_unlock(dev, key); return; } struct 
mpsc_node *next = mpsc_pop(&data->io_q); if (next != NULL) { struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q); data->txn_head = next_sqe; data->txn_curr = next_sqe; } else { data->txn_head = NULL; data->txn_curr = NULL; } spi_spin_unlock(dev, key); if (data->txn_curr != NULL) { struct spi_dt_spec *spi_dt_spec = data->txn_curr->sqe.iodev->data; struct spi_config *spi_cfg = &spi_dt_spec->config; spi_sam_configure(dev, spi_cfg); spi_context_cs_control(&data->ctx, true); spi_sam_iodev_start(dev); } } static void spi_sam_iodev_complete(const struct device *dev, int status) { struct spi_sam_data *data = dev->data; if (data->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) { data->txn_curr = rtio_txn_next(data->txn_curr); spi_sam_iodev_start(dev); } else { struct rtio_iodev_sqe *txn_head = data->txn_head; spi_context_cs_control(&data->ctx, false); spi_sam_iodev_next(dev, true); rtio_iodev_sqe_ok(txn_head, status); } } static void spi_sam_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe) { struct spi_sam_data *data = dev->data; mpsc_push(&data->io_q, &iodev_sqe->q); spi_sam_iodev_next(dev, false); } #endif static int spi_sam_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_sam_data *data = dev->data; int err = 0; spi_context_lock(&data->ctx, false, NULL, NULL, config); #if CONFIG_SPI_RTIO struct rtio_sqe *sqe; struct rtio_cqe *cqe; struct spi_dt_spec *dt_spec = &data->dt_spec; dt_spec->config = *config; int ret = spi_rtio_copy(data->r, &data->iodev, tx_bufs, rx_bufs, &sqe); if (ret < 0) { err = ret; goto done; } /* Submit request and wait */ rtio_submit(data->r, ret); while (ret > 0) { cqe = rtio_cqe_consume(data->r); if (cqe->result < 0) { err = cqe->result; } rtio_cqe_release(data->r, cqe); ret--; } #else const struct spi_sam_config *cfg = dev->config; err = spi_sam_configure(dev, config); if (err != 0) { goto done; } 
spi_context_cs_control(&data->ctx, true); if (spi_sam_is_regular(tx_bufs, rx_bufs)) { spi_sam_fast_transceive(dev, config, tx_bufs, rx_bufs); } else { spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); do { spi_sam_shift_master(cfg->regs, data); } while (spi_sam_transfer_ongoing(data)); } spi_context_cs_control(&data->ctx, false); #endif done: spi_context_release(&data->ctx, err); return err; } static int spi_sam_transceive_sync(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return spi_sam_transceive(dev, config, tx_bufs, rx_bufs); } #ifdef CONFIG_SPI_ASYNC static int spi_sam_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { /* TODO: implement async transceive */ return -ENOTSUP; } #endif /* CONFIG_SPI_ASYNC */ static int spi_sam_release(const struct device *dev, const struct spi_config *config) { struct spi_sam_data *data = dev->data; spi_context_unlock_unconditionally(&data->ctx); return 0; } static int spi_sam_init(const struct device *dev) { int err; const struct spi_sam_config *cfg = dev->config; struct spi_sam_data *data = dev->data; /* Enable SPI clock in PMC */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&cfg->clock_cfg); err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } #ifdef CONFIG_SPI_SAM_DMA k_sem_init(&data->dma_sem, 0, K_SEM_MAX_LIMIT); #endif #ifdef CONFIG_SPI_RTIO data->dt_spec.bus = dev; data->iodev.api = &spi_iodev_api; data->iodev.data = &data->dt_spec; mpsc_init(&data->io_q); #endif spi_context_unlock_unconditionally(&data->ctx); /* The device will be configured and enabled when transceive * is called. 
*/ return 0; } static const struct spi_driver_api spi_sam_driver_api = { .transceive = spi_sam_transceive_sync, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_sam_transceive_async, #endif #ifdef CONFIG_SPI_RTIO .iodev_submit = spi_sam_iodev_submit, #endif .release = spi_sam_release, }; #define SPI_DMA_INIT(n) \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \ .dma_tx_channel = DT_INST_DMAS_CELL_BY_NAME(n, tx, channel), \ .dma_tx_perid = DT_INST_DMAS_CELL_BY_NAME(n, tx, perid), \ .dma_rx_channel = DT_INST_DMAS_CELL_BY_NAME(n, rx, channel), \ .dma_rx_perid = DT_INST_DMAS_CELL_BY_NAME(n, rx, perid), #ifdef CONFIG_SPI_SAM_DMA #define SPI_SAM_USE_DMA(n) DT_INST_DMAS_HAS_NAME(n, tx) #else #define SPI_SAM_USE_DMA(n) 0 #endif #define SPI_SAM_DEFINE_CONFIG(n) \ static const struct spi_sam_config spi_sam_config_##n = { \ .regs = (Spi *)DT_INST_REG_ADDR(n), \ .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .loopback = DT_INST_PROP(n, loopback), \ COND_CODE_1(SPI_SAM_USE_DMA(n), (SPI_DMA_INIT(n)), ()) \ } #define SPI_SAM_RTIO_DEFINE(n) RTIO_DEFINE(spi_sam_rtio_##n, CONFIG_SPI_SAM_RTIO_SQ_SIZE, \ CONFIG_SPI_SAM_RTIO_SQ_SIZE) #define SPI_SAM_DEVICE_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ SPI_SAM_DEFINE_CONFIG(n); \ COND_CODE_1(CONFIG_SPI_RTIO, (SPI_SAM_RTIO_DEFINE(n)), ()); \ static struct spi_sam_data spi_sam_dev_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_sam_dev_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_sam_dev_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ IF_ENABLED(CONFIG_SPI_RTIO, (.r = &spi_sam_rtio_##n)) \ }; \ DEVICE_DT_INST_DEFINE(n, &spi_sam_init, NULL, \ &spi_sam_dev_data_##n, \ &spi_sam_config_##n, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, &spi_sam_driver_api); DT_INST_FOREACH_STATUS_OKAY(SPI_SAM_DEVICE_INIT) ```
/content/code_sandbox/drivers/spi/spi_sam.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,912
```unknown # Xilinx SPI config SPI_XLNX_AXI_QUADSPI bool "Xilinx AXI Quad SPI driver" default y depends on DT_HAS_XLNX_XPS_SPI_2_00_A_ENABLED select EVENTS help Enable Xilinx AXI Quad SPI v3.2 driver. ```
/content/code_sandbox/drivers/spi/Kconfig.xlnx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```objective-c /* * */ #include "spi_context.h" #define SPI_OC_SIMPLE_DATA(dev) \ ((struct spi_oc_simple_data *) ((dev)->data)) #define SPI_OC_SIMPLE_REG(info, offset) \ ((mem_addr_t) (info->base + \ (offset * CONFIG_SPI_OC_SIMPLE_BUS_WIDTH / 8))) #define SPI_OC_SIMPLE_SPCR(dev) SPI_OC_SIMPLE_REG(dev, 0x0) #define SPI_OC_SIMPLE_SPSR(dev) SPI_OC_SIMPLE_REG(dev, 0x1) #define SPI_OC_SIMPLE_SPDR(dev) SPI_OC_SIMPLE_REG(dev, 0x2) #define SPI_OC_SIMPLE_SPER(dev) SPI_OC_SIMPLE_REG(dev, 0x3) #define SPI_OC_SIMPLE_SPSS(dev) SPI_OC_SIMPLE_REG(dev, 0x4) #define SPI_OC_SIMPLE_SPCR_SPE BIT(6) #define SPI_OC_SIMPLE_SPCR_CPOL BIT(3) #define SPI_OC_SIMPLE_SPCR_CPHA BIT(2) struct spi_oc_simple_cfg { uint32_t base; uint32_t f_sys; }; struct spi_oc_simple_data { struct spi_context ctx; }; ```
/content/code_sandbox/drivers/spi/spi_oc_simple.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
229
```c /* * * * This driver creates fake SPI buses which can contain emulated devices, * implemented by a separate emulation driver. The API between this driver and * its emulators is defined by struct spi_emul_driver_api. */ #define DT_DRV_COMPAT zephyr_spi_emul_controller #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_emul_ctlr); #include <zephyr/device.h> #include <zephyr/drivers/emul.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/spi_emul.h> /** Working data for the device */ struct spi_emul_data { /* List of struct spi_emul associated with the device */ sys_slist_t emuls; /* SPI host configuration */ uint32_t config; }; uint32_t spi_emul_get_config(const struct device *dev) { struct spi_emul_data *data = dev->data; return data->config; } /** * Find an emulator for a SPI bus * * At present only a single emulator is supported on the bus, since we do not * support chip selects, despite there being a chipsel field. It cannot be * implemented until we have a GPIO emulator. 
* * @param dev SPI emulation controller device * @param chipsel Chip-select value * @return emulator to use * @return NULL if not found */ static struct spi_emul *spi_emul_find(const struct device *dev, unsigned int chipsel) { struct spi_emul_data *data = dev->data; sys_snode_t *node; SYS_SLIST_FOR_EACH_NODE(&data->emuls, node) { struct spi_emul *emul; emul = CONTAINER_OF(node, struct spi_emul, node); if (emul->chipsel == chipsel) { return emul; } } return NULL; } static int spi_emul_io(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_emul *emul; const struct spi_emul_api *api; int ret; emul = spi_emul_find(dev, config->slave); if (!emul) { return -EIO; } api = emul->api; __ASSERT_NO_MSG(emul->api); __ASSERT_NO_MSG(emul->api->io); if (emul->mock_api != NULL && emul->mock_api->io != NULL) { ret = emul->mock_api->io(emul->target, config, tx_bufs, rx_bufs); if (ret != -ENOSYS) { return ret; } } return api->io(emul->target, config, tx_bufs, rx_bufs); } /** * @brief This is a no-op stub of the SPI API's `release` method to protect drivers under test * from hitting a segmentation fault when using SPI_LOCK_ON plus spi_release() */ static int spi_emul_release(const struct device *dev, const struct spi_config *config) { ARG_UNUSED(dev); ARG_UNUSED(config); return 0; } /** * Set up a new emulator and add it to the list * * @param dev SPI emulation controller device */ static int spi_emul_init(const struct device *dev) { struct spi_emul_data *data = dev->data; sys_slist_init(&data->emuls); return emul_init_for_bus(dev); } int spi_emul_register(const struct device *dev, struct spi_emul *emul) { struct spi_emul_data *data = dev->data; const char *name = emul->target->dev->name; sys_slist_append(&data->emuls, &emul->node); LOG_INF("Register emulator '%s' at cs %u\n", name, emul->chipsel); return 0; } /* Device instantiation */ static const struct spi_driver_api spi_emul_api = { 
.transceive = spi_emul_io, .release = spi_emul_release, }; #define EMUL_LINK_AND_COMMA(node_id) \ { \ .dev = DEVICE_DT_GET(node_id), \ }, #define SPI_EMUL_INIT(n) \ static const struct emul_link_for_bus emuls_##n[] = { \ DT_FOREACH_CHILD_STATUS_OKAY(DT_DRV_INST(n), EMUL_LINK_AND_COMMA)}; \ static struct emul_list_for_bus spi_emul_cfg_##n = { \ .children = emuls_##n, \ .num_children = ARRAY_SIZE(emuls_##n), \ }; \ static struct spi_emul_data spi_emul_data_##n; \ DEVICE_DT_INST_DEFINE(n, spi_emul_init, NULL, &spi_emul_data_##n, &spi_emul_cfg_##n, \ POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, &spi_emul_api); DT_INST_FOREACH_STATUS_OKAY(SPI_EMUL_INIT) ```
/content/code_sandbox/drivers/spi/spi_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,081
```unknown menuconfig SPI_PW bool "Penwell SPI driver" default y depends on DT_HAS_INTEL_PENWELL_SPI_ENABLED help Enable the Penwell SPI driver. if SPI_PW config SPI_PW_INTERRUPT bool "Penwell SPI Interrupt mode Support" help Enable Interrupt support for the SPI Driver. endif # SPI_PW ```
/content/code_sandbox/drivers/spi/Kconfig.pw
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
75
```c /* * */ #define DT_DRV_COMPAT microchip_mpfs_qspi #include <zephyr/device.h> #include <zephyr/drivers/spi.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(mss_qspi, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" /*MSS QSPI Register offsets */ #define MSS_QSPI_REG_CONTROL (0x00) #define MSS_QSPI_REG_FRAMES (0x04) #define MSS_QSPI_REG_IEN (0x0c) #define MSS_QSPI_REG_STATUS (0x10) #define MSS_QSPI_REG_DIRECT_ACCESS (0x14) #define MSS_QSPI_REG_UPPER_ACCESS (0x18) #define MSS_QSPI_REG_RX_DATA (0x40) #define MSS_QSPI_REG_TX_DATA (0x44) #define MSS_QSPI_REG_X4_RX_DATA (0x48) #define MSS_QSPI_REG_X4_TX_DATA (0x4c) #define MSS_QSPI_REG_FRAMESUP (0x50) /* QSPICR bit definitions */ #define MSS_QSPI_CONTROL_ENABLE BIT(0) #define MSS_QSPI_CONTROL_MASTER BIT(1) #define MSS_QSPI_CONTROL_XIP BIT(2) #define MSS_QSPI_CONTROL_XIPADDR BIT(3) #define MSS_QSPI_CONTROL_CLKIDLE BIT(10) #define MSS_QSPI_CONTROL_SAMPLE_MSK (3 << 11) #define MSS_QSPI_CONTROL_MODE0 BIT(13) #define MSS_QSPI_CONTROL_MODE_EXQUAD (0x6 << 13) #define MSS_QSPI_CONTROL_MODE_EXDUAL (0x2 << 13) #define MSS_QSPI_CONTROL_MODE12_MSK (3 << 14) #define MSS_QSPI_CONTROL_FLAGSX4 BIT(16) #define MSS_QSPI_CONTROL_CLKRATE_MSK (0xf << 24) #define MSS_QSPI_CONTROL_CLKRATE 24 /* QSPIFRAMES bit definitions */ #define MSS_QSPI_FRAMES_TOTALBYTES_MSK (0xffff << 0) #define MSS_QSPI_FRAMES_TOTALBYTES_MSK (0xffff << 0) #define MSS_QSPI_FRAMES_CMDBYTES_MSK (0x1ff << 16) #define MSS_QSPI_FRAMES_CMDBYTES 16 #define MSS_QSPI_FRAMES_QSPI BIT(25) #define MSS_QSPI_FRAMES_IDLE_MSK (0xf << 26) #define MSS_QSPI_FRAMES_FLAGBYTE BIT(30) #define MSS_QSPI_FRAMES_FLAGWORD BIT(31) /* QSPIIEN bit definitions */ #define MSS_QSPI_IEN_TXDONE BIT(0) #define MSS_QSPI_IEN_RXDONE BIT(1) #define MSS_QSPI_IEN_RXAVAILABLE BIT(2) #define MSS_QSPI_IEN_TXAVAILABLE BIT(3) #define MSS_QSPI_IEN_RXFIFOEMPTY BIT(4) #define MSS_QSPI_IEN_TXFIFOFULL BIT(5) #define 
MSS_QSPI_IEN_FLAGSX4 BIT(8) /* QSPIST bit definitions */ #define MSS_QSPI_STATUS_TXDONE BIT(0) #define MSS_QSPI_STATUS_RXDONE BIT(1) #define MSS_QSPI_STATUS_RXAVAILABLE BIT(2) #define MSS_QSPI_STATUS_TXAVAILABLE BIT(3) #define MSS_QSPI_STATUS_RXFIFOEMPTY BIT(4) #define MSS_QSPI_STATUS_TXFIFOFULL BIT(5) #define MSS_QSPI_STATUS_READY BIT(7) #define MSS_QSPI_STATUS_FLAGSX4 BIT(8) /* QSPIDA bit definitions */ #define MSS_QSPI_DA_EN_SSEL BIT(0) #define MSS_QSPI_DA_OP_SSEL BIT(1) #define MSS_QSPI_DA_EN_SCLK BIT(2) #define MSS_QSPI_DA_OP_SCLK BIT(3) #define MSS_QSPI_DA_EN_SDO_MSK (0xf << 4) #define MSS_QSPI_DA_OP_SDO_MSK (0xf << 8) #define MSS_QSPI_DA_OP_SDATA_MSK (0xf << 12) #define MSS_QSPI_DA_IP_SDI_MSK (0xf << 16) #define MSS_QSPI_DA_IP_SCLK BIT(21) #define MSS_QSPI_DA_IP_SSEL BIT(22) #define MSS_QSPI_DA_IDLE BIT(23) #define MSS_QSPI_RXDATA_MSK (0xff << 0) #define MSS_QSPI_TXDATA_MSK (0xff << 0) /* QSPIFRAMESUP bit definitions */ #define MSS_QSPI_FRAMESUP_UP_BYTES_MSK (0xFFFF << 16) #define MSS_QSPI_FRAMESUP_LO_BYTES_MSK (0xFFFF << 0) /* * Private data structure for an SPI slave */ struct mss_qspi_config { mm_reg_t base; void (*irq_config_func)(const struct device *dev); int irq; uint32_t clock_freq; }; /* Device run time data */ struct mss_qspi_data { struct spi_context ctx; }; static inline uint32_t mss_qspi_read(const struct mss_qspi_config *cfg, mm_reg_t offset) { return sys_read32(cfg->base + offset); } static inline void mss_qspi_write(const struct mss_qspi_config *cfg, uint32_t val, mm_reg_t offset) { sys_write32(val, cfg->base + offset); } static void mss_qspi_enable_ints(const struct mss_qspi_config *s) { uint32_t mask = MSS_QSPI_IEN_TXDONE | MSS_QSPI_IEN_RXDONE | MSS_QSPI_IEN_RXAVAILABLE; mss_qspi_write(s, mask, MSS_QSPI_REG_IEN); } static void mss_qspi_disable_ints(const struct mss_qspi_config *s) { uint32_t mask = 0; mss_qspi_write(s, mask, MSS_QSPI_REG_IEN); } static inline void mss_qspi_transmit_x8(const struct device *dev, uint32_t len) { const struct 
mss_qspi_config *s = dev->config; struct mss_qspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t count, skips; skips = mss_qspi_read(s, MSS_QSPI_REG_CONTROL); skips &= ~MSS_QSPI_CONTROL_FLAGSX4; mss_qspi_write(s, skips, MSS_QSPI_REG_CONTROL); for (count = 0; count < len; ++count) { while (mss_qspi_read(s, MSS_QSPI_REG_STATUS) & MSS_QSPI_STATUS_TXFIFOFULL) { ; } if (spi_context_tx_buf_on(ctx)) { mss_qspi_write(s, ctx->tx_buf[0], MSS_QSPI_REG_TX_DATA); spi_context_update_tx(ctx, 1, 1); } } } static inline void mss_qspi_transmit_x32(const struct device *dev, uint32_t len) { const struct mss_qspi_config *s = dev->config; struct mss_qspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t count, ctrl, wdata; ctrl = mss_qspi_read(s, MSS_QSPI_REG_CONTROL); ctrl |= MSS_QSPI_CONTROL_FLAGSX4; mss_qspi_write(s, ctrl, MSS_QSPI_REG_CONTROL); for (count = 0; count < len / 4; ++count) { while (mss_qspi_read(s, MSS_QSPI_REG_STATUS) & MSS_QSPI_STATUS_TXFIFOFULL) { ; } if (spi_context_tx_buf_on(ctx)) { wdata = UNALIGNED_GET((uint32_t *)(ctx->tx_buf)); mss_qspi_write(s, wdata, MSS_QSPI_REG_X4_TX_DATA); spi_context_update_tx(ctx, 1, 4); } } } static inline void mss_qspi_receive_x32(const struct device *dev, uint32_t len) { const struct mss_qspi_config *s = dev->config; struct mss_qspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t count, ctrl, temp; ctrl = mss_qspi_read(s, MSS_QSPI_REG_CONTROL); ctrl |= MSS_QSPI_CONTROL_FLAGSX4; mss_qspi_write(s, ctrl, MSS_QSPI_REG_CONTROL); for (count = 0; count < len / 4; ++count) { while ((mss_qspi_read(s, MSS_QSPI_REG_STATUS) & MSS_QSPI_STATUS_RXFIFOEMPTY)) { ; } if (spi_context_rx_buf_on(ctx)) { temp = mss_qspi_read(s, MSS_QSPI_REG_X4_RX_DATA); UNALIGNED_PUT(temp, (uint32_t *)ctx->rx_buf); spi_context_update_rx(ctx, 1, 4); } } } static inline void mss_qspi_receive_x8(const struct device *dev, uint32_t len) { const struct mss_qspi_config *s = dev->config; struct mss_qspi_data *data 
= dev->data; struct spi_context *ctx = &data->ctx; uint32_t rdata, count; rdata = mss_qspi_read(s, MSS_QSPI_REG_CONTROL); rdata &= ~MSS_QSPI_CONTROL_FLAGSX4; mss_qspi_write(s, rdata, MSS_QSPI_REG_CONTROL); for (count = 0; count < len; ++count) { while (mss_qspi_read(s, MSS_QSPI_REG_STATUS) & MSS_QSPI_STATUS_RXFIFOEMPTY) { ; } if (spi_context_rx_buf_on(ctx)) { rdata = mss_qspi_read(s, MSS_QSPI_REG_RX_DATA); UNALIGNED_PUT(rdata, (uint8_t *)ctx->rx_buf); spi_context_update_rx(ctx, 1, 1); } } } static inline void mss_qspi_config_frames(const struct device *dev, uint32_t total_bytes, uint32_t cmd_bytes, bool x8) { const struct mss_qspi_config *s = dev->config; uint32_t skips; mss_qspi_write(s, (total_bytes & MSS_QSPI_FRAMESUP_UP_BYTES_MSK), MSS_QSPI_REG_FRAMESUP); skips = (total_bytes & MSS_QSPI_FRAMESUP_LO_BYTES_MSK); if (cmd_bytes) { skips |= ((cmd_bytes << MSS_QSPI_FRAMES_CMDBYTES) & MSS_QSPI_FRAMES_CMDBYTES_MSK); } else { skips |= ((total_bytes << MSS_QSPI_FRAMES_CMDBYTES) & MSS_QSPI_FRAMES_CMDBYTES_MSK); } if (mss_qspi_read(s, MSS_QSPI_REG_CONTROL) & MSS_QSPI_CONTROL_MODE0) { skips |= MSS_QSPI_FRAMES_QSPI; } skips &= ~MSS_QSPI_FRAMES_IDLE_MSK; if (x8) { skips |= MSS_QSPI_FRAMES_FLAGBYTE; } else { skips |= MSS_QSPI_FRAMES_FLAGWORD; } mss_qspi_write(s, skips, MSS_QSPI_REG_FRAMES); } static inline void mss_qspi_transmit(const struct device *dev) { const struct mss_qspi_config *s = dev->config; struct mss_qspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t total_byte_cnt, cmd_bytes; cmd_bytes = spi_context_longest_current_buf(ctx); total_byte_cnt = spi_context_total_tx_len(ctx); /* * As per the MSS QSPI IP spec, * The number of command and data bytes are controlled by the frames register * for each SPI sequence. This supports the SPI flash memory read and writes * sequences as below. so configure the cmd and total bytes accordingly. 
* your_sha256_hash----- * TOTAL BYTES | CMD BYTES | What happens | * your_sha256_hash______ * | | | * 1 | 1 | The SPI core will transmit a single byte | * | | and receive data is discarded | * | | | * 1 | 0 | The SPI core will transmit a single byte | * | | and return a single byte | * | | | * 10 | 4 | The SPI core will transmit 4 command | * | | bytes discarding the receive data and | * | | transmits 6 dummy bytes returning the 6 | * | | received bytes and return a single byte | * | | | * 10 | 10 | The SPI core will transmit 10 command | * | | | * 10 | 0 | The SPI core will transmit 10 command | * | | bytes and returning 10 received bytes | * your_sha256_hash______ */ if (!ctx->rx_buf) { if (total_byte_cnt - cmd_bytes) { mss_qspi_config_frames(dev, total_byte_cnt, 0, false); mss_qspi_transmit_x8(dev, cmd_bytes); mss_qspi_transmit_x32(dev, (total_byte_cnt - cmd_bytes)); } else { mss_qspi_config_frames(dev, total_byte_cnt, cmd_bytes, true); mss_qspi_transmit_x8(dev, cmd_bytes); } } else { mss_qspi_config_frames(dev, total_byte_cnt, cmd_bytes, true); mss_qspi_transmit_x8(dev, cmd_bytes); } mss_qspi_enable_ints(s); } static inline void mss_qspi_receive(const struct device *dev) { const struct mss_qspi_config *s = dev->config; struct mss_qspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t rd_bytes, skips, idx, rdata; /* * Point the rx buffer where the actual read data * will be stored */ spi_context_update_rx(ctx, 1, ctx->rx_len); rd_bytes = spi_context_longest_current_buf(ctx); if (rd_bytes) { if (rd_bytes >= 4) { mss_qspi_receive_x32(dev, rd_bytes); } skips = mss_qspi_read(s, MSS_QSPI_REG_CONTROL); skips &= ~MSS_QSPI_CONTROL_FLAGSX4; mss_qspi_write(s, skips, MSS_QSPI_REG_CONTROL); idx = (rd_bytes - (rd_bytes % 4u)); for (; idx < rd_bytes; ++idx) { while (mss_qspi_read(s, MSS_QSPI_REG_STATUS) & MSS_QSPI_STATUS_RXFIFOEMPTY) { ; } if (spi_context_rx_buf_on(ctx)) { rdata = mss_qspi_read(s, MSS_QSPI_REG_RX_DATA); UNALIGNED_PUT(rdata, (uint8_t 
*)ctx->rx_buf); spi_context_update_rx(ctx, 1, 1); } } } } static inline int mss_qspi_clk_gen_set(const struct mss_qspi_config *s, const struct spi_config *spi_cfg) { uint32_t control = mss_qspi_read(s, MSS_QSPI_REG_CONTROL); uint32_t idx, clkrate, val = 0, speed; if (spi_cfg->frequency > s->clock_freq) { speed = s->clock_freq / 2; } for (idx = 1; idx < 16; idx++) { clkrate = s->clock_freq / (2 * idx); if (clkrate <= spi_cfg->frequency) { val = idx; break; } } if (val) { control = mss_qspi_read(s, MSS_QSPI_REG_CONTROL); control &= ~MSS_QSPI_CONTROL_CLKRATE_MSK; control |= (val << MSS_QSPI_CONTROL_CLKRATE); mss_qspi_write(s, control, MSS_QSPI_REG_CONTROL); } else { return -1; } return 0; } static inline int mss_qspi_hw_mode_set(const struct mss_qspi_config *s, uint16_t mode) { uint32_t ctrl = mss_qspi_read(s, MSS_QSPI_REG_CONTROL); if ((mode & SPI_MODE_CPHA) && (mode & SPI_MODE_CPOL)) { /* mode 3 */ ctrl |= MSS_QSPI_CONTROL_CLKIDLE; } else if (!(mode & SPI_MODE_CPHA) && !(mode & SPI_MODE_CPOL)) { /* mode 0 */ ctrl &= ~MSS_QSPI_CONTROL_CLKIDLE; } else { return -1; } if ((mode & SPI_LINES_QUAD)) { /* Quad mode */ ctrl &= ~(MSS_QSPI_CONTROL_MODE0); ctrl |= (MSS_QSPI_CONTROL_MODE_EXQUAD); } else if ((mode & SPI_LINES_DUAL)) { /* Dual mode */ ctrl &= ~(MSS_QSPI_CONTROL_MODE0); ctrl |= (MSS_QSPI_CONTROL_MODE_EXDUAL); } else { /* Normal mode */ ctrl &= ~(MSS_QSPI_CONTROL_MODE0); } mss_qspi_write(s, ctrl, MSS_QSPI_REG_CONTROL); return 0; } static int mss_qspi_release(const struct device *dev, const struct spi_config *config) { struct mss_qspi_data *data = dev->data; const struct mss_qspi_config *cfg = dev->config; uint32_t control = mss_qspi_read(cfg, MSS_QSPI_REG_CONTROL); mss_qspi_disable_ints(cfg); control &= ~MSS_QSPI_CONTROL_ENABLE; mss_qspi_write(cfg, control, MSS_QSPI_REG_CONTROL); spi_context_unlock_unconditionally(&data->ctx); return 0; } static void mss_qspi_interrupt(const struct device *dev) { const struct mss_qspi_config *cfg = dev->config; struct mss_qspi_data 
*data = dev->data; struct spi_context *ctx = &data->ctx; int intfield = mss_qspi_read(cfg, MSS_QSPI_REG_STATUS); int ienfield = mss_qspi_read(cfg, MSS_QSPI_REG_IEN); if ((intfield & ienfield) == 0) { return; } if (intfield & MSS_QSPI_IEN_TXDONE) { mss_qspi_write(cfg, MSS_QSPI_IEN_TXDONE, MSS_QSPI_REG_STATUS); } if (intfield & MSS_QSPI_IEN_RXAVAILABLE) { mss_qspi_write(cfg, MSS_QSPI_IEN_RXAVAILABLE, MSS_QSPI_REG_STATUS); mss_qspi_receive(dev); } if ((intfield & MSS_QSPI_IEN_RXDONE)) { mss_qspi_write(cfg, MSS_QSPI_IEN_RXDONE, MSS_QSPI_REG_STATUS); spi_context_complete(ctx, dev, 0); } if (intfield & MSS_QSPI_IEN_TXAVAILABLE) { mss_qspi_write(cfg, MSS_QSPI_IEN_TXAVAILABLE, MSS_QSPI_REG_STATUS); } if (intfield & MSS_QSPI_IEN_RXFIFOEMPTY) { mss_qspi_write(cfg, MSS_QSPI_IEN_RXFIFOEMPTY, MSS_QSPI_REG_STATUS); } if (intfield & MSS_QSPI_IEN_TXFIFOFULL) { mss_qspi_write(cfg, MSS_QSPI_IEN_TXFIFOFULL, MSS_QSPI_REG_STATUS); } } static int mss_qspi_configure(const struct device *dev, const struct spi_config *spi_cfg) { const struct mss_qspi_config *cfg = dev->config; if (spi_cfg->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode is not supported\n\r"); return -ENOTSUP; } if (spi_cfg->operation & SPI_MODE_LOOP) { LOG_ERR("Loop back mode is not supported\n\r"); return -ENOTSUP; } if (spi_cfg->operation & (SPI_TRANSFER_LSB) || ((IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (spi_cfg->operation & (SPI_LINES_DUAL | SPI_LINES_QUAD | SPI_LINES_OCTAL))))) { LOG_ERR("Unsupported configuration\n\r"); return -ENOTSUP; } if (mss_qspi_clk_gen_set(cfg, spi_cfg)) { LOG_ERR("can't set clk divider\n"); return -EINVAL; } return 0; } static int mss_qspi_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool async, spi_callback_t cb, void *userdata) { const struct mss_qspi_config *config = dev->config; struct mss_qspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; int ret = 0; 
spi_context_lock(ctx, async, cb, userdata, spi_cfg); ret = mss_qspi_configure(dev, spi_cfg); if (ret) { goto out; } mss_qspi_hw_mode_set(config, spi_cfg->operation); spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); mss_qspi_transmit(dev); ret = spi_context_wait_for_completion(ctx); out: spi_context_release(ctx, ret); mss_qspi_disable_ints(config); return ret; } static int mss_qspi_transceive_blocking(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return mss_qspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int mss_qspi_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return mss_qspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int mss_qspi_init(const struct device *dev) { const struct mss_qspi_config *cfg = dev->config; struct mss_qspi_data *data = dev->data; unsigned int ret = 0; uint32_t control = 0; cfg->irq_config_func(dev); control &= ~(MSS_QSPI_CONTROL_SAMPLE_MSK); control &= ~(MSS_QSPI_CONTROL_MODE0); control |= (MSS_QSPI_CONTROL_CLKRATE_MSK); control &= ~(MSS_QSPI_CONTROL_XIP); control |= (MSS_QSPI_CONTROL_CLKIDLE | MSS_QSPI_CONTROL_ENABLE); mss_qspi_write(cfg, control, MSS_QSPI_REG_CONTROL); mss_qspi_disable_ints(cfg); spi_context_unlock_unconditionally(&data->ctx); return ret; } static const struct spi_driver_api mss_qspi_driver_api = { .transceive = mss_qspi_transceive_blocking, #ifdef CONFIG_SPI_ASYNC .transceive_async = mss_qspi_transceive_async, #endif /* CONFIG_SPI_ASYNC */ .release = mss_qspi_release, }; #define MSS_QSPI_INIT(n) \ static void mss_qspi_config_func_##n(const struct device *dev); \ \ static const struct mss_qspi_config mss_qspi_config_##n = { \ .base = DT_INST_REG_ADDR(n), \ .irq_config_func = 
mss_qspi_config_func_##n, \ .clock_freq = DT_INST_PROP(n, clock_frequency), \ }; \ \ static struct mss_qspi_data mss_qspi_data_##n = { \ SPI_CONTEXT_INIT_LOCK(mss_qspi_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(mss_qspi_data_##n, ctx), \ }; \ \ DEVICE_DT_INST_DEFINE(n, mss_qspi_init, \ NULL, \ &mss_qspi_data_##n, \ &mss_qspi_config_##n, POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &mss_qspi_driver_api); \ \ static void mss_qspi_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ mss_qspi_interrupt, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } DT_INST_FOREACH_STATUS_OKAY(MSS_QSPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_mchp_mss_qspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,506
```c
/*
 *
 */
#define DT_DRV_COMPAT snps_designware_spi

/* spi_dw.c - Designware SPI driver implementation */

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_dw);

#include <errno.h>

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>

#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/pm/device.h>

#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/util.h>

#ifdef CONFIG_IOAPIC
#include <zephyr/drivers/interrupt_controller/ioapic.h>
#endif

#include <zephyr/drivers/spi.h>
#include <zephyr/irq.h>

#include "spi_dw.h"
#include "spi_context.h"

#ifdef CONFIG_PINCTRL
#include <zephyr/drivers/pinctrl.h>
#endif

/* True when CONFIG_SPI_SLAVE is on and this context was configured as slave. */
static inline bool spi_dw_is_slave(struct spi_dw_data *spi)
{
	return (IS_ENABLED(CONFIG_SPI_SLAVE) &&
		spi_context_is_slave(&spi->ctx));
}

/*
 * Finish (or bail out of) the current transfer: returns early while data
 * is still pending and no error occurred; otherwise waits for the shift
 * logic to idle, masks interrupts, disables the controller, deasserts CS,
 * and signals completion to the context.
 */
static void completed(const struct device *dev, int error)
{
	struct spi_dw_data *spi = dev->data;
	struct spi_context *ctx = &spi->ctx;

	if (error) {
		goto out;
	}

	if (spi_context_tx_on(&spi->ctx) ||
	    spi_context_rx_on(&spi->ctx)) {
		return;
	}

out:
	/* need to give time for FIFOs to drain before issuing more commands */
	while (test_bit_sr_busy(dev)) {
	}

	/* Disabling interrupts */
	write_imr(dev, DW_SPI_IMR_MASK);
	/* Disabling the controller */
	clear_bit_ssienr(dev);

	if (!spi_dw_is_slave(spi)) {
		if (spi_cs_is_gpio(ctx->config)) {
			spi_context_cs_control(ctx, false);
		} else {
			/* hw chip-select: clear the slave-enable register */
			write_ser(dev, 0);
		}
	}

	LOG_DBG("SPI transaction completed %s error",
		error ? "with" : "without");

	spi_context_complete(&spi->ctx, dev, error);
}

/*
 * Fill the TX FIFO from the context's TX buffers (or with zero padding
 * when only RX remains). `fifo_diff` tracks frames pushed but not yet
 * pulled, bounding TX so the RX FIFO cannot overflow.
 */
static void push_data(const struct device *dev)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;
	uint32_t data = 0U;
	uint32_t f_tx;

	if (spi_context_rx_on(&spi->ctx)) {
		f_tx = info->fifo_depth - read_txflr(dev) -
			read_rxflr(dev);
		if ((int)f_tx < 0) {
			f_tx = 0U; /* if rx-fifo is full, hold off tx */
		}
	} else {
		f_tx = info->fifo_depth - read_txflr(dev);
	}

	while (f_tx) {
		if (spi_context_tx_buf_on(&spi->ctx)) {
			/* dfs = bytes per frame; buffers may be unaligned */
			switch (spi->dfs) {
			case 1:
				data = UNALIGNED_GET((uint8_t *)
						     (spi->ctx.tx_buf));
				break;
			case 2:
				data = UNALIGNED_GET((uint16_t *)
						     (spi->ctx.tx_buf));
				break;
			case 4:
				data = UNALIGNED_GET((uint32_t *)
						     (spi->ctx.tx_buf));
				break;
			}
		} else if (spi_context_rx_on(&spi->ctx)) {
			/* No need to push more than necessary */
			if ((int)(spi->ctx.rx_len - spi->fifo_diff) <= 0) {
				break;
			}

			data = 0U;
		} else if (spi_context_tx_on(&spi->ctx)) {
			data = 0U;
		} else {
			/* Nothing to push anymore */
			break;
		}

		write_dr(dev, data);
		spi_context_update_tx(&spi->ctx, spi->dfs, 1);
		spi->fifo_diff++;

		f_tx--;
	}

	if (!spi_context_tx_on(&spi->ctx)) {
		/* prevents any further interrupts demanding TX fifo fill */
		write_txftlr(dev, 0);
	}
}

/*
 * Drain the RX FIFO into the context's RX buffers and lower the RX
 * threshold near the end of the transfer so the final short batch
 * still raises an interrupt.
 */
static void pull_data(const struct device *dev)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;

	while (read_rxflr(dev)) {
		uint32_t data = read_dr(dev);

		if (spi_context_rx_buf_on(&spi->ctx)) {
			switch (spi->dfs) {
			case 1:
				UNALIGNED_PUT(data, (uint8_t *)spi->ctx.rx_buf);
				break;
			case 2:
				UNALIGNED_PUT(data, (uint16_t *)spi->ctx.rx_buf);
				break;
			case 4:
				UNALIGNED_PUT(data, (uint32_t *)spi->ctx.rx_buf);
				break;
			}
		}

		spi_context_update_rx(&spi->ctx, spi->dfs, 1);
		spi->fifo_diff--;
	}

	if (!spi->ctx.rx_len && spi->ctx.tx_len < info->fifo_depth) {
		write_rxftlr(dev, spi->ctx.tx_len - 1);
	} else if (read_rxftlr(dev) >= spi->ctx.rx_len) {
		write_rxftlr(dev, spi->ctx.rx_len - 1);
	}
}

/*
 * Validate the spi_config against this instance's capabilities and
 * program CTRLR0 (word size, CPOL/CPHA, loopback) and, for master,
 * the baud-rate divider. No-op when the config is already installed.
 */
static int spi_dw_configure(const struct device *dev,
			    struct spi_dw_data *spi,
			    const struct spi_config *config)
{
	const struct spi_dw_config *info = dev->config;
	uint32_t ctrlr0 = 0U;

	LOG_DBG("%p (prev %p)", config, spi->ctx.config);

	if (spi_context_configured(&spi->ctx, config)) {
		/* Nothing to do */
		return 0;
	}

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	/* Verify if requested op mode is relevant to this controller */
	if (config->operation & SPI_OP_MODE_SLAVE) {
		if (!(info->serial_target)) {
			LOG_ERR("Slave mode not supported");
			return -ENOTSUP;
		}
	} else {
		if (info->serial_target) {
			LOG_ERR("Master mode not supported");
			return -ENOTSUP;
		}
	}

	if ((config->operation & SPI_TRANSFER_LSB) ||
	    (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	     (config->operation & (SPI_LINES_DUAL |
				   SPI_LINES_QUAD | SPI_LINES_OCTAL)))) {
		LOG_ERR("Unsupported configuration");
		return -EINVAL;
	}

	if (info->max_xfer_size < SPI_WORD_SIZE_GET(config->operation)) {
		LOG_ERR("Max xfer size is %u, word size of %u not allowed",
			info->max_xfer_size, SPI_WORD_SIZE_GET(config->operation));
		return -ENOTSUP;
	}

	/* Word size */
	if (info->max_xfer_size == 32) {
		ctrlr0 |= DW_SPI_CTRLR0_DFS_32(SPI_WORD_SIZE_GET(config->operation));
	} else {
		ctrlr0 |= DW_SPI_CTRLR0_DFS_16(SPI_WORD_SIZE_GET(config->operation));
	}

	/* Determine how many bytes are required per-frame */
	spi->dfs = SPI_WS_TO_DFS(SPI_WORD_SIZE_GET(config->operation));

	/* SPI mode */
	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) {
		ctrlr0 |= DW_SPI_CTRLR0_SCPOL;
	}

	if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) {
		ctrlr0 |= DW_SPI_CTRLR0_SCPH;
	}

	if (SPI_MODE_GET(config->operation) & SPI_MODE_LOOP) {
		ctrlr0 |= DW_SPI_CTRLR0_SRL;
	}

	/* Installing the configuration */
	write_ctrlr0(dev, ctrlr0);

	/* At this point, it's mandatory to set this on the context! */
	spi->ctx.config = config;

	if (!spi_dw_is_slave(spi)) {
		/* Baud rate and Slave select, for master only */
		write_baudr(dev, SPI_DW_CLK_DIVIDER(info->clock_frequency,
						    config->frequency));
	}

	if (spi_dw_is_slave(spi)) {
		LOG_DBG("Installed slave config %p:"
			" ws/dfs %u/%u, mode %u/%u/%u",
			config,
			SPI_WORD_SIZE_GET(config->operation), spi->dfs,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPOL) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPHA) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_LOOP) ? 1 : 0);
	} else {
		LOG_DBG("Installed master config %p: freq %uHz (div = %u),"
			" ws/dfs %u/%u, mode %u/%u/%u, slave %u",
			config, config->frequency,
			SPI_DW_CLK_DIVIDER(info->clock_frequency,
					   config->frequency),
			SPI_WORD_SIZE_GET(config->operation), spi->dfs,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPOL) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_CPHA) ? 1 : 0,
			(SPI_MODE_GET(config->operation) &
			 SPI_MODE_LOOP) ? 1 : 0,
			config->slave);
	}

	return 0;
}

/*
 * Compute the NDF (number of data frames - 1) for RX-only master
 * transfers from the total RX length. Returns UINT32_MAX when the
 * total would overflow the 16-bit NDF field.
 */
static uint32_t spi_dw_compute_ndf(const struct spi_buf *rx_bufs,
				   size_t rx_count, uint8_t dfs)
{
	uint32_t len = 0U;

	for (; rx_count; rx_bufs++, rx_count--) {
		if (len > (UINT16_MAX - rx_bufs->len)) {
			goto error;
		}

		len += rx_bufs->len;
	}

	if (len) {
		return (len / dfs) - 1;
	}
error:
	return UINT32_MAX;
}

/*
 * Program the TX FIFO threshold: half depth by default; in slave mode
 * it is lowered (or zeroed) when the TX length is shorter than that.
 */
static void spi_dw_update_txftlr(const struct device *dev,
				 struct spi_dw_data *spi)
{
	const struct spi_dw_config *info = dev->config;
	uint32_t dw_spi_txftlr_dflt = (info->fifo_depth * 1) / 2;
	uint32_t reg_data = dw_spi_txftlr_dflt;

	if (spi_dw_is_slave(spi)) {
		if (!spi->ctx.tx_len) {
			reg_data = 0U;
		} else if (spi->ctx.tx_len < dw_spi_txftlr_dflt) {
			reg_data = spi->ctx.tx_len - 1;
		}
	}

	LOG_DBG("TxFTLR: %u", reg_data);

	write_txftlr(dev, reg_data);
}

/*
 * Common transceive path for sync and async: lock the context, apply
 * configuration, select the transfer mode (TX/RX/TX_RX) and NDF, set
 * FIFO thresholds, unmask interrupts, assert CS, then enable the
 * controller; the ISR drives the rest via push_data()/pull_data().
 */
static int transceive(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;
	uint32_t tmod = DW_SPI_CTRLR0_TMOD_TX_RX;
	uint32_t dw_spi_rxftlr_dflt = (info->fifo_depth * 5) / 8;
	uint32_t reg_data;
	int ret;

	spi_context_lock(&spi->ctx, asynchronous, cb, userdata, config);

#ifdef CONFIG_PM_DEVICE
	if (!pm_device_is_busy(dev)) {
		pm_device_busy_set(dev);
	}
#endif /* CONFIG_PM_DEVICE */

	/* Configure */
	ret = spi_dw_configure(dev, spi, config);
	if (ret) {
		goto out;
	}

	if (!rx_bufs || !rx_bufs->buffers) {
		tmod = DW_SPI_CTRLR0_TMOD_TX;
	} else if (!tx_bufs || !tx_bufs->buffers) {
		tmod = DW_SPI_CTRLR0_TMOD_RX;
	}

	/* ToDo: add a way to determine EEPROM mode */

	if (tmod >= DW_SPI_CTRLR0_TMOD_RX &&
	    !spi_dw_is_slave(spi)) {
		reg_data = spi_dw_compute_ndf(rx_bufs->buffers,
					      rx_bufs->count,
					      spi->dfs);
		if (reg_data == UINT32_MAX) {
			ret = -EINVAL;
			goto out;
		}

		write_ctrlr1(dev, reg_data);
	} else {
		write_ctrlr1(dev, 0);
	}

	if (spi_dw_is_slave(spi)) {
		/* Enabling MISO line relevantly */
		if (tmod == DW_SPI_CTRLR0_TMOD_RX) {
			tmod |= DW_SPI_CTRLR0_SLV_OE;
		} else {
			tmod &= ~DW_SPI_CTRLR0_SLV_OE;
		}
	}

	/* Updating TMOD in CTRLR0 register */
	reg_data = read_ctrlr0(dev);
	reg_data &= ~DW_SPI_CTRLR0_TMOD_RESET;
	reg_data |= tmod;

	write_ctrlr0(dev, reg_data);

	/* Set buffers info */
	spi_context_buffers_setup(&spi->ctx, tx_bufs, rx_bufs, spi->dfs);

	spi->fifo_diff = 0U;

	/* Tx Threshold */
	spi_dw_update_txftlr(dev, spi);

	/* Does Rx thresholds needs to be lower? */
	reg_data = dw_spi_rxftlr_dflt;
	if (spi_dw_is_slave(spi)) {
		if (spi->ctx.rx_len &&
		    spi->ctx.rx_len < dw_spi_rxftlr_dflt) {
			reg_data = spi->ctx.rx_len - 1;
		}
	} else {
		if (spi->ctx.rx_len && spi->ctx.rx_len < info->fifo_depth) {
			reg_data = spi->ctx.rx_len - 1;
		}
	}

	/* Rx Threshold */
	write_rxftlr(dev, reg_data);

	/* Enable interrupts */
	reg_data = !rx_bufs ?
		DW_SPI_IMR_UNMASK & DW_SPI_IMR_MASK_RX :
		DW_SPI_IMR_UNMASK;
	write_imr(dev, reg_data);

	if (!spi_dw_is_slave(spi)) {
		/* if cs is not defined as gpio, use hw cs */
		if (spi_cs_is_gpio(config)) {
			spi_context_cs_control(&spi->ctx, true);
		} else {
			write_ser(dev, BIT(config->slave));
		}
	}

	LOG_DBG("Enabling controller");
	set_bit_ssienr(dev);

	ret = spi_context_wait_for_completion(&spi->ctx);

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(&spi->ctx) && !ret) {
		ret = spi->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

out:
	spi_context_release(&spi->ctx, ret);

	pm_device_busy_clear(dev);

	return ret;
}

/* spi_driver_api.transceive (blocking). */
static int spi_dw_transceive(const struct device *dev,
			     const struct spi_config *config,
			     const struct spi_buf_set *tx_bufs,
			     const struct spi_buf_set *rx_bufs)
{
	LOG_DBG("%p, %p, %p", dev, tx_bufs, rx_bufs);

	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
/* spi_driver_api.transceive_async. */
static int spi_dw_transceive_async(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs,
				   spi_callback_t cb,
				   void *userdata)
{
	LOG_DBG("%p, %p, %p, %p, %p", dev, tx_bufs, rx_bufs, cb, userdata);

	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

/* spi_driver_api.release: only the owner of the current config may unlock. */
static int spi_dw_release(const struct device *dev,
			  const struct spi_config *config)
{
	struct spi_dw_data *spi = dev->data;

	if (!spi_context_configured(&spi->ctx, config)) {
		return -EINVAL;
	}

	spi_context_unlock_unconditionally(&spi->ctx);

	return 0;
}

/*
 * Shared ISR for all interrupt lines: maps any error bit to -EIO,
 * otherwise services RX then TX, clears the sources, and lets
 * completed() decide whether the transfer is finished.
 */
void spi_dw_isr(const struct device *dev)
{
	uint32_t int_status;
	int error;

	int_status = read_isr(dev);

	LOG_DBG("SPI %p int_status 0x%x - (tx: %d, rx: %d)", dev, int_status,
		read_txflr(dev), read_rxflr(dev));

	if (int_status & DW_SPI_ISR_ERRORS_MASK) {
		error = -EIO;
		goto out;
	}

	error = 0;

	if (int_status & DW_SPI_ISR_RXFIS) {
		pull_data(dev);
	}

	if (int_status & DW_SPI_ISR_TXEIS) {
		push_data(dev);
	}

out:
	clear_interrupts(dev);
	completed(dev, error);
}

static const struct spi_driver_api dw_spi_api = {
	.transceive = spi_dw_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_dw_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
	.release = spi_dw_release,
};

/*
 * One-time device init: apply pinctrl, map MMIO, hook IRQs, mask
 * interrupts, disable the controller, and configure all CS GPIOs.
 */
int spi_dw_init(const struct device *dev)
{
	int err;
	const struct spi_dw_config *info = dev->config;
	struct spi_dw_data *spi = dev->data;

#ifdef CONFIG_PINCTRL
	pinctrl_apply_state(info->pcfg, PINCTRL_STATE_DEFAULT);
#endif

	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);

	info->config_func();

	/* Masking interrupt and making sure controller is disabled */
	write_imr(dev, DW_SPI_IMR_MASK);
	clear_bit_ssienr(dev);

	LOG_DBG("Designware SPI driver initialized on device: %p", dev);

	err = spi_context_cs_configure_all(&spi->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&spi->ctx);

	return 0;
}

/* Instance has 3 IRQ lines: rx_avail, tx_req, and one combined error line. */
#define SPI_CFG_IRQS_SINGLE_ERR_LINE(inst)				\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, rx_avail, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, tx_req, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, tx_req, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, err_int, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, err_int, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq));		\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, tx_req, irq));		\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, err_int, irq));

/* Instance exposes each error condition on its own IRQ line. */
#define SPI_CFG_IRQS_MULTIPLE_ERR_LINES(inst)				\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, rx_avail, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, tx_req, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, tx_req, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, txo_err, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, txo_err, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rxo_err, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, rxo_err, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rxu_err, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, rxu_err, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, mst_err, irq),		\
		    DT_INST_IRQ_BY_NAME(inst, mst_err, priority),	\
		    spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
		    0);							\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rx_avail, irq));		\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, tx_req, irq));		\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, txo_err, irq));		\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rxo_err, irq));		\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, rxu_err, irq));		\
	irq_enable(DT_INST_IRQ_BY_NAME(inst, mst_err, irq));

/* Pick the IRQ wiring variant based on how many IRQs the DT node declares. */
#define SPI_DW_IRQ_HANDLER(inst)					\
void spi_dw_irq_config_##inst(void)					\
{									\
	COND_CODE_1(IS_EQ(DT_NUM_IRQS(DT_DRV_INST(inst)), 1),		\
		(IRQ_CONNECT(DT_INST_IRQN(inst),			\
			DT_INST_IRQ(inst, priority),			\
			spi_dw_isr, DEVICE_DT_INST_GET(inst),		\
			0);						\
		irq_enable(DT_INST_IRQN(inst));),			\
		(COND_CODE_1(IS_EQ(DT_NUM_IRQS(DT_DRV_INST(inst)), 3),	\
			(SPI_CFG_IRQS_SINGLE_ERR_LINE(inst)),		\
			(SPI_CFG_IRQS_MULTIPLE_ERR_LINES(inst)))))	\
}

/* Per-instance data/config/device definition driven by devicetree. */
#define SPI_DW_INIT(inst)						\
	IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);))	\
	SPI_DW_IRQ_HANDLER(inst);					\
	static struct spi_dw_data spi_dw_data_##inst = {		\
		SPI_CONTEXT_INIT_LOCK(spi_dw_data_##inst, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_dw_data_##inst, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), ctx)	\
	};								\
	static const struct spi_dw_config spi_dw_config_##inst = {	\
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(inst)),		\
		.clock_frequency = COND_CODE_1(				\
			DT_NODE_HAS_PROP(DT_INST_PHANDLE(inst, clocks),	\
					 clock_frequency),		\
			(DT_INST_PROP_BY_PHANDLE(inst, clocks,		\
						 clock_frequency)),	\
			(DT_INST_PROP(inst, clock_frequency))),		\
		.config_func = spi_dw_irq_config_##inst,		\
		.serial_target = DT_INST_PROP(inst, serial_target),	\
		.fifo_depth = DT_INST_PROP(inst, fifo_depth),		\
		.max_xfer_size = DT_INST_PROP(inst, max_xfer_size),	\
		IF_ENABLED(CONFIG_PINCTRL,				\
			(.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),)) \
		COND_CODE_1(DT_INST_PROP(inst, aux_reg),		\
			(.read_func = aux_reg_read,			\
			.write_func = aux_reg_write,			\
			.set_bit_func = aux_reg_set_bit,		\
			.clear_bit_func = aux_reg_clear_bit,		\
			.test_bit_func = aux_reg_test_bit,),		\
			(.read_func = reg_read,				\
			.write_func = reg_write,			\
			.set_bit_func = reg_set_bit,			\
			.clear_bit_func = reg_clear_bit,		\
			.test_bit_func = reg_test_bit,))		\
	};								\
	DEVICE_DT_INST_DEFINE(inst,					\
		spi_dw_init,						\
		NULL,							\
		&spi_dw_data_##inst,					\
		&spi_dw_config_##inst,					\
		POST_KERNEL,						\
		CONFIG_SPI_INIT_PRIORITY,				\
		&dw_spi_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_DW_INIT)
```
/content/code_sandbox/drivers/spi/spi_dw.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,229
```objective-c /* * */ /** * @file * @brief Private API for SPI drivers */ #ifndef ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_ #define ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_ #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #include <zephyr/kernel.h> #ifdef __cplusplus extern "C" { #endif enum spi_ctx_runtime_op_mode { SPI_CTX_RUNTIME_OP_MODE_MASTER = BIT(0), SPI_CTX_RUNTIME_OP_MODE_SLAVE = BIT(1), }; struct spi_context { const struct spi_config *config; const struct spi_config *owner; const struct gpio_dt_spec *cs_gpios; size_t num_cs_gpios; struct k_sem lock; struct k_sem sync; int sync_status; #ifdef CONFIG_SPI_ASYNC spi_callback_t callback; void *callback_data; bool asynchronous; #endif /* CONFIG_SPI_ASYNC */ const struct spi_buf *current_tx; size_t tx_count; const struct spi_buf *current_rx; size_t rx_count; const uint8_t *tx_buf; size_t tx_len; uint8_t *rx_buf; size_t rx_len; #ifdef CONFIG_SPI_SLAVE int recv_frames; #endif /* CONFIG_SPI_SLAVE */ }; #define SPI_CONTEXT_INIT_LOCK(_data, _ctx_name) \ ._ctx_name.lock = Z_SEM_INITIALIZER(_data._ctx_name.lock, 0, 1) #define SPI_CONTEXT_INIT_SYNC(_data, _ctx_name) \ ._ctx_name.sync = Z_SEM_INITIALIZER(_data._ctx_name.sync, 0, 1) #define SPI_CONTEXT_CS_GPIO_SPEC_ELEM(_node_id, _prop, _idx) \ GPIO_DT_SPEC_GET_BY_IDX(_node_id, _prop, _idx), #define SPI_CONTEXT_CS_GPIOS_FOREACH_ELEM(_node_id) \ DT_FOREACH_PROP_ELEM(_node_id, cs_gpios, \ SPI_CONTEXT_CS_GPIO_SPEC_ELEM) #define SPI_CONTEXT_CS_GPIOS_INITIALIZE(_node_id, _ctx_name) \ ._ctx_name.cs_gpios = (const struct gpio_dt_spec []) { \ COND_CODE_1(DT_SPI_HAS_CS_GPIOS(_node_id), \ (SPI_CONTEXT_CS_GPIOS_FOREACH_ELEM(_node_id)), ({0})) \ }, \ ._ctx_name.num_cs_gpios = DT_PROP_LEN_OR(_node_id, cs_gpios, 0), static inline bool spi_context_configured(struct spi_context *ctx, const struct spi_config *config) { return !!(ctx->config == config); } static inline bool spi_context_is_slave(struct spi_context *ctx) { return (ctx->config->operation & SPI_OP_MODE_SLAVE); } static 
inline void spi_context_lock(struct spi_context *ctx, bool asynchronous, spi_callback_t callback, void *callback_data, const struct spi_config *spi_cfg) { if ((spi_cfg->operation & SPI_LOCK_ON) && (k_sem_count_get(&ctx->lock) == 0) && (ctx->owner == spi_cfg)) { return; } k_sem_take(&ctx->lock, K_FOREVER); ctx->owner = spi_cfg; #ifdef CONFIG_SPI_ASYNC ctx->asynchronous = asynchronous; ctx->callback = callback; ctx->callback_data = callback_data; #endif /* CONFIG_SPI_ASYNC */ } static inline void spi_context_release(struct spi_context *ctx, int status) { #ifdef CONFIG_SPI_SLAVE if (status >= 0 && (ctx->config->operation & SPI_LOCK_ON)) { return; } #endif /* CONFIG_SPI_SLAVE */ #ifdef CONFIG_SPI_ASYNC if (!ctx->asynchronous || (status < 0)) { ctx->owner = NULL; k_sem_give(&ctx->lock); } #else if (!(ctx->config->operation & SPI_LOCK_ON)) { ctx->owner = NULL; k_sem_give(&ctx->lock); } #endif /* CONFIG_SPI_ASYNC */ } static inline size_t spi_context_total_tx_len(struct spi_context *ctx); static inline size_t spi_context_total_rx_len(struct spi_context *ctx); static inline int spi_context_wait_for_completion(struct spi_context *ctx) { int status = 0; bool wait; #ifdef CONFIG_SPI_ASYNC wait = !ctx->asynchronous; #else wait = true; #endif if (wait) { k_timeout_t timeout; /* Do not use any timeout in the slave mode, as in this case * it is not known when the transfer will actually start and * what the frequency will be. 
*/ if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) { timeout = K_FOREVER; } else { uint32_t tx_len = spi_context_total_tx_len(ctx); uint32_t rx_len = spi_context_total_rx_len(ctx); uint32_t timeout_ms; timeout_ms = MAX(tx_len, rx_len) * 8 * 1000 / ctx->config->frequency; timeout_ms += CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE; timeout = K_MSEC(timeout_ms); } if (k_sem_take(&ctx->sync, timeout)) { LOG_ERR("Timeout waiting for transfer complete"); return -ETIMEDOUT; } status = ctx->sync_status; } #ifdef CONFIG_SPI_SLAVE if (spi_context_is_slave(ctx) && !status) { return ctx->recv_frames; } #endif /* CONFIG_SPI_SLAVE */ return status; } static inline void spi_context_complete(struct spi_context *ctx, const struct device *dev, int status) { #ifdef CONFIG_SPI_ASYNC if (!ctx->asynchronous) { ctx->sync_status = status; k_sem_give(&ctx->sync); } else { if (ctx->callback) { #ifdef CONFIG_SPI_SLAVE if (spi_context_is_slave(ctx) && !status) { /* Let's update the status so it tells * about number of received frames. 
*/ status = ctx->recv_frames; } #endif /* CONFIG_SPI_SLAVE */ ctx->callback(dev, status, ctx->callback_data); } if (!(ctx->config->operation & SPI_LOCK_ON)) { ctx->owner = NULL; k_sem_give(&ctx->lock); } } #else ctx->sync_status = status; k_sem_give(&ctx->sync); #endif /* CONFIG_SPI_ASYNC */ } static inline int spi_context_cs_configure_all(struct spi_context *ctx) { int ret; const struct gpio_dt_spec *cs_gpio; for (cs_gpio = ctx->cs_gpios; cs_gpio < &ctx->cs_gpios[ctx->num_cs_gpios]; cs_gpio++) { if (!device_is_ready(cs_gpio->port)) { LOG_ERR("CS GPIO port %s pin %d is not ready", cs_gpio->port->name, cs_gpio->pin); return -ENODEV; } ret = gpio_pin_configure_dt(cs_gpio, GPIO_OUTPUT_INACTIVE); if (ret < 0) { return ret; } } return 0; } static inline void _spi_context_cs_control(struct spi_context *ctx, bool on, bool force_off) { if (ctx->config && spi_cs_is_gpio(ctx->config)) { if (on) { gpio_pin_set_dt(&ctx->config->cs.gpio, 1); k_busy_wait(ctx->config->cs.delay); } else { if (!force_off && ctx->config->operation & SPI_HOLD_ON_CS) { return; } k_busy_wait(ctx->config->cs.delay); gpio_pin_set_dt(&ctx->config->cs.gpio, 0); } } } static inline void spi_context_cs_control(struct spi_context *ctx, bool on) { _spi_context_cs_control(ctx, on, false); } static inline void spi_context_unlock_unconditionally(struct spi_context *ctx) { /* Forcing CS to go to inactive status */ _spi_context_cs_control(ctx, false, true); if (!k_sem_count_get(&ctx->lock)) { ctx->owner = NULL; k_sem_give(&ctx->lock); } } static inline void *spi_context_get_next_buf(const struct spi_buf **current, size_t *count, size_t *buf_len, uint8_t dfs) { /* This loop skips zero-length buffers in the set, if any. 
*/ while (*count) { if (((*current)->len / dfs) != 0) { *buf_len = (*current)->len / dfs; return (*current)->buf; } ++(*current); --(*count); } *buf_len = 0; return NULL; } static inline void spi_context_buffers_setup(struct spi_context *ctx, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, uint8_t dfs) { LOG_DBG("tx_bufs %p - rx_bufs %p - %u", tx_bufs, rx_bufs, dfs); ctx->current_tx = tx_bufs ? tx_bufs->buffers : NULL; ctx->tx_count = ctx->current_tx ? tx_bufs->count : 0; ctx->tx_buf = (const uint8_t *) spi_context_get_next_buf(&ctx->current_tx, &ctx->tx_count, &ctx->tx_len, dfs); ctx->current_rx = rx_bufs ? rx_bufs->buffers : NULL; ctx->rx_count = ctx->current_rx ? rx_bufs->count : 0; ctx->rx_buf = (uint8_t *) spi_context_get_next_buf(&ctx->current_rx, &ctx->rx_count, &ctx->rx_len, dfs); ctx->sync_status = 0; #ifdef CONFIG_SPI_SLAVE ctx->recv_frames = 0; #endif /* CONFIG_SPI_SLAVE */ LOG_DBG("current_tx %p (%zu), current_rx %p (%zu)," " tx buf/len %p/%zu, rx buf/len %p/%zu", ctx->current_tx, ctx->tx_count, ctx->current_rx, ctx->rx_count, (void *)ctx->tx_buf, ctx->tx_len, (void *)ctx->rx_buf, ctx->rx_len); } static ALWAYS_INLINE void spi_context_update_tx(struct spi_context *ctx, uint8_t dfs, uint32_t len) { if (!ctx->tx_len) { return; } if (len > ctx->tx_len) { LOG_ERR("Update exceeds current buffer"); return; } ctx->tx_len -= len; if (!ctx->tx_len) { /* Current buffer is done. Get the next one to be processed. 
*/ ++ctx->current_tx; --ctx->tx_count; ctx->tx_buf = (const uint8_t *) spi_context_get_next_buf(&ctx->current_tx, &ctx->tx_count, &ctx->tx_len, dfs); } else if (ctx->tx_buf) { ctx->tx_buf += dfs * len; } LOG_DBG("tx buf/len %p/%zu", (void *)ctx->tx_buf, ctx->tx_len); } static ALWAYS_INLINE bool spi_context_tx_on(struct spi_context *ctx) { return !!(ctx->tx_len); } static ALWAYS_INLINE bool spi_context_tx_buf_on(struct spi_context *ctx) { return !!(ctx->tx_buf && ctx->tx_len); } static ALWAYS_INLINE void spi_context_update_rx(struct spi_context *ctx, uint8_t dfs, uint32_t len) { #ifdef CONFIG_SPI_SLAVE if (spi_context_is_slave(ctx)) { ctx->recv_frames += len; } #endif /* CONFIG_SPI_SLAVE */ if (!ctx->rx_len) { return; } if (len > ctx->rx_len) { LOG_ERR("Update exceeds current buffer"); return; } ctx->rx_len -= len; if (!ctx->rx_len) { /* Current buffer is done. Get the next one to be processed. */ ++ctx->current_rx; --ctx->rx_count; ctx->rx_buf = (uint8_t *) spi_context_get_next_buf(&ctx->current_rx, &ctx->rx_count, &ctx->rx_len, dfs); } else if (ctx->rx_buf) { ctx->rx_buf += dfs * len; } LOG_DBG("rx buf/len %p/%zu", (void *)ctx->rx_buf, ctx->rx_len); } static ALWAYS_INLINE bool spi_context_rx_on(struct spi_context *ctx) { return !!(ctx->rx_len); } static ALWAYS_INLINE bool spi_context_rx_buf_on(struct spi_context *ctx) { return !!(ctx->rx_buf && ctx->rx_len); } /* * Returns the maximum length of a transfer for which all currently active * directions have a continuous buffer, i.e. the maximum SPI transfer that * can be done with DMA that handles only non-scattered buffers. */ static inline size_t spi_context_max_continuous_chunk(struct spi_context *ctx) { if (!ctx->tx_len) { return ctx->rx_len; } else if (!ctx->rx_len) { return ctx->tx_len; } return MIN(ctx->tx_len, ctx->rx_len); } static inline size_t spi_context_longest_current_buf(struct spi_context *ctx) { return ctx->tx_len > ctx->rx_len ? 
ctx->tx_len : ctx->rx_len; } static inline size_t spi_context_total_tx_len(struct spi_context *ctx) { size_t n; size_t total_len = 0; for (n = 0; n < ctx->tx_count; ++n) { total_len += ctx->current_tx[n].len; } return total_len; } static inline size_t spi_context_total_rx_len(struct spi_context *ctx) { size_t n; size_t total_len = 0; for (n = 0; n < ctx->rx_count; ++n) { total_len += ctx->current_rx[n].len; } return total_len; } #ifdef __cplusplus } #endif #endif /* ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_ */ ```
/content/code_sandbox/drivers/spi/spi_context.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,049
```c /* * */ #define DT_DRV_COMPAT litex_spi_litespi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_litex_litespi); #include <zephyr/sys/byteorder.h> #include "spi_litex_common.h" #define SPIFLASH_CORE_MASTER_PHYCONFIG_LEN_OFFSET 0x0 #define SPIFLASH_CORE_MASTER_PHYCONFIG_WIDTH_OFFSET 0x1 #define SPIFLASH_CORE_MASTER_PHYCONFIG_MASK_OFFSET 0x2 #define SPIFLASH_CORE_MASTER_STATUS_TX_READY_OFFSET 0x0 #define SPIFLASH_CORE_MASTER_STATUS_RX_READY_OFFSET 0x1 #define SPI_MAX_WORD_SIZE 32 #define SPI_MAX_CS_SIZE 4 struct spi_litex_dev_config { uint32_t core_mmap_dummy_bits_addr; uint32_t core_master_cs_addr; uint32_t core_master_phyconfig_addr; uint32_t core_master_rxtx_addr; uint32_t core_master_rxtx_size; uint32_t core_master_status_addr; uint32_t phy_clk_divisor_addr; bool phy_clk_divisor_exists; }; struct spi_litex_data { struct spi_context ctx; uint8_t dfs; /* dfs in bytes: 1,2 or 4 */ }; static int spi_litex_set_frequency(const struct device *dev, const struct spi_config *config) { const struct spi_litex_dev_config *dev_config = dev->config; if (!dev_config->phy_clk_divisor_exists) { /* In the LiteX Simulator the phy_clk_divisor doesn't exists, thats why we check. 
*/ LOG_WRN("No phy_clk_divisor found, can't change frequency"); return 0; } uint32_t divisor = DIV_ROUND_UP(sys_clock_hw_cycles_per_sec(), (2 * config->frequency)) - 1; litex_write32(divisor, dev_config->phy_clk_divisor_addr); return 0; } /* Helper Functions */ static int spi_config(const struct device *dev, const struct spi_config *config) { struct spi_litex_data *dev_data = dev->data; if (config->slave != 0) { if (config->slave >= SPI_MAX_CS_SIZE) { LOG_ERR("More slaves than supported"); return -ENOTSUP; } } if (config->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (SPI_WORD_SIZE_GET(config->operation) > SPI_MAX_WORD_SIZE) { LOG_ERR("Word size must be <= %d, is %d", SPI_MAX_WORD_SIZE, SPI_WORD_SIZE_GET(config->operation)); return -ENOTSUP; } if (config->operation & SPI_CS_ACTIVE_HIGH) { LOG_ERR("CS active high not supported"); return -ENOTSUP; } if (config->operation & SPI_LOCK_ON) { LOG_ERR("Lock On not supported"); return -ENOTSUP; } if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only supports single mode"); return -ENOTSUP; } if (config->operation & SPI_TRANSFER_LSB) { LOG_ERR("LSB first not supported"); return -ENOTSUP; } if (config->operation & (SPI_MODE_CPOL | SPI_MODE_CPHA)) { LOG_ERR("Only supports CPOL=CPHA=0"); return -ENOTSUP; } if (config->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } if (config->operation & SPI_MODE_LOOP) { LOG_ERR("Loopback mode not supported"); return -ENOTSUP; } dev_data->dfs = get_dfs_value(config); spi_litex_set_frequency(dev, config); return 0; } static void spiflash_len_mask_width_write(uint32_t len, uint32_t width, uint32_t mask, uint32_t addr) { uint32_t tmp = len & BIT_MASK(8); uint32_t word = tmp << (SPIFLASH_CORE_MASTER_PHYCONFIG_LEN_OFFSET * 8); tmp = width & BIT_MASK(8); word |= tmp << (SPIFLASH_CORE_MASTER_PHYCONFIG_WIDTH_OFFSET * 8); tmp = mask & BIT_MASK(8); word 
|= tmp << (SPIFLASH_CORE_MASTER_PHYCONFIG_MASK_OFFSET * 8); litex_write32(word, addr); } static int spi_litex_xfer(const struct device *dev, const struct spi_config *config) { const struct spi_litex_dev_config *dev_config = dev->config; struct spi_litex_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t txd, rxd; int ret = 0; uint8_t len = data->dfs; /* SPI Xfer length*/ uint8_t old_len = len; /* old SPI Xfer length*/ uint8_t width = BIT(0); /* SPI Xfer width*/ uint8_t mask = BIT(0); /* SPI Xfer mask*/ spiflash_len_mask_width_write(len * 8, width, mask, dev_config->core_master_phyconfig_addr); litex_write32(BIT(config->slave), dev_config->core_master_cs_addr); do { len = MIN(spi_context_max_continuous_chunk(ctx), dev_config->core_master_rxtx_size); if (len != old_len) { spiflash_len_mask_width_write(len * 8, width, mask, dev_config->core_master_phyconfig_addr); old_len = len; } if (spi_context_tx_buf_on(ctx)) { litex_spi_tx_put(len, &txd, ctx->tx_buf); } else { txd = 0U; } while (!(litex_read8(dev_config->core_master_status_addr) & BIT(SPIFLASH_CORE_MASTER_STATUS_TX_READY_OFFSET))) { ; } LOG_DBG("txd: 0x%x", txd); litex_write32(txd, dev_config->core_master_rxtx_addr); spi_context_update_tx(ctx, data->dfs, len / data->dfs); while (!(litex_read8(dev_config->core_master_status_addr) & BIT(SPIFLASH_CORE_MASTER_STATUS_RX_READY_OFFSET))) { ; } rxd = litex_read32(dev_config->core_master_rxtx_addr); LOG_DBG("rxd: 0x%x", rxd); if (spi_context_rx_buf_on(ctx)) { litex_spi_rx_put(len, &rxd, ctx->rx_buf); } spi_context_update_rx(ctx, data->dfs, len / data->dfs); } while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)); litex_write32(0, dev_config->core_master_cs_addr); spi_context_complete(ctx, dev, 0); return ret; } static int spi_litex_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_litex_data *data = dev->data; int ret = spi_config(dev, config); if 
(ret) { return ret; } if (!tx_bufs && !rx_bufs) { return 0; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, data->dfs); ret = spi_litex_xfer(dev, config); return ret; } #ifdef CONFIG_SPI_ASYNC static int spi_litex_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, struct k_poll_signal *async) { return -ENOTSUP; } #endif /* CONFIG_SPI_ASYNC */ static int spi_litex_release(const struct device *dev, const struct spi_config *config) { return 0; } /* Device Instantiation */ static const struct spi_driver_api spi_litex_api = { .transceive = spi_litex_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_litex_transceive_async, #endif /* CONFIG_SPI_ASYNC */ .release = spi_litex_release, }; #define SPI_INIT(n) \ static struct spi_litex_data spi_litex_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_litex_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_litex_data_##n, ctx), \ }; \ static struct spi_litex_dev_config spi_litex_cfg_##n = { \ .core_mmap_dummy_bits_addr = DT_INST_REG_ADDR_BY_NAME(n, core_mmap_dummy_bits), \ .core_master_cs_addr = DT_INST_REG_ADDR_BY_NAME(n, core_master_cs), \ .core_master_phyconfig_addr = DT_INST_REG_ADDR_BY_NAME(n, core_master_phyconfig), \ .core_master_rxtx_addr = DT_INST_REG_ADDR_BY_NAME(n, core_master_rxtx), \ .core_master_rxtx_size = DT_INST_REG_SIZE_BY_NAME(n, core_master_rxtx), \ .core_master_status_addr = DT_INST_REG_ADDR_BY_NAME(n, core_master_status), \ .phy_clk_divisor_exists = DT_INST_REG_HAS_NAME(n, phy_clk_divisor), \ .phy_clk_divisor_addr = DT_INST_REG_ADDR_BY_NAME_OR(n, phy_clk_divisor, 0) \ \ }; \ DEVICE_DT_INST_DEFINE(n, NULL, NULL, &spi_litex_data_##n, &spi_litex_cfg_##n, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, &spi_litex_api); DT_INST_FOREACH_STATUS_OKAY(SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_litex_litespi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,126
```unknown config SPI_SMARTBOND bool "Renesas SmartBond(tm) SPI driver" default y depends on DT_HAS_RENESAS_SMARTBOND_SPI_ENABLED help Enables SPI driver for Renesas SmartBond(tm) DA1469x series MCU. config SPI_SMARTBOND_DMA bool "Renesas Smartbond(tm) SPI with DMA acceleration" default y depends on SPI_SMARTBOND select DMA help Enables using the DMA engine instead of interrupt-driven approach. This acceleration is available only for asynchronous transfers. ```
/content/code_sandbox/drivers/spi/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
124
```unknown # Microchip Polarfire SOC SPI config SPI_MCHP_MSS bool "Microchip Polarfire SOC SPI driver" default y depends on DT_HAS_MICROCHIP_MPFS_SPI_ENABLED help Enable support for the Polarfire SOC SPI driver. ```
/content/code_sandbox/drivers/spi/Kconfig.mchp_mss
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
56
```c /* * */ #define DT_DRV_COMPAT silabs_gecko_spi_usart #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_gecko); #include "spi_context.h" #include <zephyr/sys/sys_io.h> #include <zephyr/device.h> #include <zephyr/drivers/spi.h> #include <soc.h> #include "em_cmu.h" #include "em_usart.h" #include <stdbool.h> #ifdef CONFIG_PINCTRL #include <zephyr/drivers/pinctrl.h> #else #ifndef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION #error "Individual pin location support is required" #endif #endif /* CONFIG_PINCTRL */ #if DT_NODE_HAS_PROP(n, peripheral_id) #define CLOCK_USART(id) _CONCAT(cmuClock_USART, id) #define GET_GECKO_USART_CLOCK(n) CLOCK_USART(DT_INST_PROP(n, peripheral_id)) #else #if (USART_COUNT == 1) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : -1) #elif (USART_COUNT == 2) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : -1) #elif (USART_COUNT == 3) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : ((ref) == USART2) ? cmuClock_USART2 \ : -1) #elif (USART_COUNT == 4) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : ((ref) == USART2) ? cmuClock_USART2 \ : ((ref) == USART3) ? cmuClock_USART3 \ : -1) #elif (USART_COUNT == 5) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : ((ref) == USART2) ? cmuClock_USART2 \ : ((ref) == USART3) ? cmuClock_USART3 \ : ((ref) == USART4) ? cmuClock_USART4 \ : -1) #elif (USART_COUNT == 6) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : ((ref) == USART2) ? cmuClock_USART2 \ : ((ref) == USART3) ? cmuClock_USART3 \ : ((ref) == USART4) ? cmuClock_USART4 \ : ((ref) == USART5) ? cmuClock_USART5 \ : -1) #else #error "Undefined number of USARTs." 
#endif /* USART_COUNT */ #define GET_GECKO_USART_CLOCK(id) CLOCK_USART((USART_TypeDef *)DT_INST_REG_ADDR(id)) #endif /* DT_NODE_HAS_PROP(n, peripheral_id) */ #define SPI_WORD_SIZE 8 /* Structure Declarations */ struct spi_gecko_data { struct spi_context ctx; }; struct spi_gecko_config { USART_TypeDef *base; CMU_Clock_TypeDef clock; uint32_t clock_frequency; #ifdef CONFIG_PINCTRL const struct pinctrl_dev_config *pcfg; #else struct soc_gpio_pin pin_rx; struct soc_gpio_pin pin_tx; struct soc_gpio_pin pin_clk; uint8_t loc_rx; uint8_t loc_tx; uint8_t loc_clk; #endif /* CONFIG_PINCTRL */ }; /* Helper Functions */ static int spi_config(const struct device *dev, const struct spi_config *config, uint16_t *control) { const struct spi_gecko_config *gecko_config = dev->config; struct spi_gecko_data *data = dev->data; uint32_t spi_frequency = CMU_ClockFreqGet(gecko_config->clock) / 2; if (config->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (SPI_WORD_SIZE_GET(config->operation) != SPI_WORD_SIZE) { LOG_ERR("Word size must be %d", SPI_WORD_SIZE); return -ENOTSUP; } if (config->operation & SPI_CS_ACTIVE_HIGH) { LOG_ERR("CS active high not supported"); return -ENOTSUP; } if (config->operation & SPI_LOCK_ON) { LOG_ERR("Lock On not supported"); return -ENOTSUP; } if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only supports single mode"); return -ENOTSUP; } if (config->operation & SPI_TRANSFER_LSB) { LOG_ERR("LSB first not supported"); return -ENOTSUP; } if (config->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } /* Set frequency to the minimum of what the device supports, what the * user has configured the controller to, and the max frequency for the * transaction. 
*/ if (gecko_config->clock_frequency > spi_frequency) { LOG_ERR("SPI clock-frequency too high"); return -EINVAL; } spi_frequency = MIN(gecko_config->clock_frequency, spi_frequency); if (config->frequency) { spi_frequency = MIN(config->frequency, spi_frequency); } USART_BaudrateSyncSet(gecko_config->base, 0, spi_frequency); /* Set Loopback */ if (config->operation & SPI_MODE_LOOP) { gecko_config->base->CTRL |= USART_CTRL_LOOPBK; } else { gecko_config->base->CTRL &= ~USART_CTRL_LOOPBK; } /* Set CPOL */ if (config->operation & SPI_MODE_CPOL) { gecko_config->base->CTRL |= USART_CTRL_CLKPOL; } else { gecko_config->base->CTRL &= ~USART_CTRL_CLKPOL; } /* Set CPHA */ if (config->operation & SPI_MODE_CPHA) { gecko_config->base->CTRL |= USART_CTRL_CLKPHA; } else { gecko_config->base->CTRL &= ~USART_CTRL_CLKPHA; } /* Set word size */ gecko_config->base->FRAME = usartDatabits8 | USART_FRAME_STOPBITS_DEFAULT | USART_FRAME_PARITY_DEFAULT; /* At this point, it's mandatory to set this on the context! */ data->ctx.config = config; return 0; } static void spi_gecko_send(USART_TypeDef *usart, uint8_t frame) { /* Write frame to register */ USART_Tx(usart, frame); /* Wait until the transfer ends */ while (!(usart->STATUS & USART_STATUS_TXC)) { } } static uint8_t spi_gecko_recv(USART_TypeDef *usart) { /* Return data inside rx register */ return (uint8_t)usart->RXDATA; } static bool spi_gecko_transfer_ongoing(struct spi_gecko_data *data) { return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx); } static inline uint8_t spi_gecko_next_tx(struct spi_gecko_data *data) { uint8_t tx_frame = 0; if (spi_context_tx_buf_on(&data->ctx)) { tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf)); } return tx_frame; } static int spi_gecko_shift_frames(USART_TypeDef *usart, struct spi_gecko_data *data) { uint8_t tx_frame; uint8_t rx_frame; tx_frame = spi_gecko_next_tx(data); spi_gecko_send(usart, tx_frame); spi_context_update_tx(&data->ctx, 1, 1); rx_frame = spi_gecko_recv(usart); if 
(spi_context_rx_buf_on(&data->ctx)) { UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf); } spi_context_update_rx(&data->ctx, 1, 1); return 0; } static void spi_gecko_xfer(const struct device *dev, const struct spi_config *config) { int ret; struct spi_gecko_data *data = dev->data; struct spi_context *ctx = &data->ctx; const struct spi_gecko_config *gecko_config = dev->config; spi_context_cs_control(ctx, true); do { ret = spi_gecko_shift_frames(gecko_config->base, data); } while (!ret && spi_gecko_transfer_ongoing(data)); spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, 0); } #ifndef CONFIG_PINCTRL static void spi_gecko_init_pins(const struct device *dev) { const struct spi_gecko_config *config = dev->config; GPIO_PinModeSet(config->pin_rx.port, config->pin_rx.pin, config->pin_rx.mode, config->pin_rx.out); GPIO_PinModeSet(config->pin_tx.port, config->pin_tx.pin, config->pin_tx.mode, config->pin_tx.out); GPIO_PinModeSet(config->pin_clk.port, config->pin_clk.pin, config->pin_clk.mode, config->pin_clk.out); /* disable all pins while configuring */ config->base->ROUTEPEN = 0; config->base->ROUTELOC0 = (config->loc_tx << _USART_ROUTELOC0_TXLOC_SHIFT) | (config->loc_rx << _USART_ROUTELOC0_RXLOC_SHIFT) | (config->loc_clk << _USART_ROUTELOC0_CLKLOC_SHIFT); config->base->ROUTELOC1 = _USART_ROUTELOC1_RESETVALUE; config->base->ROUTEPEN = USART_ROUTEPEN_RXPEN | USART_ROUTEPEN_TXPEN | USART_ROUTEPEN_CLKPEN; } #endif /* !CONFIG_PINCTRL */ /* API Functions */ static int spi_gecko_init(const struct device *dev) { int err; const struct spi_gecko_config *config = dev->config; struct spi_gecko_data *data = dev->data; USART_InitSync_TypeDef usartInit = USART_INITSYNC_DEFAULT; /* The peripheral and gpio clock are already enabled from soc and gpio * driver */ usartInit.enable = usartDisable; usartInit.baudrate = 1000000; usartInit.databits = usartDatabits8; usartInit.master = 1; usartInit.msbf = 1; usartInit.clockMode = usartClockMode0; #if defined(USART_INPUT_RXPRS) 
&& defined(USART_TRIGCTRL_AUTOTXTEN) usartInit.prsRxEnable = 0; usartInit.prsRxCh = 0; usartInit.autoTx = 0; #endif /* Enable USART clock */ CMU_ClockEnable(config->clock, true); /* Init USART */ USART_InitSync(config->base, &usartInit); #ifdef CONFIG_PINCTRL err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } #else /* Initialize USART pins */ spi_gecko_init_pins(dev); #endif /* CONFIG_PINCTRL */ err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } /* Enable the peripheral */ config->base->CMD = (uint32_t) usartEnable; return 0; } static int spi_gecko_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_gecko_data *data = dev->data; uint16_t control = 0; int ret; ret = spi_config(dev, config, &control); if (ret < 0) { return ret; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); spi_gecko_xfer(dev, config); return 0; } #ifdef CONFIG_SPI_ASYNC static int spi_gecko_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, struct k_poll_signal *async) { return -ENOTSUP; } #endif /* CONFIG_SPI_ASYNC */ static int spi_gecko_release(const struct device *dev, const struct spi_config *config) { const struct spi_gecko_config *gecko_config = dev->config; if (!(gecko_config->base->STATUS & USART_STATUS_TXIDLE)) { return -EBUSY; } return 0; } /* Device Instantiation */ static const struct spi_driver_api spi_gecko_api = { .transceive = spi_gecko_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_gecko_transceive_async, #endif /* CONFIG_SPI_ASYNC */ .release = spi_gecko_release, }; #ifdef CONFIG_PINCTRL #define SPI_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static struct spi_gecko_data spi_gecko_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_gecko_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_gecko_data_##n, ctx), \ 
SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ }; \ static struct spi_gecko_config spi_gecko_cfg_##n = { \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .base = (USART_TypeDef *) \ DT_INST_REG_ADDR(n), \ .clock = GET_GECKO_USART_CLOCK(n), \ .clock_frequency = DT_INST_PROP_OR(n, clock_frequency, 1000000) \ }; \ DEVICE_DT_INST_DEFINE(n, \ spi_gecko_init, \ NULL, \ &spi_gecko_data_##n, \ &spi_gecko_cfg_##n, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_gecko_api); #else #define SPI_INIT(n) \ static struct spi_gecko_data spi_gecko_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_gecko_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_gecko_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ }; \ static struct spi_gecko_config spi_gecko_cfg_##n = { \ .base = (USART_TypeDef *) \ DT_INST_REG_ADDR(n), \ .clock = GET_GECKO_USART_CLOCK(n), \ .clock_frequency = DT_INST_PROP_OR(n, clock_frequency, 1000000), \ .pin_rx = { DT_INST_PROP_BY_IDX(n, location_rx, 1), \ DT_INST_PROP_BY_IDX(n, location_rx, 2), \ gpioModeInput, 1}, \ .pin_tx = { DT_INST_PROP_BY_IDX(n, location_tx, 1), \ DT_INST_PROP_BY_IDX(n, location_tx, 2), \ gpioModePushPull, 1}, \ .pin_clk = { DT_INST_PROP_BY_IDX(n, location_clk, 1), \ DT_INST_PROP_BY_IDX(n, location_clk, 2), \ gpioModePushPull, 1}, \ .loc_rx = DT_INST_PROP_BY_IDX(n, location_rx, 0), \ .loc_tx = DT_INST_PROP_BY_IDX(n, location_tx, 0), \ .loc_clk = DT_INST_PROP_BY_IDX(n, location_clk, 0), \ }; \ DEVICE_DT_INST_DEFINE(n, \ spi_gecko_init, \ NULL, \ &spi_gecko_data_##n, \ &spi_gecko_cfg_##n, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_gecko_api); #endif /* CONFIG_PINCTRL */ DT_INST_FOREACH_STATUS_OKAY(SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_gecko.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,526
```c /* * */ #define DT_DRV_COMPAT atmel_sam0_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_sam0); #include "spi_context.h" #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/pinctrl.h> #include <soc.h> #ifndef SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val #define SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val (0x3) #endif /* Device constant configuration parameters */ struct spi_sam0_config { SercomSpi *regs; uint32_t pads; const struct pinctrl_dev_config *pcfg; #ifdef MCLK volatile uint32_t *mclk; uint32_t mclk_mask; uint16_t gclk_core_id; #else uint32_t pm_apbcmask; uint16_t gclk_clkctrl_id; #endif #ifdef CONFIG_SPI_ASYNC const struct device *dma_dev; uint8_t tx_dma_request; uint8_t tx_dma_channel; uint8_t rx_dma_request; uint8_t rx_dma_channel; #endif }; /* Device run time data */ struct spi_sam0_data { struct spi_context ctx; #ifdef CONFIG_SPI_ASYNC const struct device *dev; uint32_t dma_segment_len; #endif }; static void wait_synchronization(SercomSpi *regs) { #if defined(SERCOM_SPI_SYNCBUSY_MASK) /* SYNCBUSY is a register */ while ((regs->SYNCBUSY.reg & SERCOM_SPI_SYNCBUSY_MASK) != 0) { } #elif defined(SERCOM_SPI_STATUS_SYNCBUSY) /* SYNCBUSY is a bit */ while ((regs->STATUS.reg & SERCOM_SPI_STATUS_SYNCBUSY) != 0) { } #else #error Unsupported device #endif } static int spi_sam0_configure(const struct device *dev, const struct spi_config *config) { const struct spi_sam0_config *cfg = dev->config; struct spi_sam0_data *data = dev->data; SercomSpi *regs = cfg->regs; SERCOM_SPI_CTRLA_Type ctrla = {.reg = 0}; SERCOM_SPI_CTRLB_Type ctrlb = {.reg = 0}; int div; if (spi_context_configured(&data->ctx, config)) { return 0; } if (config->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) { /* Slave mode is not implemented. 
*/ return -ENOTSUP; } ctrla.bit.MODE = SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val; if ((config->operation & SPI_TRANSFER_LSB) != 0U) { ctrla.bit.DORD = 1; } if ((config->operation & SPI_MODE_CPOL) != 0U) { ctrla.bit.CPOL = 1; } if ((config->operation & SPI_MODE_CPHA) != 0U) { ctrla.bit.CPHA = 1; } ctrla.reg |= cfg->pads; if ((config->operation & SPI_MODE_LOOP) != 0U) { /* Put MISO and MOSI on the same pad */ ctrla.bit.DOPO = 0; ctrla.bit.DIPO = 0; } ctrla.bit.ENABLE = 1; ctrlb.bit.RXEN = 1; if (SPI_WORD_SIZE_GET(config->operation) != 8) { return -ENOTSUP; } /* 8 bits per transfer */ ctrlb.bit.CHSIZE = 0; /* Use the requested or next highest possible frequency */ div = (SOC_ATMEL_SAM0_GCLK0_FREQ_HZ / config->frequency) / 2U - 1; div = CLAMP(div, 0, UINT8_MAX); /* Update the configuration only if it has changed */ if (regs->CTRLA.reg != ctrla.reg || regs->CTRLB.reg != ctrlb.reg || regs->BAUD.reg != div) { regs->CTRLA.bit.ENABLE = 0; wait_synchronization(regs); regs->CTRLB = ctrlb; wait_synchronization(regs); regs->BAUD.reg = div; wait_synchronization(regs); regs->CTRLA = ctrla; wait_synchronization(regs); } data->ctx.config = config; return 0; } static bool spi_sam0_transfer_ongoing(struct spi_sam0_data *data) { return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx); } static void spi_sam0_shift_master(SercomSpi *regs, struct spi_sam0_data *data) { uint8_t tx; uint8_t rx; if (spi_context_tx_buf_on(&data->ctx)) { tx = *(uint8_t *)(data->ctx.tx_buf); } else { tx = 0U; } while (!regs->INTFLAG.bit.DRE) { } regs->DATA.reg = tx; spi_context_update_tx(&data->ctx, 1, 1); while (!regs->INTFLAG.bit.RXC) { } rx = regs->DATA.reg; if (spi_context_rx_buf_on(&data->ctx)) { *data->ctx.rx_buf = rx; } spi_context_update_rx(&data->ctx, 1, 1); } /* Finish any ongoing writes and drop any remaining read data */ static void spi_sam0_finish(SercomSpi *regs) { while (!regs->INTFLAG.bit.TXC) { } while (regs->INTFLAG.bit.RXC) { (void)regs->DATA.reg; } } /* Fast path that transmits a 
buf */ static void spi_sam0_fast_tx(SercomSpi *regs, const struct spi_buf *tx_buf) { const uint8_t *p = tx_buf->buf; const uint8_t *pend = (uint8_t *)tx_buf->buf + tx_buf->len; uint8_t ch; while (p != pend) { ch = *p++; while (!regs->INTFLAG.bit.DRE) { } regs->DATA.reg = ch; } spi_sam0_finish(regs); } /* Fast path that reads into a buf */ static void spi_sam0_fast_rx(SercomSpi *regs, const struct spi_buf *rx_buf) { uint8_t *rx = rx_buf->buf; int len = rx_buf->len; if (len <= 0) { return; } while (len) { /* Send the next byte */ regs->DATA.reg = 0; len--; /* Wait for completion, and read */ while (!regs->INTFLAG.bit.RXC) { } *rx++ = regs->DATA.reg; } spi_sam0_finish(regs); } /* Fast path that writes and reads bufs of the same length */ static void spi_sam0_fast_txrx(SercomSpi *regs, const struct spi_buf *tx_buf, const struct spi_buf *rx_buf) { const uint8_t *tx = tx_buf->buf; const uint8_t *txend = (uint8_t *)tx_buf->buf + tx_buf->len; uint8_t *rx = rx_buf->buf; size_t len = rx_buf->len; if (len == 0) { return; } while (tx != txend) { /* Send the next byte */ regs->DATA.reg = *tx++; /* Wait for completion, and read */ while (!regs->INTFLAG.bit.RXC) { } *rx++ = regs->DATA.reg; } spi_sam0_finish(regs); } /* Fast path where every overlapping tx and rx buffer is the same length */ static void spi_sam0_fast_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_sam0_config *cfg = dev->config; size_t tx_count = 0; size_t rx_count = 0; SercomSpi *regs = cfg->regs; const struct spi_buf *tx = NULL; const struct spi_buf *rx = NULL; if (tx_bufs) { tx = tx_bufs->buffers; tx_count = tx_bufs->count; } if (rx_bufs) { rx = rx_bufs->buffers; rx_count = rx_bufs->count; } else { rx = NULL; } while (tx_count != 0 && rx_count != 0) { if (tx->buf == NULL) { spi_sam0_fast_rx(regs, rx); } else if (rx->buf == NULL) { spi_sam0_fast_tx(regs, tx); } else { spi_sam0_fast_txrx(regs, tx, rx); } 
tx++; tx_count--; rx++; rx_count--; } for (; tx_count != 0; tx_count--) { spi_sam0_fast_tx(regs, tx++); } for (; rx_count != 0; rx_count--) { spi_sam0_fast_rx(regs, rx++); } } /* Returns true if the request is suitable for the fast * path. Specifically, the bufs are a sequence of: * * - Zero or more RX and TX buf pairs where each is the same length. * - Zero or more trailing RX only bufs * - Zero or more trailing TX only bufs */ static bool spi_sam0_is_regular(const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_buf *tx = NULL; const struct spi_buf *rx = NULL; size_t tx_count = 0; size_t rx_count = 0; if (tx_bufs) { tx = tx_bufs->buffers; tx_count = tx_bufs->count; } if (rx_bufs) { rx = rx_bufs->buffers; rx_count = rx_bufs->count; } while (tx_count != 0 && rx_count != 0) { if (tx->len != rx->len) { return false; } tx++; tx_count--; rx++; rx_count--; } return true; } static int spi_sam0_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_sam0_config *cfg = dev->config; struct spi_sam0_data *data = dev->data; SercomSpi *regs = cfg->regs; int err; spi_context_lock(&data->ctx, false, NULL, NULL, config); err = spi_sam0_configure(dev, config); if (err != 0) { goto done; } spi_context_cs_control(&data->ctx, true); /* This driver special cases the common send only, receive * only, and transmit then receive operations. This special * casing is 4x faster than the spi_context() routines * and allows the transmit and receive to be interleaved. 
*/ if (spi_sam0_is_regular(tx_bufs, rx_bufs)) { spi_sam0_fast_transceive(dev, config, tx_bufs, rx_bufs); } else { spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); do { spi_sam0_shift_master(regs, data); } while (spi_sam0_transfer_ongoing(data)); } spi_context_cs_control(&data->ctx, false); done: spi_context_release(&data->ctx, err); return err; } static int spi_sam0_transceive_sync(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return spi_sam0_transceive(dev, config, tx_bufs, rx_bufs); } #ifdef CONFIG_SPI_ASYNC static void spi_sam0_dma_rx_done(const struct device *dma_dev, void *arg, uint32_t id, int error_code); static int spi_sam0_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len) { const struct spi_sam0_config *cfg = dev->config; struct spi_sam0_data *data = dev->data; SercomSpi *regs = cfg->regs; struct dma_config dma_cfg = { 0 }; struct dma_block_config dma_blk = { 0 }; int retval; dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY; dma_cfg.source_data_size = 1; dma_cfg.dest_data_size = 1; dma_cfg.user_data = data; dma_cfg.dma_callback = spi_sam0_dma_rx_done; dma_cfg.block_count = 1; dma_cfg.head_block = &dma_blk; dma_cfg.dma_slot = cfg->rx_dma_request; dma_blk.block_size = len; if (buf != NULL) { dma_blk.dest_address = (uint32_t)buf; } else { static uint8_t dummy; dma_blk.dest_address = (uint32_t)&dummy; dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } dma_blk.source_address = (uint32_t)(&(regs->DATA.reg)); dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; retval = dma_config(cfg->dma_dev, cfg->rx_dma_channel, &dma_cfg); if (retval != 0) { return retval; } return dma_start(cfg->dma_dev, cfg->rx_dma_channel); } static int spi_sam0_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len) { const struct spi_sam0_config *cfg = dev->config; SercomSpi *regs = cfg->regs; struct dma_config dma_cfg = { 0 }; struct dma_block_config dma_blk = { 0 }; 
int retval; dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY; dma_cfg.source_data_size = 1; dma_cfg.dest_data_size = 1; dma_cfg.block_count = 1; dma_cfg.head_block = &dma_blk; dma_cfg.dma_slot = cfg->tx_dma_request; dma_blk.block_size = len; if (buf != NULL) { dma_blk.source_address = (uint32_t)buf; } else { static const uint8_t dummy; dma_blk.source_address = (uint32_t)&dummy; dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } dma_blk.dest_address = (uint32_t)(&(regs->DATA.reg)); dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; retval = dma_config(cfg->dma_dev, cfg->tx_dma_channel, &dma_cfg); if (retval != 0) { return retval; } return dma_start(cfg->dma_dev, cfg->tx_dma_channel); } static bool spi_sam0_dma_advance_segment(const struct device *dev) { struct spi_sam0_data *data = dev->data; uint32_t segment_len; /* Pick the shorter buffer of ones that have an actual length */ if (data->ctx.rx_len != 0) { segment_len = data->ctx.rx_len; if (data->ctx.tx_len != 0) { segment_len = MIN(segment_len, data->ctx.tx_len); } } else { segment_len = data->ctx.tx_len; } if (segment_len == 0) { return false; } segment_len = MIN(segment_len, 65535); data->dma_segment_len = segment_len; return true; } static int spi_sam0_dma_advance_buffers(const struct device *dev) { struct spi_sam0_data *data = dev->data; int retval; if (data->dma_segment_len == 0) { return -EINVAL; } /* Load receive first, so it can accept transmit data */ if (data->ctx.rx_len) { retval = spi_sam0_dma_rx_load(dev, data->ctx.rx_buf, data->dma_segment_len); } else { retval = spi_sam0_dma_rx_load(dev, NULL, data->dma_segment_len); } if (retval != 0) { return retval; } /* Now load the transmit, which starts the actual bus clocking */ if (data->ctx.tx_len) { retval = spi_sam0_dma_tx_load(dev, data->ctx.tx_buf, data->dma_segment_len); } else { retval = spi_sam0_dma_tx_load(dev, NULL, data->dma_segment_len); } if (retval != 0) { return retval; } return 0; } static void spi_sam0_dma_rx_done(const struct device *dma_dev, 
void *arg, uint32_t id, int error_code) { struct spi_sam0_data *data = arg; const struct device *dev = data->dev; const struct spi_sam0_config *cfg = dev->config; int retval; ARG_UNUSED(id); ARG_UNUSED(error_code); spi_context_update_tx(&data->ctx, 1, data->dma_segment_len); spi_context_update_rx(&data->ctx, 1, data->dma_segment_len); if (!spi_sam0_dma_advance_segment(dev)) { /* Done */ spi_context_cs_control(&data->ctx, false); spi_context_complete(&data->ctx, dev, 0); return; } retval = spi_sam0_dma_advance_buffers(dev); if (retval != 0) { dma_stop(cfg->dma_dev, cfg->tx_dma_channel); dma_stop(cfg->dma_dev, cfg->rx_dma_channel); spi_context_cs_control(&data->ctx, false); spi_context_complete(&data->ctx, dev, retval); return; } } static int spi_sam0_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { const struct spi_sam0_config *cfg = dev->config; struct spi_sam0_data *data = dev->data; int retval; /* * Transmit clocks the output and we use receive to determine when * the transmit is done, so we always need both */ if (cfg->tx_dma_channel == 0xFF || cfg->rx_dma_channel == 0xFF) { return -ENOTSUP; } spi_context_lock(&data->ctx, true, cb, userdata, config); retval = spi_sam0_configure(dev, config); if (retval != 0) { goto err_unlock; } spi_context_cs_control(&data->ctx, true); spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); spi_sam0_dma_advance_segment(dev); retval = spi_sam0_dma_advance_buffers(dev); if (retval != 0) { goto err_cs; } return 0; err_cs: dma_stop(cfg->dma_dev, cfg->tx_dma_channel); dma_stop(cfg->dma_dev, cfg->rx_dma_channel); spi_context_cs_control(&data->ctx, false); err_unlock: spi_context_release(&data->ctx, retval); return retval; } #endif /* CONFIG_SPI_ASYNC */ static int spi_sam0_release(const struct device *dev, const struct spi_config *config) { struct spi_sam0_data *data = dev->data; 
spi_context_unlock_unconditionally(&data->ctx); return 0; } static int spi_sam0_init(const struct device *dev) { int err; const struct spi_sam0_config *cfg = dev->config; struct spi_sam0_data *data = dev->data; SercomSpi *regs = cfg->regs; #ifdef MCLK /* Enable the GCLK */ GCLK->PCHCTRL[cfg->gclk_core_id].reg = GCLK_PCHCTRL_GEN_GCLK0 | GCLK_PCHCTRL_CHEN; /* Enable the MCLK */ *cfg->mclk |= cfg->mclk_mask; #else /* Enable the GCLK */ GCLK->CLKCTRL.reg = cfg->gclk_clkctrl_id | GCLK_CLKCTRL_GEN_GCLK0 | GCLK_CLKCTRL_CLKEN; /* Enable SERCOM clock in PM */ PM->APBCMASK.reg |= cfg->pm_apbcmask; #endif /* Disable all SPI interrupts */ regs->INTENCLR.reg = SERCOM_SPI_INTENCLR_MASK; wait_synchronization(regs); err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } #ifdef CONFIG_SPI_ASYNC if (!device_is_ready(cfg->dma_dev)) { return -ENODEV; } data->dev = dev; #endif err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } spi_context_unlock_unconditionally(&data->ctx); /* The device will be configured and enabled when transceive * is called. 
*/ return 0; } static const struct spi_driver_api spi_sam0_driver_api = { .transceive = spi_sam0_transceive_sync, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_sam0_transceive_async, #endif .release = spi_sam0_release, }; #if CONFIG_SPI_ASYNC #define SPI_SAM0_DMA_CHANNELS(n) \ .dma_dev = DEVICE_DT_GET(ATMEL_SAM0_DT_INST_DMA_CTLR(n, tx)), \ .tx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, tx), \ .tx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, tx), \ .rx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, rx), \ .rx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, rx), #else #define SPI_SAM0_DMA_CHANNELS(n) #endif #define SPI_SAM0_SERCOM_PADS(n) \ SERCOM_SPI_CTRLA_DIPO(DT_INST_PROP(n, dipo)) | \ SERCOM_SPI_CTRLA_DOPO(DT_INST_PROP(n, dopo)) #ifdef MCLK #define SPI_SAM0_DEFINE_CONFIG(n) \ static const struct spi_sam0_config spi_sam0_config_##n = { \ .regs = (SercomSpi *)DT_INST_REG_ADDR(n), \ .mclk = (volatile uint32_t *)MCLK_MASK_DT_INT_REG_ADDR(n), \ .mclk_mask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, mclk, bit)), \ .gclk_core_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, periph_ch),\ .pads = SPI_SAM0_SERCOM_PADS(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ SPI_SAM0_DMA_CHANNELS(n) \ } #else #define SPI_SAM0_DEFINE_CONFIG(n) \ static const struct spi_sam0_config spi_sam0_config_##n = { \ .regs = (SercomSpi *)DT_INST_REG_ADDR(n), \ .pm_apbcmask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, pm, bit)), \ .gclk_clkctrl_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, clkctrl_id),\ .pads = SPI_SAM0_SERCOM_PADS(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ SPI_SAM0_DMA_CHANNELS(n) \ } #endif /* MCLK */ #define SPI_SAM0_DEVICE_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ SPI_SAM0_DEFINE_CONFIG(n); \ static struct spi_sam0_data spi_sam0_dev_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_sam0_dev_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_sam0_dev_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ }; \ DEVICE_DT_INST_DEFINE(n, spi_sam0_init, NULL, \ &spi_sam0_dev_data_##n, 
\ &spi_sam0_config_##n, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_sam0_driver_api); DT_INST_FOREACH_STATUS_OKAY(SPI_SAM0_DEVICE_INIT) ```
/content/code_sandbox/drivers/spi/spi_sam0.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,480
```c /* * */ #define DT_DRV_COMPAT arm_pl022 #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/spi.h> #include <zephyr/sys/util.h> #include <zephyr/spinlock.h> #include <soc.h> #if defined(CONFIG_PINCTRL) #include <zephyr/drivers/pinctrl.h> #endif #if defined(CONFIG_SPI_PL022_DMA) #include <zephyr/drivers/dma.h> #endif #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(spi_pl022); #include "spi_context.h" #define SSP_MASK(regname, name) GENMASK(SSP_##regname##_##name##_MSB, SSP_##regname##_##name##_LSB) /* PL022 Register definitions */ /* * Macros to access SSP Registers with their offsets */ #define SSP_CR0(r) (r + 0x000) #define SSP_CR1(r) (r + 0x004) #define SSP_DR(r) (r + 0x008) #define SSP_SR(r) (r + 0x00C) #define SSP_CPSR(r) (r + 0x010) #define SSP_IMSC(r) (r + 0x014) #define SSP_RIS(r) (r + 0x018) #define SSP_MIS(r) (r + 0x01C) #define SSP_ICR(r) (r + 0x020) #define SSP_DMACR(r) (r + 0x024) /* * Control Register 0 */ #define SSP_CR0_SCR_MSB 15 #define SSP_CR0_SCR_LSB 8 #define SSP_CR0_SPH_MSB 7 #define SSP_CR0_SPH_LSB 7 #define SSP_CR0_SPO_MSB 6 #define SSP_CR0_SPO_LSB 6 #define SSP_CR0_FRF_MSB 5 #define SSP_CR0_FRF_LSB 4 #define SSP_CR0_DSS_MSB 3 #define SSP_CR0_DSS_LSB 0 /* Data size select */ #define SSP_CR0_MASK_DSS SSP_MASK(CR0, DSS) /* Frame format */ #define SSP_CR0_MASK_FRF SSP_MASK(CR0, FRF) /* Polarity */ #define SSP_CR0_MASK_SPO SSP_MASK(CR0, SPO) /* Phase */ #define SSP_CR0_MASK_SPH SSP_MASK(CR0, SPH) /* Serial Clock Rate */ #define SSP_CR0_MASK_SCR SSP_MASK(CR0, SCR) /* * Control Register 1 */ #define SSP_CR1_SOD_MSB 3 #define SSP_CR1_SOD_LSB 3 #define SSP_CR1_MS_MSB 2 #define SSP_CR1_MS_LSB 2 #define SSP_CR1_SSE_MSB 1 #define SSP_CR1_SSE_LSB 1 #define SSP_CR1_LBM_MSB 0 #define SSP_CR1_LBM_LSB 0 /* Loopback Mode */ #define SSP_CR1_MASK_LBM SSP_MASK(CR1, LBM) /* Port Enable */ #define 
SSP_CR1_MASK_SSE SSP_MASK(CR1, SSE) /* Controller/Peripheral (Master/Slave) select */ #define SSP_CR1_MASK_MS SSP_MASK(CR1, MS) /* Peripheral (Slave) mode output disabled */ #define SSP_CR1_MASK_SOD SSP_MASK(CR1, SOD) /* * Status Register */ #define SSP_SR_BSY_MSB 4 #define SSP_SR_BSY_LSB 4 #define SSP_SR_RFF_MSB 3 #define SSP_SR_RFF_LSB 3 #define SSP_SR_RNE_MSB 2 #define SSP_SR_RNE_LSB 2 #define SSP_SR_TNF_MSB 1 #define SSP_SR_TNF_LSB 1 #define SSP_SR_TFE_MSB 0 #define SSP_SR_TFE_LSB 0 /* TX FIFO empty */ #define SSP_SR_MASK_TFE SSP_MASK(SR, TFE) /* TX FIFO not full */ #define SSP_SR_MASK_TNF SSP_MASK(SR, TNF) /* RX FIFO not empty */ #define SSP_SR_MASK_RNE SSP_MASK(SR, RNE) /* RX FIFO full */ #define SSP_SR_MASK_RFF SSP_MASK(SR, RFF) /* Busy Flag */ #define SSP_SR_MASK_BSY SSP_MASK(SR, BSY) /* * Clock Prescale Register */ #define SSP_CPSR_CPSDVSR_MSB 7 #define SSP_CPSR_CPSDVSR_LSB 0 /* Clock prescale divider */ #define SSP_CPSR_MASK_CPSDVSR SSP_MASK(CPSR, CPSDVSR) /* * Interrupt Mask Set/Clear Register */ #define SSP_IMSC_TXIM_MSB 3 #define SSP_IMSC_TXIM_LSB 3 #define SSP_IMSC_RXIM_MSB 2 #define SSP_IMSC_RXIM_LSB 2 #define SSP_IMSC_RTIM_MSB 1 #define SSP_IMSC_RTIM_LSB 1 #define SSP_IMSC_RORIM_MSB 0 #define SSP_IMSC_RORIM_LSB 0 /* Receive Overrun Interrupt mask */ #define SSP_IMSC_MASK_RORIM SSP_MASK(IMSC, RORIM) /* Receive timeout Interrupt mask */ #define SSP_IMSC_MASK_RTIM SSP_MASK(IMSC, RTIM) /* Receive FIFO Interrupt mask */ #define SSP_IMSC_MASK_RXIM SSP_MASK(IMSC, RXIM) /* Transmit FIFO Interrupt mask */ #define SSP_IMSC_MASK_TXIM SSP_MASK(IMSC, TXIM) /* * Raw Interrupt Status Register */ #define SSP_RIS_TXRIS_MSB 3 #define SSP_RIS_TXRIS_LSB 3 #define SSP_RIS_RXRIS_MSB 2 #define SSP_RIS_RXRIS_LSB 2 #define SSP_RIS_RTRIS_MSB 1 #define SSP_RIS_RTRIS_LSB 1 #define SSP_RIS_RORRIS_MSB 0 #define SSP_RIS_RORRIS_LSB 0 /* Receive Overrun Raw Interrupt status */ #define SSP_RIS_MASK_RORRIS SSP_MASK(RIS, RORRIS) /* Receive Timeout Raw Interrupt status */ #define 
SSP_RIS_MASK_RTRIS SSP_MASK(RIS, RTRIS) /* Receive FIFO Raw Interrupt status */ #define SSP_RIS_MASK_RXRIS SSP_MASK(RIS, RXRIS) /* Transmit FIFO Raw Interrupt status */ #define SSP_RIS_MASK_TXRIS SSP_MASK(RIS, TXRIS) /* * Masked Interrupt Status Register */ #define SSP_MIS_TXMIS_MSB 3 #define SSP_MIS_TXMIS_LSB 3 #define SSP_MIS_RXMIS_MSB 2 #define SSP_MIS_RXMIS_LSB 2 #define SSP_MIS_RTMIS_MSB 1 #define SSP_MIS_RTMIS_LSB 1 #define SSP_MIS_RORMIS_MSB 0 #define SSP_MIS_RORMIS_LSB 0 /* Receive Overrun Masked Interrupt status */ #define SSP_MIS_MASK_RORMIS SSP_MASK(MIS, RORMIS) /* Receive Timeout Masked Interrupt status */ #define SSP_MIS_MASK_RTMIS SSP_MASK(MIS, RTMIS) /* Receive FIFO Masked Interrupt status */ #define SSP_MIS_MASK_RXMIS SSP_MASK(MIS, RXMIS) /* Transmit FIFO Masked Interrupt status */ #define SSP_MIS_MASK_TXMIS SSP_MASK(MIS, TXMIS) /* * Interrupt Clear Register */ #define SSP_ICR_RTIC_MSB 1 #define SSP_ICR_RTIC_LSB 1 #define SSP_ICR_RORIC_MSB 0 #define SSP_ICR_RORIC_LSB 0 /* Receive Overrun Raw Clear Interrupt bit */ #define SSP_ICR_MASK_RORIC SSP_MASK(ICR, RORIC) /* Receive Timeout Clear Interrupt bit */ #define SSP_ICR_MASK_RTIC SSP_MASK(ICR, RTIC) /* * DMA Control Register */ #define SSP_DMACR_TXDMAE_MSB 1 #define SSP_DMACR_TXDMAE_LSB 1 #define SSP_DMACR_RXDMAE_MSB 0 #define SSP_DMACR_RXDMAE_LSB 0 /* Receive DMA Enable bit */ #define SSP_DMACR_MASK_RXDMAE SSP_MASK(DMACR, RXDMAE) /* Transmit DMA Enable bit */ #define SSP_DMACR_MASK_TXDMAE SSP_MASK(DMACR, TXDMAE) /* End register definitions */ /* * Clock Parameter ranges */ #define CPSDVR_MIN 0x02 #define CPSDVR_MAX 0xFE #define SCR_MIN 0x00 #define SCR_MAX 0xFF /* Fifo depth */ #define SSP_FIFO_DEPTH 8 /* * Register READ/WRITE macros */ #define SSP_READ_REG(reg) (*((volatile uint32_t *)reg)) #define SSP_WRITE_REG(reg, val) (*((volatile uint32_t *)reg) = val) #define SSP_CLEAR_REG(reg, val) (*((volatile uint32_t *)reg) &= ~(val)) /* * Status check macros */ #define SSP_BUSY(reg) 
(SSP_READ_REG(SSP_SR(reg)) & SSP_SR_MASK_BSY) #define SSP_RX_FIFO_NOT_EMPTY(reg) (SSP_READ_REG(SSP_SR(reg)) & SSP_SR_MASK_RNE) #define SSP_TX_FIFO_EMPTY(reg) (SSP_READ_REG(SSP_SR(reg)) & SSP_SR_MASK_TFE) #define SSP_TX_FIFO_NOT_FULL(reg) (SSP_READ_REG(SSP_SR(reg)) & SSP_SR_MASK_TNF) #if defined(CONFIG_SPI_PL022_DMA) enum spi_pl022_dma_direction { TX = 0, RX, NUM_OF_DIRECTION }; struct spi_pl022_dma_config { const struct device *dev; uint32_t channel; uint32_t channel_config; uint32_t slot; }; struct spi_pl022_dma_data { struct dma_config config; struct dma_block_config block; uint32_t count; bool callbacked; }; #endif /* * Max frequency */ #define MAX_FREQ_CONTROLLER_MODE(pclk) ((pclk) / 2) #define MAX_FREQ_PERIPHERAL_MODE(pclk) ((pclk) / 12) struct spi_pl022_cfg { const uint32_t reg; const uint32_t pclk; const bool dma_enabled; #if defined(CONFIG_CLOCK_CONTROL) const struct device *clk_dev; const clock_control_subsys_t clk_id; #endif #if defined(CONFIG_RESET) const struct reset_dt_spec reset; #endif #if defined(CONFIG_PINCTRL) const struct pinctrl_dev_config *pincfg; #endif #if defined(CONFIG_SPI_PL022_INTERRUPT) void (*irq_config)(const struct device *port); #endif #if defined(CONFIG_SPI_PL022_DMA) const struct spi_pl022_dma_config dma[NUM_OF_DIRECTION]; #endif }; struct spi_pl022_data { struct spi_context ctx; uint32_t tx_count; uint32_t rx_count; struct k_spinlock lock; #if defined(CONFIG_SPI_PL022_DMA) struct spi_pl022_dma_data dma[NUM_OF_DIRECTION]; #endif }; #if defined(CONFIG_SPI_PL022_DMA) static uint32_t dummy_tx; static uint32_t dummy_rx; #endif /* Helper Functions */ static inline uint32_t spi_pl022_calc_prescale(const uint32_t pclk, const uint32_t baud) { uint32_t prescale; /* prescale only can take even number */ for (prescale = CPSDVR_MIN; prescale < CPSDVR_MAX; prescale += 2) { if (pclk < (prescale + 2) * CPSDVR_MAX * baud) { break; } } return prescale; } static inline uint32_t spi_pl022_calc_postdiv(const uint32_t pclk, const uint32_t baud, const 
uint32_t prescale) { uint32_t postdiv; for (postdiv = SCR_MAX + 1; postdiv > SCR_MIN + 1; --postdiv) { if (pclk / (prescale * (postdiv - 1)) > baud) { break; } } return postdiv - 1; } static int spi_pl022_configure(const struct device *dev, const struct spi_config *spicfg) { const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; const uint16_t op = spicfg->operation; uint32_t prescale; uint32_t postdiv; uint32_t pclk = 0; uint32_t cr0; uint32_t cr1; int ret; if (spi_context_configured(&data->ctx, spicfg)) { return 0; } #if defined(CONFIG_CLOCK_CONTROL) ret = clock_control_get_rate(cfg->clk_dev, cfg->clk_id, &pclk); if (ret < 0 || pclk == 0) { return -EINVAL; } #endif if (spicfg->frequency > MAX_FREQ_CONTROLLER_MODE(pclk)) { LOG_ERR("Frequency is up to %u in controller mode.", MAX_FREQ_CONTROLLER_MODE(pclk)); return -ENOTSUP; } if (op & SPI_TRANSFER_LSB) { LOG_ERR("LSB-first not supported"); return -ENOTSUP; } /* Half-duplex mode has not been implemented */ if (op & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } /* Peripheral mode has not been implemented */ if (SPI_OP_MODE_GET(op) != SPI_OP_MODE_MASTER) { LOG_ERR("Peripheral mode is not supported"); return -ENOTSUP; } /* Word sizes other than 8 bits has not been implemented */ if (SPI_WORD_SIZE_GET(op) != 8) { LOG_ERR("Word sizes other than 8 bits are not supported"); return -ENOTSUP; } /* configure registers */ prescale = spi_pl022_calc_prescale(pclk, spicfg->frequency); postdiv = spi_pl022_calc_postdiv(pclk, spicfg->frequency, prescale); cr0 = 0; cr0 |= (postdiv << SSP_CR0_SCR_LSB); cr0 |= (SPI_WORD_SIZE_GET(op) - 1); cr0 |= (op & SPI_MODE_CPOL) ? SSP_CR0_MASK_SPO : 0; cr0 |= (op & SPI_MODE_CPHA) ? SSP_CR0_MASK_SPH : 0; cr1 = 0; cr1 |= SSP_CR1_MASK_SSE; /* Always enable SPI */ cr1 |= (op & SPI_MODE_LOOP) ? 
SSP_CR1_MASK_LBM : 0; SSP_WRITE_REG(SSP_CPSR(cfg->reg), prescale); SSP_WRITE_REG(SSP_CR0(cfg->reg), cr0); SSP_WRITE_REG(SSP_CR1(cfg->reg), cr1); #if defined(CONFIG_SPI_PL022_INTERRUPT) if (!cfg->dma_enabled) { SSP_WRITE_REG(SSP_IMSC(cfg->reg), SSP_IMSC_MASK_RORIM | SSP_IMSC_MASK_RTIM | SSP_IMSC_MASK_RXIM); } #endif data->ctx.config = spicfg; return 0; } static inline bool spi_pl022_transfer_ongoing(struct spi_pl022_data *data) { return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx); } #if defined(CONFIG_SPI_PL022_DMA) static void spi_pl022_dma_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status); static size_t spi_pl022_dma_enabled_num(const struct device *dev) { const struct spi_pl022_cfg *cfg = dev->config; return cfg->dma_enabled ? 2 : 0; } static uint32_t spi_pl022_dma_setup(const struct device *dev, const uint32_t dir) { const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; struct dma_config *dma_cfg = &data->dma[dir].config; struct dma_block_config *block_cfg = &data->dma[dir].block; const struct spi_pl022_dma_config *dma = &cfg->dma[dir]; int ret; memset(dma_cfg, 0, sizeof(struct dma_config)); memset(block_cfg, 0, sizeof(struct dma_block_config)); dma_cfg->source_burst_length = 1; dma_cfg->dest_burst_length = 1; dma_cfg->user_data = (void *)dev; dma_cfg->block_count = 1U; dma_cfg->head_block = block_cfg; dma_cfg->dma_slot = cfg->dma[dir].slot; dma_cfg->channel_direction = dir == TX ? 
MEMORY_TO_PERIPHERAL : PERIPHERAL_TO_MEMORY; if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) { dma_cfg->source_data_size = 1; dma_cfg->dest_data_size = 1; } else { dma_cfg->source_data_size = 2; dma_cfg->dest_data_size = 2; } block_cfg->block_size = spi_context_max_continuous_chunk(&data->ctx); if (dir == TX) { dma_cfg->dma_callback = spi_pl022_dma_callback; block_cfg->dest_address = SSP_DR(cfg->reg); block_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; if (spi_context_tx_buf_on(&data->ctx)) { block_cfg->source_address = (uint32_t)data->ctx.tx_buf; block_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { block_cfg->source_address = (uint32_t)&dummy_tx; block_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } } if (dir == RX) { dma_cfg->dma_callback = spi_pl022_dma_callback; block_cfg->source_address = SSP_DR(cfg->reg); block_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; if (spi_context_rx_buf_on(&data->ctx)) { block_cfg->dest_address = (uint32_t)data->ctx.rx_buf; block_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { block_cfg->dest_address = (uint32_t)&dummy_rx; block_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } } ret = dma_config(dma->dev, dma->channel, dma_cfg); if (ret < 0) { LOG_ERR("dma_config %p failed %d\n", dma->dev, ret); return ret; } data->dma[dir].callbacked = false; ret = dma_start(dma->dev, dma->channel); if (ret < 0) { LOG_ERR("dma_start %p failed %d\n", dma->dev, ret); return ret; } return 0; } static int spi_pl022_start_dma_transceive(const struct device *dev) { const struct spi_pl022_cfg *cfg = dev->config; int ret = 0; SSP_CLEAR_REG(SSP_DMACR(cfg->reg), SSP_DMACR_MASK_RXDMAE | SSP_DMACR_MASK_TXDMAE); for (size_t i = 0; i < spi_pl022_dma_enabled_num(dev); i++) { ret = spi_pl022_dma_setup(dev, i); if (ret < 0) { goto on_error; } } SSP_WRITE_REG(SSP_DMACR(cfg->reg), SSP_DMACR_MASK_RXDMAE | SSP_DMACR_MASK_TXDMAE); on_error: if (ret < 0) { for (size_t i = 0; i < spi_pl022_dma_enabled_num(dev); i++) { dma_stop(cfg->dma[i].dev, 
cfg->dma[i].channel); } } return ret; } static bool spi_pl022_chunk_transfer_finished(const struct device *dev) { struct spi_pl022_data *data = dev->data; struct spi_pl022_dma_data *dma = data->dma; const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx); return (MIN(dma[TX].count, dma[RX].count) >= chunk_len); } static void spi_pl022_complete(const struct device *dev, int status) { struct spi_pl022_data *data = dev->data; const struct spi_pl022_cfg *cfg = dev->config; for (size_t i = 0; i < spi_pl022_dma_enabled_num(dev); i++) { dma_stop(cfg->dma[i].dev, cfg->dma[i].channel); } spi_context_complete(&data->ctx, dev, status); } static void spi_pl022_dma_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status) { const struct device *dev = (const struct device *)arg; const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; bool complete = false; k_spinlock_key_t key; size_t chunk_len; int err = 0; if (status < 0) { key = k_spin_lock(&data->lock); LOG_ERR("dma:%p ch:%d callback gets error: %d", dma_dev, channel, status); spi_pl022_complete(dev, status); k_spin_unlock(&data->lock, key); return; } key = k_spin_lock(&data->lock); chunk_len = spi_context_max_continuous_chunk(&data->ctx); for (size_t i = 0; i < ARRAY_SIZE(cfg->dma); i++) { if (dma_dev == cfg->dma[i].dev && channel == cfg->dma[i].channel) { data->dma[i].count += chunk_len; data->dma[i].callbacked = true; } } /* Check transfer finished. * The transmission of this chunk is complete if both the dma[TX].count * and the dma[RX].count reach greater than or equal to the chunk_len. * chunk_len is zero here means the transfer is already complete. 
*/ if (spi_pl022_chunk_transfer_finished(dev)) { if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) { spi_context_update_tx(&data->ctx, 1, chunk_len); spi_context_update_rx(&data->ctx, 1, chunk_len); } else { spi_context_update_tx(&data->ctx, 2, chunk_len); spi_context_update_rx(&data->ctx, 2, chunk_len); } if (spi_pl022_transfer_ongoing(data)) { /* Next chunk is available, reset the count and * continue processing */ data->dma[TX].count = 0; data->dma[RX].count = 0; } else { /* All data is processed, complete the process */ complete = true; } } if (!complete && data->dma[TX].callbacked && data->dma[RX].callbacked) { err = spi_pl022_start_dma_transceive(dev); if (err) { complete = true; } } if (complete) { spi_pl022_complete(dev, err); } k_spin_unlock(&data->lock, key); } #endif /* DMA */ #if defined(CONFIG_SPI_PL022_INTERRUPT) static void spi_pl022_async_xfer(const struct device *dev) { const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; struct spi_context *ctx = &data->ctx; /* Process by per chunk */ size_t chunk_len = spi_context_max_continuous_chunk(ctx); uint32_t txrx; /* Read RX FIFO */ while (SSP_RX_FIFO_NOT_EMPTY(cfg->reg) && (data->rx_count < chunk_len)) { txrx = SSP_READ_REG(SSP_DR(cfg->reg)); /* Discard received data if rx buffer not assigned */ if (ctx->rx_buf) { *(((uint8_t *)ctx->rx_buf) + data->rx_count) = (uint8_t)txrx; } data->rx_count++; } /* Check transfer finished. * The transmission of this chunk is complete if both the tx_count * and the rx_count reach greater than or equal to the chunk_len. * chunk_len is zero here means the transfer is already complete. 
*/ if (MIN(data->tx_count, data->rx_count) >= chunk_len && chunk_len > 0) { spi_context_update_tx(ctx, 1, chunk_len); spi_context_update_rx(ctx, 1, chunk_len); if (spi_pl022_transfer_ongoing(data)) { /* Next chunk is available, reset the count and continue processing */ data->tx_count = 0; data->rx_count = 0; chunk_len = spi_context_max_continuous_chunk(ctx); } else { /* All data is processed, complete the process */ spi_context_complete(ctx, dev, 0); return; } } /* Fill up TX FIFO */ for (uint32_t i = 0; i < SSP_FIFO_DEPTH; i++) { if ((data->tx_count < chunk_len) && SSP_TX_FIFO_NOT_FULL(cfg->reg)) { /* Send 0 in the case of read only operation */ txrx = 0; if (ctx->tx_buf) { txrx = *(((uint8_t *)ctx->tx_buf) + data->tx_count); } SSP_WRITE_REG(SSP_DR(cfg->reg), txrx); data->tx_count++; } else { break; } } } static void spi_pl022_start_async_xfer(const struct device *dev) { const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; /* Ensure writable */ while (!SSP_TX_FIFO_EMPTY(cfg->reg)) ; /* Drain RX FIFO */ while (SSP_RX_FIFO_NOT_EMPTY(cfg->reg)) SSP_READ_REG(SSP_DR(cfg->reg)); data->tx_count = 0; data->rx_count = 0; SSP_WRITE_REG(SSP_ICR(cfg->reg), SSP_ICR_MASK_RORIC | SSP_ICR_MASK_RTIC); spi_pl022_async_xfer(dev); } static void spi_pl022_isr(const struct device *dev) { const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t mis = SSP_READ_REG(SSP_MIS(cfg->reg)); if (mis & SSP_MIS_MASK_RORMIS) { SSP_WRITE_REG(SSP_IMSC(cfg->reg), 0); spi_context_complete(ctx, dev, -EIO); } else { spi_pl022_async_xfer(dev); } SSP_WRITE_REG(SSP_ICR(cfg->reg), SSP_ICR_MASK_RORIC | SSP_ICR_MASK_RTIC); } #else static void spi_pl022_xfer(const struct device *dev) { const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx); const void *txbuf = data->ctx.tx_buf; void *rxbuf = 
data->ctx.rx_buf; uint32_t txrx; size_t fifo_cnt = 0; data->tx_count = 0; data->rx_count = 0; /* Ensure writable */ while (!SSP_TX_FIFO_EMPTY(cfg->reg)) { ; } /* Drain RX FIFO */ while (SSP_RX_FIFO_NOT_EMPTY(cfg->reg)) SSP_READ_REG(SSP_DR(cfg->reg)); while (data->rx_count < chunk_len || data->tx_count < chunk_len) { /* Fill up fifo with available TX data */ while (SSP_TX_FIFO_NOT_FULL(cfg->reg) && data->tx_count < chunk_len && fifo_cnt < SSP_FIFO_DEPTH) { /* Send 0 in the case of read only operation */ txrx = 0; if (txbuf) { txrx = ((uint8_t *)txbuf)[data->tx_count]; } SSP_WRITE_REG(SSP_DR(cfg->reg), txrx); data->tx_count++; fifo_cnt++; } while (data->rx_count < chunk_len && fifo_cnt > 0) { if (!SSP_RX_FIFO_NOT_EMPTY(cfg->reg)) { continue; } txrx = SSP_READ_REG(SSP_DR(cfg->reg)); /* Discard received data if rx buffer not assigned */ if (rxbuf) { ((uint8_t *)rxbuf)[data->rx_count] = (uint8_t)txrx; } data->rx_count++; fifo_cnt--; } } } #endif static int spi_pl022_transceive_impl(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; struct spi_context *ctx = &data->ctx; int ret; spi_context_lock(&data->ctx, (cb ? 
true : false), cb, userdata, config); ret = spi_pl022_configure(dev, config); if (ret < 0) { goto error; } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(ctx, true); if (cfg->dma_enabled) { #if defined(CONFIG_SPI_PL022_DMA) for (size_t i = 0; i < ARRAY_SIZE(data->dma); i++) { struct dma_status stat = {.busy = true}; dma_stop(cfg->dma[i].dev, cfg->dma[i].channel); while (stat.busy) { dma_get_status(cfg->dma[i].dev, cfg->dma[i].channel, &stat); } data->dma[i].count = 0; } ret = spi_pl022_start_dma_transceive(dev); if (ret < 0) { spi_context_cs_control(ctx, false); goto error; } ret = spi_context_wait_for_completion(ctx); #endif } else #if defined(CONFIG_SPI_PL022_INTERRUPT) { spi_pl022_start_async_xfer(dev); ret = spi_context_wait_for_completion(ctx); } #else { do { spi_pl022_xfer(dev); spi_context_update_tx(ctx, 1, data->tx_count); spi_context_update_rx(ctx, 1, data->rx_count); } while (spi_pl022_transfer_ongoing(data)); #if defined(CONFIG_SPI_ASYNC) spi_context_complete(&data->ctx, dev, ret); #endif } #endif spi_context_cs_control(ctx, false); error: spi_context_release(&data->ctx, ret); return ret; } /* API Functions */ static int spi_pl022_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return spi_pl022_transceive_impl(dev, config, tx_bufs, rx_bufs, NULL, NULL); } #if defined(CONFIG_SPI_ASYNC) static int spi_pl022_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return spi_pl022_transceive_impl(dev, config, tx_bufs, rx_bufs, cb, userdata); } #endif static int spi_pl022_release(const struct device *dev, const struct spi_config *config) { struct spi_pl022_data *data = dev->data; spi_context_unlock_unconditionally(&data->ctx); return 0; } static const struct spi_driver_api spi_pl022_api = { .transceive = 
spi_pl022_transceive, #if defined(CONFIG_SPI_ASYNC) .transceive_async = spi_pl022_transceive_async, #endif .release = spi_pl022_release }; static int spi_pl022_init(const struct device *dev) { /* Initialize with lowest frequency */ const struct spi_config spicfg = { .frequency = 0, .operation = SPI_WORD_SET(8), .slave = 0, }; const struct spi_pl022_cfg *cfg = dev->config; struct spi_pl022_data *data = dev->data; int ret; #if defined(CONFIG_CLOCK_CONTROL) if (cfg->clk_dev) { ret = clock_control_on(cfg->clk_dev, cfg->clk_id); if (ret < 0) { LOG_ERR("Failed to enable the clock"); return ret; } } #endif #if defined(CONFIG_RESET) if (cfg->reset.dev) { ret = reset_line_toggle_dt(&cfg->reset); if (ret < 0) { return ret; } } #endif #if defined(CONFIG_PINCTRL) ret = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("Failed to apply pinctrl state"); return ret; } #endif if (cfg->dma_enabled) { #if defined(CONFIG_SPI_PL022_DMA) for (size_t i = 0; i < spi_pl022_dma_enabled_num(dev); i++) { uint32_t ch_filter = BIT(cfg->dma[i].channel); if (!device_is_ready(cfg->dma[i].dev)) { LOG_ERR("DMA %s not ready", cfg->dma[i].dev->name); return -ENODEV; } ret = dma_request_channel(cfg->dma[i].dev, &ch_filter); if (ret < 0) { LOG_ERR("dma_request_channel failed %d", ret); return ret; } } #endif } else { #if defined(CONFIG_SPI_PL022_INTERRUPT) cfg->irq_config(dev); #endif } ret = spi_pl022_configure(dev, &spicfg); if (ret < 0) { LOG_ERR("Failed to configure spi"); return ret; } ret = spi_context_cs_configure_all(&data->ctx); if (ret < 0) { LOG_ERR("Failed to spi_context configure"); return ret; } /* Make sure the context is unlocked */ spi_context_unlock_unconditionally(&data->ctx); return 0; } #define DMA_INITIALIZER(idx, dir) \ { \ .dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(idx, dir)), \ .channel = DT_INST_DMAS_CELL_BY_NAME(idx, dir, channel), \ .slot = DT_INST_DMAS_CELL_BY_NAME(idx, dir, slot), \ .channel_config = DT_INST_DMAS_CELL_BY_NAME(idx, dir, 
channel_config), \ } #define DMAS_DECL(idx) \ { \ COND_CODE_1(DT_INST_DMAS_HAS_NAME(idx, tx), (DMA_INITIALIZER(idx, tx)), ({0})), \ COND_CODE_1(DT_INST_DMAS_HAS_NAME(idx, rx), (DMA_INITIALIZER(idx, rx)), ({0})), \ } #define DMAS_ENABLED(idx) (DT_INST_DMAS_HAS_NAME(idx, tx) && DT_INST_DMAS_HAS_NAME(idx, rx)) #define CLOCK_ID_DECL(idx) \ IF_ENABLED(DT_INST_NODE_HAS_PROP(0, clocks), \ (static const clock_control_subsys_t pl022_clk_id##idx = \ (clock_control_subsys_t)DT_INST_PHA_BY_IDX(idx, clocks, 0, clk_id);)) \ #define SPI_PL022_INIT(idx) \ IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(idx);)) \ IF_ENABLED(CONFIG_SPI_PL022_INTERRUPT, \ (static void spi_pl022_irq_config_##idx(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(idx), DT_INST_IRQ(idx, priority), \ spi_pl022_isr, DEVICE_DT_INST_GET(idx), 0); \ irq_enable(DT_INST_IRQN(idx)); \ })) \ IF_ENABLED(CONFIG_CLOCK_CONTROL, (CLOCK_ID_DECL(idx))) \ static struct spi_pl022_data spi_pl022_data_##idx = { \ SPI_CONTEXT_INIT_LOCK(spi_pl022_data_##idx, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_pl022_data_##idx, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(idx), ctx)}; \ static struct spi_pl022_cfg spi_pl022_cfg_##idx = { \ .reg = DT_INST_REG_ADDR(idx), \ IF_ENABLED(CONFIG_CLOCK_CONTROL, (IF_ENABLED(DT_INST_NODE_HAS_PROP(0, clocks), \ (.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)), \ .clk_id = pl022_clk_id##idx,)))) \ IF_ENABLED(CONFIG_RESET, (IF_ENABLED(DT_INST_NODE_HAS_PROP(0, resets), \ (.reset = RESET_DT_SPEC_INST_GET(idx),)))) \ IF_ENABLED(CONFIG_PINCTRL, (.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx),)) \ IF_ENABLED(CONFIG_SPI_PL022_DMA, (.dma = DMAS_DECL(idx),)) COND_CODE_1( \ CONFIG_SPI_PL022_DMA, (.dma_enabled = DMAS_ENABLED(idx),), \ (.dma_enabled = false,)) \ IF_ENABLED(CONFIG_SPI_PL022_INTERRUPT, \ (.irq_config = spi_pl022_irq_config_##idx,))}; \ DEVICE_DT_INST_DEFINE(idx, spi_pl022_init, NULL, &spi_pl022_data_##idx, \ &spi_pl022_cfg_##idx, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ 
&spi_pl022_api); DT_INST_FOREACH_STATUS_OKAY(SPI_PL022_INIT) ```
/content/code_sandbox/drivers/spi/spi_pl022.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,231
```unknown config SPI_GD32 bool "Gigadevice GD32 SPI driver" default y depends on DT_HAS_GD_GD32_SPI_ENABLED help Enables Gigadevice GD32 SPI driver. if SPI_GD32 config SPI_GD32_INTERRUPT bool "GD32 MCU SPI Interrupt Support" default y if SPI_ASYNC default y if SPI_GD32_DMA help Enable the interrupt driven mode for SPI instances config SPI_GD32_DMA bool "GD32 MCU SPI DMA Support" select DMA select SPI_GD32_INTERRUPT help Use the DMA for SPI transfer that enable dma channels in their device tree node. endif # SPI_GD32 ```
/content/code_sandbox/drivers/spi/Kconfig.gd32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
150
```c /* * */ #define DT_DRV_COMPAT ti_cc13xx_cc26xx_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_cc13xx_cc26xx); #include <zephyr/drivers/spi.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <driverlib/prcm.h> #include <driverlib/ssi.h> #include <ti/drivers/Power.h> #include <ti/drivers/power/PowerCC26X2.h> #include "spi_context.h" struct spi_cc13xx_cc26xx_config { uint32_t base; const struct pinctrl_dev_config *pcfg; }; struct spi_cc13xx_cc26xx_data { struct spi_context ctx; }; #define CPU_FREQ DT_PROP(DT_PATH(cpus, cpu_0), clock_frequency) static int spi_cc13xx_cc26xx_configure(const struct device *dev, const struct spi_config *config) { const struct spi_cc13xx_cc26xx_config *cfg = dev->config; struct spi_cc13xx_cc26xx_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t prot; int ret; if (spi_context_configured(ctx, config)) { return 0; } if (config->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } /* Slave mode has not been implemented */ if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) { LOG_ERR("Slave mode is not supported"); return -ENOTSUP; } /* Word sizes other than 8 bits has not been implemented */ if (SPI_WORD_SIZE_GET(config->operation) != 8) { LOG_ERR("Word sizes other than 8 bits are not supported"); return -ENOTSUP; } if (config->operation & SPI_TRANSFER_LSB) { LOG_ERR("Transfer LSB first mode is not supported"); return -EINVAL; } if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Multiple lines are not supported"); return -EINVAL; } if (config->operation & SPI_CS_ACTIVE_HIGH && !spi_cs_is_gpio(config)) { LOG_ERR("Active high CS requires emulation through a GPIO line."); return -EINVAL; } if (config->frequency < 2000000) { LOG_ERR("Frequencies lower than 2 MHz are not supported"); return -EINVAL; } if (2 * 
config->frequency > CPU_FREQ) { LOG_ERR("Frequency greater than supported in master mode"); return -EINVAL; } if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) { if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) { prot = SSI_FRF_MOTO_MODE_3; } else { prot = SSI_FRF_MOTO_MODE_2; } } else { if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) { prot = SSI_FRF_MOTO_MODE_1; } else { prot = SSI_FRF_MOTO_MODE_0; } } ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("applying SPI pinctrl state failed"); return ret; } ctx->config = config; /* Disable SSI before making configuration changes */ SSIDisable(cfg->base); /* Configure SSI */ SSIConfigSetExpClk(cfg->base, CPU_FREQ, prot, SSI_MODE_MASTER, config->frequency, 8); if (SPI_MODE_GET(config->operation) & SPI_MODE_LOOP) { sys_set_bit(cfg->base + SSI_O_CR1, 0); } /* Re-enable SSI after making configuration changes */ SSIEnable(cfg->base); return 0; } static int spi_cc13xx_cc26xx_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_cc13xx_cc26xx_config *cfg = dev->config; struct spi_cc13xx_cc26xx_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t txd, rxd; int err; spi_context_lock(ctx, false, NULL, NULL, config); pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); err = spi_cc13xx_cc26xx_configure(dev, config); if (err) { goto done; } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(ctx, true); do { if (spi_context_tx_buf_on(ctx)) { txd = *ctx->tx_buf; } else { txd = 0U; } SSIDataPut(cfg->base, txd); spi_context_update_tx(ctx, 1, 1); SSIDataGet(cfg->base, &rxd); if (spi_context_rx_buf_on(ctx)) { *ctx->rx_buf = rxd; } spi_context_update_rx(ctx, 1, 1); } while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)); spi_context_cs_control(ctx, false); done: pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); 
spi_context_release(ctx, err); return err; } static int spi_cc13xx_cc26xx_release(const struct device *dev, const struct spi_config *config) { const struct spi_cc13xx_cc26xx_config *cfg = dev->config; struct spi_cc13xx_cc26xx_data *data = dev->data; struct spi_context *ctx = &data->ctx; if (!spi_context_configured(ctx, config)) { return -EINVAL; } if (SSIBusy(cfg->base)) { return -EBUSY; } spi_context_unlock_unconditionally(ctx); return 0; } #ifdef CONFIG_PM_DEVICE static int spi_cc13xx_cc26xx_pm_action(const struct device *dev, enum pm_device_action action) { const struct spi_cc13xx_cc26xx_config *config = dev->config; switch (action) { case PM_DEVICE_ACTION_RESUME: if (config->base == DT_INST_REG_ADDR(0)) { Power_setDependency(PowerCC26XX_PERIPH_SSI0); } else { Power_setDependency(PowerCC26XX_PERIPH_SSI1); } break; case PM_DEVICE_ACTION_SUSPEND: SSIDisable(config->base); /* * Release power dependency */ if (config->base == DT_INST_REG_ADDR(0)) { Power_releaseDependency(PowerCC26XX_PERIPH_SSI0); } else { Power_releaseDependency(PowerCC26XX_PERIPH_SSI1); } break; default: return -ENOTSUP; } return 0; } #endif /* CONFIG_PM_DEVICE */ static const struct spi_driver_api spi_cc13xx_cc26xx_driver_api = { .transceive = spi_cc13xx_cc26xx_transceive, .release = spi_cc13xx_cc26xx_release, }; #ifdef CONFIG_PM #define SPI_CC13XX_CC26XX_POWER_SPI(n) \ do { \ /* Set Power dependencies & constraints */ \ if (DT_INST_REG_ADDR(n) == 0x40000000) { \ Power_setDependency(PowerCC26XX_PERIPH_SSI0); \ } else { \ Power_setDependency(PowerCC26XX_PERIPH_SSI1); \ } \ } while (false) #else #define SPI_CC13XX_CC26XX_POWER_SPI(n) \ do { \ uint32_t domain, periph; \ \ /* Enable UART power domain */ \ if (DT_INST_REG_ADDR(n) == 0x40000000) { \ domain = PRCM_DOMAIN_SERIAL; \ periph = PRCM_PERIPH_SSI0; \ } else { \ domain = PRCM_DOMAIN_PERIPH; \ periph = PRCM_PERIPH_SSI1; \ } \ /* Enable SSI##n power domain */ \ PRCMPowerDomainOn(domain); \ \ /* Enable SSI##n peripherals */ \ 
PRCMPeripheralRunEnable(periph); \ PRCMPeripheralSleepEnable(periph); \ PRCMPeripheralDeepSleepEnable(periph); \ \ /* Load PRCM settings */ \ PRCMLoadSet(); \ while (!PRCMLoadGet()) { \ continue; \ } \ \ /* SSI should not be accessed until power domain is on. */\ while (PRCMPowerDomainsAllOn(domain) != \ PRCM_DOMAIN_POWER_ON) { \ continue; \ } \ } while (false) #endif #define SPI_CC13XX_CC26XX_DEVICE_INIT(n) \ PM_DEVICE_DT_INST_DEFINE(n, spi_cc13xx_cc26xx_pm_action); \ \ DEVICE_DT_INST_DEFINE(n, \ spi_cc13xx_cc26xx_init_##n, \ PM_DEVICE_DT_INST_GET(n), \ &spi_cc13xx_cc26xx_data_##n, &spi_cc13xx_cc26xx_config_##n, \ POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ &spi_cc13xx_cc26xx_driver_api) #define SPI_CC13XX_CC26XX_INIT_FUNC(n) \ static int spi_cc13xx_cc26xx_init_##n(const struct device *dev) \ { \ struct spi_cc13xx_cc26xx_data *data = dev->data; \ int err; \ SPI_CC13XX_CC26XX_POWER_SPI(n); \ \ err = spi_context_cs_configure_all(&data->ctx); \ if (err < 0) { \ return err; \ } \ \ spi_context_unlock_unconditionally(&data->ctx); \ \ return 0; \ } #define SPI_CC13XX_CC26XX_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ SPI_CC13XX_CC26XX_INIT_FUNC(n) \ \ static const struct spi_cc13xx_cc26xx_config \ spi_cc13xx_cc26xx_config_##n = { \ .base = DT_INST_REG_ADDR(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n) \ }; \ \ static struct spi_cc13xx_cc26xx_data \ spi_cc13xx_cc26xx_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_cc13xx_cc26xx_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_cc13xx_cc26xx_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ }; \ \ SPI_CC13XX_CC26XX_DEVICE_INIT(n); DT_INST_FOREACH_STATUS_OKAY(SPI_CC13XX_CC26XX_INIT) ```
/content/code_sandbox/drivers/spi/spi_cc13xx_cc26xx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,577
```c /* * */ #define DT_DRV_COMPAT cypress_psoc6_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_psoc6); #include <errno.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/spi.h> #include <soc.h> #include "spi_context.h" #include "cy_syslib.h" #include "cy_sysclk.h" #include "cy_scb_spi.h" #include "cy_sysint.h" #define SPI_CHIP_SELECT_COUNT 4 #define SPI_MAX_DATA_WIDTH 16 #define SPI_PSOC6_CLK_DIV_NUMBER 1 struct spi_psoc6_config { CySCB_Type *base; uint32_t periph_id; void (*irq_config_func)(const struct device *dev); const struct pinctrl_dev_config *pcfg; }; struct spi_psoc6_transfer { uint8_t *txData; uint8_t *rxData; size_t dataSize; }; struct spi_psoc6_data { struct spi_context ctx; struct cy_stc_scb_spi_config cfg; struct spi_psoc6_transfer xfer; }; static void spi_psoc6_transfer_next_packet(const struct device *dev) { const struct spi_psoc6_config *config = dev->config; struct spi_psoc6_data *data = dev->data; struct spi_context *ctx = &data->ctx; struct spi_psoc6_transfer *xfer = &data->xfer; uint32_t count; LOG_DBG("TX L: %d, RX L: %d", ctx->tx_len, ctx->rx_len); if ((ctx->tx_len == 0U) && (ctx->rx_len == 0U)) { /* nothing left to rx or tx, we're done! */ xfer->dataSize = 0U; spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, 0U); return; } if (ctx->tx_len == 0U) { /* rx only, nothing to tx */ xfer->txData = NULL; xfer->rxData = ctx->rx_buf; xfer->dataSize = ctx->rx_len; } else if (ctx->rx_len == 0U) { /* tx only, nothing to rx */ xfer->txData = (uint8_t *) ctx->tx_buf; xfer->rxData = NULL; xfer->dataSize = ctx->tx_len; } else if (ctx->tx_len == ctx->rx_len) { /* rx and tx are the same length */ xfer->txData = (uint8_t *) ctx->tx_buf; xfer->rxData = ctx->rx_buf; xfer->dataSize = ctx->tx_len; } else if (ctx->tx_len > ctx->rx_len) { /* Break up the tx into multiple transfers so we don't have to * rx into a longer intermediate buffer. Leave chip select * active between transfers. 
*/ xfer->txData = (uint8_t *) ctx->tx_buf; xfer->rxData = ctx->rx_buf; xfer->dataSize = ctx->rx_len; } else { /* Break up the rx into multiple transfers so we don't have to * tx from a longer intermediate buffer. Leave chip select * active between transfers. */ xfer->txData = (uint8_t *) ctx->tx_buf; xfer->rxData = ctx->rx_buf; xfer->dataSize = ctx->tx_len; } if (xfer->txData != NULL) { if (Cy_SCB_SPI_WriteArray(config->base, xfer->txData, xfer->dataSize) != xfer->dataSize) { goto err; } } else { /* Need fill TX fifo with garbage to perform read. * This keeps logic simple and saves stack. * Use 0 as dummy data. */ for (count = 0U; count < xfer->dataSize; count++) { if (Cy_SCB_SPI_Write(config->base, 0U) == 0U) { goto err; } } } LOG_DBG("TRX L: %d", xfer->dataSize); return; err: /* no FIFO available to run the transfer */ xfer->dataSize = 0U; spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, -ENOMEM); } static void spi_psoc6_isr(const struct device *dev) { const struct spi_psoc6_config *config = dev->config; struct spi_psoc6_data *data = dev->data; Cy_SCB_ClearMasterInterrupt(config->base, CY_SCB_MASTER_INTR_SPI_DONE); /* extract data from RX FIFO */ if (data->xfer.rxData != NULL) { Cy_SCB_SPI_ReadArray(config->base, data->xfer.rxData, data->xfer.dataSize); } else { Cy_SCB_ClearRxFifo(config->base); } /* Set next data block */ spi_context_update_tx(&data->ctx, 1, data->xfer.dataSize); spi_context_update_rx(&data->ctx, 1, data->xfer.dataSize); /* Start next block * Since 1 byte at TX FIFO will start transfer data, let's try * minimize ISR recursion disabling all interrupt sources when add * data on TX FIFO */ Cy_SCB_SetMasterInterruptMask(config->base, 0U); spi_psoc6_transfer_next_packet(dev); if (data->xfer.dataSize > 0U) { Cy_SCB_SetMasterInterruptMask(config->base, CY_SCB_MASTER_INTR_SPI_DONE); } } static uint32_t spi_psoc6_get_freqdiv(uint32_t frequency) { uint32_t oversample; uint32_t bus_freq = 100000000UL; /* * TODO: Get PerBusSpeed when 
clocks are available to PSoC-6. * Currently the bus freq is fixed to 50Mhz and max SPI clk can be * 12.5MHz. */ for (oversample = 4; oversample < 16; oversample++) { if ((bus_freq / oversample) <= frequency) { break; } } /* Oversample [4, 16] */ return oversample; } static void spi_psoc6_master_get_defaults(struct cy_stc_scb_spi_config *cfg) { cfg->spiMode = CY_SCB_SPI_MASTER; cfg->subMode = CY_SCB_SPI_MOTOROLA; cfg->sclkMode = 0U; cfg->oversample = 0U; cfg->rxDataWidth = 0U; cfg->txDataWidth = 0U; cfg->enableMsbFirst = false; cfg->enableFreeRunSclk = false; cfg->enableInputFilter = false; cfg->enableMisoLateSample = false; cfg->enableTransferSeperation = false; cfg->ssPolarity = 0U; cfg->enableWakeFromSleep = false; cfg->rxFifoTriggerLevel = 0U; cfg->rxFifoIntEnableMask = 0U; cfg->txFifoTriggerLevel = 0U; cfg->txFifoIntEnableMask = 0U; cfg->masterSlaveIntEnableMask = 0U; } static int spi_psoc6_configure(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_psoc6_data *data = dev->data; uint32_t word_size; if (spi_context_configured(&data->ctx, spi_cfg)) { /* This configuration is already in use */ return 0; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (word_size > SPI_MAX_DATA_WIDTH) { LOG_ERR("Word size %d is greater than %d", word_size, SPI_MAX_DATA_WIDTH); return -EINVAL; } if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) { spi_psoc6_master_get_defaults(&data->cfg); if (spi_cfg->slave > SPI_CHIP_SELECT_COUNT) { LOG_ERR("Slave %d is greater than %d", spi_cfg->slave, SPI_CHIP_SELECT_COUNT); return -EINVAL; } data->cfg.rxDataWidth = data->cfg.txDataWidth = word_size; if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) { if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) { data->cfg.sclkMode = CY_SCB_SPI_CPHA1_CPOL1; } else { data->cfg.sclkMode = CY_SCB_SPI_CPHA1_CPOL0; } } else { if (SPI_MODE_GET(spi_cfg->operation) 
& SPI_MODE_CPOL) { data->cfg.sclkMode = CY_SCB_SPI_CPHA0_CPOL1; } else { data->cfg.sclkMode = CY_SCB_SPI_CPHA0_CPOL0; } } data->cfg.enableMsbFirst = !!!(spi_cfg->operation & SPI_TRANSFER_LSB); data->cfg.oversample = spi_psoc6_get_freqdiv(spi_cfg->frequency); data->ctx.config = spi_cfg; } else { /* Slave mode is not implemented yet. */ return -ENOTSUP; } return 0; } static void spi_psoc6_transceive_sync_loop(const struct device *dev) { const struct spi_psoc6_config *config = dev->config; struct spi_psoc6_data *data = dev->data; while (data->xfer.dataSize > 0U) { while (!Cy_SCB_IsTxComplete(config->base)) { ; } if (data->xfer.rxData != NULL) { Cy_SCB_SPI_ReadArray(config->base, data->xfer.rxData, data->xfer.dataSize); } else { Cy_SCB_ClearRxFifo(config->base); } spi_context_update_tx(&data->ctx, 1, data->xfer.dataSize); spi_context_update_rx(&data->ctx, 1, data->xfer.dataSize); spi_psoc6_transfer_next_packet(dev); } } static int spi_psoc6_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { const struct spi_psoc6_config *config = dev->config; struct spi_psoc6_data *data = dev->data; int ret; spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); LOG_DBG("\n\n"); ret = spi_psoc6_configure(dev, spi_cfg); if (ret) { goto out; } Cy_SCB_SPI_Init(config->base, &data->cfg, NULL); Cy_SCB_SPI_SetActiveSlaveSelect(config->base, spi_cfg->slave); Cy_SCB_SPI_Enable(config->base); spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(&data->ctx, true); spi_psoc6_transfer_next_packet(dev); if (asynchronous) { Cy_SCB_SetMasterInterruptMask(config->base, CY_SCB_MASTER_INTR_SPI_DONE); } else { spi_psoc6_transceive_sync_loop(dev); } ret = spi_context_wait_for_completion(&data->ctx); Cy_SCB_SPI_Disable(config->base, NULL); out: spi_context_release(&data->ctx, ret); return ret; } static int 
spi_psoc6_transceive_sync(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return spi_psoc6_transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_psoc6_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return spi_psoc6_transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int spi_psoc6_release(const struct device *dev, const struct spi_config *config) { struct spi_psoc6_data *data = dev->data; spi_context_unlock_unconditionally(&data->ctx); return 0; } static int spi_psoc6_init(const struct device *dev) { int err; const struct spi_psoc6_config *config = dev->config; struct spi_psoc6_data *data = dev->data; /* Configure dt provided device signals when available */ err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } Cy_SysClk_PeriphAssignDivider(config->periph_id, CY_SYSCLK_DIV_8_BIT, SPI_PSOC6_CLK_DIV_NUMBER); Cy_SysClk_PeriphSetDivider(CY_SYSCLK_DIV_8_BIT, SPI_PSOC6_CLK_DIV_NUMBER, 0U); Cy_SysClk_PeriphEnableDivider(CY_SYSCLK_DIV_8_BIT, SPI_PSOC6_CLK_DIV_NUMBER); #ifdef CONFIG_SPI_ASYNC config->irq_config_func(dev); #endif err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } return spi_psoc6_release(dev, NULL); } static const struct spi_driver_api spi_psoc6_driver_api = { .transceive = spi_psoc6_transceive_sync, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_psoc6_transceive_async, #endif .release = spi_psoc6_release, }; #define SPI_PSOC6_DEVICE_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static void spi_psoc6_spi##n##_irq_cfg(const struct device *port); \ static const struct spi_psoc6_config spi_psoc6_config_##n = { \ .base = (CySCB_Type *)DT_INST_REG_ADDR(n), \ .periph_id = DT_INST_PROP(n, 
peripheral_id), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .irq_config_func = spi_psoc6_spi##n##_irq_cfg, \ }; \ static struct spi_psoc6_data spi_psoc6_dev_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_psoc6_dev_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_psoc6_dev_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ }; \ DEVICE_DT_INST_DEFINE(n, spi_psoc6_init, NULL, \ &spi_psoc6_dev_data_##n, \ &spi_psoc6_config_##n, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_psoc6_driver_api); \ static void spi_psoc6_spi##n##_irq_cfg(const struct device *port) \ { \ CY_PSOC6_DT_INST_NVIC_INSTALL(n, \ spi_psoc6_isr); \ }; DT_INST_FOREACH_STATUS_OKAY(SPI_PSOC6_DEVICE_INIT) ```
/content/code_sandbox/drivers/spi/spi_psoc6.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,502
```c /* * */ #define DT_DRV_COMPAT nxp_imx_ecspi #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_mcux_ecspi, CONFIG_SPI_LOG_LEVEL); #include <zephyr/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/spi.h> #include <fsl_ecspi.h> #include "spi_context.h" #define SPI_MCUX_ECSPI_MAX_BURST 4096 struct spi_mcux_config { ECSPI_Type *base; const struct pinctrl_dev_config *pincfg; const struct device *clock_dev; clock_control_subsys_t clock_subsys; void (*irq_config_func)(const struct device *dev); }; struct spi_mcux_data { ecspi_master_handle_t handle; struct spi_context ctx; uint16_t dfs; uint16_t word_size; uint32_t rx_data; uint32_t tx_data; }; static inline uint16_t bytes_per_word(uint16_t bits_per_word) { if (bits_per_word <= 8U) { return 1U; } if (bits_per_word <= 16U) { return 2U; } return 4U; } static void spi_mcux_transfer_next_packet(const struct device *dev) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; ECSPI_Type *base = config->base; struct spi_context *ctx = &data->ctx; ecspi_transfer_t transfer; status_t status; if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) { /* nothing left to rx or tx, we're done! 
*/ spi_context_cs_control(&data->ctx, false); spi_context_complete(&data->ctx, dev, 0); return; } transfer.channel = ctx->config->slave; if (spi_context_rx_buf_on(ctx)) { transfer.rxData = &data->rx_data; } else { transfer.rxData = NULL; } if (spi_context_tx_buf_on(ctx)) { switch (data->dfs) { case 1U: data->tx_data = UNALIGNED_GET((uint8_t *)ctx->tx_buf); break; case 2U: data->tx_data = UNALIGNED_GET((uint16_t *)ctx->tx_buf); break; case 4U: data->tx_data = UNALIGNED_GET((uint32_t *)ctx->tx_buf); break; } transfer.txData = &data->tx_data; } else { transfer.txData = NULL; } transfer.dataSize = data->dfs; status = ECSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); if (status != kStatus_Success) { LOG_ERR("Transfer could not start"); spi_context_cs_control(&data->ctx, false); spi_context_complete(&data->ctx, dev, -EIO); } } static void spi_mcux_isr(const struct device *dev) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; ECSPI_Type *base = config->base; ECSPI_MasterTransferHandleIRQ(base, &data->handle); } static void spi_mcux_master_transfer_callback(ECSPI_Type *base, ecspi_master_handle_t *handle, status_t status, void *user_data) { const struct device *dev = (const struct device *)user_data; struct spi_mcux_data *data = dev->data; if (spi_context_rx_buf_on(&data->ctx)) { switch (data->dfs) { case 1: UNALIGNED_PUT(data->rx_data, (uint8_t *)data->ctx.rx_buf); break; case 2: UNALIGNED_PUT(data->rx_data, (uint16_t *)data->ctx.rx_buf); break; case 4: UNALIGNED_PUT(data->rx_data, (uint32_t *)data->ctx.rx_buf); break; } } spi_context_update_tx(&data->ctx, data->dfs, 1); spi_context_update_rx(&data->ctx, data->dfs, 1); spi_mcux_transfer_next_packet(dev); } static int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; ECSPI_Type *base = config->base; ecspi_master_config_t master_config; 
uint32_t clock_freq; uint16_t word_size; if (spi_context_configured(&data->ctx, spi_cfg)) { /* This configuration is already in use */ return 0; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (spi_cfg->operation & SPI_TRANSFER_LSB) { LOG_ERR("HW byte re-ordering not supported"); return -ENOTSUP; } if (spi_cfg->slave > kECSPI_Channel3) { LOG_ERR("Slave %d is greater than %d", spi_cfg->slave, kECSPI_Channel3); return -EINVAL; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { LOG_ERR("Failed to get clock rate"); return -EINVAL; } word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (0 == word_size || word_size > 32) { LOG_ERR("Invalid word size (0 < %d <= 32)", word_size); return -EINVAL; } ECSPI_MasterGetDefaultConfig(&master_config); master_config.channel = (ecspi_channel_source_t)spi_cfg->slave; master_config.channelConfig.polarity = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) ? kECSPI_PolarityActiveLow : kECSPI_PolarityActiveHigh; master_config.channelConfig.phase = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) ? 
kECSPI_ClockPhaseSecondEdge : kECSPI_ClockPhaseFirstEdge; master_config.baudRate_Bps = spi_cfg->frequency; master_config.burstLength = word_size; master_config.enableLoopback = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_LOOP); if (!spi_cs_is_gpio(spi_cfg)) { uint32_t clock_cycles = DIV_ROUND_UP(spi_cfg->cs.delay * USEC_PER_SEC, spi_cfg->frequency); if (clock_cycles > 63U) { LOG_ERR("CS delay is greater than 63 clock cycles (%u)", clock_cycles); return -EINVAL; } master_config.chipSelectDelay = (uint8_t)clock_cycles; } ECSPI_MasterInit(base, &master_config, clock_freq); ECSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_transfer_callback, (void *)dev); data->word_size = word_size; data->dfs = bytes_per_word(word_size); data->ctx.config = spi_cfg; return 0; } static int transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { struct spi_mcux_data *data = dev->data; int ret; spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); ret = spi_mcux_configure(dev, spi_cfg); if (ret) { goto out; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, data->dfs); spi_context_cs_control(&data->ctx, true); spi_mcux_transfer_next_packet(dev); ret = spi_context_wait_for_completion(&data->ctx); out: spi_context_release(&data->ctx, ret); return ret; } static int spi_mcux_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_mcux_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ 
static int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_mcux_data *data = dev->data; ARG_UNUSED(spi_cfg); spi_context_unlock_unconditionally(&data->ctx); return 0; } static int spi_mcux_init(const struct device *dev) { int ret; const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; config->irq_config_func(dev); ret = spi_context_cs_configure_all(&data->ctx); if (ret < 0) { return ret; } ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } spi_context_unlock_unconditionally(&data->ctx); return 0; } static const struct spi_driver_api spi_mcux_driver_api = { .transceive = spi_mcux_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_mcux_transceive_async, #endif .release = spi_mcux_release, }; #define SPI_MCUX_ECSPI_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static void spi_mcux_config_func_##n(const struct device *dev); \ \ static const struct spi_mcux_config spi_mcux_config_##n = { \ .base = (ECSPI_Type *) DT_INST_REG_ADDR(n), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ .irq_config_func = spi_mcux_config_func_##n, \ }; \ \ static struct spi_mcux_data spi_mcux_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ }; \ \ DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL, \ &spi_mcux_data_##n, &spi_mcux_config_##n, \ POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ &spi_mcux_driver_api); \ \ static void spi_mcux_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ spi_mcux_isr, DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_ECSPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_mcux_ecspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,593
```unknown # MCUXpresso SDK SPI config SPI_MCUX_LPSPI bool "MCUX SPI driver" default y depends on DT_HAS_NXP_IMX_LPSPI_ENABLED depends on CLOCK_CONTROL select PINCTRL help Enable support for mcux spi driver. if SPI_MCUX_LPSPI config SPI_MCUX_LPSPI_DMA bool "MCUX LPSPI SPI DMA Support" select DMA help Enable the SPI DMA mode for SPI instances that enable dma channels in their device tree node. if SPI_RTIO config SPI_MCUX_RTIO_SQ_SIZE int "number of available submission queue entries" default 8 # sensible default that covers most common spi transactions help when rtio is use with spi each driver holds a context with which blocking api calls use to perform spi transactions. this queue needs to be as deep as the longest set of spi_buf_sets used, where normal spi operations are used (equal length buffers). it may need to be slightly deeper where the spi buffer sets for transmit/receive are not always matched equally in length as these are transformed into normal transceives. endif # SPI_RTIO endif # SPI_MCUX_LPSPI ```
/content/code_sandbox/drivers/spi/Kconfig.mcux_lpspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
264
```unknown # Nuvoton NPCX SPI Driver configuration options menuconfig SPI_NPCX_SPIP bool "Nuvoton NPCX embedded controller (EC) SPI driver" default y depends on DT_HAS_NUVOTON_NPCX_SPIP_ENABLED help Enable the SPI peripherals on NPCX MCU. config SPI_NPCX_SPIP_INTERRUPT bool "NPCX SPIP Interrupt Support" depends on SPI_NPCX_SPIP help Enable Interrupt support for the SPI Driver of NPCX chip. ```
/content/code_sandbox/drivers/spi/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
106
```unknown # DesignWare SPI driver configuration options menuconfig SPI_DW bool "Designware SPI controller driver" default y depends on DT_HAS_SNPS_DESIGNWARE_SPI_ENABLED help Enable support for Designware's SPI controllers. if SPI_DW config SPI_DW_ACCESS_WORD_ONLY bool "DesignWare SPI only allows word access" help In some case, e.g. ARC HS Development kit, the peripheral space of DesignWare SPI only allows word access, byte access will raise exception. config SPI_DW_HSSI bool "Designware SPI HSSI variant" help Use register layout compatible with the SPI DW HSSI variant of the peripheral. endif # SPI_DW ```
/content/code_sandbox/drivers/spi/Kconfig.dw
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
153
```unknown config SPI_OPENTITAN bool "OpenTitan SPI controller driver" default y depends on DT_HAS_LOWRISC_OPENTITAN_SPI_ENABLED help Enable the SPI peripherals on OpenTitan ```
/content/code_sandbox/drivers/spi/Kconfig.opentitan
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
46
```c /* * */ #define DT_DRV_COMPAT xlnx_xps_spi_2_00_a #include <zephyr/device.h> #include <zephyr/drivers/spi.h> #include <zephyr/sys/sys_io.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> LOG_MODULE_REGISTER(xlnx_quadspi, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" /* AXI Quad SPI v3.2 register offsets (See Xilinx PG153 for details) */ #define SRR_OFFSET 0x40 #define SPICR_OFFSET 0x60 #define SPISR_OFFSET 0x64 #define SPI_DTR_OFFSET 0x68 #define SPI_DRR_OFFSET 0x6c #define SPISSR_OFFSET 0x70 #define SPI_TX_FIFO_OCR_OFFSET 0x74 #define SPI_RX_FIFO_OCR_OFFSET 0x78 #define DGIER_OFFSET 0x1c #define IPISR_OFFSET 0x20 #define IPIER_OFFSET 0x28 /* SRR bit definitions */ #define SRR_SOFTRESET_MAGIC 0xa /* SPICR bit definitions */ #define SPICR_LOOP BIT(0) #define SPICR_SPE BIT(1) #define SPICR_MASTER BIT(2) #define SPICR_CPOL BIT(3) #define SPICR_CPHA BIT(4) #define SPICR_TX_FIFO_RESET BIT(5) #define SPICR_RX_FIFO_RESET BIT(6) #define SPICR_MANUAL_SS BIT(7) #define SPICR_MASTER_XFER_INH BIT(8) #define SPICR_LSB_FIRST BIT(9) /* SPISR bit definitions */ #define SPISR_RX_EMPTY BIT(0) #define SPISR_RX_FULL BIT(1) #define SPISR_TX_EMPTY BIT(2) #define SPISR_TX_FULL BIT(3) #define SPISR_MODF BIT(4) #define SPISR_SLAVE_MODE_SELECT BIT(5) #define SPISR_CPOL_CPHA_ERROR BIT(6) #define SPISR_SLAVE_MODE_ERROR BIT(7) #define SPISR_MSB_ERROR BIT(8) #define SPISR_LOOPBACK_ERROR BIT(9) #define SPISR_COMMAND_ERROR BIT(10) #define SPISR_ERROR_MASK (SPISR_COMMAND_ERROR | \ SPISR_LOOPBACK_ERROR | \ SPISR_MSB_ERROR | \ SPISR_SLAVE_MODE_ERROR | \ SPISR_CPOL_CPHA_ERROR) /* DGIER bit definitions */ #define DGIER_GIE BIT(31) /* IPISR and IPIER bit definitions */ #define IPIXR_MODF BIT(0) #define IPIXR_SLAVE_MODF BIT(1) #define IPIXR_DTR_EMPTY BIT(2) #define IPIXR_DTR_UNDERRUN BIT(3) #define IPIXR_DRR_FULL BIT(4) #define IPIXR_DRR_OVERRUN BIT(5) #define IPIXR_TX_FIFO_HALF_EMPTY BIT(6) #define IPIXR_SLAVE_MODE_SELECT BIT(7) #define IPIXR_DDR_NOT_EMPTY 
BIT(8) #define IPIXR_CPOL_CPHA_ERROR BIT(9) #define IPIXR_SLAVE_MODE_ERROR BIT(10) #define IPIXR_MSB_ERROR BIT(11) #define IPIXR_LOOPBACK_ERROR BIT(12) #define IPIXR_COMMAND_ERROR BIT(13) struct xlnx_quadspi_config { mm_reg_t base; void (*irq_config_func)(const struct device *dev); uint8_t num_ss_bits; uint8_t num_xfer_bytes; uint16_t fifo_size; #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(xlnx_startup_block) bool startup_block; #endif }; struct xlnx_quadspi_data { struct spi_context ctx; struct k_event dtr_empty; }; static inline uint32_t xlnx_quadspi_read32(const struct device *dev, mm_reg_t offset) { const struct xlnx_quadspi_config *config = dev->config; return sys_read32(config->base + offset); } static inline void xlnx_quadspi_write32(const struct device *dev, uint32_t value, mm_reg_t offset) { const struct xlnx_quadspi_config *config = dev->config; sys_write32(value, config->base + offset); } static void xlnx_quadspi_cs_control(const struct device *dev, bool on) { const struct xlnx_quadspi_config *config = dev->config; struct xlnx_quadspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t spissr = BIT_MASK(config->num_ss_bits); if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) { /* Skip slave select assert/de-assert in slave mode */ return; } if (on) { /* SPISSR is one-hot, active-low */ spissr &= ~BIT(ctx->config->slave); } else if (ctx->config->operation & SPI_HOLD_ON_CS) { /* Skip slave select de-assert */ return; } xlnx_quadspi_write32(dev, spissr, SPISSR_OFFSET); spi_context_cs_control(ctx, on); } static int xlnx_quadspi_configure(const struct device *dev, const struct spi_config *spi_cfg) { const struct xlnx_quadspi_config *config = dev->config; struct xlnx_quadspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t word_size; uint32_t spicr; uint32_t spisr; if (spi_context_configured(ctx, spi_cfg)) { /* Configuration already active, just enable SPI IOs */ spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET); spicr 
|= SPICR_SPE; xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET); return 0; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (spi_cfg->slave >= config->num_ss_bits) { LOG_ERR("unsupported slave %d, num_ss_bits %d", spi_cfg->slave, config->num_ss_bits); return -ENOTSUP; } if (spi_cfg->operation & SPI_CS_ACTIVE_HIGH) { LOG_ERR("unsupported CS polarity active high"); return -ENOTSUP; } if (!IS_ENABLED(CONFIG_SPI_SLAVE) && \ (spi_cfg->operation & SPI_OP_MODE_SLAVE)) { LOG_ERR("slave mode support not enabled"); return -ENOTSUP; } word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (word_size != (config->num_xfer_bytes * 8)) { LOG_ERR("unsupported word size %d bits, num_xfer_bytes %d", word_size, config->num_xfer_bytes); return -ENOTSUP; } /* Reset FIFOs, SPI IOs enabled */ spicr = SPICR_TX_FIFO_RESET | SPICR_RX_FIFO_RESET | SPICR_SPE; /* Master mode, inhibit master transmit, manual slave select */ if (!IS_ENABLED(CONFIG_SPI_SLAVE) || (spi_cfg->operation & SPI_OP_MODE_SLAVE) == 0U) { spicr |= SPICR_MASTER | SPICR_MASTER_XFER_INH | SPICR_MANUAL_SS; } if (spi_cfg->operation & SPI_MODE_CPOL) { spicr |= SPICR_CPOL; } if (spi_cfg->operation & SPI_MODE_CPHA) { spicr |= SPICR_CPHA; } if (spi_cfg->operation & SPI_MODE_LOOP) { spicr |= SPICR_LOOP; } if (spi_cfg->operation & SPI_TRANSFER_LSB) { spicr |= SPICR_LSB_FIRST; } /* * Write configuration and verify it is compliant with the IP core * configuration. Tri-state SPI IOs on error. 
*/ xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET); spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET); if (spisr & SPISR_ERROR_MASK) { LOG_ERR("unsupported configuration, spisr = 0x%08x", spisr); xlnx_quadspi_write32(dev, SPICR_MASTER_XFER_INH, SPICR_OFFSET); ctx->config = NULL; return -ENOTSUP; } ctx->config = spi_cfg; return 0; } static bool xlnx_quadspi_start_tx(const struct device *dev) { const struct xlnx_quadspi_config *config = dev->config; struct xlnx_quadspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; size_t xfer_len; uint32_t spicr = 0U; uint32_t spisr; uint32_t dtr = 0U; uint32_t fifo_avail_words = config->fifo_size ? config->fifo_size : 1; bool complete = false; if (!spi_context_tx_on(ctx) && !spi_context_rx_on(ctx)) { /* All done, de-assert slave select */ xlnx_quadspi_cs_control(dev, false); if ((ctx->config->operation & SPI_HOLD_ON_CS) == 0U) { /* Tri-state SPI IOs */ spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET); spicr &= ~(SPICR_SPE); xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET); } spi_context_complete(ctx, dev, 0); complete = true; return complete; } if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) { /* Inhibit master transaction while writing TX data */ spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET); spicr |= SPICR_MASTER_XFER_INH; xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET); } /* We can only see as far as the current rx buffer */ xfer_len = spi_context_longest_current_buf(ctx); /* Write TX data */ while (xfer_len--) { if (spi_context_tx_buf_on(ctx)) { switch (config->num_xfer_bytes) { case 1: dtr = UNALIGNED_GET((uint8_t *)(ctx->tx_buf)); break; case 2: dtr = UNALIGNED_GET((uint16_t *)(ctx->tx_buf)); break; case 4: dtr = UNALIGNED_GET((uint32_t *)(ctx->tx_buf)); break; default: __ASSERT(0, "unsupported num_xfer_bytes"); } } else { /* No TX buffer. 
Use dummy TX data */ dtr = 0U; } xlnx_quadspi_write32(dev, dtr, SPI_DTR_OFFSET); spi_context_update_tx(ctx, config->num_xfer_bytes, 1); if (--fifo_avail_words == 0) { spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET); if (spisr & SPISR_TX_FULL) { break; } if (!config->fifo_size) { fifo_avail_words = 1; } else if (spisr & SPISR_TX_EMPTY) { fifo_avail_words = config->fifo_size; } else { fifo_avail_words = config->fifo_size - xlnx_quadspi_read32(dev, SPI_TX_FIFO_OCR_OFFSET) - 1; } } } spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET); if (spisr & SPISR_COMMAND_ERROR) { /* Command not supported by memory type configured in IP core */ LOG_ERR("unsupported command"); xlnx_quadspi_cs_control(dev, false); spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET); if ((ctx->config->operation & SPI_HOLD_ON_CS) == 0U) { /* Tri-state SPI IOs */ spicr &= ~(SPICR_SPE); } xlnx_quadspi_write32(dev, spicr | SPICR_TX_FIFO_RESET, SPICR_OFFSET); spi_context_complete(ctx, dev, -ENOTSUP); complete = true; } if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) { /* Uninhibit master transaction */ spicr &= ~(SPICR_MASTER_XFER_INH); xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET); } return complete; } static void xlnx_quadspi_read_fifo(const struct device *dev) { const struct xlnx_quadspi_config *config = dev->config; struct xlnx_quadspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; uint32_t spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET); /* RX FIFO occupancy register only exists if FIFO is implemented */ uint32_t rx_fifo_words = config->fifo_size ? 
xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1; /* Read RX data */ while (!(spisr & SPISR_RX_EMPTY)) { uint32_t drr = xlnx_quadspi_read32(dev, SPI_DRR_OFFSET); if (spi_context_rx_buf_on(ctx)) { switch (config->num_xfer_bytes) { case 1: UNALIGNED_PUT(drr, (uint8_t *)ctx->rx_buf); break; case 2: UNALIGNED_PUT(drr, (uint16_t *)ctx->rx_buf); break; case 4: UNALIGNED_PUT(drr, (uint32_t *)ctx->rx_buf); break; default: __ASSERT(0, "unsupported num_xfer_bytes"); } } spi_context_update_rx(ctx, config->num_xfer_bytes, 1); if (--rx_fifo_words == 0) { spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET); rx_fifo_words = config->fifo_size ? xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1; } } } static int xlnx_quadspi_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool async, spi_callback_t cb, void *userdata) { const struct xlnx_quadspi_config *config = dev->config; struct xlnx_quadspi_data *data = dev->data; struct spi_context *ctx = &data->ctx; int ret; spi_context_lock(ctx, async, cb, userdata, spi_cfg); ret = xlnx_quadspi_configure(dev, spi_cfg); if (ret) { goto out; } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, config->num_xfer_bytes); xlnx_quadspi_cs_control(dev, true); while (true) { k_event_clear(&data->dtr_empty, 1); bool complete = xlnx_quadspi_start_tx(dev); if (complete || async) { break; } /** * 20ms should be long enough for 256 byte FIFO at any * reasonable clock speed. 
*/ if (!k_event_wait(&data->dtr_empty, 1, false, K_MSEC(20 + CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE))) { /* Timeout */ LOG_ERR("DTR empty timeout"); spi_context_complete(ctx, dev, -ETIMEDOUT); break; } xlnx_quadspi_read_fifo(dev); } ret = spi_context_wait_for_completion(ctx); out: spi_context_release(ctx, ret); return ret; } static int xlnx_quadspi_transceive_blocking(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return xlnx_quadspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int xlnx_quadspi_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return xlnx_quadspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int xlnx_quadspi_release(const struct device *dev, const struct spi_config *spi_cfg) { const struct xlnx_quadspi_config *config = dev->config; struct xlnx_quadspi_data *data = dev->data; uint32_t spicr; /* Force slave select de-assert */ xlnx_quadspi_write32(dev, BIT_MASK(config->num_ss_bits), SPISSR_OFFSET); /* Tri-state SPI IOs */ spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET); spicr &= ~(SPICR_SPE); xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET); spi_context_unlock_unconditionally(&data->ctx); return 0; } static void xlnx_quadspi_isr(const struct device *dev) { struct xlnx_quadspi_data *data = dev->data; uint32_t ipisr; /* Acknowledge interrupt */ ipisr = xlnx_quadspi_read32(dev, IPISR_OFFSET); xlnx_quadspi_write32(dev, ipisr, IPISR_OFFSET); if (ipisr & IPIXR_DTR_EMPTY) { /** * For async mode, we need to read the RX FIFO and refill the TX FIFO * if needed here. * For sync mode, we do this in the caller's context to avoid doing too much * work in the ISR, so just post the event. 
*/ #ifdef CONFIG_SPI_ASYNC struct spi_context *ctx = &data->ctx; if (ctx->asynchronous) { xlnx_quadspi_read_fifo(dev); xlnx_quadspi_start_tx(dev); return; } #endif k_event_post(&data->dtr_empty, 1); } else { LOG_WRN("unhandled interrupt, ipisr = 0x%08x", ipisr); } } #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(xlnx_startup_block) static int xlnx_quadspi_startup_block_workaround(const struct device *dev) { const struct xlnx_quadspi_config *config = dev->config; uint32_t spissr = BIT_MASK(config->num_ss_bits); uint32_t spicr; /** * See path_to_url * Up to 3 clock cycles must be issued before the output clock signal * is passed to the output CCLK pin from the SPI core. * Use JEDEC READ ID as dummy command to chip select 0. */ spissr &= ~BIT(0); xlnx_quadspi_write32(dev, spissr, SPISSR_OFFSET); xlnx_quadspi_write32(dev, 0x9F, SPI_DTR_OFFSET); xlnx_quadspi_write32(dev, 0, SPI_DTR_OFFSET); xlnx_quadspi_write32(dev, 0, SPI_DTR_OFFSET); spicr = SPICR_MANUAL_SS | SPICR_MASTER | SPICR_SPE; xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET); for (int i = 0; i < 10 && (xlnx_quadspi_read32(dev, SPISR_OFFSET) & SPISR_TX_EMPTY) == 0; i++) { k_msleep(1); } if ((xlnx_quadspi_read32(dev, SPISR_OFFSET) & SPISR_TX_EMPTY) == 0) { LOG_ERR("timeout waiting for TX_EMPTY"); return -EIO; } spicr |= SPICR_MASTER_XFER_INH; xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET); while ((xlnx_quadspi_read32(dev, SPISR_OFFSET) & SPISR_RX_EMPTY) == 0) { xlnx_quadspi_read32(dev, SPI_DRR_OFFSET); } spissr = BIT_MASK(config->num_ss_bits); xlnx_quadspi_write32(dev, spissr, SPISSR_OFFSET); /* Reset controller to clean up */ xlnx_quadspi_write32(dev, SRR_SOFTRESET_MAGIC, SRR_OFFSET); return 0; } #endif static int xlnx_quadspi_init(const struct device *dev) { int err; const struct xlnx_quadspi_config *config = dev->config; struct xlnx_quadspi_data *data = dev->data; k_event_init(&data->dtr_empty); /* Reset controller */ xlnx_quadspi_write32(dev, SRR_SOFTRESET_MAGIC, SRR_OFFSET); config->irq_config_func(dev); err = 
spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } spi_context_unlock_unconditionally(&data->ctx); #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(xlnx_startup_block) if (config->startup_block) { err = xlnx_quadspi_startup_block_workaround(dev); if (err < 0) { return err; } } #endif /* Enable DTR Empty interrupt */ xlnx_quadspi_write32(dev, IPIXR_DTR_EMPTY, IPIER_OFFSET); xlnx_quadspi_write32(dev, DGIER_GIE, DGIER_OFFSET); return 0; } static const struct spi_driver_api xlnx_quadspi_driver_api = { .transceive = xlnx_quadspi_transceive_blocking, #ifdef CONFIG_SPI_ASYNC .transceive_async = xlnx_quadspi_transceive_async, #endif /* CONFIG_SPI_ASYNC */ .release = xlnx_quadspi_release, }; #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(xlnx_startup_block) #define STARTUP_BLOCK_INIT(n) .startup_block = DT_INST_PROP(n, xlnx_startup_block), #else #define STARTUP_BLOCK_INIT(n) #endif #define XLNX_QUADSPI_INIT(n) \ static void xlnx_quadspi_config_func_##n(const struct device *dev); \ \ static const struct xlnx_quadspi_config xlnx_quadspi_config_##n = { \ .base = DT_INST_REG_ADDR(n), \ .irq_config_func = xlnx_quadspi_config_func_##n, \ .num_ss_bits = DT_INST_PROP(n, xlnx_num_ss_bits), \ .num_xfer_bytes = \ DT_INST_PROP(n, xlnx_num_transfer_bits) / 8, \ .fifo_size = DT_INST_PROP_OR(n, fifo_size, 0), \ STARTUP_BLOCK_INIT(n) \ }; \ \ static struct xlnx_quadspi_data xlnx_quadspi_data_##n = { \ SPI_CONTEXT_INIT_LOCK(xlnx_quadspi_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(xlnx_quadspi_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ }; \ \ DEVICE_DT_INST_DEFINE(n, &xlnx_quadspi_init, \ NULL, \ &xlnx_quadspi_data_##n, \ &xlnx_quadspi_config_##n, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &xlnx_quadspi_driver_api); \ \ static void xlnx_quadspi_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ xlnx_quadspi_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } 
DT_INST_FOREACH_STATUS_OKAY(XLNX_QUADSPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_xlnx_axi_quadspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,409
```c /* * */ #include "spi_nrfx_common.h" #include <zephyr/kernel.h> int spi_nrfx_wake_init(const nrfx_gpiote_t *gpiote, uint32_t wake_pin) { nrf_gpio_pin_pull_t pull_config = NRF_GPIO_PIN_PULLDOWN; uint8_t ch; nrfx_gpiote_trigger_config_t trigger_config = { .trigger = NRFX_GPIOTE_TRIGGER_HITOLO, .p_in_channel = &ch, }; nrfx_gpiote_input_pin_config_t input_config = { .p_pull_config = &pull_config, .p_trigger_config = &trigger_config, .p_handler_config = NULL, }; nrfx_err_t res; res = nrfx_gpiote_channel_alloc(gpiote, &ch); if (res != NRFX_SUCCESS) { return -ENODEV; } res = nrfx_gpiote_input_configure(gpiote, wake_pin, &input_config); if (res != NRFX_SUCCESS) { nrfx_gpiote_channel_free(gpiote, ch); return -EIO; } return 0; } int spi_nrfx_wake_request(const nrfx_gpiote_t *gpiote, uint32_t wake_pin) { nrf_gpiote_event_t trigger_event = nrfx_gpiote_in_event_get(gpiote, wake_pin); uint32_t start_cycles; uint32_t max_wait_cycles = DIV_ROUND_UP(CONFIG_SPI_NRFX_WAKE_TIMEOUT_US * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC, 1000000); int err = 0; /* Enable the trigger (a high-to-low transition) without its interrupt. * The expected time to wait is quite short so it is not worth paying * the overhead of context switching to handle the interrupt. */ nrfx_gpiote_trigger_enable(gpiote, wake_pin, false); /* Enable pull-up on the WAKE line. After the slave device sees the * WAKE line going high, it will force the line to go low. This will * be caught by the enabled trigger and the loop below waits for that. */ nrf_gpio_cfg_input(wake_pin, NRF_GPIO_PIN_PULLUP); start_cycles = k_cycle_get_32(); while (!nrf_gpiote_event_check(gpiote->p_reg, trigger_event)) { uint32_t elapsed_cycles = k_cycle_get_32() - start_cycles; if (elapsed_cycles >= max_wait_cycles) { err = -ETIMEDOUT; break; } } nrfx_gpiote_trigger_disable(gpiote, wake_pin); nrf_gpio_cfg_input(wake_pin, NRF_GPIO_PIN_PULLDOWN); return err; } ```
/content/code_sandbox/drivers/spi/spi_nrfx_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
596
```objective-c /* * */ #ifndef _SPI_SIFIVE__H #define _SPI_SIFIVE__H #include "spi_context.h" #include <zephyr/sys/sys_io.h> #include <zephyr/device.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/pinctrl.h> #define SPI_CFG(dev) ((struct spi_sifive_cfg *) ((dev)->config)) #define SPI_DATA(dev) ((struct spi_sifive_data *) ((dev)->data)) #define SPI_REG(dev, offset) ((mem_addr_t) (SPI_CFG(dev)->base + (offset))) /* Register Offsets */ #define REG_SCKDIV 0x000 #define REG_SCKMODE 0x004 #define REG_CSID 0x010 #define REG_CSDEF 0x014 #define REG_CSMODE 0x018 #define REG_DELAY0 0x028 #define REG_DELAY1 0x02C #define REG_FMT 0x040 #define REG_TXDATA 0x048 #define REG_RXDATA 0x04C #define REG_TXMARK 0x050 #define REG_RXMARK 0x054 #define REG_FCTRL 0x060 #define REG_FFMT 0x064 #define REG_IE 0x070 #define REG_IP 0x074 /* Masks */ #define SF_SCKDIV_DIV_MASK (0xFFF << 0) #define SF_FMT_PROTO_MASK (0x3 << 0) #define SF_FMT_LEN_MASK (0xF << 16) /* Offsets */ #define SF_SCKMODE_POL 1 #define SF_SCKMODE_PHA 0 #define SF_FMT_LEN 16 #define SF_FMT_ENDIAN 2 #define SF_FCTRL_EN 0 /* Values */ #define SF_CSMODE_AUTO 0 #define SF_CSMODE_HOLD 2 #define SF_CSMODE_OFF 3 #define SF_FMT_PROTO_SINGLE 0 #define SF_TXDATA_FULL (1 << 31) #define SF_RXDATA_EMPTY (1 << 31) /* Structure Declarations */ struct spi_sifive_data { struct spi_context ctx; }; struct spi_sifive_cfg { uint32_t base; uint32_t f_sys; const struct pinctrl_dev_config *pcfg; }; #endif /* _SPI_SIFIVE__H */ ```
/content/code_sandbox/drivers/spi/spi_sifive.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
506
```c /* * */ #define DT_DRV_COMPAT intel_sedi_spi #include <zephyr/kernel.h> #include <zephyr/drivers/spi.h> #include <zephyr/pm/device.h> #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_sedi); #include "sedi_driver_spi.h" #include "spi_context.h" struct spi_sedi_config { DEVICE_MMIO_ROM; sedi_spi_t spi_device; void (*irq_config)(void); }; struct spi_sedi_data { DEVICE_MMIO_RAM; struct spi_context ctx; bool tx_data_updated; bool rx_data_updated; uint32_t tx_dummy_len; uint32_t rx_dummy_len; }; static int spi_sedi_configure(const struct device *dev, const struct spi_config *config) { struct spi_sedi_data *data = dev->data; const struct spi_sedi_config *info = dev->config; uint32_t word_size, cpol, cpha, loopback; if (spi_context_configured(&data->ctx, config) == true) { return 0; } word_size = SPI_WORD_SIZE_GET(config->operation); sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_DATA_WIDTH, word_size); /* CPOL and CPHA */ cpol = SPI_MODE_GET(config->operation) & SPI_MODE_CPOL; cpha = SPI_MODE_GET(config->operation) & SPI_MODE_CPHA; if ((cpol == 0) && (cpha == 0)) { sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_CPOL0_CPHA0, 0); } else if ((cpol == 0) && (cpha == 1U)) { sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_CPOL0_CPHA1, 0); } else if ((cpol == 1) && (cpha == 0U)) { sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_CPOL1_CPHA0, 0); } else { sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_CPOL1_CPHA1, 0); } /* MSB and LSB */ if (config->operation & SPI_TRANSFER_LSB) { sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_LSB, 0); } /* Set loopack */ loopback = SPI_MODE_GET(config->operation) & SPI_MODE_LOOP; sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_LOOPBACK, loopback); /* Set baudrate */ sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_SPEED_SET, config->frequency); sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_CS_HW, config->slave); data->ctx.config = config; spi_context_cs_control(&data->ctx, 
true); return 0; } static int transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { const struct spi_sedi_config *info = dev->config; struct spi_sedi_data *spi = dev->data; struct spi_context *ctx = &spi->ctx; int ret; uint32_t transfer_bytes = 0; uint8_t *data_out = NULL, *data_in = NULL; uint32_t i, dummy_len = 0; const struct spi_buf *buf; bool is_multibufs = false; spi_context_lock(&spi->ctx, asynchronous, cb, userdata, config); pm_device_busy_set(dev); /* Power up use default setting */ ret = sedi_spi_set_power(info->spi_device, SEDI_POWER_FULL); if (ret) { goto out; } /* If need to configure, re-configure */ spi_sedi_configure(dev, config); spi->tx_data_updated = false; spi->rx_data_updated = false; /* Set buffers info */ spi_context_buffers_setup(&spi->ctx, tx_bufs, rx_bufs, 1); if ((ctx->tx_count > 1) || (ctx->rx_count > 1)) { is_multibufs = true; } if (ctx->tx_count > ctx->rx_count) { spi->tx_dummy_len = 0; for (i = ctx->rx_count; i < ctx->tx_count; i++) { buf = ctx->current_tx + i; dummy_len += buf->len; } spi->rx_dummy_len = dummy_len; } else if (ctx->tx_count < ctx->rx_count) { spi->rx_dummy_len = 0; for (i = ctx->tx_count; i < ctx->rx_count; i++) { buf = ctx->current_rx + i; dummy_len += buf->len; } spi->tx_dummy_len = dummy_len; } else { spi->tx_dummy_len = 0; spi->rx_dummy_len = 0; } if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) { spi_context_cs_control(&spi->ctx, true); spi_context_complete(&spi->ctx, dev, 0); return 0; } /* For multiple buffers, using continuous mode */ if (is_multibufs) { sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_BUFFER_SETS, 1); } if (ctx->tx_len == 0) { /* rx only, nothing to tx */ data_out = NULL; data_in = (uint8_t *)ctx->rx_buf; transfer_bytes = ctx->rx_len; spi->tx_dummy_len -= transfer_bytes; } else if (ctx->rx_len == 0) { /* tx only, nothing to rx */ data_out = (uint8_t 
*)ctx->tx_buf; data_in = NULL; transfer_bytes = ctx->tx_len; spi->rx_dummy_len -= transfer_bytes; } else if (ctx->tx_len == ctx->rx_len) { /* rx and tx are the same length */ data_out = (uint8_t *)ctx->tx_buf; data_in = (uint8_t *)ctx->rx_buf; transfer_bytes = ctx->tx_len; } else if (ctx->tx_len > ctx->rx_len) { /* Break up the tx into multiple transfers so we don't have to * rx into a longer intermediate buffer. Leave chip select * active between transfers. */ data_out = (uint8_t *)ctx->tx_buf; data_in = ctx->rx_buf; transfer_bytes = ctx->rx_len; } else { /* Break up the rx into multiple transfers so we don't have to * tx from a longer intermediate buffer. Leave chip select * active between transfers. */ data_out = (uint8_t *)ctx->tx_buf; data_in = ctx->rx_buf; transfer_bytes = ctx->tx_len; } spi_context_cs_control(&spi->ctx, false); ret = sedi_spi_transfer(info->spi_device, data_out, data_in, transfer_bytes); if (ret != SEDI_DRIVER_OK) { goto out; } ret = spi_context_wait_for_completion(&spi->ctx); if (ret != 0) { sedi_spi_status_t spi_status = {0}; sedi_spi_get_status(info->spi_device, &spi_status); /* SPI ABORT */ sedi_spi_control(info->spi_device, SEDI_SPI_IOCTL_ABORT, 0); /* Toggle GPIO back */ spi_context_cs_control(&spi->ctx, true); } out: spi_context_release(&spi->ctx, ret); pm_device_busy_clear(dev); return ret; } static int spi_sedi_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_sedi_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int spi_sedi_release(const struct device *dev, const struct spi_config *config) { 
struct spi_sedi_data *spi = dev->data; if (!spi_context_configured(&spi->ctx, config)) { return -EINVAL; } spi_context_unlock_unconditionally(&spi->ctx); return 0; } extern void spi_isr(sedi_spi_t device); void spi_sedi_callback(uint32_t event, void *param) { const struct device *dev = (const struct device *)param; const struct spi_sedi_config *info = dev->config; struct spi_sedi_data *spi = dev->data; struct spi_context *ctx = &spi->ctx; int error; if (event == SEDI_SPI_EVENT_DATA_LOST) { error = -EIO; } else { error = 0; } if ((event == SEDI_SPI_EVENT_COMPLETE) || (event == SEDI_SPI_EVENT_DATA_LOST)) { spi_context_cs_control(&spi->ctx, true); spi_context_complete(&spi->ctx, dev, error); } else if (event == SEDI_SPI_EVENT_TX_FINISHED) { spi_context_update_tx(ctx, 1, ctx->tx_len); if (ctx->tx_len != 0) { sedi_spi_update_tx_buf(info->spi_device, ctx->tx_buf, ctx->tx_len); if ((ctx->rx_len == 0) && (spi->rx_data_updated == false)) { /* Update rx length if always no rx */ sedi_spi_update_rx_buf(info->spi_device, NULL, spi->rx_dummy_len); spi->rx_data_updated = true; } } else if (spi->tx_data_updated == false) { sedi_spi_update_tx_buf(info->spi_device, NULL, spi->tx_dummy_len); spi->tx_data_updated = true; } } else if (event == SEDI_SPI_EVENT_RX_FINISHED) { spi_context_update_rx(ctx, 1, ctx->rx_len); if (ctx->rx_len != 0) { sedi_spi_update_rx_buf(info->spi_device, ctx->rx_buf, ctx->rx_len); } } } static const struct spi_driver_api sedi_spi_api = { .transceive = spi_sedi_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_sedi_transceive_async, #endif /* CONFIG_SPI_ASYNC */ .release = spi_sedi_release, }; static int spi_sedi_init(const struct device *dev) { const struct spi_sedi_config *info = dev->config; struct spi_sedi_data *spi = dev->data; int ret; DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); ret = sedi_spi_init(info->spi_device, spi_sedi_callback, (void *)dev, DEVICE_MMIO_GET(dev)); if (ret != SEDI_DRIVER_OK) { return -ENODEV; } /* Init and connect IRQ */ 
info->irq_config(); spi_context_unlock_unconditionally(&spi->ctx); return 0; } #ifdef CONFIG_PM_DEVICE static int spi_suspend_device(const struct device *dev) { const struct spi_sedi_config *config = dev->config; if (pm_device_is_busy(dev)) { return -EBUSY; } int ret = sedi_spi_set_power(config->spi_device, SEDI_POWER_SUSPEND); if (ret != SEDI_DRIVER_OK) { return -EIO; } return 0; } static int spi_resume_device_from_suspend(const struct device *dev) { const struct spi_sedi_config *config = dev->config; int ret; ret = sedi_spi_set_power(config->spi_device, SEDI_POWER_FULL); if (ret != SEDI_DRIVER_OK) { return -EIO; } pm_device_busy_clear(dev); return 0; } static int spi_sedi_device_ctrl(const struct device *dev, enum pm_device_action action) { int ret = 0; switch (action) { case PM_DEVICE_ACTION_SUSPEND: ret = spi_suspend_device(dev); break; case PM_DEVICE_ACTION_RESUME: ret = spi_resume_device_from_suspend(dev); break; default: ret = -ENOTSUP; } return ret; } #endif /* CONFIG_PM_DEVICE */ #define SPI_SEDI_IRQ_FLAGS_SENSE0(n) 0 #define SPI_SEDI_IRQ_FLAGS_SENSE1(n) DT_INST_IRQ(n, sense) #define SPI_SEDI_IRQ_FLAGS(n) \ _CONCAT(SPI_SEDI_IRQ_FLAGS_SENSE, DT_INST_IRQ_HAS_CELL(n, sense))(n) #define CREATE_SEDI_SPI_INSTANCE(num) \ static void spi_##num##_irq_init(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(num), \ DT_INST_IRQ(num, priority), \ spi_isr, num, SPI_SEDI_IRQ_FLAGS(num)); \ irq_enable(DT_INST_IRQN(num)); \ } \ static struct spi_sedi_data spi_##num##_data = { \ SPI_CONTEXT_INIT_LOCK(spi_##num##_data, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_##num##_data, ctx), \ }; \ const static struct spi_sedi_config spi_##num##_config = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(num)), \ .spi_device = num, .irq_config = spi_##num##_irq_init, \ }; \ PM_DEVICE_DEFINE(spi_##num, spi_sedi_device_ctrl); \ DEVICE_DT_INST_DEFINE(num, \ spi_sedi_init, \ PM_DEVICE_GET(spi_##num), \ &spi_##num##_data, \ &spi_##num##_config, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &sedi_spi_api); 
DT_INST_FOREACH_STATUS_OKAY(CREATE_SEDI_SPI_INSTANCE) ```
/content/code_sandbox/drivers/spi/spi_sedi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,165
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_spip #include <zephyr/drivers/spi.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_npcx_spip, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" /* Transfer this NOP value when tx buf is null */ #define SPI_NPCX_SPIP_TX_NOP 0x00 #define SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US 1000 /* The max allowed prescaler divider */ #define SPI_NPCX_MAX_PRESCALER_DIV INT8_MAX struct spi_npcx_spip_data { struct spi_context ctx; uint32_t src_clock_freq; uint8_t bytes_per_frame; }; struct spi_npcx_spip_cfg { struct spip_reg *reg_base; struct npcx_clk_cfg clk_cfg; #ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT /* routine for configuring SPIP ISR */ void (*irq_cfg_func)(const struct device *dev); #endif const struct pinctrl_dev_config *pcfg; }; static int spi_npcx_spip_configure(const struct device *dev, const struct spi_config *spi_cfg) { uint8_t prescaler_divider; const struct spi_npcx_spip_cfg *const config = dev->config; struct spi_npcx_spip_data *const data = dev->data; struct spip_reg *const reg_base = config->reg_base; spi_operation_t operation = spi_cfg->operation; uint8_t frame_size; if (spi_context_configured(&data->ctx, spi_cfg)) { /* This configuration is already in use */ return 0; } if (operation & SPI_HALF_DUPLEX) { LOG_ERR("Half duplex mode is not supported"); return -ENOTSUP; } if (SPI_OP_MODE_GET(operation) != SPI_OP_MODE_MASTER) { LOG_ERR("Only SPI controller mode is supported"); return -ENOTSUP; } if (operation & SPI_MODE_LOOP) { LOG_ERR("Loopback mode is not supported"); return -ENOTSUP; } /* * If the GPIO CS configuration is not present, return error because the hardware CS is * not supported. 
*/ if (!spi_cs_is_gpio(spi_cfg)) { LOG_ERR("Only GPIO CS is supported"); return -ENOTSUP; } /* Get the frame length */ frame_size = SPI_WORD_SIZE_GET(operation); if (frame_size == 8) { data->bytes_per_frame = 1; reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_MOD); } else if (frame_size == 16) { reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_MOD); data->bytes_per_frame = 2; } else { LOG_ERR("Only support word sizes either 8 or 16 bits"); return -ENOTSUP; } if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only single line mode is supported"); return -ENOTSUP; } /* Set the endianness */ if (operation & SPI_TRANSFER_LSB) { LOG_ERR("Shift out with LSB is not supported"); return -ENOTSUP; } /* * Set CPOL and CPHA. * The following is how to map npcx spip control register to CPOL and CPHA * CPOL CPHA | SCIDL SCM * ----------------------------- * 0 0 | 0 0 * 0 1 | 0 1 * 1 0 | 1 1 * 1 1 | 1 0 */ if (operation & SPI_MODE_CPOL) { reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SCIDL); } else { reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_SCIDL); } if (((operation & SPI_MODE_CPOL) == SPI_MODE_CPOL) != ((operation & SPI_MODE_CPHA) == SPI_MODE_CPHA)) { reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SCM); } else { reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_SCM); } /* Set the SPI frequency */ prescaler_divider = data->src_clock_freq / 2 / spi_cfg->frequency; if (prescaler_divider >= 1) { prescaler_divider -= 1; } if (prescaler_divider >= SPI_NPCX_MAX_PRESCALER_DIV) { LOG_ERR("SPI divider %d exceeds the max allowed value %d.", prescaler_divider, SPI_NPCX_MAX_PRESCALER_DIV); return -ENOTSUP; } SET_FIELD(reg_base->SPIP_CTL1, NPCX_SPIP_CTL1_SCDV, prescaler_divider); data->ctx.config = spi_cfg; return 0; } static void spi_npcx_spip_process_tx_buf(struct spi_npcx_spip_data *const data, uint16_t *tx_frame) { /* Get the tx_frame from tx_buf only when tx_buf != NULL */ if (spi_context_tx_buf_on(&data->ctx)) { if (data->bytes_per_frame == 1) { *tx_frame = 
UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf)); } else { *tx_frame = UNALIGNED_GET((uint16_t *)(data->ctx.tx_buf)); } } /* * The update is ignored if TX is off (tx_len == 0). * Note: if tx_buf == NULL && tx_len != 0, the update still counts. */ spi_context_update_tx(&data->ctx, data->bytes_per_frame, 1); } static void spi_npcx_spip_process_rx_buf(struct spi_npcx_spip_data *const data, uint16_t rx_frame) { if (spi_context_rx_buf_on(&data->ctx)) { if (data->bytes_per_frame == 1) { UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf); } else { UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf); } } spi_context_update_rx(&data->ctx, data->bytes_per_frame, 1); } #ifndef CONFIG_SPI_NPCX_SPIP_INTERRUPT static int spi_npcx_spip_xfer_frame(const struct device *dev) { const struct spi_npcx_spip_cfg *const config = dev->config; struct spip_reg *const reg_base = config->reg_base; struct spi_npcx_spip_data *const data = dev->data; uint16_t tx_frame = SPI_NPCX_SPIP_TX_NOP; uint16_t rx_frame; spi_npcx_spip_process_tx_buf(data, &tx_frame); if (WAIT_FOR(!IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_BSY), SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US, NULL) == false) { LOG_ERR("Check Status BSY Timeout"); return -ETIMEDOUT; } reg_base->SPIP_DATA = tx_frame; if (WAIT_FOR(IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_RBF), SPI_NPCX_SPIP_WAIT_STATUS_TIMEOUT_US, NULL) == false) { LOG_ERR("Check Status RBF Timeout"); return -ETIMEDOUT; } rx_frame = reg_base->SPIP_DATA; spi_npcx_spip_process_rx_buf(data, rx_frame); return 0; } #endif static bool spi_npcx_spip_transfer_ongoing(struct spi_npcx_spip_data *data) { return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx); } #ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT static void spi_npcx_spip_isr(const struct device *dev) { const struct spi_npcx_spip_cfg *const config = dev->config; struct spip_reg *const reg_base = config->reg_base; struct spi_npcx_spip_data *const data = dev->data; struct spi_context *ctx = &data->ctx; uint16_t tx_frame = 
SPI_NPCX_SPIP_TX_NOP; uint16_t rx_frame; uint8_t status; status = reg_base->SPIP_STAT; if (!IS_BIT_SET(status, NPCX_SPIP_STAT_BSY) && !IS_BIT_SET(status, NPCX_SPIP_STAT_RBF)) { reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_EIW); spi_npcx_spip_process_tx_buf(data, &tx_frame); reg_base->SPIP_DATA = tx_frame; } else if (IS_BIT_SET(status, NPCX_SPIP_STAT_RBF)) { rx_frame = reg_base->SPIP_DATA; spi_npcx_spip_process_rx_buf(data, rx_frame); if (!spi_npcx_spip_transfer_ongoing(data)) { reg_base->SPIP_CTL1 &= ~BIT(NPCX_SPIP_CTL1_EIR); /* * The CS might not de-assert if SPI_HOLD_ON_CS is configured. * In this case, CS de-assertion reles on the caller to explicitly call * spi_release() API. */ spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, 0); } else { spi_npcx_spip_process_tx_buf(data, &tx_frame); reg_base->SPIP_DATA = tx_frame; } } } #endif static int transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { const struct spi_npcx_spip_cfg *const config = dev->config; struct spip_reg *const reg_base = config->reg_base; struct spi_npcx_spip_data *const data = dev->data; struct spi_context *ctx = &data->ctx; int rc; if (!tx_bufs && !rx_bufs) { return 0; } #ifndef CONFIG_SPI_NPCX_SPIP_INTERRUPT if (asynchronous) { return -ENOTSUP; } #endif /* Lock the SPI Context */ spi_context_lock(ctx, asynchronous, cb, userdata, spi_cfg); rc = spi_npcx_spip_configure(dev, spi_cfg); if (rc < 0) { spi_context_release(ctx, rc); return rc; } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->bytes_per_frame); if (!spi_npcx_spip_transfer_ongoing(data)) { spi_context_release(ctx, 0); return 0; } /* Enable SPIP module */ reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SPIEN); /* Cleaning junk data in the buffer */ while (IS_BIT_SET(reg_base->SPIP_STAT, NPCX_SPIP_STAT_RBF)) { uint8_t unused __attribute__((unused)); unused = reg_base->SPIP_DATA; } 
/* Assert the CS line */ spi_context_cs_control(ctx, true); #ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_EIR) | BIT(NPCX_SPIP_CTL1_EIW); rc = spi_context_wait_for_completion(&data->ctx); #else do { rc = spi_npcx_spip_xfer_frame(dev); if (rc < 0) { break; } } while (spi_npcx_spip_transfer_ongoing(data)); /* * The CS might not de-assert if SPI_HOLD_ON_CS is configured. * In this case, CS de-assertion reles on the caller to explicitly call spi_release() API. */ spi_context_cs_control(ctx, false); #endif spi_context_release(ctx, rc); return rc; } static int spi_npcx_spip_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_npcx_spip_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif static int spi_npcx_spip_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_npcx_spip_data *const data = dev->data; struct spi_context *ctx = &data->ctx; if (!spi_context_configured(ctx, spi_cfg)) { return -EINVAL; } spi_context_unlock_unconditionally(ctx); return 0; } static int spi_npcx_spip_init(const struct device *dev) { int ret; struct spi_npcx_spip_data *const data = dev->data; const struct spi_npcx_spip_cfg *const config = dev->config; struct spip_reg *const reg_base = config->reg_base; const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE); if (!device_is_ready(clk_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg); if (ret < 0) { LOG_ERR("Turn on SPIP clock fail %d", ret); return ret; } ret = 
clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->clk_cfg, &data->src_clock_freq); if (ret < 0) { LOG_ERR("Get SPIP clock source rate error %d", ret); return ret; } ret = spi_context_cs_configure_all(&data->ctx); if (ret < 0) { return ret; } ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Make sure the context is unlocked */ spi_context_unlock_unconditionally(&data->ctx); #ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT config->irq_cfg_func(dev); #endif /* Enable SPIP module */ reg_base->SPIP_CTL1 |= BIT(NPCX_SPIP_CTL1_SPIEN); return 0; } static struct spi_driver_api spi_npcx_spip_api = { .transceive = spi_npcx_spip_transceive, .release = spi_npcx_spip_release, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_npcx_spip_transceive_async, #endif }; #ifdef CONFIG_SPI_NPCX_SPIP_INTERRUPT #define NPCX_SPIP_IRQ_HANDLER(n) \ static void spi_npcx_spip_irq_cfg_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_npcx_spip_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } #define NPCX_SPIP_IRQ_HANDLER_FUNC(n) .irq_cfg_func = spi_npcx_spip_irq_cfg_func_##n, #else #define NPCX_SPIP_IRQ_HANDLER_FUNC(n) #define NPCX_SPIP_IRQ_HANDLER(n) #endif #define NPCX_SPI_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ NPCX_SPIP_IRQ_HANDLER(n) \ \ static struct spi_npcx_spip_data spi_npcx_spip_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_npcx_spip_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_npcx_spip_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)}; \ \ static struct spi_npcx_spip_cfg spi_npcx_spip_cfg_##n = { \ .reg_base = (struct spip_reg *)DT_INST_REG_ADDR(n), \ .clk_cfg = NPCX_DT_CLK_CFG_ITEM(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ NPCX_SPIP_IRQ_HANDLER_FUNC(n)}; \ \ DEVICE_DT_INST_DEFINE(n, spi_npcx_spip_init, NULL, &spi_npcx_spip_data_##n, \ &spi_npcx_spip_cfg_##n, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ &spi_npcx_spip_api); 
DT_INST_FOREACH_STATUS_OKAY(NPCX_SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_npcx_spip.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,728
```unknown
menuconfig SPI_NRFX
	bool "nRF SPI nrfx drivers"
	default y
	depends on SOC_FAMILY_NORDIC_NRF
	depends on MULTITHREADING
	select PINCTRL
	help
	  Enable support for nrfx SPI drivers for nRF MCU series.

if SPI_NRFX

# Legacy (non-EasyDMA) SPI master instances.
config SPI_NRFX_SPI
	def_bool y
	depends on DT_HAS_NORDIC_NRF_SPI_ENABLED
	select NRFX_SPI0 if HAS_HW_NRF_SPI0
	select NRFX_SPI1 if HAS_HW_NRF_SPI1
	select NRFX_SPI2 if HAS_HW_NRF_SPI2

# EasyDMA-based SPI master instances.
config SPI_NRFX_SPIM
	def_bool y
	depends on DT_HAS_NORDIC_NRF_SPIM_ENABLED
	select NRFX_SPIM0 if HAS_HW_NRF_SPIM0
	select NRFX_SPIM1 if HAS_HW_NRF_SPIM1
	select NRFX_SPIM2 if HAS_HW_NRF_SPIM2
	select NRFX_SPIM3 if HAS_HW_NRF_SPIM3
	select NRFX_SPIM4 if HAS_HW_NRF_SPIM4
	select NRFX_SPIM00 if HAS_HW_NRF_SPIM00
	select NRFX_SPIM20 if HAS_HW_NRF_SPIM20
	select NRFX_SPIM21 if HAS_HW_NRF_SPIM21
	select NRFX_SPIM22 if HAS_HW_NRF_SPIM22
	select NRFX_SPIM30 if HAS_HW_NRF_SPIM30
	select NRFX_SPIM120 if HAS_HW_NRF_SPIM120
	select NRFX_SPIM121 if HAS_HW_NRF_SPIM121
	select NRFX_SPIM130 if HAS_HW_NRF_SPIM130
	select NRFX_SPIM131 if HAS_HW_NRF_SPIM131
	select NRFX_SPIM132 if HAS_HW_NRF_SPIM132
	select NRFX_SPIM133 if HAS_HW_NRF_SPIM133
	select NRFX_SPIM134 if HAS_HW_NRF_SPIM134
	select NRFX_SPIM135 if HAS_HW_NRF_SPIM135
	select NRFX_SPIM136 if HAS_HW_NRF_SPIM136
	select NRFX_SPIM137 if HAS_HW_NRF_SPIM137

# EasyDMA-based SPI slave instances.
config SPI_NRFX_SPIS
	def_bool y
	depends on DT_HAS_NORDIC_NRF_SPIS_ENABLED
	select SPI_SLAVE
	select NRFX_SPIS0 if HAS_HW_NRF_SPIS0
	select NRFX_SPIS1 if HAS_HW_NRF_SPIS1
	select NRFX_SPIS2 if HAS_HW_NRF_SPIS2
	select NRFX_SPIS3 if HAS_HW_NRF_SPIS3
	select NRFX_SPIS00 if HAS_HW_NRF_SPIS00
	select NRFX_SPIS20 if HAS_HW_NRF_SPIS20
	select NRFX_SPIS21 if HAS_HW_NRF_SPIS21
	select NRFX_SPIS22 if HAS_HW_NRF_SPIS22
	select NRFX_SPIS30 if HAS_HW_NRF_SPIS30
	select NRFX_SPIS120 if HAS_HW_NRF_SPIS120
	select NRFX_SPIS130 if HAS_HW_NRF_SPIS130
	select NRFX_SPIS131 if HAS_HW_NRF_SPIS131
	select NRFX_SPIS132 if HAS_HW_NRF_SPIS132
	select NRFX_SPIS133 if HAS_HW_NRF_SPIS133
	select NRFX_SPIS134 if HAS_HW_NRF_SPIS134
	select NRFX_SPIS135 if HAS_HW_NRF_SPIS135
	select NRFX_SPIS136 if HAS_HW_NRF_SPIS136
	select NRFX_SPIS137 if HAS_HW_NRF_SPIS137

config SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
	depends on SOC_NRF52832
	select NRFX_PPI
	bool "Allow enabling the SPIM driver despite PAN 58"
	help
	  Allow enabling the nRF SPI Master with EasyDMA, despite Product
	  Anomaly Notice 58 (SPIM: An additional byte is clocked out when
	  RXD.MAXCNT == 1 and TXD.MAXCNT <= 1). Without this override, the
	  SPI Master is only available without EasyDMA. Note that the 'SPIM'
	  and 'SPIS' drivers use EasyDMA, while the 'SPI' driver does not.
	  When used in conjunction with nRF SPIM Devicetree property
	  'anomaly-58-workaround' a workaround can be enabled per SPIM
	  instance. If you are certain that transactions with
	  RXD.MAXCNT == 1 and TXD.MAXCNT <= 1 will NOT be executed then
	  nRF52832 PPI and GPIOTE resources can be saved by not enabling
	  'anomaly-58-workaround' via the Devicetree.

config SPI_NRFX_RAM_BUFFER_SIZE
	int "Size of RAM buffers for SPIM peripherals"
	default 8
	depends on SPI_NRFX_SPIM
	help
	  Because of using EasyDMA, SPIM peripherals cannot use transmit and
	  receive buffers from all memory locations. They are restricted to
	  buffers located in certain RAM memories only. Therefore, each SPIM
	  driver instance needs to use an intermediate local RAM buffer, to
	  transfer data in chunks not exceeding the size of that buffer, and
	  to copy those chunks between the local buffer and the one specified
	  in the transfer request if the latter is not accessible by EasyDMA.
	  This option specifies the size in bytes of such local RAM buffers
	  for both TX and RX paths. A size of 0 means that this feature
	  should be disabled and the driver user must take care of not making
	  transfer requests with buffers not accessible by EasyDMA since such
	  transfers will fail.

config SPI_NRFX_WAKE_TIMEOUT_US
	int "Maximum time to wait for SPI slave to wake up"
	default 200
	help
	  Maximum amount of time (in microseconds) that SPI master should
	  wait for SPI slave to wake up after the WAKE line is asserted.
	  Used only by instances that have the WAKE line configured (see
	  the wake-gpios devicetree property).

endif # SPI_NRFX
```
/content/code_sandbox/drivers/spi/Kconfig.nrfx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,286
```c /* * */ #include <zephyr/drivers/spi.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/gpio.h> #include <soc.h> #include <nrfx_spis.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(spi_nrfx_spis, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" struct spi_nrfx_data { struct spi_context ctx; const struct device *dev; struct k_sem wake_sem; struct gpio_callback wake_cb_data; }; struct spi_nrfx_config { nrfx_spis_t spis; nrfx_spis_config_t config; void (*irq_connect)(void); uint16_t max_buf_len; const struct pinctrl_dev_config *pcfg; struct gpio_dt_spec wake_gpio; }; static inline nrf_spis_mode_t get_nrf_spis_mode(uint16_t operation) { if (SPI_MODE_GET(operation) & SPI_MODE_CPOL) { if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) { return NRF_SPIS_MODE_3; } else { return NRF_SPIS_MODE_2; } } else { if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) { return NRF_SPIS_MODE_1; } else { return NRF_SPIS_MODE_0; } } } static inline nrf_spis_bit_order_t get_nrf_spis_bit_order(uint16_t operation) { if (operation & SPI_TRANSFER_LSB) { return NRF_SPIS_BIT_ORDER_LSB_FIRST; } else { return NRF_SPIS_BIT_ORDER_MSB_FIRST; } } static int configure(const struct device *dev, const struct spi_config *spi_cfg) { const struct spi_nrfx_config *dev_config = dev->config; struct spi_nrfx_data *dev_data = dev->data; struct spi_context *ctx = &dev_data->ctx; if (spi_context_configured(ctx, spi_cfg)) { /* Already configured. No need to do it again. 
*/ return 0; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) { LOG_ERR("Master mode is not supported on %s", dev->name); return -EINVAL; } if (spi_cfg->operation & SPI_MODE_LOOP) { LOG_ERR("Loopback mode is not supported"); return -EINVAL; } if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only single line mode is supported"); return -EINVAL; } if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) { LOG_ERR("Word sizes other than 8 bits are not supported"); return -EINVAL; } if (spi_cs_is_gpio(spi_cfg)) { LOG_ERR("CS control via GPIO is not supported"); return -EINVAL; } ctx->config = spi_cfg; nrf_spis_configure(dev_config->spis.p_reg, get_nrf_spis_mode(spi_cfg->operation), get_nrf_spis_bit_order(spi_cfg->operation)); return 0; } static int prepare_for_transfer(const struct device *dev, const uint8_t *tx_buf, size_t tx_buf_len, uint8_t *rx_buf, size_t rx_buf_len) { const struct spi_nrfx_config *dev_config = dev->config; nrfx_err_t result; if (tx_buf_len > dev_config->max_buf_len || rx_buf_len > dev_config->max_buf_len) { LOG_ERR("Invalid buffer sizes: Tx %d/Rx %d", tx_buf_len, rx_buf_len); return -EINVAL; } result = nrfx_spis_buffers_set(&dev_config->spis, tx_buf, tx_buf_len, rx_buf, rx_buf_len); if (result != NRFX_SUCCESS) { return -EIO; } return 0; } static void wake_callback(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { struct spi_nrfx_data *dev_data = CONTAINER_OF(cb, struct spi_nrfx_data, wake_cb_data); const struct spi_nrfx_config *dev_config = dev_data->dev->config; (void)gpio_pin_interrupt_configure_dt(&dev_config->wake_gpio, GPIO_INT_DISABLE); k_sem_give(&dev_data->wake_sem); } static void wait_for_wake(struct spi_nrfx_data *dev_data, const struct spi_nrfx_config *dev_config) { /* If the WAKE line is low, wait until it goes high - this is a signal * from the 
master that it wants to perform a transfer. */ if (gpio_pin_get_raw(dev_config->wake_gpio.port, dev_config->wake_gpio.pin) == 0) { (void)gpio_pin_interrupt_configure_dt(&dev_config->wake_gpio, GPIO_INT_LEVEL_HIGH); (void)k_sem_take(&dev_data->wake_sem, K_FOREVER); } } static int transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { struct spi_nrfx_data *dev_data = dev->data; const struct spi_nrfx_config *dev_config = dev->config; const struct spi_buf *tx_buf = tx_bufs ? tx_bufs->buffers : NULL; const struct spi_buf *rx_buf = rx_bufs ? rx_bufs->buffers : NULL; int error; spi_context_lock(&dev_data->ctx, asynchronous, cb, userdata, spi_cfg); error = configure(dev, spi_cfg); if (error != 0) { /* Invalid configuration. */ } else if ((tx_bufs && tx_bufs->count > 1) || (rx_bufs && rx_bufs->count > 1)) { LOG_ERR("Scattered buffers are not supported"); error = -ENOTSUP; } else if (tx_buf && tx_buf->len && !nrfx_is_in_ram(tx_buf->buf)) { LOG_ERR("Only buffers located in RAM are supported"); error = -ENOTSUP; } else { if (dev_config->wake_gpio.port) { wait_for_wake(dev_data, dev_config); nrf_spis_enable(dev_config->spis.p_reg); } error = prepare_for_transfer(dev, tx_buf ? tx_buf->buf : NULL, tx_buf ? tx_buf->len : 0, rx_buf ? rx_buf->buf : NULL, rx_buf ? rx_buf->len : 0); if (error == 0) { if (dev_config->wake_gpio.port) { /* Set the WAKE line low (tie it to ground) * to signal readiness to handle the transfer. */ gpio_pin_set_raw(dev_config->wake_gpio.port, dev_config->wake_gpio.pin, 0); /* Set the WAKE line back high (i.e. disconnect * output for its pin since it's configured in * open drain mode) so that it can be controlled * by the other side again. 
*/ gpio_pin_set_raw(dev_config->wake_gpio.port, dev_config->wake_gpio.pin, 1); } error = spi_context_wait_for_completion(&dev_data->ctx); } if (dev_config->wake_gpio.port) { nrf_spis_disable(dev_config->spis.p_reg); } } spi_context_release(&dev_data->ctx, error); return error; } static int spi_nrfx_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_nrfx_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int spi_nrfx_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_nrfx_data *dev_data = dev->data; if (!spi_context_configured(&dev_data->ctx, spi_cfg)) { return -EINVAL; } spi_context_unlock_unconditionally(&dev_data->ctx); return 0; } static const struct spi_driver_api spi_nrfx_driver_api = { .transceive = spi_nrfx_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_nrfx_transceive_async, #endif .release = spi_nrfx_release, }; static void event_handler(const nrfx_spis_evt_t *p_event, void *p_context) { struct spi_nrfx_data *dev_data = p_context; if (p_event->evt_type == NRFX_SPIS_XFER_DONE) { spi_context_complete(&dev_data->ctx, dev_data->dev, p_event->rx_amount); } } static int spi_nrfx_init(const struct device *dev) { const struct spi_nrfx_config *dev_config = dev->config; struct spi_nrfx_data *dev_data = dev->data; nrfx_err_t result; int err; err = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } /* This sets only default values of mode and bit order. The ones to be * actually used are set in configure() when a transfer is prepared. 
*/ result = nrfx_spis_init(&dev_config->spis, &dev_config->config, event_handler, dev_data); if (result != NRFX_SUCCESS) { LOG_ERR("Failed to initialize device: %s", dev->name); return -EBUSY; } if (dev_config->wake_gpio.port) { if (!gpio_is_ready_dt(&dev_config->wake_gpio)) { return -ENODEV; } /* In open drain mode, the output is disconnected when set to * the high state, so the following will effectively configure * the pin as an input only. */ err = gpio_pin_configure_dt(&dev_config->wake_gpio, GPIO_INPUT | GPIO_OUTPUT_HIGH | GPIO_OPEN_DRAIN); if (err < 0) { return err; } gpio_init_callback(&dev_data->wake_cb_data, wake_callback, BIT(dev_config->wake_gpio.pin)); err = gpio_add_callback(dev_config->wake_gpio.port, &dev_data->wake_cb_data); if (err < 0) { return err; } /* When the WAKE line is used, the SPIS peripheral is enabled * only after the master signals that it wants to perform a * transfer and it is disabled right after the transfer is done. * Waiting for the WAKE line to go high, what can be done using * the GPIO PORT event, instead of just waiting for the transfer * with the SPIS peripheral enabled, significantly reduces idle * power consumption. */ nrf_spis_disable(dev_config->spis.p_reg); } spi_context_unlock_unconditionally(&dev_data->ctx); return 0; } /* * Current factors requiring use of DT_NODELABEL: * * - HAL design (requirement of drv_inst_idx in nrfx_spis_t) * - Name-based HAL IRQ handlers, e.g. 
nrfx_spis_0_irq_handler */ #define SPIS(idx) DT_NODELABEL(spi##idx) #define SPIS_PROP(idx, prop) DT_PROP(SPIS(idx), prop) #define SPI_NRFX_SPIS_DEFINE(idx) \ static void irq_connect##idx(void) \ { \ IRQ_CONNECT(DT_IRQN(SPIS(idx)), DT_IRQ(SPIS(idx), priority), \ nrfx_isr, nrfx_spis_##idx##_irq_handler, 0); \ } \ static struct spi_nrfx_data spi_##idx##_data = { \ SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx), \ .dev = DEVICE_DT_GET(SPIS(idx)), \ .wake_sem = Z_SEM_INITIALIZER( \ spi_##idx##_data.wake_sem, 0, 1), \ }; \ PINCTRL_DT_DEFINE(SPIS(idx)); \ static const struct spi_nrfx_config spi_##idx##z_config = { \ .spis = { \ .p_reg = (NRF_SPIS_Type *)DT_REG_ADDR(SPIS(idx)), \ .drv_inst_idx = NRFX_SPIS##idx##_INST_IDX, \ }, \ .config = { \ .skip_gpio_cfg = true, \ .skip_psel_cfg = true, \ .mode = NRF_SPIS_MODE_0, \ .bit_order = NRF_SPIS_BIT_ORDER_MSB_FIRST, \ .orc = SPIS_PROP(idx, overrun_character), \ .def = SPIS_PROP(idx, def_char), \ }, \ .irq_connect = irq_connect##idx, \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(SPIS(idx)), \ .max_buf_len = BIT_MASK(SPIS_PROP(idx, easydma_maxcnt_bits)), \ .wake_gpio = GPIO_DT_SPEC_GET_OR(SPIS(idx), wake_gpios, {0}), \ }; \ BUILD_ASSERT(!DT_NODE_HAS_PROP(SPIS(idx), wake_gpios) || \ !(DT_GPIO_FLAGS(SPIS(idx), wake_gpios) & GPIO_ACTIVE_LOW),\ "WAKE line must be configured as active high"); \ DEVICE_DT_DEFINE(SPIS(idx), \ spi_nrfx_init, \ NULL, \ &spi_##idx##_data, \ &spi_##idx##z_config, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_nrfx_driver_api) /* Macro creates device instance if it is enabled in devicetree. */ #define SPIS_DEVICE(periph, prefix, id, _) \ IF_ENABLED(CONFIG_HAS_HW_NRF_SPIS##prefix##id, (SPI_NRFX_SPIS_DEFINE(prefix##id);)) /* Macro iterates over nrfx_spis instances enabled in the nrfx_config.h. */ NRFX_FOREACH_ENABLED(SPIS, SPIS_DEVICE, (), (), _) ```
/content/code_sandbox/drivers/spi/spi_nrfx_spis.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,312
```c /* * */ #define DT_DRV_COMPAT renesas_smartbond_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_smartbond); #include "spi_context.h" #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/spi.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/pm/device_runtime.h> #include <zephyr/drivers/dma.h> #include <zephyr/sys/byteorder.h> #include <DA1469xAB.h> #include <da1469x_pd.h> #define DIVN_CLK 32000000 /* DIVN clock: fixed @32MHz */ #define SCLK_FREQ_2MHZ (DIVN_CLK / 14) /* 2.285714 MHz*/ #define SCLK_FREQ_4MHZ (DIVN_CLK / 8) /* 4 MHz */ #define SCLK_FREQ_8MHZ (DIVN_CLK / 4) /* 8 MHz */ #define SCLK_FREQ_16MHZ (DIVN_CLK / 2) /* 16 MHz */ enum spi_smartbond_transfer { SPI_SMARTBOND_TRANSFER_TX_ONLY, SPI_SMARTBOND_TRANSFER_RX_ONLY, SPI_SMARTBOND_TRANSFER_TX_RX, SPI_SMARTBOND_TRANSFER_NONE }; enum spi_smartbond_dma_channel { SPI_SMARTBOND_DMA_TX_CHANNEL, SPI_SMARTBOND_DMA_RX_CHANNEL }; enum spi_smartbond_fifo_mode { /* Bi-directional mode */ SPI_SMARTBOND_FIFO_MODE_TX_RX, /* TX FIFO single depth, no flow control */ SPI_SMARTBOND_FIFO_MODE_RX_ONLY, /* RX FIFO single depth, no flow control */ SPI_SMARTBOND_FIFO_MODE_TX_ONLY, SPI_SMARTBOND_FIFO_NONE }; struct spi_smartbond_cfg { SPI_Type *regs; int periph_clock_config; const struct pinctrl_dev_config *pcfg; #ifdef CONFIG_SPI_SMARTBOND_DMA int tx_dma_chan; int rx_dma_chan; uint8_t tx_slot_mux; uint8_t rx_slot_mux; const struct device *tx_dma_ctrl; const struct device *rx_dma_ctrl; #endif }; struct spi_smartbond_data { struct spi_context ctx; uint8_t dfs; #if defined(CONFIG_PM_DEVICE) uint32_t spi_ctrl_reg; #endif #ifdef CONFIG_SPI_SMARTBOND_DMA struct dma_config tx_dma_cfg; struct dma_config rx_dma_cfg; struct dma_block_config tx_dma_block_cfg; struct dma_block_config rx_dma_block_cfg; struct k_sem rx_dma_sync; struct k_sem tx_dma_sync; ATOMIC_DEFINE(dma_channel_atomic_flag, 2); #endif #if 
defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) size_t rx_len; size_t tx_len; size_t transferred; enum spi_smartbond_transfer transfer_mode; #endif }; #define SPI_CTRL_REG_SET_FIELD(_field, _var, _val) \ (_var) = \ (((_var) & ~SPI_SPI_CTRL_REG_ ## _field ## _Msk) | \ (((_val) << SPI_SPI_CTRL_REG_ ## _field ## _Pos) & SPI_SPI_CTRL_REG_ ## _field ## _Msk)) static inline void spi_smartbond_enable(const struct spi_smartbond_cfg *cfg, bool enable) { if (enable) { cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_ON_Msk; cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_RST_Msk; } else { cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_ON_Msk; cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_RST_Msk; } } static inline bool spi_smartbond_isenabled(const struct spi_smartbond_cfg *cfg) { return (!!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_ON_Msk)) && (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_RST_Msk)); } static inline void spi_smartbond_write_word(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; struct spi_smartbond_data *data = dev->data; /* * No need to typecast the register address as the controller will automatically * generate the necessary clock cycles based on the data size. 
*/ switch (data->dfs) { case 1: cfg->regs->SPI_RX_TX_REG = *(uint8_t *)data->ctx.tx_buf; break; case 2: cfg->regs->SPI_RX_TX_REG = sys_get_le16(data->ctx.tx_buf); break; case 4: cfg->regs->SPI_RX_TX_REG = sys_get_le32(data->ctx.tx_buf); break; } } static inline void spi_smartbond_write_dummy(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; cfg->regs->SPI_RX_TX_REG = 0x0; } static inline void spi_smartbond_read_word(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; struct spi_smartbond_data *data = dev->data; switch (data->dfs) { case 1: *(uint8_t *)data->ctx.rx_buf = cfg->regs->SPI_RX_TX_REG; break; case 2: sys_put_le16((uint16_t)cfg->regs->SPI_RX_TX_REG, data->ctx.rx_buf); break; case 4: sys_put_le32(cfg->regs->SPI_RX_TX_REG, data->ctx.rx_buf); break; } } static inline void spi_smartbond_read_discard(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; (void)cfg->regs->SPI_RX_TX_REG; } static inline int spi_smartbond_set_speed(const struct spi_smartbond_cfg *cfg, const uint32_t frequency) { if (frequency < SCLK_FREQ_2MHZ) { LOG_ERR("Frequency is lower than minimal SCLK %d", SCLK_FREQ_2MHZ); return -ENOTSUP; } else if (frequency < SCLK_FREQ_4MHZ) { cfg->regs->SPI_CTRL_REG = (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_CLK_Msk) | 3UL << SPI_SPI_CTRL_REG_SPI_CLK_Pos; } else if (frequency < SCLK_FREQ_8MHZ) { cfg->regs->SPI_CTRL_REG = (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_CLK_Msk); } else if (frequency < SCLK_FREQ_16MHZ) { cfg->regs->SPI_CTRL_REG = (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_CLK_Msk) | 1UL << SPI_SPI_CTRL_REG_SPI_CLK_Pos; } else { cfg->regs->SPI_CTRL_REG = (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_CLK_Msk) | 2UL << SPI_SPI_CTRL_REG_SPI_CLK_Pos; } return 0; } static inline int spi_smartbond_set_word_size(const struct spi_smartbond_cfg *cfg, struct spi_smartbond_data *data, const uint32_t operation) { switch (SPI_WORD_SIZE_GET(operation)) { 
case 8: data->dfs = 1; cfg->regs->SPI_CTRL_REG = (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_WORD_Msk); break; case 16: data->dfs = 2; cfg->regs->SPI_CTRL_REG = (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_WORD_Msk) | (1UL << SPI_SPI_CTRL_REG_SPI_WORD_Pos); break; case 32: data->dfs = 4; cfg->regs->SPI_CTRL_REG = (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_WORD_Msk) | (2UL << SPI_SPI_CTRL_REG_SPI_WORD_Pos); break; default: LOG_ERR("Word size not supported"); return -ENOTSUP; } return 0; } static inline void spi_smartbond_pm_policy_state_lock_get(const struct device *dev) { #if defined(CONFIG_PM_DEVICE) /* * Prevent the SoC from entering the normal sleep state as PDC does not support * waking up the application core following SPI events. */ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); pm_device_runtime_get(dev); #endif } static inline void spi_smartbond_pm_policy_state_lock_put(const struct device *dev) { #if defined(CONFIG_PM_DEVICE) pm_device_runtime_put(dev); /* * Allow the SoC to enter the normal sleep state once SPI transactions are done. 
*/ pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); #endif } static int spi_smartbond_configure(const struct spi_smartbond_cfg *cfg, struct spi_smartbond_data *data, const struct spi_config *spi_cfg) { int rc; if (spi_context_configured(&data->ctx, spi_cfg)) { #ifdef CONFIG_PM_DEVICE spi_smartbond_enable(cfg, true); #endif return 0; } if (spi_cfg->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not yet supported"); return -ENOTSUP; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only single line mode is supported"); return -ENOTSUP; } if (spi_cfg->operation & SPI_MODE_LOOP) { LOG_ERR("Loopback mode is not supported"); return -ENOTSUP; } if (spi_smartbond_isenabled(cfg)) { spi_smartbond_enable(cfg, false); } rc = spi_smartbond_set_speed(cfg, spi_cfg->frequency); if (rc) { return rc; } cfg->regs->SPI_CTRL_REG = (spi_cfg->operation & SPI_MODE_CPOL) ? (cfg->regs->SPI_CTRL_REG | SPI_SPI_CTRL_REG_SPI_POL_Msk) : (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_POL_Msk); cfg->regs->SPI_CTRL_REG = (spi_cfg->operation & SPI_MODE_CPHA) ? 
(cfg->regs->SPI_CTRL_REG | SPI_SPI_CTRL_REG_SPI_PHA_Msk) : (cfg->regs->SPI_CTRL_REG & ~SPI_SPI_CTRL_REG_SPI_PHA_Msk); rc = spi_smartbond_set_word_size(cfg, data, spi_cfg->operation); if (rc) { return rc; } cfg->regs->SPI_CTRL_REG &= ~(SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk); spi_smartbond_enable(cfg, true); cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_MINT_Msk; data->ctx.config = spi_cfg; return 0; } #if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) static inline void spi_smartbond_isr_set_status(const struct device *dev, bool status) { const struct spi_smartbond_cfg *cfg = dev->config; if (status) { cfg->regs->SPI_CTRL_REG |= SPI_SPI_CTRL_REG_SPI_MINT_Msk; } else { cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_MINT_Msk; } } static inline bool spi_smartbond_is_busy(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_BUSY_Msk); } static inline void spi_smartbond_clear_interrupt(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; cfg->regs->SPI_CLEAR_INT_REG = 0x1; } /* 0 = No RX data available, 1 = data has been transmitted and received */ static inline bool spi_smartbond_is_rx_data(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk); } static inline uint8_t spi_smartbond_get_fifo_mode(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; return ((cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Msk) >> SPI_SPI_CTRL_REG_SPI_FIFO_MODE_Pos); } static void spi_smartbond_set_fifo_mode(const struct device *dev, enum spi_smartbond_fifo_mode mode) { const struct spi_smartbond_cfg *cfg = dev->config; bool is_enabled = spi_smartbond_isenabled(cfg); enum spi_smartbond_fifo_mode current_mode = spi_smartbond_get_fifo_mode(dev); uint32_t spi_ctrl_reg = cfg->regs->SPI_CTRL_REG; #ifdef CONFIG_SPI_SMARTBOND_DMA struct 
spi_smartbond_data *data = dev->data; #endif if ((current_mode != mode) #ifdef CONFIG_SPI_SMARTBOND_DMA || (data->dfs == 4) #endif ) { if (current_mode != SPI_SMARTBOND_FIFO_MODE_RX_ONLY) { while (spi_smartbond_is_busy(dev)) { ; } } /* Controller should be disabled when FIFO mode is updated */ cfg->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_ON_Msk; #ifdef CONFIG_SPI_SMARTBOND_DMA /* * Workaround for the controller that cannot generate DMA requests * for 4-byte bus length. */ if (data->dfs == 4) { mode = SPI_SMARTBOND_FIFO_NONE; } #endif SPI_CTRL_REG_SET_FIELD(SPI_FIFO_MODE, spi_ctrl_reg, mode); if (mode != SPI_SMARTBOND_FIFO_NONE) { SPI_CTRL_REG_SET_FIELD(SPI_DMA_TXREQ_MODE, spi_ctrl_reg, 0); } else { SPI_CTRL_REG_SET_FIELD(SPI_DMA_TXREQ_MODE, spi_ctrl_reg, 1); } if (is_enabled) { SPI_CTRL_REG_SET_FIELD(SPI_ON, spi_ctrl_reg, 1); } cfg->regs->SPI_CTRL_REG = spi_ctrl_reg; } } static int spi_smartbond_transfer_mode_get(const struct device *dev) { struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; if (spi_context_rx_buf_on(ctx) || spi_context_tx_buf_on(ctx)) { /* * Check only buffers' length as it might happen that current buffer is NULL. * In such a case the context should be updated and a dummy write/read should * take place. */ if (ctx->rx_len || ctx->tx_len) { spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX); return SPI_SMARTBOND_TRANSFER_TX_RX; } if (!spi_context_rx_buf_on(ctx)) { spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_ONLY); return SPI_SMARTBOND_TRANSFER_TX_ONLY; } if (!spi_context_tx_buf_on(ctx)) { /* * Use the TX/RX mode with TX being dummy. Using the RX only mode * is a bit tricky as the controller should generate clock cycles * automatically and immediately after the ISR is enabled. 
*/ spi_smartbond_set_fifo_mode(dev, SPI_SMARTBOND_FIFO_MODE_TX_RX); return SPI_SMARTBOND_TRANSFER_RX_ONLY; } } /* Return waiting updating the fifo mode */ return SPI_SMARTBOND_TRANSFER_NONE; } static inline void spi_smartbond_transfer_mode_check_and_update(const struct device *dev) { struct spi_smartbond_data *data = dev->data; data->transfer_mode = spi_smartbond_transfer_mode_get(dev); } #endif #ifdef CONFIG_SPI_ASYNC static inline bool spi_smartbond_is_tx_full(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; return (cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_TXH_Msk); } static void spi_smartbond_write(const struct device *dev) { struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; while (spi_context_tx_buf_on(ctx)) { /* Check if TX FIFO is full as otherwise undefined data should be transmitted. */ if (spi_smartbond_is_tx_full(dev)) { spi_smartbond_clear_interrupt(dev); break; } /* Send to TX FIFO and update buffer pointer. */ spi_smartbond_write_word(dev); spi_context_update_tx(ctx, data->dfs, 1); /* * It might happen that a NULL buffer with a non-zero length is provided. * In that case, the bytes should be consumed. */ if (ctx->rx_len && !ctx->rx_buf) { spi_smartbond_read_discard(dev); spi_context_update_rx(ctx, data->dfs, 1); } } } static void spi_smartbond_transfer(const struct device *dev) { struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; while (data->rx_len) { /* Zero means that RX FIFO or register is empty */ if (!spi_smartbond_is_rx_data(dev)) { break; } if (ctx->rx_buf) { spi_smartbond_read_word(dev); } else { spi_smartbond_read_discard(dev); } spi_context_update_rx(ctx, data->dfs, 1); spi_smartbond_clear_interrupt(dev); data->rx_len--; data->transferred++; } while (data->tx_len) { /* Check if TX FIFO is full as otherwise undefined data should be transmitted. 
*/ if (spi_smartbond_is_tx_full(dev)) { break; } if (ctx->tx_buf) { spi_smartbond_write_word(dev); } else { spi_smartbond_write_dummy(dev); } spi_context_update_tx(ctx, data->dfs, 1); data->tx_len--; } } static void spi_smartbond_read(const struct device *dev) { struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; while (spi_context_rx_buf_on(ctx)) { /* Zero means that RX FIFO or register is empty */ if (!spi_smartbond_is_rx_data(dev)) { break; } spi_smartbond_read_word(dev); spi_context_update_rx(ctx, data->dfs, 1); spi_smartbond_clear_interrupt(dev); } /* Perform dummy access to generate the required clock cycles */ while (data->tx_len) { if (spi_smartbond_is_tx_full(dev)) { break; } spi_smartbond_write_dummy(dev); data->tx_len--; } } static void spi_smartbond_isr_trigger(const struct device *dev) { struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; data->transfer_mode = spi_smartbond_transfer_mode_get(dev); switch (data->transfer_mode) { case SPI_SMARTBOND_TRANSFER_RX_ONLY: data->tx_len = spi_context_total_rx_len(ctx); spi_smartbond_read(dev); break; case SPI_SMARTBOND_TRANSFER_TX_ONLY: spi_smartbond_write(dev); break; case SPI_SMARTBOND_TRANSFER_TX_RX: /* * Each sub-transfer in the descriptor list should be exercised * separately as it might happen that a buffer is NULL with * non-zero length. 
*/ data->rx_len = spi_context_max_continuous_chunk(ctx); data->tx_len = data->rx_len; spi_smartbond_transfer(dev); break; case SPI_SMARTBOND_TRANSFER_NONE: __fallthrough; default: __ASSERT_MSG_INFO("Invalid transfer mode"); break; } spi_smartbond_isr_set_status(dev, true); } static int spi_smartbond_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { const struct spi_smartbond_cfg *cfg = dev->config; struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; int rc; spi_context_lock(ctx, true, cb, userdata, spi_cfg); spi_smartbond_pm_policy_state_lock_get(dev); rc = spi_smartbond_configure(cfg, data, spi_cfg); if (rc == 0) { spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs); spi_context_cs_control(ctx, true); /* * PM constraints will be released within ISR once all transfers * are exercised along with de-asserting the #CS line. */ spi_smartbond_isr_trigger(dev); } /* * Context will actually be released when \sa spi_context_complete * is called. 
*/ spi_context_release(ctx, rc); return rc; } #endif #if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) static void spi_smartbond_isr(void *args) { #ifdef CONFIG_SPI_ASYNC struct device *dev = args; struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; switch (data->transfer_mode) { case SPI_SMARTBOND_TRANSFER_RX_ONLY: spi_smartbond_read(dev); break; case SPI_SMARTBOND_TRANSFER_TX_ONLY: spi_smartbond_write(dev); break; case SPI_SMARTBOND_TRANSFER_TX_RX: /* Exersice the type of the next sub-transfer */ if (!data->rx_len && !data->tx_len) { spi_smartbond_transfer_mode_check_and_update(dev); if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_RX_ONLY) { data->tx_len = spi_context_total_rx_len(ctx) - data->transferred; /* Clear in case another truncated transfer should be executed */ data->transferred = 0; spi_smartbond_read(dev); } else if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_TX_ONLY) { spi_smartbond_write(dev); } else if (data->transfer_mode == SPI_SMARTBOND_TRANSFER_TX_RX) { data->rx_len = spi_context_max_continuous_chunk(ctx); data->tx_len = data->rx_len; spi_smartbond_transfer(dev); } } else { spi_smartbond_transfer(dev); } break; case SPI_SMARTBOND_TRANSFER_NONE: __fallthrough; default: __ASSERT_MSG_INFO("Invalid transfer mode"); break; } /* All buffers have been exercised, signal completion */ if (!spi_context_tx_buf_on(ctx) && !spi_context_rx_buf_on(ctx)) { spi_smartbond_isr_set_status(dev, false); /* Mark completion to trigger callback function */ spi_context_complete(ctx, dev, 0); spi_context_cs_control(ctx, false); spi_smartbond_pm_policy_state_lock_put(data); } #endif } #endif #ifdef CONFIG_SPI_SMARTBOND_DMA static uint32_t spi_smartbond_read_dummy_buf; /* * Should be used to flush the RX FIFO in case a transaction is requested * with NULL pointer and non-zero length. 
In such a case, data will be * shifted into the RX FIFO (regardless of whether or not the RX mode is * disabled) which should then be flushed. Otherwise, a next read operation * will result in fetching old bytes. */ static void spi_smartbond_flush_rx_fifo(const struct device *dev) { while (spi_smartbond_is_busy(dev)) { }; while (spi_smartbond_is_rx_data(dev)) { spi_smartbond_read_discard(dev); spi_smartbond_clear_interrupt(dev); } } static int spi_smartbond_dma_tx_channel_request(const struct device *dev) { struct spi_smartbond_data *data = dev->data; const struct spi_smartbond_cfg *config = dev->config; if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag, SPI_SMARTBOND_DMA_TX_CHANNEL)) { if (dma_request_channel(config->tx_dma_ctrl, (void *)&config->tx_dma_chan) < 0) { atomic_clear_bit(data->dma_channel_atomic_flag, SPI_SMARTBOND_DMA_TX_CHANNEL); return -EIO; } } return 0; } #ifdef CONFIG_PM_DEVICE static void spi_smartbond_dma_tx_channel_release(const struct device *dev) { struct spi_smartbond_data *data = dev->data; const struct spi_smartbond_cfg *config = dev->config; if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag, SPI_SMARTBOND_DMA_TX_CHANNEL)) { dma_release_channel(config->tx_dma_ctrl, config->tx_dma_chan); } } #endif static int spi_smartbond_dma_rx_channel_request(const struct device *dev) { struct spi_smartbond_data *data = dev->data; const struct spi_smartbond_cfg *config = dev->config; if (!atomic_test_and_set_bit(data->dma_channel_atomic_flag, SPI_SMARTBOND_DMA_RX_CHANNEL)) { if (dma_request_channel(config->rx_dma_ctrl, (void *)&config->rx_dma_chan) < 0) { atomic_clear_bit(data->dma_channel_atomic_flag, SPI_SMARTBOND_DMA_RX_CHANNEL); return -EIO; } } return 0; } #ifdef CONFIG_PM_DEVICE static void spi_smartbond_dma_rx_channel_release(const struct device *dev) { struct spi_smartbond_data *data = dev->data; const struct spi_smartbond_cfg *config = dev->config; if (atomic_test_and_clear_bit(data->dma_channel_atomic_flag, 
SPI_SMARTBOND_DMA_RX_CHANNEL)) { dma_release_channel(config->rx_dma_ctrl, config->rx_dma_chan); } } #endif static void spi_smartbond_tx_dma_cb(const struct device *dma, void *arg, uint32_t id, int status) { const struct device *dev = arg; struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; if (status < 0) { LOG_WRN("DMA transfer did not complete"); } spi_context_update_tx(ctx, data->dfs, data->tx_len); k_sem_give(&data->tx_dma_sync); } static void spi_smartbond_rx_dma_cb(const struct device *dma, void *arg, uint32_t id, int status) { const struct device *dev = arg; struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; if (status < 0) { LOG_WRN("DMA transfer did not complete"); } spi_context_update_rx(ctx, data->dfs, data->rx_len); k_sem_give(&data->rx_dma_sync); } #ifdef CONFIG_PM_DEVICE static void spi_smartbond_dma_deconfig(const struct device *dev) { const struct spi_smartbond_cfg *config = dev->config; if (config->rx_dma_ctrl && config->tx_dma_ctrl) { dma_stop(config->rx_dma_ctrl, config->rx_dma_chan); dma_stop(config->tx_dma_ctrl, config->tx_dma_chan); spi_smartbond_dma_rx_channel_release(dev); spi_smartbond_dma_tx_channel_release(dev); } } #endif static int spi_smartbond_dma_config(const struct device *dev) { struct spi_smartbond_data *data = dev->data; const struct spi_smartbond_cfg *config = dev->config; struct dma_config *tx = &data->tx_dma_cfg; struct dma_config *rx = &data->rx_dma_cfg; struct dma_block_config *tx_block = &data->tx_dma_block_cfg; struct dma_block_config *rx_block = &data->rx_dma_block_cfg; /* * DMA RX should be assigned an even number and * DMA TX should be assigned the right next * channel (odd number). 
*/ if (!(config->tx_dma_chan & 0x1) || (config->rx_dma_chan & 0x1) || (config->tx_dma_chan != (config->rx_dma_chan + 1))) { LOG_ERR("Invalid RX/TX channel selection"); return -EINVAL; } if (config->tx_slot_mux != config->rx_slot_mux) { LOG_ERR("TX/RX DMA slots mismatch"); return -EINVAL; } if (!device_is_ready(config->tx_dma_ctrl) || !device_is_ready(config->rx_dma_ctrl)) { LOG_ERR("TX/RX DMA device is not ready"); return -ENODEV; } if (spi_smartbond_dma_tx_channel_request(dev) < 0) { LOG_ERR("TX DMA channel is already occupied"); return -EIO; } if (spi_smartbond_dma_rx_channel_request(dev) < 0) { LOG_ERR("RX DMA channel is already occupied"); return -EIO; } tx->channel_direction = MEMORY_TO_PERIPHERAL; tx->dma_callback = spi_smartbond_tx_dma_cb; tx->user_data = (void *)dev; tx->block_count = 1; tx->head_block = &data->tx_dma_block_cfg; tx->error_callback_dis = 1; tx->dma_slot = config->tx_slot_mux; tx->channel_priority = 2; /* Burst mode is not using when DREQ is one */ tx->source_burst_length = 1; tx->dest_burst_length = 1; /* Source and destination data size should reflect DFS value */ tx->source_data_size = 0; tx->dest_data_size = 0; /* Do not change */ tx_block->dest_addr_adj = 0x2; /* Incremental */ tx_block->source_addr_adj = 0x0; tx_block->dest_address = (uint32_t)&config->regs->SPI_RX_TX_REG; /* * To be filled when a transaction is requested and * should reflect the total number of bytes. 
*/ tx_block->block_size = 0; /* Should reflect the TX buffer */ tx_block->source_address = 0; rx->channel_direction = PERIPHERAL_TO_MEMORY; rx->dma_callback = spi_smartbond_rx_dma_cb; rx->user_data = (void *)dev; rx->block_count = 1; rx->head_block = &data->rx_dma_block_cfg; rx->error_callback_dis = 1; rx->dma_slot = config->rx_slot_mux; rx->channel_priority = 2; /* Burst mode is not using when DREQ is one */ rx->source_burst_length = 1; rx->dest_burst_length = 1; /* Source and destination data size should reflect DFS value */ rx->source_data_size = 0; rx->dest_data_size = 0; /* Do not change */ rx_block->source_addr_adj = 0x2; /* Incremenetal */ rx_block->dest_addr_adj = 0x0; rx_block->source_address = (uint32_t)&config->regs->SPI_RX_TX_REG; /* * To be filled when a transaction is requested and * should reflect the total number of bytes. */ rx_block->block_size = 0; /* Should reflect the RX buffer */ rx_block->dest_address = 0; return 0; } static int spi_smartbond_dma_trigger(const struct device *dev) { struct spi_smartbond_data *data = dev->data; const struct spi_smartbond_cfg *config = dev->config; struct spi_context *ctx = &data->ctx; struct dma_config *tx = &data->tx_dma_cfg; struct dma_config *rx = &data->rx_dma_cfg; struct dma_block_config *tx_block = &data->tx_dma_block_cfg; struct dma_block_config *rx_block = &data->rx_dma_block_cfg; rx->source_data_size = data->dfs; rx->dest_data_size = data->dfs; tx->source_data_size = data->dfs; tx->dest_data_size = data->dfs; data->transfer_mode = spi_smartbond_transfer_mode_get(dev); do { switch (data->transfer_mode) { case SPI_SMARTBOND_TRANSFER_RX_ONLY: spi_smartbond_flush_rx_fifo(dev); data->rx_len = spi_context_max_continuous_chunk(ctx); data->tx_len = data->rx_len; rx_block->block_size = data->rx_len * data->dfs; tx_block->block_size = rx_block->block_size; rx_block->dest_address = (uint32_t)ctx->rx_buf; rx_block->dest_addr_adj = 0x0; tx_block->source_address = (uint32_t)&spi_smartbond_read_dummy_buf; /* 
Non-incremental */ tx_block->source_addr_adj = 0x2; if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) { LOG_ERR("TX DMA configuration failed"); return -EINVAL; } if (dma_config(config->rx_dma_ctrl, config->rx_dma_chan, rx) < 0) { LOG_ERR("RX DMA configuration failed"); return -EINVAL; } dma_start(config->rx_dma_ctrl, config->rx_dma_chan); dma_start(config->tx_dma_ctrl, config->tx_dma_chan); /* Wait for the current DMA transfer to complete */ k_sem_take(&data->tx_dma_sync, K_FOREVER); k_sem_take(&data->rx_dma_sync, K_FOREVER); break; case SPI_SMARTBOND_TRANSFER_TX_ONLY: spi_smartbond_flush_rx_fifo(dev); data->tx_len = spi_context_max_continuous_chunk(ctx); data->rx_len = data->tx_len; tx_block->block_size = data->tx_len * data->dfs; tx_block->source_address = (uint32_t)ctx->tx_buf; tx_block->source_addr_adj = 0x0; if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) { LOG_ERR("TX DMA configuration failed"); return -EINVAL; } dma_start(config->tx_dma_ctrl, config->tx_dma_chan); /* Wait for the current DMA transfer to complete */ k_sem_take(&data->tx_dma_sync, K_FOREVER); break; case SPI_SMARTBOND_TRANSFER_TX_RX: spi_smartbond_flush_rx_fifo(dev); data->rx_len = spi_context_max_continuous_chunk(ctx); data->tx_len = data->rx_len; /* * DMA block size represents total number of bytes whilist, * context length is divided by the data size (dfs). 
*/ tx_block->block_size = data->tx_len * data->dfs; rx_block->block_size = tx_block->block_size; if (ctx->tx_buf) { tx_block->source_address = (uint32_t)ctx->tx_buf; tx_block->source_addr_adj = 0x0; } else { tx_block->source_address = (uint32_t)&spi_smartbond_read_dummy_buf; tx_block->source_addr_adj = 0x2; } if (ctx->rx_buf) { rx_block->dest_address = (uint32_t)ctx->rx_buf; rx_block->dest_addr_adj = 0x0; } else { rx_block->dest_address = (uint32_t)&spi_smartbond_read_dummy_buf; rx_block->dest_addr_adj = 0x2; } if (dma_config(config->tx_dma_ctrl, config->tx_dma_chan, tx) < 0) { LOG_ERR("TX DMA configuration failed"); return -EINVAL; } if (dma_config(config->rx_dma_ctrl, config->rx_dma_chan, rx) < 0) { LOG_ERR("RX DMA configuration failed"); return -EINVAL; } dma_start(config->rx_dma_ctrl, config->rx_dma_chan); dma_start(config->tx_dma_ctrl, config->tx_dma_chan); k_sem_take(&data->tx_dma_sync, K_FOREVER); k_sem_take(&data->rx_dma_sync, K_FOREVER); /* * Regardless of whether or not the RX FIFO is enabled, received * bytes are pushed into it. As such, the RXI FIFO should be * flushed so that a next read access retrives the correct bytes * and not old ones. 
*/ if (!ctx->rx_buf) { spi_smartbond_flush_rx_fifo(dev); } break; case SPI_SMARTBOND_TRANSFER_NONE: __fallthrough; default: __ASSERT_MSG_INFO("Invalid transfer mode"); break; } spi_smartbond_transfer_mode_check_and_update(dev); } while (data->transfer_mode != SPI_SMARTBOND_TRANSFER_NONE); return 0; } #endif static int spi_smartbond_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct spi_smartbond_cfg *cfg = dev->config; struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; int rc; spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg); spi_smartbond_pm_policy_state_lock_get(dev); rc = spi_smartbond_configure(cfg, data, spi_cfg); if (rc == 0) { spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs); spi_context_cs_control(ctx, true); #ifdef CONFIG_SPI_SMARTBOND_DMA rc = spi_smartbond_dma_trigger(dev); /* Mark completion to trigger callback function */ spi_context_complete(ctx, dev, 0); #else while (spi_context_tx_buf_on(ctx) || spi_context_rx_buf_on(ctx)) { if (spi_context_tx_buf_on(ctx)) { spi_smartbond_write_word(dev); spi_context_update_tx(ctx, data->dfs, 1); } else { spi_smartbond_write_dummy(dev); } while (!(cfg->regs->SPI_CTRL_REG & SPI_SPI_CTRL_REG_SPI_INT_BIT_Msk)) { }; if (spi_context_rx_buf_on(ctx)) { spi_smartbond_read_word(dev); spi_context_update_rx(ctx, data->dfs, 1); } else { spi_smartbond_read_discard(dev); /* * It might happen that a NULL buffer with a non-zero length * is provided. In that case, the bytes should be consumed. 
*/ if (ctx->rx_len) { spi_context_update_rx(ctx, data->dfs, 1); } } cfg->regs->SPI_CLEAR_INT_REG = 1UL; } #endif spi_context_cs_control(ctx, false); } spi_context_release(&data->ctx, rc); spi_smartbond_pm_policy_state_lock_put(dev); return rc; } static int spi_smartbond_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_smartbond_data *data = dev->data; struct spi_context *ctx = &data->ctx; if (!spi_context_configured(ctx, spi_cfg)) { LOG_ERR("SPI configuration was not the last one to be used"); return -EINVAL; } spi_context_unlock_unconditionally(ctx); return 0; } static const struct spi_driver_api spi_smartbond_driver_api = { .transceive = spi_smartbond_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_smartbond_transceive_async, #endif .release = spi_smartbond_release, }; static int spi_smartbond_resume(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; struct spi_smartbond_data *data = dev->data; int rc; CRG_COM->RESET_CLK_COM_REG = cfg->periph_clock_config << 1; CRG_COM->SET_CLK_COM_REG = cfg->periph_clock_config; rc = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (rc < 0) { LOG_ERR("Failed to configure SPI pins"); return rc; } rc = spi_context_cs_configure_all(&data->ctx); if (rc < 0) { LOG_ERR("Failed to configure CS pins: %d", rc); return rc; } #ifdef CONFIG_SPI_SMARTBOND_DMA rc = spi_smartbond_dma_config(dev); if (rc < 0) { LOG_ERR("Failed to configure DMA"); return rc; } #endif spi_context_unlock_unconditionally(&data->ctx); return 0; } #if defined(CONFIG_PM_DEVICE) static int spi_smartbond_suspend(const struct device *dev) { int ret; const struct spi_smartbond_cfg *config = dev->config; struct spi_smartbond_data *data = dev->data; data->spi_ctrl_reg = config->regs->SPI_CTRL_REG; /* Disable the SPI digital block */ config->regs->SPI_CTRL_REG &= ~SPI_SPI_CTRL_REG_SPI_EN_CTRL_Msk; /* Gate SPI clocking */ CRG_COM->RESET_CLK_COM_REG = config->periph_clock_config; ret = 
pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP); if (ret < 0) { LOG_WRN("Failed to configure the SPI pins to inactive state"); } #ifdef CONFIG_SPI_SMARTBOND_DMA spi_smartbond_dma_deconfig(dev); #endif return ret; } static int spi_smartbond_pm_action(const struct device *dev, enum pm_device_action action) { int ret; switch (action) { case PM_DEVICE_ACTION_RESUME: da1469x_pd_acquire(MCU_PD_DOMAIN_COM); ret = spi_smartbond_resume(dev); break; case PM_DEVICE_ACTION_SUSPEND: ret = spi_smartbond_suspend(dev); da1469x_pd_release(MCU_PD_DOMAIN_COM); break; default: ret = -ENOTSUP; } return ret; } #endif #define SPI_SMARTBOND_ISR_CONNECT \ IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi)), DT_IRQ(DT_NODELABEL(spi), priority), \ spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi)), 0); \ irq_enable(DT_IRQN(DT_NODELABEL(spi))); #define SPI2_SMARTBOND_ISR_CONNECT \ IRQ_CONNECT(DT_IRQN(DT_NODELABEL(spi2)), DT_IRQ(DT_NODELABEL(spi2), priority), \ spi_smartbond_isr, DEVICE_DT_GET(DT_NODELABEL(spi2)), 0); \ irq_enable(DT_IRQN(DT_NODELABEL(spi2))); #if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) static int spi_smartbond_isr_connect(const struct device *dev) { const struct spi_smartbond_cfg *cfg = dev->config; switch ((uint32_t)cfg->regs) { case (uint32_t)SPI: COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(spi), okay), (SPI_SMARTBOND_ISR_CONNECT), (NULL)); break; case (uint32_t)SPI2: COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(spi2), okay), (SPI2_SMARTBOND_ISR_CONNECT), (NULL)); break; default: return -EINVAL; } return 0; } #endif static int spi_smartbond_init(const struct device *dev) { int ret; struct spi_smartbond_data *data = dev->data; #if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) data->transfer_mode = SPI_SMARTBOND_TRANSFER_NONE; #endif #ifdef CONFIG_SPI_SMARTBOND_DMA k_sem_init(&data->tx_dma_sync, 0, 1); k_sem_init(&data->rx_dma_sync, 0, 1); #endif #ifdef CONFIG_PM_DEVICE_RUNTIME /* Make sure device state is marked as suspended */ 
pm_device_init_suspended(dev); ret = pm_device_runtime_enable(dev); #else da1469x_pd_acquire(MCU_PD_DOMAIN_COM); ret = spi_smartbond_resume(dev); #endif spi_context_unlock_unconditionally(&data->ctx); #if defined(CONFIG_SPI_ASYNC) || defined(CONFIG_SPI_SMARTBOND_DMA) ret = spi_smartbond_isr_connect(dev); #endif return ret; } #ifdef CONFIG_SPI_SMARTBOND_DMA #define SPI_SMARTBOND_DMA_TX_INIT(id) \ .tx_dma_chan = DT_INST_DMAS_CELL_BY_NAME(id, tx, channel), \ .tx_slot_mux = (uint8_t)DT_INST_DMAS_CELL_BY_NAME(id, tx, config), \ .tx_dma_ctrl = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), #else #define SPI_SMARTBOND_DMA_TX_INIT(id) #endif #ifdef CONFIG_SPI_SMARTBOND_DMA #define SPI_SMARTBOND_DMA_RX_INIT(id) \ .rx_dma_chan = DT_INST_DMAS_CELL_BY_NAME(id, rx, channel), \ .rx_slot_mux = (uint8_t)DT_INST_DMAS_CELL_BY_NAME(id, rx, config), \ .rx_dma_ctrl = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), #else #define SPI_SMARTBOND_DMA_RX_INIT(id) #endif #ifdef CONFIG_SPI_SMARTBOND_DMA #define SPI_SMARTBOND_DMA_TX_INVALIDATE(id) \ .tx_dma_chan = 255, \ .tx_slot_mux = 255, \ .tx_dma_ctrl = NULL, #else #define SPI_SMARTBOND_DMA_TX_INVALIDATE(id) #endif #ifdef CONFIG_SPI_SMARTBOND_DMA #define SPI_SMARTBOND_DMA_RX_INVALIDATE(id) \ .rx_dma_chan = 255, \ .rx_slot_mux = 255, \ .rx_dma_ctrl = NULL, #else #define SPI_SMARTBOND_DMA_RX_INVALIDATE(id) #endif #define SPI_SMARTBOND_DEVICE(id) \ PINCTRL_DT_INST_DEFINE(id); \ static const struct spi_smartbond_cfg spi_smartbond_##id##_cfg = { \ .regs = (SPI_Type *)DT_INST_REG_ADDR(id), \ .periph_clock_config = DT_INST_PROP(id, periph_clock_config), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, tx), \ (SPI_SMARTBOND_DMA_TX_INIT(id)), \ (SPI_SMARTBOND_DMA_TX_INVALIDATE(id))) \ COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, rx), \ (SPI_SMARTBOND_DMA_RX_INIT(id)), \ (SPI_SMARTBOND_DMA_RX_INVALIDATE(id))) \ }; \ static struct spi_smartbond_data spi_smartbond_##id##_data = { \ 
SPI_CONTEXT_INIT_LOCK(spi_smartbond_##id##_data, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_smartbond_##id##_data, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx)}; \ PM_DEVICE_DT_INST_DEFINE(id, spi_smartbond_pm_action); \ DEVICE_DT_INST_DEFINE(id, \ spi_smartbond_init, \ PM_DEVICE_DT_INST_GET(id), \ &spi_smartbond_##id##_data, \ &spi_smartbond_##id##_cfg, \ POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ &spi_smartbond_driver_api); DT_INST_FOREACH_STATUS_OKAY(SPI_SMARTBOND_DEVICE) ```
/content/code_sandbox/drivers/spi/spi_smartbond.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,467
```c /* * */ #define DT_DRV_COMPAT nxp_s32_spi #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include "spi_nxp_s32.h" static bool spi_nxp_s32_last_packet(struct spi_nxp_s32_data *data) { struct spi_context *ctx = &data->ctx; if (ctx->tx_count <= 1U && ctx->rx_count <= 1U) { if (!spi_context_tx_on(ctx) && (data->transfer_len == ctx->rx_len)) { return true; } if (!spi_context_rx_on(ctx) && (data->transfer_len == ctx->tx_len)) { return true; } if ((ctx->rx_len == ctx->tx_len) && (data->transfer_len == ctx->tx_len)) { return true; } } return false; } static inline bool spi_nxp_s32_transfer_done(struct spi_context *ctx) { return !spi_context_tx_on(ctx) && !spi_context_rx_on(ctx); } static int spi_nxp_s32_transfer_next_packet(const struct device *dev) { const struct spi_nxp_s32_config *config = dev->config; struct spi_nxp_s32_data *data = dev->data; Spi_Ip_StatusType status; Spi_Ip_CallbackType data_cb; Spi_Ip_TransferAdjustmentType param; #ifdef CONFIG_NXP_S32_SPI_INTERRUPT data_cb = config->cb; #else data_cb = NULL; #endif /* CONFIG_NXP_S32_SPI_INTERRUPT */ data->transfer_len = spi_context_max_continuous_chunk(&data->ctx); data->transfer_len = MIN(data->transfer_len, SPI_NXP_S32_MAX_BYTES_PER_PACKAGE(data->bytes_per_frame)); param.KeepCs = !spi_nxp_s32_last_packet(data); param.DeviceParams = NULL; Spi_Ip_UpdateTransferParam(&data->transfer_cfg, &param); status = Spi_Ip_AsyncTransmit(&data->transfer_cfg, (uint8_t *)data->ctx.tx_buf, data->ctx.rx_buf, data->transfer_len, data_cb); if (status) { LOG_ERR("Transfer could not start"); return -EIO; } #ifdef CONFIG_NXP_S32_SPI_INTERRUPT return 0; #else while (Spi_Ip_GetStatus(config->spi_hw_cfg->Instance) == SPI_IP_BUSY) { Spi_Ip_ManageBuffers(config->spi_hw_cfg->Instance); } if (Spi_Ip_GetStatus(config->spi_hw_cfg->Instance) == SPI_IP_FAULT) { return -EIO; } return 0; #endif /* CONFIG_NXP_S32_SPI_INTERRUPT */ } /* * The function to get Scaler and Prescaler for corresponding registers * to 
configure the baudrate for the transmission. The real frequency is * computated to ensure it will always equal or the nearest approximation * lower to the expected one. */ static void spi_nxp_s32_getbestfreq(uint32_t clock_frequency, uint32_t requested_baud, struct spi_nxp_s32_baudrate_param *best_baud) { uint8_t scaler; uint8_t prescaler; uint32_t low, high; uint32_t curr_freq; uint32_t best_freq = 0U; static const uint8_t prescaler_arr[SPI_NXP_S32_NUM_PRESCALER] = {2U, 3U, 5U, 7U}; static const uint16_t scaller_arr[SPI_NXP_S32_NUM_SCALER] = { 2U, 4U, 6U, 8U, 16U, 32U, 64U, 128U, 256U, 512U, 1024U, 2048U, 4096U, 8192U, 16384U, 32768U }; for (prescaler = 0U; prescaler < SPI_NXP_S32_NUM_PRESCALER; prescaler++) { low = 0U; high = SPI_NXP_S32_NUM_SCALER - 1U; /* Implement golden section search algorithm */ do { scaler = (low + high) / 2U; curr_freq = clock_frequency * 1U / (prescaler_arr[prescaler] * scaller_arr[scaler]); /* * If the scaler make current frequency higher than the * expected one, skip the next step */ if (curr_freq > requested_baud) { low = scaler; continue; } else { high = scaler; } if ((requested_baud - best_freq) > (requested_baud - curr_freq)) { best_freq = curr_freq; best_baud->prescaler = prescaler; best_baud->scaler = scaler; } if (best_freq == requested_baud) { break; } } while ((high - low) > 1U); if ((high - low) <= 1U) { if (high == scaler) { /* use low value */ scaler = low; } else { scaler = high; } curr_freq = clock_frequency * 1U / (prescaler_arr[prescaler] * scaller_arr[scaler]); if (curr_freq <= requested_baud) { if ((requested_baud - best_freq) > (requested_baud - curr_freq)) { best_freq = curr_freq; best_baud->prescaler = prescaler; best_baud->scaler = scaler; } } } if (best_freq == requested_baud) { break; } } best_baud->frequency = best_freq; } /* * The function to get Scaler and Prescaler for corresponding registers * to configure the delay for the transmission. 
The real delay is computed
 * to ensure it will always equal or the nearest approximation higher to
 * the expected one. In the worst case, use the delay as much as possible.
 */
/*
 * Binary-search, per prescaler, for the (prescaler, scaler) pair whose
 * resulting delay is the smallest value >= requested_delay. If no pair can
 * reach requested_delay, fall back to the largest delay the hardware offers.
 *
 * @param clock_frequency  module clock in Hz (converted to MHz internally)
 * @param requested_delay  desired delay in nanoseconds
 * @param best_scaler      out: chosen scaler field value
 * @param best_prescaler   out: index into the prescaler table
 */
static void spi_nxp_s32_getbestdelay(uint32_t clock_frequency, uint32_t requested_delay,
				     uint8_t *best_scaler, uint8_t *best_prescaler)
{
	uint32_t current_delay;
	uint8_t scaler, prescaler;
	uint32_t low, high;
	/* Sentinel: "no candidate found yet" */
	uint32_t best_delay = 0xFFFFFFFFU;

	/* The scaler array is a power of two, so does not need to be defined */
	static const uint8_t prescaler_arr[SPI_NXP_S32_NUM_PRESCALER] = {1U, 3U, 5U, 7U};

	clock_frequency = clock_frequency / MHZ(1);

	for (prescaler = 0; prescaler < SPI_NXP_S32_NUM_PRESCALER; prescaler++) {
		low = 0U;
		high = SPI_NXP_S32_NUM_SCALER - 1U;

		do {
			scaler = (low + high) / 2U;

			/* delay(ns) = 1000 * prescaler * 2^(scaler+1) / f(MHz) */
			current_delay = NSEC_PER_USEC * prescaler_arr[prescaler] *
					(1U << (scaler + 1)) / clock_frequency;

			/*
			 * If the scaler makes the current delay smaller than
			 * the expected one, skip the next step
			 */
			if (current_delay < requested_delay) {
				low = scaler;
				continue;
			} else {
				high = scaler;
			}

			/* Only reached when current_delay >= requested_delay:
			 * keep the candidate closest to the request.
			 */
			if ((best_delay - requested_delay) > (current_delay - requested_delay)) {
				best_delay = current_delay;
				*best_prescaler = prescaler;
				*best_scaler = scaler;
			}

			if (best_delay == requested_delay) {
				break;
			}

		} while ((high - low) > 1U);

		/* Binary search converged to two neighbours: try the one the
		 * loop did not settle on.
		 */
		if ((high - low) <= 1U) {
			if (high == scaler) {
				/* use low value */
				scaler = low;
			} else {
				scaler = high;
			}

			current_delay = NSEC_PER_USEC * prescaler_arr[prescaler] *
					(1U << (scaler + 1)) / clock_frequency;

			if (current_delay >= requested_delay) {
				if ((best_delay - requested_delay) >
				    (current_delay - requested_delay)) {
					best_delay = current_delay;
					*best_prescaler = prescaler;
					*best_scaler = scaler;
				}
			}
		}

		if (best_delay == requested_delay) {
			break;
		}
	}

	if (best_delay == 0xFFFFFFFFU) {
		/* Use the delay as much as possible */
		*best_prescaler = SPI_NXP_S32_NUM_PRESCALER - 1U;
		*best_scaler = SPI_NXP_S32_NUM_SCALER - 1U;
	}
}

/*
 * Validate spi_cfg against the hardware/DT setup and program the cached
 * CTAR/PUSHR command fields in data->transfer_cfg. Returns 0 on success,
 * -ENOTSUP for any unsupported combination, or a clock driver error code.
 */
static int spi_nxp_s32_configure(const struct device *dev, const struct spi_config *spi_cfg)
{
	const struct spi_nxp_s32_config *config = dev->config;
	struct spi_nxp_s32_data *data = dev->data;

	bool clk_phase, clk_polarity;
	bool lsb, hold_cs;
	bool slave_mode, cs_active_high;
	uint8_t frame_size;

	struct spi_nxp_s32_baudrate_param best_baud = {0};

	uint32_t clock_rate;
	int err;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	err = clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_rate);
	if (err) {
		LOG_ERR("Failed to get clock frequency");
		return err;
	}

	clk_phase = !!(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA);
	clk_polarity = !!(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL);

	hold_cs = !!(spi_cfg->operation & SPI_HOLD_ON_CS);
	lsb = !!(spi_cfg->operation & SPI_TRANSFER_LSB);

	slave_mode = !!(SPI_OP_MODE_GET(spi_cfg->operation));
	frame_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	cs_active_high = !!(spi_cfg->operation & SPI_CS_ACTIVE_HIGH);

	/* MCR_MSTR is fixed by devicetree; the runtime request must agree. */
	if (slave_mode == (!!(config->spi_hw_cfg->Mcr & SPI_MCR_MSTR_MASK))) {
		LOG_ERR("SPI mode (master/slave) must be same as configured in DT");
		return -ENOTSUP;
	}

	if (slave_mode && !IS_ENABLED(CONFIG_SPI_SLAVE)) {
		LOG_ERR("Kconfig for enable SPI in slave mode is not enabled");
		return -ENOTSUP;
	}

	if (slave_mode && lsb) {
		LOG_ERR("SPI does not support to shifting out with LSB in slave mode");
		return -ENOTSUP;
	}

	if (spi_cfg->slave >= config->num_cs) {
		LOG_ERR("Slave %d excess the allowed maximum value (%d)",
			spi_cfg->slave, config->num_cs - 1);
		return -ENOTSUP;
	}

	if (frame_size > 32U) {
		LOG_ERR("Unsupported frame size %d bits", frame_size);
		return -ENOTSUP;
	}

	if ((spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -ENOTSUP;
	}

	/* Module-internal CS lines are active-low only. */
	if (cs_active_high && !spi_cs_is_gpio(spi_cfg)) {
		LOG_ERR("For CS has active state is high, a GPIO pin must be used to"
			" control CS line instead");
		return -ENOTSUP;
	}

	if (!slave_mode) {
		if ((spi_cfg->frequency < SPI_NXP_S32_MIN_FREQ) ||
		    (spi_cfg->frequency > SPI_NXP_S32_MAX_FREQ)) {
			LOG_ERR("The frequency is out of range");
			return -ENOTSUP;
		}

		spi_nxp_s32_getbestfreq(clock_rate, spi_cfg->frequency, &best_baud);

		data->transfer_cfg.Ctar &= ~(SPI_CTAR_BR_MASK | SPI_CTAR_PBR_MASK);
		data->transfer_cfg.Ctar |= SPI_CTAR_BR(best_baud.scaler) |
					   SPI_CTAR_PBR(best_baud.prescaler);

		/* PushrCmd holds the upper PUSHR halfword (shifted right 16). */
		data->transfer_cfg.PushrCmd &=
			~((SPI_PUSHR_CONT_MASK | SPI_PUSHR_PCS_MASK) >> 16U);

		if (!spi_cs_is_gpio(spi_cfg)) {
			/* Use inner CS signal from SPI module */
			data->transfer_cfg.PushrCmd |= hold_cs << 15U;
			data->transfer_cfg.PushrCmd |= (1U << spi_cfg->slave);
		}
	}

	data->transfer_cfg.Ctar &= ~(SPI_CTAR_CPHA_MASK | SPI_CTAR_CPOL_MASK);
	data->transfer_cfg.Ctar |= SPI_CTAR_CPHA(clk_phase) | SPI_CTAR_CPOL(clk_polarity);

	Spi_Ip_UpdateFrameSize(&data->transfer_cfg, frame_size);
	Spi_Ip_UpdateLsb(&data->transfer_cfg, lsb);

	data->ctx.config = spi_cfg;
	data->bytes_per_frame = SPI_NXP_S32_BYTE_PER_FRAME(frame_size);

	if (slave_mode) {
		LOG_DBG("SPI configuration: cpol = %u, cpha = %u,"
			" lsb = %u, frame_size = %u, mode: slave",
			clk_polarity, clk_phase, lsb, frame_size);
	} else {
		LOG_DBG("SPI configuration: frequency = %uHz, cpol = %u,"
			" cpha = %u, lsb = %u, hold_cs = %u, frame_size = %u,"
			" mode: master, CS = %u\n",
			best_baud.frequency, clk_polarity, clk_phase, lsb,
			hold_cs, frame_size, spi_cfg->slave);
	}

	return 0;
}

/*
 * Common transceive path for the sync and async entry points.
 * Interrupt build: kicks the first packet and waits for completion (or the
 * user callback). Polling build: loops over packets inline. The spi_context
 * lock is always released before returning.
 */
static int transceive(const struct device *dev, const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool asynchronous, spi_callback_t cb, void *userdata)
{
	struct spi_nxp_s32_data *data = dev->data;
	struct spi_context *context = &data->ctx;
	int ret;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_NXP_S32_SPI_INTERRUPT
	/* Async completion needs the interrupt-driven build. */
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif /* CONFIG_NXP_S32_SPI_INTERRUPT */

	spi_context_lock(context, asynchronous, cb, userdata, spi_cfg);

	ret = spi_nxp_s32_configure(dev, spi_cfg);
	if (ret) {
		LOG_ERR("An error occurred in the SPI configuration");
		spi_context_release(context, ret);
		return ret;
	}

	spi_context_buffers_setup(context, tx_bufs, rx_bufs, 1U);

	if (spi_nxp_s32_transfer_done(context)) {
		/* Nothing to move: release and report success. */
		spi_context_release(context, 0);
		return 0;
	}

	spi_context_cs_control(context, true);

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT
	ret = spi_nxp_s32_transfer_next_packet(dev);
	if (!ret) {
		ret = spi_context_wait_for_completion(context);
	} else {
		spi_context_cs_control(context, false);
	}
#else
	do {
		ret = spi_nxp_s32_transfer_next_packet(dev);
		if (!ret) {
			spi_context_update_tx(context, 1U, data->transfer_len);
			spi_context_update_rx(context, 1U, data->transfer_len);
		}
	} while (!ret && !spi_nxp_s32_transfer_done(context));

	spi_context_cs_control(context, false);

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(context) && !ret) {
		/* Slave transfers report the number of received frames. */
		ret = data->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

#endif /* CONFIG_NXP_S32_SPI_INTERRUPT */

	spi_context_release(context, ret);

	return ret;
}

/* spi_driver_api.transceive: blocking transfer. */
static int spi_nxp_s32_transceive(const struct device *dev,
				  const struct spi_config *spi_cfg,
				  const struct spi_buf_set *tx_bufs,
				  const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
/* spi_driver_api.transceive_async: non-blocking transfer with callback. */
static int spi_nxp_s32_transceive_async(const struct device *dev,
					const struct spi_config *spi_cfg,
					const struct spi_buf_set *tx_bufs,
					const struct spi_buf_set *rx_bufs,
					spi_callback_t callback, void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, callback, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

/* spi_driver_api.release: drop the context lock held by SPI_HOLD_ON_CS users. */
static int spi_nxp_s32_release(const struct device *dev, const struct spi_config *spi_cfg)
{
	struct spi_nxp_s32_data *data = dev->data;

	(void)spi_cfg;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

/*
 * Device init: enable/query the module clock, apply pinctrl, initialize the
 * Spi_Ip layer, optionally switch to interrupt mode, and pre-compute the
 * CS-related delay timings (ASC/CSSCK/DT) shared by all inner CS signals.
 */
static int spi_nxp_s32_init(const struct device *dev)
{
	const struct spi_nxp_s32_config *config = dev->config;
	struct spi_nxp_s32_data *data = dev->data;

	uint32_t clock_rate;
	uint8_t scaler, prescaler;
	uint32_t ctar = 0;

	int ret = 0;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("Clock control device not ready");
		return -ENODEV;
	}

	ret = clock_control_on(config->clock_dev, config->clock_subsys);
	if (ret) {
		LOG_ERR("Failed to enable clock");
		return ret;
	}

	ret = clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_rate);
	if (ret) {
		LOG_ERR("Failed to get clock frequency");
		return ret;
	}

	ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}

	if (Spi_Ip_Init(config->spi_hw_cfg)) {
		return -EBUSY;
	}

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT
	if (Spi_Ip_UpdateTransferMode(config->spi_hw_cfg->Instance, SPI_IP_INTERRUPT)) {
		return -EBUSY;
	}

	config->irq_config_func(dev);
#endif /* CONFIG_NXP_S32_SPI_INTERRUPT */

	/*
	 * Update the delay timings configuration that are
	 * applied for all inner CS signals of SPI module.
	 */
	spi_nxp_s32_getbestdelay(clock_rate, config->sck_cs_delay, &scaler, &prescaler);

	ctar |= SPI_CTAR_ASC(scaler) | SPI_CTAR_PASC(prescaler);

	spi_nxp_s32_getbestdelay(clock_rate, config->cs_sck_delay, &scaler, &prescaler);

	ctar |= SPI_CTAR_CSSCK(scaler) | SPI_CTAR_PCSSCK(prescaler);

	spi_nxp_s32_getbestdelay(clock_rate, config->cs_cs_delay, &scaler, &prescaler);

	ctar |= SPI_CTAR_DT(scaler) | SPI_CTAR_PDT(prescaler);

	data->transfer_cfg.Ctar |= ctar;
	data->transfer_cfg.DeviceParams = &data->transfer_params;

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret < 0) {
		return ret;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT
/* Top-level IRQ entry: delegates to the Spi_Ip layer for this instance. */
void spi_nxp_s32_isr(const struct device *dev)
{
	const struct spi_nxp_s32_config *config = dev->config;

	Spi_Ip_IrqHandler(config->spi_hw_cfg->Instance);
}

/*
 * Spi_Ip end-of-transfer callback: advance the buffers, start the next
 * packet or complete the context. Any failure completes the context with
 * an error and deasserts CS.
 */
static void spi_nxp_s32_transfer_callback(const struct device *dev, Spi_Ip_EventType event)
{
	struct spi_nxp_s32_data *data = dev->data;
	int ret = 0;

	if (event == SPI_IP_EVENT_END_TRANSFER) {
		spi_context_update_tx(&data->ctx, 1U, data->transfer_len);
		spi_context_update_rx(&data->ctx, 1U, data->transfer_len);

		if (spi_nxp_s32_transfer_done(&data->ctx)) {
			spi_context_complete(&data->ctx, dev, 0);
			spi_context_cs_control(&data->ctx, false);
		} else {
			ret = spi_nxp_s32_transfer_next_packet(dev);
		}
	} else {
		LOG_ERR("Failing in transfer_callback");
		ret = -EIO;
	}

	if (ret) {
		spi_context_complete(&data->ctx, dev, ret);
		spi_context_cs_control(&data->ctx, false);
	}
}
#endif /*CONFIG_NXP_S32_SPI_INTERRUPT*/

static const struct spi_driver_api spi_nxp_s32_driver_api = {
	.transceive = spi_nxp_s32_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_nxp_s32_transceive_async,
#endif
	.release = spi_nxp_s32_release,
};

/* Map a devicetree register address back to the hardware instance index. */
#define SPI_NXP_S32_HW_INSTANCE_CHECK(i, n) \
	((DT_INST_REG_ADDR(n) == IP_SPI_##i##_BASE) ? i : 0)

#define SPI_NXP_S32_HW_INSTANCE(n) \
	LISTIFY(__DEBRACKET SPI_INSTANCE_COUNT, SPI_NXP_S32_HW_INSTANCE_CHECK, (|), n)

#define SPI_NXP_S32_NUM_CS(n) DT_INST_PROP(n, num_cs)

#define SPI_NXP_S32_IS_MASTER(n) !DT_INST_PROP(n, slave)

#ifdef CONFIG_SPI_SLAVE
#define SPI_NXP_S32_SET_SLAVE(n) .SlaveMode = DT_INST_PROP(n, slave),
#else
#define SPI_NXP_S32_SET_SLAVE(n)
#endif

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT
#define SPI_NXP_S32_CONFIG_INTERRUPT_FUNC(n)					\
	.irq_config_func = spi_nxp_s32_config_func_##n,

#define SPI_NXP_S32_INTERRUPT_DEFINE(n)						\
	static void spi_nxp_s32_config_func_##n(const struct device *dev)	\
	{									\
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority),		\
			spi_nxp_s32_isr, DEVICE_DT_INST_GET(n),			\
			DT_INST_IRQ(n, flags));					\
		irq_enable(DT_INST_IRQN(n));					\
	}

#define SPI_NXP_S32_CONFIG_CALLBACK_FUNC(n)					\
	.cb = spi_nxp_s32_##n##_callback,

/* Per-instance trampoline from the Spi_Ip callback signature to the driver. */
#define SPI_NXP_S32_CALLBACK_DEFINE(n)						\
	static void spi_nxp_s32_##n##_callback(uint8 instance, Spi_Ip_EventType event) \
	{									\
		ARG_UNUSED(instance);						\
		const struct device *dev = DEVICE_DT_INST_GET(n);		\
										\
		spi_nxp_s32_transfer_callback(dev, event);			\
	}
#else
#define SPI_NXP_S32_CONFIG_INTERRUPT_FUNC(n)
#define SPI_NXP_S32_INTERRUPT_DEFINE(n)
#define SPI_NXP_S32_CONFIG_CALLBACK_FUNC(n)
#define SPI_NXP_S32_CALLBACK_DEFINE(n)
#endif /*CONFIG_NXP_S32_SPI_INTERRUPT*/

/*
 * Declare the default configuration for SPI driver, no DMA
 * support, all inner module Chip Selects are active low.
 */
#define SPI_NXP_S32_INSTANCE_CONFIG(n)						\
	static const Spi_Ip_ConfigType spi_nxp_s32_default_config_##n = {	\
		.Instance = SPI_NXP_S32_HW_INSTANCE(n),				\
		.Mcr = (SPI_MCR_MSTR(SPI_NXP_S32_IS_MASTER(n)) |		\
			SPI_MCR_CONT_SCKE(0U) | SPI_MCR_FRZ(0U) |		\
			SPI_MCR_MTFE(0U) | SPI_MCR_SMPL_PT(0U) |		\
			SPI_MCR_PCSIS(BIT_MASK(SPI_NXP_S32_NUM_CS(n))) |	\
			SPI_MCR_MDIS(0U) | SPI_MCR_XSPI(1U) | SPI_MCR_HALT(1U)), \
		.TransferMode = SPI_IP_POLLING,					\
		.StateIndex = n,						\
		SPI_NXP_S32_SET_SLAVE(n)					\
	}

#define SPI_NXP_S32_TRANSFER_CONFIG(n)						\
	.transfer_cfg = {							\
		.Instance = SPI_NXP_S32_HW_INSTANCE(n),				\
		.Ctare = SPI_CTARE_FMSZE(0U) | SPI_CTARE_DTCP(1U),		\
	}

#define SPI_NXP_S32_DEVICE(n)							\
	PINCTRL_DT_INST_DEFINE(n);						\
	SPI_NXP_S32_CALLBACK_DEFINE(n)						\
	SPI_NXP_S32_INTERRUPT_DEFINE(n)						\
	SPI_NXP_S32_INSTANCE_CONFIG(n);						\
	static const struct spi_nxp_s32_config spi_nxp_s32_config_##n = {	\
		.num_cs = SPI_NXP_S32_NUM_CS(n),				\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),		\
		.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \
		.sck_cs_delay = DT_INST_PROP_OR(n, spi_sck_cs_delay, 0U),	\
		.cs_sck_delay = DT_INST_PROP_OR(n, spi_cs_sck_delay, 0U),	\
		.cs_cs_delay = DT_INST_PROP_OR(n, spi_cs_cs_delay, 0U),		\
		.spi_hw_cfg = (Spi_Ip_ConfigType *)&spi_nxp_s32_default_config_##n, \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
		SPI_NXP_S32_CONFIG_CALLBACK_FUNC(n)				\
		SPI_NXP_S32_CONFIG_INTERRUPT_FUNC(n)				\
	};									\
	static struct spi_nxp_s32_data spi_nxp_s32_data_##n = {			\
		SPI_NXP_S32_TRANSFER_CONFIG(n),					\
		SPI_CONTEXT_INIT_LOCK(spi_nxp_s32_data_##n, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_nxp_s32_data_##n, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)		\
	};									\
	DEVICE_DT_INST_DEFINE(n,						\
			spi_nxp_s32_init, NULL,					\
			&spi_nxp_s32_data_##n, &spi_nxp_s32_config_##n,		\
			POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,			\
			&spi_nxp_s32_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_NXP_S32_DEVICE)
```
/content/code_sandbox/drivers/spi/spi_nxp_s32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,884
```c
/*
 *
 */

/* Andes ATCSPI200 SPI controller driver (master mode only). */

#include "spi_andes_atcspi200.h"

#include <zephyr/irq.h>

#define DT_DRV_COMPAT andestech_atcspi200

typedef void (*atcspi200_cfg_func_t)(void);

#ifdef CONFIG_ANDES_SPI_DMA_MODE

#define ANDES_SPI_DMA_ERROR_FLAG	0x01
#define ANDES_SPI_DMA_RX_DONE_FLAG	0x02
#define ANDES_SPI_DMA_TX_DONE_FLAG	0x04
#define ANDES_SPI_DMA_DONE_FLAG	\
	(ANDES_SPI_DMA_RX_DONE_FLAG | ANDES_SPI_DMA_TX_DONE_FLAG)

/* Per-direction DMA channel state and descriptors. */
struct stream {
	const struct device *dma_dev;
	uint32_t channel;
	/* Next free slot in chain_block[] when building chained descriptors */
	uint32_t block_idx;
	struct dma_config dma_cfg;
	struct dma_block_config dma_blk_cfg;
	/* Descriptor pool used when the DMA controller supports chaining */
	struct dma_block_config chain_block[MAX_CHAIN_SIZE];
	uint8_t priority;
	bool src_addr_increment;
	bool dst_addr_increment;
};
#endif

struct spi_atcspi200_data {
	struct spi_context ctx;
	uint32_t tx_fifo_size;
	uint32_t rx_fifo_size;
	/* Frames pushed to the TX FIFO for the current chunk */
	int tx_cnt;
	/* Transfer length (frames) programmed into TCTRL for this transaction */
	size_t chunk_len;
	bool busy;
#ifdef CONFIG_ANDES_SPI_DMA_MODE
	struct stream dma_rx;
	struct stream dma_tx;
#endif
};

struct spi_atcspi200_cfg {
	atcspi200_cfg_func_t cfg_func;
	uint32_t base;
	uint32_t irq_num;
	uint32_t f_sys;
	/* True when this controller backs the XIP flash we execute from */
	bool xip;
};

/* API Functions */

/*
 * Program clock divisor, frame format (mode/order/length) and FIFO
 * thresholds from the given spi_config. Always returns 0.
 */
static int spi_config(const struct device *dev,
		      const struct spi_config *config)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	uint32_t sclk_div, data_len;

	/* Set the divisor for SPI interface sclk */
	sclk_div = (cfg->f_sys / (config->frequency << 1)) - 1;
	sys_clear_bits(SPI_TIMIN(cfg->base), TIMIN_SCLK_DIV_MSK);
	sys_set_bits(SPI_TIMIN(cfg->base), sclk_div);

	/* Set Master mode */
	sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_SLVMODE_MSK);

	/* Disable data merge mode */
	sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_DATA_MERGE_MSK);

	/* Set data length */
	data_len = SPI_WORD_SIZE_GET(config->operation) - 1;
	sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_DATA_LEN_MSK);
	sys_set_bits(SPI_TFMAT(cfg->base), (data_len << TFMAT_DATA_LEN_OFFSET));

	/* Set SPI frame format */
	if (config->operation & SPI_MODE_CPHA) {
		sys_set_bits(SPI_TFMAT(cfg->base), TFMAT_CPHA_MSK);
	} else {
		sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_CPHA_MSK);
	}

	if (config->operation & SPI_MODE_CPOL) {
		sys_set_bits(SPI_TFMAT(cfg->base), TFMAT_CPOL_MSK);
	} else {
		sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_CPOL_MSK);
	}

	/* Set SPI bit order */
	if (config->operation & SPI_TRANSFER_LSB) {
		sys_set_bits(SPI_TFMAT(cfg->base), TFMAT_LSB_MSK);
	} else {
		sys_clear_bits(SPI_TFMAT(cfg->base), TFMAT_LSB_MSK);
	}

	/* Set TX/RX FIFO threshold */
	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_TX_THRES_MSK);
	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_RX_THRES_MSK);

	sys_set_bits(SPI_CTRL(cfg->base), TX_FIFO_THRESHOLD << CTRL_TX_THRES_OFFSET);
	sys_set_bits(SPI_CTRL(cfg->base), RX_FIFO_THRESHOLD << CTRL_RX_THRES_OFFSET);

	return 0;
}

/*
 * Start an interrupt-driven (PIO) transfer: program TCTRL with the
 * transfer mode/counts for the current chunk, unmask the relevant FIFO
 * and END interrupts, and trigger the command register.
 */
static int spi_transfer(const struct device *dev)
{
	struct spi_atcspi200_data * const data = dev->data;
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_context *ctx = &data->ctx;
	uint32_t data_len, tctrl, int_msk;

	if (data->chunk_len != 0) {
		/* Hardware counts are (length - 1) encoded. */
		data_len = data->chunk_len - 1;
	} else {
		data_len = 0;
	}

	if (data_len > MAX_TRANSFER_CNT) {
		return -EINVAL;
	}

	data->tx_cnt = 0;

	if (!spi_context_rx_on(ctx)) {
		tctrl = (TRNS_MODE_WRITE_ONLY << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_WR_TCNT_OFFSET);
		int_msk = IEN_TX_FIFO_MSK | IEN_END_MSK;
	} else if (!spi_context_tx_on(ctx)) {
		tctrl = (TRNS_MODE_READ_ONLY << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_RD_TCNT_OFFSET);
		int_msk = IEN_RX_FIFO_MSK | IEN_END_MSK;
	} else {
		tctrl = (TRNS_MODE_WRITE_READ << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_WR_TCNT_OFFSET) |
			(data_len << TCTRL_RD_TCNT_OFFSET);
		int_msk = IEN_TX_FIFO_MSK | IEN_RX_FIFO_MSK | IEN_END_MSK;
	}

	sys_write32(tctrl, SPI_TCTRL(cfg->base));

	/* Enable TX/RX FIFO interrupts */
	sys_write32(int_msk, SPI_INTEN(cfg->base));

	/* Start transferring */
	sys_write32(0, SPI_CMD(cfg->base));

	return 0;
}

/*
 * Validate the spi_config against driver capabilities (master, no loopback,
 * single line) and apply it to the hardware via spi_config().
 */
static int configure(const struct device *dev,
		     const struct spi_config *config)
{
	struct spi_atcspi200_data * const data = dev->data;
	struct spi_context *ctx = &(data->ctx);

	if (spi_context_configured(ctx, config)) {
		/* Already configured. No need to do it again. */
		return 0;
	}

	if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Slave mode is not supported on %s", dev->name);
		return -EINVAL;
	}

	if (config->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -EINVAL;
	}

	if ((config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -EINVAL;
	}

	ctx->config = config;

	/* SPI configuration */
	spi_config(dev, config);

	return 0;
}

#ifdef CONFIG_ANDES_SPI_DMA_MODE

static int spi_dma_tx_load(const struct device *dev);
static int spi_dma_rx_load(const struct device *dev);

static inline void spi_tx_dma_enable(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	/* Enable TX DMA */
	sys_set_bits(SPI_CTRL(cfg->base), CTRL_TX_DMA_EN_MSK);
}

static inline void spi_tx_dma_disable(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	/* Disable TX DMA */
	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_TX_DMA_EN_MSK);
}

static inline void spi_rx_dma_enable(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	/* Enable RX DMA */
	sys_set_bits(SPI_CTRL(cfg->base), CTRL_RX_DMA_EN_MSK);
}

static inline void spi_rx_dma_disable(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	/* Disable RX DMA */
	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_RX_DMA_EN_MSK);
}

/* Load the TX and/or RX DMA descriptors for whichever directions are active. */
static int spi_dma_move_buffers(const struct device *dev)
{
	struct spi_atcspi200_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t error = 0;

	data->dma_rx.dma_blk_cfg.next_block = NULL;
	data->dma_tx.dma_blk_cfg.next_block = NULL;

	if (spi_context_tx_on(ctx)) {
		error = spi_dma_tx_load(dev);
		if (error != 0) {
			return error;
		}
	}

	if (spi_context_rx_on(ctx)) {
		error = spi_dma_rx_load(dev);
		if (error != 0) {
			return error;
		}
	}

	return 0;
}

/*
 * DMA completion callback (RX, non-chained mode): stop the channel and,
 * if more RX buffers remain, reload and restart the channel.
 */
static inline void dma_rx_callback(const struct device *dev, void *user_data,
				   uint32_t channel, int status)
{
	const struct device *spi_dev = (struct device *)user_data;
	struct spi_atcspi200_data *data = spi_dev->data;
	struct spi_context *ctx = &data->ctx;
	int error;

	dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
	spi_rx_dma_disable(spi_dev);

	if (spi_context_rx_on(ctx)) {
		if (spi_dma_rx_load(spi_dev) != 0) {
			return;
		}
		spi_rx_dma_enable(spi_dev);
		error = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
		__ASSERT(error == 0, "dma_start was failed in rx callback");
	}
}

/*
 * DMA completion callback (TX, non-chained mode): stop the channel and,
 * if more TX buffers remain, reload and restart the channel.
 */
static inline void dma_tx_callback(const struct device *dev, void *user_data,
				   uint32_t channel, int status)
{
	const struct device *spi_dev = (struct device *)user_data;
	struct spi_atcspi200_data *data = spi_dev->data;
	struct spi_context *ctx = &data->ctx;
	int error;

	dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
	spi_tx_dma_disable(spi_dev);

	if (spi_context_tx_on(ctx)) {
		if (spi_dma_tx_load(spi_dev) != 0) {
			return;
		}
		spi_tx_dma_enable(spi_dev);
		error = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
		__ASSERT(error == 0, "dma_start was failed in tx callback");
	}
}

/*
 * dummy value used for transferring NOP when tx buf is null
 * and use as dummy sink for when rx buf is null
 */
uint32_t dummy_rx_tx_buffer;

/*
 * Configure the TX DMA channel for the current TX buffer. When the DMA
 * controller supports chaining, all remaining TX buffers of the chunk are
 * linked into one descriptor chain; otherwise one buffer is loaded and
 * dma_tx_callback() reloads the next.
 */
static int spi_dma_tx_load(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int remain_len, ret, dfs;

	/* prepare the block for this TX DMA channel */
	memset(&data->dma_tx.dma_blk_cfg, 0, sizeof(struct dma_block_config));

	if (ctx->current_tx->len > data->chunk_len) {
		data->dma_tx.dma_blk_cfg.block_size = data->chunk_len /
					data->dma_tx.dma_cfg.dest_data_size;
	} else {
		data->dma_tx.dma_blk_cfg.block_size = ctx->current_tx->len /
					data->dma_tx.dma_cfg.dest_data_size;
	}

	/* tx direction has memory as source and periph as dest. */
	if (ctx->current_tx->buf == NULL) {
		dummy_rx_tx_buffer = 0;
		/* if tx buff is null, then sends NOP on the line. */
		data->dma_tx.dma_blk_cfg.source_address = (uintptr_t)&dummy_rx_tx_buffer;
		data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	} else {
		data->dma_tx.dma_blk_cfg.source_address = (uintptr_t)ctx->current_tx->buf;
		if (data->dma_tx.src_addr_increment) {
			data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;
	remain_len = data->chunk_len - ctx->current_tx->len;
	spi_context_update_tx(ctx, dfs, ctx->current_tx->len);

	data->dma_tx.dma_blk_cfg.dest_address = (uint32_t)SPI_DATA(cfg->base);
	/* fifo mode NOT USED there */
	if (data->dma_tx.dst_addr_increment) {
		data->dma_tx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		data->dma_tx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	/* direction is given by the DT */
	data->dma_tx.dma_cfg.head_block = &data->dma_tx.dma_blk_cfg;
	data->dma_tx.dma_cfg.head_block->next_block = NULL;
	/* give the client dev as arg, as the callback comes from the dma */
	data->dma_tx.dma_cfg.user_data = (void *)dev;

	if (data->dma_tx.dma_cfg.source_chaining_en) {
		/* Chained mode: build one descriptor per remaining buffer. */
		data->dma_tx.dma_cfg.block_count = ctx->tx_count;
		data->dma_tx.dma_cfg.dma_callback = NULL;
		data->dma_tx.block_idx = 0;

		struct dma_block_config *blk_cfg = &data->dma_tx.dma_blk_cfg;
		const struct spi_buf *current_tx = ctx->current_tx;

		while (remain_len > 0) {
			struct dma_block_config *next_blk_cfg;

			next_blk_cfg = &data->dma_tx.chain_block[data->dma_tx.block_idx];
			data->dma_tx.block_idx += 1;

			blk_cfg->next_block = next_blk_cfg;
			current_tx = ctx->current_tx;

			next_blk_cfg->block_size = current_tx->len /
						data->dma_tx.dma_cfg.dest_data_size;

			/* tx direction has memory as source and periph as dest. */
			if (current_tx->buf == NULL) {
				dummy_rx_tx_buffer = 0;
				/* if tx buff is null, then sends NOP on the line. */
				next_blk_cfg->source_address = (uintptr_t)&dummy_rx_tx_buffer;
				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			} else {
				next_blk_cfg->source_address = (uintptr_t)current_tx->buf;
				if (data->dma_tx.src_addr_increment) {
					next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
				} else {
					next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
				}
			}

			next_blk_cfg->dest_address = (uint32_t)SPI_DATA(cfg->base);
			/* fifo mode NOT USED there */
			if (data->dma_tx.dst_addr_increment) {
				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
			} else {
				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			}

			blk_cfg = next_blk_cfg;
			next_blk_cfg->next_block = NULL;

			remain_len -= ctx->current_tx->len;
			spi_context_update_tx(ctx, dfs, ctx->current_tx->len);
		}
	} else {
		data->dma_tx.dma_blk_cfg.next_block = NULL;
		data->dma_tx.dma_cfg.block_count = 1;
		data->dma_tx.dma_cfg.dma_callback = dma_tx_callback;
	}

	/* pass our client origin to the dma: data->dma_tx.dma_channel */
	ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel,
			&data->dma_tx.dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		data->dma_tx.block_idx = 0;
		data->dma_tx.dma_blk_cfg.next_block = NULL;
		return ret;
	}

	return 0;
}

/*
 * Configure the RX DMA channel for the current RX buffer; mirror image of
 * spi_dma_tx_load() with the peripheral data register as the source.
 */
static int spi_dma_rx_load(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int remain_len, ret, dfs;

	/* prepare the block for this RX DMA channel */
	memset(&data->dma_rx.dma_blk_cfg, 0, sizeof(struct dma_block_config));

	if (ctx->current_rx->len > data->chunk_len) {
		data->dma_rx.dma_blk_cfg.block_size = data->chunk_len /
					data->dma_rx.dma_cfg.dest_data_size;
	} else {
		data->dma_rx.dma_blk_cfg.block_size = ctx->current_rx->len /
					data->dma_rx.dma_cfg.dest_data_size;
	}

	/* rx direction has periph as source and mem as dest. */
	if (ctx->current_rx->buf == NULL) {
		/* if rx buff is null, then write data to dummy address. */
		data->dma_rx.dma_blk_cfg.dest_address = (uintptr_t)&dummy_rx_tx_buffer;
		data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	} else {
		data->dma_rx.dma_blk_cfg.dest_address = (uintptr_t)ctx->current_rx->buf;
		if (data->dma_rx.dst_addr_increment) {
			data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		} else {
			data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	}

	dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;
	remain_len = data->chunk_len - ctx->current_rx->len;
	spi_context_update_rx(ctx, dfs, ctx->current_rx->len);

	data->dma_rx.dma_blk_cfg.source_address = (uint32_t)SPI_DATA(cfg->base);
	if (data->dma_rx.src_addr_increment) {
		data->dma_rx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		data->dma_rx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	data->dma_rx.dma_cfg.head_block = &data->dma_rx.dma_blk_cfg;
	data->dma_rx.dma_cfg.head_block->next_block = NULL;
	data->dma_rx.dma_cfg.user_data = (void *)dev;

	if (data->dma_rx.dma_cfg.source_chaining_en) {
		/* Chained mode: build one descriptor per remaining buffer. */
		data->dma_rx.dma_cfg.block_count = ctx->rx_count;
		data->dma_rx.dma_cfg.dma_callback = NULL;
		data->dma_rx.block_idx = 0;

		struct dma_block_config *blk_cfg = &data->dma_rx.dma_blk_cfg;
		const struct spi_buf *current_rx = ctx->current_rx;

		while (remain_len > 0) {
			struct dma_block_config *next_blk_cfg;

			next_blk_cfg = &data->dma_rx.chain_block[data->dma_rx.block_idx];
			data->dma_rx.block_idx += 1;

			blk_cfg->next_block = next_blk_cfg;
			current_rx = ctx->current_rx;

			next_blk_cfg->block_size = current_rx->len /
						data->dma_rx.dma_cfg.dest_data_size;

			/* rx direction has periph as source and mem as dest. */
			if (current_rx->buf == NULL) {
				/* if rx buff is null, then write data to dummy address. */
				next_blk_cfg->dest_address = (uintptr_t)&dummy_rx_tx_buffer;
				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			} else {
				next_blk_cfg->dest_address = (uintptr_t)current_rx->buf;
				if (data->dma_rx.dst_addr_increment) {
					next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
				} else {
					next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
				}
			}

			next_blk_cfg->source_address = (uint32_t)SPI_DATA(cfg->base);
			if (data->dma_rx.src_addr_increment) {
				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
			} else {
				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			}

			blk_cfg = next_blk_cfg;
			next_blk_cfg->next_block = NULL;

			remain_len -= ctx->current_rx->len;
			spi_context_update_rx(ctx, dfs, ctx->current_rx->len);
		}
	} else {
		data->dma_rx.dma_blk_cfg.next_block = NULL;
		data->dma_rx.dma_cfg.block_count = 1;
		data->dma_rx.dma_cfg.dma_callback = dma_rx_callback;
	}

	/* pass our client origin to the dma: data->dma_rx.channel */
	ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
			&data->dma_rx.dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		data->dma_rx.block_idx = 0;
		data->dma_rx.dma_blk_cfg.next_block = NULL;
		return ret;
	}

	return 0;
}

/*
 * Start a DMA-driven transfer for the current chunk: program TCTRL,
 * enable only the END interrupt, load the descriptors, trigger the
 * command register and start the active DMA channel(s).
 */
static int spi_transfer_dma(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data * const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t data_len, tctrl, dma_rx_enable, dma_tx_enable;
	int error = 0;

	data_len = data->chunk_len - 1;

	if (data_len > MAX_TRANSFER_CNT) {
		return -EINVAL;
	}

	if (!spi_context_rx_on(ctx)) {
		tctrl = (TRNS_MODE_WRITE_ONLY << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_WR_TCNT_OFFSET);
		dma_rx_enable = 0;
		dma_tx_enable = 1;
	} else if (!spi_context_tx_on(ctx)) {
		tctrl = (TRNS_MODE_READ_ONLY << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_RD_TCNT_OFFSET);
		dma_rx_enable = 1;
		dma_tx_enable = 0;
	} else {
		tctrl = (TRNS_MODE_WRITE_READ << TCTRL_TRNS_MODE_OFFSET) |
			(data_len << TCTRL_WR_TCNT_OFFSET) |
			(data_len << TCTRL_RD_TCNT_OFFSET);
		dma_rx_enable = 1;
		dma_tx_enable = 1;
	}

	sys_write32(tctrl, SPI_TCTRL(cfg->base));

	/* Set sclk_div to zero */
	sys_clear_bits(SPI_TIMIN(cfg->base), 0xff);

	/* Enable END Interrupts */
	sys_write32(IEN_END_MSK, SPI_INTEN(cfg->base));

	/* Setting DMA config*/
	error = spi_dma_move_buffers(dev);
	if (error != 0) {
		return error;
	}

	/* Start transferring */
	sys_write32(0, SPI_CMD(cfg->base));

	if (dma_rx_enable) {
		spi_rx_dma_enable(dev);
		error = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
		if (error != 0) {
			return error;
		}
	}

	if (dma_tx_enable) {
		spi_tx_dma_enable(dev);
		error = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
		if (error != 0) {
			return error;
		}
	}

	return 0;
}
#endif

/*
 * Common transceive path: configure, reset the FIFOs, compute the chunk
 * length from the buffer sets, launch DMA or PIO transfer, then wait for
 * the END interrupt to complete the context. Lock is released on exit.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data * const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int error, dfs;
	size_t chunk_len;

	spi_context_lock(ctx, asynchronous, cb, userdata, config);

	error = configure(dev, config);
	if (error == 0) {
		data->busy = true;

		dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;
		spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, dfs);
		spi_context_cs_control(ctx, true);

		sys_set_bits(SPI_CTRL(cfg->base), CTRL_TX_FIFO_RST_MSK);
		sys_set_bits(SPI_CTRL(cfg->base), CTRL_RX_FIFO_RST_MSK);

		if (!spi_context_rx_on(ctx)) {
			chunk_len = spi_context_total_tx_len(ctx);
		} else if (!spi_context_tx_on(ctx)) {
			chunk_len = spi_context_total_rx_len(ctx);
		} else {
			size_t rx_len = spi_context_total_rx_len(ctx);
			size_t tx_len = spi_context_total_tx_len(ctx);

			chunk_len = MIN(rx_len, tx_len);
		}

		data->chunk_len = chunk_len;

#ifdef CONFIG_ANDES_SPI_DMA_MODE
		if ((data->dma_tx.dma_dev != NULL) && (data->dma_rx.dma_dev != NULL)) {
			error = spi_transfer_dma(dev);
			if (error != 0) {
				spi_context_cs_control(ctx, false);
				goto out;
			}
		} else {
#endif /* CONFIG_ANDES_SPI_DMA_MODE */
			error = spi_transfer(dev);
			if (error != 0) {
				spi_context_cs_control(ctx, false);
				goto out;
			}
#ifdef CONFIG_ANDES_SPI_DMA_MODE
		}
#endif /* CONFIG_ANDES_SPI_DMA_MODE */
		error = spi_context_wait_for_completion(ctx);
		spi_context_cs_control(ctx, false);
	}
out:
	spi_context_release(ctx, error);

	return error;
}

/* spi_driver_api.transceive: blocking transfer. */
int spi_atcspi200_transceive(const struct device *dev,
			     const struct spi_config *config,
			     const struct spi_buf_set *tx_bufs,
			     const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
/* spi_driver_api.transceive_async: non-blocking transfer with callback. */
int spi_atcspi200_transceive_async(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs,
				   spi_callback_t cb,
				   void *userdata)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif

/* spi_driver_api.release: refuse while busy, otherwise drop the lock. */
int spi_atcspi200_release(const struct device *dev, const struct spi_config *config)
{
	struct spi_atcspi200_data * const data = dev->data;

	if (data->busy) {
		return -EBUSY;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

/*
 * Device init: reject the XIP controller we execute from, verify DMA
 * devices (DMA build only), read the FIFO sizes, hook up the IRQ and
 * configure the CS GPIOs.
 */
int spi_atcspi200_init(const struct device *dev)
{
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data * const data = dev->data;
	int err = 0;

	/* we should not configure the device we are running on */
	if (cfg->xip) {
		return -EINVAL;
	}

	spi_context_unlock_unconditionally(&data->ctx);

#ifdef CONFIG_ANDES_SPI_DMA_MODE
	if (!data->dma_tx.dma_dev) {
		LOG_ERR("DMA device not found");
		return -ENODEV;
	}

	if (!data->dma_rx.dma_dev) {
		LOG_ERR("DMA device not found");
		return -ENODEV;
	}
#endif

	/* Get the TX/RX FIFO size of this device */
	data->tx_fifo_size = TX_FIFO_SIZE(cfg->base);
	data->rx_fifo_size = RX_FIFO_SIZE(cfg->base);

	cfg->cfg_func();

	irq_enable(cfg->irq_num);

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	return 0;
}

static const struct spi_driver_api spi_atcspi200_api = {
	.transceive = spi_atcspi200_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_atcspi200_transceive_async,
#endif
	.release = spi_atcspi200_release
};

/*
 * IRQ handler: services TX FIFO (refill), RX FIFO (drain) and END
 * (transfer complete) interrupt causes for the PIO path; on END it also
 * tears down any chained DMA channels and completes the spi_context.
 */
static void spi_atcspi200_irq_handler(void *arg)
{
	const struct device * const dev = (const struct device *) arg;
	const struct spi_atcspi200_cfg * const cfg = dev->config;
	struct spi_atcspi200_data * const data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t rx_data, cur_tx_fifo_num, cur_rx_fifo_num;
	uint32_t i, dfs, intr_status, spi_status;
	uint32_t tx_num = 0, tx_data = 0;
	int error = 0;

	intr_status = sys_read32(SPI_INTST(cfg->base));
	dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;

	if ((intr_status & INTST_TX_FIFO_INT_MSK) &&
	    !(intr_status & INTST_END_INT_MSK)) {

		spi_status = sys_read32(SPI_STAT(cfg->base));
		cur_tx_fifo_num = GET_TX_NUM(cfg->base);

		/* Free TX FIFO entries available for refill */
		tx_num = data->tx_fifo_size - cur_tx_fifo_num;

		for (i = tx_num; i > 0; i--) {

			if (data->tx_cnt >= data->chunk_len) {
				/*
				 * Have already sent a chunk of data, so stop
				 * sending data!
				 */
				sys_clear_bits(SPI_INTEN(cfg->base), IEN_TX_FIFO_MSK);
				break;
			}

			if (spi_context_tx_buf_on(ctx)) {
				switch (dfs) {
				case 1:
					tx_data = *ctx->tx_buf;
					break;
				case 2:
					tx_data = *(uint16_t *)ctx->tx_buf;
					break;
				}
			} else if (spi_context_tx_on(ctx)) {
				/* NULL buffer in the set: clock out zeros */
				tx_data = 0;
			} else {
				sys_clear_bits(SPI_INTEN(cfg->base), IEN_TX_FIFO_MSK);
				break;
			}

			sys_write32(tx_data, SPI_DATA(cfg->base));
			spi_context_update_tx(ctx, dfs, 1);
			data->tx_cnt++;
		}

		sys_write32(INTST_TX_FIFO_INT_MSK, SPI_INTST(cfg->base));
	}

	if (intr_status & INTST_RX_FIFO_INT_MSK) {
		cur_rx_fifo_num = GET_RX_NUM(cfg->base);

		for (i = cur_rx_fifo_num; i > 0; i--) {

			rx_data = sys_read32(SPI_DATA(cfg->base));

			if (spi_context_rx_buf_on(ctx)) {
				switch (dfs) {
				case 1:
					*ctx->rx_buf = rx_data;
					break;
				case 2:
					*(uint16_t *)ctx->rx_buf = rx_data;
					break;
				}
			} else if (!spi_context_rx_on(ctx)) {
				sys_clear_bits(SPI_INTEN(cfg->base), IEN_RX_FIFO_MSK);
			}

			spi_context_update_rx(ctx, dfs, 1);
		}

		sys_write32(INTST_RX_FIFO_INT_MSK, SPI_INTST(cfg->base));
	}

	if (intr_status & INTST_END_INT_MSK) {

		/* Clear end interrupt */
		sys_write32(INTST_END_INT_MSK, SPI_INTST(cfg->base));

		/* Disable all SPI interrupts */
		sys_write32(0, SPI_INTEN(cfg->base));

#ifdef CONFIG_ANDES_SPI_DMA_MODE
		if ((data->dma_tx.dma_dev != NULL) && data->dma_tx.dma_cfg.source_chaining_en) {
			spi_tx_dma_disable(dev);
			dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
			data->dma_tx.block_idx = 0;
			data->dma_tx.dma_blk_cfg.next_block = NULL;
		}

		if ((data->dma_rx.dma_dev != NULL) && data->dma_rx.dma_cfg.source_chaining_en) {
			spi_rx_dma_disable(dev);
			dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
			data->dma_rx.block_idx = 0;
			data->dma_rx.dma_blk_cfg.next_block = NULL;
		}
#endif /* CONFIG_ANDES_SPI_DMA_MODE */

		data->busy = false;

		spi_context_complete(ctx, dev, error);
	}
}

#if CONFIG_ANDES_SPI_DMA_MODE

/* Decode the packed devicetree channel_config cell. */
#define ANDES_DMA_CONFIG_DIRECTION(config)		(FIELD_GET(GENMASK(1, 0), config))
#define ANDES_DMA_CONFIG_PERIPHERAL_ADDR_INC(config)	(FIELD_GET(BIT(2), config))
#define ANDES_DMA_CONFIG_MEMORY_ADDR_INC(config)	(FIELD_GET(BIT(3), config))
#define ANDES_DMA_CONFIG_PERIPHERAL_DATA_SIZE(config)	(1 << (FIELD_GET(GENMASK(6, 4), config)))
#define ANDES_DMA_CONFIG_MEMORY_DATA_SIZE(config)	(1 << (FIELD_GET(GENMASK(9, 7), config)))
#define ANDES_DMA_CONFIG_PRIORITY(config)		(FIELD_GET(BIT(10), config))

#define DMA_CHANNEL_CONFIG(id, dir)					\
	DT_INST_DMAS_CELL_BY_NAME(id, dir, channel_config)

#define SPI_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev)	\
	.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir)), \
	.channel =							\
		DT_INST_DMAS_CELL_BY_NAME(index, dir, channel),		\
	.dma_cfg = {							\
		.dma_slot =						\
			DT_INST_DMAS_CELL_BY_NAME(index, dir, slot),	\
		.channel_direction = ANDES_DMA_CONFIG_DIRECTION(	\
			DMA_CHANNEL_CONFIG(index, dir)),		\
		.complete_callback_en = 0,				\
		.error_callback_dis = 0,				\
		.source_data_size =					\
			ANDES_DMA_CONFIG_##src_dev##_DATA_SIZE(		\
				DMA_CHANNEL_CONFIG(index, dir)		\
			),						\
		.dest_data_size =					\
			ANDES_DMA_CONFIG_##dest_dev##_DATA_SIZE(	\
				DMA_CHANNEL_CONFIG(index, dir)		\
			),						\
		.source_burst_length = 1, /* SINGLE transfer */		\
		.dest_burst_length = 1, /* SINGLE transfer */		\
		.channel_priority = ANDES_DMA_CONFIG_PRIORITY(		\
			DMA_CHANNEL_CONFIG(index, dir)			\
		),							\
		.source_chaining_en = DT_PROP(DT_INST_DMAS_CTLR_BY_NAME( \
			index, dir), chain_transfer),			\
		.dest_chaining_en = DT_PROP(DT_INST_DMAS_CTLR_BY_NAME(	\
			index, dir), chain_transfer),			\
	},								\
	.src_addr_increment =						\
		ANDES_DMA_CONFIG_##src_dev##_ADDR_INC(			\
			DMA_CHANNEL_CONFIG(index, dir)			\
		),							\
	.dst_addr_increment =						\
		ANDES_DMA_CONFIG_##dest_dev##_ADDR_INC(			\
			DMA_CHANNEL_CONFIG(index, dir)			\
		)

#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)			\
	.dma_##dir = {							\
		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, dir),		\
			(SPI_DMA_CHANNEL_INIT(id, dir, DIR, src, dest)), \
			(NULL))						\
	},
#else
#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)
#endif

#define SPI_BUSY_INIT .busy = false,

#if (CONFIG_XIP)
#define SPI_ROM_CFG_XIP(node_id) DT_SAME_NODE(node_id, DT_BUS(DT_CHOSEN(zephyr_flash)))
#else
#define SPI_ROM_CFG_XIP(node_id) false
#endif

#define SPI_INIT(n)							\
	static struct spi_atcspi200_data spi_atcspi200_dev_data_##n = { \
		SPI_CONTEXT_INIT_LOCK(spi_atcspi200_dev_data_##n, ctx), \
		SPI_CONTEXT_INIT_SYNC(spi_atcspi200_dev_data_##n, ctx), \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
		SPI_BUSY_INIT						\
		SPI_DMA_CHANNEL(n, rx, RX, PERIPHERAL, MEMORY)		\
		SPI_DMA_CHANNEL(n, tx, TX, MEMORY, PERIPHERAL)		\
	};								\
	static void spi_atcspi200_cfg_##n(void);			\
	static struct spi_atcspi200_cfg spi_atcspi200_dev_cfg_##n = {	\
		.cfg_func = spi_atcspi200_cfg_##n,			\
		.base = DT_INST_REG_ADDR(n),				\
		.irq_num = DT_INST_IRQN(n),				\
		.f_sys = DT_INST_PROP(n, clock_frequency),		\
		.xip = SPI_ROM_CFG_XIP(DT_DRV_INST(n)),			\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n,					\
		spi_atcspi200_init,					\
		NULL,							\
		&spi_atcspi200_dev_data_##n,				\
		&spi_atcspi200_dev_cfg_##n,				\
		POST_KERNEL,						\
		CONFIG_SPI_INIT_PRIORITY,				\
		&spi_atcspi200_api);					\
									\
	static void spi_atcspi200_cfg_##n(void)				\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n),				\
			DT_INST_IRQ(n, priority),			\
			spi_atcspi200_irq_handler,			\
			DEVICE_DT_INST_GET(n),				\
			0);						\
	};

DT_INST_FOREACH_STATUS_OKAY(SPI_INIT)
```
/content/code_sandbox/drivers/spi/spi_andes_atcspi200.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,140
```unknown # MCUXpresso SDK SPI config SPI_MCUX_DSPI bool "MCUX SPI driver" default y depends on DT_HAS_NXP_KINETIS_DSPI_ENABLED depends on CLOCK_CONTROL select PINCTRL help Enable support for mcux spi driver. if SPI_MCUX_DSPI config DSPI_MCUX_EDMA bool "ENABLE EDMA for DSPI driver" depends on HAS_MCUX && HAS_MCUX_EDMA help Enable the MCUX DSPI driver. if DSPI_MCUX_EDMA config MCUX_DSPI_BUFFER_SIZE int "buffer size" default 144 help data buffer size config MCUX_DSPI_EDMA_SHUFFLE_DATA bool "use the internal data process" default y help TX data need pre-fix with command, user can choose not to use it endif # DSPI_MCUX_EDMA endif # SPI_MCUX_DSPI ```
/content/code_sandbox/drivers/spi/Kconfig.mcux_dspi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
200
```c /* * */ #define DT_DRV_COMPAT nxp_kinetis_dspi #include <errno.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/clock_control.h> #include <fsl_dspi.h> #include <zephyr/drivers/pinctrl.h> #ifdef CONFIG_DSPI_MCUX_EDMA #include <zephyr/drivers/dma.h> #include <fsl_edma.h> #endif #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(spi_mcux_dspi); #include "spi_context.h" #ifdef CONFIG_DSPI_MCUX_EDMA struct spi_edma_config { const struct device *dma_dev; int32_t state; uint32_t dma_channel; void (*irq_call_back)(void); struct dma_config dma_cfg; }; #endif struct spi_mcux_config { SPI_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; void (*irq_config_func)(const struct device *dev); uint32_t pcs_sck_delay; uint32_t sck_pcs_delay; uint32_t transfer_delay; uint32_t which_ctar; uint32_t samplePoint; bool enable_continuous_sck; bool enable_rxfifo_overwrite; bool enable_modified_timing_format; bool is_dma_chn_shared; const struct pinctrl_dev_config *pincfg; }; struct spi_mcux_data { const struct device *dev; dspi_master_handle_t handle; struct spi_context ctx; size_t transfer_len; #ifdef CONFIG_DSPI_MCUX_EDMA struct dma_block_config tx_dma_block; struct dma_block_config tx_dma_block_end; struct dma_block_config rx_dma_block; struct spi_edma_config rx_dma_config; struct spi_edma_config tx_dma_config; int frame_size; int tx_transfer_count; int rx_transfer_count; uint32_t which_pcs; struct spi_buf *inner_tx_buffer; struct spi_buf *inner_rx_buffer; #endif }; #ifdef CONFIG_DSPI_MCUX_EDMA static int get_size_byte_by_frame_size(int len, int frame_size) { if (frame_size == 8) { return (len * 4); } else { /* frame_size == 16*/ return (len * 2); } } #endif static int spi_mcux_transfer_next_packet(const struct device *dev) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; SPI_Type *base = config->base; struct spi_context *ctx = 
&data->ctx; dspi_transfer_t transfer; status_t status; if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) { /* nothing left to rx or tx, we're done! */ LOG_DBG("spi transceive done"); spi_context_cs_control(&data->ctx, false); spi_context_complete(&data->ctx, dev, 0); return 0; } #ifdef CONFIG_DSPI_MCUX_EDMA if (!config->is_dma_chn_shared) { /* start dma directly in not shared mode */ if (ctx->tx_len != 0) { int ret = 0; LOG_DBG("Starting DMA Ch%u", data->tx_dma_config.dma_channel); ret = dma_start(data->tx_dma_config.dma_dev, data->tx_dma_config.dma_channel); if (ret < 0) { LOG_ERR("Failed to start DMA Ch%d (%d)", data->tx_dma_config.dma_channel, ret); return ret; } } if (ctx->rx_len != 0) { int ret = 0; LOG_DBG("Starting DMA Ch%u", data->rx_dma_config.dma_channel); ret = dma_start(data->rx_dma_config.dma_dev, data->rx_dma_config.dma_channel); if (ret < 0) { LOG_ERR("Failed to start DMA Ch%d (%d)", data->rx_dma_config.dma_channel, ret); return ret; } } } DSPI_EnableDMA(base, (uint32_t)kDSPI_RxDmaEnable | (uint32_t)kDSPI_TxDmaEnable); DSPI_StartTransfer(base); if (config->is_dma_chn_shared) { /* in master mode start tx */ dma_start(data->tx_dma_config.dma_dev, data->tx_dma_config.dma_channel); /* TBD kDSPI_TxFifoFillRequestFlag */ DSPI_EnableInterrupts(base, (uint32_t)kDSPI_RxFifoDrainRequestFlag); LOG_DBG("trigger tx to start master"); } return 0; #endif transfer.configFlags = kDSPI_MasterCtar0 | kDSPI_MasterPcsContinuous | (ctx->config->slave << DSPI_MASTER_PCS_SHIFT); if (ctx->tx_len == 0) { /* rx only, nothing to tx */ transfer.txData = NULL; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->rx_len; } else if (ctx->rx_len == 0) { /* tx only, nothing to rx */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = NULL; transfer.dataSize = ctx->tx_len; } else if (ctx->tx_len == ctx->rx_len) { /* rx and tx are the same length */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->tx_len; } else if (ctx->tx_len 
> ctx->rx_len) { /* Break up the tx into multiple transfers so we don't have to * rx into a longer intermediate buffer. Leave chip select * active between transfers. */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->rx_len; transfer.configFlags |= kDSPI_MasterActiveAfterTransfer; } else { /* Break up the rx into multiple transfers so we don't have to * tx from a longer intermediate buffer. Leave chip select * active between transfers. */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->tx_len; transfer.configFlags |= kDSPI_MasterActiveAfterTransfer; } if (!(ctx->tx_count <= 1 && ctx->rx_count <= 1)) { transfer.configFlags |= kDSPI_MasterActiveAfterTransfer; } data->transfer_len = transfer.dataSize; status = DSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); if (status != kStatus_Success) { LOG_ERR("Transfer could not start on %s: %d", dev->name, status); return status == kDSPI_Busy ? 
-EBUSY : -EINVAL; } return 0; } static void spi_mcux_isr(const struct device *dev) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; SPI_Type *base = config->base; #ifdef CONFIG_DSPI_MCUX_EDMA LOG_DBG("isr is called"); if (0U != (DSPI_GetStatusFlags(base) & (uint32_t)kDSPI_RxFifoDrainRequestFlag)) { /* start rx */ dma_start(data->rx_dma_config.dma_dev, data->rx_dma_config.dma_channel); } #else DSPI_MasterTransferHandleIRQ(base, &data->handle); #endif } #ifdef CONFIG_DSPI_MCUX_EDMA static void mcux_init_inner_buffer_with_cmd(const struct device *dev, uint16_t dummy) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; dspi_command_data_config_t commandStruct; uint32_t *pbuf = data->inner_tx_buffer->buf; uint32_t command; int i = 0; commandStruct.whichPcs = data->which_pcs; commandStruct.isEndOfQueue = false; commandStruct.clearTransferCount = false; commandStruct.whichCtar = config->which_ctar; commandStruct.isPcsContinuous = config->enable_continuous_sck; command = DSPI_MasterGetFormattedCommand(&(commandStruct)); for (i = 0; i < data->inner_tx_buffer->len / 4; i++) { *pbuf = command | dummy; pbuf++; } } /** * @brief update the tx data to internal buffer with command embedded, * if no tx data, use dummy value. 
* tx data frame size shall not bigger than 16 bits * the overall transfer data in one batch shall not larger than FIFO size */ static int mcux_spi_context_data_update(const struct device *dev) { struct spi_mcux_data *data = dev->data; uint32_t frame_size_bit = data->frame_size; struct spi_context *ctx = (struct spi_context *)&data->ctx; uint32_t *pcdata = data->inner_tx_buffer->buf; if (frame_size_bit > FSL_FEATURE_DSPI_MAX_DATA_WIDTH) { /* TODO need set to continues PCS to have frame size larger than 16 */ LOG_ERR("frame size is larger than 16"); return -EINVAL; } #ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA /* only used when use inner buffer to translate tx format */ if (CONFIG_MCUX_DSPI_BUFFER_SIZE * 4 < get_size_byte_by_frame_size(ctx->current_tx->len, frame_size_bit)) { /* inner buffer can not hold all transferred data */ LOG_ERR("inner buffer is too small to hold all data esp %d, act %d", ctx->current_tx->len * 8 / frame_size_bit, (CONFIG_MCUX_DSPI_BUFFER_SIZE * 4 / frame_size_bit)); return -EINVAL; } if (frame_size_bit == 8) { int i = 0; uint8_t *pdata = (uint8_t *)ctx->tx_buf; if (pdata) { do { uint16_t temp_data = 0; temp_data = *pdata; pdata++; *pcdata |= temp_data; pcdata++; i++; } while (i < ctx->current_tx->len && i < data->inner_tx_buffer->len); } /* indicate it is the last data */ if (i == ctx->current_tx->len) { --pcdata; *pcdata |= SPI_PUSHR_EOQ(1) | SPI_PUSHR_CTCNT(1); LOG_DBG("last pcdata is %x", *pcdata); } } else if (frame_size_bit == 16) { int i = 0; uint16_t *pdata = (uint16_t *)ctx->tx_buf; if (pdata) { do { *pcdata |= *pdata; LOG_DBG("pcdata %d is %x", i / 2, *pcdata); pdata++; pcdata++; i += 2; } while (i < ctx->current_tx->len && i < data->inner_tx_buffer->len); } if (i == ctx->current_tx->len) { /* indicate it is the last data */ --pcdata; *pcdata |= SPI_PUSHR_EOQ(1); LOG_DBG("last pcdata is %x", *pcdata); } } else { /* TODO for other size */ LOG_ERR("DMA mode only support 8/16 bits frame size"); return -EINVAL; } #endif /* 
CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA */ return 0; } static int update_tx_dma(const struct device *dev) { uint32_t tx_size = 0; uint8_t *tx_buf; struct spi_mcux_data *data = dev->data; const struct spi_mcux_config *config = dev->config; SPI_Type *base = config->base; uint32_t frame_size = data->frame_size; bool rx_only = false; DSPI_DisableDMA(base, (uint32_t)kDSPI_TxDmaEnable); if (data->ctx.tx_len == 0) { LOG_DBG("empty data no need to setup DMA"); return 0; } if (data->ctx.current_tx && data->ctx.current_tx->len > 0 && data->ctx.current_tx->buf != NULL) { #ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA tx_size = get_size_byte_by_frame_size(data->transfer_len, frame_size); tx_buf = data->inner_tx_buffer->buf; #else /* expect the buffer is pre-set */ tx_size = get_size_byte_by_frame_size(data->ctx.current_tx->len, frame_size); LOG_DBG("tx size is %d", tx_size); tx_buf = data->ctx.current_tx->buf; #endif } else { tx_buf = data->inner_tx_buffer->buf; tx_size = get_size_byte_by_frame_size(data->transfer_len, frame_size); rx_only = true; LOG_DBG("rx only 0x%x, size %d", (uint32_t)tx_buf, tx_size); } data->tx_dma_block.source_address = (uint32_t)tx_buf; data->tx_dma_block.dest_address = DSPI_MasterGetTxRegisterAddress(base); data->tx_dma_block.next_block = NULL; if (config->is_dma_chn_shared) { /* transfer FIFO size data */ data->tx_dma_block.block_size = 4; } else { data->tx_dma_block.block_size = tx_size; } data->tx_dma_config.dma_cfg.user_data = (void *) dev; dma_config(data->tx_dma_config.dma_dev, data->tx_dma_config.dma_channel, (struct dma_config *)&data->tx_dma_config.dma_cfg); return 0; } static int update_rx_dma(const struct device *dev) { uint32_t rx_size = 0; uint8_t *rx_buf; struct spi_mcux_data *data = dev->data; const struct spi_mcux_config *config = dev->config; SPI_Type *base = config->base; uint32_t frame_size_byte = (data->frame_size >> 3); bool tx_only = false; DSPI_DisableDMA(base, (uint32_t)kDSPI_RxDmaEnable); if (data->ctx.rx_len == 0) { LOG_DBG("empty 
data no need to setup DMA"); return 0; } if (data->ctx.current_rx) { rx_size = data->transfer_len; if (data->ctx.rx_buf != NULL) { rx_buf = data->ctx.rx_buf; } else { rx_buf = data->inner_rx_buffer->buf; } } else { /* tx only */ rx_buf = data->inner_rx_buffer->buf; rx_size = data->transfer_len; tx_only = true; LOG_DBG("tx only 0x%x, size %d", (uint32_t)rx_buf, rx_size); } if (config->is_dma_chn_shared) { if (data->ctx.rx_len == 1) { /* do not link tx on last frame*/ LOG_DBG("do not link tx/rx channel for last one"); data->rx_dma_config.dma_cfg.source_chaining_en = 0; data->rx_dma_config.dma_cfg.dest_chaining_en = 0; } else { LOG_DBG("shared mux mode, link tx/rx channel"); data->rx_dma_config.dma_cfg.source_chaining_en = 1; data->rx_dma_config.dma_cfg.dest_chaining_en = 1; data->rx_dma_config.dma_cfg.linked_channel = data->tx_dma_config.dma_channel; } data->rx_dma_block.dest_address = (uint32_t)rx_buf; data->rx_dma_block.source_address = DSPI_GetRxRegisterAddress(base); /* do once in share mode */ data->rx_dma_block.block_size = frame_size_byte; data->rx_dma_config.dma_cfg.source_burst_length = frame_size_byte; data->rx_dma_config.dma_cfg.dest_burst_length = frame_size_byte; data->rx_dma_config.dma_cfg.source_data_size = frame_size_byte; data->rx_dma_config.dma_cfg.dest_data_size = frame_size_byte; } else { data->rx_dma_block.dest_address = (uint32_t)rx_buf; data->rx_dma_block.source_address = DSPI_GetRxRegisterAddress(base); data->rx_dma_block.block_size = rx_size; data->rx_dma_config.dma_cfg.source_burst_length = frame_size_byte; data->rx_dma_config.dma_cfg.dest_burst_length = frame_size_byte; data->rx_dma_config.dma_cfg.source_data_size = frame_size_byte; data->rx_dma_config.dma_cfg.dest_data_size = frame_size_byte; } data->rx_dma_config.dma_cfg.user_data = (void *) dev; dma_config(data->rx_dma_config.dma_dev, data->rx_dma_config.dma_channel, (struct dma_config *)&data->rx_dma_config.dma_cfg); return 0; } static int configure_dma(const struct device *dev) { const 
struct spi_mcux_config *config = dev->config; if (config->is_dma_chn_shared) { LOG_DBG("shard DMA request"); } update_tx_dma(dev); update_rx_dma(dev); return 0; } static void dma_callback(const struct device *dma_dev, void *callback_arg, uint32_t channel, int error_code) { const struct device *dev = (const struct device *)callback_arg; const struct spi_mcux_config *config = dev->config; SPI_Type *base = config->base; struct spi_mcux_data *data = dev->data; LOG_DBG("=dma call back @channel %d=", channel); if (error_code < 0) { LOG_ERR("error happened no callback process %d", error_code); return; } if (channel == data->tx_dma_config.dma_channel) { LOG_DBG("ctx.tx_len is %d", data->ctx.tx_len); LOG_DBG("tx count %d", data->ctx.tx_count); spi_context_update_tx(&data->ctx, 1, data->transfer_len); LOG_DBG("tx count %d", data->ctx.tx_count); LOG_DBG("tx buf/len %p/%zu", data->ctx.tx_buf, data->ctx.tx_len); data->tx_transfer_count++; /* tx done */ } else { LOG_DBG("ctx.rx_len is %d", data->ctx.rx_len); LOG_DBG("rx count %d", data->ctx.rx_count); spi_context_update_rx(&data->ctx, 1, data->transfer_len); LOG_DBG("rx count %d", data->ctx.rx_count); /* setup the inner tx buffer */ LOG_DBG("rx buf/len %p/%zu", data->ctx.rx_buf, data->ctx.rx_len); data->rx_transfer_count++; } if (data->tx_transfer_count == data->rx_transfer_count) { LOG_DBG("start next packet"); DSPI_StopTransfer(base); DSPI_FlushFifo(base, true, true); DSPI_ClearStatusFlags(base, (uint32_t)kDSPI_AllStatusFlag); mcux_init_inner_buffer_with_cmd(dev, 0); mcux_spi_context_data_update(dev); if (config->is_dma_chn_shared) { data->transfer_len = data->frame_size >> 3; } else { if (data->ctx.tx_len == 0) { data->transfer_len = data->ctx.rx_len; } else if (data->ctx.rx_len == 0) { data->transfer_len = data->ctx.tx_len; } else { data->transfer_len = data->ctx.tx_len > data->ctx.rx_len ? 
data->ctx.rx_len : data->ctx.tx_len; } } update_tx_dma(dev); update_rx_dma(dev); spi_mcux_transfer_next_packet(dev); } else if (data->ctx.rx_len == 0 && data->ctx.tx_len == 0) { LOG_DBG("end of transfer"); DSPI_StopTransfer(base); DSPI_FlushFifo(base, true, true); DSPI_ClearStatusFlags(base, (uint32_t)kDSPI_AllStatusFlag); data->transfer_len = 0; spi_mcux_transfer_next_packet(dev); } LOG_DBG("TX/RX DMA callback done"); } #else static void spi_mcux_master_transfer_callback(SPI_Type *base, dspi_master_handle_t *handle, status_t status, void *userData) { struct spi_mcux_data *data = userData; spi_context_update_tx(&data->ctx, 1, data->transfer_len); spi_context_update_rx(&data->ctx, 1, data->transfer_len); spi_mcux_transfer_next_packet(data->dev); } #endif /* CONFIG_DSPI_MCUX_EDMA */ static int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; SPI_Type *base = config->base; dspi_master_config_t master_config; uint32_t clock_freq; uint32_t word_size; dspi_master_ctar_config_t *ctar_config = &master_config.ctarConfig; if (spi_context_configured(&data->ctx, spi_cfg)) { /* This configuration is already in use */ return 0; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } DSPI_MasterGetDefaultConfig(&master_config); master_config.whichPcs = 1U << spi_cfg->slave; master_config.whichCtar = config->which_ctar; master_config.pcsActiveHighOrLow = (spi_cfg->operation & SPI_CS_ACTIVE_HIGH) ? 
kDSPI_PcsActiveHigh : kDSPI_PcsActiveLow; master_config.samplePoint = config->samplePoint; master_config.enableContinuousSCK = config->enable_continuous_sck; master_config.enableRxFifoOverWrite = config->enable_rxfifo_overwrite; master_config.enableModifiedTimingFormat = config->enable_modified_timing_format; if (spi_cfg->slave > FSL_FEATURE_DSPI_CHIP_SELECT_COUNT) { LOG_ERR("Slave %d is greater than %d", spi_cfg->slave, FSL_FEATURE_DSPI_CHIP_SELECT_COUNT); return -EINVAL; } word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (word_size > FSL_FEATURE_DSPI_MAX_DATA_WIDTH) { LOG_ERR("Word size %d is greater than %d", word_size, FSL_FEATURE_DSPI_MAX_DATA_WIDTH); return -EINVAL; } ctar_config->bitsPerFrame = word_size; ctar_config->cpol = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) ? kDSPI_ClockPolarityActiveLow : kDSPI_ClockPolarityActiveHigh; ctar_config->cpha = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) ? kDSPI_ClockPhaseSecondEdge : kDSPI_ClockPhaseFirstEdge; ctar_config->direction = (spi_cfg->operation & SPI_TRANSFER_LSB) ? 
kDSPI_LsbFirst : kDSPI_MsbFirst; ctar_config->baudRate = spi_cfg->frequency; ctar_config->pcsToSckDelayInNanoSec = config->pcs_sck_delay; ctar_config->lastSckToPcsDelayInNanoSec = config->sck_pcs_delay; ctar_config->betweenTransferDelayInNanoSec = config->transfer_delay; if (!device_is_ready(config->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } LOG_DBG("clock_freq is %d", clock_freq); DSPI_MasterInit(base, &master_config, clock_freq); #ifdef CONFIG_DSPI_MCUX_EDMA DSPI_StopTransfer(base); DSPI_FlushFifo(base, true, true); DSPI_ClearStatusFlags(base, (uint32_t)kDSPI_AllStatusFlag); /* record frame_size setting for DMA */ data->frame_size = word_size; /* keep the pcs settings */ data->which_pcs = 1U << spi_cfg->slave; #ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA mcux_init_inner_buffer_with_cmd(dev, 0); #endif #else DSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_transfer_callback, data); DSPI_SetDummyData(base, 0); #endif data->ctx.config = spi_cfg; return 0; } static int transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { struct spi_mcux_data *data = dev->data; int ret; #ifdef CONFIG_DSPI_MCUX_EDMA const struct spi_mcux_config *config = dev->config; SPI_Type *base = config->base; #endif spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); ret = spi_mcux_configure(dev, spi_cfg); if (ret) { goto out; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(&data->ctx, true); #ifdef CONFIG_DSPI_MCUX_EDMA DSPI_StopTransfer(base); DSPI_FlushFifo(base, true, true); DSPI_ClearStatusFlags(base, (uint32_t)kDSPI_AllStatusFlag); /* setup the tx buffer with end */ mcux_init_inner_buffer_with_cmd(dev, 0); mcux_spi_context_data_update(dev); if 
(config->is_dma_chn_shared) { data->transfer_len = data->frame_size >> 3; } else { data->transfer_len = data->ctx.tx_len > data->ctx.rx_len ? data->ctx.rx_len : data->ctx.tx_len; } data->tx_transfer_count = 0; data->rx_transfer_count = 0; configure_dma(dev); #endif ret = spi_mcux_transfer_next_packet(dev); if (ret) { goto out; } ret = spi_context_wait_for_completion(&data->ctx); out: spi_context_release(&data->ctx, ret); return ret; } static int spi_mcux_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_mcux_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_mcux_data *data = dev->data; spi_context_unlock_unconditionally(&data->ctx); return 0; } static int spi_mcux_init(const struct device *dev) { int err; struct spi_mcux_data *data = dev->data; const struct spi_mcux_config *config = dev->config; #ifdef CONFIG_DSPI_MCUX_EDMA enum dma_channel_filter spi_filter = DMA_CHANNEL_NORMAL; const struct device *dma_dev; dma_dev = data->rx_dma_config.dma_dev; data->rx_dma_config.dma_channel = dma_request_channel(dma_dev, (void *)&spi_filter); dma_dev = data->tx_dma_config.dma_dev; data->tx_dma_config.dma_channel = dma_request_channel(dma_dev, (void *)&spi_filter); #else config->irq_config_func(dev); #endif err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err != 0) { return err; } data->dev = dev; err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } spi_context_unlock_unconditionally(&data->ctx); 
return 0; } static const struct spi_driver_api spi_mcux_driver_api = { .transceive = spi_mcux_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_mcux_transceive_async, #endif .release = spi_mcux_release, }; /* if a then b otherwise return 1 */ #define _UTIL_AND2(a, b) COND_CODE_1(UTIL_BOOL(a), (b), (1)) #ifdef CONFIG_DSPI_MCUX_EDMA #define TX_BUFFER(id) \ static uint32_t \ edma_tx_buffer_##id[CONFIG_MCUX_DSPI_BUFFER_SIZE >> 2]; \ static struct spi_buf spi_edma_tx_buffer_##id = { \ .buf = edma_tx_buffer_##id, \ .len = CONFIG_MCUX_DSPI_BUFFER_SIZE, \ } #define RX_BUFFER(id) \ static uint32_t \ edma_rx_buffer_##id[CONFIG_MCUX_DSPI_BUFFER_SIZE >> 2]; \ static struct spi_buf spi_edma_rx_buffer_##id = { \ .buf = edma_rx_buffer_##id, \ .len = CONFIG_MCUX_DSPI_BUFFER_SIZE, \ } #define TX_DMA_CONFIG(id) \ .inner_tx_buffer = &spi_edma_tx_buffer_##id, \ .tx_dma_config = { \ .dma_dev = \ DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \ .dma_cfg = { \ .source_burst_length = 4, \ .dest_burst_length = 4, \ .source_data_size = 4, \ .dest_data_size = 4, \ .dma_callback = dma_callback, \ .complete_callback_en = 1, \ .error_callback_dis = 0, \ .block_count = 1, \ .head_block = &spi_mcux_data_##id.tx_dma_block, \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME( \ id, tx, source), \ }, \ }, #define RX_DMA_CONFIG(id) \ .inner_rx_buffer = &spi_edma_rx_buffer_##id, \ .rx_dma_config = { \ .dma_dev = \ DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \ .dma_cfg = { \ .source_burst_length = 2, \ .dest_burst_length = 2, \ .source_data_size = 2, \ .dest_data_size = 2, \ .dma_callback = dma_callback, \ .complete_callback_en = 1, \ .error_callback_dis = 0, \ .block_count = \ _UTIL_AND2(DT_INST_NODE_HAS_PROP( \ id, nxp_rx_tx_chn_share), 2), \ .head_block = &spi_mcux_data_##id.rx_dma_block, \ .channel_direction = PERIPHERAL_TO_MEMORY, \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME( \ id, rx, source), \ }, \ }, #else #define TX_BUFFER(id) #define 
RX_BUFFER(id) #define TX_DMA_CONFIG(id) #define RX_DMA_CONFIG(id) #endif #define SPI_MCUX_DSPI_DEVICE(id) \ PINCTRL_DT_INST_DEFINE(id); \ static void spi_mcux_config_func_##id(const struct device *dev);\ TX_BUFFER(id); \ RX_BUFFER(id); \ static struct spi_mcux_data spi_mcux_data_##id = { \ SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##id, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##id, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx) \ TX_DMA_CONFIG(id) RX_DMA_CONFIG(id) \ }; \ static const struct spi_mcux_config spi_mcux_config_##id = { \ .base = (SPI_Type *)DT_INST_REG_ADDR(id), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name), \ .irq_config_func = spi_mcux_config_func_##id, \ .pcs_sck_delay = \ DT_INST_PROP_OR(id, pcs_sck_delay, 0), \ .sck_pcs_delay = \ DT_INST_PROP_OR(id, sck_pcs_delay, 0), \ .transfer_delay = \ DT_INST_PROP_OR(id, transfer_delay, 0), \ .which_ctar = \ DT_INST_PROP_OR(id, ctar, 0), \ .samplePoint = \ DT_INST_PROP_OR(id, sample_point, 0), \ .enable_continuous_sck = \ DT_INST_PROP(id, continuous_sck), \ .enable_rxfifo_overwrite = \ DT_INST_PROP(id, rx_fifo_overwrite), \ .enable_modified_timing_format = \ DT_INST_PROP(id, modified_timing_format), \ .is_dma_chn_shared = \ DT_INST_PROP(id, nxp_rx_tx_chn_share), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ }; \ DEVICE_DT_INST_DEFINE(id, \ spi_mcux_init, \ NULL, \ &spi_mcux_data_##id, \ &spi_mcux_config_##id, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_mcux_driver_api); \ static void spi_mcux_config_func_##id(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(id), \ DT_INST_IRQ(id, priority), \ spi_mcux_isr, DEVICE_DT_INST_GET(id), \ 0); \ irq_enable(DT_INST_IRQN(id)); \ } DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_DSPI_DEVICE) ```
/content/code_sandbox/drivers/spi/spi_mcux_dspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,910
```c /* * */ #define DT_DRV_COMPAT litex_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_litex); #include "spi_litex_common.h" #define POSITION_WORD_SIZE 8 struct spi_litex_data { struct spi_context ctx; uint8_t dfs; /* dfs in bytes: 1,2,3 or 4 */ }; struct spi_litex_cfg { uint32_t control_addr; uint32_t status_addr; uint32_t mosi_addr; uint32_t miso_addr; uint32_t cs_addr; uint32_t loopback_addr; uint32_t clk_divider_addr; bool clk_divider_exists; int data_width; int max_cs; }; static void spi_set_frequency(const struct device *dev, const struct spi_config *config) { const struct spi_litex_cfg *dev_config = dev->config; if (!dev_config->clk_divider_exists) { /* The clk_divider is optional, thats why we check. */ LOG_WRN("No clk_divider found, can't change frequency"); return; } uint16_t divisor = DIV_ROUND_UP(sys_clock_hw_cycles_per_sec(), config->frequency); litex_write16(divisor, dev_config->clk_divider_addr); } /* Helper Functions */ static int spi_config(const struct device *dev, const struct spi_config *config, uint16_t *control) { const struct spi_litex_cfg *dev_config = dev->config; struct spi_litex_data *dev_data = dev->data; if (config->slave >= dev_config->max_cs) { LOG_ERR("More slaves than supported"); return -ENOTSUP; } if (config->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } if (SPI_WORD_SIZE_GET(config->operation) > dev_config->data_width) { LOG_ERR("Word size must be <= %d", dev_config->data_width); return -ENOTSUP; } if (config->operation & SPI_CS_ACTIVE_HIGH) { LOG_ERR("CS active high not supported"); return -ENOTSUP; } if (config->operation & SPI_LOCK_ON) { LOG_ERR("Lock On not supported"); return -ENOTSUP; } if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only supports single mode"); return -ENOTSUP; } if (config->operation & SPI_TRANSFER_LSB) { LOG_ERR("LSB first not supported"); return 
-ENOTSUP; } if (config->operation & (SPI_MODE_CPOL | SPI_MODE_CPHA)) { LOG_ERR("Only supports CPOL=CPHA=0"); return -ENOTSUP; } if (config->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } /* Set Loopback */ if (!litex_read8(dev_config->loopback_addr) != !(config->operation & SPI_MODE_LOOP)) { litex_write8(((config->operation & SPI_MODE_LOOP) ? 0x1 : 0x0), dev_config->loopback_addr); } /* Set word size */ *control = (uint16_t) (SPI_WORD_SIZE_GET(config->operation) << POSITION_WORD_SIZE); dev_data->dfs = get_dfs_value(config); /* Write configurations */ litex_write16(*control, dev_config->control_addr); spi_set_frequency(dev, config); return 0; } static void spi_litex_send(const struct device *dev, uint32_t frame, uint16_t control) { const struct spi_litex_cfg *dev_config = dev->config; /* Write frame to register */ litex_write32(frame, dev_config->mosi_addr); /* Start the transfer */ litex_write16(control | BIT(0), dev_config->control_addr); /* Wait until the transfer ends */ while (!(litex_read8(dev_config->status_addr) & BIT(0))) { ;/* Wait */ } } static uint32_t spi_litex_recv(const struct device *dev) { const struct spi_litex_cfg *dev_config = dev->config; /* Return data inside MISO register */ return litex_read32(dev_config->miso_addr); } static void spi_litex_xfer(const struct device *dev, const struct spi_config *config, uint16_t control) { const struct spi_litex_cfg *dev_config = dev->config; struct spi_litex_data *dev_data = dev->data; struct spi_context *ctx = &dev_data->ctx; uint32_t txd, rxd; /* Set CS */ litex_write16(BIT(config->slave), dev_config->cs_addr); do { /* Send a frame */ if (spi_context_tx_buf_on(ctx)) { litex_spi_tx_put(dev_data->dfs, &txd, ctx->tx_buf); } else { txd = 0U; } LOG_DBG("txd: 0x%x", txd); spi_litex_send(dev, txd, control); spi_context_update_tx(ctx, dev_data->dfs, 1); rxd = spi_litex_recv(dev); LOG_DBG("rxd: 0x%x", rxd); if (spi_context_rx_buf_on(ctx)) { litex_spi_rx_put(dev_data->dfs, 
&rxd, ctx->rx_buf); } spi_context_update_rx(ctx, dev_data->dfs, 1); } while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)); spi_context_complete(ctx, dev, 0); /* Clear CS */ litex_write16(0, dev_config->cs_addr); } /* API Functions */ static int spi_litex_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_litex_data *dev_data = dev->data; uint16_t control = 0; int ret = 0; ret = spi_config(dev, config, &control); if (ret < 0) { return ret; } spi_context_buffers_setup(&dev_data->ctx, tx_bufs, rx_bufs, dev_data->dfs); spi_litex_xfer(dev, config, control); return 0; } #ifdef CONFIG_SPI_ASYNC static int spi_litex_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, struct k_poll_signal *async) { return -ENOTSUP; } #endif /* CONFIG_SPI_ASYNC */ static int spi_litex_release(const struct device *dev, const struct spi_config *config) { const struct spi_litex_cfg *dev_config = dev->config; if (!(litex_read8(dev_config->status_addr) & BIT(0))) { return -EBUSY; } return 0; } /* Device Instantiation */ static const struct spi_driver_api spi_litex_api = { .transceive = spi_litex_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_litex_transceive_async, #endif /* CONFIG_SPI_ASYNC */ .release = spi_litex_release, }; #define SPI_INIT(n) \ static struct spi_litex_data spi_litex_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_litex_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_litex_data_##n, ctx), \ }; \ static struct spi_litex_cfg spi_litex_cfg_##n = { \ .control_addr = DT_INST_REG_ADDR_BY_NAME(n, control), \ .status_addr = DT_INST_REG_ADDR_BY_NAME(n, status), \ .mosi_addr = DT_INST_REG_ADDR_BY_NAME(n, mosi), \ .miso_addr = DT_INST_REG_ADDR_BY_NAME(n, miso), \ .cs_addr = DT_INST_REG_ADDR_BY_NAME(n, cs), \ .loopback_addr = DT_INST_REG_ADDR_BY_NAME(n, loopback), \ .clk_divider_exists = 
DT_INST_REG_HAS_NAME(n, clk_divider), \ .clk_divider_addr = DT_INST_REG_ADDR_BY_NAME_OR(n, clk_divider, 0), \ .data_width = DT_INST_PROP(n, data_width), \ .max_cs = DT_INST_PROP(n, max_cs), \ }; \ DEVICE_DT_INST_DEFINE(n, \ NULL, \ NULL, \ &spi_litex_data_##n, \ &spi_litex_cfg_##n, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_litex_api); DT_INST_FOREACH_STATUS_OKAY(SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_litex.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,952
```unknown config SPI_MCUX_FLEXCOMM bool "MCUX FLEXCOMM SPI driver" default y depends on DT_HAS_NXP_LPC_SPI_ENABLED select PINCTRL select RESET help Enable support for mcux flexcomm spi driver. if SPI_MCUX_FLEXCOMM config SPI_MCUX_FLEXCOMM_DMA bool "MCUX FLEXCOMM SPI DMA Support" select DMA help Enable the SPI DMA mode for SPI instances that enable dma channels in their device tree node. endif # SPI_MCUX_FLEXCOMM ```
/content/code_sandbox/drivers/spi/Kconfig.mcux_flexcomm
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
118
```unknown # SPI driver configuration options # # SPI Drivers # menuconfig SPI bool "Serial Peripheral Interface (SPI) bus drivers" help Enable support for the SPI hardware bus. if SPI config SPI_SHELL bool "SPI Shell" depends on SHELL help Enable SPI Shell. The currently SPI shell supports simple SPI write/read (transceive) operation. config SPI_ASYNC bool "Asynchronous call support" select POLL help This option enables the asynchronous API calls. config SPI_RTIO bool "RTIO support [EXPERIMENTAL]" select EXPERIMENTAL select RTIO help This option enables the RTIO API calls. RTIO support is experimental as the API itself is unstable. config SPI_SLAVE bool "Slave support [EXPERIMENTAL]" select EXPERIMENTAL help Enables Driver SPI slave operations. Slave support depends on the driver and the hardware it runs on. config SPI_EXTENDED_MODES bool "Extended modes [EXPERIMENTAL]" select EXPERIMENTAL help Enables extended operations in the SPI API. Currently, this enables the possibility to select the line mode (single/dual/ quad/octal), though none of these mode are really supported as it would require more features exposed into the SPI buffer. config SPI_INIT_PRIORITY int "Init priority" default KERNEL_INIT_PRIORITY_DEVICE help Device driver initialization priority. config SPI_COMPLETION_TIMEOUT_TOLERANCE int "Completion timeout tolerance (ms)" default 200 help The tolerance value in ms for the SPI completion timeout logic. config SPI_STATS bool "SPI device statistics" depends on STATS help Enable SPI device statistics. 
module = SPI module-str = spi source "subsys/logging/Kconfig.template.log_config" source "drivers/spi/Kconfig.b91" source "drivers/spi/Kconfig.stm32" source "drivers/spi/Kconfig.dw" source "drivers/spi/Kconfig.mcux_dspi" source "drivers/spi/Kconfig.mcux_ecspi" source "drivers/spi/Kconfig.mcux_flexcomm" source "drivers/spi/Kconfig.mcux_flexio" source "drivers/spi/Kconfig.mcux_lpspi" source "drivers/spi/Kconfig.rv32m1_lpspi" source "drivers/spi/Kconfig.sam" source "drivers/spi/Kconfig.sam0" source "drivers/spi/Kconfig.sifive" source "drivers/spi/Kconfig.spi_emul" source "drivers/spi/Kconfig.nrfx" source "drivers/spi/Kconfig.cc13xx_cc26xx" source "drivers/spi/Kconfig.litex" source "drivers/spi/Kconfig.oc_simple" source "drivers/spi/Kconfig.xec_qmspi" source "drivers/spi/Kconfig.gecko" source "drivers/spi/Kconfig.xlnx" source "drivers/spi/Kconfig.esp32" source "drivers/spi/Kconfig.test" source "drivers/spi/Kconfig.psoc6" source "drivers/spi/Kconfig.bitbang" source "drivers/spi/Kconfig.gd32" source "drivers/spi/Kconfig.mchp_mss_qspi" source "drivers/spi/Kconfig.pl022" source "drivers/spi/Kconfig.andes_atcspi200" source "drivers/spi/Kconfig.nxp_s32" source "drivers/spi/Kconfig.xmc4xxx" source "drivers/spi/Kconfig.pw" source "drivers/spi/Kconfig.smartbond" source "drivers/spi/Kconfig.opentitan" source "drivers/spi/Kconfig.numaker" source "drivers/spi/Kconfig.ambiq" source "drivers/spi/Kconfig.rpi_pico" source "drivers/spi/Kconfig.ifx_cat1" source "drivers/spi/Kconfig.sedi" source "drivers/spi/Kconfig.npcx" source "drivers/spi/Kconfig.mchp_mss" source "drivers/spi/Kconfig.grlib_spimctrl" source "drivers/spi/Kconfig.max32" endif # SPI ```
/content/code_sandbox/drivers/spi/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
863
```c
/*
 * Zephyr SPI driver for the OpenISA RV32M1 LPSPI peripheral (master only).
 *
 * Forked off the spi_mcux_lpi2c driver.
 *
 * Transfers are interrupt driven through the NXP fsl_lpspi SDK handle API:
 * each packet is started with LPSPI_MasterTransferNonBlocking() and the SDK
 * callback chains the next packet until both tx and rx are exhausted.
 */
#define DT_DRV_COMPAT openisa_rv32m1_lpspi

#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_lpspi.h>
#include <zephyr/drivers/pinctrl.h>
#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_rv32m1_lpspi);
#include <soc.h>
#include "spi_context.h"

/* Number of hardware chip selects exposed by the peripheral. */
#define CHIP_SELECT_COUNT	4
/* Maximum bits-per-frame accepted by LPSPI (see bitsPerFrame below). */
#define MAX_DATA_WIDTH		4096

/* Per-instance constant configuration, filled from devicetree. */
struct spi_mcux_config {
	LPSPI_Type *base;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	clock_ip_name_t clock_ip_name;
	uint32_t clock_ip_src;
	void (*irq_config_func)(const struct device *dev);
	const struct pinctrl_dev_config *pincfg;
};

/* Per-instance mutable state. */
struct spi_mcux_data {
	const struct device *dev;
	lpspi_master_handle_t handle;	/* fsl_lpspi async transfer handle */
	struct spi_context ctx;		/* generic Zephyr SPI context */
	size_t transfer_len;		/* length of the packet in flight */
};

/*
 * Start the next packet of the current spi_context transaction, or finish
 * the transaction (release CS, complete the context) when nothing is left.
 * Called from transceive() for the first packet and from the SDK callback
 * for every subsequent one.
 */
static void spi_mcux_transfer_next_packet(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = config->base;
	struct spi_context *ctx = &data->ctx;
	lpspi_transfer_t transfer;
	status_t status;

	if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
		/* nothing left to rx or tx, we're done! */
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return;
	}

	transfer.configFlags = kLPSPI_MasterPcsContinuous |
			       (ctx->config->slave << LPSPI_MASTER_PCS_SHIFT);

	if (ctx->tx_len == 0) {
		/* rx only, nothing to tx */
		transfer.txData = NULL;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else if (ctx->rx_len == 0) {
		/* tx only, nothing to rx */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = NULL;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len == ctx->rx_len) {
		/* rx and tx are the same length */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len > ctx->rx_len) {
		/* Break up the tx into multiple transfers so we don't have to
		 * rx into a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
		/* NOTE(review): kLPSPI_MasterPcsContinuous is already set in
		 * configFlags above; this |= is redundant but harmless.
		 */
		transfer.configFlags |= kLPSPI_MasterPcsContinuous;
	} else {
		/* Break up the rx into multiple transfers so we don't have to
		 * tx from a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
		transfer.configFlags |= kLPSPI_MasterPcsContinuous;
	}

	/* More than one buffer in either direction: keep PCS asserted
	 * across the packet boundary (redundant with the default above —
	 * see NOTE(review)).
	 */
	if (!(ctx->tx_count <= 1 && ctx->rx_count <= 1)) {
		transfer.configFlags |= kLPSPI_MasterPcsContinuous;
	}

	data->transfer_len = transfer.dataSize;

	status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start");
	}
}

/* IRQ entry point: delegate to the SDK's handle-based IRQ service routine. */
static void spi_mcux_isr(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = config->base;

	LPSPI_MasterTransferHandleIRQ(base, &data->handle);
}

/*
 * SDK completion callback (runs in IRQ context): account for the packet
 * just finished and kick off the next one.
 */
static void spi_mcux_master_transfer_callback(LPSPI_Type *base,
					      lpspi_master_handle_t *handle,
					      status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;

	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);

	spi_mcux_transfer_next_packet(data->dev);
}

/*
 * Apply a struct spi_config to the hardware.
 * Returns 0 on success, -ENOTSUP/-EINVAL/-ENODEV on unsupported or invalid
 * configuration. A config identical to the current one is a no-op.
 */
static int spi_mcux_configure(const struct device *dev,
			      const struct spi_config *spi_cfg)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = config->base;
	lpspi_master_config_t master_config;
	uint32_t clock_freq;
	uint32_t word_size;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	LPSPI_MasterGetDefaultConfig(&master_config);

	/* NOTE(review): with CHIP_SELECT_COUNT == 4 the valid slave indices
	 * are presumably 0..3, so this check looks off by one (`>` vs `>=`)
	 * — confirm against the hardware's PCS range before changing.
	 */
	if (spi_cfg->slave > CHIP_SELECT_COUNT) {
		LOG_ERR("Slave %d is greater than %d",
			spi_cfg->slave, CHIP_SELECT_COUNT);
		return -EINVAL;
	}

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if (word_size > MAX_DATA_WIDTH) {
		LOG_ERR("Word size %d is greater than %d",
			word_size, MAX_DATA_WIDTH);
		return -EINVAL;
	}

	master_config.bitsPerFrame = word_size;

	master_config.cpol = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) ?
			     kLPSPI_ClockPolarityActiveLow :
			     kLPSPI_ClockPolarityActiveHigh;
	master_config.cpha = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) ?
			     kLPSPI_ClockPhaseSecondEdge :
			     kLPSPI_ClockPhaseFirstEdge;
	master_config.direction = (spi_cfg->operation & SPI_TRANSFER_LSB) ?
				  kLPSPI_LsbFirst :
				  kLPSPI_MsbFirst;

	master_config.baudRate = spi_cfg->frequency;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
				   &clock_freq)) {
		return -EINVAL;
	}

	LPSPI_MasterInit(base, &master_config, clock_freq);

	LPSPI_MasterTransferCreateHandle(base, &data->handle,
					 spi_mcux_master_transfer_callback,
					 data);

	/* Clock out zeros when tx runs dry during an rx-longer transfer. */
	LPSPI_SetDummyData(base, 0);

	data->ctx.config = spi_cfg;

	return 0;
}

/*
 * Common sync/async transfer path: lock the context, (re)configure,
 * assert CS, start the packet chain, then wait for completion.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	spi_mcux_transfer_next_packet(dev);

	ret = spi_context_wait_for_completion(&data->ctx);
out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

/* spi_driver_api: blocking transceive. */
static int spi_mcux_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
/* spi_driver_api: asynchronous transceive with completion callback. */
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

/* spi_driver_api: drop any lock held on the bus. */
static int spi_mcux_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_mcux_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

/*
 * Device init: route the IP clock, hook up the IRQ, configure CS GPIOs
 * and pinctrl, then leave the context unlocked and ready.
 */
static int spi_mcux_init(const struct device *dev)
{
	int err;
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;

	CLOCK_SetIpSrc(config->clock_ip_name, config->clock_ip_src);

	config->irq_config_func(dev);

	data->dev = dev;

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err != 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static const struct spi_driver_api spi_mcux_driver_api = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
	.release = spi_mcux_release,
};

/* Instantiate one driver instance per enabled devicetree node. */
#define SPI_RV32M1_INIT(n)						\
	PINCTRL_DT_INST_DEFINE(n);					\
									\
	static void spi_mcux_config_func_##n(const struct device *dev);	\
									\
	static const struct spi_mcux_config spi_mcux_config_##n = {	\
		.base = (LPSPI_Type *) DT_INST_REG_ADDR(n),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),	\
		.clock_subsys = (clock_control_subsys_t)		\
			DT_INST_CLOCKS_CELL(n, name),			\
		.irq_config_func = spi_mcux_config_func_##n,		\
		.clock_ip_name = INST_DT_CLOCK_IP_NAME(n),		\
		.clock_ip_src = kCLOCK_IpSrcFircAsync,			\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
	};								\
									\
	static struct spi_mcux_data spi_mcux_data_##n = {		\
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL,			\
			    &spi_mcux_data_##n,				\
			    &spi_mcux_config_##n,			\
			    POST_KERNEL,				\
			    CONFIG_SPI_INIT_PRIORITY,			\
			    &spi_mcux_driver_api);			\
									\
	static void spi_mcux_config_func_##n(const struct device *dev)	\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n),				\
			    0,						\
			    spi_mcux_isr, DEVICE_DT_INST_GET(n), 0);	\
		irq_enable(DT_INST_IRQN(n));				\
	}

DT_INST_FOREACH_STATUS_OKAY(SPI_RV32M1_INIT)
```
/content/code_sandbox/drivers/spi/spi_rv32m1_lpspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,622
```unknown # Simple SPI Driver configuration options menuconfig SPI_OC_SIMPLE bool "OpenCores Simple SPI controller driver" default y depends on DT_HAS_OPENCORES_SPI_SIMPLE_ENABLED help Enable the Simple SPI controller config SPI_OC_SIMPLE_BUS_WIDTH def_int 8 depends on SPI_OC_SIMPLE ```
/content/code_sandbox/drivers/spi/Kconfig.oc_simple
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
68
```c
/*
 * Zephyr SPI driver implemented on top of the NXP FlexIO peripheral
 * (nxp,flexio-spi). Master only; uses two FlexIO shifters (tx/rx) and one
 * timer (SCK) programmed directly in spi_flexio_master_init(), and the
 * fsl_flexio_spi SDK handle API for interrupt-driven transfers.
 */
#define DT_DRV_COMPAT nxp_flexio_spi

#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_flexio_spi.h>
#include <zephyr/logging/log.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/misc/nxp_flexio/nxp_flexio.h>
LOG_MODULE_REGISTER(spi_mcux_flexio_spi, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"

/* Per-instance constant configuration, filled from devicetree. */
struct spi_mcux_flexio_config {
	FLEXIO_SPI_Type *flexio_spi;
	const struct device *flexio_dev;	/* parent FlexIO controller */
	const struct pinctrl_dev_config *pincfg;
	const struct nxp_flexio_child *child;	/* shifter/timer resources */
};

/* Per-instance mutable state. */
struct spi_mcux_flexio_data {
	const struct device *dev;
	flexio_spi_master_handle_t handle;	/* SDK async transfer handle */
	struct spi_context ctx;			/* generic Zephyr SPI context */
	size_t transfer_len;			/* length of packet in flight */
	uint8_t transfer_flags;			/* width/bit-order SDK flags */
};

/*
 * Start the next packet of the current spi_context transaction, or finish
 * the transaction (release CS, complete the context) when nothing is left.
 * Called once to prime the chain and then from the SDK callback.
 */
static void spi_mcux_transfer_next_packet(const struct device *dev)
{
	const struct spi_mcux_flexio_config *config = dev->config;
	struct spi_mcux_flexio_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	flexio_spi_transfer_t transfer;
	status_t status;

	if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
		/* nothing left to rx or tx, we're done! */
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return;
	}

	/* Keep CS continuous and apply the configured width/bit-order. */
	transfer.flags = kFLEXIO_SPI_csContinuous | data->transfer_flags;

	if (ctx->tx_len == 0) {
		/* rx only, nothing to tx */
		transfer.txData = NULL;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else if (ctx->rx_len == 0) {
		/* tx only, nothing to rx */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = NULL;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len == ctx->rx_len) {
		/* rx and tx are the same length */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len > ctx->rx_len) {
		/* Break up the tx into multiple transfers so we don't have to
		 * rx into a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else {
		/* Break up the rx into multiple transfers so we don't have to
		 * tx from a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	}

	data->transfer_len = transfer.dataSize;

	status = FLEXIO_SPI_MasterTransferNonBlocking(config->flexio_spi,
						      &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start");
	}
}

/*
 * IRQ handler registered with the parent FlexIO controller; delegates to
 * the SDK's handle-based IRQ routine.
 */
static int spi_mcux_flexio_isr(void *user_data)
{
	const struct device *dev = (const struct device *)user_data;
	const struct spi_mcux_flexio_config *config = dev->config;
	struct spi_mcux_flexio_data *data = dev->data;

	FLEXIO_SPI_MasterTransferHandleIRQ(config->flexio_spi, &data->handle);

	return 0;
}

/*
 * SDK completion callback (IRQ context): account for the finished packet
 * and chain the next one.
 */
static void spi_mcux_master_transfer_callback(FLEXIO_SPI_Type *flexio_spi,
					      flexio_spi_master_handle_t *handle,
					      status_t status, void *userData)
{
	struct spi_mcux_flexio_data *data = userData;

	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);

	spi_mcux_transfer_next_packet(data->dev);
}

/*
 * Program the FlexIO shifters and timer for SPI master operation.
 * Largely mirrors the SDK's FLEXIO_SPI_MasterInit(), with the clock
 * polarity (pol: CPOL bit) applied to the SCK timer pin here because the
 * SDK init does not take CPOL.
 */
static void spi_flexio_master_init(FLEXIO_SPI_Type *base,
				   flexio_spi_master_config_t *masterConfig,
				   uint8_t pol, uint32_t srcClock_Hz)
{
	assert(base != NULL);
	assert(masterConfig != NULL);

	flexio_shifter_config_t shifterConfig;
	flexio_timer_config_t timerConfig;
	uint32_t ctrlReg  = 0;
	uint16_t timerDiv = 0;
	uint16_t timerCmp = 0;

	/* Clear the shifterConfig & timerConfig struct. */
	(void)memset(&shifterConfig, 0, sizeof(shifterConfig));
	(void)memset(&timerConfig, 0, sizeof(timerConfig));

	/* Configure FLEXIO SPI Master */
	ctrlReg = base->flexioBase->CTRL;
	ctrlReg &= ~(FLEXIO_CTRL_DOZEN_MASK | FLEXIO_CTRL_DBGE_MASK |
		     FLEXIO_CTRL_FASTACC_MASK | FLEXIO_CTRL_FLEXEN_MASK);
	ctrlReg |= (FLEXIO_CTRL_DBGE(masterConfig->enableInDebug) |
		    FLEXIO_CTRL_FASTACC(masterConfig->enableFastAccess) |
		    FLEXIO_CTRL_FLEXEN(masterConfig->enableMaster));
	if (!masterConfig->enableInDoze) {
		ctrlReg |= FLEXIO_CTRL_DOZEN_MASK;
	}
	base->flexioBase->CTRL = ctrlReg;

	/* Do hardware configuration. */
	/* 1. Configure the shifter 0 for tx. */
	shifterConfig.timerSelect = base->timerIndex[0];
	shifterConfig.pinConfig = kFLEXIO_PinConfigOutput;
	shifterConfig.pinSelect = base->SDOPinIndex;
	shifterConfig.pinPolarity = kFLEXIO_PinActiveHigh;
	shifterConfig.shifterMode = kFLEXIO_ShifterModeTransmit;
	shifterConfig.inputSource = kFLEXIO_ShifterInputFromPin;
	if (masterConfig->phase == kFLEXIO_SPI_ClockPhaseFirstEdge) {
		/* SDK identifier is spelled "Negitive" upstream — keep as is. */
		shifterConfig.timerPolarity = kFLEXIO_ShifterTimerPolarityOnNegitive;
		shifterConfig.shifterStop = kFLEXIO_ShifterStopBitDisable;
		shifterConfig.shifterStart = kFLEXIO_ShifterStartBitDisabledLoadDataOnEnable;
	} else {
		shifterConfig.timerPolarity = kFLEXIO_ShifterTimerPolarityOnPositive;
		shifterConfig.shifterStop = kFLEXIO_ShifterStopBitLow;
		shifterConfig.shifterStart = kFLEXIO_ShifterStartBitDisabledLoadDataOnShift;
	}

	FLEXIO_SetShifterConfig(base->flexioBase, base->shifterIndex[0], &shifterConfig);

	/* 2. Configure the shifter 1 for rx. */
	shifterConfig.timerSelect = base->timerIndex[0];
	shifterConfig.pinConfig = kFLEXIO_PinConfigOutputDisabled;
	shifterConfig.pinSelect = base->SDIPinIndex;
	shifterConfig.pinPolarity = kFLEXIO_PinActiveHigh;
	shifterConfig.shifterMode = kFLEXIO_ShifterModeReceive;
	shifterConfig.inputSource = kFLEXIO_ShifterInputFromPin;
	shifterConfig.shifterStop = kFLEXIO_ShifterStopBitDisable;
	shifterConfig.shifterStart = kFLEXIO_ShifterStartBitDisabledLoadDataOnEnable;
	if (masterConfig->phase == kFLEXIO_SPI_ClockPhaseFirstEdge) {
		shifterConfig.timerPolarity = kFLEXIO_ShifterTimerPolarityOnPositive;
	} else {
		shifterConfig.timerPolarity = kFLEXIO_ShifterTimerPolarityOnNegitive;
	}

	FLEXIO_SetShifterConfig(base->flexioBase, base->shifterIndex[1], &shifterConfig);

	/*3. Configure the timer 0 for SCK. */
	timerConfig.triggerSelect = FLEXIO_TIMER_TRIGGER_SEL_SHIFTnSTAT(base->shifterIndex[0]);
	timerConfig.triggerPolarity = kFLEXIO_TimerTriggerPolarityActiveLow;
	timerConfig.triggerSource = kFLEXIO_TimerTriggerSourceInternal;
	timerConfig.pinConfig = kFLEXIO_PinConfigOutput;
	timerConfig.pinSelect = base->SCKPinIndex;
	/* CPOL: idle-high clock when pol is set. */
	timerConfig.pinPolarity = pol ? kFLEXIO_PinActiveLow : kFLEXIO_PinActiveHigh;
	timerConfig.timerMode = kFLEXIO_TimerModeDual8BitBaudBit;
	timerConfig.timerOutput = kFLEXIO_TimerOutputZeroNotAffectedByReset;
	timerConfig.timerDecrement = kFLEXIO_TimerDecSrcOnFlexIOClockShiftTimerOutput;
	timerConfig.timerReset = kFLEXIO_TimerResetNever;
	timerConfig.timerDisable = kFLEXIO_TimerDisableOnTimerCompare;
	timerConfig.timerEnable = kFLEXIO_TimerEnableOnTriggerHigh;
	timerConfig.timerStop = kFLEXIO_TimerStopBitEnableOnTimerDisable;
	timerConfig.timerStart = kFLEXIO_TimerStartBitEnabled;
	/* Low 8-bits are used to configure baudrate. */
	timerDiv = (uint16_t)(srcClock_Hz / masterConfig->baudRate_Bps);
	timerDiv = timerDiv / 2U - 1U;
	/* High 8-bits are used to configure shift clock edges(transfer width). */
	timerCmp = ((uint16_t)masterConfig->dataMode * 2U - 1U) << 8U;
	timerCmp |= timerDiv;
	timerConfig.timerCompare = timerCmp;

	FLEXIO_SetTimerConfig(base->flexioBase, base->timerIndex[0], &timerConfig);
}

/*
 * Apply a struct spi_config. Returns 0 on success, -ENOTSUP for
 * half-duplex/slave mode, -EINVAL for bad word size or clock rate lookup
 * failure. An identical config is a no-op.
 */
static int spi_mcux_flexio_configure(const struct device *dev,
				     const struct spi_config *spi_cfg)
{
	const struct spi_mcux_flexio_config *config = dev->config;
	struct spi_mcux_flexio_data *data = dev->data;

	flexio_spi_master_config_t master_config;
	uint32_t clock_freq;
	uint32_t word_size;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(spi_cfg->operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Mode Slave not supported");
		return -ENOTSUP;
	}

	FLEXIO_SPI_MasterGetDefaultConfig(&master_config);

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if ((word_size != 8) && (word_size != 16) && (word_size != 32)) {
		LOG_ERR("Word size %d must be 8, 16 or 32", word_size);
		return -EINVAL;
	}
	master_config.dataMode = word_size;

	/* Select the SDK transfer flag matching word size and bit order;
	 * stored and OR-ed into every transfer in transfer_next_packet().
	 */
	if (spi_cfg->operation & SPI_TRANSFER_LSB) {
		if (word_size == 8) {
			data->transfer_flags = kFLEXIO_SPI_8bitLsb;
		} else if (word_size == 16) {
			data->transfer_flags = kFLEXIO_SPI_16bitLsb;
		} else {
			data->transfer_flags = kFLEXIO_SPI_32bitLsb;
		}
	} else {
		if (word_size == 8) {
			data->transfer_flags = kFLEXIO_SPI_8bitMsb;
		} else if (word_size == 16) {
			data->transfer_flags = kFLEXIO_SPI_16bitMsb;
		} else {
			data->transfer_flags = kFLEXIO_SPI_32bitMsb;
		}
	}

	if (nxp_flexio_get_rate(config->flexio_dev, &clock_freq)) {
		return -EINVAL;
	}

	master_config.phase = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) ?
			      kFLEXIO_SPI_ClockPhaseSecondEdge :
			      kFLEXIO_SPI_ClockPhaseFirstEdge;
	master_config.baudRate_Bps = spi_cfg->frequency;

	spi_flexio_master_init(config->flexio_spi, &master_config,
			       (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL),
			       clock_freq);

	FLEXIO_SPI_MasterTransferCreateHandle(config->flexio_spi, &data->handle,
					      spi_mcux_master_transfer_callback,
					      data);

	/* No SetDummyData() for FlexIO_SPI */

	data->ctx.config = spi_cfg;

	return 0;
}

/*
 * Common sync/async transfer path. The parent FlexIO controller is locked
 * around configuration and around starting the first packet (with its IRQ
 * masked) because the FlexIO block is shared with other child drivers.
 */
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_mcux_flexio_config *config = dev->config;
	struct spi_mcux_flexio_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	nxp_flexio_lock(config->flexio_dev);
	ret = spi_mcux_flexio_configure(dev, spi_cfg);
	nxp_flexio_unlock(config->flexio_dev);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	nxp_flexio_lock(config->flexio_dev);
	nxp_flexio_irq_disable(config->flexio_dev);

	spi_mcux_transfer_next_packet(dev);

	nxp_flexio_irq_enable(config->flexio_dev);
	nxp_flexio_unlock(config->flexio_dev);

	ret = spi_context_wait_for_completion(&data->ctx);
out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

/* spi_driver_api: blocking transceive. */
static int spi_mcux_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
/* spi_driver_api: asynchronous transceive with completion callback. */
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

/* spi_driver_api: drop any lock held on the bus. */
static int spi_mcux_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_mcux_flexio_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

/*
 * Device init: attach to the parent FlexIO controller, configure CS GPIOs
 * and pinctrl, and leave the context ready.
 */
static int spi_mcux_init(const struct device *dev)
{
	const struct spi_mcux_flexio_config *config = dev->config;
	struct spi_mcux_flexio_data *data = dev->data;
	int err;

	err = nxp_flexio_child_attach(config->flexio_dev, config->child);
	if (err < 0) {
		return err;
	}

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	data->dev = dev;

	/* TODO: DMA */

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

	/* NOTE(review): this is the second unconditional unlock in this
	 * function (see above); looks redundant — confirm and drop one.
	 */
	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static const struct spi_driver_api spi_mcux_driver_api = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
	.release = spi_mcux_release,
};

/* Instantiate one driver instance per enabled devicetree node. */
#define SPI_MCUX_FLEXIO_SPI_INIT(n)						\
	PINCTRL_DT_INST_DEFINE(n);						\
										\
	static FLEXIO_SPI_Type flexio_spi_##n = {				\
		.flexioBase = (FLEXIO_Type *)DT_REG_ADDR(DT_INST_PARENT(n)),	\
		.SDOPinIndex = DT_INST_PROP(n, sdo_pin),			\
		.SDIPinIndex = DT_INST_PROP(n, sdi_pin),			\
		.SCKPinIndex = DT_INST_PROP(n, sck_pin),			\
	};									\
										\
	static const struct nxp_flexio_child nxp_flexio_spi_child_##n = {	\
		.isr = spi_mcux_flexio_isr,					\
		.user_data = (void *)DEVICE_DT_INST_GET(n),			\
		.res = {							\
			.shifter_index = flexio_spi_##n.shifterIndex,		\
			.shifter_count = ARRAY_SIZE(flexio_spi_##n.shifterIndex), \
			.timer_index = flexio_spi_##n.timerIndex,		\
			.timer_count = ARRAY_SIZE(flexio_spi_##n.timerIndex)	\
		}								\
	};									\
										\
	static const struct spi_mcux_flexio_config spi_mcux_flexio_config_##n = { \
		.flexio_spi = &flexio_spi_##n,					\
		.flexio_dev = DEVICE_DT_GET(DT_INST_PARENT(n)),			\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
		.child = &nxp_flexio_spi_child_##n,				\
	};									\
										\
	static struct spi_mcux_flexio_data spi_mcux_flexio_data_##n = {		\
		SPI_CONTEXT_INIT_LOCK(spi_mcux_flexio_data_##n, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_mcux_flexio_data_##n, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)		\
	};									\
										\
	DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL,				\
			      &spi_mcux_flexio_data_##n,			\
			      &spi_mcux_flexio_config_##n, POST_KERNEL,		\
			      CONFIG_SPI_INIT_PRIORITY,				\
			      &spi_mcux_driver_api);				\

DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_FLEXIO_SPI_INIT)
```
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,065
```objective-c
/* spi_dw_regs.h - Designware SPI driver private definitions */

/*
 * Register offsets for the Synopsys DesignWare SSI (DW_apb_ssi) controller
 * and the memory-mapped accessor helpers used by the spi_dw driver.
 * The DEFINE_MM_REG_* / DEFINE_TEST_BIT_OP macros are expected to be
 * provided by the including driver source before this header is included.
 */

#ifndef ZEPHYR_DRIVERS_SPI_SPI_DW_REGS_H_
#define ZEPHYR_DRIVERS_SPI_SPI_DW_REGS_H_

#ifdef __cplusplus
extern "C" {
#endif

/* DW_apb_ssi register map (byte offsets from the controller base). */
#define DW_SPI_REG_CTRLR0		(0x00)
#define DW_SPI_REG_CTRLR1		(0x04)
#define DW_SPI_REG_SSIENR		(0x08)
#define DW_SPI_REG_MWCR			(0x0c)
#define DW_SPI_REG_SER			(0x10)
#define DW_SPI_REG_BAUDR		(0x14)
#define DW_SPI_REG_TXFTLR		(0x18)
#define DW_SPI_REG_RXFTLR		(0x1c)
#define DW_SPI_REG_TXFLR		(0x20)
#define DW_SPI_REG_RXFLR		(0x24)
#define DW_SPI_REG_SR			(0x28)
#define DW_SPI_REG_IMR			(0x2c)
#define DW_SPI_REG_ISR			(0x30)
#define DW_SPI_REG_RISR			(0x34)
#define DW_SPI_REG_TXOICR		(0x38)
#define DW_SPI_REG_RXOICR		(0x3c)
#define DW_SPI_REG_RXUICR		(0x40)
#define DW_SPI_REG_MSTICR		(0x44)
#define DW_SPI_REG_ICR			(0x48)
#define DW_SPI_REG_DMACR		(0x4c)
#define DW_SPI_REG_DMATDLR		(0x50)
#define DW_SPI_REG_DMARDLR		(0x54)
#define DW_SPI_REG_IDR			(0x58)
#define DW_SPI_REG_SSI_COMP_VERSION	(0x5c)
#define DW_SPI_REG_DR			(0x60)
#define DW_SPI_REG_RX_SAMPLE_DLY	(0xf0)

/* Register helpers */
DEFINE_MM_REG_WRITE(ctrlr0, DW_SPI_REG_CTRLR0, 32)
DEFINE_MM_REG_READ(ctrlr0, DW_SPI_REG_CTRLR0, 32)
DEFINE_MM_REG_WRITE(txftlr, DW_SPI_REG_TXFTLR, 32)
DEFINE_MM_REG_WRITE(rxftlr, DW_SPI_REG_RXFTLR, 32)
DEFINE_MM_REG_READ(rxftlr, DW_SPI_REG_RXFTLR, 32)
DEFINE_MM_REG_READ(txftlr, DW_SPI_REG_TXFTLR, 32)
DEFINE_MM_REG_WRITE(dr, DW_SPI_REG_DR, 32)
DEFINE_MM_REG_READ(dr, DW_SPI_REG_DR, 32)
DEFINE_MM_REG_READ(ssi_comp_version, DW_SPI_REG_SSI_COMP_VERSION, 32)

/* Narrow (16/8-bit) accesses are the default; some interconnects only
 * support full 32-bit word accesses — gated by the Kconfig below.
 */
#ifdef CONFIG_SPI_DW_ACCESS_WORD_ONLY
DEFINE_MM_REG_WRITE(ctrlr1, DW_SPI_REG_CTRLR1, 32)
DEFINE_MM_REG_READ(ctrlr1, DW_SPI_REG_CTRLR1, 32)
DEFINE_MM_REG_WRITE(ser, DW_SPI_REG_SER, 32)
#else
DEFINE_MM_REG_WRITE(ctrlr1, DW_SPI_REG_CTRLR1, 16)
DEFINE_MM_REG_READ(ctrlr1, DW_SPI_REG_CTRLR1, 16)
DEFINE_MM_REG_WRITE(ser, DW_SPI_REG_SER, 8)
#endif

/* ICR is on a unique bit */
DEFINE_TEST_BIT_OP(icr, DW_SPI_REG_ICR, DW_SPI_SR_ICR_BIT)
/* Reading ICR clears all combined interrupts; expressed as a bit test. */
#define clear_interrupts(dev) test_bit_icr(dev)

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_DRIVERS_SPI_SPI_DW_REGS_H_ */
```
/content/code_sandbox/drivers/spi/spi_dw_regs.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
759
```unknown config SPI_EMUL bool "SPI emulator" default y depends on DT_HAS_ZEPHYR_SPI_EMUL_CONTROLLER_ENABLED depends on EMUL help Enable the SPI emulator driver. This is a fake driver in that it does not talk to real hardware. Instead it talks to emulation drivers that pretend to be devices on the emulated SPI bus. It is used for testing drivers for SPI devices. ```
/content/code_sandbox/drivers/spi/Kconfig.spi_emul
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
94
```c
/*
 * SPI shim driver for the nRF legacy SPI peripheral, layered on the
 * nrfx_spi HAL driver.  Transfers are split into continuous chunks that
 * the HAL can handle; a timeout aborts the transaction by deinitializing
 * the HAL driver (see transceive() and event_handler()).
 */

#include <zephyr/drivers/spi.h>
#include <zephyr/pm/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <soc.h>
#include <nrfx_spi.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_nrfx_spi, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"
#include "spi_nrfx_common.h"

struct spi_nrfx_data {
	struct spi_context ctx;
	/* Back-pointer to the device, needed by the nrfx event handler. */
	const struct device *dev;
	/* Length of the chunk currently in flight; 0 signals an aborted
	 * (timed out) transaction to event_handler().
	 */
	size_t chunk_len;
	bool busy;
	/* True while the nrfx driver instance is initialized. */
	bool initialized;
};

struct spi_nrfx_config {
	nrfx_spi_t spi;
	nrfx_spi_config_t def_config;
	void (*irq_connect)(void);
	const struct pinctrl_dev_config *pcfg;
	uint32_t wake_pin;
	nrfx_gpiote_t wake_gpiote;
};

static void event_handler(const nrfx_spi_evt_t *p_event, void *p_context);

static inline nrf_spi_frequency_t get_nrf_spi_frequency(uint32_t frequency)
{
	/* Get the highest supported frequency not exceeding the requested one. */
	if (frequency < 250000) {
		return NRF_SPI_FREQ_125K;
	} else if (frequency < 500000) {
		return NRF_SPI_FREQ_250K;
	} else if (frequency < 1000000) {
		return NRF_SPI_FREQ_500K;
	} else if (frequency < 2000000) {
		return NRF_SPI_FREQ_1M;
	} else if (frequency < 4000000) {
		return NRF_SPI_FREQ_2M;
	} else if (frequency < 8000000) {
		return NRF_SPI_FREQ_4M;
	} else {
		return NRF_SPI_FREQ_8M;
	}
}

/* Translate Zephyr SPI mode bits (CPOL/CPHA) to the nRF HAL mode enum. */
static inline nrf_spi_mode_t get_nrf_spi_mode(uint16_t operation)
{
	if (SPI_MODE_GET(operation) & SPI_MODE_CPOL) {
		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
			return NRF_SPI_MODE_3;
		} else {
			return NRF_SPI_MODE_2;
		}
	} else {
		if (SPI_MODE_GET(operation) & SPI_MODE_CPHA) {
			return NRF_SPI_MODE_1;
		} else {
			return NRF_SPI_MODE_0;
		}
	}
}

static inline nrf_spi_bit_order_t get_nrf_spi_bit_order(uint16_t operation)
{
	if (operation & SPI_TRANSFER_LSB) {
		return NRF_SPI_BIT_ORDER_LSB_FIRST;
	} else {
		return NRF_SPI_BIT_ORDER_MSB_FIRST;
	}
}

/* Validate @spi_cfg and (re)initialize the nrfx driver accordingly.
 * Returns 0 on success, -ENOTSUP/-EINVAL for unsupported settings,
 * -EIO if the nrfx driver fails to initialize.
 */
static int configure(const struct device *dev,
		     const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_context *ctx = &dev_data->ctx;
	nrfx_spi_config_t config;
	nrfx_err_t result;

	if (dev_data->initialized && spi_context_configured(ctx, spi_cfg)) {
		/* Already configured. No need to do it again. */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(spi_cfg->operation) != SPI_OP_MODE_MASTER) {
		LOG_ERR("Slave mode is not supported on %s", dev->name);
		return -EINVAL;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
	    (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -EINVAL;
	}

	if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) {
		LOG_ERR("Word sizes other than 8 bits are not supported");
		return -EINVAL;
	}

	if (spi_cfg->frequency < 125000) {
		LOG_ERR("Frequencies lower than 125 kHz are not supported");
		return -EINVAL;
	}

	config = dev_config->def_config;
	config.frequency = get_nrf_spi_frequency(spi_cfg->frequency);
	config.mode      = get_nrf_spi_mode(spi_cfg->operation);
	config.bit_order = get_nrf_spi_bit_order(spi_cfg->operation);

	/* Park SCK at its idle level (high for CPOL=1) before the HAL
	 * takes over the pin.
	 */
	nrf_gpio_pin_write(nrf_spi_sck_pin_get(dev_config->spi.p_reg),
			   spi_cfg->operation & SPI_MODE_CPOL ? 1 : 0);

	if (dev_data->initialized) {
		/* Reconfiguration requires a full uninit/init cycle. */
		nrfx_spi_uninit(&dev_config->spi);
		dev_data->initialized = false;
	}

	result = nrfx_spi_init(&dev_config->spi, &config,
			       event_handler, dev_data);
	if (result != NRFX_SUCCESS) {
		LOG_ERR("Failed to initialize nrfx driver: %08x", result);
		return -EIO;
	}
	dev_data->initialized = true;

	ctx->config = spi_cfg;

	return 0;
}

/* Complete the current transaction towards the SPI context and mark the
 * driver idle.
 */
static void finish_transaction(const struct device *dev, int error)
{
	struct spi_nrfx_data *dev_data = dev->data;
	struct spi_context *ctx = &dev_data->ctx;

	LOG_DBG("Transaction finished with status %d", error);

	spi_context_complete(ctx, dev, error);
	dev_data->busy = false;
}

/* Start the next continuous chunk of the transaction, or finish the
 * transaction when no data is left (or the HAL refuses the transfer).
 */
static void transfer_next_chunk(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;
	struct spi_context *ctx = &dev_data->ctx;
	int error = 0;

	size_t chunk_len = spi_context_max_continuous_chunk(ctx);

	if (chunk_len > 0) {
		nrfx_spi_xfer_desc_t xfer;
		nrfx_err_t result;

		dev_data->chunk_len = chunk_len;

		xfer.p_tx_buffer = ctx->tx_buf;
		xfer.tx_length   = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
		xfer.p_rx_buffer = ctx->rx_buf;
		xfer.rx_length   = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
		result = nrfx_spi_xfer(&dev_config->spi, &xfer, 0);
		if (result == NRFX_SUCCESS) {
			return;
		}

		error = -EIO;
	}

	finish_transaction(dev, error);
}

/* nrfx driver event callback; runs in interrupt context. */
static void event_handler(const nrfx_spi_evt_t *p_event, void *p_context)
{
	struct spi_nrfx_data *dev_data = p_context;

	if (p_event->type == NRFX_SPI_EVENT_DONE) {
		/* Chunk length is set to 0 when a transaction is aborted
		 * due to a timeout.
		 */
		if (dev_data->chunk_len == 0) {
			finish_transaction(dev_data->dev, -ETIMEDOUT);
			return;
		}

		spi_context_update_tx(&dev_data->ctx, 1, dev_data->chunk_len);
		spi_context_update_rx(&dev_data->ctx, 1, dev_data->chunk_len);

		transfer_next_chunk(dev_data->dev);
	}
}

/* Common synchronous/asynchronous transceive implementation. */
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;
	int error;

	spi_context_lock(&dev_data->ctx, asynchronous, cb, userdata, spi_cfg);

	error = configure(dev, spi_cfg);
	if (error == 0) {
		dev_data->busy = true;

		if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
			error = spi_nrfx_wake_request(&dev_config->wake_gpiote,
						      dev_config->wake_pin);
			if (error == -ETIMEDOUT) {
				LOG_WRN("Waiting for WAKE acknowledgment timed out");
				/* If timeout occurs, try to perform the transfer
				 * anyway, just in case the slave device was unable
				 * to signal that it was already awaken and prepared
				 * for the transfer.
				 */
			}
		}

		spi_context_buffers_setup(&dev_data->ctx, tx_bufs, rx_bufs, 1);
		spi_context_cs_control(&dev_data->ctx, true);

		transfer_next_chunk(dev);

		error = spi_context_wait_for_completion(&dev_data->ctx);
		if (error == -ETIMEDOUT) {
			/* Set the chunk length to 0 so that event_handler()
			 * knows that the transaction timed out and is to be
			 * aborted.
			 */
			dev_data->chunk_len = 0;
			/* Abort the current transfer by deinitializing
			 * the nrfx driver.
			 */
			nrfx_spi_uninit(&dev_config->spi);
			dev_data->initialized = false;

			/* Make sure the transaction is finished (it may be
			 * already finished if it actually did complete before
			 * the nrfx driver was deinitialized).
			 */
			finish_transaction(dev, -ETIMEDOUT);

			/* Clean up the driver state. */
			k_sem_reset(&dev_data->ctx.sync);
		}

		spi_context_cs_control(&dev_data->ctx, false);
	}

	spi_context_release(&dev_data->ctx, error);

	return error;
}

static int spi_nrfx_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_nrfx_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_nrfx_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_nrfx_data *dev_data = dev->data;

	if (!spi_context_configured(&dev_data->ctx, spi_cfg)) {
		return -EINVAL;
	}

	if (dev_data->busy) {
		return -EBUSY;
	}

	spi_context_unlock_unconditionally(&dev_data->ctx);

	return 0;
}

static const struct spi_driver_api spi_nrfx_driver_api = {
	.transceive = spi_nrfx_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_nrfx_transceive_async,
#endif
	.release = spi_nrfx_release,
};

#ifdef CONFIG_PM_DEVICE
static int spi_nrfx_pm_action(const struct device *dev,
			      enum pm_device_action action)
{
	int ret = 0;
	struct spi_nrfx_data *dev_data = dev->data;
	const struct spi_nrfx_config *dev_config = dev->config;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(dev_config->pcfg,
					  PINCTRL_STATE_DEFAULT);
		if (ret < 0) {
			return ret;
		}
		/* nrfx_spi_init() will be called at configuration before
		 * the next transfer.
		 */
		break;

	case PM_DEVICE_ACTION_SUSPEND:
		if (dev_data->initialized) {
			nrfx_spi_uninit(&dev_config->spi);
			dev_data->initialized = false;
		}

		ret = pinctrl_apply_state(dev_config->pcfg,
					  PINCTRL_STATE_SLEEP);
		if (ret < 0) {
			return ret;
		}
		break;

	default:
		ret = -ENOTSUP;
	}

	return ret;
}
#endif /* CONFIG_PM_DEVICE */

static int spi_nrfx_init(const struct device *dev)
{
	const struct spi_nrfx_config *dev_config = dev->config;
	struct spi_nrfx_data *dev_data = dev->data;
	int err;

	err = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	if (dev_config->wake_pin != WAKE_PIN_NOT_USED) {
		err = spi_nrfx_wake_init(&dev_config->wake_gpiote, dev_config->wake_pin);
		if (err == -ENODEV) {
			LOG_ERR("Failed to allocate GPIOTE channel for WAKE");
			return err;
		}
		if (err == -EIO) {
			LOG_ERR("Failed to configure WAKE pin");
			return err;
		}
	}

	dev_config->irq_connect();

	err = spi_context_cs_configure_all(&dev_data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&dev_data->ctx);

	return 0;
}

/*
 * Current factors requiring use of DT_NODELABEL:
 *
 * - HAL design (requirement of drv_inst_idx in nrfx_spi_t)
 * - Name-based HAL IRQ handlers, e.g. nrfx_spi_0_irq_handler
 */

#define SPI(idx)		DT_NODELABEL(spi##idx)
#define SPI_PROP(idx, prop)	DT_PROP(SPI(idx), prop)

#define SPI_NRFX_SPI_DEFINE(idx)					       \
	NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(SPI(idx));			       \
	static void irq_connect##idx(void)				       \
	{								       \
		IRQ_CONNECT(DT_IRQN(SPI(idx)), DT_IRQ(SPI(idx), priority),     \
			    nrfx_isr, nrfx_spi_##idx##_irq_handler, 0);	       \
	}								       \
	static struct spi_nrfx_data spi_##idx##_data = {		       \
		SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),		       \
		SPI_CONTEXT_INIT_SYNC(spi_##idx##_data, ctx),		       \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(SPI(idx), ctx)		       \
		.dev = DEVICE_DT_GET(SPI(idx)),				       \
		.busy = false,						       \
	};								       \
	PINCTRL_DT_DEFINE(SPI(idx));					       \
	static const struct spi_nrfx_config spi_##idx##z_config = {	       \
		.spi = {						       \
			.p_reg = (NRF_SPI_Type *)DT_REG_ADDR(SPI(idx)),	       \
			.drv_inst_idx = NRFX_SPI##idx##_INST_IDX,	       \
		},							       \
		.def_config = {						       \
			.skip_gpio_cfg = true,				       \
			.skip_psel_cfg = true,				       \
			.ss_pin = NRFX_SPI_PIN_NOT_USED,		       \
			.orc = SPI_PROP(idx, overrun_character),	       \
		},							       \
		.irq_connect = irq_connect##idx,			       \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(SPI(idx)),		       \
		.wake_pin = NRF_DT_GPIOS_TO_PSEL_OR(SPI(idx), wake_gpios,      \
						    WAKE_PIN_NOT_USED),	       \
		.wake_gpiote = WAKE_GPIOTE_INSTANCE(SPI(idx)),		       \
	};								       \
	BUILD_ASSERT(!DT_NODE_HAS_PROP(SPI(idx), wake_gpios) ||		       \
		     !(DT_GPIO_FLAGS(SPI(idx), wake_gpios) & GPIO_ACTIVE_LOW), \
		     "WAKE line must be configured as active high");	       \
	PM_DEVICE_DT_DEFINE(SPI(idx), spi_nrfx_pm_action);		       \
	DEVICE_DT_DEFINE(SPI(idx),					       \
			 spi_nrfx_init,					       \
			 PM_DEVICE_DT_GET(SPI(idx)),			       \
			 &spi_##idx##_data,				       \
			 &spi_##idx##z_config,				       \
			 POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,		       \
			 &spi_nrfx_driver_api)

#ifdef CONFIG_HAS_HW_NRF_SPI0
SPI_NRFX_SPI_DEFINE(0);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPI1
SPI_NRFX_SPI_DEFINE(1);
#endif

#ifdef CONFIG_HAS_HW_NRF_SPI2
SPI_NRFX_SPI_DEFINE(2);
#endif
```
/content/code_sandbox/drivers/spi/spi_nrfx_spi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,546
```c /* * */ /** * @file * @brief Async callback used with signal notifier */ #include <zephyr/kernel.h> #include <zephyr/device.h> #ifdef CONFIG_POLL void z_spi_transfer_signal_cb(const struct device *dev, int result, void *userdata) { ARG_UNUSED(dev); struct k_poll_signal *sig = userdata; k_poll_signal_raise(sig, result); } #endif /* CONFIG_POLL */ ```
/content/code_sandbox/drivers/spi/spi_signal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
90
```objective-c /* * */ #include <zephyr/kernel.h> #include <zephyr/drivers/spi.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys_clock.h> #include "spi_context.h" #include <soc.h> static inline uint8_t get_dfs_value(const struct spi_config *config) { switch (SPI_WORD_SIZE_GET(config->operation)) { case 8: return 1; case 16: return 2; case 24: return 3; case 32: return 4; default: return 1; } } static inline void litex_spi_tx_put(uint8_t len, uint32_t *txd, const uint8_t *tx_buf) { switch (len) { case 4: *txd = sys_get_be32(tx_buf); break; case 3: *txd = sys_get_be24(tx_buf); break; case 2: *txd = sys_get_be16(tx_buf); break; default: *txd = *tx_buf; break; } } static inline void litex_spi_rx_put(uint8_t len, uint32_t *rxd, uint8_t *rx_buf) { switch (len) { case 4: sys_put_be32(*rxd, rx_buf); break; case 3: sys_put_be24(*rxd, rx_buf); break; case 2: sys_put_be16(*rxd, rx_buf); break; default: *rx_buf = *rxd; break; } } ```
/content/code_sandbox/drivers/spi/spi_litex_common.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
347
```c /* * */ #define DT_DRV_COMPAT gaisler_spimctrl #include <zephyr/drivers/spi.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_spimctrl); #include "spi_context.h" struct spimctrl_regs { uint32_t conf; uint32_t ctrl; uint32_t stat; uint32_t rx; uint32_t tx; }; #define CONF_READCMD 0x0000007f #define CTRL_RST 0x00000010 #define CTRL_CSN 0x00000008 #define CTRL_EAS 0x00000004 #define CTRL_IEN 0x00000002 #define CTRL_USRC 0x00000001 #define STAT_INIT 0x00000004 #define STAT_BUSY 0x00000002 #define STAT_DONE 0x00000001 #define SPI_DATA(dev) ((struct data *) ((dev)->data)) struct cfg { volatile struct spimctrl_regs *regs; int interrupt; }; struct data { struct spi_context ctx; }; static int spi_config(struct spi_context *ctx, const struct spi_config *config) { if (config->slave != 0) { LOG_ERR("More slaves than supported"); return -ENOTSUP; } if (SPI_WORD_SIZE_GET(config->operation) != 8) { LOG_ERR("Word size must be 8"); return -ENOTSUP; } if (config->operation & SPI_CS_ACTIVE_HIGH) { LOG_ERR("CS active high not supported"); return -ENOTSUP; } if (config->operation & SPI_LOCK_ON) { LOG_ERR("Lock On not supported"); return -ENOTSUP; } if ((config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) { LOG_ERR("Only supports single mode"); return -ENOTSUP; } if (config->operation & SPI_TRANSFER_LSB) { LOG_ERR("LSB first not supported"); return -ENOTSUP; } if (config->operation & (SPI_MODE_CPOL | SPI_MODE_CPHA)) { LOG_ERR("Only supports CPOL=CPHA=0"); return -ENOTSUP; } if (config->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } if (config->operation & SPI_MODE_LOOP) { LOG_ERR("Loopback not supported"); return -ENOTSUP; } ctx->config = config; return 0; } static int transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct cfg *const cfg = dev->config; volatile struct spimctrl_regs *const regs = cfg->regs; struct spi_context 
*ctx = &SPI_DATA(dev)->ctx; uint8_t txval; int rc; spi_context_lock(ctx, false, NULL, NULL, config); rc = spi_config(ctx, config); if (rc) { LOG_ERR("%s: config", __func__); spi_context_release(ctx, rc); return rc; } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); regs->ctrl |= (CTRL_USRC | CTRL_IEN); regs->ctrl &= ~CTRL_CSN; if (spi_context_tx_buf_on(ctx)) { txval = *ctx->tx_buf; spi_context_update_tx(ctx, 1, 1); } else { txval = 0; } /* This will eventually trig the interrupt */ regs->tx = txval; rc = spi_context_wait_for_completion(ctx); regs->ctrl |= CTRL_CSN; regs->ctrl &= ~CTRL_USRC; spi_context_release(ctx, rc); return 0; } #ifdef CONFIG_SPI_ASYNC static int transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, struct k_poll_signal *async) { return -ENOTSUP; } #endif /* CONFIG_SPI_ASYNC */ static int release(const struct device *dev, const struct spi_config *config) { spi_context_unlock_unconditionally(&SPI_DATA(dev)->ctx); return 0; } static void spim_isr(struct device *dev) { const struct cfg *const cfg = dev->config; volatile struct spimctrl_regs *const regs = cfg->regs; struct spi_context *ctx = &SPI_DATA(dev)->ctx; uint8_t rx_byte; uint8_t val; if ((regs->stat & STAT_DONE) == 0) { return; } regs->stat = STAT_DONE; /* Always read register and maybe write mem. 
*/ rx_byte = regs->rx; if (spi_context_rx_on(ctx)) { *ctx->rx_buf = rx_byte; spi_context_update_rx(ctx, 1, 1); } if (spi_context_tx_buf_on(ctx) == false && spi_context_rx_buf_on(ctx) == false) { regs->ctrl &= ~CTRL_IEN; spi_context_complete(ctx, dev, 0); return; } val = 0; if (spi_context_tx_buf_on(ctx)) { val = *ctx->tx_buf; spi_context_update_tx(ctx, 1, 1); } regs->tx = val; } static int init(const struct device *dev) { const struct cfg *const cfg = dev->config; volatile struct spimctrl_regs *const regs = cfg->regs; regs->ctrl = CTRL_CSN; while (regs->stat & STAT_BUSY) { ; } regs->stat = STAT_DONE; irq_connect_dynamic( cfg->interrupt, 0, (void (*)(const void *)) spim_isr, dev, 0 ); irq_enable(cfg->interrupt); spi_context_unlock_unconditionally(&SPI_DATA(dev)->ctx); return 0; } static struct spi_driver_api api = { .transceive = transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = transceive_async, #endif /* CONFIG_SPI_ASYNC */ .release = release, }; #define SPI_INIT(n) \ static const struct cfg cfg_##n = { \ .regs = (struct spimctrl_regs *) \ DT_INST_REG_ADDR(n), \ .interrupt = DT_INST_IRQN(n), \ }; \ static struct data data_##n = { \ SPI_CONTEXT_INIT_LOCK(data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(data_##n, ctx), \ }; \ DEVICE_DT_INST_DEFINE(n, \ init, \ NULL, \ &data_##n, \ &cfg_##n, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &api); DT_INST_FOREACH_STATUS_OKAY(SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_grlib_spimctrl.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,514
```c /* * */ #define DT_DRV_COMPAT nxp_imx_lpspi #include <errno.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/clock_control.h> #include <fsl_lpspi.h> #if CONFIG_NXP_LP_FLEXCOMM #include <zephyr/drivers/mfd/nxp_lp_flexcomm.h> #endif #include <zephyr/logging/log.h> #include <zephyr/irq.h> #ifdef CONFIG_SPI_MCUX_LPSPI_DMA #include <zephyr/drivers/dma.h> #endif #include <zephyr/drivers/pinctrl.h> #ifdef CONFIG_SPI_RTIO #include <zephyr/rtio/rtio.h> #include <zephyr/spinlock.h> #endif LOG_MODULE_REGISTER(spi_mcux_lpspi, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" #define CHIP_SELECT_COUNT 4 #define MAX_DATA_WIDTH 4096 /* Required by DEVICE_MMIO_NAMED_* macros */ #define DEV_CFG(_dev) \ ((const struct spi_mcux_config *)(_dev)->config) #define DEV_DATA(_dev) ((struct spi_mcux_data *)(_dev)->data) struct spi_mcux_config { DEVICE_MMIO_NAMED_ROM(reg_base); #ifdef CONFIG_NXP_LP_FLEXCOMM const struct device *parent_dev; #endif const struct device *clock_dev; clock_control_subsys_t clock_subsys; void (*irq_config_func)(const struct device *dev); uint32_t pcs_sck_delay; uint32_t sck_pcs_delay; uint32_t transfer_delay; const struct pinctrl_dev_config *pincfg; lpspi_pin_config_t data_pin_config; }; #ifdef CONFIG_SPI_MCUX_LPSPI_DMA #define SPI_MCUX_LPSPI_DMA_ERROR_FLAG 0x01 #define SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG 0x02 #define SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG 0x04 #define SPI_MCUX_LPSPI_DMA_DONE_FLAG \ (SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG | SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG) struct stream { const struct device *dma_dev; uint32_t channel; /* stores the channel for dma */ struct dma_config dma_cfg; struct dma_block_config dma_blk_cfg; }; #endif struct spi_mcux_data { DEVICE_MMIO_NAMED_RAM(reg_base); const struct device *dev; lpspi_master_handle_t handle; struct spi_context ctx; size_t transfer_len; #ifdef CONFIG_SPI_RTIO struct rtio *r; struct mpsc io_q; struct rtio_iodev iodev; struct rtio_iodev_sqe *txn_head; struct rtio_iodev_sqe *txn_curr; struct spi_dt_spec dt_spec; 
struct k_spinlock lock; #endif #ifdef CONFIG_SPI_MCUX_LPSPI_DMA volatile uint32_t status_flags; struct stream dma_rx; struct stream dma_tx; /* dummy value used for transferring NOP when tx buf is null */ uint32_t dummy_tx_buffer; /* dummy value used to read RX data into when rx buf is null */ uint32_t dummy_rx_buffer; #endif }; static int spi_mcux_transfer_next_packet(const struct device *dev) { /* const struct spi_mcux_config *config = dev->config; */ struct spi_mcux_data *data = dev->data; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); struct spi_context *ctx = &data->ctx; lpspi_transfer_t transfer; status_t status; if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) { /* nothing left to rx or tx, we're done! */ spi_context_cs_control(&data->ctx, false); spi_context_complete(&data->ctx, dev, 0); return 0; } transfer.configFlags = kLPSPI_MasterPcsContinuous | (ctx->config->slave << LPSPI_MASTER_PCS_SHIFT); if (ctx->tx_len == 0) { /* rx only, nothing to tx */ transfer.txData = NULL; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->rx_len; } else if (ctx->rx_len == 0) { /* tx only, nothing to rx */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = NULL; transfer.dataSize = ctx->tx_len; } else if (ctx->tx_len == ctx->rx_len) { /* rx and tx are the same length */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->tx_len; } else if (ctx->tx_len > ctx->rx_len) { /* Break up the tx into multiple transfers so we don't have to * rx into a longer intermediate buffer. Leave chip select * active between transfers. */ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->rx_len; } else { /* Break up the rx into multiple transfers so we don't have to * tx from a longer intermediate buffer. Leave chip select * active between transfers. 
*/ transfer.txData = (uint8_t *) ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->tx_len; } data->transfer_len = transfer.dataSize; status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); if (status != kStatus_Success) { LOG_ERR("Transfer could not start on %s: %d", dev->name, status); return status == kStatus_LPSPI_Busy ? -EBUSY : -EINVAL; } return 0; } static void spi_mcux_isr(const struct device *dev) { /* const struct spi_mcux_config *config = dev->config; */ struct spi_mcux_data *data = dev->data; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); #if CONFIG_NXP_LP_FLEXCOMM LPSPI_MasterTransferHandleIRQ(LPSPI_GetInstance(base), &data->handle); #else LPSPI_MasterTransferHandleIRQ(base, &data->handle); #endif } #ifdef CONFIG_SPI_RTIO static void spi_mcux_iodev_complete(const struct device *dev, int status); #endif static void spi_mcux_master_transfer_callback(LPSPI_Type *base, lpspi_master_handle_t *handle, status_t status, void *userData) { struct spi_mcux_data *data = userData; #ifdef CONFIG_SPI_RTIO if (data->txn_head != NULL) { spi_mcux_iodev_complete(data->dev, status); return; } #endif spi_context_update_tx(&data->ctx, 1, data->transfer_len); spi_context_update_rx(&data->ctx, 1, data->transfer_len); spi_mcux_transfer_next_packet(data->dev); } static int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); lpspi_master_config_t master_config; uint32_t clock_freq; uint32_t word_size; if (spi_context_configured(&data->ctx, spi_cfg)) { /* This configuration is already in use */ return 0; } if (spi_cfg->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } LPSPI_MasterGetDefaultConfig(&master_config); if (spi_cfg->slave > CHIP_SELECT_COUNT) { LOG_ERR("Slave %d is greater than %d", 
spi_cfg->slave, CHIP_SELECT_COUNT); return -EINVAL; } word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (word_size > MAX_DATA_WIDTH) { LOG_ERR("Word size %d is greater than %d", word_size, MAX_DATA_WIDTH); return -EINVAL; } master_config.bitsPerFrame = word_size; master_config.cpol = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) ? kLPSPI_ClockPolarityActiveLow : kLPSPI_ClockPolarityActiveHigh; master_config.cpha = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) ? kLPSPI_ClockPhaseSecondEdge : kLPSPI_ClockPhaseFirstEdge; master_config.direction = (spi_cfg->operation & SPI_TRANSFER_LSB) ? kLPSPI_LsbFirst : kLPSPI_MsbFirst; master_config.baudRate = spi_cfg->frequency; master_config.pcsToSckDelayInNanoSec = config->pcs_sck_delay; master_config.lastSckToPcsDelayInNanoSec = config->sck_pcs_delay; master_config.betweenTransferDelayInNanoSec = config->transfer_delay; master_config.pinCfg = config->data_pin_config; if (!device_is_ready(config->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } if (data->ctx.config != NULL) { /* Setting the baud rate in LPSPI_MasterInit requires module to be disabled. Only * disable if already configured, otherwise the clock is not enabled and the * CR register cannot be written. */ LPSPI_Enable(base, false); while ((base->CR & LPSPI_CR_MEN_MASK) != 0U) { /* Wait until LPSPI is disabled. Datasheet: * After writing 0, MEN (Module Enable) remains set until the LPSPI has * completed the current transfer and is idle. 
*/ } } LPSPI_MasterInit(base, &master_config, clock_freq); LPSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_transfer_callback, data); LPSPI_SetDummyData(base, 0); data->ctx.config = spi_cfg; return 0; } #ifdef CONFIG_SPI_MCUX_LPSPI_DMA static int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size); /* This function is executed in the interrupt context */ static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status) { /* arg directly holds the spi device */ const struct device *spi_dev = arg; struct spi_mcux_data *data = (struct spi_mcux_data *)spi_dev->data; if (status < 0) { LOG_ERR("DMA callback error with channel %d.", channel); data->status_flags |= SPI_MCUX_LPSPI_DMA_ERROR_FLAG; } else { /* identify the origin of this callback */ if (channel == data->dma_tx.channel) { /* this part of the transfer ends */ data->status_flags |= SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG; LOG_DBG("DMA TX Block Complete"); } else if (channel == data->dma_rx.channel) { /* this part of the transfer ends */ data->status_flags |= SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG; LOG_DBG("DMA RX Block Complete"); } else { LOG_ERR("DMA callback channel %d is not valid.", channel); data->status_flags |= SPI_MCUX_LPSPI_DMA_ERROR_FLAG; } } #if CONFIG_SPI_ASYNC if (data->ctx.asynchronous && ((data->status_flags & SPI_MCUX_LPSPI_DMA_DONE_FLAG) == SPI_MCUX_LPSPI_DMA_DONE_FLAG)) { /* Load dma blocks of equal length */ size_t dma_size = MIN(data->ctx.tx_len, data->ctx.rx_len); if (dma_size == 0) { dma_size = MAX(data->ctx.tx_len, data->ctx.rx_len); } spi_context_update_tx(&data->ctx, 1, dma_size); spi_context_update_rx(&data->ctx, 1, dma_size); if (data->ctx.tx_len == 0 && data->ctx.rx_len == 0) { spi_context_complete(&data->ctx, spi_dev, 0); } return; } #endif spi_context_complete(&data->ctx, spi_dev, 0); } static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len) { /* const struct spi_mcux_config *cfg = 
dev->config; */ struct spi_mcux_data *data = dev->data; struct dma_block_config *blk_cfg; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); /* remember active TX DMA channel (used in callback) */ struct stream *stream = &data->dma_tx; blk_cfg = &stream->dma_blk_cfg; /* prepare the block for this TX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); if (buf == NULL) { /* Treat the transfer as a peripheral to peripheral one, so that DMA * reads from this address each time */ blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer; stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL; } else { /* tx direction has memory as source and periph as dest. */ blk_cfg->source_address = (uint32_t)buf; stream->dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL; } /* Enable scatter/gather */ blk_cfg->source_gather_en = 1; /* Dest is LPSPI tx fifo */ blk_cfg->dest_address = LPSPI_GetTxRegisterAddress(base); blk_cfg->block_size = len; /* Transfer 1 byte each DMA loop */ stream->dma_cfg.source_burst_length = 1; stream->dma_cfg.head_block = &stream->dma_blk_cfg; /* give the client dev as arg, as the callback comes from the dma */ stream->dma_cfg.user_data = (struct device *)dev; /* pass our client origin to the dma: data->dma_tx.dma_channel */ return dma_config(data->dma_tx.dma_dev, data->dma_tx.channel, &stream->dma_cfg); } static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len) { /*const struct spi_mcux_config *cfg = dev->config; */ struct spi_mcux_data *data = dev->data; struct dma_block_config *blk_cfg; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); /* retrieve active RX DMA channel (used in callback) */ struct stream *stream = &data->dma_rx; blk_cfg = &stream->dma_blk_cfg; /* prepare the block for this RX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); if (buf == NULL) { /* Treat the transfer as a peripheral to peripheral one, so that DMA * reads from this 
address each time */ blk_cfg->dest_address = (uint32_t)&data->dummy_rx_buffer; stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL; } else { /* rx direction has periph as source and mem as dest. */ blk_cfg->dest_address = (uint32_t)buf; stream->dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY; } blk_cfg->block_size = len; /* Enable scatter/gather */ blk_cfg->dest_scatter_en = 1; /* Source is LPSPI rx fifo */ blk_cfg->source_address = LPSPI_GetRxRegisterAddress(base); stream->dma_cfg.source_burst_length = 1; stream->dma_cfg.head_block = blk_cfg; stream->dma_cfg.user_data = (struct device *)dev; /* pass our client origin to the dma: data->dma_rx.channel */ return dma_config(data->dma_rx.dma_dev, data->dma_rx.channel, &stream->dma_cfg); } static int wait_dma_rx_tx_done(const struct device *dev) { struct spi_mcux_data *data = dev->data; int ret = -1; while (1) { ret = spi_context_wait_for_completion(&data->ctx); if (ret) { LOG_DBG("Timed out waiting for SPI context to complete"); return ret; } if (data->status_flags & SPI_MCUX_LPSPI_DMA_ERROR_FLAG) { return -EIO; } if ((data->status_flags & SPI_MCUX_LPSPI_DMA_DONE_FLAG) == SPI_MCUX_LPSPI_DMA_DONE_FLAG) { LOG_DBG("DMA block completed"); return 0; } } } static inline int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size) { struct spi_mcux_data *lpspi_data = dev->data; int ret = 0; /* Clear status flags */ lpspi_data->status_flags = 0U; /* Load dma blocks of equal length */ *dma_size = MIN(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len); if (*dma_size == 0) { *dma_size = MAX(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len); } ret = spi_mcux_dma_tx_load(dev, lpspi_data->ctx.tx_buf, *dma_size); if (ret != 0) { return ret; } ret = spi_mcux_dma_rx_load(dev, lpspi_data->ctx.rx_buf, *dma_size); if (ret != 0) { return ret; } /* Start DMA */ ret = dma_start(lpspi_data->dma_tx.dma_dev, lpspi_data->dma_tx.channel); if (ret != 0) { return ret; } ret = dma_start(lpspi_data->dma_rx.dma_dev, 
lpspi_data->dma_rx.channel); return ret; } static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { /* const struct spi_mcux_config *config = dev->config; */ struct spi_mcux_data *data = dev->data; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); int ret; size_t dma_size; if (!asynchronous) { spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); } ret = spi_mcux_configure(dev, spi_cfg); if (ret) { if (!asynchronous) { spi_context_release(&data->ctx, ret); } return ret; } /* DMA is fast enough watermarks are not required */ LPSPI_SetFifoWatermarks(base, 0U, 0U); if (!asynchronous) { spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(&data->ctx, true); /* Send each spi buf via DMA, updating context as DMA completes */ while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) { /* Load dma block */ ret = spi_mcux_dma_rxtx_load(dev, &dma_size); if (ret != 0) { goto out; } /* Enable DMA Requests */ LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable); /* Wait for DMA to finish */ ret = wait_dma_rx_tx_done(dev); if (ret != 0) { goto out; } while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) { /* wait until module is idle */ } /* Disable DMA */ LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable); /* Update SPI contexts with amount of data we just sent */ spi_context_update_tx(&data->ctx, 1, dma_size); spi_context_update_rx(&data->ctx, 1, dma_size); } spi_context_cs_control(&data->ctx, false); out: spi_context_release(&data->ctx, ret); } #if CONFIG_SPI_ASYNC else { data->ctx.asynchronous = asynchronous; data->ctx.callback = cb; data->ctx.callback_data = userdata; ret = spi_mcux_dma_rxtx_load(dev, &dma_size); if (ret != 0) { goto out; } /* Enable DMA Requests */ LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable); } 
#endif return ret; } #endif static int transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { struct spi_mcux_data *data = dev->data; int ret; spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); ret = spi_mcux_configure(dev, spi_cfg); if (ret) { goto out; } spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); spi_context_cs_control(&data->ctx, true); ret = spi_mcux_transfer_next_packet(dev); if (ret) { goto out; } ret = spi_context_wait_for_completion(&data->ctx); out: spi_context_release(&data->ctx, ret); return ret; } static int spi_mcux_transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { #ifdef CONFIG_SPI_MCUX_LPSPI_DMA const struct spi_mcux_data *data = dev->data; if (data->dma_rx.dma_dev && data->dma_tx.dma_dev) { return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_mcux_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { #ifdef CONFIG_SPI_MCUX_LPSPI_DMA struct spi_mcux_data *data = dev->data; if (data->dma_rx.dma_dev && data->dma_tx.dma_dev) { spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); } return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); #else return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ } #endif /* CONFIG_SPI_ASYNC */ static int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_mcux_data *data = dev->data; spi_context_unlock_unconditionally(&data->ctx); return 0; } 
static int spi_mcux_init(const struct device *dev) { int err; const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP); #if CONFIG_NXP_LP_FLEXCOMM /* When using LP Flexcomm driver, register the interrupt handler * so we receive notification from the LP Flexcomm interrupt handler. */ nxp_lp_flexcomm_setirqhandler(config->parent_dev, dev, LP_FLEXCOMM_PERIPH_LPSPI, spi_mcux_isr); #else /* Interrupt is managed by this driver */ config->irq_config_func(dev); #endif err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } spi_context_unlock_unconditionally(&data->ctx); data->dev = dev; #ifdef CONFIG_SPI_MCUX_LPSPI_DMA if (data->dma_tx.dma_dev && data->dma_rx.dma_dev) { if (!device_is_ready(data->dma_tx.dma_dev)) { LOG_ERR("%s device is not ready", data->dma_tx.dma_dev->name); return -ENODEV; } if (!device_is_ready(data->dma_rx.dma_dev)) { LOG_ERR("%s device is not ready", data->dma_rx.dma_dev->name); return -ENODEV; } } #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ #ifdef CONFIG_SPI_RTIO data->dt_spec.bus = dev; data->iodev.api = &spi_iodev_api; data->iodev.data = &data->dt_spec; mpsc_init(&data->io_q); #endif err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } spi_context_unlock_unconditionally(&data->ctx); return 0; } #ifdef CONFIG_SPI_RTIO static inline k_spinlock_key_t spi_spin_lock(const struct device *dev) { struct spi_mcux_data *data = dev->data; return k_spin_lock(&data->lock); } static inline void spi_spin_unlock(const struct device *dev, k_spinlock_key_t key) { struct spi_mcux_data *data = dev->data; k_spin_unlock(&data->lock, key); } static void spi_mcux_iodev_next(const struct device *dev, bool completion); static void spi_mcux_iodev_start(const struct device *dev) { /* const struct spi_mcux_config *config = dev->config; */ struct spi_mcux_data *data = dev->data; struct rtio_sqe *sqe = 
&data->txn_curr->sqe; struct spi_dt_spec *spi_dt_spec = sqe->iodev->data; struct spi_config *spi_cfg = &spi_dt_spec->config; struct rtio_iodev_sqe *txn_head = data->txn_head; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); lpspi_transfer_t transfer; status_t status; transfer.configFlags = kLPSPI_MasterPcsContinuous | (spi_cfg->slave << LPSPI_MASTER_PCS_SHIFT); switch (sqe->op) { case RTIO_OP_RX: transfer.txData = NULL; transfer.rxData = sqe->rx.buf; transfer.dataSize = sqe->rx.buf_len; break; case RTIO_OP_TX: transfer.rxData = NULL; transfer.txData = sqe->tx.buf; transfer.dataSize = sqe->tx.buf_len; break; case RTIO_OP_TINY_TX: transfer.rxData = NULL; transfer.txData = sqe->tiny_tx.buf; transfer.dataSize = sqe->tiny_tx.buf_len; break; case RTIO_OP_TXRX: transfer.txData = sqe->txrx.tx_buf; transfer.rxData = sqe->txrx.rx_buf; transfer.dataSize = sqe->txrx.buf_len; break; default: LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe); spi_mcux_iodev_next(dev, true); rtio_iodev_sqe_err(txn_head, -EINVAL); spi_mcux_iodev_complete(dev, 0); return; } data->transfer_len = transfer.dataSize; k_spinlock_key_t key = spi_spin_lock(dev); status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); spi_spin_unlock(dev, key); if (status != kStatus_Success) { LOG_ERR("Transfer could not start"); rtio_iodev_sqe_err(txn_head, -EIO); } } static void spi_mcux_iodev_next(const struct device *dev, bool completion) { struct spi_mcux_data *data = dev->data; k_spinlock_key_t key = spi_spin_lock(dev); if (!completion && data->txn_curr != NULL) { spi_spin_unlock(dev, key); return; } struct mpsc_node *next = mpsc_pop(&data->io_q); if (next != NULL) { struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q); data->txn_head = next_sqe; data->txn_curr = next_sqe; } else { data->txn_head = NULL; data->txn_curr = NULL; } spi_spin_unlock(dev, key); if (data->txn_curr != NULL) { struct spi_dt_spec *spi_dt_spec = 
data->txn_curr->sqe.iodev->data; struct spi_config *spi_cfg = &spi_dt_spec->config; spi_mcux_configure(dev, spi_cfg); spi_context_cs_control(&data->ctx, true); spi_mcux_iodev_start(dev); } } static void spi_mcux_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe) { struct spi_mcux_data *data = dev->data; mpsc_push(&data->io_q, &iodev_sqe->q); spi_mcux_iodev_next(dev, false); } static void spi_mcux_iodev_complete(const struct device *dev, int status) { struct spi_mcux_data *data = dev->data; if (data->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) { data->txn_curr = rtio_txn_next(data->txn_curr); spi_mcux_iodev_start(dev); } else { struct rtio_iodev_sqe *txn_head = data->txn_head; spi_context_cs_control(&data->ctx, false); spi_mcux_iodev_next(dev, true); rtio_iodev_sqe_ok(txn_head, status); } } #endif static const struct spi_driver_api spi_mcux_driver_api = { .transceive = spi_mcux_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_mcux_transceive_async, #endif #ifdef CONFIG_SPI_RTIO .iodev_submit = spi_mcux_iodev_submit, #endif .release = spi_mcux_release, }; #define SPI_MCUX_RTIO_DEFINE(n) RTIO_DEFINE(spi_mcux_rtio_##n, CONFIG_SPI_MCUX_RTIO_SQ_SIZE, \ CONFIG_SPI_MCUX_RTIO_SQ_SIZE) #ifdef CONFIG_SPI_MCUX_LPSPI_DMA #define SPI_DMA_CHANNELS(n) \ IF_ENABLED(DT_INST_DMAS_HAS_NAME(n, tx), \ ( \ .dma_tx = { \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \ .channel = \ DT_INST_DMAS_CELL_BY_NAME(n, tx, mux), \ .dma_cfg = { \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_callback = spi_mcux_dma_callback, \ .source_data_size = 1, \ .dest_data_size = 1, \ .block_count = 1, \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, source) \ } \ }, \ )) \ IF_ENABLED(DT_INST_DMAS_HAS_NAME(n, rx), \ ( \ .dma_rx = { \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \ .channel = \ DT_INST_DMAS_CELL_BY_NAME(n, rx, mux), \ .dma_cfg = { \ .channel_direction = PERIPHERAL_TO_MEMORY, \ .dma_callback = spi_mcux_dma_callback, \ 
.source_data_size = 1, \ .dest_data_size = 1, \ .block_count = 1, \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, rx, source) \ } \ }, \ )) #else #define SPI_DMA_CHANNELS(n) #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ #define SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n) \ do { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ spi_mcux_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } while (false) #define SPI_MCUX_LPSPI_MODULE_IRQ(n) \ IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0), \ (SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n))) #ifdef CONFIG_NXP_LP_FLEXCOMM #define PARENT_DEV(n) \ .parent_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), #else #define PARENT_DEV(n) #endif /* CONFIG_NXP_LP_FLEXCOMM */ #define SPI_MCUX_LPSPI_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ COND_CODE_1(CONFIG_SPI_RTIO, (SPI_MCUX_RTIO_DEFINE(n)), ()); \ \ static void spi_mcux_config_func_##n(const struct device *dev); \ \ static const struct spi_mcux_config spi_mcux_config_##n = { \ DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)), \ PARENT_DEV(n) \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ .irq_config_func = spi_mcux_config_func_##n, \ .pcs_sck_delay = UTIL_AND( \ DT_INST_NODE_HAS_PROP(n, pcs_sck_delay), \ DT_INST_PROP(n, pcs_sck_delay)), \ .sck_pcs_delay = UTIL_AND( \ DT_INST_NODE_HAS_PROP(n, sck_pcs_delay), \ DT_INST_PROP(n, sck_pcs_delay)), \ .transfer_delay = UTIL_AND( \ DT_INST_NODE_HAS_PROP(n, transfer_delay), \ DT_INST_PROP(n, transfer_delay)), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .data_pin_config = DT_INST_ENUM_IDX(n, data_pin_config),\ }; \ \ static struct spi_mcux_data spi_mcux_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ SPI_DMA_CHANNELS(n) \ IF_ENABLED(CONFIG_SPI_RTIO, \ (.r = &spi_mcux_rtio_##n,)) \ \ }; \ \ DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL, \ 
&spi_mcux_data_##n, \ &spi_mcux_config_##n, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_mcux_driver_api); \ \ static void spi_mcux_config_func_##n(const struct device *dev) \ { \ SPI_MCUX_LPSPI_MODULE_IRQ(n); \ } DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_LPSPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_mcux_lpspi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,195
```unknown # Infineon CAT1 SPI configuration options config SPI_INFINEON_CAT1 bool "Infineon CAT1 SPI driver" default y depends on DT_HAS_INFINEON_CAT1_SPI_ENABLED select USE_INFINEON_SPI help This option enables the SPI driver for Infineon CAT1 family. ```
/content/code_sandbox/drivers/spi/Kconfig.ifx_cat1
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
69
```c /* * */ #define DT_DRV_COMPAT opencores_spi_simple #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_oc_simple); #include <zephyr/sys/sys_io.h> #include <zephyr/drivers/spi.h> #include "spi_context.h" #include "spi_oc_simple.h" /* Bit 5:4 == ESPR, Bit 1:0 == SPR */ uint8_t DIVIDERS[] = { 0x00, /* 2 */ 0x01, /* 4 */ 0x10, /* 8 */ 0x02, /* 16 */ 0x03, /* 32 */ 0x11, /* 64 */ 0x12, /* 128 */ 0x13, /* 256 */ 0x20, /* 512 */ 0x21, /* 1024 */ 0x22, /* 2048 */ 0x23 }; /* 4096 */ static int spi_oc_simple_configure(const struct spi_oc_simple_cfg *info, struct spi_oc_simple_data *spi, const struct spi_config *config) { uint8_t spcr = 0U; int i; if (spi_context_configured(&spi->ctx, config)) { /* Nothing to do */ return 0; } if (config->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } /* Simple SPI only supports master mode */ if (spi_context_is_slave(&spi->ctx)) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } if ((config->operation & (SPI_MODE_LOOP | SPI_TRANSFER_LSB)) || (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (config->operation & (SPI_LINES_DUAL | SPI_LINES_QUAD | SPI_LINES_OCTAL)))) { LOG_ERR("Unsupported configuration"); return -EINVAL; } /* SPI mode */ if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) { spcr |= SPI_OC_SIMPLE_SPCR_CPOL; } if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) { spcr |= SPI_OC_SIMPLE_SPCR_CPHA; } /* Set clock divider */ for (i = 0; i < 12; i++) { if ((config->frequency << (i + 1)) > CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) { break; } } sys_write8((DIVIDERS[i] >> 4) & 0x3, SPI_OC_SIMPLE_SPER(info)); spcr |= (DIVIDERS[i] & 0x3); /* Configure and Enable SPI controller */ sys_write8(spcr | SPI_OC_SIMPLE_SPCR_SPE, SPI_OC_SIMPLE_SPCR(info)); spi->ctx.config = config; return 0; } int spi_oc_simple_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { const struct 
spi_oc_simple_cfg *info = dev->config; struct spi_oc_simple_data *spi = SPI_OC_SIMPLE_DATA(dev); struct spi_context *ctx = &spi->ctx; uint8_t rx_byte; size_t i; size_t cur_xfer_len; int rc; /* Lock the SPI Context */ spi_context_lock(ctx, false, NULL, NULL, config); spi_oc_simple_configure(info, spi, config); /* Set chip select */ if (spi_cs_is_gpio(config)) { spi_context_cs_control(&spi->ctx, true); } else { sys_write8(1 << config->slave, SPI_OC_SIMPLE_SPSS(info)); } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1); while (spi_context_tx_buf_on(ctx) || spi_context_rx_buf_on(ctx)) { cur_xfer_len = spi_context_longest_current_buf(ctx); for (i = 0; i < cur_xfer_len; i++) { /* Write byte */ if (spi_context_tx_buf_on(ctx)) { sys_write8(*ctx->tx_buf, SPI_OC_SIMPLE_SPDR(info)); spi_context_update_tx(ctx, 1, 1); } else { sys_write8(0, SPI_OC_SIMPLE_SPDR(info)); } /* Wait for rx FIFO empty flag to clear */ while (sys_read8(SPI_OC_SIMPLE_SPSR(info)) & 0x1) { } /* Get received byte */ rx_byte = sys_read8(SPI_OC_SIMPLE_SPDR(info)); /* Store received byte if rx buffer is on */ if (spi_context_rx_on(ctx)) { *ctx->rx_buf = rx_byte; spi_context_update_rx(ctx, 1, 1); } } } /* Clear chip-select */ if (spi_cs_is_gpio(config)) { spi_context_cs_control(&spi->ctx, false); } else { sys_write8(0 << config->slave, SPI_OC_SIMPLE_SPSS(info)); } spi_context_complete(ctx, dev, 0); rc = spi_context_wait_for_completion(ctx); spi_context_release(ctx, rc); return rc; } #ifdef CONFIG_SPI_ASYNC static int spi_oc_simple_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, struct k_poll_signal *async) { return -ENOTSUP; } #endif /* CONFIG_SPI_ASYNC */ int spi_oc_simple_release(const struct device *dev, const struct spi_config *config) { spi_context_unlock_unconditionally(&SPI_OC_SIMPLE_DATA(dev)->ctx); return 0; } static const struct spi_driver_api spi_oc_simple_api = { .transceive = 
spi_oc_simple_transceive, .release = spi_oc_simple_release, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_oc_simple_transceive_async, #endif /* CONFIG_SPI_ASYNC */ }; int spi_oc_simple_init(const struct device *dev) { int err; const struct spi_oc_simple_cfg *info = dev->config; struct spi_oc_simple_data *data = dev->data; /* Clear chip selects */ sys_write8(0, SPI_OC_SIMPLE_SPSS(info)); err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { return err; } /* Make sure the context is unlocked */ spi_context_unlock_unconditionally(&SPI_OC_SIMPLE_DATA(dev)->ctx); /* Initial clock stucks high, so add this workaround */ sys_write8(SPI_OC_SIMPLE_SPCR_SPE, SPI_OC_SIMPLE_SPCR(info)); sys_write8(0, SPI_OC_SIMPLE_SPDR(info)); while (sys_read8(SPI_OC_SIMPLE_SPSR(info)) & 0x1) { } sys_read8(SPI_OC_SIMPLE_SPDR(info)); return 0; } #define SPI_OC_INIT(inst) \ static struct spi_oc_simple_cfg spi_oc_simple_cfg_##inst = { \ .base = DT_INST_REG_ADDR_BY_NAME(inst, control), \ }; \ \ static struct spi_oc_simple_data spi_oc_simple_data_##inst = { \ SPI_CONTEXT_INIT_LOCK(spi_oc_simple_data_##inst, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_oc_simple_data_##inst, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), ctx) \ }; \ \ DEVICE_DT_INST_DEFINE(inst, \ spi_oc_simple_init, \ NULL, \ &spi_oc_simple_data_##inst, \ &spi_oc_simple_cfg_##inst, \ POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_oc_simple_api); DT_INST_FOREACH_STATUS_OKAY(SPI_OC_INIT) ```
/content/code_sandbox/drivers/spi/spi_oc_simple.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,757
```c /* */ #define DT_DRV_COMPAT intel_penwell_spi #include <errno.h> #include <stdint.h> #include <stdbool.h> #include <zephyr/kernel.h> #include <zephyr/drivers/spi.h> #if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie) BUILD_ASSERT(IS_ENABLED(CONFIG_PCIE), "DT need CONFIG_PCIE"); #include <zephyr/drivers/pcie/pcie.h> #endif #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_pw, CONFIG_SPI_LOG_LEVEL); #include "spi_pw.h" static uint32_t spi_pw_reg_read(const struct device *dev, uint32_t offset) { return sys_read32(DEVICE_MMIO_GET(dev) + offset); } static void spi_pw_reg_write(const struct device *dev, uint32_t offset, uint32_t val) { return sys_write32(val, DEVICE_MMIO_GET(dev) + offset); } static void spi_pw_ssp_reset(const struct device *dev) { /* Bring the controller from reset state in to operational mode */ spi_pw_reg_write(dev, PW_SPI_REG_RESETS, 0x00); spi_pw_reg_write(dev, PW_SPI_REG_RESETS, PW_SPI_INST_RESET); } #ifndef CONFIG_SPI_PW_INTERRUPT static bool is_spi_transfer_ongoing(struct spi_pw_data *spi) { return spi_context_tx_on(&spi->ctx) || spi_context_rx_on(&spi->ctx); } #endif static void spi_pw_enable_cs_hw_ctrl(const struct device *dev) { uint32_t cs_ctrl; cs_ctrl = spi_pw_reg_read(dev, PW_SPI_REG_CS_CTRL); cs_ctrl &= PW_SPI_CS_CTRL_HW_MODE; spi_pw_reg_write(dev, PW_SPI_REG_CS_CTRL, cs_ctrl); } static void spi_pw_cs_sw_ctrl(const struct device *dev, bool enable) { uint32_t cs_ctrl; cs_ctrl = spi_pw_reg_read(dev, PW_SPI_REG_CS_CTRL); cs_ctrl &= ~(PW_SPI_CS_CTRL_CS_MASK); /* Enable chip select software control method */ cs_ctrl |= PW_SPI_CS_CTRL_SW_MODE; if (enable) { cs_ctrl &= PW_SPI_CS_LOW; } else { cs_ctrl |= PW_SPI_CS_HIGH; } spi_pw_reg_write(dev, PW_SPI_REG_CS_CTRL, cs_ctrl); } #ifdef CONFIG_SPI_PW_INTERRUPT static void spi_pw_intr_enable(const struct device *dev, bool rx_mask) { uint32_t ctrlr1; ctrlr1 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR1); if (rx_mask) { ctrlr1 |= PW_SPI_INTR_BITS; } else { ctrlr1 |= PW_SPI_INTR_BITS; ctrlr1 &= 
~(PW_SPI_INTR_MASK_RX); } spi_pw_reg_write(dev, PW_SPI_REG_CTRLR1, ctrlr1); } static void spi_pw_intr_disable(const struct device *dev) { uint32_t ctrlr1; ctrlr1 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR1); ctrlr1 &= ~(PW_SPI_INTR_BITS); spi_pw_reg_write(dev, PW_SPI_REG_CTRLR1, ctrlr1); } #endif static void spi_pw_ssp_enable(const struct device *dev) { uint32_t ctrlr0; ctrlr0 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR0); ctrlr0 |= PW_SPI_CTRLR0_SSE_BIT; spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, ctrlr0); } static void spi_pw_ssp_disable(const struct device *dev) { uint32_t ctrlr0; ctrlr0 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR0); ctrlr0 &= ~(PW_SPI_CTRLR0_SSE_BIT); spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, ctrlr0); } static bool is_pw_ssp_busy(const struct device *dev) { uint32_t status; status = spi_pw_reg_read(dev, PW_SPI_REG_SSSR); return (status & PW_SPI_SSSR_BSY_BIT) ? true : false; } static uint8_t spi_pw_get_frame_size(const struct spi_config *config) { uint8_t dfs = SPI_WORD_SIZE_GET(config->operation); dfs /= PW_SPI_WIDTH_8BITS; if ((dfs == 0) || (dfs > PW_SPI_FRAME_SIZE_4_BYTES)) { LOG_WRN("Unsupported dfs, 1-byte size will be used"); dfs = PW_SPI_FRAME_SIZE_1_BYTE; } return dfs; } void spi_pw_cs_ctrl_enable(const struct device *dev, bool enable) { struct spi_pw_data *spi = dev->data; if (enable == true) { if (spi->cs_mode == CS_SW_MODE) { spi_pw_cs_sw_ctrl(dev, true); } else if (spi->cs_mode == CS_GPIO_MODE) { spi_context_cs_control(&spi->ctx, true); } } else { if (spi->cs_mode == CS_SW_MODE) { spi_pw_cs_sw_ctrl(dev, false); } else if (spi->cs_mode == CS_GPIO_MODE) { spi_context_cs_control(&spi->ctx, false); } } } static void spi_pw_cs_ctrl_init(const struct device *dev) { uint32_t cs_ctrl; struct spi_pw_data *spi = dev->data; /* Enable chip select output CS0/CS1 */ cs_ctrl = spi_pw_reg_read(dev, PW_SPI_REG_CS_CTRL); if (spi->cs_output == PW_SPI_CS1_OUTPUT_SELECT) { cs_ctrl &= ~(PW_SPI_CS_CTRL_CS_MASK << PW_SPI_CS_EN_SHIFT); /* Set chip select CS1 */ cs_ctrl |= 
PW_SPI_CS1_SELECT; } else { /* Set chip select CS0 */ cs_ctrl &= ~(PW_SPI_CS_CTRL_CS_MASK << PW_SPI_CS_EN_SHIFT); } spi_pw_reg_write(dev, PW_SPI_REG_CS_CTRL, cs_ctrl); if (spi->cs_mode == CS_HW_MODE) { spi_pw_enable_cs_hw_ctrl(dev); } else if (spi->cs_mode == CS_SW_MODE) { spi_pw_cs_sw_ctrl(dev, false); } else if (spi->cs_mode == CS_GPIO_MODE) { spi_pw_cs_sw_ctrl(dev, false); } } static void spi_pw_tx_thld_set(const struct device *dev) { uint32_t reg_data; /* Tx threshold */ reg_data = spi_pw_reg_read(dev, PW_SPI_REG_SITF); /* mask high water mark bits in tx fifo reg */ reg_data &= ~(PW_SPI_WM_MASK); /* mask low water mark bits in tx fifo reg */ reg_data &= ~(PW_SPI_WM_MASK << PW_SPI_SITF_LWMTF_SHIFT); reg_data |= (PW_SPI_SITF_HIGH_WM_DFLT | PW_SPI_SITF_LOW_WM_DFLT); spi_pw_reg_write(dev, PW_SPI_REG_SITF, reg_data); } static void spi_pw_rx_thld_set(const struct device *dev, struct spi_pw_data *spi) { uint32_t reg_data; /* Rx threshold */ reg_data = spi_pw_reg_read(dev, PW_SPI_REG_SIRF); reg_data &= (uint32_t) ~(PW_SPI_WM_MASK); reg_data |= PW_SPI_SIRF_WM_DFLT; if (spi->ctx.rx_len && spi->ctx.rx_len < spi->fifo_depth) { reg_data = spi->ctx.rx_len - 1; } spi_pw_reg_write(dev, PW_SPI_REG_SIRF, reg_data); } static int spi_pw_set_data_size(const struct device *dev, const struct spi_config *config) { uint32_t ctrlr0; ctrlr0 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR0); /* Full duplex mode */ ctrlr0 &= ~(PW_SPI_CTRLR0_MOD_BIT); ctrlr0 &= PW_SPI_CTRLR0_DATA_MASK; ctrlr0 &= PW_SPI_CTRLR0_EDSS_MASK; /* Set the word size */ if (SPI_WORD_SIZE_GET(config->operation) == 4) { ctrlr0 |= PW_SPI_DATA_SIZE_4_BIT; } else if (SPI_WORD_SIZE_GET(config->operation) == 8) { ctrlr0 |= PW_SPI_DATA_SIZE_8_BIT; } else if (SPI_WORD_SIZE_GET(config->operation) == 16) { ctrlr0 |= PW_SPI_DATA_SIZE_16_BIT; } else if (SPI_WORD_SIZE_GET(config->operation) == 32) { ctrlr0 |= PW_SPI_DATA_SIZE_32_BIT; } else { LOG_ERR("Invalid word size"); return -ENOTSUP; } spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, 
ctrlr0); return 0; } static void spi_pw_config_phase_polarity(const struct device *dev, const struct spi_config *config) { uint8_t mode; uint32_t ctrlr1; ctrlr1 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR1); mode = (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) | (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA); LOG_DBG("mode: 0x%x", (mode >> 1)); switch (mode >> 1) { case SPI_PW_MODE0: ctrlr1 &= ~(PW_SPI_CTRL1_SPO_SPH_MASK); ctrlr1 &= ~(PW_SPI_CTRL1_SPO_BIT); ctrlr1 &= ~(PW_SPI_CTRL1_SPH_BIT); break; case SPI_PW_MODE1: ctrlr1 &= ~(PW_SPI_CTRL1_SPO_SPH_MASK); ctrlr1 |= PW_SPI_CTRL1_SPO_BIT; ctrlr1 &= ~(PW_SPI_CTRL1_SPH_BIT); break; case SPI_PW_MODE2: ctrlr1 &= ~(PW_SPI_CTRL1_SPO_SPH_MASK); ctrlr1 &= ~(PW_SPI_CTRL1_SPO_BIT); ctrlr1 |= PW_SPI_CTRL1_SPH_BIT; break; case SPI_PW_MODE3: ctrlr1 |= PW_SPI_CTRL1_SPO_BIT; ctrlr1 |= PW_SPI_CTRL1_SPH_BIT; break; } /* Set Polarity & Phase */ spi_pw_reg_write(dev, PW_SPI_REG_CTRLR1, ctrlr1); } static void spi_pw_enable_clk(const struct device *dev) { uint32_t clks; /*Update M:N value & enable clock */ clks = spi_pw_reg_read(dev, PW_SPI_REG_CLKS); clks &= ~(PW_SPI_CLKS_MVAL_MASK); clks &= ~(PW_SPI_CLKS_NVAL_MASK); clks |= (PW_SPI_CLKS_MVAL | PW_SPI_CLKS_NVAL | PW_SPI_CLKS_EN_BIT | PW_SPI_CLKS_UPDATE_BIT); spi_pw_reg_write(dev, PW_SPI_REG_CLKS, clks); } static void spi_pw_config_clk(const struct device *dev, const struct spi_pw_config *info, const struct spi_config *config) { uint32_t ctrlr0, scr; /* Update scr control bits */ if (!config->frequency) { scr = PW_SPI_BR_2MHZ; } else if (config->frequency > PW_SPI_BR_MAX_FRQ) { scr = (info->clock_freq / PW_SPI_BR_MAX_FRQ) - 1; } else { scr = (info->clock_freq / config->frequency) - 1; } ctrlr0 = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR0); ctrlr0 &= ~(PW_SPI_SCR_MASK); ctrlr0 |= (scr << PW_SPI_SCR_SHIFT); spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, ctrlr0); } static void spi_pw_completed(const struct device *dev, int err) { struct spi_pw_data *spi = dev->data; if (!err && 
(spi_context_tx_on(&spi->ctx) || spi_context_rx_on(&spi->ctx))) { return; } /* need to give time for FIFOs to drain before issuing more commands */ while (is_pw_ssp_busy(dev)) { } #ifdef CONFIG_SPI_PW_INTERRUPT /* Disabling interrupts */ spi_pw_intr_disable(dev); #endif /* Disabling the controller operation, which also clear's all status bits * in status register */ spi_pw_ssp_disable(dev); spi_pw_cs_ctrl_enable(dev, false); LOG_DBG("SPI transaction completed %s error\n", err ? "with" : "without"); spi_context_complete(&spi->ctx, dev, err); } static void spi_pw_clear_intr(const struct device *dev) { uint32_t sssr; sssr = spi_pw_reg_read(dev, PW_SPI_REG_SSSR); sssr &= ~(PW_SPI_INTR_ERRORS_MASK); spi_pw_reg_write(dev, PW_SPI_REG_SSSR, sssr); } static int spi_pw_get_tx_fifo_level(const struct device *dev) { uint32_t tx_fifo_level; tx_fifo_level = spi_pw_reg_read(dev, PW_SPI_REG_SITF); tx_fifo_level = ((tx_fifo_level & PW_SPI_SITF_SITFL_MASK) >> PW_SPI_SITF_SITFL_SHIFT); return tx_fifo_level; } static int spi_pw_get_rx_fifo_level(const struct device *dev) { uint32_t rx_fifo_level; rx_fifo_level = spi_pw_reg_read(dev, PW_SPI_REG_SIRF); rx_fifo_level = ((rx_fifo_level & PW_SPI_SIRF_SIRFL_MASK) >> PW_SPI_SIRF_SIRFL_SHIFT); return rx_fifo_level; } static void spi_pw_reset_tx_fifo_level(const struct device *dev) { uint32_t tx_fifo_level; tx_fifo_level = spi_pw_reg_read(dev, PW_SPI_REG_SITF); tx_fifo_level &= ~(PW_SPI_SITF_SITFL_MASK); spi_pw_reg_write(dev, PW_SPI_REG_SITF, tx_fifo_level); } static void spi_pw_update_rx_fifo_level(uint32_t len, const struct device *dev) { uint32_t rx_fifo_level; rx_fifo_level = spi_pw_reg_read(dev, PW_SPI_REG_SIRF); rx_fifo_level &= ~(PW_SPI_SIRF_SIRFL_MASK); rx_fifo_level |= (len << PW_SPI_SIRF_SIRFL_SHIFT); spi_pw_reg_write(dev, PW_SPI_REG_SIRF, rx_fifo_level); } static void spi_pw_tx_data(const struct device *dev) { struct spi_pw_data *spi = dev->data; uint32_t data = 0U; int32_t fifo_len; if (spi_context_rx_on(&spi->ctx)) { fifo_len = 
spi->fifo_depth - spi_pw_get_tx_fifo_level(dev) - spi_pw_get_rx_fifo_level(dev); if (fifo_len < 0) { fifo_len = 0U; } } else { fifo_len = spi->fifo_depth - spi_pw_get_tx_fifo_level(dev); } while (fifo_len > 0) { if (spi_context_tx_buf_on(&spi->ctx)) { switch (spi->dfs) { case 1: data = UNALIGNED_GET((uint8_t *) (spi->ctx.tx_buf)); break; case 2: data = UNALIGNED_GET((uint16_t *) (spi->ctx.tx_buf)); break; case 4: data = UNALIGNED_GET((uint32_t *) (spi->ctx.tx_buf)); break; } } else if (spi_context_rx_on(&spi->ctx)) { if ((int)(spi->ctx.rx_len - spi->fifo_diff) <= 0) { break; } data = 0U; } else if (spi_context_tx_on(&spi->ctx)) { data = 0U; } else { break; } spi_pw_reg_write(dev, PW_SPI_REG_SSDR, data); spi_context_update_tx(&spi->ctx, spi->dfs, 1); spi->fifo_diff++; fifo_len--; } if (!spi_context_tx_on(&spi->ctx)) { spi_pw_reset_tx_fifo_level(dev); } } static void spi_pw_rx_data(const struct device *dev) { struct spi_pw_data *spi = dev->data; while (spi_pw_get_rx_fifo_level(dev)) { uint32_t data = spi_pw_reg_read(dev, PW_SPI_REG_SSDR); if (spi_context_rx_buf_on(&spi->ctx)) { switch (spi->dfs) { case 1: UNALIGNED_PUT(data, (uint8_t *)spi->ctx.rx_buf); break; case 2: UNALIGNED_PUT(data, (uint16_t *)spi->ctx.rx_buf); break; case 4: UNALIGNED_PUT(data, (uint32_t *)spi->ctx.rx_buf); break; } } spi_context_update_rx(&spi->ctx, spi->dfs, 1); spi->fifo_diff--; } if (!spi->ctx.rx_len && spi->ctx.tx_len < spi->fifo_depth) { spi_pw_update_rx_fifo_level(spi->ctx.tx_len - 1, dev); } else if (spi_pw_get_rx_fifo_level(dev) >= spi->ctx.rx_len) { spi_pw_update_rx_fifo_level(spi->ctx.rx_len - 1, dev); } } static int spi_pw_transfer(const struct device *dev) { uint32_t intr_status; int err; intr_status = spi_pw_reg_read(dev, PW_SPI_REG_SSSR); if (intr_status & PW_SPI_SSSR_ROR_BIT) { LOG_ERR("Receive FIFO overrun"); err = -EIO; goto out; } if (intr_status & PW_SPI_SSSR_TUR_BIT) { LOG_ERR("Transmit FIFO underrun"); err = -EIO; goto out; } if (intr_status & PW_SPI_SSSR_TINT_BIT) { 
LOG_ERR("Receiver timeout interrupt"); err = -EIO; goto out; } err = 0; if (intr_status & PW_SPI_SSSR_RNE_BIT) { spi_pw_rx_data(dev); } if (intr_status & PW_SPI_SSSR_TNF_BIT) { spi_pw_tx_data(dev); } out: if (err) { spi_pw_clear_intr(dev); } return err; } static int spi_pw_configure(const struct device *dev, const struct spi_pw_config *info, struct spi_pw_data *spi, const struct spi_config *config) { int err; /* At this point, it's mandatory to set this on the context! */ spi->ctx.config = config; if (!spi_cs_is_gpio(spi->ctx.config)) { if (spi->cs_mode == CS_GPIO_MODE) { LOG_DBG("cs gpio is NULL, switch to hw mode"); spi->cs_mode = CS_HW_MODE; spi_pw_enable_cs_hw_ctrl(dev); } } if (config->operation & SPI_HALF_DUPLEX) { LOG_ERR("Half-duplex not supported"); return -ENOTSUP; } /* Verify if requested op mode is relevant to this controller */ if (config->operation & SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not supported"); return -ENOTSUP; } if ((config->operation & SPI_TRANSFER_LSB) || (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) && (config->operation & (SPI_LINES_DUAL | SPI_LINES_QUAD | SPI_LINES_OCTAL)))) { LOG_ERR("Extended mode Unsupported configuration"); return -EINVAL; } if (config->operation & SPI_FRAME_FORMAT_TI) { LOG_ERR("TI frame format not supported"); return -ENOTSUP; } if (config->operation & SPI_HOLD_ON_CS) { LOG_ERR("Chip select hold not supported"); return -ENOTSUP; } /* Set mode & data size */ err = spi_pw_set_data_size(dev, config); if (err) { LOG_ERR("Invalid data size"); return -ENOTSUP; } /* Set Polarity & Phase */ spi_pw_config_phase_polarity(dev, config); /* enable clock */ spi_pw_enable_clk(dev); /* configure */ spi_pw_config_clk(dev, info, config); return 0; } static int transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { const struct spi_pw_config *info = dev->config; struct spi_pw_data *spi = 
dev->data; int err; if (!tx_bufs && !rx_bufs) { LOG_ERR(" Tx & Rx buff null"); return 0; } if (asynchronous) { LOG_ERR("Async not supported"); return -ENOTSUP; } spi_context_lock(&spi->ctx, asynchronous, cb, userdata, config); /* Configure */ err = spi_pw_configure(dev, info, spi, config); if (err) { LOG_ERR("spi pw config fail"); goto out; } /* Frame size in number of data bytes */ spi->dfs = spi_pw_get_frame_size(config); spi_context_buffers_setup(&spi->ctx, tx_bufs, rx_bufs, spi->dfs); spi->fifo_diff = 0U; /* Tx threshold */ spi_pw_tx_thld_set(dev); /* Rx threshold */ spi_pw_rx_thld_set(dev, spi); spi_pw_cs_ctrl_enable(dev, true); /* Enable ssp operation */ spi_pw_ssp_enable(dev); #ifdef CONFIG_SPI_PW_INTERRUPT LOG_DBG("Interrupt Mode"); /* Enable interrupts */ if (rx_bufs) { spi_pw_intr_enable(dev, true); } else { spi_pw_intr_enable(dev, false); } err = spi_context_wait_for_completion(&spi->ctx); #else LOG_DBG("Polling Mode"); do { err = spi_pw_transfer(dev); } while ((!err) && is_spi_transfer_ongoing(spi)); spi_pw_completed(dev, err); #endif out: spi_context_release(&spi->ctx, err); return err; } static int spi_pw_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { LOG_DBG("%p, %p, %p\n", dev, tx_bufs, rx_bufs); return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC static int spi_pw_transceive_async(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { LOG_DBG("%p, %p, %p, %p, %p\n", dev, tx_bufs, rx_bufs, cb, userdata); return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ static int spi_pw_release(const struct device *dev, const struct spi_config *config) { struct spi_pw_data *spi = dev->data; if (!spi_context_configured(&spi->ctx, config)) { return -EINVAL; } 
spi_context_unlock_unconditionally(&spi->ctx); return 0; } #ifdef CONFIG_SPI_PW_INTERRUPT static void spi_pw_isr(const void *arg) { const struct device *dev = (const struct device *)arg; int err; err = spi_pw_transfer(dev); spi_pw_completed(dev, err); } #endif static const struct spi_driver_api pw_spi_api = { .transceive = spi_pw_transceive, .release = spi_pw_release, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_pw_transceive_async, #endif /* CONFIG_SPI_ASYNC */ }; static int spi_pw_init(const struct device *dev) { const struct spi_pw_config *info = dev->config; struct spi_pw_data *spi = dev->data; int err; #if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie) if (info->pcie) { struct pcie_bar mbar; if (info->pcie->bdf == PCIE_BDF_NONE) { LOG_ERR("Cannot probe PCI device"); return -ENODEV; } if (!pcie_probe_mbar(info->pcie->bdf, 0, &mbar)) { LOG_ERR("MBAR not found"); return -EINVAL; } pcie_set_cmd(info->pcie->bdf, PCIE_CONF_CMDSTAT_MEM, true); device_map(DEVICE_MMIO_RAM_PTR(dev), mbar.phys_addr, mbar.size, K_MEM_CACHE_NONE); pcie_set_cmd(info->pcie->bdf, PCIE_CONF_CMDSTAT_MASTER, true); } else { DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); } #else DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); #endif /* Bring ssp out of reset */ spi_pw_ssp_reset(dev); /* Disable ssp operation */ spi_pw_ssp_disable(dev); /* Chip select control */ spi_pw_cs_ctrl_init(dev); #if defined(CONFIG_SPI_PW_INTERRUPT) /* Mask interrupts */ spi_pw_intr_disable(dev); /* Init and connect IRQ */ info->irq_config(dev); #endif if (spi->cs_mode == CS_GPIO_MODE) { err = spi_context_cs_configure_all(&spi->ctx); if (err < 0) { LOG_ERR("Failed to configure CS pins: %d", err); return err; } } spi_context_unlock_unconditionally(&spi->ctx); LOG_DBG("SPI pw init success"); return 0; } #define INIT_PCIE0(n) #define INIT_PCIE1(n) DEVICE_PCIE_INST_INIT(n, pcie), #define INIT_PCIE(n) _CONCAT(INIT_PCIE, DT_INST_ON_BUS(n, pcie))(n) #define DEFINE_PCIE0(n) #define DEFINE_PCIE1(n) DEVICE_PCIE_INST_DECLARE(n) #define SPI_PCIE_DEFINE(n) 
_CONCAT(DEFINE_PCIE, DT_INST_ON_BUS(n, pcie))(n) #ifdef CONFIG_SPI_PW_INTERRUPT #define SPI_INTEL_IRQ_FLAGS_SENSE0(n) 0 #define SPI_INTEL_IRQ_FLAGS_SENSE1(n) DT_INST_IRQ(n, sense) #define SPI_INTEL_IRQ_FLAGS(n) \ _CONCAT(SPI_INTEL_IRQ_FLAGS_SENSE, DT_INST_IRQ_HAS_CELL(n, sense))(n) #define SPI_INTEL_IRQ_INIT(n) \ BUILD_ASSERT(IS_ENABLED(CONFIG_DYNAMIC_INTERRUPTS), \ "SPI PCIe requires dynamic interrupts"); \ static void spi_##n##_irq_init(const struct device *dev) \ { \ const struct spi_pw_config *info = dev->config; \ unsigned int irq; \ if (DT_INST_IRQN(n) == PCIE_IRQ_DETECT) { \ irq = pcie_alloc_irq(info->pcie->bdf); \ if (irq == PCIE_CONF_INTR_IRQ_NONE) { \ return; \ } \ } else { \ irq = DT_INST_IRQN(n); \ pcie_conf_write(info->pcie->bdf, \ PCIE_CONF_INTR, irq); \ } \ pcie_connect_dynamic_irq(info->pcie->bdf, irq, \ DT_INST_IRQ(n, priority), \ (void (*)(const void *))spi_pw_isr, \ DEVICE_DT_INST_GET(n), \ SPI_INTEL_IRQ_FLAGS(n)); \ pcie_irq_enable(info->pcie->bdf, irq); \ LOG_DBG("lpass spi Configure irq %d", irq); \ } #define SPI_PW_DEV_INIT(n) \ static struct spi_pw_data spi_##n##_data = { \ SPI_CONTEXT_INIT_LOCK(spi_##n##_data, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_##n##_data, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ .cs_mode = DT_INST_PROP(n, pw_cs_mode), \ .cs_output = DT_INST_PROP(n, pw_cs_output), \ .fifo_depth = DT_INST_PROP(n, pw_fifo_depth), \ }; \ SPI_PCIE_DEFINE(n); \ SPI_INTEL_IRQ_INIT(n) \ static const struct spi_pw_config spi_##n##_config = { \ .irq_config = spi_##n##_irq_init, \ .clock_freq = DT_INST_PROP(n, clock_frequency), \ INIT_PCIE(n) \ }; \ DEVICE_DT_INST_DEFINE(n, spi_pw_init, NULL, \ &spi_##n##_data, &spi_##n##_config, \ POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ &pw_spi_api); #else #define SPI_PW_DEV_INIT(n) \ static struct spi_pw_data spi_##n##_data = { \ SPI_CONTEXT_INIT_LOCK(spi_##n##_data, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_##n##_data, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ .cs_mode = 
DT_INST_PROP(n, pw_cs_mode), \ .cs_output = DT_INST_PROP(n, pw_cs_output), \ .fifo_depth = DT_INST_PROP(n, pw_fifo_depth), \ }; \ SPI_PCIE_DEFINE(n); \ static const struct spi_pw_config spi_##n##_config = { \ .clock_freq = DT_INST_PROP(n, clock_frequency), \ INIT_PCIE(n) \ }; \ DEVICE_DT_INST_DEFINE(n, spi_pw_init, NULL, \ &spi_##n##_data, &spi_##n##_config, \ POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ &pw_spi_api); #endif DT_INST_FOREACH_STATUS_OKAY(SPI_PW_DEV_INIT) ```
/content/code_sandbox/drivers/spi/spi_pw.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,544
```unknown menuconfig SPI_XMC4XXX bool "XMC4XX SPI driver" default y depends on DT_HAS_INFINEON_XMC4XXX_SPI_ENABLED select GPIO help Enable XMC4XXX SPI driver. if SPI_XMC4XXX config SPI_XMC4XXX_INTERRUPT bool "XMC4XXX SPI interrupt mode" help Enables interrupt support for XMC4XXX SPI driver. config SPI_XMC4XXX_DMA bool "XMC4XXX SPI DMA support" select DMA help Enables DMA for SPI transfers. if SPI_XMC4XXX_DMA config SPI_XMC4XXX_DMA_TIMEOUT_MSEC int "Timeout in milliseconds for an SPI transaction to complete if using DMA" default 1000 help Sets timeout in milliseconds for an SPI transaction to complete when using DMA. endif # SPI_XMC4XXX_DMA endif # SPI_XMC4XXX ```
/content/code_sandbox/drivers/spi/Kconfig.xmc4xxx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
193
```unknown # Atmel SAM0 SERCOM SPI config SPI_SAM0 bool "Atmel SAM0 series SERCOM SPI driver" default y depends on DT_HAS_ATMEL_SAM0_SPI_ENABLED select DMA if SPI_ASYNC help Enable support for the SAM0 SERCOM SPI driver. ```
/content/code_sandbox/drivers/spi/Kconfig.sam0
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
65
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_SPI_ESP32_SPIM_H_ #define ZEPHYR_DRIVERS_SPI_ESP32_SPIM_H_ #include <zephyr/drivers/pinctrl.h> #include <hal/spi_hal.h> #ifdef SOC_GDMA_SUPPORTED #include <hal/gdma_hal.h> #endif #define SPI_MASTER_FREQ_8M (APB_CLK_FREQ/10) #define SPI_MASTER_FREQ_9M (APB_CLK_FREQ/9) /* 8.89MHz */ #define SPI_MASTER_FREQ_10M (APB_CLK_FREQ/8) /* 10MHz */ #define SPI_MASTER_FREQ_11M (APB_CLK_FREQ/7) /* 11.43MHz */ #define SPI_MASTER_FREQ_13M (APB_CLK_FREQ/6) /* 13.33MHz */ #define SPI_MASTER_FREQ_16M (APB_CLK_FREQ/5) /* 16MHz */ #define SPI_MASTER_FREQ_20M (APB_CLK_FREQ/4) /* 20MHz */ #define SPI_MASTER_FREQ_26M (APB_CLK_FREQ/3) /* 26.67MHz */ #define SPI_MASTER_FREQ_40M (APB_CLK_FREQ/2) /* 40MHz */ #define SPI_MASTER_FREQ_80M (APB_CLK_FREQ/1) /* 80MHz */ struct spi_esp32_config { spi_dev_t *spi; const struct device *clock_dev; int duty_cycle; int input_delay_ns; int irq_source; const struct pinctrl_dev_config *pcfg; clock_control_subsys_t clock_subsys; bool use_iomux; bool dma_enabled; int dma_clk_src; int dma_host; int cs_setup; int cs_hold; bool line_idle_low; spi_clock_source_t clock_source; }; struct spi_esp32_data { struct spi_context ctx; spi_hal_context_t hal; spi_hal_config_t hal_config; #ifdef SOC_GDMA_SUPPORTED gdma_hal_context_t hal_gdma; #endif spi_hal_timing_conf_t timing_config; spi_hal_dev_config_t dev_config; spi_hal_trans_config_t trans_config; uint8_t dfs; lldesc_t dma_desc_tx; lldesc_t dma_desc_rx; uint32_t clock_source_hz; }; #endif /* ZEPHYR_DRIVERS_SPI_ESP32_SPIM_H_ */ ```
/content/code_sandbox/drivers/spi/spi_esp32_spim.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
503
```unknown config SPI_TELINK_B91 bool "Telink Semiconductor B91 SPI driver" default y depends on DT_HAS_TELINK_B91_SPI_ENABLED help Enables Telink B91 SPI driver. ```
/content/code_sandbox/drivers/spi/Kconfig.b91
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
47
```unknown config SPI_BITBANG bool "Bitbang SPI controller driver" default y depends on DT_HAS_ZEPHYR_SPI_BITBANG_ENABLED help Enable the Bitbang SPI controller ```
/content/code_sandbox/drivers/spi/Kconfig.bitbang
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
44
```c /* * an affiliate of Cypress Semiconductor Corporation * */ #define DT_DRV_COMPAT infineon_cat1_spi #define LOG_LEVEL CONFIG_SPI_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(cat1_spi); #include "spi_context.h" #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/spi.h> #include <zephyr/kernel.h> #include <cyhal_scb_common.h> #include <cyhal_spi.h> #define IFX_CAT1_SPI_LOCK_TMOUT_MS (30 * 1000) #define IFX_CAT1_SPI_DEFAULT_OVERSAMPLE (4) #define IFX_CAT1_SPI_MIN_DATA_WIDTH (8) #define IFX_CAT1_SPI_MAX_DATA_WIDTH (32) /* Device config structure */ struct ifx_cat1_spi_config { CySCB_Type *reg_addr; const struct pinctrl_dev_config *pcfg; cy_stc_scb_spi_config_t scb_spi_config; uint8_t irq_priority; }; /* Data structure */ struct ifx_cat1_spi_data { struct spi_context ctx; cyhal_spi_t obj; /* SPI CYHAL object */ cyhal_resource_inst_t hw_resource; uint8_t dfs_value; size_t chunk_len; }; static int32_t get_hw_block_num(CySCB_Type *reg_addr) { uint32_t i; for (i = 0u; i < _SCB_ARRAY_SIZE; i++) { if (_CYHAL_SCB_BASE_ADDRESSES[i] == reg_addr) { return i; } } return -ENOMEM; } static uint8_t get_dfs_value(struct spi_context *ctx) { switch (SPI_WORD_SIZE_GET(ctx->config->operation)) { case 8: return 1; case 16: return 2; case 32: return 4; default: return 1; } } static void transfer_chunk(const struct device *dev) { struct ifx_cat1_spi_data *const data = dev->data; struct spi_context *ctx = &data->ctx; int ret = 0; size_t chunk_len = spi_context_max_continuous_chunk(ctx); if (chunk_len == 0) { goto exit; } data->chunk_len = chunk_len; cy_rslt_t result = cyhal_spi_transfer_async( &data->obj, ctx->tx_buf, spi_context_tx_buf_on(ctx) ? chunk_len : 0, ctx->rx_buf, spi_context_rx_buf_on(ctx) ? 
chunk_len : 0); if (result == CY_RSLT_SUCCESS) { return; } ret = -EIO; exit: spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, ret); } static void spi_interrupt_callback(void *arg, cyhal_spi_event_t event) { const struct device *dev = (const struct device *)arg; struct ifx_cat1_spi_data *const data = dev->data; struct spi_context *ctx = &data->ctx; if (event & CYHAL_SPI_IRQ_ERROR) { #if defined(CONFIG_SPI_ASYNC) cyhal_spi_abort_async(&data->obj); #endif spi_context_cs_control(ctx, false); spi_context_complete(ctx, dev, -EIO); } if (event & CYHAL_SPI_IRQ_DONE) { spi_context_update_tx(ctx, data->dfs_value, data->chunk_len); spi_context_update_rx(ctx, data->dfs_value, data->chunk_len); transfer_chunk(dev); } } int spi_config(const struct device *dev, const struct spi_config *spi_cfg) { cy_rslt_t result; struct ifx_cat1_spi_data *const data = dev->data; const struct ifx_cat1_spi_config *const config = dev->config; cy_stc_scb_spi_config_t scb_spi_config = config->scb_spi_config; struct spi_context *ctx = &data->ctx; bool spi_mode_cpol = false; bool spi_mode_cpha = false; /* check if configuration was changed from previous run, if so skip setup again */ if (spi_context_configured(ctx, spi_cfg)) { /* Already configured. No need to do it again. 
*/ return 0; } if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_LOOP) { return -ENOTSUP; } if (SPI_WORD_SIZE_GET(spi_cfg->operation) > IFX_CAT1_SPI_MAX_DATA_WIDTH) { LOG_ERR("Word size %d is greater than %d", SPI_WORD_SIZE_GET(spi_cfg->operation), IFX_CAT1_SPI_MAX_DATA_WIDTH); return -EINVAL; } if (SPI_WORD_SIZE_GET(spi_cfg->operation) < IFX_CAT1_SPI_MIN_DATA_WIDTH) { LOG_ERR("Word size %d is less than %d", SPI_WORD_SIZE_GET(spi_cfg->operation), IFX_CAT1_SPI_MIN_DATA_WIDTH); return -EINVAL; } if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_SLAVE) { scb_spi_config.spiMode = CY_SCB_SPI_SLAVE; scb_spi_config.oversample = 0; scb_spi_config.enableMisoLateSample = false; } else { scb_spi_config.spiMode = CY_SCB_SPI_MASTER; } if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) { spi_mode_cpol = true; } if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) { spi_mode_cpha = true; } if (SPI_WORD_SIZE_GET(spi_cfg->operation)) { scb_spi_config.txDataWidth = SPI_WORD_SIZE_GET(spi_cfg->operation); scb_spi_config.rxDataWidth = SPI_WORD_SIZE_GET(spi_cfg->operation); } if (spi_mode_cpha) { scb_spi_config.sclkMode = spi_mode_cpol ? CY_SCB_SPI_CPHA1_CPOL1 : CY_SCB_SPI_CPHA1_CPOL0; } else { scb_spi_config.sclkMode = spi_mode_cpol ? CY_SCB_SPI_CPHA0_CPOL1 : CY_SCB_SPI_CPHA0_CPOL0; } scb_spi_config.enableMsbFirst = (spi_cfg->operation & SPI_TRANSFER_LSB) ? 
false : true; /* Force free resource */ if (data->obj.base != NULL) { cyhal_spi_free(&data->obj); } /* Initialize the SPI peripheral */ cyhal_spi_configurator_t spi_init_cfg = {.resource = &data->hw_resource, .config = &scb_spi_config, .gpios = {NC, {NC, NC, NC, NC}, NC, NC}}; result = cyhal_spi_init_cfg(&data->obj, &spi_init_cfg); if (result != CY_RSLT_SUCCESS) { return -ENOTSUP; } /* Assigns a programmable divider to a selected IP block */ en_clk_dst_t clk_idx = _cyhal_scb_get_clock_index(spi_init_cfg.resource->block_num); result = _cyhal_utils_peri_pclk_assign_divider(clk_idx, &data->obj.clock); if (result != CY_RSLT_SUCCESS) { return -ENOTSUP; } /* Configure Slave select polarity */ if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_SLAVE) { Cy_SCB_SPI_SetActiveSlaveSelectPolarity(data->obj.base, CY_SCB_SPI_SLAVE_SELECT0, scb_spi_config.ssPolarity); } /* Set the data rate */ result = cyhal_spi_set_frequency(&data->obj, spi_cfg->frequency); if (result != CY_RSLT_SUCCESS) { return -EIO; } /* Write 0 when NULL buffer is provided for Tx/Rx */ data->obj.write_fill = 0; /* Register common SPI callback */ cyhal_spi_register_callback(&data->obj, spi_interrupt_callback, (void *)dev); cyhal_spi_enable_event(&data->obj, CYHAL_SPI_IRQ_DONE, config->irq_priority, true); /* Store spi config in context */ ctx->config = spi_cfg; data->dfs_value = get_dfs_value(ctx); return 0; } static int transceive(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, bool asynchronous, spi_callback_t cb, void *userdata) { int result; struct ifx_cat1_spi_data *const data = dev->data; struct spi_context *ctx = &data->ctx; spi_context_lock(ctx, asynchronous, cb, userdata, spi_cfg); result = spi_config(dev, spi_cfg); if (result) { LOG_ERR("Error in SPI Configuration (result: 0x%x)", result); return result; } spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, data->dfs_value); spi_context_cs_control(ctx, true); 
transfer_chunk(dev); result = spi_context_wait_for_completion(&data->ctx); spi_context_release(ctx, result); return result; } static int ifx_cat1_spi_transceive_sync(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #if defined(CONFIG_SPI_ASYNC) static int ifx_cat1_spi_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, spi_callback_t cb, void *userdata) { return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif static int ifx_cat1_spi_release(const struct device *dev, const struct spi_config *spi_cfg) { struct ifx_cat1_spi_data *const data = dev->data; cyhal_spi_free(&data->obj); return 0; } static const struct spi_driver_api ifx_cat1_spi_api = { .transceive = ifx_cat1_spi_transceive_sync, #if defined(CONFIG_SPI_ASYNC) .transceive_async = ifx_cat1_spi_transceive_async, #endif .release = ifx_cat1_spi_release, }; static int ifx_cat1_spi_init(const struct device *dev) { struct ifx_cat1_spi_data *const data = dev->data; const struct ifx_cat1_spi_config *const config = dev->config; int ret; /* Dedicate SCB HW resource */ data->hw_resource.type = CYHAL_RSC_SCB; data->hw_resource.block_num = get_hw_block_num(config->reg_addr); /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Configure slave select (master) */ spi_context_cs_configure_all(&data->ctx); spi_context_unlock_unconditionally(&data->ctx); return 0; } #define IFX_CAT1_SPI_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static struct ifx_cat1_spi_data spi_cat1_data_##n = { \ SPI_CONTEXT_INIT_LOCK(spi_cat1_data_##n, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_cat1_data_##n, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)}; \ static struct 
ifx_cat1_spi_config spi_cat1_config_##n = { \ .reg_addr = (CySCB_Type *)DT_INST_REG_ADDR(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .scb_spi_config = \ {.spiMode = CY_SCB_SPI_MASTER, /* overwrite by cfg */ \ .sclkMode = CY_SCB_SPI_CPHA0_CPOL0, /* overwrite by cfg */ \ .rxDataWidth = 8, /* overwrite by cfg */ \ .txDataWidth = 8, /* overwrite by cfg */ \ .enableMsbFirst = true, /* overwrite by cfg */ \ .subMode = CY_SCB_SPI_MOTOROLA, \ .oversample = IFX_CAT1_SPI_DEFAULT_OVERSAMPLE, \ .enableMisoLateSample = true, \ .ssPolarity = CY_SCB_SPI_ACTIVE_LOW, \ }, \ .irq_priority = DT_INST_IRQ(n, priority), \ }; \ DEVICE_DT_INST_DEFINE(n, ifx_cat1_spi_init, NULL, &spi_cat1_data_##n, \ &spi_cat1_config_##n, POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &ifx_cat1_spi_api); DT_INST_FOREACH_STATUS_OKAY(IFX_CAT1_SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_ifx_cat1.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,816
```unknown config SPI_RPI_PICO_PIO bool "Raspberry Pi PICO PIO SPI controller driver" default y depends on DT_HAS_RASPBERRYPI_PICO_SPI_PIO_ENABLED select PICOSDK_USE_PIO select PICOSDK_USE_CLAIM help Enable driving SPI via PIO on the PICO ```
/content/code_sandbox/drivers/spi/Kconfig.rpi_pico
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
72
```unknown # Dai driver configuration options # # DAI Drivers # menuconfig DAI bool "Digital Audio Interface (DAI) drivers" help Enable support for the DAI interface drivers. if DAI config DAI_INIT_PRIORITY int "Init priority" default 70 help Device driver initialization priority. module = DAI module-str = dai source "subsys/logging/Kconfig.template.log_config" comment "Device Drivers" source "drivers/dai/intel/ssp/Kconfig.ssp" source "drivers/dai/intel/alh/Kconfig.alh" source "drivers/dai/intel/dmic/Kconfig.dmic" source "drivers/dai/intel/hda/Kconfig.hda" source "drivers/dai/nxp/sai/Kconfig.sai" source "drivers/dai/nxp/esai/Kconfig.esai" endif # DAI ```
/content/code_sandbox/drivers/dai/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
186
```objective-c /* * */ #ifndef __INTEL_DAI_DRIVER_ALH_MAP_H__ #define __INTEL_DAI_DRIVER_ALH_MAP_H__ /** * \brief ALH Handshakes for audio I/O * Stream ID -> DMA Handshake map * -1 identifies invalid handshakes/streams */ static const uint8_t alh_handshake_map[64] = { -1, /* 0 - INVALID */ -1, /* 1 - INVALID */ -1, /* 2 - INVALID */ -1, /* 3 - INVALID */ -1, /* 4 - INVALID */ -1, /* 5 - INVALID */ -1, /* 6 - INVALID */ 22, /* 7 - BIDIRECTIONAL */ 23, /* 8 - BIDIRECTIONAL */ 24, /* 9 - BIDIRECTIONAL */ 25, /* 10 - BIDIRECTIONAL */ 26, /* 11 - BIDIRECTIONAL */ 27, /* 12 - BIDIRECTIONAL */ -1, /* 13 - INVALID */ -1, /* 14 - INVALID */ -1, /* 15 - INVALID */ -1, /* 16 - INVALID */ -1, /* 17 - INVALID */ -1, /* 18 - INVALID */ -1, /* 19 - INVALID */ -1, /* 20 - INVALID */ -1, /* 21 - INVALID */ -1, /* 22 - INVALID */ 32, /* 23 - BIDIRECTIONAL */ 33, /* 24 - BIDIRECTIONAL */ 34, /* 25 - BIDIRECTIONAL */ 35, /* 26 - BIDIRECTIONAL */ 36, /* 27 - BIDIRECTIONAL */ 37, /* 28 - BIDIRECTIONAL */ -1, /* 29 - INVALID */ -1, /* 30 - INVALID */ -1, /* 31 - INVALID */ -1, /* 32 - INVALID */ -1, /* 33 - INVALID */ -1, /* 34 - INVALID */ -1, /* 35 - INVALID */ -1, /* 36 - INVALID */ -1, /* 37 - INVALID */ -1, /* 38 - INVALID */ 42, /* 39 - BIDIRECTIONAL */ 43, /* 40 - BIDIRECTIONAL */ 44, /* 41 - BIDIRECTIONAL */ 45, /* 42 - BIDIRECTIONAL */ 46, /* 43 - BIDIRECTIONAL */ 47, /* 44 - BIDIRECTIONAL */ -1, /* 45 - INVALID */ -1, /* 46 - INVALID */ -1, /* 47 - INVALID */ -1, /* 48 - INVALID */ -1, /* 49 - INVALID */ -1, /* 50 - INVALID */ -1, /* 51 - INVALID */ -1, /* 52 - INVALID */ -1, /* 53 - INVALID */ -1, /* 54 - INVALID */ 52, /* 55 - BIDIRECTIONAL */ 53, /* 56 - BIDIRECTIONAL */ 54, /* 57 - BIDIRECTIONAL */ 55, /* 58 - BIDIRECTIONAL */ 56, /* 59 - BIDIRECTIONAL */ 57, /* 60 - BIDIRECTIONAL */ -1, /* 61 - INVALID */ -1, /* 62 - INVALID */ -1, /* 63 - INVALID */ }; #endif ```
/content/code_sandbox/drivers/dai/intel/alh/alh_map.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
843
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_spi #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(spi_numaker, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" #include <NuMicro.h> #define SPI_NUMAKER_TX_NOP 0x00 struct spi_numaker_config { SPI_T *spi; bool is_qspi; const struct reset_dt_spec reset; /* clock configuration */ uint32_t clk_modidx; uint32_t clk_src; uint32_t clk_div; const struct device *clk_dev; const struct pinctrl_dev_config *pincfg; }; struct spi_numaker_data { struct spi_context ctx; }; /* * CPOL/CPHA = 0/0 --> SPI_MODE_0 * CPOL/CPHA = 0/1 --> SPI_MODE_1 * CPOL/CPHA = 1/0 --> SPI_MODE_2 * CPOL/CPHA = 1/1 --> SPI_MODE_3 */ static const uint32_t smode_tbl[4] = { SPI_MODE_0, SPI_MODE_1, SPI_MODE_2, SPI_MODE_3 }; static const uint32_t qsmode_tbl[4] = { QSPI_MODE_0, QSPI_MODE_1, QSPI_MODE_2, QSPI_MODE_3 }; static int spi_numaker_configure(const struct device *dev, const struct spi_config *config) { int mode; struct spi_numaker_data *data = dev->data; const struct spi_numaker_config *dev_cfg = dev->config; LOG_DBG("%s", __func__); if (spi_context_configured(&data->ctx, config)) { return 0; } if (SPI_MODE_GET(config->operation) & SPI_MODE_LOOP) { LOG_ERR("Loop back mode not support"); return -ENOTSUP; } if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_SLAVE) { LOG_ERR("Slave mode not support"); return -ENOTSUP; } /* Clear FIFO */ SPI_ClearRxFIFO(dev_cfg->spi); SPI_ClearTxFIFO(dev_cfg->spi); if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) { mode = (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) ? 3 : 2; } else { mode = (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) ? 
1 : 0; } /* Make SPI module be ready to transfer */ if (dev_cfg->is_qspi) { QSPI_Open((QSPI_T *)dev_cfg->spi, (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_SLAVE) ? QSPI_SLAVE : QSPI_MASTER, qsmode_tbl[mode], SPI_WORD_SIZE_GET(config->operation), config->frequency); } else { SPI_Open(dev_cfg->spi, (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_SLAVE) ? SPI_SLAVE : SPI_MASTER, smode_tbl[mode], SPI_WORD_SIZE_GET(config->operation), config->frequency); } /* Set Transfer LSB or MSB first */ if ((config->operation) & SPI_TRANSFER_LSB) { SPI_SET_LSB_FIRST(dev_cfg->spi); } else { SPI_SET_MSB_FIRST(dev_cfg->spi); } /* full/half duplex */ if (config->operation & SPI_HALF_DUPLEX) { /* half duplex, which results in 3-wire usage */ SPI_ENABLE_3WIRE_MODE(dev_cfg->spi); } else { /* full duplex */ SPI_DISABLE_3WIRE_MODE(dev_cfg->spi); } /* Active high CS logic */ if (config->operation & SPI_CS_ACTIVE_HIGH) { SPI_SET_SS_HIGH(dev_cfg->spi); } else { SPI_SET_SS_LOW(dev_cfg->spi); } /* Enable the automatic hardware slave select function. Select the SS pin and configure as * low-active. 
*/ if (data->ctx.num_cs_gpios != 0) { SPI_EnableAutoSS(dev_cfg->spi, SPI_SS, SPI_SS_ACTIVE_LOW); } else { SPI_DisableAutoSS(dev_cfg->spi); } /* Be able to set TX/RX FIFO threshold, for ex: SPI_SetFIFO(dev_cfg->spi, 2, 2) */ data->ctx.config = config; return 0; } static int spi_numaker_txrx(const struct device *dev) { struct spi_numaker_data *data = dev->data; const struct spi_numaker_config *dev_cfg = dev->config; struct spi_context *ctx = &data->ctx; uint32_t tx_frame, rx_frame; uint8_t word_size, spi_dfs; uint32_t time_out_cnt; LOG_DBG("%s", __func__); word_size = SPI_WORD_SIZE_GET(ctx->config->operation); switch (word_size) { case 8: spi_dfs = 1; break; case 16: spi_dfs = 2; break; case 24: spi_dfs = 3; break; case 32: spi_dfs = 4; break; default: spi_dfs = 0; LOG_ERR("Not support SPI WORD size as [%d] bits", word_size); return -EIO; } LOG_DBG("%s -->word_size [%d]", __func__, word_size); if (spi_context_tx_on(ctx)) { tx_frame = ((ctx->tx_buf == NULL) ? SPI_NUMAKER_TX_NOP : UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf))); /* Write to TX register */ SPI_WRITE_TX(dev_cfg->spi, tx_frame); spi_context_update_tx(ctx, spi_dfs, 1); /* Check SPI busy status */ time_out_cnt = SystemCoreClock; /* 1 second time-out */ while (SPI_IS_BUSY(dev_cfg->spi)) { if (--time_out_cnt == 0) { LOG_ERR("Wait for SPI time-out"); return -EIO; } } LOG_DBG("%s --> TX [0x%x] done", __func__, tx_frame); } else { /* Write dummy data to TX register */ SPI_WRITE_TX(dev_cfg->spi, 0x00U); time_out_cnt = SystemCoreClock; /* 1 second time-out */ while (SPI_IS_BUSY(dev_cfg->spi)) { if (--time_out_cnt == 0) { LOG_ERR("Wait for SPI time-out"); return -EIO; } } } /* Read received data */ if (spi_context_rx_on(ctx)) { if (SPI_GET_RX_FIFO_COUNT(dev_cfg->spi) > 0) { rx_frame = SPI_READ_RX(dev_cfg->spi); if (ctx->rx_buf != NULL) { UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf); } spi_context_update_rx(ctx, spi_dfs, 1); LOG_DBG("%s --> RX [0x%x] done", __func__, rx_frame); } } LOG_DBG("%s --> exit", 
__func__); return 0; } /* Remain TX/RX Data in spi_context TX/RX buffer */ static bool spi_numaker_remain_words(struct spi_numaker_data *data) { return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx); } static int spi_numaker_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { struct spi_numaker_data *data = dev->data; struct spi_context *ctx = &data->ctx; const struct spi_numaker_config *dev_cfg = dev->config; int ret; LOG_DBG("%s", __func__); spi_context_lock(ctx, false, NULL, NULL, config); ctx->config = config; ret = spi_numaker_configure(dev, config); if (ret < 0) { goto done; } SPI_ENABLE(dev_cfg->spi); spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); /* if cs is defined: software cs control, set active true */ if (spi_cs_is_gpio(config)) { spi_context_cs_control(&data->ctx, true); } /* transceive tx/rx data */ do { ret = spi_numaker_txrx(dev); if (ret < 0) { break; } } while (spi_numaker_remain_words(data)); /* if cs is defined: software cs control, set active false */ if (spi_cs_is_gpio(config)) { spi_context_cs_control(&data->ctx, false); } SPI_DISABLE(dev_cfg->spi); done: spi_context_release(ctx, ret); LOG_DBG("%s --> [%d]", __func__, ret); return ret; } static int spi_numaker_release(const struct device *dev, const struct spi_config *config) { struct spi_numaker_data *data = dev->data; struct spi_context *ctx = &data->ctx; if (!spi_context_configured(ctx, config)) { return -EINVAL; } spi_context_unlock_unconditionally(ctx); return 0; } static const struct spi_driver_api spi_numaker_driver_api = { .transceive = spi_numaker_transceive, .release = spi_numaker_release }; static int spi_numaker_init(const struct device *dev) { struct spi_numaker_data *data = dev->data; const struct spi_numaker_config *dev_cfg = dev->config; int err = 0; struct numaker_scc_subsys scc_subsys; SYS_UnlockReg(); /* CLK controller */ memset(&scc_subsys, 0x00, 
sizeof(scc_subsys)); scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC; scc_subsys.pcc.clk_modidx = dev_cfg->clk_modidx; scc_subsys.pcc.clk_src = dev_cfg->clk_src; scc_subsys.pcc.clk_div = dev_cfg->clk_div; /* Equivalent to CLK_EnableModuleClock() */ err = clock_control_on(dev_cfg->clk_dev, (clock_control_subsys_t)&scc_subsys); if (err != 0) { goto done; } /* Equivalent to CLK_SetModuleClock() */ err = clock_control_configure(dev_cfg->clk_dev, (clock_control_subsys_t)&scc_subsys, NULL); if (err != 0) { goto done; } err = pinctrl_apply_state(dev_cfg->pincfg, PINCTRL_STATE_DEFAULT); if (err) { LOG_ERR("Failed to apply pinctrl state"); goto done; } err = spi_context_cs_configure_all(&data->ctx); if (err < 0) { goto done; } spi_context_unlock_unconditionally(&data->ctx); /* Reset this module, same as BSP's SYS_ResetModule(id_rst) */ if (!device_is_ready(dev_cfg->reset.dev)) { LOG_ERR("reset controller not ready"); err = -ENODEV; goto done; } /* Reset SPI to default state */ reset_line_toggle_dt(&dev_cfg->reset); done: SYS_LockReg(); return err; } #define NUMAKER_SPI_INIT(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ static struct spi_numaker_data spi_numaker_data_##inst = { \ SPI_CONTEXT_INIT_LOCK(spi_numaker_data_##inst, ctx), \ SPI_CONTEXT_INIT_SYNC(spi_numaker_data_##inst, ctx), \ SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(inst), ctx)}; \ static struct spi_numaker_config spi_numaker_config_##inst = { \ .spi = (SPI_T *)DT_INST_REG_ADDR(inst), \ .is_qspi = DT_INST_NODE_HAS_PROP(inst, qspi), \ .reset = RESET_DT_SPEC_INST_GET(inst), \ .clk_modidx = DT_INST_CLOCKS_CELL(inst, clock_module_index), \ .clk_src = DT_INST_CLOCKS_CELL(inst, clock_source), \ .clk_div = DT_INST_CLOCKS_CELL(inst, clock_divider), \ .clk_dev = DEVICE_DT_GET(DT_PARENT(DT_INST_CLOCKS_CTLR(inst))), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ }; \ DEVICE_DT_INST_DEFINE(inst, spi_numaker_init, NULL, &spi_numaker_data_##inst, \ &spi_numaker_config_##inst, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \ 
&spi_numaker_driver_api); DT_INST_FOREACH_STATUS_OKAY(NUMAKER_SPI_INIT) ```
/content/code_sandbox/drivers/spi/spi_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,852
```objective-c /* * */ #ifndef __INTEL_DAI_DRIVER_ALH_H__ #define __INTEL_DAI_DRIVER_ALH_H__ #include <stdint.h> #include <zephyr/drivers/dai.h> #include "alh_map.h" #define DAI_NUM_ALH_BI_DIR_LINKS_GROUP 4 #define ALH_STREAM_OFFSET 0x4 #define IPC4_ALH_MAX_NUMBER_OF_GTW 16 #define IPC4_ALH_DAI_INDEX_OFFSET 7 /* copier id = (group id << 4) + codec id + IPC4_ALH_DAI_INDEX_OFFSET * dai_index = (group id << 8) + codec id; */ #define IPC4_ALH_DAI_INDEX(x) ((((x) & 0xF0) << DAI_NUM_ALH_BI_DIR_LINKS_GROUP) + \ (((x) & 0xF) - IPC4_ALH_DAI_INDEX_OFFSET)) #define ALH_GPDMA_BURST_LENGTH 4 #define ALH_SET_BITS(b_hi, b_lo, x) \ (((x) & ((1ULL << ((b_hi) - (b_lo) + 1ULL)) - 1ULL)) << (b_lo)) #define ALHASCTL_OSEL(x) ALH_SET_BITS(25, 24, x) #define dai_get_drvdata(dai) &dai->priv_data #define dai_base(dai) dai->plat_data.base #define DAI_DIR_PLAYBACK 0 #define DAI_DIR_CAPTURE 1 #define ALH_CHANNELS_DEFAULT 2 #define ALH_RATE_DEFAULT 48000 #define ALH_WORD_SIZE_DEFAULT 32 #if CONFIG_INTEL_ADSP_CAVS #define ALH_TXDA_OFFSET 0x400 #define ALH_RXDA_OFFSET 0x500 #else #define ALH_TXDA_OFFSET 0 #define ALH_RXDA_OFFSET 0x100 #endif union dai_intel_ipc4_gateway_attributes { /**< Raw value */ uint32_t dw; /**< Access to the fields */ struct { /**< Gateway data requested in low power memory. */ uint32_t lp_buffer_alloc : 1; /**< Gateway data requested in register file memory. */ uint32_t alloc_from_reg_file : 1; /**< Reserved field */ uint32_t _rsvd : 30; } bits; /**<< Bits */ } __packed; /* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */ struct dai_intel_ipc3_alh_params { uint32_t reserved0; uint32_t stream_id; uint32_t rate; uint32_t channels; /* reserved for future use */ uint32_t reserved[13]; } __packed; struct ipc4_alh_multi_gtw_cfg { /* Number of single channels (valid items in mapping array). */ uint32_t count; /* Single to multi aggregation mapping item. */ struct { /* Vindex of a single ALH channel aggregated. 
*/ uint32_t alh_id; /* Channel mask */ uint32_t channel_mask; } mapping[IPC4_ALH_MAX_NUMBER_OF_GTW]; /* < Mapping items */ } __packed; struct dai_intel_ipc4_alh_configuration_blob { union dai_intel_ipc4_gateway_attributes gtw_attributes; struct ipc4_alh_multi_gtw_cfg alh_cfg; } __packed; struct dai_intel_alh_plat_data { uint32_t base; uint32_t fifo_depth[2]; }; struct dai_intel_alh_pdata { struct dai_config config; struct dai_properties props; struct dai_intel_ipc3_alh_params params; }; struct dai_intel_alh { uint32_t index; /**< index */ struct dai_intel_alh_plat_data plat_data; struct dai_intel_alh_pdata priv_data; }; /* Common data for all ALH DAI instances */ struct dai_alh_global_shared { struct k_spinlock lock; /**< locking mechanism */ int sref; /**< simple ref counter, guarded by lock */ }; #endif ```
/content/code_sandbox/drivers/dai/intel/alh/alh.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
851
```unknown # SOF ALH configuration options config DAI_INTEL_ALH bool "Intel ALH driver for Dai interface" default y depends on DT_HAS_INTEL_ALH_DAI_ENABLED select DMA help Select this to enable Intel ALH driver. The ALH is an intermediary device, which acts as a hub and provides an abstracted support for numerous sound interfaces (e.g. SoundWire). if DAI_INTEL_ALH config DAI_ALH_HAS_OWNERSHIP bool "Intel ALH driver has ownership only on ACE 1.5" default y depends on SOC_INTEL_ACE15_MTPM help Select this to enable programming HW ownership endif ```
/content/code_sandbox/drivers/dai/intel/alh/Kconfig.alh
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
155
```unknown # SOF SSP configuration options config DAI_INTEL_SSP bool "Intel I2S (SSP) Bus Driver for Dai interface" default y depends on DT_HAS_INTEL_SSP_DAI_ENABLED select DMA depends on PM_DEVICE_RUNTIME help Enable Inter Sound (I2S) bus driver based on Synchronous Serial Port (SSP) module. config DAI_SSP_HAS_POWER_CONTROL bool "DAI ssp pm_runtime en/dis ssp power" default y if SOC_SERIES_INTEL_ADSP_ACE depends on DAI_INTEL_SSP if DAI_INTEL_SSP config DAI_SSP_CLK_FORCE_DYNAMIC_CLOCK_GATING bool config DAI_INTEL_SSP_NUM_BASE int default 6 config DAI_INTEL_SSP_NUM_EXT int default 0 endif ```
/content/code_sandbox/drivers/dai/intel/ssp/Kconfig.ssp
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
187
```c /* * */ #include <errno.h> #include <stdbool.h> #include <stdint.h> #include <zephyr/spinlock.h> #include <zephyr/devicetree.h> #include <zephyr/logging/log.h> #ifdef CONFIG_SOC_SERIES_INTEL_CAVS_V25 #include <adsp_shim.h> #endif #define DT_DRV_COMPAT intel_alh_dai #define LOG_DOMAIN dai_intel_alh LOG_MODULE_REGISTER(LOG_DOMAIN); #include "alh.h" /* global data shared between all alh instances */ struct dai_alh_global_shared dai_alh_global; /* Digital Audio interface formatting */ static int dai_alh_set_config_tplg(struct dai_intel_alh *dp, const void *spec_config) { struct dai_intel_alh_pdata *alh = dai_get_drvdata(dp); const struct dai_intel_ipc3_alh_params *config = spec_config; if (config->channels && config->rate) { alh->params.channels = config->channels; alh->params.rate = config->rate; LOG_INF("%s channels %d rate %d", __func__, config->channels, config->rate); } alh->params.stream_id = config->stream_id; return 0; } static int dai_alh_set_config_blob(struct dai_intel_alh *dp, const struct dai_config *cfg, const void *spec_config) { struct dai_intel_alh_pdata *alh = dai_get_drvdata(dp); const struct dai_intel_ipc4_alh_configuration_blob *blob = spec_config; const struct ipc4_alh_multi_gtw_cfg *alh_cfg = &blob->alh_cfg; alh->params.rate = cfg->rate; for (int i = 0; i < alh_cfg->count; i++) { /* the LSB 8bits are for stream id */ int alh_id = alh_cfg->mapping[i].alh_id & 0xff; if (IPC4_ALH_DAI_INDEX(alh_id) == dp->index) { alh->params.stream_id = alh_id; alh->params.channels = POPCOUNT(alh_cfg->mapping[i].channel_mask); break; } } return 0; } static int dai_alh_trigger(const struct device *dev, enum dai_dir dir, enum dai_trigger_cmd cmd) { LOG_DBG("cmd %d", cmd); return 0; } static void alh_claim_ownership(void) { #if CONFIG_DAI_ALH_HAS_OWNERSHIP uint32_t ALHASCTL = DT_INST_PROP_BY_IDX(0, reg, 0); uint32_t ALHCSCTL = DT_INST_PROP_BY_IDX(0, reg, 1); sys_write32(sys_read32(ALHASCTL) | ALHASCTL_OSEL(0x3), ALHASCTL); sys_write32(sys_read32(ALHCSCTL) | 
ALHASCTL_OSEL(0x3), ALHCSCTL); #endif #ifdef CONFIG_SOC_SERIES_INTEL_CAVS_V25 /* Allow LPGPDMA connection to Audio Link Hub */ sys_set_bits(ADSP_DSPALHO_ADDRESS, DSPALHO_ASO_FLAG | DSPALHO_CSO_FLAG); #endif } static void alh_release_ownership(void) { #if CONFIG_DAI_ALH_HAS_OWNERSHIP uint32_t ALHASCTL = DT_INST_PROP_BY_IDX(0, reg, 0); uint32_t ALHCSCTL = DT_INST_PROP_BY_IDX(0, reg, 1); sys_write32(sys_read32(ALHASCTL) | ALHASCTL_OSEL(0), ALHASCTL); sys_write32(sys_read32(ALHCSCTL) | ALHASCTL_OSEL(0), ALHCSCTL); #endif #ifdef CONFIG_SOC_SERIES_INTEL_CAVS_V25 sys_clear_bits(ADSP_DSPALHO_ADDRESS, DSPALHO_ASO_FLAG | DSPALHO_CSO_FLAG); #endif } static int dai_alh_config_get(const struct device *dev, struct dai_config *cfg, enum dai_dir dir) { struct dai_config *params = (struct dai_config *)dev->config; struct dai_intel_alh *dp = (struct dai_intel_alh *)dev->data; struct dai_intel_alh_pdata *alh = dai_get_drvdata(dp); if (!cfg) { return -EINVAL; } params->rate = alh->params.rate; params->channels = alh->params.channels; params->word_size = ALH_WORD_SIZE_DEFAULT; *cfg = *params; return 0; } static int dai_alh_config_set(const struct device *dev, const struct dai_config *cfg, const void *bespoke_cfg) { struct dai_intel_alh *dp = (struct dai_intel_alh *)dev->data; LOG_DBG("%s", __func__); if (cfg->type == DAI_INTEL_ALH) { return dai_alh_set_config_tplg(dp, bespoke_cfg); } else { return dai_alh_set_config_blob(dp, cfg, bespoke_cfg); } } static const struct dai_properties *dai_alh_get_properties(const struct device *dev, enum dai_dir dir, int stream_id) { struct dai_intel_alh *dp = (struct dai_intel_alh *)dev->data; struct dai_intel_alh_pdata *alh = dai_get_drvdata(dp); struct dai_properties *prop = &alh->props; uint32_t offset = dir == DAI_DIR_PLAYBACK ? 
ALH_TXDA_OFFSET : ALH_RXDA_OFFSET; prop->fifo_address = dai_base(dp) + offset + ALH_STREAM_OFFSET * stream_id; prop->fifo_depth = ALH_GPDMA_BURST_LENGTH; prop->dma_hs_id = alh_handshake_map[stream_id]; prop->stream_id = alh->params.stream_id; LOG_DBG("dai_index %u", dp->index); LOG_DBG("fifo %u", prop->fifo_address); LOG_DBG("handshake %u", prop->dma_hs_id); return prop; } static int dai_alh_probe(const struct device *dev) { k_spinlock_key_t key; LOG_DBG("%s", __func__); key = k_spin_lock(&dai_alh_global.lock); if (dai_alh_global.sref == 0) { alh_claim_ownership(); } dai_alh_global.sref++; k_spin_unlock(&dai_alh_global.lock, key); return 0; } static int dai_alh_remove(const struct device *dev) { k_spinlock_key_t key; LOG_DBG("%s", __func__); key = k_spin_lock(&dai_alh_global.lock); if (--dai_alh_global.sref == 0) { alh_release_ownership(); } k_spin_unlock(&dai_alh_global.lock, key); return 0; } static const struct dai_driver_api dai_intel_alh_api_funcs = { .probe = dai_alh_probe, .remove = dai_alh_remove, .config_set = dai_alh_config_set, .config_get = dai_alh_config_get, .trigger = dai_alh_trigger, .get_properties = dai_alh_get_properties, }; #define DAI_INTEL_ALH_DEVICE_INIT(n) \ static struct dai_config dai_intel_alh_config_##n = { \ .type = DAI_INTEL_ALH, \ .dai_index = (n / DAI_NUM_ALH_BI_DIR_LINKS_GROUP) << 8 | \ (n % DAI_NUM_ALH_BI_DIR_LINKS_GROUP), \ }; \ static struct dai_intel_alh dai_intel_alh_data_##n = { \ .index = (n / DAI_NUM_ALH_BI_DIR_LINKS_GROUP) << 8 | \ (n % DAI_NUM_ALH_BI_DIR_LINKS_GROUP), \ .plat_data = { \ .base = DT_INST_PROP_BY_IDX(n, reg, 0), \ .fifo_depth[DAI_DIR_PLAYBACK] = ALH_GPDMA_BURST_LENGTH, \ .fifo_depth[DAI_DIR_CAPTURE] = ALH_GPDMA_BURST_LENGTH, \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ NULL, NULL, \ &dai_intel_alh_data_##n, \ &dai_intel_alh_config_##n, \ POST_KERNEL, 32, \ &dai_intel_alh_api_funcs); DT_INST_FOREACH_STATUS_OKAY(DAI_INTEL_ALH_DEVICE_INIT) ```
/content/code_sandbox/drivers/dai/intel/alh/alh.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,889
```objective-c /* * */ #ifndef __INTEL_DAI_DRIVER_SSP_REGSV2_H__ #define __INTEL_DAI_DRIVER_SSP_REGSV2_H__ /* SSP register offsets */ #define SSCR0 0x00 #define SSCR1 0x04 #define SSSR 0x08 #define SSITR 0x0C #define SSTO 0x28 #define SSPSP 0x2C #define SSTSS 0x38 #define SSCR2 0x40 #define SSPSP2 0x44 #define SSIOC 0x4C #define SSGFS 0x50 #define SSDR 0x10 /* Not PTL */ #define SSTSA 0x30 /* Not PTL */ #define SSRSA 0x34 /* Not PTL */ #define OUT_FIFO SSDR #define IN_FIFO SSDR /* SSCR0 bits */ #define SSCR0_DSIZE(x) DAI_INTEL_SSP_SET_BITS(3, 0, (x) - 1) #define SSCR0_DSIZE_GET(x) (((x) & DAI_INTEL_SSP_MASK(3, 0)) + 1) #define SSCR0_FRF DAI_INTEL_SSP_MASK(5, 4) #define SSCR0_MOT DAI_INTEL_SSP_SET_BITS(5, 4, 0) #define SSCR0_TI DAI_INTEL_SSP_SET_BITS(5, 4, 1) #define SSCR0_NAT DAI_INTEL_SSP_SET_BITS(5, 4, 2) #define SSCR0_PSP DAI_INTEL_SSP_SET_BITS(5, 4, 3) #define SSCR0_ECS BIT(6) #define SSCR0_SSE BIT(7) #define SSCR0_SCR_MASK DAI_INTEL_SSP_MASK(19, 8) #define SSCR0_SCR(x) DAI_INTEL_SSP_SET_BITS(19, 8, x) #define SSCR0_EDSS BIT(20) #define SSCR0_NCS BIT(21) #define SSCR0_RIM BIT(22) #define SSCR0_TIM BIT(23) #define SSCR0_FRDC(x) DAI_INTEL_SSP_SET_BITS(26, 24, (x) - 1) #define SSCR0_FRDC_GET(x) ((((x) & DAI_INTEL_SSP_MASK(26, 24)) >> 24) + 1) #define SSCR0_ACS BIT(30) #define SSCR0_MOD BIT(31) /* SSCR1 bits */ #define SSCR1_RIE BIT(0) #define SSCR1_TIE BIT(1) #define SSCR1_LBM BIT(2) #define SSCR1_SPO BIT(3) #define SSCR1_SPH BIT(4) #define SSCR1_MWDS BIT(5) #define SSCR1_TFT_MASK DAI_INTEL_SSP_MASK(9, 6) #define SSCR1_TFT(x) DAI_INTEL_SSP_SET_BITS(9, 6, (x) - 1) #define SSCR1_RFT_MASK DAI_INTEL_SSP_MASK(13, 10) #define SSCR1_RFT(x) DAI_INTEL_SSP_SET_BITS(13, 10, (x) - 1) #define SSCR1_EFWR BIT(14) #define SSCR1_STRF BIT(15) #define SSCR1_IFS BIT(16) #define SSCR1_PINTE BIT(18) #define SSCR1_TINTE BIT(19) #define SSCR1_RSRE BIT(20) #define SSCR1_TSRE BIT(21) #define SSCR1_TRAIL BIT(22) #define SSCR1_RWOT BIT(23) #define SSCR1_SFRMDIR BIT(24) #define SSCR1_SCLKDIR 
BIT(25) #define SSCR1_ECRB BIT(26) #define SSCR1_ECRA BIT(27) #define SSCR1_SCFR BIT(28) #define SSCR1_EBCEI BIT(29) #define SSCR1_TTE BIT(30) #define SSCR1_TTELP BIT(31) #define SSCR2_TURM1 BIT(1) #define SSCR2_PSPSRWFDFD BIT(3) #define SSCR2_PSPSTWFDFD BIT(4) #define SSCR2_SDFD BIT(14) #define SSCR2_SDPM BIT(16) #define SSCR2_LJDFD BIT(17) #define SSCR2_MMRATF BIT(18) #define SSCR2_SMTATF BIT(19) #define SSCR2_SFRMEN BIT(20) #define SSCR2_ACIOLBS BIT(21) /* SSR bits */ #define SSSR_TNF BIT(2) #define SSSR_RNE BIT(3) #define SSSR_BSY BIT(4) #define SSSR_TFS BIT(5) #define SSSR_RFS BIT(6) #define SSSR_ROR BIT(7) #define SSSR_TUR BIT(21) /* SSPSP bits */ #define SSPSP_SCMODE(x) DAI_INTEL_SSP_SET_BITS(1, 0, x) #define SSPSP_SFRMP(x) DAI_INTEL_SSP_SET_BIT(2, x) #define SSPSP_ETDS BIT(3) #define SSPSP_STRTDLY(x) DAI_INTEL_SSP_SET_BITS(6, 4, x) #define SSPSP_DMYSTRT(x) DAI_INTEL_SSP_SET_BITS(8, 7, x) #define SSPSP_SFRMDLY(x) DAI_INTEL_SSP_SET_BITS(15, 9, x) #define SSPSP_SFRMWDTH(x) DAI_INTEL_SSP_SET_BITS(21, 16, x) #define SSPSP_DMYSTOP(x) DAI_INTEL_SSP_SET_BITS(24, 23, x) #define SSPSP_DMYSTOP_BITS 2 #define SSPSP_DMYSTOP_MASK DAI_INTEL_SSP_MASK(SSPSP_DMYSTOP_BITS - 1, 0) #define SSPSP_FSRT BIT(25) #define SSPSP_EDMYSTOP(x) DAI_INTEL_SSP_SET_BITS(28, 26, x) #define SSPSP2 0x44 #define SSPSP2_FEP_MASK 0xff #define SSCR3 0x48 #define SSIOC 0x4C #define SSP_REG_MAX SSIOC /* SSTSA bits */ #define SSTSA_SSTSA(x) DAI_INTEL_SSP_SET_BITS(7, 0, x) #define SSTSA_GET(x) ((x) & DAI_INTEL_SSP_MASK(7, 0)) #define SSTSA_TXEN BIT(8) /* SSRSA bits */ #define SSRSA_SSRSA(x) DAI_INTEL_SSP_SET_BITS(7, 0, x) #define SSRSA_GET(x) ((x) & DAI_INTEL_SSP_MASK(7, 0)) #define SSRSA_RXEN BIT(8) /* SSCR3 bits */ #define SSCR3_FRM_MST_EN BIT(0) #define SSCR3_I2S_MODE_EN BIT(1) #define SSCR3_I2S_FRM_POL(x) DAI_INTEL_SSP_SET_BIT(2, x) #define SSCR3_I2S_TX_SS_FIX_EN BIT(3) #define SSCR3_I2S_RX_SS_FIX_EN BIT(4) #define SSCR3_I2S_TX_EN BIT(9) #define SSCR3_I2S_RX_EN BIT(10) #define SSCR3_CLK_EDGE_SEL 
BIT(12) #define SSCR3_STRETCH_TX BIT(14) #define SSCR3_STRETCH_RX BIT(15) #define SSCR3_MST_CLK_EN BIT(16) #define SSCR3_SYN_FIX_EN BIT(17) /* SSCR4 bits */ #define SSCR4_TOT_FRM_PRD(x) ((x) << 7) /* SSCR5 bits */ #define SSCR5_FRM_ASRT_CLOCKS(x) (((x) - 1) << 1) #define SSCR5_FRM_POLARITY(x) DAI_INTEL_SSP_SET_BIT(0, x) /* SFIFOTT bits */ #define SFIFOTT_TX(x) ((x) - 1) #define SFIFOTT_RX(x) (((x) - 1) << 16) /* SFIFOL bits */ #define SFIFOL_TFL(x) ((x) & 0xFFFF) #define SFIFOL_RFL(x) ((x) >> 16) #define SSTSA_TSEN BIT(8) #define SSRSA_RSEN BIT(8) #define SSCR3_TFL_MASK DAI_INTEL_SSP_MASK(5, 0) #define SSCR3_RFL_MASK DAI_INTEL_SSP_MASK(13, 8) #define SSCR3_TFL_VAL(scr3_val) (((scr3_val) >> 0) & DAI_INTEL_SSP_MASK(5, 0)) #define SSCR3_RFL_VAL(scr3_val) (((scr3_val) >> 8) & DAI_INTEL_SSP_MASK(5, 0)) #define SSCR3_TX(x) DAI_INTEL_SSP_SET_BITS(21, 16, (x) - 1) #define SSCR3_RX(x) DAI_INTEL_SSP_SET_BITS(29, 24, (x) - 1) #define SSIOC_TXDPDEB BIT(1) #define SSIOC_SFCR BIT(4) #define SSIOC_SCOE BIT(5) /* SSMIDyCS */ #define SSMIDyCS_RXEN BIT(0) #define SSMIDyCS_RSRE BIT(1) #define SSMIDyCS_RFL DAI_INTEL_SSP_MASK(23, 16) #define SSMIDyCS_RFL_VAL(rfl_val) (((rfl_val) >> 16) & DAI_INTEL_SSP_MASK(7, 0)) #define SSMIDyCS_RNE BIT(26) #define SSMIDyCS_RFS BIT(27) #define SSMIDyCS_ROR BIT(28) #define SSMIDyCS_PINT BIT(29) #define SSMIDyCS_TINT BIT(30) #define SSMIDyCS_EOC BIT(31) /* SSMIDyTSA */ #define SSMIDyTSA_RTSA DAI_INTEL_SSP_MASK(63, 0) #define SSMIDyTSA_SRTSA(x) DAI_INTEL_SSP_MASK(63, 0, x) /* SSMODyCS */ #define SSMODyCS_TXEN BIT(0) #define SSMODyCS_TSRE BIT(1) #define SSMODyCS_TFL DAI_INTEL_SSP_MASK(23, 16) #define SSMIDyCS_TFL_VAL(rfl_val) (((rfl_val) >> 16) & DAI_INTEL_SSP_MASK(7, 0)) #define SSMODyCS_TNF BIT(26) #define SSMODyCS_TFS BIT(27) #define SSMODyCS_TUR BIT(28) /* SSMODyTSA */ #define SSMODyTSA_TTSA DAI_INTEL_SSP_MASK(63, 0) #define SSMODyTSA_STTSA(x) DAI_INTEL_SSP_MASK(63, 0, x) /* For 8000 Hz rate one sample is transmitted within 125us */ #define 
DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE 125 /* SSP flush retry counts maximum */ #define DAI_INTEL_SSP_RX_FLUSH_RETRY_MAX 16 #define SSP_CLK_MCLK_ES_REQ BIT(0) #define SSP_CLK_MCLK_ACTIVE BIT(1) #define SSP_CLK_BCLK_ES_REQ BIT(2) #define SSP_CLK_BCLK_ACTIVE BIT(3) #define I2SLCTL_OFFSET 0x04 #define I2SLCTL_OFLEN BIT(4) #define I2SLCTL_SPA(x) BIT(16 + x) #define I2SLCTL_CPA(x) BIT(23 + x) #define PCMS0CM_OFFSET 0x16 #define PCMS1CM_OFFSET 0x1A #define I2CLCTL_MLCS(x) DAI_INTEL_SSP_SET_BITS(29, 27, x) #define SHIM_CLKCTL 0x78 #define SHIM_CLKCTL_I2SFDCGB(x) BIT(20 + x) #define SHIM_CLKCTL_I2SEFDCGB(x) BIT(18 + x) /** \brief Offset of MCLK Divider Control Register. */ #define MN_MDIVCTRL 0x100 /** \brief Offset of MCLK Divider x Ratio Register. */ #define MN_MDIVR(x) (0x180 + (x) * 0x4) /** \brief Enables the output of MCLK Divider. */ #define MN_MDIVCTRL_M_DIV_ENABLE(x) BIT(x) /** \brief Bits for setting MCLK source clock. */ #define MCDSS(x) DAI_INTEL_SSP_SET_BITS(17, 16, x) /** \brief Offset of BCLK x M/N Divider M Value Register. */ #define MN_MDIV_M_VAL(x) (0x100 + (x) * 0x8 + 0x0) /** \brief Offset of BCLK x M/N Divider N Value Register. */ #define MN_MDIV_N_VAL(x) (0x100 + (x) * 0x8 + 0x4) /** \brief Bits for setting M/N source clock. */ #define MNDSS(x) DAI_INTEL_SSP_SET_BITS(21, 20, x) /** \brief Mask for clearing mclk and bclk source in MN_MDIVCTRL */ #define MN_SOURCE_CLKS_MASK 0x3 #endif /* __INTEL_DAI_DRIVER_SSP_REGSV2_H__ */ ```
/content/code_sandbox/drivers/dai/intel/ssp/ssp_regs_v2.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,144
```objective-c /* * */ #ifndef __INTEL_DAI_DRIVER_SSP_H__ #define __INTEL_DAI_DRIVER_SSP_H__ #include <stdint.h> #include <zephyr/drivers/dai.h> #include "dai-params-intel-ipc3.h" #include "dai-params-intel-ipc4.h" #define DAI_INTEL_SSP_MASK(b_hi, b_lo) \ (((1ULL << ((b_hi) - (b_lo) + 1ULL)) - 1ULL) << (b_lo)) #define DAI_INTEL_SSP_SET_BIT(b, x) (((x) & 1) << (b)) #define DAI_INTEL_SSP_SET_BITS(b_hi, b_lo, x) \ (((x) & ((1ULL << ((b_hi) - (b_lo) + 1ULL)) - 1ULL)) << (b_lo)) #define DAI_INTEL_SSP_GET_BIT(b, x) \ (((x) & (1ULL << (b))) >> (b)) #define DAI_INTEL_SSP_GET_BITS(b_hi, b_lo, x) \ (((x) & MASK(b_hi, b_lo)) >> (b_lo)) #define DAI_INTEL_SSP_IS_BIT_SET(reg, bit) (((reg >> bit) & (0x1)) != 0) /* ssp_freq array constants */ #define DAI_INTEL_SSP_NUM_FREQ 3 #define DAI_INTEL_SSP_MAX_FREQ_INDEX (DAI_INTEL_SSP_NUM_FREQ - 1) #define DAI_INTEL_SSP_DEFAULT_IDX 1 /* the SSP port fifo depth */ #define DAI_INTEL_SSP_FIFO_DEPTH 32 /* the watermark for the SSP fifo depth setting */ #define DAI_INTEL_SSP_FIFO_WATERMARK 8 /* minimal SSP port delay in cycles */ #define DAI_INTEL_SSP_PLATFORM_DELAY 1600 /* minimal SSP port delay in useconds */ #define DAI_INTEL_SSP_PLATFORM_DELAY_US 42 #define DAI_INTEL_SSP_PLATFORM_DEFAULT_DELAY 12 #define DAI_INTEL_SSP_DEFAULT_TRY_TIMES 8 /** \brief Number of SSP MCLKs available */ #define DAI_INTEL_SSP_NUM_MCLK 2 #define DAI_INTEL_SSP_CLOCK_XTAL_OSCILLATOR 0x0 #define DAI_INTEL_SSP_CLOCK_AUDIO_CARDINAL 0x1 #define DAI_INTEL_SSP_CLOCK_PLL_FIXED 0x2 #if defined(CONFIG_SOC_INTEL_ACE15_MTPM) || defined(CONFIG_SOC_SERIES_INTEL_ADSP_CAVS) #include "ssp_regs_v1.h" #elif defined(CONFIG_SOC_INTEL_ACE20_LNL) #include "ssp_regs_v2.h" #elif defined(CONFIG_SOC_INTEL_ACE30_PTL) #include "ssp_regs_v3.h" #else #error "Missing ssp definitions" #endif #if CONFIG_INTEL_MN /** \brief BCLKs can be driven by multiple sources - M/N or XTAL directly. * Even in the case of M/N, the actual clock source can be XTAL, * Audio cardinal clock (24.576) or 96 MHz PLL. 
* The MN block is not really the source of clocks, but rather * an intermediate component. * Input for source is shared by all outputs coming from that source * and once it's in use, it can be adjusted only with dividers. * In order to change input, the source should not be in use, that's why * it's necessary to keep track of BCLKs sources to know when it's safe * to change shared input clock. */ enum bclk_source { MN_BCLK_SOURCE_NONE = 0, /**< port is not using any clock */ MN_BCLK_SOURCE_MN, /**< port is using clock driven by M/N */ MN_BCLK_SOURCE_XTAL, /**< port is using XTAL directly */ }; #endif struct dai_intel_ssp_mn { uint32_t base; /**< keep track of which MCLKs are in use to know when it's safe to * change shared clock */ int mclk_sources_ref[DAI_INTEL_SSP_NUM_MCLK]; int mclk_rate[DAI_INTEL_SSP_NUM_MCLK]; int mclk_source_clock; #if CONFIG_INTEL_MN enum bclk_source bclk_sources[(CONFIG_DAI_INTEL_SSP_NUM_BASE + CONFIG_DAI_INTEL_SSP_NUM_EXT)]; int bclk_source_mn_clock; #endif struct k_spinlock lock; /**< lock mechanism */ }; struct dai_intel_ssp_freq_table { uint32_t freq; uint32_t ticks_per_msec; }; struct dai_intel_ssp_plat_fifo_data { uint32_t offset; uint32_t width; uint32_t depth; uint32_t watermark; uint32_t handshake; }; struct dai_intel_ssp_plat_data { uint32_t ssp_index; int acquire_count; bool is_initialized; bool is_power_en; uint32_t base; uint32_t ip_base; uint32_t shim_base; #if defined(CONFIG_SOC_INTEL_ACE20_LNL) || defined(CONFIG_SOC_INTEL_ACE30_PTL) uint32_t hdamlssp_base; uint32_t i2svss_base; #endif int irq; const char *irq_name; uint32_t flags; struct dai_intel_ssp_plat_fifo_data fifo[2]; struct dai_intel_ssp_mn *mn_inst; struct dai_intel_ssp_freq_table *ftable; uint32_t *fsources; uint32_t clk_active; struct dai_intel_ipc3_ssp_params params; }; struct dai_intel_ssp_pdata { uint32_t sscr0; uint32_t sscr1; uint32_t psp; struct dai_config config; struct dai_properties props; }; struct dai_intel_ssp { uint32_t dai_index; uint32_t ssp_index; 
uint32_t tdm_slot_group; uint32_t state[2]; struct k_spinlock lock; /**< locking mechanism */ int sref; /**< simple ref counter, guarded by lock */ struct dai_intel_ssp_plat_data *ssp_plat_data; void *priv_data; }; #endif ```
/content/code_sandbox/drivers/dai/intel/ssp/ssp.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,334
```objective-c /* * */ #ifndef __INTEL_DAI_DRIVER_SSP_REGSV1_H__ #define __INTEL_DAI_DRIVER_SSP_REGSV1_H__ /* SSP register offsets */ #define SSCR0 0x00 #define SSCR1 0x04 #define SSSR 0x08 #define SSITR 0x0C #define SSTO 0x28 #define SSPSP 0x2C #define SSTSS 0x38 #define SSCR2 0x40 #define SSPSP2 0x44 #define SSIOC 0x4C #define SSGFS 0x50 #define SSDR 0x10 /* Not PTL */ #define SSTSA 0x30 /* Not PTL */ #define SSRSA 0x34 /* Not PTL */ #define OUT_FIFO SSDR #define IN_FIFO SSDR /* SSCR0 bits */ #define SSCR0_DSIZE(x) DAI_INTEL_SSP_SET_BITS(3, 0, (x) - 1) #define SSCR0_DSIZE_GET(x) (((x) & DAI_INTEL_SSP_MASK(3, 0)) + 1) #define SSCR0_FRF DAI_INTEL_SSP_MASK(5, 4) #define SSCR0_MOT DAI_INTEL_SSP_SET_BITS(5, 4, 0) #define SSCR0_TI DAI_INTEL_SSP_SET_BITS(5, 4, 1) #define SSCR0_NAT DAI_INTEL_SSP_SET_BITS(5, 4, 2) #define SSCR0_PSP DAI_INTEL_SSP_SET_BITS(5, 4, 3) #define SSCR0_ECS BIT(6) #define SSCR0_SSE BIT(7) #define SSCR0_SCR_MASK DAI_INTEL_SSP_MASK(19, 8) #define SSCR0_SCR(x) DAI_INTEL_SSP_SET_BITS(19, 8, x) #define SSCR0_EDSS BIT(20) #define SSCR0_NCS BIT(21) #define SSCR0_RIM BIT(22) #define SSCR0_TIM BIT(23) #define SSCR0_FRDC(x) DAI_INTEL_SSP_SET_BITS(26, 24, (x) - 1) #define SSCR0_FRDC_GET(x) ((((x) & DAI_INTEL_SSP_MASK(26, 24)) >> 24) + 1) #define SSCR0_ACS BIT(30) #define SSCR0_MOD BIT(31) /* SSCR1 bits */ #define SSCR1_RIE BIT(0) #define SSCR1_TIE BIT(1) #define SSCR1_LBM BIT(2) #define SSCR1_SPO BIT(3) #define SSCR1_SPH BIT(4) #define SSCR1_MWDS BIT(5) #define SSCR1_TFT_MASK DAI_INTEL_SSP_MASK(9, 6) #define SSCR1_TFT(x) DAI_INTEL_SSP_SET_BITS(9, 6, (x) - 1) #define SSCR1_RFT_MASK DAI_INTEL_SSP_MASK(13, 10) #define SSCR1_RFT(x) DAI_INTEL_SSP_SET_BITS(13, 10, (x) - 1) #define SSCR1_EFWR BIT(14) #define SSCR1_STRF BIT(15) #define SSCR1_IFS BIT(16) #define SSCR1_PINTE BIT(18) #define SSCR1_TINTE BIT(19) #define SSCR1_RSRE BIT(20) #define SSCR1_TSRE BIT(21) #define SSCR1_TRAIL BIT(22) #define SSCR1_RWOT BIT(23) #define SSCR1_SFRMDIR BIT(24) #define SSCR1_SCLKDIR 
BIT(25) #define SSCR1_ECRB BIT(26) #define SSCR1_ECRA BIT(27) #define SSCR1_SCFR BIT(28) #define SSCR1_EBCEI BIT(29) #define SSCR1_TTE BIT(30) #define SSCR1_TTELP BIT(31) #define SSCR2_TURM1 BIT(1) #define SSCR2_PSPSRWFDFD BIT(3) #define SSCR2_PSPSTWFDFD BIT(4) #define SSCR2_SDFD BIT(14) #define SSCR2_SDPM BIT(16) #define SSCR2_LJDFD BIT(17) #define SSCR2_MMRATF BIT(18) #define SSCR2_SMTATF BIT(19) #define SSCR2_SFRMEN BIT(20) #define SSCR2_ACIOLBS BIT(21) /* SSR bits */ #define SSSR_TNF BIT(2) #define SSSR_RNE BIT(3) #define SSSR_BSY BIT(4) #define SSSR_TFS BIT(5) #define SSSR_RFS BIT(6) #define SSSR_ROR BIT(7) #define SSSR_TUR BIT(21) /* SSPSP bits */ #define SSPSP_SCMODE(x) DAI_INTEL_SSP_SET_BITS(1, 0, x) #define SSPSP_SFRMP(x) DAI_INTEL_SSP_SET_BIT(2, x) #define SSPSP_ETDS BIT(3) #define SSPSP_STRTDLY(x) DAI_INTEL_SSP_SET_BITS(6, 4, x) #define SSPSP_DMYSTRT(x) DAI_INTEL_SSP_SET_BITS(8, 7, x) #define SSPSP_SFRMDLY(x) DAI_INTEL_SSP_SET_BITS(15, 9, x) #define SSPSP_SFRMWDTH(x) DAI_INTEL_SSP_SET_BITS(21, 16, x) #define SSPSP_DMYSTOP(x) DAI_INTEL_SSP_SET_BITS(24, 23, x) #define SSPSP_DMYSTOP_BITS 2 #define SSPSP_DMYSTOP_MASK DAI_INTEL_SSP_MASK(SSPSP_DMYSTOP_BITS - 1, 0) #define SSPSP_FSRT BIT(25) #define SSPSP_EDMYSTOP(x) DAI_INTEL_SSP_SET_BITS(28, 26, x) #define SSPSP2 0x44 #define SSPSP2_FEP_MASK 0xff #define SSCR3 0x48 #define SSIOC 0x4C #define SSP_REG_MAX SSIOC /* SSTSA bits */ #define SSTSA_SSTSA(x) DAI_INTEL_SSP_SET_BITS(7, 0, x) #define SSTSA_GET(x) ((x) & DAI_INTEL_SSP_MASK(7, 0)) #define SSTSA_TXEN BIT(8) /* SSRSA bits */ #define SSRSA_SSRSA(x) DAI_INTEL_SSP_SET_BITS(7, 0, x) #define SSRSA_GET(x) ((x) & DAI_INTEL_SSP_MASK(7, 0)) #define SSRSA_RXEN BIT(8) /* SSCR3 bits */ #define SSCR3_FRM_MST_EN BIT(0) #define SSCR3_I2S_MODE_EN BIT(1) #define SSCR3_I2S_FRM_POL(x) DAI_INTEL_SSP_SET_BIT(2, x) #define SSCR3_I2S_TX_SS_FIX_EN BIT(3) #define SSCR3_I2S_RX_SS_FIX_EN BIT(4) #define SSCR3_I2S_TX_EN BIT(9) #define SSCR3_I2S_RX_EN BIT(10) #define SSCR3_CLK_EDGE_SEL 
BIT(12) #define SSCR3_STRETCH_TX BIT(14) #define SSCR3_STRETCH_RX BIT(15) #define SSCR3_MST_CLK_EN BIT(16) #define SSCR3_SYN_FIX_EN BIT(17) /* SSCR4 bits */ #define SSCR4_TOT_FRM_PRD(x) ((x) << 7) /* SSCR5 bits */ #define SSCR5_FRM_ASRT_CLOCKS(x) (((x) - 1) << 1) #define SSCR5_FRM_POLARITY(x) DAI_INTEL_SSP_SET_BIT(0, x) /* SFIFOTT bits */ #define SFIFOTT_TX(x) ((x) - 1) #define SFIFOTT_RX(x) (((x) - 1) << 16) /* SFIFOL bits */ #define SFIFOL_TFL(x) ((x) & 0xFFFF) #define SFIFOL_RFL(x) ((x) >> 16) #define SSTSA_TSEN BIT(8) #define SSRSA_RSEN BIT(8) #define SSCR3_TFL_MASK DAI_INTEL_SSP_MASK(5, 0) #define SSCR3_RFL_MASK DAI_INTEL_SSP_MASK(13, 8) #define SSCR3_TFL_VAL(scr3_val) (((scr3_val) >> 0) & DAI_INTEL_SSP_MASK(5, 0)) #define SSCR3_RFL_VAL(scr3_val) (((scr3_val) >> 8) & DAI_INTEL_SSP_MASK(5, 0)) #define SSCR3_TX(x) DAI_INTEL_SSP_SET_BITS(21, 16, (x) - 1) #define SSCR3_RX(x) DAI_INTEL_SSP_SET_BITS(29, 24, (x) - 1) #define SSIOC_TXDPDEB BIT(1) #define SSIOC_SFCR BIT(4) #define SSIOC_SCOE BIT(5) /* SSMIDyCS */ #define SSMIDyCS_RXEN BIT(0) #define SSMIDyCS_RSRE BIT(1) #define SSMIDyCS_RFL DAI_INTEL_SSP_MASK(23, 16) #define SSMIDyCS_RFL_VAL(rfl_val) (((rfl_val) >> 16) & DAI_INTEL_SSP_MASK(7, 0)) #define SSMIDyCS_RNE BIT(26) #define SSMIDyCS_RFS BIT(27) #define SSMIDyCS_ROR BIT(28) #define SSMIDyCS_PINT BIT(29) #define SSMIDyCS_TINT BIT(30) #define SSMIDyCS_EOC BIT(31) /* SSMIDyTSA */ #define SSMIDyTSA_RTSA DAI_INTEL_SSP_MASK(63, 0) #define SSMIDyTSA_SRTSA(x) DAI_INTEL_SSP_MASK(63, 0, x) /* SSMODyCS */ #define SSMODyCS_TXEN BIT(0) #define SSMODyCS_TSRE BIT(1) #define SSMODyCS_TFL DAI_INTEL_SSP_MASK(23, 16) #define SSMIDyCS_TFL_VAL(rfl_val) (((rfl_val) >> 16) & DAI_INTEL_SSP_MASK(7, 0)) #define SSMODyCS_TNF BIT(26) #define SSMODyCS_TFS BIT(27) #define SSMODyCS_TUR BIT(28) /* SSMODyTSA */ #define SSMODyTSA_TTSA DAI_INTEL_SSP_MASK(63, 0) #define SSMODyTSA_STTSA(x) DAI_INTEL_SSP_MASK(63, 0, x) /* For 8000 Hz rate one sample is transmitted within 125us */ #define 
DAI_INTEL_SSP_MAX_SEND_TIME_PER_SAMPLE 125 /* SSP flush retry counts maximum */ #define DAI_INTEL_SSP_RX_FLUSH_RETRY_MAX 16 #define SSP_CLK_MCLK_ES_REQ BIT(0) #define SSP_CLK_MCLK_ACTIVE BIT(1) #define SSP_CLK_BCLK_ES_REQ BIT(2) #define SSP_CLK_BCLK_ACTIVE BIT(3) #define I2SLCTL_OFFSET 0x04 #define I2SLCTL_SPA(x) BIT(0 + x) #define I2SLCTL_CPA(x) BIT(8 + x) #define I2CLCTL_MLCS(x) DAI_INTEL_SSP_SET_BITS(29, 27, x) #define SHIM_CLKCTL 0x78 #define SHIM_CLKCTL_I2SFDCGB(x) BIT(20 + x) #define SHIM_CLKCTL_I2SEFDCGB(x) BIT(18 + x) #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE /** \brief Offset of MCLK Divider Control Register. */ #define MN_MDIVCTRL 0x100 /** \brief Offset of MCLK Divider x Ratio Register. */ #define MN_MDIVR(x) (0x180 + (x) * 0x4) #else #define MN_MDIVCTRL 0x0 #define MN_MDIVR(x) (0x80 + (x) * 0x4) #endif /** \brief Enables the output of MCLK Divider. */ #define MN_MDIVCTRL_M_DIV_ENABLE(x) BIT(x) /** \brief Bits for setting MCLK source clock. */ #define MCDSS(x) DAI_INTEL_SSP_SET_BITS(17, 16, x) /** \brief Offset of BCLK x M/N Divider M Value Register. */ #define MN_MDIV_M_VAL(x) (0x100 + (x) * 0x8 + 0x0) /** \brief Offset of BCLK x M/N Divider N Value Register. */ #define MN_MDIV_N_VAL(x) (0x100 + (x) * 0x8 + 0x4) /** \brief Bits for setting M/N source clock. */ #define MNDSS(x) DAI_INTEL_SSP_SET_BITS(21, 20, x) /** \brief Mask for clearing mclk and bclk source in MN_MDIVCTRL */ #define MN_SOURCE_CLKS_MASK 0x3 #endif /* __INTEL_DAI_DRIVER_SSP_REGSV1_H__ */ ```
/content/code_sandbox/drivers/dai/intel/ssp/ssp_regs_v1.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,154