text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /* * */ #define DT_DRV_COMPAT nxp_imx_lpi2c #include <errno.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/kernel.h> #include <zephyr/irq.h> #include <fsl_lpi2c.h> #if CONFIG_NXP_LP_FLEXCOMM #include <zephyr/drivers/mfd/nxp_lp_flexcomm.h> #endif #include <zephyr/drivers/pinctrl.h> #ifdef CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY #include "i2c_bitbang.h" #include <zephyr/drivers/gpio.h> #endif /* CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mcux_lpi2c); #include "i2c-priv.h" /* Wait for the duration of 12 bits to detect a NAK after a bus * address scan. (10 appears sufficient, 20% safety factor.) */ #define SCAN_DELAY_US(baudrate) (12 * USEC_PER_SEC / baudrate) /* Required by DEVICE_MMIO_NAMED_* macros */ #define DEV_CFG(_dev) \ ((const struct mcux_lpi2c_config *)(_dev)->config) #define DEV_DATA(_dev) ((struct mcux_lpi2c_data *)(_dev)->data) struct mcux_lpi2c_config { DEVICE_MMIO_NAMED_ROM(reg_base); #ifdef CONFIG_NXP_LP_FLEXCOMM const struct device *parent_dev; #endif const struct device *clock_dev; clock_control_subsys_t clock_subsys; void (*irq_config_func)(const struct device *dev); uint32_t bitrate; uint32_t bus_idle_timeout_ns; const struct pinctrl_dev_config *pincfg; #ifdef CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY struct gpio_dt_spec scl; struct gpio_dt_spec sda; #endif /* CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY */ }; struct mcux_lpi2c_data { DEVICE_MMIO_NAMED_RAM(reg_base); lpi2c_master_handle_t handle; struct k_sem lock; struct k_sem device_sync_sem; status_t callback_status; #ifdef CONFIG_I2C_TARGET lpi2c_slave_handle_t target_handle; struct i2c_target_config *target_cfg; bool target_attached; bool first_tx; bool read_active; bool send_ack; #endif }; static int mcux_lpi2c_configure(const struct device *dev, uint32_t dev_config_raw) { const struct mcux_lpi2c_config *config = dev->config; struct mcux_lpi2c_data *data = dev->data; LPI2C_Type *base = (LPI2C_Type 
*)DEVICE_MMIO_NAMED_GET(dev, reg_base); uint32_t clock_freq; uint32_t baudrate; int ret; if (!(I2C_MODE_CONTROLLER & dev_config_raw)) { return -EINVAL; } if (I2C_ADDR_10_BITS & dev_config_raw) { return -EINVAL; } switch (I2C_SPEED_GET(dev_config_raw)) { case I2C_SPEED_STANDARD: baudrate = KHZ(100); break; case I2C_SPEED_FAST: baudrate = KHZ(400); break; case I2C_SPEED_FAST_PLUS: baudrate = MHZ(1); break; default: return -EINVAL; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } ret = k_sem_take(&data->lock, K_FOREVER); if (ret) { return ret; } LPI2C_MasterSetBaudRate(base, clock_freq, baudrate); k_sem_give(&data->lock); return 0; } static void mcux_lpi2c_master_transfer_callback(LPI2C_Type *base, lpi2c_master_handle_t *handle, status_t status, void *userData) { struct mcux_lpi2c_data *data = userData; ARG_UNUSED(handle); ARG_UNUSED(base); data->callback_status = status; k_sem_give(&data->device_sync_sem); } static uint32_t mcux_lpi2c_convert_flags(int msg_flags) { uint32_t flags = 0U; if (!(msg_flags & I2C_MSG_STOP)) { flags |= kLPI2C_TransferNoStopFlag; } if (msg_flags & I2C_MSG_RESTART) { flags |= kLPI2C_TransferRepeatedStartFlag; } return flags; } static int mcux_lpi2c_transfer(const struct device *dev, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr) { const struct mcux_lpi2c_config *config = dev->config; struct mcux_lpi2c_data *data = dev->data; LPI2C_Type *base = (LPI2C_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); lpi2c_master_transfer_t transfer; status_t status; int ret = 0; ret = k_sem_take(&data->lock, K_FOREVER); if (ret) { return ret; } /* Iterate over all the messages */ for (int i = 0; i < num_msgs; i++) { if (I2C_MSG_ADDR_10_BITS & msgs->flags) { ret = -ENOTSUP; break; } /* Initialize the transfer descriptor */ transfer.flags = mcux_lpi2c_convert_flags(msgs->flags); /* Prevent the controller to send a start condition between * messages, except if explicitly requested. 
*/ if (i != 0 && !(msgs->flags & I2C_MSG_RESTART)) { transfer.flags |= kLPI2C_TransferNoStartFlag; } transfer.slaveAddress = addr; transfer.direction = (msgs->flags & I2C_MSG_READ) ? kLPI2C_Read : kLPI2C_Write; transfer.subaddress = 0; transfer.subaddressSize = 0; transfer.data = msgs->buf; transfer.dataSize = msgs->len; /* Start the transfer */ status = LPI2C_MasterTransferNonBlocking(base, &data->handle, &transfer); /* Return an error if the transfer didn't start successfully * e.g., if the bus was busy */ if (status != kStatus_Success) { LPI2C_MasterTransferAbort(base, &data->handle); ret = -EIO; break; } /* Wait for the transfer to complete */ k_sem_take(&data->device_sync_sem, K_FOREVER); /* Return an error if the transfer didn't complete * successfully. e.g., nak, timeout, lost arbitration */ if (data->callback_status != kStatus_Success) { LPI2C_MasterTransferAbort(base, &data->handle); ret = -EIO; break; } if (msgs->len == 0) { k_busy_wait(SCAN_DELAY_US(config->bitrate)); if (0 != (base->MSR & LPI2C_MSR_NDF_MASK)) { LPI2C_MasterTransferAbort(base, &data->handle); ret = -EIO; break; } } /* Move to the next message */ msgs++; } k_sem_give(&data->lock); return ret; } #if CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY static void mcux_lpi2c_bitbang_set_scl(void *io_context, int state) { const struct mcux_lpi2c_config *config = io_context; gpio_pin_set_dt(&config->scl, state); } static void mcux_lpi2c_bitbang_set_sda(void *io_context, int state) { const struct mcux_lpi2c_config *config = io_context; gpio_pin_set_dt(&config->sda, state); } static int mcux_lpi2c_bitbang_get_sda(void *io_context) { const struct mcux_lpi2c_config *config = io_context; return gpio_pin_get_dt(&config->sda) == 0 ? 
0 : 1; } static int mcux_lpi2c_recover_bus(const struct device *dev) { const struct mcux_lpi2c_config *config = dev->config; struct mcux_lpi2c_data *data = dev->data; struct i2c_bitbang bitbang_ctx; struct i2c_bitbang_io bitbang_io = { .set_scl = mcux_lpi2c_bitbang_set_scl, .set_sda = mcux_lpi2c_bitbang_set_sda, .get_sda = mcux_lpi2c_bitbang_get_sda, }; uint32_t bitrate_cfg; int error = 0; if (!gpio_is_ready_dt(&config->scl)) { LOG_ERR("SCL GPIO device not ready"); return -EIO; } if (!gpio_is_ready_dt(&config->sda)) { LOG_ERR("SDA GPIO device not ready"); return -EIO; } k_sem_take(&data->lock, K_FOREVER); error = gpio_pin_configure_dt(&config->scl, GPIO_OUTPUT_HIGH); if (error != 0) { LOG_ERR("failed to configure SCL GPIO (err %d)", error); goto restore; } error = gpio_pin_configure_dt(&config->sda, GPIO_OUTPUT_HIGH); if (error != 0) { LOG_ERR("failed to configure SDA GPIO (err %d)", error); goto restore; } i2c_bitbang_init(&bitbang_ctx, &bitbang_io, (void *)config); bitrate_cfg = i2c_map_dt_bitrate(config->bitrate) | I2C_MODE_CONTROLLER; error = i2c_bitbang_configure(&bitbang_ctx, bitrate_cfg); if (error != 0) { LOG_ERR("failed to configure I2C bitbang (err %d)", error); goto restore; } error = i2c_bitbang_recover_bus(&bitbang_ctx); if (error != 0) { LOG_ERR("failed to recover bus (err %d)", error); goto restore; } restore: (void)pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); k_sem_give(&data->lock); return error; } #endif /* CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY */ #ifdef CONFIG_I2C_TARGET static void mcux_lpi2c_slave_irq_handler(const struct device *dev) { struct mcux_lpi2c_data *data = dev->data; LPI2C_Type *base = (LPI2C_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); const struct i2c_target_callbacks *target_cb = data->target_cfg->callbacks; int ret; uint32_t flags; uint8_t i2c_data; /* Note- the HAL provides a callback-based I2C slave API, but * the API expects the user to provide a transmit buffer of * a fixed length at the first byte received, and 
will not signal * the user callback until this buffer is exhausted. This does not * work well with the Zephyr API, which requires callbacks for * every byte. For these reason, we handle the LPI2C IRQ * directly. */ flags = LPI2C_SlaveGetStatusFlags(base); if (flags & kLPI2C_SlaveAddressValidFlag) { /* Read Slave address to clear flag */ LPI2C_SlaveGetReceivedAddress(base); data->first_tx = true; /* Reset to sending ACK, in case we NAK'ed before */ data->send_ack = true; } if (flags & kLPI2C_SlaveRxReadyFlag) { /* RX data is available, read it and issue callback */ i2c_data = (uint8_t)base->SRDR; if (data->first_tx) { data->first_tx = false; if (target_cb->write_requested) { ret = target_cb->write_requested(data->target_cfg); if (ret < 0) { /* NAK further bytes */ data->send_ack = false; } } } if (target_cb->write_received) { ret = target_cb->write_received(data->target_cfg, i2c_data); if (ret < 0) { /* NAK further bytes */ data->send_ack = false; } } } if (flags & kLPI2C_SlaveTxReadyFlag) { /* Space is available in TX fifo, issue callback and write out */ if (data->first_tx) { data->read_active = true; data->first_tx = false; if (target_cb->read_requested) { ret = target_cb->read_requested(data->target_cfg, &i2c_data); if (ret < 0) { /* Disable TX */ data->read_active = false; } else { /* Send I2C data */ base->STDR = i2c_data; } } } else if (data->read_active) { if (target_cb->read_processed) { ret = target_cb->read_processed(data->target_cfg, &i2c_data); if (ret < 0) { /* Disable TX */ data->read_active = false; } else { /* Send I2C data */ base->STDR = i2c_data; } } } } if (flags & kLPI2C_SlaveStopDetectFlag) { LPI2C_SlaveClearStatusFlags(base, flags); if (target_cb->stop) { target_cb->stop(data->target_cfg); } } if (flags & kLPI2C_SlaveTransmitAckFlag) { LPI2C_SlaveTransmitAck(base, data->send_ack); } } static int mcux_lpi2c_target_register(const struct device *dev, struct i2c_target_config *target_config) { const struct mcux_lpi2c_config *config = dev->config; 
struct mcux_lpi2c_data *data = dev->data; LPI2C_Type *base = (LPI2C_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); lpi2c_slave_config_t slave_config; uint32_t clock_freq; LPI2C_MasterDeinit(base); /* Get the clock frequency */ if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } if (!target_config) { return -EINVAL; } if (data->target_attached) { return -EBUSY; } data->target_attached = true; data->target_cfg = target_config; data->first_tx = false; LPI2C_SlaveGetDefaultConfig(&slave_config); slave_config.address0 = target_config->address; /* Note- this setting enables clock stretching to allow the * slave to respond to each byte with an ACK/NAK. * this behavior may cause issues with some I2C controllers. */ slave_config.sclStall.enableAck = true; LPI2C_SlaveInit(base, &slave_config, clock_freq); /* Clear all flags. */ LPI2C_SlaveClearStatusFlags(base, (uint32_t)kLPI2C_SlaveClearFlags); /* Enable interrupt */ LPI2C_SlaveEnableInterrupts(base, (kLPI2C_SlaveTxReadyFlag | kLPI2C_SlaveRxReadyFlag | kLPI2C_SlaveStopDetectFlag | kLPI2C_SlaveAddressValidFlag | kLPI2C_SlaveTransmitAckFlag)); return 0; } static int mcux_lpi2c_target_unregister(const struct device *dev, struct i2c_target_config *target_config) { struct mcux_lpi2c_data *data = dev->data; LPI2C_Type *base = (LPI2C_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); if (!data->target_attached) { return -EINVAL; } data->target_cfg = NULL; data->target_attached = false; LPI2C_SlaveDeinit(base); return 0; } #endif /* CONFIG_I2C_TARGET */ static void mcux_lpi2c_isr(const struct device *dev) { struct mcux_lpi2c_data *data = dev->data; LPI2C_Type *base = (LPI2C_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); #ifdef CONFIG_I2C_TARGET if (data->target_attached) { mcux_lpi2c_slave_irq_handler(dev); } #endif /* CONFIG_I2C_TARGET */ #if CONFIG_HAS_MCUX_FLEXCOMM LPI2C_MasterTransferHandleIRQ(LPI2C_GetInstance(base), &data->handle); #else LPI2C_MasterTransferHandleIRQ(base, 
&data->handle); #endif } static int mcux_lpi2c_init(const struct device *dev) { const struct mcux_lpi2c_config *config = dev->config; struct mcux_lpi2c_data *data = dev->data; LPI2C_Type *base; uint32_t clock_freq, bitrate_cfg; lpi2c_master_config_t master_config; int error; DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP); base = (LPI2C_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); k_sem_init(&data->lock, 1, 1); k_sem_init(&data->device_sync_sem, 0, K_SEM_MAX_LIMIT); if (!device_is_ready(config->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } error = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (error) { return error; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } LPI2C_MasterGetDefaultConfig(&master_config); master_config.busIdleTimeout_ns = config->bus_idle_timeout_ns; LPI2C_MasterInit(base, &master_config, clock_freq); LPI2C_MasterTransferCreateHandle(base, &data->handle, mcux_lpi2c_master_transfer_callback, data); bitrate_cfg = i2c_map_dt_bitrate(config->bitrate); error = mcux_lpi2c_configure(dev, I2C_MODE_CONTROLLER | bitrate_cfg); if (error) { return error; } #if CONFIG_NXP_LP_FLEXCOMM /* When using LP Flexcomm driver, register the interrupt handler * so we receive notification from the LP Flexcomm interrupt handler. 
*/ nxp_lp_flexcomm_setirqhandler(config->parent_dev, dev, LP_FLEXCOMM_PERIPH_LPI2C, mcux_lpi2c_isr); #else /* Interrupt is managed by this driver */ config->irq_config_func(dev); #endif return 0; } static const struct i2c_driver_api mcux_lpi2c_driver_api = { .configure = mcux_lpi2c_configure, .transfer = mcux_lpi2c_transfer, #if CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY .recover_bus = mcux_lpi2c_recover_bus, #endif /* CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY */ #if CONFIG_I2C_TARGET .target_register = mcux_lpi2c_target_register, .target_unregister = mcux_lpi2c_target_unregister, #endif /* CONFIG_I2C_TARGET */ }; #if CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY #define I2C_MCUX_LPI2C_SCL_INIT(n) .scl = GPIO_DT_SPEC_INST_GET_OR(n, scl_gpios, {0}), #define I2C_MCUX_LPI2C_SDA_INIT(n) .sda = GPIO_DT_SPEC_INST_GET_OR(n, sda_gpios, {0}), #else #define I2C_MCUX_LPI2C_SCL_INIT(n) #define I2C_MCUX_LPI2C_SDA_INIT(n) #endif /* CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY */ #define I2C_MCUX_LPI2C_MODULE_IRQ_CONNECT(n) \ do { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ mcux_lpi2c_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } while (false) #define I2C_MCUX_LPI2C_MODULE_IRQ(n) \ IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0), \ (I2C_MCUX_LPI2C_MODULE_IRQ_CONNECT(n))) #ifdef CONFIG_NXP_LP_FLEXCOMM #define PARENT_DEV(n) \ .parent_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), #else #define PARENT_DEV(n) #endif /* CONFIG_NXP_LP_FLEXCOMM */ #define I2C_MCUX_LPI2C_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ \ static void mcux_lpi2c_config_func_##n(const struct device *dev); \ \ static const struct mcux_lpi2c_config mcux_lpi2c_config_##n = { \ DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)), \ PARENT_DEV(n) \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),\ .irq_config_func = mcux_lpi2c_config_func_##n, \ .bitrate = DT_INST_PROP(n, clock_frequency), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ 
I2C_MCUX_LPI2C_SCL_INIT(n) \ I2C_MCUX_LPI2C_SDA_INIT(n) \ .bus_idle_timeout_ns = \ UTIL_AND(DT_INST_NODE_HAS_PROP(n, bus_idle_timeout),\ DT_INST_PROP(n, bus_idle_timeout)), \ }; \ \ static struct mcux_lpi2c_data mcux_lpi2c_data_##n; \ \ I2C_DEVICE_DT_INST_DEFINE(n, mcux_lpi2c_init, NULL, \ &mcux_lpi2c_data_##n, \ &mcux_lpi2c_config_##n, POST_KERNEL, \ CONFIG_I2C_INIT_PRIORITY, \ &mcux_lpi2c_driver_api); \ \ static void mcux_lpi2c_config_func_##n(const struct device *dev) \ { \ I2C_MCUX_LPI2C_MODULE_IRQ(n); \ } DT_INST_FOREACH_STATUS_OKAY(I2C_MCUX_LPI2C_INIT) ```
/content/code_sandbox/drivers/i2c/i2c_mcux_lpi2c.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,190
```c /* * */ #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/i2c/rtio.h> #include <zephyr/dt-bindings/i2c/i2c.h> #include <zephyr/pm/device.h> #include <zephyr/drivers/pinctrl.h> #include <soc.h> #include <nrfx_twi.h> #include "i2c_nrfx_twi_common.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(i2c_nrfx_twi, CONFIG_I2C_LOG_LEVEL); struct i2c_nrfx_twi_rtio_data { uint32_t dev_config; bool twi_enabled; struct i2c_rtio *ctx; }; /* Enforce dev_config matches the same offset as the common structure, * otherwise common API won't be compatible with i2c_nrfx_twi_rtio. */ BUILD_ASSERT( offsetof(struct i2c_nrfx_twi_rtio_data, dev_config) == offsetof(struct i2c_nrfx_twi_common_data, dev_config) ); static void i2c_nrfx_twi_rtio_complete(const struct device *dev, int status); static bool i2c_nrfx_twi_rtio_msg_start(const struct device *dev, uint8_t flags, uint8_t *buf, size_t buf_len, uint16_t i2c_addr) { const struct i2c_nrfx_twi_config *config = dev->config; struct i2c_nrfx_twi_rtio_data *const dev_data = dev->data; struct i2c_rtio *ctx = dev_data->ctx; int ret = 0; /** Enabling while already enabled ends up in a failed assertion: skip it. 
*/ if (!dev_data->twi_enabled) { nrfx_twi_enable(&config->twi); dev_data->twi_enabled = true; } ret = i2c_nrfx_twi_msg_transfer(dev, flags, buf, buf_len, i2c_addr, false); if (ret != 0) { nrfx_twi_disable(&config->twi); dev_data->twi_enabled = false; return i2c_rtio_complete(ctx, ret); } return false; } static bool i2c_nrfx_twi_rtio_start(const struct device *dev) { struct i2c_nrfx_twi_rtio_data *const dev_data = dev->data; struct i2c_rtio *ctx = dev_data->ctx; struct rtio_sqe *sqe = &ctx->txn_curr->sqe; struct i2c_dt_spec *dt_spec = sqe->iodev->data; switch (sqe->op) { case RTIO_OP_RX: return i2c_nrfx_twi_rtio_msg_start(dev, I2C_MSG_READ | sqe->iodev_flags, sqe->rx.buf, sqe->rx.buf_len, dt_spec->addr); case RTIO_OP_TINY_TX: return i2c_nrfx_twi_rtio_msg_start(dev, I2C_MSG_WRITE | sqe->iodev_flags, sqe->tiny_tx.buf, sqe->tiny_tx.buf_len, dt_spec->addr); case RTIO_OP_TX: return i2c_nrfx_twi_rtio_msg_start(dev, I2C_MSG_WRITE | sqe->iodev_flags, sqe->tx.buf, sqe->tx.buf_len, dt_spec->addr); case RTIO_OP_I2C_CONFIGURE: (void)i2c_nrfx_twi_configure(dev, sqe->i2c_config); return false; case RTIO_OP_I2C_RECOVER: (void)i2c_rtio_recover(ctx); return false; default: LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe); return i2c_rtio_complete(ctx, -EINVAL); } } static void i2c_nrfx_twi_rtio_complete(const struct device *dev, int status) { /** Finalize if there are no more pending xfers */ const struct i2c_nrfx_twi_config *config = dev->config; struct i2c_nrfx_twi_rtio_data *data = dev->data; struct i2c_rtio *const ctx = data->ctx; if (i2c_rtio_complete(ctx, status)) { (void)i2c_nrfx_twi_rtio_start(dev); } else { nrfx_twi_disable(&config->twi); data->twi_enabled = false; } } static int i2c_nrfx_twi_rtio_transfer(const struct device *dev, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr) { struct i2c_rtio *const ctx = ((struct i2c_nrfx_twi_rtio_data *) dev->data)->ctx; return i2c_rtio_transfer(ctx, msgs, num_msgs, addr); } static void 
event_handler(nrfx_twi_evt_t const *p_event, void *p_context) { const struct device *dev = p_context; int status = 0; if (i2c_nrfx_twi_get_evt_result(p_event) != NRFX_SUCCESS) { status = -EIO; } i2c_nrfx_twi_rtio_complete(dev, status); } static void i2c_nrfx_twi_rtio_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_seq) { struct i2c_nrfx_twi_rtio_data *data = dev->data; struct i2c_rtio *const ctx = data->ctx; if (i2c_rtio_submit(ctx, iodev_seq)) { (void)i2c_nrfx_twi_rtio_start(dev); } } static const struct i2c_driver_api i2c_nrfx_twi_rtio_driver_api = { .configure = i2c_nrfx_twi_configure, .transfer = i2c_nrfx_twi_rtio_transfer, .recover_bus = i2c_nrfx_twi_recover_bus, .iodev_submit = i2c_nrfx_twi_rtio_submit, }; #define I2C_NRFX_TWI_RTIO_DEVICE(idx) \ NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(I2C(idx)); \ BUILD_ASSERT(I2C_FREQUENCY(idx) != \ I2C_NRFX_TWI_INVALID_FREQUENCY, \ "Wrong I2C " #idx " frequency setting in dts"); \ static int twi_##idx##_init(const struct device *dev) \ { \ IRQ_CONNECT(DT_IRQN(I2C(idx)), DT_IRQ(I2C(idx), priority), \ nrfx_isr, nrfx_twi_##idx##_irq_handler, 0); \ const struct i2c_nrfx_twi_config *config = dev->config; \ const struct i2c_nrfx_twi_rtio_data *dev_data = dev->data; \ int err = pinctrl_apply_state(config->pcfg, \ PINCTRL_STATE_DEFAULT); \ if (err < 0) { \ return err; \ } \ i2c_rtio_init(dev_data->ctx, dev); \ return i2c_nrfx_twi_init(dev); \ } \ I2C_RTIO_DEFINE(_i2c##idx##_twi_rtio, \ DT_INST_PROP_OR(n, sq_size, CONFIG_I2C_RTIO_SQ_SIZE), \ DT_INST_PROP_OR(n, cq_size, CONFIG_I2C_RTIO_CQ_SIZE)); \ static struct i2c_nrfx_twi_rtio_data twi_##idx##_data = { \ .ctx = &_i2c##idx##_twi_rtio, \ }; \ PINCTRL_DT_DEFINE(I2C(idx)); \ static const struct i2c_nrfx_twi_config twi_##idx##z_config = { \ .twi = NRFX_TWI_INSTANCE(idx), \ .config = { \ .skip_gpio_cfg = true, \ .skip_psel_cfg = true, \ .frequency = I2C_FREQUENCY(idx), \ }, \ .event_handler = event_handler, \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(I2C(idx)), \ }; \ 
PM_DEVICE_DT_DEFINE(I2C(idx), twi_nrfx_pm_action); \ I2C_DEVICE_DT_DEFINE(I2C(idx), \ twi_##idx##_init, \ PM_DEVICE_DT_GET(I2C(idx)), \ &twi_##idx##_data, \ &twi_##idx##z_config, \ POST_KERNEL, \ CONFIG_I2C_INIT_PRIORITY, \ &i2c_nrfx_twi_rtio_driver_api) #ifdef CONFIG_HAS_HW_NRF_TWI0 I2C_NRFX_TWI_RTIO_DEVICE(0); #endif #ifdef CONFIG_HAS_HW_NRF_TWI1 I2C_NRFX_TWI_RTIO_DEVICE(1); #endif ```
/content/code_sandbox/drivers/i2c/i2c_nrfx_twi_rtio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,044
```c /* * */ #include <errno.h> #include <string.h> #include <zephyr/drivers/i2c.h> #include <zephyr/irq.h> #include <zephyr/device.h> #include <zephyr/drivers/i2c.h> #include <zephyr/sys/util.h> #include <zephyr/irq.h> #include <zephyr/logging/log.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/atomic.h> #include <zephyr/sys/barrier.h> LOG_MODULE_REGISTER(i2c_mchp, CONFIG_I2C_LOG_LEVEL); #define DT_DRV_COMPAT microchip_mpfs_i2c #define CORE_I2C_CTRL (0x00) #define CORE_I2C_STATUS (0x04) #define CORE_I2C_DATA (0x08) #define CORE_I2C_ADDR_0 (0x0C) #define CORE_I2C_FREQ (0x14) #define CORE_I2C_GLITCHREG (0x18) #define CORE_I2C_ADDR_1 (0x1C) #define CTRL_CR0 BIT(0) #define CTRL_CR1 BIT(1) #define CTRL_AA BIT(2) #define CTRL_SI BIT(3) #define CTRL_STO BIT(4) #define CTRL_STA BIT(5) #define CTRL_ENS1 BIT(6) #define CTRL_CR2 BIT(7) #define STATUS_M_START_SENT (0x08) #define STATUS_M_REPEATED_START_SENT (0x10) #define STATUS_M_SLAW_ACK (0x18) #define STATUS_M_SLAW_NACK (0x20) #define STATUS_M_TX_DATA_ACK (0x28) #define STATUS_M_TX_DATA_NACK (0x30) #define STATUS_M_ARB_LOST (0x38) #define STATUS_M_SLAR_ACK (0x40) #define STATUS_M_SLAR_NACK (0x48) #define STATUS_M_RX_DATA_ACKED (0x50) #define STATUS_M_RX_DATA_NACKED (0x58) #define STATUS_S_SLAW_ACKED (0x60) #define STATUS_S_ARB_LOST_SLAW_ACKED (0x68) #define STATUS_S_GENERAL_CALL_ACKED (0x70) #define STATUS_S_ARB_LOST_GENERAL_CALL_ACKED (0x78) #define STATUS_S_RX_DATA_ACKED (0x80) #define STATUS_S_RX_DATA_NACKED (0x88) #define STATUS_S_GENERAL_CALL_RX_DATA_ACKED (0x90) #define STATUS_S_GENERAL_CALL_RX_DATA_NACKED (0x98) #define STATUS_S_RX_STOP (0xA0) #define STATUS_S_SLAR_ACKED (0xA8) #define STATUS_S_ARB_LOST_SLAR_ACKED (0xB0) #define STATUS_S_TX_DATA_ACK (0xB8) #define STATUS_S_TX_DATA_NACK (0xC0) #define STATUS_LAST_DATA_ACK (0xC8) #define PCLK_DIV_960 (CTRL_CR2) #define PCLK_DIV_256 (0) #define PCLK_DIV_224 (CTRL_CR0) #define PCLK_DIV_192 (CTRL_CR1) #define PCLK_DIV_160 (CTRL_CR0 | CTRL_CR1) #define PCLK_DIV_120 
(CTRL_CR0 | CTRL_CR2) #define PCLK_DIV_60 (CTRL_CR1 | CTRL_CR2) #define BCLK_DIV_8 (CTRL_CR0 | CTRL_CR1 | CTRL_CR2) #define CLK_MASK (CTRL_CR0 | CTRL_CR1 | CTRL_CR2) /* -- Transactions types -- */ #define NO_TRANSACTION (0x00) #define CONTROLLER_WRITE_TRANSACTION (0x01) #define CONTROLLER_READ_TRANSACTION (0x02) #define CONTROLLER_RANDOM_READ_TRANSACTION (0x03) #define WRITE_TARGET_TRANSACTION (0x04) #define READ_TARGET_TRANSACTION (0x05) #define MSS_I2C_RELEASE_BUS (0x00) #define MSS_I2C_HOLD_BUS (0x01) #define TARGET_ADDR_SHIFT (0x01) #define MSS_I2C_SUCCESS (0x00) #define MSS_I2C_IN_PROGRESS (0x01) #define MSS_I2C_FAILED (0x02) #define MSS_I2C_TIMED_OUT (0x03) struct mss_i2c_config { uint32_t clock_freq; uintptr_t i2c_base_addr; uint32_t i2c_irq_base; }; struct mss_i2c_data { uint8_t ser_address; uint8_t target_addr; uint8_t options; uint8_t transaction; const uint8_t *controller_tx_buffer; uint16_t controller_tx_size; uint16_t controller_tx_idx; uint8_t dir; uint8_t *controller_rx_buffer; uint16_t controller_rx_size; uint16_t controller_rx_idx; atomic_t controller_status; uint32_t controller_timeout_ms; const uint8_t *target_tx_buffer; uint16_t target_tx_size; uint16_t target_tx_idx; uint8_t *target_rx_buffer; uint16_t target_rx_size; uint16_t target_rx_idx; atomic_t target_status; uint8_t target_mem_offset_length; uint8_t is_target_enabled; uint8_t bus_status; uint8_t is_transaction_pending; uint8_t pending_transaction; sys_slist_t cb; }; static int mss_i2c_configure(const struct device *dev, uint32_t dev_config_raw) { const struct mss_i2c_config *cfg = dev->config; uint8_t ctrl = sys_read8(cfg->i2c_base_addr + CORE_I2C_CTRL); switch (I2C_SPEED_GET(dev_config_raw)) { case I2C_SPEED_STANDARD: sys_write8((ctrl | PCLK_DIV_960), cfg->i2c_base_addr + CORE_I2C_CTRL); break; case I2C_SPEED_FAST: sys_write8((ctrl | PCLK_DIV_256), cfg->i2c_base_addr + CORE_I2C_CTRL); break; default: return -EINVAL; } return 0; } static int mss_wait_complete(const struct device *dev) { 
struct mss_i2c_data *const data = dev->data; atomic_t i2c_status = 0; do { i2c_status = atomic_get(&data->controller_status); } while (i2c_status == MSS_I2C_IN_PROGRESS); return i2c_status; } static int mss_i2c_read(const struct device *dev, uint8_t serial_addr, uint8_t *read_buffer, uint32_t read_size) { struct mss_i2c_data *const data = dev->data; const struct mss_i2c_config *cfg = dev->config; uint8_t ctrl = sys_read8(cfg->i2c_base_addr + CORE_I2C_CTRL); data->target_addr = serial_addr << TARGET_ADDR_SHIFT; data->pending_transaction = CONTROLLER_READ_TRANSACTION; data->dir = I2C_MSG_READ; data->controller_rx_buffer = read_buffer; data->controller_rx_size = read_size; data->controller_rx_idx = 0u; sys_write8((ctrl | CTRL_STA), cfg->i2c_base_addr + CORE_I2C_CTRL); return 0; } static int mss_i2c_write(const struct device *dev, uint8_t serial_addr, uint8_t *tx_buffer, uint32_t tx_num_write) { struct mss_i2c_data *const data = dev->data; const struct mss_i2c_config *cfg = dev->config; uint8_t ctrl = sys_read8(cfg->i2c_base_addr + CORE_I2C_CTRL); atomic_t target_status = data->target_status; if (data->transaction == NO_TRANSACTION) { data->transaction = CONTROLLER_WRITE_TRANSACTION; } data->pending_transaction = CONTROLLER_WRITE_TRANSACTION; data->target_addr = serial_addr << TARGET_ADDR_SHIFT; data->dir = I2C_MSG_WRITE; data->controller_tx_buffer = tx_buffer; data->controller_tx_size = tx_num_write; data->controller_tx_idx = 0u; atomic_set(&data->controller_status, MSS_I2C_IN_PROGRESS); if (target_status == MSS_I2C_IN_PROGRESS) { data->is_transaction_pending = CONTROLLER_WRITE_TRANSACTION; } else { sys_write8((ctrl | CTRL_STA), cfg->i2c_base_addr + CORE_I2C_CTRL); } if (data->bus_status == MSS_I2C_HOLD_BUS) { sys_write8((ctrl & ~CTRL_SI), cfg->i2c_base_addr + CORE_I2C_CTRL); } return 0; } static int mss_i2c_transfer(const struct device *dev, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr) { for (int i = 0; i < num_msgs; i++) { struct i2c_msg *current = 
&msgs[i]; if ((current->flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) { mss_i2c_read(dev, addr, current->buf, current->len); mss_wait_complete(dev); } else { mss_i2c_write(dev, addr, current->buf, current->len); mss_wait_complete(dev); } } return 0; } static const struct i2c_driver_api mss_i2c_driver_api = { .configure = mss_i2c_configure, .transfer = mss_i2c_transfer, }; static void mss_i2c_reset(const struct device *dev) { const struct mss_i2c_config *cfg = dev->config; uint8_t ctrl = sys_read8(cfg->i2c_base_addr + CORE_I2C_CTRL); sys_write8((ctrl & ~CTRL_ENS1), cfg->i2c_base_addr + CORE_I2C_CTRL); ctrl = sys_read8(cfg->i2c_base_addr + CORE_I2C_CTRL); sys_write8((ctrl | CTRL_ENS1), cfg->i2c_base_addr + CORE_I2C_CTRL); } static void mss_i2c_irq_handler(const struct device *dev) { struct mss_i2c_data *const data = dev->data; const struct mss_i2c_config *cfg = dev->config; uint8_t ctrl = sys_read8(cfg->i2c_base_addr + CORE_I2C_CTRL); uint8_t status = sys_read8(cfg->i2c_base_addr + CORE_I2C_STATUS); uint8_t hold_bus = 0; switch (status) { case STATUS_M_START_SENT: case STATUS_M_REPEATED_START_SENT: sys_write8((ctrl & ~CTRL_STA), cfg->i2c_base_addr + CORE_I2C_CTRL); sys_write8(data->target_addr | data->dir, cfg->i2c_base_addr + CORE_I2C_DATA); data->controller_tx_idx = 0; data->controller_rx_idx = 0; data->is_transaction_pending = false; data->transaction = data->pending_transaction; break; case STATUS_M_ARB_LOST: sys_write8((ctrl | CTRL_STA), cfg->i2c_base_addr + CORE_I2C_CTRL); LOG_WRN("lost arbitration: %x\n", status); break; case STATUS_M_SLAW_ACK: case STATUS_M_TX_DATA_ACK: if (data->controller_tx_idx < data->controller_tx_size) { sys_write8(data->controller_tx_buffer[data->controller_tx_idx], cfg->i2c_base_addr + CORE_I2C_DATA); data->controller_tx_idx++; } else if (data->transaction == CONTROLLER_RANDOM_READ_TRANSACTION) { data->dir = I2C_MSG_READ; sys_write8((ctrl | CTRL_STA), cfg->i2c_base_addr + CORE_I2C_CTRL); } else { data->transaction = NO_TRANSACTION; 
hold_bus = data->options & MSS_I2C_HOLD_BUS; data->bus_status = hold_bus; if (hold_bus == MSS_I2C_RELEASE_BUS) { sys_write8((ctrl | CTRL_STO), cfg->i2c_base_addr + CORE_I2C_CTRL); } } atomic_set(&data->controller_status, MSS_I2C_SUCCESS); break; case STATUS_M_TX_DATA_NACK: case STATUS_M_SLAR_NACK: case STATUS_M_SLAW_NACK: sys_write8((ctrl | CTRL_STO), cfg->i2c_base_addr + CORE_I2C_CTRL); atomic_set(&data->controller_status, MSS_I2C_FAILED); data->transaction = NO_TRANSACTION; break; case STATUS_M_SLAR_ACK: if (data->controller_rx_size > 1u) { sys_write8((ctrl | CTRL_AA), cfg->i2c_base_addr + CORE_I2C_CTRL); } else if (data->controller_rx_size == 1u) { sys_write8((ctrl & ~CTRL_AA), cfg->i2c_base_addr + CORE_I2C_CTRL); } else { sys_write8((ctrl | CTRL_AA | CTRL_STO), cfg->i2c_base_addr + CORE_I2C_CTRL); atomic_set(&data->controller_status, MSS_I2C_SUCCESS); data->transaction = NO_TRANSACTION; } break; case STATUS_M_RX_DATA_ACKED: data->controller_rx_buffer[data->controller_rx_idx] = sys_read8(cfg->i2c_base_addr + CORE_I2C_DATA); data->controller_rx_idx++; /* Second Last byte */ if (data->controller_rx_idx >= (data->controller_rx_size - 1u)) { sys_write8((ctrl & ~CTRL_AA), cfg->i2c_base_addr + CORE_I2C_CTRL); } else { atomic_set(&data->controller_status, MSS_I2C_IN_PROGRESS); } break; case STATUS_M_RX_DATA_NACKED: data->controller_rx_buffer[data->controller_rx_idx] = sys_read8(cfg->i2c_base_addr + CORE_I2C_DATA); hold_bus = data->options & MSS_I2C_HOLD_BUS; data->bus_status = hold_bus; if (hold_bus == 0u) { sys_write8((ctrl | CTRL_STO), cfg->i2c_base_addr + CORE_I2C_CTRL); } data->transaction = NO_TRANSACTION; atomic_set(&data->controller_status, MSS_I2C_SUCCESS); break; default: break; } ctrl = sys_read8(cfg->i2c_base_addr + CORE_I2C_CTRL); sys_write8((ctrl & ~CTRL_SI), cfg->i2c_base_addr + CORE_I2C_CTRL); } #define MSS_I2C_INIT(n) \ static int mss_i2c_init_##n(const struct device *dev) \ { \ mss_i2c_reset(dev); \ \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, 
priority), mss_i2c_irq_handler, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ \ return 0; \ } \ \ static struct mss_i2c_data mss_i2c_data_##n; \ \ static const struct mss_i2c_config mss_i2c_config_##n = { \ .i2c_base_addr = DT_INST_REG_ADDR(n), \ .i2c_irq_base = DT_INST_IRQN(n), \ .clock_freq = DT_INST_PROP(n, clock_frequency), \ }; \ \ DEVICE_DT_INST_DEFINE(n, mss_i2c_init_##n, NULL, &mss_i2c_data_##n, &mss_i2c_config_##n, \ PRE_KERNEL_1, CONFIG_I2C_INIT_PRIORITY, &mss_i2c_driver_api); DT_INST_FOREACH_STATUS_OKAY(MSS_I2C_INIT) ```
/content/code_sandbox/drivers/i2c/i2c_mchp_mss.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,451
```unknown
# Infineon XMC4 I2C configuration options

# an affiliate of Cypress Semiconductor Corporation
#

menuconfig I2C_INFINEON_XMC4
	bool "Infineon XMC4 I2C driver"
	default y
	depends on DT_HAS_INFINEON_XMC4XXX_I2C_ENABLED
	help
	  This option enables the I2C driver for Infineon XMC4 family.

config I2C_INFINEON_XMC4_TARGET_BUF
	int "I2C Target data buffer length"
	depends on I2C_INFINEON_XMC4
	range 1 1024
	default 64
	help
	  Buffer to receive data as an I2C Target.
```
/content/code_sandbox/drivers/i2c/Kconfig.ifx_xmc4
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
149
```c /* * */ #include <zephyr/drivers/i2c.h> #include <zephyr/shell/shell.h> #include <stdlib.h> #include <string.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(i2c_shell, CONFIG_LOG_DEFAULT_LEVEL); #define MAX_BYTES_FOR_REGISTER_INDEX 4 #define ARGV_DEV 1 #define ARGV_ADDR 2 #define ARGV_REG 3 /* Maximum bytes we can write or read at once */ #define MAX_I2C_BYTES 16 static int get_bytes_count_for_hex(char *arg) { int length = (strlen(arg) + 1) / 2; if (length > 1 && arg[0] == '0' && (arg[1] == 'x' || arg[1] == 'X')) { length -= 1; } return MIN(MAX_BYTES_FOR_REGISTER_INDEX, length); } /* * This sends I2C messages without any data (i.e. stop condition after * sending just the address). If there is an ACK for the address, it * is assumed there is a device present. * * WARNING: As there is no standard I2C detection command, this code * uses arbitrary SMBus commands (namely SMBus quick write and SMBus * receive byte) to probe for devices. This operation can confuse * your I2C bus, cause data loss, and is known to corrupt the Atmel * AT24RF08 EEPROM found on many IBM Thinkpad laptops. 
* * path_to_url */ /* i2c scan <device> */ static int cmd_i2c_scan(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; uint8_t cnt = 0, first = 0x04, last = 0x77; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I2C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } shell_print(shell_ctx, " 0 1 2 3 4 5 6 7 8 9 a b c d e f"); for (uint8_t i = 0; i <= last; i += 16) { shell_fprintf(shell_ctx, SHELL_NORMAL, "%02x: ", i); for (uint8_t j = 0; j < 16; j++) { if (i + j < first || i + j > last) { shell_fprintf(shell_ctx, SHELL_NORMAL, " "); continue; } struct i2c_msg msgs[1]; uint8_t dst; /* Send the address to read from */ msgs[0].buf = &dst; msgs[0].len = 0U; msgs[0].flags = I2C_MSG_WRITE | I2C_MSG_STOP; if (i2c_transfer(dev, &msgs[0], 1, i + j) == 0) { shell_fprintf(shell_ctx, SHELL_NORMAL, "%02x ", i + j); ++cnt; } else { shell_fprintf(shell_ctx, SHELL_NORMAL, "-- "); } } shell_print(shell_ctx, ""); } shell_print(shell_ctx, "%u devices found on %s", cnt, argv[ARGV_DEV]); return 0; } /* i2c recover <device> */ static int cmd_i2c_recover(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; int err; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I2C: Device driver %s not found.", argv[1]); return -ENODEV; } err = i2c_recover_bus(dev); if (err) { shell_error(shell_ctx, "I2C: Bus recovery failed (err %d)", err); return err; } return 0; } static int i2c_write_from_buffer(const struct shell *shell_ctx, char *s_dev_name, char *s_dev_addr, char *s_reg_addr, char **data, uint8_t data_length) { /* This buffer must preserve 4 bytes for register address, as it is * filled using put_be32 function and we don't want to lower available * space when using 1 byte address. 
*/ uint8_t buf[MAX_I2C_BYTES + MAX_BYTES_FOR_REGISTER_INDEX - 1]; const struct device *dev; int reg_addr_bytes; int reg_addr; int dev_addr; int ret; int i; dev = device_get_binding(s_dev_name); if (!dev) { shell_error(shell_ctx, "I2C: Device driver %s not found.", s_dev_name); return -ENODEV; } dev_addr = strtol(s_dev_addr, NULL, 16); reg_addr = strtol(s_reg_addr, NULL, 16); reg_addr_bytes = get_bytes_count_for_hex(s_reg_addr); sys_put_be32(reg_addr, buf); if (data_length + reg_addr_bytes > MAX_I2C_BYTES) { data_length = MAX_I2C_BYTES - reg_addr_bytes; shell_info(shell_ctx, "Too many bytes provided, limit is %d", MAX_I2C_BYTES - reg_addr_bytes); } for (i = 0; i < data_length; i++) { buf[MAX_BYTES_FOR_REGISTER_INDEX + i] = (uint8_t)strtol(data[i], NULL, 16); } ret = i2c_write(dev, buf + MAX_BYTES_FOR_REGISTER_INDEX - reg_addr_bytes, reg_addr_bytes + data_length, dev_addr); if (ret < 0) { shell_error(shell_ctx, "Failed to write to device: %s", s_dev_addr); return -EIO; } return 0; } /* i2c write <device> <dev_addr> <reg_addr> [<byte1>, ...] 
*/ static int cmd_i2c_write(const struct shell *shell_ctx, size_t argc, char **argv) { return i2c_write_from_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_ADDR], argv[ARGV_REG], &argv[4], argc - 4); } /* i2c write_byte <device> <dev_addr> <reg_addr> <value> */ static int cmd_i2c_write_byte(const struct shell *shell_ctx, size_t argc, char **argv) { return i2c_write_from_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_ADDR], argv[ARGV_REG], &argv[4], 1); } static int i2c_read_to_buffer(const struct shell *shell_ctx, char *s_dev_name, char *s_dev_addr, char *s_reg_addr, uint8_t *buf, uint8_t buf_length) { const struct device *dev; int dev_addr; int ret; dev = device_get_binding(s_dev_name); if (!dev) { shell_error(shell_ctx, "I2C: Device driver %s not found.", s_dev_name); return -ENODEV; } dev_addr = strtol(s_dev_addr, NULL, 16); if (s_reg_addr != NULL) { uint8_t reg_addr_buf[MAX_BYTES_FOR_REGISTER_INDEX]; int reg_addr_bytes; int reg_addr; reg_addr = strtol(s_reg_addr, NULL, 16); reg_addr_bytes = get_bytes_count_for_hex(s_reg_addr); sys_put_be32(reg_addr, reg_addr_buf); ret = i2c_write_read(dev, dev_addr, reg_addr_buf + MAX_BYTES_FOR_REGISTER_INDEX - reg_addr_bytes, reg_addr_bytes, buf, buf_length); } else { ret = i2c_read(dev, buf, buf_length, dev_addr); } if (ret < 0) { shell_error(shell_ctx, "Failed to read from device: %s", s_dev_addr); return -EIO; } return 0; } /* i2c read_byte <device> <dev_addr> <reg_addr> */ static int cmd_i2c_read_byte(const struct shell *shell_ctx, size_t argc, char **argv) { uint8_t out; int ret; ret = i2c_read_to_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_ADDR], argv[ARGV_REG], &out, 1); if (ret == 0) { shell_print(shell_ctx, "Output: 0x%x", out); } return ret; } /* i2c read <device> <dev_addr> <reg_addr> [<numbytes>] */ static int cmd_i2c_read(const struct shell *shell_ctx, size_t argc, char **argv) { uint8_t buf[MAX_I2C_BYTES]; int num_bytes; int ret; if (argc > 4) { num_bytes = strtol(argv[4], NULL, 16); if (num_bytes > MAX_I2C_BYTES) { 
num_bytes = MAX_I2C_BYTES; } } else { num_bytes = MAX_I2C_BYTES; } ret = i2c_read_to_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_ADDR], argv[ARGV_REG], buf, num_bytes); if (ret == 0) { shell_hexdump(shell_ctx, buf, num_bytes); } return ret; } /* i2c direct_read <device> <dev_addr> [<numbytes>] */ static int cmd_i2c_direct_read(const struct shell *shell_ctx, size_t argc, char **argv) { uint8_t buf[MAX_I2C_BYTES]; int num_bytes; int ret; if (argc > 3) { num_bytes = strtol(argv[3], NULL, 16); if (num_bytes > MAX_I2C_BYTES) { num_bytes = MAX_I2C_BYTES; } } else { num_bytes = MAX_I2C_BYTES; } ret = i2c_read_to_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_ADDR], NULL, buf, num_bytes); if (ret == 0) { shell_hexdump(shell_ctx, buf, num_bytes); } return ret; } /* i2c speed <device> <speed> * For: speed see constants like I2C_SPEED_STANDARD */ static int cmd_i2c_speed(const struct shell *shell_ctx, size_t argc, char **argv) { char *s_dev_name = argv[ARGV_DEV]; const struct device *dev; uint32_t dev_config = 0; uint32_t speed; int ret; dev = device_get_binding(s_dev_name); if (!dev) { shell_error(shell_ctx, "I2C: Device driver %s not found.", s_dev_name); return -ENODEV; } speed = strtol(argv[ARGV_DEV + 1], NULL, 10); ret = i2c_get_config(dev, &dev_config); if (ret == 0) { dev_config &= ~I2C_SPEED_MASK; dev_config |= I2C_SPEED_SET(speed); } else { /* Can't get current config. Fallback to something reasonable */ dev_config = I2C_MODE_CONTROLLER | I2C_SPEED_SET(speed); } ret = i2c_configure(dev, dev_config); if (ret < 0) { shell_error(shell_ctx, "I2C: Failed to configure device: %s", s_dev_name); return -EIO; } return 0; } static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? 
dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get); SHELL_STATIC_SUBCMD_SET_CREATE(sub_i2c_cmds, SHELL_CMD_ARG(scan, &dsub_device_name, "Scan I2C devices\n" "Usage: scan <device>", cmd_i2c_scan, 2, 0), SHELL_CMD_ARG(recover, &dsub_device_name, "Recover I2C bus\n" "Usage: recover <device>", cmd_i2c_recover, 2, 0), SHELL_CMD_ARG(read, &dsub_device_name, "Read bytes from an I2C device\n" "Usage: read <device> <addr> <reg> [<bytes>]", cmd_i2c_read, 4, 1), SHELL_CMD_ARG(read_byte, &dsub_device_name, "Read a byte from an I2C device\n" "Usage: read_byte <device> <addr> <reg>", cmd_i2c_read_byte, 4, 0), SHELL_CMD_ARG(direct_read, &dsub_device_name, "Read byte stream directly from an I2C device without " "writing a register address first\n" "Usage: direct_read <device> <addr> [<bytes>]", cmd_i2c_direct_read, 3, 1), SHELL_CMD_ARG(write, &dsub_device_name, "Write bytes to an I2C device\n" "Usage: write <device> <addr> <reg> [<byte1>, ...]", cmd_i2c_write, 4, MAX_I2C_BYTES), SHELL_CMD_ARG(write_byte, &dsub_device_name, "Write a byte to an I2C device\n" "Usage: write_byte <device> <addr> <reg> <value>", cmd_i2c_write_byte, 5, 0), SHELL_CMD_ARG(speed, &dsub_device_name, "Configure I2C bus speed\n" "Usage: speed <device> <speed>", cmd_i2c_speed, 3, 0), SHELL_SUBCMD_SET_END /* Array terminated. */ ); SHELL_CMD_REGISTER(i2c, &sub_i2c_cmds, "I2C commands", NULL); ```
/content/code_sandbox/drivers/i2c/i2c_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,114
```unknown
# Infineon CAT1 I2C configuration options

# an affiliate of Cypress Semiconductor Corporation
#

config I2C_INFINEON_CAT1
	bool "Infineon CAT1 I2C driver"
	default y
	depends on DT_HAS_INFINEON_CAT1_I2C_ENABLED
	select USE_INFINEON_I2C
	help
	  This option enables the I2C driver for Infineon CAT1 family.

config I2C_INFINEON_CAT1_TARGET_BUF
	int "I2C Target data buffer length"
	depends on I2C_INFINEON_CAT1
	range 1 1024
	default 64
	help
	  Buffer to receive data as an I2C Target.

config I2C_INFINEON_CAT1_ASYNC
	bool "Support Asynchronous I2C driver"
	default y
	depends on I2C_INFINEON_CAT1
	help
	  Configure the I2C driver to be non-blocking/Asynchronous mode.
```
/content/code_sandbox/drivers/i2c/Kconfig.ifx_cat1
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
203
```c /* * */ #define DT_DRV_COMPAT sifive_i2c0 #define LOG_LEVEL CONFIG_I2C_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(i2c_sifive); #include <zephyr/device.h> #include <zephyr/drivers/i2c.h> #include <soc.h> #include <zephyr/sys/sys_io.h> #include "i2c-priv.h" /* Macros */ #define I2C_REG(config, reg) ((mem_addr_t) ((config)->base + reg)) #define IS_SET(config, reg, value) (sys_read8(I2C_REG(config, reg)) & (value)) /* Register Offsets */ #define REG_PRESCALE_LOW 0x00 #define REG_PRESCALE_HIGH 0x04 #define REG_CONTROL 0x08 /* Transmit on write, receive on read */ #define REG_TRANSMIT 0x0c #define REG_RECEIVE 0x0c /* Command on write, status on read */ #define REG_COMMAND 0x10 #define REG_STATUS 0x10 /* Values */ #define SF_CONTROL_EN (1 << 7) #define SF_CONTROL_IE (1 << 6) #define SF_TX_WRITE (0 << 0) #define SF_TX_READ (1 << 0) #define SF_CMD_START (1 << 7) #define SF_CMD_STOP (1 << 6) #define SF_CMD_READ (1 << 5) #define SF_CMD_WRITE (1 << 4) #define SF_CMD_ACK (1 << 3) #define SF_CMD_IACK (1 << 0) #define SF_STATUS_RXACK (1 << 7) #define SF_STATUS_BUSY (1 << 6) #define SF_STATUS_AL (1 << 5) #define SF_STATUS_TIP (1 << 1) #define SF_STATUS_IP (1 << 0) /* Structure declarations */ struct i2c_sifive_cfg { uint32_t base; uint32_t f_sys; uint32_t f_bus; }; /* Helper functions */ static inline bool i2c_sifive_busy(const struct device *dev) { const struct i2c_sifive_cfg *config = dev->config; return IS_SET(config, REG_STATUS, SF_STATUS_TIP); } static int i2c_sifive_send_addr(const struct device *dev, uint16_t addr, uint16_t rw_flag) { const struct i2c_sifive_cfg *config = dev->config; uint8_t command = 0U; /* Wait for a previous transfer to complete */ while (i2c_sifive_busy(dev)) { } /* Set transmit register to address with read/write flag */ sys_write8((addr | rw_flag), I2C_REG(config, REG_TRANSMIT)); /* Addresses are always written */ command = SF_CMD_WRITE | SF_CMD_START; /* Write the command register to start the transfer */ sys_write8(command, 
I2C_REG(config, REG_COMMAND)); while (i2c_sifive_busy(dev)) { } if (IS_SET(config, REG_STATUS, SF_STATUS_RXACK)) { LOG_ERR("I2C Rx failed to acknowledge\n"); return -EIO; } return 0; } static int i2c_sifive_write_msg(const struct device *dev, struct i2c_msg *msg, uint16_t addr) { const struct i2c_sifive_cfg *config = dev->config; int rc = 0; uint8_t command = 0U; rc = i2c_sifive_send_addr(dev, addr, SF_TX_WRITE); if (rc != 0) { LOG_ERR("I2C failed to write message\n"); return rc; } for (uint32_t i = 0; i < msg->len; i++) { /* Wait for a previous transfer */ while (i2c_sifive_busy(dev)) { } /* Put data in transmit reg */ sys_write8((msg->buf)[i], I2C_REG(config, REG_TRANSMIT)); /* Generate command byte */ command = SF_CMD_WRITE; /* On the last byte of the message */ if (i == (msg->len - 1)) { /* If the stop bit is requested, set it */ if (msg->flags & I2C_MSG_STOP) { command |= SF_CMD_STOP; } } /* Write command reg */ sys_write8(command, I2C_REG(config, REG_COMMAND)); /* Wait for a previous transfer */ while (i2c_sifive_busy(dev)) { } if (IS_SET(config, REG_STATUS, SF_STATUS_RXACK)) { LOG_ERR("I2C Rx failed to acknowledge\n"); return -EIO; } } return 0; } static int i2c_sifive_read_msg(const struct device *dev, struct i2c_msg *msg, uint16_t addr) { const struct i2c_sifive_cfg *config = dev->config; uint8_t command = 0U; i2c_sifive_send_addr(dev, addr, SF_TX_READ); while (i2c_sifive_busy(dev)) { } for (int i = 0; i < msg->len; i++) { /* Generate command byte */ command = SF_CMD_READ; /* On the last byte of the message */ if (i == (msg->len - 1)) { /* Set NACK to end read */ command |= SF_CMD_ACK; /* If the stop bit is requested, set it */ if (msg->flags & I2C_MSG_STOP) { command |= SF_CMD_STOP; } } /* Write command reg */ sys_write8(command, I2C_REG(config, REG_COMMAND)); /* Wait for the read to complete */ while (i2c_sifive_busy(dev)) { } /* Store the received byte */ (msg->buf)[i] = sys_read8(I2C_REG(config, REG_RECEIVE)); } return 0; } /* API Functions */ static 
int i2c_sifive_configure(const struct device *dev, uint32_t dev_config) { const struct i2c_sifive_cfg *config = NULL; uint32_t i2c_speed = 0U; uint16_t prescale = 0U; /* Check for NULL pointers */ if (dev == NULL) { LOG_ERR("Device handle is NULL"); return -EINVAL; } config = dev->config; if (config == NULL) { LOG_ERR("Device config is NULL"); return -EINVAL; } /* Disable the I2C peripheral */ sys_write8(0, I2C_REG(config, REG_CONTROL)); /* Configure bus frequency */ switch (I2C_SPEED_GET(dev_config)) { case I2C_SPEED_STANDARD: i2c_speed = 100000U; /* 100 KHz */ break; case I2C_SPEED_FAST: i2c_speed = 400000U; /* 400 KHz */ break; case I2C_SPEED_FAST_PLUS: case I2C_SPEED_HIGH: case I2C_SPEED_ULTRA: default: LOG_ERR("Unsupported I2C speed requested"); return -ENOTSUP; } /* Calculate prescale value */ prescale = (config->f_sys / (i2c_speed * 5U)) - 1; /* Configure peripheral with calculated prescale */ sys_write8((uint8_t) (0xFF & prescale), I2C_REG(config, REG_PRESCALE_LOW)); sys_write8((uint8_t) (0xFF & (prescale >> 8)), I2C_REG(config, REG_PRESCALE_HIGH)); /* Support I2C Master mode only */ if (!(dev_config & I2C_MODE_CONTROLLER)) { LOG_ERR("I2C only supports operation as master"); return -ENOTSUP; } /* * Driver does not support 10-bit addressing. This can be added * in the future when needed. 
*/ if (dev_config & I2C_ADDR_10_BITS) { LOG_ERR("I2C driver does not support 10-bit addresses"); return -ENOTSUP; } /* Enable the I2C peripheral */ sys_write8(SF_CONTROL_EN, I2C_REG(config, REG_CONTROL)); return 0; } static int i2c_sifive_transfer(const struct device *dev, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr) { int rc = 0; /* Check for NULL pointers */ if (dev == NULL) { LOG_ERR("Device handle is NULL"); return -EINVAL; } if (dev->config == NULL) { LOG_ERR("Device config is NULL"); return -EINVAL; } if (msgs == NULL) { return -EINVAL; } for (int i = 0; i < num_msgs; i++) { if (msgs[i].flags & I2C_MSG_READ) { rc = i2c_sifive_read_msg(dev, &(msgs[i]), addr); } else { rc = i2c_sifive_write_msg(dev, &(msgs[i]), addr); } if (rc != 0) { LOG_ERR("I2C failed to transfer messages\n"); return rc; } } return 0; }; static int i2c_sifive_init(const struct device *dev) { const struct i2c_sifive_cfg *config = dev->config; uint32_t dev_config = 0U; int rc = 0; dev_config = (I2C_MODE_CONTROLLER | i2c_map_dt_bitrate(config->f_bus)); rc = i2c_sifive_configure(dev, dev_config); if (rc != 0) { LOG_ERR("Failed to configure I2C on init"); return rc; } return 0; } static const struct i2c_driver_api i2c_sifive_api = { .configure = i2c_sifive_configure, .transfer = i2c_sifive_transfer, }; /* Device instantiation */ #define I2C_SIFIVE_INIT(n) \ static struct i2c_sifive_cfg i2c_sifive_cfg_##n = { \ .base = DT_INST_REG_ADDR(n), \ .f_sys = SIFIVE_PERIPHERAL_CLOCK_FREQUENCY, \ .f_bus = DT_INST_PROP(n, clock_frequency), \ }; \ I2C_DEVICE_DT_INST_DEFINE(n, \ i2c_sifive_init, \ NULL, \ NULL, \ &i2c_sifive_cfg_##n, \ POST_KERNEL, \ CONFIG_I2C_INIT_PRIORITY, \ &i2c_sifive_api); DT_INST_FOREACH_STATUS_OKAY(I2C_SIFIVE_INIT) ```
/content/code_sandbox/drivers/i2c/i2c_sifive.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,371
```unknown
menuconfig I2C_SAM0
	bool "SAM0 series I2C SERCOM driver"
	default y
	depends on DT_HAS_ATMEL_SAM0_I2C_ENABLED
	help
	  Enable the SAM0 series SERCOM I2C driver.

config I2C_SAM0_DMA_DRIVEN
	bool "DMA support for SAM0 I2C devices"
	depends on I2C_SAM0
	select DMA
	help
	  This enables DMA driven transactions for the I2C peripheral.
	  DMA driven mode requires fewer interrupts to handle the
	  transaction and ensures that high speed modes are not delayed
	  by data reloading.
```
/content/code_sandbox/drivers/i2c/Kconfig.sam0
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
136
```c
/*
 * Shared helper layer that lets I2C drivers service transfers through the
 * RTIO (real-time I/O) submission/completion queue machinery: a per-bus
 * context with an mpsc work queue, a spinlock-guarded current-transaction
 * pointer, and blocking wrappers over submit/consume.
 */

#include <zephyr/drivers/i2c.h>
#include <zephyr/drivers/i2c/rtio.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/mpsc_lockfree.h>
#include <zephyr/sys/__assert.h>

#define LOG_LEVEL CONFIG_I2C_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(i2c_rtio);

const struct rtio_iodev_api i2c_iodev_api = {
	.submit = i2c_iodev_submit,
};

/*
 * Translate an i2c_msg[] array into a chain of RTIO SQEs linked with
 * RTIO_SQE_TRANSACTION (cleared on the last entry so the chain terminates).
 * Returns the last SQE, or NULL if the queue ran out of entries (in which
 * case all acquired entries are dropped).
 */
struct rtio_sqe *i2c_rtio_copy(struct rtio *r, struct rtio_iodev *iodev,
			       const struct i2c_msg *msgs, uint8_t num_msgs)
{
	__ASSERT(num_msgs > 0, "Expecting at least one message to copy");

	struct rtio_sqe *sqe = NULL;

	for (uint8_t i = 0; i < num_msgs; i++) {
		sqe = rtio_sqe_acquire(r);

		if (sqe == NULL) {
			rtio_sqe_drop_all(r);
			return NULL;
		}

		if (msgs[i].flags & I2C_MSG_READ) {
			rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM,
					   msgs[i].buf, msgs[i].len, NULL);
		} else {
			rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM,
					    msgs[i].buf, msgs[i].len, NULL);
		}
		sqe->flags |= RTIO_SQE_TRANSACTION;
		/* Map per-message I2C flags onto the iodev flag bits */
		sqe->iodev_flags =
			((msgs[i].flags & I2C_MSG_STOP) ? RTIO_IODEV_I2C_STOP : 0) |
			((msgs[i].flags & I2C_MSG_RESTART) ? RTIO_IODEV_I2C_RESTART : 0) |
			((msgs[i].flags & I2C_MSG_ADDR_10_BITS) ? RTIO_IODEV_I2C_10_BITS : 0);
	}

	sqe->flags &= ~RTIO_SQE_TRANSACTION;

	return sqe;
}

/* One-time setup of a bus context: lock, work queue, and the iodev that
 * routes submissions back through i2c_iodev_api.
 */
void i2c_rtio_init(struct i2c_rtio *ctx, const struct device *dev)
{
	k_sem_init(&ctx->lock, 1, 1);
	mpsc_init(&ctx->io_q);
	ctx->txn_curr = NULL;
	ctx->txn_head = NULL;
	ctx->dt_spec.bus = dev;
	ctx->iodev.data = &ctx->dt_spec;
	ctx->iodev.api = &i2c_iodev_api;
}

/**
 * @private
 * @brief Setup the next transaction (could be a single op) if needed
 *
 * @retval true New transaction to start with the hardware is setup
 * @retval false No new transaction to start
 */
static bool i2c_rtio_next(struct i2c_rtio *ctx, bool completion)
{
	k_spinlock_key_t key = k_spin_lock(&ctx->slock);

	/* Already working on something, bail early */
	if (!completion && ctx->txn_head != NULL) {
		k_spin_unlock(&ctx->slock, key);
		return false;
	}

	struct mpsc_node *next = mpsc_pop(&ctx->io_q);

	/* Nothing left to do */
	if (next == NULL) {
		ctx->txn_head = NULL;
		ctx->txn_curr = NULL;
		k_spin_unlock(&ctx->slock, key);
		return false;
	}

	ctx->txn_head = CONTAINER_OF(next, struct rtio_iodev_sqe, q);
	ctx->txn_curr = ctx->txn_head;

	k_spin_unlock(&ctx->slock, key);

	return true;
}

/*
 * Driver callback when the current op finishes.  On error the whole
 * transaction is failed; otherwise advance to the next op in the chain.
 * Returns true if the driver should start another op.
 */
bool i2c_rtio_complete(struct i2c_rtio *ctx, int status)
{
	/* On error bail */
	if (status < 0) {
		rtio_iodev_sqe_err(ctx->txn_head, status);
		return i2c_rtio_next(ctx, true);
	}

	/* Try for next submission in the transaction */
	ctx->txn_curr = rtio_txn_next(ctx->txn_curr);
	if (ctx->txn_curr) {
		return true;
	}

	rtio_iodev_sqe_ok(ctx->txn_head, status);
	return i2c_rtio_next(ctx, true);
}

/* Queue a submission; returns true if the caller should kick the hardware */
bool i2c_rtio_submit(struct i2c_rtio *ctx, struct rtio_iodev_sqe *iodev_sqe)
{
	mpsc_push(&ctx->io_q, &iodev_sqe->q);
	return i2c_rtio_next(ctx, false);
}

/*
 * Blocking i2c_transfer() implemented on top of RTIO: copy the messages,
 * submit, then drain completions.  The result of the last consumed CQE is
 * returned to the caller.
 */
int i2c_rtio_transfer(struct i2c_rtio *ctx, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr)
{
	struct rtio_iodev *iodev = &ctx->iodev;
	struct rtio *const r = ctx->r;
	struct rtio_sqe *sqe = NULL;
	struct rtio_cqe *cqe = NULL;
	int res = 0;

	k_sem_take(&ctx->lock, K_FOREVER);

	ctx->dt_spec.addr = addr;

	sqe = i2c_rtio_copy(r, iodev, msgs, num_msgs);
	if (sqe == NULL) {
		LOG_ERR("Not enough submission queue entries");
		res = -ENOMEM;
		goto out;
	}

	rtio_submit(r, 1);

	cqe = rtio_cqe_consume(r);
	while (cqe != NULL) {
		res = cqe->result;
		rtio_cqe_release(r, cqe);
		cqe = rtio_cqe_consume(r);
	}

out:
	k_sem_give(&ctx->lock);
	return res;
}

/* Blocking i2c_configure() routed through the RTIO queue as a single op.
 * NOTE(review): assumes rtio_submit(r, 1) guarantees a CQE is available;
 * a NULL from rtio_cqe_consume() here would fault — confirm against the
 * RTIO contract.
 */
int i2c_rtio_configure(struct i2c_rtio *ctx, uint32_t i2c_config)
{
	struct rtio_iodev *iodev = &ctx->iodev;
	struct rtio *const r = ctx->r;
	struct rtio_sqe *sqe = NULL;
	struct rtio_cqe *cqe = NULL;
	int res = 0;

	k_sem_take(&ctx->lock, K_FOREVER);

	sqe = rtio_sqe_acquire(r);
	if (sqe == NULL) {
		LOG_ERR("Not enough submission queue entries");
		res = -ENOMEM;
		goto out;
	}

	sqe->op = RTIO_OP_I2C_CONFIGURE;
	sqe->iodev = iodev;
	sqe->i2c_config = i2c_config;

	rtio_submit(r, 1);

	cqe = rtio_cqe_consume(r);
	res = cqe->result;
	rtio_cqe_release(r, cqe);

out:
	k_sem_give(&ctx->lock);
	return res;
}

/* Blocking bus recovery routed through the RTIO queue as a single op.
 * NOTE(review): same unchecked rtio_cqe_consume() assumption as
 * i2c_rtio_configure() above — confirm.
 */
int i2c_rtio_recover(struct i2c_rtio *ctx)
{
	struct rtio_iodev *iodev = &ctx->iodev;
	struct rtio *const r = ctx->r;
	struct rtio_sqe *sqe = NULL;
	struct rtio_cqe *cqe = NULL;
	int res = 0;

	k_sem_take(&ctx->lock, K_FOREVER);

	sqe = rtio_sqe_acquire(r);
	if (sqe == NULL) {
		LOG_ERR("Not enough submission queue entries");
		res = -ENOMEM;
		goto out;
	}

	sqe->op = RTIO_OP_I2C_RECOVER;
	sqe->iodev = iodev;

	rtio_submit(r, 1);

	cqe = rtio_cqe_consume(r);
	res = cqe->result;
	rtio_cqe_release(r, cqe);

out:
	k_sem_give(&ctx->lock);
	return res;
}
```
/content/code_sandbox/drivers/i2c/i2c_rtio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,646
```c /* * */ /** * @file I2C driver for AndesTech atciic100 IP */ #include <string.h> #include <zephyr/drivers/i2c.h> #include <zephyr/irq.h> #include "i2c_andes_atciic100.h" #define DT_DRV_COMPAT andestech_atciic100 typedef void (*atciic100_dt_init_func_t)(void); struct i2c_atciic100_config { uint32_t base; uint32_t irq_num; atciic100_dt_init_func_t dt_init_fn; }; static int i2c_atciic100_controller_send(const struct device *dev, uint16_t addr, const uint8_t *data, uint32_t num, uint8_t flags); static int i2c_atciic100_controller_receive(const struct device *dev, uint16_t addr, uint8_t *data, uint32_t num, uint8_t flags); static void i2c_controller_fifo_write(const struct device *dev, uint8_t is_init); static void i2c_controller_fifo_read(const struct device *dev); static int i2c_atciic100_init(const struct device *dev); #if defined(CONFIG_I2C_TARGET) static void i2c_atciic100_target_send(const struct device *dev, const uint8_t *data); static void i2c_atciic100_target_receive(const struct device *dev, uint8_t *data); #endif static void i2c_atciic100_default_control(const struct device *dev) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t reg = 0; k_sem_init(&dev_data->bus_lock, 1, 1); k_sem_init(&dev_data->device_sync_sem, 0, 1); /* Reset I2C bus */ reg = sys_read32(I2C_CMD(dev)); reg &= (~CMD_MSK); reg |= (CMD_RESET_I2C); sys_write32(reg, I2C_CMD(dev)); /* I2C query FIFO depth */ reg = sys_read32(I2C_CFG(dev)); switch (reg & 0x3) { case 0x0: dev_data->fifo_depth = 2; break; case 0x1: dev_data->fifo_depth = 4; break; case 0x2: dev_data->fifo_depth = 8; break; case 0x3: dev_data->fifo_depth = 16; break; } /* * I2C setting: target mode(default), standard speed * 7-bit, CPU mode */ sys_write32(0x0, I2C_SET(dev)); reg = sys_read32(I2C_SET(dev)); reg |= ((SETUP_T_SUDAT_STD << 24) | (SETUP_T_SP_STD << 21) | (SETUP_T_HDDAT_STD << 16) | (SETUP_T_SCL_RATIO_STD << 13) | (SETUP_T_SCLHI_STD << 4) | SETUP_I2C_EN); sys_write32(reg, I2C_SET(dev)); 
dev_data->driver_state = I2C_DRV_INIT; dev_data->status.mode = 0; dev_data->status.arbitration_lost = 0; dev_data->status.target_ack = 0; } static int i2c_atciic100_configure(const struct device *dev, uint32_t dev_config) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t reg = 0; int ret = 0; reg = sys_read32(I2C_SET(dev)); switch (I2C_SPEED_GET(dev_config)) { case I2C_SPEED_STANDARD: reg |= SETUP_SPEED_STD; break; case I2C_SPEED_FAST: reg |= SETUP_SPEED_FAST; break; case I2C_SPEED_FAST_PLUS: reg |= SETUP_SPEED_FAST_PLUS; case I2C_SPEED_HIGH: ret = -EIO; goto unlock; case 0x00: break; default: ret = -EIO; goto unlock; } if (dev_config & I2C_MODE_CONTROLLER) { reg |= SETUP_CONTROLLER; dev_data->status.mode = 1; } else { reg &= ~SETUP_CONTROLLER; dev_data->status.mode = 0; } if (dev_config & I2C_ADDR_10_BITS) { reg |= SETUP_ADDRESSING; } else { reg &= ~SETUP_ADDRESSING; } sys_write32(reg, I2C_SET(dev)); dev_data->driver_state |= I2C_DRV_CFG_PARAM; unlock: k_sem_give(&dev_data->bus_lock); return ret; } static int i2c_atciic100_transfer(const struct device *dev, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; int ret = 0; int count = 0; uint8_t burst_write_len = msgs[0].len + msgs[1].len; uint8_t burst_write_buf[I2C_MAX_COUNT + BURST_CMD_COUNT]; k_sem_take(&dev_data->bus_lock, K_FOREVER); if ((msgs[0].flags == I2C_MSG_WRITE) && (msgs[1].flags == (I2C_MSG_WRITE | I2C_MSG_STOP))) { burst_write_len = msgs[0].len + msgs[1].len; if (burst_write_len > MAX_XFER_SZ) { return -EIO; } for (count = 0; count < burst_write_len; count++) { if (count < msgs[0].len) { burst_write_buf[count] = msgs[0].buf[count]; } else { burst_write_buf[count] = msgs[1].buf[count - msgs[0].len]; } } ret = i2c_atciic100_controller_send(dev, addr, burst_write_buf, burst_write_len, true); goto exit; } for (uint8_t i = 0; i < num_msgs; i++) { if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_WRITE) { ret = 
i2c_atciic100_controller_send(dev, addr, msgs[i].buf, msgs[i].len, msgs[i].flags); } else { ret = i2c_atciic100_controller_receive(dev, addr, msgs[i].buf, msgs[i].len, msgs[i].flags); } if (ret < 0) { goto exit; } } exit: /* Wait for transfer complete */ k_sem_give(&dev_data->bus_lock); return ret; } static int i2c_atciic100_controller_send(const struct device *dev, uint16_t addr, const uint8_t *data, uint32_t num, uint8_t flags) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t reg = 0; /* * Max to 10-bit address. * Parameters data = null or num = 0 means no payload for * acknowledge polling. If no I2C payload, set Phase_data=0x0. */ if (addr > 0x3FF) { return -EIO; } /* Disable all I2C interrupts */ reg = sys_read32(I2C_INTE(dev)); reg &= (~IEN_ALL); sys_write32(reg, I2C_INTE(dev)); dev_data->status.mode = 1; reg = sys_read32(I2C_SET(dev)); reg |= SETUP_CONTROLLER; sys_write32(reg, I2C_SET(dev)); /* Direction => tx:0, rx:1 */ dev_data->status.arbitration_lost = 0; dev_data->status.target_ack = 0; dev_data->driver_state = I2C_DRV_CONTROLLER_TX; /* Step1, Clear FIFO */ reg = sys_read32(I2C_CMD(dev)); reg &= (~CMD_MSK); reg |= (CMD_CLEAR_FIFO); sys_write32(reg, I2C_CMD(dev)); /* * Step2 * Enable START, ADDRESS, DATA and STOP phase. * If no payload, clear DATA phase. * STOP condition triggered when transmission finish in controller mode. * The bus is busy until STOP condition triggered. * For 10-bit target address, we must set STOP bit. * I2C direction : controller tx, set xfer DATA count. 
*/ reg = sys_read32(I2C_CTRL(dev)); reg &= (~(CTRL_PHASE_START | CTRL_PHASE_ADDR | CTRL_PHASE_STOP | CTRL_DIR | CTRL_DATA_COUNT)); if (flags & I2C_MSG_STOP) { reg |= CTRL_PHASE_STOP; } if ((flags & I2C_MSG_RESTART) == 0) { reg |= (CTRL_PHASE_START | CTRL_PHASE_ADDR); } if (num) { reg |= (CTRL_PHASE_DATA | (num & CTRL_DATA_COUNT)); } sys_write32(reg, I2C_CTRL(dev)); /* Step3 init I2C info */ dev_data->target_addr = addr; dev_data->xfered_data_wt_ptr = 0; dev_data->xfer_wt_num = num; dev_data->middleware_tx_buf = (uint8_t *)data; /* In I2C target address, general call address = 0x0(7-bit or 10-bit) */ reg = sys_read32(I2C_ADDR(dev)); reg &= (~TARGET_ADDR_MSK); reg |= (dev_data->target_addr & (TARGET_ADDR_MSK)); sys_write32(reg, I2C_ADDR(dev)); /* * Step4 Enable Interrupts: Complete, Arbitration Lose * Enable/Disable the FIFO Empty Interrupt * Fill the FIFO before enabling FIFO Empty Interrupt */ reg = sys_read32(I2C_INTE(dev)); i2c_controller_fifo_write(dev, 1); reg |= (IEN_CMPL | IEN_ARB_LOSE | IEN_ADDR_HIT); if (num > 0) { reg |= IEN_FIFO_EMPTY; } else { reg &= (~IEN_FIFO_EMPTY); } sys_write32(reg, I2C_INTE(dev)); /* * Step5, * I2C Write 0x1 to the Command register to issue the transaction */ reg = sys_read32(I2C_CMD(dev)); reg &= (~CMD_MSK); reg |= (CMD_ISSUE_TRANSACTION); sys_write32(reg, I2C_CMD(dev)); k_sem_take(&dev_data->device_sync_sem, K_FOREVER); if (dev_data->status.target_ack != 1) { return -EIO; } dev_data->status.target_ack = 0; return 0; } static int i2c_atciic100_controller_receive(const struct device *dev, uint16_t addr, uint8_t *data, uint32_t num, uint8_t flags) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t reg = 0; /* * Max to 10-bit address. * Parameters data = null or num = 0 means no payload for * acknowledge polling. If no I2C payload, set Phase_data=0x0. 
*/ if (addr > 0x3FF) { return -EIO; } /* Disable all I2C interrupts */ reg = sys_read32(I2C_INTE(dev)); reg &= (~IEN_ALL); sys_write32(reg, I2C_INTE(dev)); dev_data->status.mode = 1; reg = sys_read32(I2C_SET(dev)); reg |= SETUP_CONTROLLER; sys_write32(reg, I2C_SET(dev)); /* Direction => tx:0, rx:1 */ dev_data->status.arbitration_lost = 0; dev_data->status.target_ack = 0; dev_data->driver_state = I2C_DRV_CONTROLLER_RX; /* Step1, Clear FIFO */ reg = sys_read32(I2C_CMD(dev)); reg &= (~CMD_MSK); reg |= (CMD_CLEAR_FIFO); sys_write32(reg, I2C_CMD(dev)); /* * Step2 * Enable START, ADDRESS, DATA and STOP phase. * If no payload, clear DATA phase. * STOP condition triggered when transmission finish in Controller mode. * The bus is busy until STOP condition triggered. * For 10-bit target address, we must set STOP bit. * I2C direction : controller rx, set xfer data count. */ reg = sys_read32(I2C_CTRL(dev)); reg &= (~(CTRL_PHASE_START | CTRL_PHASE_ADDR | CTRL_PHASE_STOP | CTRL_DIR | CTRL_DATA_COUNT)); reg |= (CTRL_PHASE_START | CTRL_PHASE_ADDR | CTRL_DIR); if (flags & I2C_MSG_STOP) { reg |= CTRL_PHASE_STOP; } if (num) { reg |= (CTRL_PHASE_DATA | (num & CTRL_DATA_COUNT)); } sys_write32(reg, I2C_CTRL(dev)); /* Step3 init I2C info */ dev_data->target_addr = addr; dev_data->xfered_data_rd_ptr = 0; dev_data->xfer_rd_num = num; dev_data->middleware_rx_buf = (uint8_t *)data; /* In I2C target address, general call address = 0x0(7-bit or 10-bit) */ reg = sys_read32(I2C_ADDR(dev)); reg &= (~TARGET_ADDR_MSK); reg |= (dev_data->target_addr & (TARGET_ADDR_MSK)); sys_write32(reg, I2C_ADDR(dev)); /* * Step4 Enable Interrupts: Complete, Arbitration Lose * Enable/Disable the FIFO Full Interrupt */ reg = sys_read32(I2C_INTE(dev)); reg |= (IEN_CMPL | IEN_FIFO_FULL | IEN_ARB_LOSE | IEN_ADDR_HIT); sys_write32(reg, I2C_INTE(dev)); /* * Step5, * I2C write 0x1 to the Command register to issue the transaction */ reg = sys_read32(I2C_CMD(dev)); reg &= (~CMD_MSK); reg |= (CMD_ISSUE_TRANSACTION); 
sys_write32(reg, I2C_CMD(dev)); k_sem_take(&dev_data->device_sync_sem, K_FOREVER); if (dev_data->status.target_ack != 1) { return -EIO; } dev_data->status.target_ack = 0; return 0; } #if defined(CONFIG_I2C_TARGET) static void i2c_atciic100_target_send(const struct device *dev, const uint8_t *data) { uint32_t reg = 0; /* Clear FIFO */ reg = sys_read32(I2C_CMD(dev)); reg &= (~CMD_MSK); reg |= (CMD_CLEAR_FIFO); sys_write32(reg, I2C_CMD(dev)); sys_write32(*data, I2C_DATA(dev)); } static void i2c_atciic100_target_receive(const struct device *dev, uint8_t *data) { *data = sys_read32(I2C_DATA(dev)); } #endif static void i2c_controller_fifo_write(const struct device *dev, uint8_t is_init) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t i = 0, write_fifo_count = 0, reg = 0; uint8_t write_data; write_fifo_count = dev_data->xfer_wt_num - dev_data->xfered_data_wt_ptr; if (write_fifo_count >= dev_data->fifo_depth) { write_fifo_count = dev_data->fifo_depth; } if (is_init) { write_fifo_count = 2; } /* I2C write a patch of data(FIFO_Depth) to FIFO */ for (i = 0; i < write_fifo_count; i++) { write_data = dev_data->middleware_tx_buf[dev_data->xfered_data_wt_ptr]; sys_write32((write_data & DATA_MSK), I2C_DATA(dev)); dev_data->xfered_data_wt_ptr++; /* Disable the FIFO Empty Interrupt if no more data to send */ if (dev_data->xfered_data_wt_ptr == dev_data->xfer_wt_num) { reg = sys_read32(I2C_INTE(dev)); reg &= (~IEN_FIFO_EMPTY); sys_write32(reg, I2C_INTE(dev)); } } } /* Basic fifo read function */ static void i2c_controller_fifo_read(const struct device *dev) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t i = 0, read_fifo_count = 0, reg = 0; uint8_t read_data; read_fifo_count = dev_data->xfer_rd_num - dev_data->xfered_data_rd_ptr; if (read_fifo_count >= dev_data->fifo_depth) { read_fifo_count = dev_data->fifo_depth; } /* I2C read a patch of data(FIFO_Depth) from FIFO */ for (i = 0; i < read_fifo_count; i++) { read_data = sys_read32(I2C_DATA(dev)) & 
DATA_MSK; dev_data->middleware_rx_buf[dev_data->xfered_data_rd_ptr] = read_data; dev_data->xfered_data_rd_ptr++; /* Disable the FIFO Full Interrupt if no more data to receive */ if (dev_data->xfered_data_rd_ptr == dev_data->xfer_rd_num) { reg = sys_read32(I2C_INTE(dev)); reg &= (~IEN_FIFO_FULL); sys_write32(reg, I2C_INTE(dev)); } } } static void i2c_fifo_empty_handler(const struct device *dev) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; if (dev_data->driver_state & I2C_DRV_CONTROLLER_TX) { i2c_controller_fifo_write(dev, 0); } } static void i2c_fifo_full_handler(const struct device *dev) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; if (dev_data->driver_state & I2C_DRV_CONTROLLER_RX) { i2c_controller_fifo_read(dev); } } static void i2c_cmpl_handler(const struct device *dev, uint32_t reg_stat) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t reg_set = 0, reg_ctrl = 0, reg = 0; reg_set = sys_read32(I2C_SET(dev)); /* Controller mode */ if (dev_data->status.mode == 1) { /* Disable all I2C interrupts */ reg = sys_read32(I2C_INTE(dev)); reg &= (~IEN_ALL); sys_write32(reg, I2C_INTE(dev)); } if (dev_data->driver_state & (I2C_DRV_CONTROLLER_TX | I2C_DRV_CONTROLLER_RX)) { /* Get the remain number of data */ reg_ctrl = sys_read32(I2C_CTRL(dev)) & CTRL_DATA_COUNT; if (dev_data->driver_state & I2C_DRV_CONTROLLER_TX) { /* Clear & set driver state to controller tx complete */ dev_data->driver_state = I2C_DRV_CONTROLLER_TX_CMPL; } if (dev_data->driver_state & I2C_DRV_CONTROLLER_RX) { i2c_controller_fifo_read(dev); /* Clear & set driver state to controller rx complete */ dev_data->driver_state = I2C_DRV_CONTROLLER_RX_CMPL; } k_sem_give(&dev_data->device_sync_sem); } #if defined(CONFIG_I2C_TARGET) if (dev_data->driver_state & (I2C_DRV_TARGET_TX | I2C_DRV_TARGET_RX)) { reg_set = sys_read32(I2C_SET(dev)); reg_ctrl = sys_read32(I2C_CTRL(dev)); if (dev_data->driver_state & I2C_DRV_TARGET_TX) { dev_data->driver_state = I2C_DRV_TARGET_TX_CMPL; } if 
(dev_data->driver_state & I2C_DRV_TARGET_RX) { dev_data->driver_state = I2C_DRV_TARGET_RX_CMPL; } /* If the Completion Interrupt asserts, * clear the FIFO and go next transaction. */ uint32_t reg_cmd = 0; reg_cmd = sys_read32(I2C_CMD(dev)); reg_cmd &= (~CMD_MSK); reg_cmd |= (CMD_CLEAR_FIFO); sys_write32(reg_cmd, I2C_CMD(dev)); } /* Enable Completion & Address Hit Interrupt */ /* Enable Byte Receive & Transfer for default target mode */ reg = 0x0; reg |= (IEN_CMPL | IEN_ADDR_HIT | STATUS_BYTE_RECV | STATUS_BYTE_TRANS); sys_write32(reg, I2C_INTE(dev)); reg = sys_read32(I2C_SET(dev)); reg &= ~(SETUP_CONTROLLER); sys_write32(reg, I2C_SET(dev)); reg &= (~TARGET_ADDR_MSK); reg |= (dev_data->target_config->address & (TARGET_ADDR_MSK)); sys_write32(reg, I2C_ADDR(dev)); dev_data->driver_state = I2C_DRV_INIT; dev_data->status.mode = 0; dev_data->status.arbitration_lost = 0; #endif } #if defined(CONFIG_I2C_TARGET) static void andes_i2c_target_event(const struct device *dev, uint32_t reg_stat, uint32_t reg_ctrl) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t reg_set = 0; uint8_t val; /* * Here is the entry for target mode driver to detect * target RX/TX action depend on controller TX/RX action. 
* A new I2C data transaction(START-ADDRESS-DATA-STOP) */ if (reg_stat & STATUS_ADDR_HIT) { if (k_sem_take(&dev_data->bus_lock, K_NO_WAIT) != 0) { return; } if (((reg_ctrl & CTRL_DIR) >> 8) == I2C_TARGET_TX) { /* Notify middleware to do target rx action */ dev_data->driver_state = I2C_DRV_TARGET_TX; dev_data->target_callbacks->read_requested (dev_data->target_config, &val); i2c_atciic100_target_send(dev, &val); } else if (((reg_ctrl & CTRL_DIR) >> 8) == I2C_TARGET_RX) { /* Notify middleware to do target tx action */ dev_data->driver_state = I2C_DRV_TARGET_RX; dev_data->target_callbacks->write_requested (dev_data->target_config); } reg_set |= (CMD_ACK); sys_write32(reg_set, I2C_CMD(dev)); } if (reg_stat & STATUS_BYTE_RECV) { i2c_atciic100_target_receive(dev, &val); dev_data->target_callbacks->write_received (dev_data->target_config, val); reg_set = 0; if ((reg_stat & STATUS_CMPL) == 0) { reg_set |= (CMD_ACK); sys_write32(reg_set, I2C_CMD(dev)); } else { reg_set |= (CMD_NACK); sys_write32(reg_set, I2C_CMD(dev)); } } else if (reg_stat & STATUS_BYTE_TRANS) { dev_data->target_callbacks->read_processed (dev_data->target_config, &val); i2c_atciic100_target_send(dev, &val); } if (reg_stat & STATUS_CMPL) { i2c_cmpl_handler(dev, reg_stat); k_sem_give(&dev_data->bus_lock); } } static int i2c_atciic100_target_register(const struct device *dev, struct i2c_target_config *cfg) { struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint16_t reg_addr = 0; uint32_t reg; reg_addr &= (~TARGET_ADDR_MSK); reg_addr |= (cfg->address & (TARGET_ADDR_MSK)); sys_write32(reg_addr, I2C_ADDR(dev)); dev_data->target_callbacks = cfg->callbacks; dev_data->target_config = cfg; /* Enable Completion & Address Hit Interrupt */ /* Enable Byte Receive & Transfer for default target mode */ reg = 0x0; reg |= (IEN_CMPL | IEN_ADDR_HIT | STATUS_BYTE_RECV | STATUS_BYTE_TRANS); sys_write32(reg, I2C_INTE(dev)); return 0; } static int i2c_atciic100_target_unregister(const struct device *dev, struct 
i2c_target_config *cfg) { uint32_t reg; /* Disable all I2C interrupts */ reg = sys_read32(I2C_INTE(dev)); reg &= (~IEN_ALL); sys_write32(reg, I2C_INTE(dev)); sys_write32(0x0, I2C_ADDR(dev)); return 0; } #endif static void i2c_atciic100_irq_handler(void *arg) { const struct device *dev = (struct device *)arg; struct i2c_atciic100_dev_data_t *dev_data = dev->data; uint32_t reg_set, reg_stat = 0, reg_ctrl = 0; reg_stat = sys_read32(I2C_STAT(dev)); reg_set = sys_read32(I2C_SET(dev)); reg_ctrl = sys_read32(I2C_CTRL(dev)); /* Clear interrupts status */ sys_write32((reg_stat & STATUS_W1C_ALL), I2C_STAT(dev)); #if defined(CONFIG_I2C_TARGET) if (dev_data->status.mode == 0) { andes_i2c_target_event(dev, reg_stat, reg_ctrl); return; } #endif if (reg_stat & STATUS_ADDR_HIT) { dev_data->status.target_ack = 1; } if (reg_stat & STATUS_FIFO_EMPTY) { i2c_fifo_empty_handler(dev); } if (reg_stat & STATUS_FIFO_FULL) { /* Store hw receive data count quickly */ i2c_fifo_full_handler(dev); } if (reg_stat & STATUS_CMPL) { /* Store hw receive data count quickly */ i2c_cmpl_handler(dev, reg_stat); } if ((reg_stat & STATUS_ARB_LOSE) && (reg_set & SETUP_CONTROLLER)) { dev_data->status.arbitration_lost = 1; } } static const struct i2c_driver_api i2c_atciic100_driver = { .configure = (i2c_api_configure_t)i2c_atciic100_configure, .transfer = (i2c_api_full_io_t)i2c_atciic100_transfer, #if defined(CONFIG_I2C_TARGET) .target_register = (i2c_api_target_register_t)i2c_atciic100_target_register, .target_unregister = (i2c_api_target_unregister_t)i2c_atciic100_target_unregister #endif }; static int i2c_atciic100_init(const struct device *dev) { const struct i2c_atciic100_config *dev_cfg = dev->config; /* Disable all interrupts. */ sys_write32(0x00000000, I2C_INTE(dev)); /* Clear interrupts status. 
*/ sys_write32(0xFFFFFFFF, I2C_STAT(dev)); dev_cfg->dt_init_fn(); i2c_atciic100_default_control(dev); #if defined(CONFIG_I2C_TARGET) i2c_atciic100_configure(dev, I2C_SPEED_SET(I2C_SPEED_STANDARD)); #else i2c_atciic100_configure(dev, I2C_SPEED_SET(I2C_SPEED_STANDARD) | I2C_MODE_CONTROLLER); #endif irq_enable(dev_cfg->irq_num); return 0; } #define I2C_INIT(n) \ static struct i2c_atciic100_dev_data_t \ i2c_atciic100_dev_data_##n; \ static void i2c_dt_init_##n(void); \ static const struct i2c_atciic100_config \ i2c_atciic100_config_##n = { \ .base = DT_INST_REG_ADDR(n), \ .irq_num = DT_INST_IRQN(n), \ .dt_init_fn = i2c_dt_init_##n \ }; \ I2C_DEVICE_DT_INST_DEFINE(n, \ i2c_atciic100_init, \ NULL, \ &i2c_atciic100_dev_data_##n, \ &i2c_atciic100_config_##n, \ POST_KERNEL, \ CONFIG_I2C_INIT_PRIORITY, \ &i2c_atciic100_driver); \ \ static void i2c_dt_init_##n(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ i2c_atciic100_irq_handler, \ DEVICE_DT_INST_GET(n), \ 0); \ } DT_INST_FOREACH_STATUS_OKAY(I2C_INIT) ```
/content/code_sandbox/drivers/i2c/i2c_andes_atciic100.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,428
```unknown
# Kconfig entry for the ENE KB1200 SoC I2C controller driver.
# Enabled by default whenever the devicetree declares a compatible node.
config I2C_ENE_KB1200
	bool "ENE KB1200 I2C driver"
	default y
	depends on DT_HAS_ENE_KB1200_I2C_ENABLED
	select PINCTRL
	help
	  Enable the ENE KB1200 I2C driver.
```
/content/code_sandbox/drivers/i2c/Kconfig.ene
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
62
```unknown
# Kconfig entry for the Telink Semiconductor B91 I2C controller driver.
# Enabled by default whenever the devicetree declares a compatible node.
config I2C_TELINK_B91
	bool "Telink Semiconductor B91 I2C driver"
	default y
	depends on DT_HAS_TELINK_B91_I2C_ENABLED
	help
	  Enables Telink B91 I2C driver.
```
/content/code_sandbox/drivers/i2c/Kconfig.b91
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
55
```unknown
# I2C EEPROM Target configuration options

# Virtual EEPROM exposed over an I2C target interface (no real EEPROM
# hardware involved).
config I2C_EEPROM_TARGET
	bool "I2C Target EEPROM driver"
	help
	  Enable virtual I2C Target EEPROM driver

# Allows the target address to be changed after boot instead of being
# fixed by devicetree.
config I2C_EEPROM_TARGET_RUNTIME_ADDR
	bool "Set I2C Target EEPROM Address at Runtime"
	depends on I2C_EEPROM_TARGET
	help
	  Enable changing virtual I2C Target EEPROM device address at runtime
```
/content/code_sandbox/drivers/i2c/target/Kconfig.eeprom
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
87
```c /* * */ #define DT_DRV_COMPAT zephyr_i2c_target_eeprom #include <zephyr/sys/util.h> #include <zephyr/kernel.h> #include <errno.h> #include <zephyr/drivers/i2c.h> #include <string.h> #include <zephyr/drivers/i2c/target/eeprom.h> #define LOG_LEVEL CONFIG_I2C_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(i2c_target); struct i2c_eeprom_target_data { struct i2c_target_config config; uint32_t buffer_size; uint8_t *buffer; uint32_t buffer_idx; uint32_t idx_write_cnt; uint8_t address_width; }; struct i2c_eeprom_target_config { struct i2c_dt_spec bus; uint32_t buffer_size; uint8_t *buffer; }; int eeprom_target_program(const struct device *dev, const uint8_t *eeprom_data, unsigned int length) { struct i2c_eeprom_target_data *data = dev->data; if (length > data->buffer_size) { return -EINVAL; } memcpy(data->buffer, eeprom_data, length); return 0; } int eeprom_target_read(const struct device *dev, uint8_t *eeprom_data, unsigned int offset) { struct i2c_eeprom_target_data *data = dev->data; if (!data || offset >= data->buffer_size) { return -EINVAL; } *eeprom_data = data->buffer[offset]; return 0; } #ifdef CONFIG_I2C_EEPROM_TARGET_RUNTIME_ADDR int eeprom_target_set_addr(const struct device *dev, uint8_t addr) { const struct i2c_eeprom_target_config *cfg = dev->config; struct i2c_eeprom_target_data *data = dev->data; int ret; ret = i2c_target_unregister(cfg->bus.bus, &data->config); if (ret) { LOG_DBG("eeprom target failed to unregister"); return ret; } data->config.address = addr; return i2c_target_register(cfg->bus.bus, &data->config); } #endif /* CONFIG_I2C_EEPROM_TARGET_RUNTIME_ADDR */ static int eeprom_target_write_requested(struct i2c_target_config *config) { struct i2c_eeprom_target_data *data = CONTAINER_OF(config, struct i2c_eeprom_target_data, config); LOG_DBG("eeprom: write req"); data->idx_write_cnt = 0; return 0; } static int eeprom_target_read_requested(struct i2c_target_config *config, uint8_t *val) { struct i2c_eeprom_target_data *data = 
CONTAINER_OF(config, struct i2c_eeprom_target_data, config); *val = data->buffer[data->buffer_idx]; LOG_DBG("eeprom: read req, val=0x%x", *val); /* Increment will be done in the read_processed callback */ return 0; } static int eeprom_target_write_received(struct i2c_target_config *config, uint8_t val) { struct i2c_eeprom_target_data *data = CONTAINER_OF(config, struct i2c_eeprom_target_data, config); LOG_DBG("eeprom: write done, val=0x%x", val); /* In case EEPROM wants to be R/O, return !0 here could trigger * a NACK to the I2C controller, support depends on the * I2C controller support */ if (data->idx_write_cnt < (data->address_width >> 3)) { if (data->idx_write_cnt == 0) { data->buffer_idx = 0; } data->buffer_idx = val | (data->buffer_idx << 8); data->idx_write_cnt++; } else { data->buffer[data->buffer_idx++] = val; } data->buffer_idx = data->buffer_idx % data->buffer_size; return 0; } static int eeprom_target_read_processed(struct i2c_target_config *config, uint8_t *val) { struct i2c_eeprom_target_data *data = CONTAINER_OF(config, struct i2c_eeprom_target_data, config); /* Increment here */ data->buffer_idx = (data->buffer_idx + 1) % data->buffer_size; *val = data->buffer[data->buffer_idx]; LOG_DBG("eeprom: read done, val=0x%x", *val); /* Increment will be done in the next read_processed callback * In case of STOP, the byte won't be taken in account */ return 0; } static int eeprom_target_stop(struct i2c_target_config *config) { struct i2c_eeprom_target_data *data = CONTAINER_OF(config, struct i2c_eeprom_target_data, config); LOG_DBG("eeprom: stop"); data->idx_write_cnt = 0; return 0; } #ifdef CONFIG_I2C_TARGET_BUFFER_MODE static void eeprom_target_buf_write_received(struct i2c_target_config *config, uint8_t *ptr, uint32_t len) { struct i2c_eeprom_target_data *data = CONTAINER_OF(config, struct i2c_eeprom_target_data, config); /* The first byte is offset */ data->buffer_idx = *ptr; memcpy(&data->buffer[data->buffer_idx], ptr + 1, len - 1); } static int 
eeprom_target_buf_read_requested(struct i2c_target_config *config, uint8_t **ptr, uint32_t *len) { struct i2c_eeprom_target_data *data = CONTAINER_OF(config, struct i2c_eeprom_target_data, config); *ptr = &data->buffer[data->buffer_idx]; *len = data->buffer_size; return 0; } #endif static int eeprom_target_register(const struct device *dev) { const struct i2c_eeprom_target_config *cfg = dev->config; struct i2c_eeprom_target_data *data = dev->data; return i2c_target_register(cfg->bus.bus, &data->config); } static int eeprom_target_unregister(const struct device *dev) { const struct i2c_eeprom_target_config *cfg = dev->config; struct i2c_eeprom_target_data *data = dev->data; return i2c_target_unregister(cfg->bus.bus, &data->config); } static const struct i2c_target_driver_api api_funcs = { .driver_register = eeprom_target_register, .driver_unregister = eeprom_target_unregister, }; static const struct i2c_target_callbacks eeprom_callbacks = { .write_requested = eeprom_target_write_requested, .read_requested = eeprom_target_read_requested, .write_received = eeprom_target_write_received, .read_processed = eeprom_target_read_processed, #ifdef CONFIG_I2C_TARGET_BUFFER_MODE .buf_write_received = eeprom_target_buf_write_received, .buf_read_requested = eeprom_target_buf_read_requested, #endif .stop = eeprom_target_stop, }; static int i2c_eeprom_target_init(const struct device *dev) { struct i2c_eeprom_target_data *data = dev->data; const struct i2c_eeprom_target_config *cfg = dev->config; if (!device_is_ready(cfg->bus.bus)) { LOG_ERR("I2C controller device not ready"); return -ENODEV; } data->buffer_size = cfg->buffer_size; data->buffer = cfg->buffer; data->config.address = cfg->bus.addr; data->config.callbacks = &eeprom_callbacks; return 0; } #define I2C_EEPROM_INIT(inst) \ static struct i2c_eeprom_target_data \ i2c_eeprom_target_##inst##_dev_data = { \ .address_width = DT_INST_PROP_OR(inst, \ address_width, 8), \ }; \ \ static uint8_t \ 
i2c_eeprom_target_##inst##_buffer[(DT_INST_PROP(inst, size))]; \ \ BUILD_ASSERT(DT_INST_PROP(inst, size) <= \ (1 << DT_INST_PROP_OR(inst, address_width, 8)), \ "size must be <= than 2^address_width"); \ \ static const struct i2c_eeprom_target_config \ i2c_eeprom_target_##inst##_cfg = { \ .bus = I2C_DT_SPEC_INST_GET(inst), \ .buffer_size = DT_INST_PROP(inst, size), \ .buffer = i2c_eeprom_target_##inst##_buffer \ }; \ \ DEVICE_DT_INST_DEFINE(inst, \ &i2c_eeprom_target_init, \ NULL, \ &i2c_eeprom_target_##inst##_dev_data, \ &i2c_eeprom_target_##inst##_cfg, \ POST_KERNEL, \ CONFIG_I2C_TARGET_INIT_PRIORITY, \ &api_funcs); DT_INST_FOREACH_STATUS_OKAY(I2C_EEPROM_INIT) ```
/content/code_sandbox/drivers/i2c/target/eeprom_target.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,967
```unknown
# I2C Target configuration options

#
# I2C options
#
# Umbrella menu for I2C target-mode (device acting as a bus target)
# drivers; sub-options are only visible when this is enabled.
menuconfig I2C_TARGET
	bool "I2C Target Drivers"
	help
	  Enable I2C Target Driver Configuration

if I2C_TARGET

config I2C_TARGET_INIT_PRIORITY
	int "Init priority"
	default 60
	help
	  I2C Target device driver initialization priority.

config I2C_TARGET_BUFFER_MODE
	bool "I2C target driver for buffer mode [EXPERIMENTAL]"
	select EXPERIMENTAL
	help
	  This is an option to enable buffer mode.

source "drivers/i2c/target/Kconfig.eeprom"

endif # I2C_TARGET
```
/content/code_sandbox/drivers/i2c/target/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
138
```c /* * */ #define DT_DRV_COMPAT atmel_sam_i2c_twim /** @file * @brief I2C bus (TWIM) driver for Atmel SAM4L MCU family. * * I2C Master Mode with 7/10 bit addressing is currently supported. * Very long transfers are allowed using NCMDR register. DMA is not * yet supported. */ #include <errno.h> #include <zephyr/sys/__assert.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <soc.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #define LOG_LEVEL CONFIG_I2C_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(i2c_sam_twim); #include "i2c-priv.h" /** I2C bus speed [Hz] in Standard Mode */ #define BUS_SPEED_STANDARD_HZ 100000U /** I2C bus speed [Hz] in Fast Mode */ #define BUS_SPEED_FAST_HZ 400000U /** I2C bus speed [Hz] in Fast Plus Mode */ #define BUS_SPEED_PLUS_HZ 1000000U /** I2C bus speed [Hz] in High Speed Mode */ #define BUS_SPEED_HIGH_HZ 3400000U /* Maximum value of Clock Divider (CKDIV) */ #define CKDIV_MAX 7 /* Maximum Frequency prescaled */ #define F_PRESCALED_MAX 255 /** Status Clear Register Mask for No Acknowledgements */ #define TWIM_SCR_NAK_MASK (TWIM_SCR_ANAK | TWIM_SCR_DNAK) /** Status Register Mask for No Acknowledgements */ #define TWIM_SR_NAK_MASK (TWIM_SR_ANAK | TWIM_SR_DNAK) /** Interrupt Enable Register Mask for No Acknowledgements */ #define TWIM_IER_NAK_MASK (TWIM_IER_ANAK | TWIM_IER_DNAK) /** Frequently used Interrupt Enable Register Mask */ #define TWIM_IER_STD_MASK (TWIM_IER_ANAK | TWIM_IER_ARBLST) /** Frequently used Status Clear Register Mask */ #define TWIM_SR_STD_MASK (TWIM_SR_ANAK | TWIM_SR_ARBLST) /** \internal Max value of NBYTES per transfer by hardware */ #define TWIM_MAX_NBYTES_PER_XFER \ (TWIM_CMDR_NBYTES_Msk >> TWIM_CMDR_NBYTES_Pos) #define TWIM_NCMDR_FREE_WAIT 2000 /* Device constant configuration parameters */ struct i2c_sam_twim_dev_cfg { Twim *regs; void (*irq_config)(void); 
uint32_t bitrate; const struct atmel_sam_pmc_config clock_cfg; const struct pinctrl_dev_config *pcfg; uint8_t irq_id; uint8_t std_clk_slew_lim; uint8_t std_clk_strength_low; uint8_t std_data_slew_lim; uint8_t std_data_strength_low; uint8_t hs_clk_slew_lim; uint8_t hs_clk_strength_high; uint8_t hs_clk_strength_low; uint8_t hs_data_slew_lim; uint8_t hs_data_strength_low; uint8_t hs_master_code; }; /* Device run time data */ struct i2c_sam_twim_dev_data { struct k_mutex bus_mutex; struct k_sem sem; struct i2c_msg *msgs; uint32_t msg_cur_idx; uint32_t msg_next_idx; uint32_t msg_max_idx; uint32_t cur_remaining; uint32_t cur_idx; uint32_t cur_sr; uint32_t next_nb_bytes; bool next_is_valid; bool next_need_rs; bool cur_need_rs; }; static int i2c_clk_set(const struct device *dev, uint32_t speed) { const struct i2c_sam_twim_dev_cfg *const cfg = dev->config; Twim *const twim = cfg->regs; uint32_t per_clk = SOC_ATMEL_SAM_MCK_FREQ_HZ; uint32_t f_prescaled = (per_clk / speed / 2); uint32_t cwgr_reg_val = 0; uint8_t cwgr_exp = 0; /* f_prescaled must fit in 8 bits, cwgr_exp must fit in 3 bits */ while ((f_prescaled > F_PRESCALED_MAX) && (cwgr_exp <= CKDIV_MAX)) { /* increase clock divider */ cwgr_exp++; /* divide f_prescaled value */ f_prescaled /= 2; } if (cwgr_exp > CKDIV_MAX) { LOG_ERR("Failed to configure I2C clock"); return -EIO; } cwgr_reg_val = TWIM_HSCWGR_LOW(f_prescaled / 2) | TWIM_HSCWGR_HIGH(f_prescaled - (f_prescaled / 2)) | TWIM_HSCWGR_EXP(cwgr_exp) | TWIM_HSCWGR_DATA(0) | TWIM_HSCWGR_STASTO(f_prescaled); /* This configuration should be applied after a TWIM_CR_SWRST * Set clock waveform generator register */ if (speed == BUS_SPEED_HIGH_HZ) { twim->HSCWGR = cwgr_reg_val; } else { twim->CWGR = cwgr_reg_val; } LOG_DBG("per_clk: %d, f_prescaled: %d, cwgr_exp: 0x%02x," "cwgr_reg_val: 0x%08x", per_clk, f_prescaled, cwgr_exp, cwgr_reg_val); /* Set clock and data slew rate */ twim->SRR = ((speed == BUS_SPEED_PLUS_HZ) ? 
TWIM_SRR_FILTER(2) : TWIM_SRR_FILTER(3)) | TWIM_SRR_CLSLEW(cfg->std_clk_slew_lim) | TWIM_SRR_CLDRIVEL(cfg->std_clk_strength_low) | TWIM_SRR_DASLEW(cfg->std_data_slew_lim) | TWIM_SRR_DADRIVEL(cfg->std_data_strength_low); twim->HSSRR = TWIM_HSSRR_FILTER(1) | TWIM_HSSRR_CLSLEW(cfg->hs_clk_slew_lim) | TWIM_HSSRR_CLDRIVEH(cfg->hs_clk_strength_high) | TWIM_HSSRR_CLDRIVEL(cfg->hs_clk_strength_low) | TWIM_HSSRR_DASLEW(cfg->hs_data_slew_lim) | TWIM_HSSRR_DADRIVEL(cfg->hs_data_strength_low); return 0; } static int i2c_sam_twim_configure(const struct device *dev, uint32_t config) { uint32_t bitrate; int ret; if (!(config & I2C_MODE_CONTROLLER)) { LOG_ERR("Master Mode is not enabled"); return -EIO; } if (config & I2C_ADDR_10_BITS) { LOG_ERR("I2C 10-bit addressing is currently not supported"); LOG_ERR("Please submit a patch"); return -EIO; } /* Configure clock */ switch (I2C_SPEED_GET(config)) { case I2C_SPEED_STANDARD: bitrate = BUS_SPEED_STANDARD_HZ; break; case I2C_SPEED_FAST: bitrate = BUS_SPEED_FAST_HZ; break; case I2C_SPEED_FAST_PLUS: bitrate = BUS_SPEED_PLUS_HZ; break; case I2C_SPEED_HIGH: bitrate = BUS_SPEED_HIGH_HZ; break; default: LOG_ERR("Unsupported I2C speed value"); return -EIO; } /* Setup clock waveform */ ret = i2c_clk_set(dev, bitrate); if (ret < 0) { return ret; } return 0; } static void i2c_prepare_xfer_data(struct i2c_sam_twim_dev_data *data) { struct i2c_msg *next_msg = NULL; if (data->next_nb_bytes > TWIM_MAX_NBYTES_PER_XFER) { data->cur_remaining = TWIM_MAX_NBYTES_PER_XFER; data->next_nb_bytes -= TWIM_MAX_NBYTES_PER_XFER; data->next_is_valid = true; data->next_need_rs = false; } else { data->cur_remaining = data->next_nb_bytes; if ((data->msg_next_idx + 1) < data->msg_max_idx) { next_msg = &data->msgs[++data->msg_next_idx]; data->next_nb_bytes = next_msg->len; data->next_is_valid = true; data->next_need_rs = true; } else { data->next_nb_bytes = 0; data->next_is_valid = false; data->next_need_rs = false; } } } static uint32_t i2c_prepare_xfer_cmd(struct 
i2c_sam_twim_dev_data *data, uint32_t *cmdr_reg, uint32_t next_msg_idx) { struct i2c_msg *next_msg = &data->msgs[next_msg_idx]; bool next_msg_is_read; uint32_t next_nb_remaining; *cmdr_reg &= ~(TWIM_CMDR_NBYTES_Msk | TWIM_CMDR_ACKLAST | TWIM_CMDR_START | TWIM_CMDR_READ); next_msg_is_read = ((next_msg->flags & I2C_MSG_RW_MASK) == I2C_MSG_READ); if (next_msg_is_read) { *cmdr_reg |= TWIM_CMDR_READ; } if (data->next_need_rs) { /* TODO: evaluate 10 bits repeat start read * because of blank cmd */ *cmdr_reg |= TWIM_CMDR_START; } if (data->next_nb_bytes > TWIM_MAX_NBYTES_PER_XFER) { next_nb_remaining = TWIM_MAX_NBYTES_PER_XFER; if (next_msg_is_read) { *cmdr_reg |= TWIM_CMDR_ACKLAST; } } else { next_nb_remaining = data->next_nb_bytes; /* Is there any more messages ? */ if ((next_msg_idx + 1) >= data->msg_max_idx) { *cmdr_reg |= TWIM_CMDR_STOP; } } return next_nb_remaining; } static void i2c_start_xfer(const struct device *dev, uint16_t daddr) { const struct i2c_sam_twim_dev_cfg *const cfg = dev->config; struct i2c_sam_twim_dev_data *data = dev->data; struct i2c_msg *msg = &data->msgs[0]; Twim *const twim = cfg->regs; uint32_t cmdr_reg; uint32_t data_size; uint32_t cur_is_read; /* Reset the TWIM module */ twim->CR = TWIM_CR_MEN; twim->CR = TWIM_CR_SWRST; twim->CR = TWIM_CR_MDIS; twim->IDR = ~0UL; /* Clear the interrupt flags */ twim->SCR = ~0UL; /* Clear the status flags */ /* Reset indexes */ data->msg_cur_idx = 0; data->msg_next_idx = 0; /* pre-load current message to infer next */ data->next_nb_bytes = data->msgs[data->msg_next_idx].len; data->next_is_valid = false; data->next_need_rs = false; data->cur_remaining = 0; data->cur_idx = 0; LOG_DBG("Config first/next Transfer: msgs: %d", data->msg_max_idx); cmdr_reg = TWIM_CMDR_SADR(daddr) | TWIM_CMDR_VALID; if (I2C_SPEED_GET(msg->flags) >= I2C_SPEED_HIGH) { cmdr_reg |= TWIM_CMDR_HS | TWIM_CMDR_HSMCODE(cfg->hs_master_code); } if (msg->flags & I2C_MSG_ADDR_10_BITS) { cmdr_reg |= TWIM_CMDR_TENBIT; } if ((msg->flags & 
I2C_MSG_RW_MASK) == I2C_MSG_READ && (msg->flags & I2C_MSG_ADDR_10_BITS)) { /* Fill transfer command (empty) * It must be a write xfer with NBYTES = 0 */ twim->CMDR = cmdr_reg | TWIM_CMDR_START; /* Fill next transfer command. REPSAME performs a repeated * start to the same slave address as addressed in the * previous transfer in order to enter master receiver mode. */ cmdr_reg |= TWIM_CMDR_REPSAME; i2c_prepare_xfer_data(data); /* Special condition: reset msg_next_idx */ data->msg_next_idx = 0; data_size = i2c_prepare_xfer_cmd(data, &cmdr_reg, 0); cmdr_reg |= TWIM_CMDR_NBYTES(data->cur_remaining); twim->NCMDR = cmdr_reg | TWIM_CMDR_START; } else { /* Fill transfer command */ i2c_prepare_xfer_data(data); data_size = i2c_prepare_xfer_cmd(data, &cmdr_reg, 0); cmdr_reg |= TWIM_CMDR_NBYTES(data->cur_remaining); twim->CMDR = cmdr_reg | TWIM_CMDR_START; /* Fill next transfer command */ if (data->next_is_valid) { data_size = i2c_prepare_xfer_cmd(data, &cmdr_reg, data->msg_next_idx); cmdr_reg |= TWIM_CMDR_NBYTES(data_size); twim->NCMDR = cmdr_reg; } } LOG_DBG("Start Transfer: CMDR: 0x%08x, NCMDR: 0x%08x", twim->CMDR, twim->NCMDR); /* Extract Read/Write start operation */ cmdr_reg = twim->CMDR; cur_is_read = (cmdr_reg & TWIM_CMDR_READ); /* Enable master transfer */ twim->CR = TWIM_CR_MEN; twim->IER = TWIM_IER_STD_MASK | (cur_is_read ? TWIM_IER_RXRDY : TWIM_IER_TXRDY) | TWIM_IER_IDLE; } static void i2c_prepare_next(struct i2c_sam_twim_dev_data *data, Twim *const twim) { struct i2c_msg *msg = &data->msgs[data->msg_cur_idx]; volatile uint32_t ncmdr_wait; uint32_t cmdr_reg; uint32_t data_size; uint32_t cur_is_read; if (data->cur_idx == msg->len) { data->cur_idx = 0; data->msg_cur_idx++; } i2c_prepare_xfer_data(data); /* Sync CMDR with NCMDR before apply changes */ ncmdr_wait = TWIM_NCMDR_FREE_WAIT; while ((twim->NCMDR & TWIM_NCMDR_VALID) && (ncmdr_wait--)) { ; } cmdr_reg = twim->CMDR; cur_is_read = (cmdr_reg & TWIM_CMDR_READ); twim->IER |= (cur_is_read ? 
TWIM_IER_RXRDY : TWIM_IER_TXRDY); /* Is there any more transfer? */ if (data->next_nb_bytes == 0) { return; } data_size = i2c_prepare_xfer_cmd(data, &cmdr_reg, data->msg_next_idx); cmdr_reg |= TWIM_CMDR_NBYTES(data_size); twim->NCMDR = cmdr_reg; LOG_DBG("ld xfer: NCMDR: 0x%08x", twim->NCMDR); } static void i2c_sam_twim_isr(const struct device *dev) { const struct i2c_sam_twim_dev_cfg *const cfg = dev->config; struct i2c_sam_twim_dev_data *const data = dev->data; Twim *const twim = cfg->regs; struct i2c_msg *msg = &data->msgs[data->msg_cur_idx]; uint32_t isr_status; /* Retrieve interrupt status */ isr_status = twim->SR & twim->IMR; LOG_DBG("ISR: IMR: 0x%08x", isr_status); /* Not Acknowledged */ if (isr_status & TWIM_SR_STD_MASK) { /* * If we get a NACK, clear the valid bit in CMDR, * otherwise the command will be re-sent. */ twim->NCMDR &= ~TWIM_NCMDR_VALID; twim->CMDR &= ~TWIM_CMDR_VALID; data->cur_sr = isr_status; goto xfer_comp; } data->cur_sr = 0; /* Byte received */ if (isr_status & TWIM_SR_RXRDY) { msg->buf[data->cur_idx++] = twim->RHR; data->cur_remaining--; if (data->cur_remaining > 0) { goto check_xfer; } twim->IDR = TWIM_IDR_RXRDY; /* Check for next transfer */ if (data->next_is_valid && data->next_nb_bytes > 0) { i2c_prepare_next(data, twim); } else { data->next_nb_bytes = 0; } } /* Byte sent */ if (isr_status & TWIM_SR_TXRDY) { if (data->cur_idx < msg->len) { twim->THR = msg->buf[data->cur_idx++]; data->cur_remaining--; goto check_xfer; } twim->IDR = TWIM_IDR_TXRDY; /* Check for next transfer */ if (data->next_is_valid && data->next_nb_bytes > 0) { i2c_prepare_next(data, twim); } } check_xfer: /* Is transaction finished ? 
*/ if (!(isr_status & TWIM_SR_IDLE)) { return; } LOG_DBG("ISR: TWIM_SR_IDLE"); xfer_comp: /* Disable all enabled interrupts */ twim->IDR = ~0UL; /* Clear all status */ twim->SCR = ~0UL; /* We are done */ k_sem_give(&data->sem); } static int i2c_sam_twim_transfer(const struct device *dev, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr) { struct i2c_sam_twim_dev_data *data = dev->data; int ret = 0; /* Send out messages */ k_mutex_lock(&data->bus_mutex, K_FOREVER); /* Load messages */ data->msgs = msgs; data->msg_max_idx = num_msgs; i2c_start_xfer(dev, addr); /* Wait for the message transfer to complete */ k_sem_take(&data->sem, K_FOREVER); if (data->cur_sr & TWIM_SR_STD_MASK) { ret = -EIO; LOG_INF("MSG: %d, ANAK: %d, ARBLST: %d", data->msg_cur_idx, (data->cur_sr & TWIM_SR_ANAK) > 0, (data->cur_sr & TWIM_SR_ARBLST) > 0); } k_mutex_unlock(&data->bus_mutex); return ret; } static int i2c_sam_twim_initialize(const struct device *dev) { const struct i2c_sam_twim_dev_cfg *const cfg = dev->config; struct i2c_sam_twim_dev_data *data = dev->data; Twim *const twim = cfg->regs; uint32_t bitrate_cfg; int ret; /* Configure interrupts */ cfg->irq_config(); /* * initialize mutex. it is used when multiple transfers are taking * place to guarantee that each one is atomic and has exclusive access * to the I2C bus. 
*/ k_mutex_init(&data->bus_mutex); /* Initialize semaphore */ k_sem_init(&data->sem, 0, 1); /* Connect pins to the peripheral */ ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Enable TWIM clock in PM */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&cfg->clock_cfg); /* Enable the module*/ twim->CR = TWIM_CR_MEN; /* Reset the module */ twim->CR |= TWIM_CR_SWRST; /* Clear SR */ twim->SCR = ~0UL; bitrate_cfg = i2c_map_dt_bitrate(cfg->bitrate); ret = i2c_sam_twim_configure(dev, I2C_MODE_CONTROLLER | bitrate_cfg); if (ret < 0) { LOG_ERR("Failed to initialize %s device", dev->name); return ret; } /* Enable module's IRQ */ irq_enable(cfg->irq_id); LOG_INF("Device %s initialized", dev->name); return 0; } static const struct i2c_driver_api i2c_sam_twim_driver_api = { .configure = i2c_sam_twim_configure, .transfer = i2c_sam_twim_transfer, }; #define I2C_TWIM_SAM_SLEW_REGS(n) \ .std_clk_slew_lim = DT_INST_ENUM_IDX(n, std_clk_slew_lim), \ .std_clk_strength_low = DT_INST_ENUM_IDX(n, std_clk_strength_low),\ .std_data_slew_lim = DT_INST_ENUM_IDX(n, std_data_slew_lim), \ .std_data_strength_low = DT_INST_ENUM_IDX(n, std_data_strength_low),\ .hs_clk_slew_lim = DT_INST_ENUM_IDX(n, hs_clk_slew_lim), \ .hs_clk_strength_high = DT_INST_ENUM_IDX(n, hs_clk_strength_high),\ .hs_clk_strength_low = DT_INST_ENUM_IDX(n, hs_clk_strength_low),\ .hs_data_slew_lim = DT_INST_ENUM_IDX(n, hs_data_slew_lim), \ .hs_data_strength_low = DT_INST_ENUM_IDX(n, hs_data_strength_low) #define I2C_TWIM_SAM_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static void i2c##n##_sam_irq_config(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ i2c_sam_twim_isr, \ DEVICE_DT_INST_GET(n), 0); \ } \ \ static const struct i2c_sam_twim_dev_cfg i2c##n##_sam_config = {\ .regs = (Twim *)DT_INST_REG_ADDR(n), \ .irq_config = i2c##n##_sam_irq_config, \ .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(n), \ .irq_id = DT_INST_IRQN(n), \ .pcfg = 
PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .bitrate = DT_INST_PROP(n, clock_frequency), \ .hs_master_code = DT_INST_ENUM_IDX(n, hs_master_code), \ I2C_TWIM_SAM_SLEW_REGS(n), \ }; \ \ static struct i2c_sam_twim_dev_data i2c##n##_sam_data; \ \ I2C_DEVICE_DT_INST_DEFINE(n, i2c_sam_twim_initialize, \ NULL, \ &i2c##n##_sam_data, &i2c##n##_sam_config, \ POST_KERNEL, CONFIG_I2C_INIT_PRIORITY, \ &i2c_sam_twim_driver_api) DT_INST_FOREACH_STATUS_OKAY(I2C_TWIM_SAM_INIT); ```
/content/code_sandbox/drivers/i2c/i2c_sam4l_twim.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,321
```c /* * */ #define DT_DRV_COMPAT linaro_ivshmem_ipm #include <stdint.h> #include <string.h> #include <zephyr/device.h> #include <zephyr/drivers/ipm.h> #include <zephyr/drivers/virtualization/ivshmem.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ipm_ivshmem, CONFIG_IPM_LOG_LEVEL); K_THREAD_STACK_DEFINE(ivshmem_ev_loop_stack, CONFIG_IPM_IVSHMEM_EVENT_LOOP_STACK_SIZE); static struct k_thread ivshmem_ev_loop_thread; struct ivshmem_ipm_data { ipm_callback_t cb; void *user_data; }; struct ivshmem_ipm_config { const struct device *ivshmem_dev; }; static void ivshmem_ipm_event_loop_thread(void *arg, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); unsigned int poll_signaled; int ivshmem_vector_rx; struct k_poll_signal sig; struct k_poll_event events[] = { K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig), }; const struct device *dev = (const struct device *)arg; struct ivshmem_ipm_data *dev_data = (struct ivshmem_ipm_data *)dev->data; struct ivshmem_ipm_config *dev_cfg = (struct ivshmem_ipm_config *)dev->config; k_poll_signal_init(&sig); int ret = ivshmem_register_handler(dev_cfg->ivshmem_dev, &sig, 0); if (ret < 0) { LOG_ERR("registering handlers must be supported: %d\n", ret); k_panic(); } while (1) { LOG_DBG("%s: waiting interrupt from client...\n", __func__); ret = k_poll(events, ARRAY_SIZE(events), K_FOREVER); k_poll_signal_check(&sig, &poll_signaled, &ivshmem_vector_rx); /* get ready for next signal */ k_poll_signal_reset(&sig); if (dev_data->cb) { dev_data->cb(dev, dev_data->user_data, 0, NULL); } } } static int ivshmem_ipm_send(const struct device *dev, int wait, uint32_t id, const void *data, int size) { ARG_UNUSED(wait); ARG_UNUSED(data); ARG_UNUSED(size); struct ivshmem_ipm_config *dev_cfg = (struct ivshmem_ipm_config *)dev->config; LOG_DBG("sending notification to the peer id 0x%x\n", id); return ivshmem_int_peer(dev_cfg->ivshmem_dev, id, 0); } static void ivshmem_ipm_register_callback(const struct device *dev, 
ipm_callback_t cb, void *user_data) { struct ivshmem_ipm_data *dev_data = (struct ivshmem_ipm_data *)dev->data; dev_data->cb = cb; dev_data->user_data = user_data; } static int ivshmem_ipm_set_enabled(const struct device *dev, int enable) { /* some subsystems needs this minimal function just return success here*/ ARG_UNUSED(dev); ARG_UNUSED(enable); return 0; } static int ivshmem_ipm_init(const struct device *dev) { k_thread_create(&ivshmem_ev_loop_thread, ivshmem_ev_loop_stack, CONFIG_IPM_IVSHMEM_EVENT_LOOP_STACK_SIZE, ivshmem_ipm_event_loop_thread, (void *)dev, NULL, NULL, CONFIG_IPM_IVSHMEM_EVENT_LOOP_PRIO, 0, K_NO_WAIT); return 0; } static const struct ipm_driver_api ivshmem_ipm_driver_api = { .send = ivshmem_ipm_send, .register_callback = ivshmem_ipm_register_callback, .set_enabled = ivshmem_ipm_set_enabled }; #define IPM_IVSHMEM_INIT(inst) \ static const struct ivshmem_ipm_config ivshmem_ipm_cfg_##inst = { \ .ivshmem_dev = \ DEVICE_DT_GET(DT_INST_PHANDLE(inst, ivshmem))\ }; \ static struct ivshmem_ipm_data ivshmem_ipm_data_##inst = { \ .cb = NULL, \ .user_data = NULL, \ }; \ DEVICE_DT_INST_DEFINE(inst, \ ivshmem_ipm_init, \ NULL, \ &ivshmem_ipm_data_##inst, &ivshmem_ipm_cfg_##inst, \ POST_KERNEL, CONFIG_APPLICATION_INIT_PRIORITY, \ &ivshmem_ipm_driver_api); \ DT_INST_FOREACH_STATUS_OKAY(IPM_IVSHMEM_INIT); ```
/content/code_sandbox/drivers/ipm/ipm_ivshmem.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,032
```c /* * */ #define DT_DRV_COMPAT intel_sedi_ipm #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/pm/device.h> #include <zephyr/drivers/ipm.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ipm_sedi, CONFIG_IPM_LOG_LEVEL); #include "ipm_sedi.h" extern void sedi_ipc_isr(IN sedi_ipc_t ipc_device); static void set_ipm_dev_busy(const struct device *dev, bool is_write) { struct ipm_sedi_context *ipm = dev->data; unsigned int key = irq_lock(); atomic_set_bit(&ipm->status, is_write ? IPM_WRITE_BUSY_BIT : IPM_READ_BUSY_BIT); pm_device_busy_set(dev); irq_unlock(key); } static void clear_ipm_dev_busy(const struct device *dev, bool is_write) { struct ipm_sedi_context *ipm = dev->data; unsigned int key = irq_lock(); atomic_clear_bit(&ipm->status, is_write ? IPM_WRITE_BUSY_BIT : IPM_READ_BUSY_BIT); if ((!atomic_test_bit(&ipm->status, IPM_WRITE_BUSY_BIT)) && (!atomic_test_bit(&ipm->status, IPM_READ_BUSY_BIT))) { pm_device_busy_clear(dev); } irq_unlock(key); } static void ipm_event_dispose(IN sedi_ipc_t device, IN uint32_t event, INOUT void *params) { const struct device *dev = (const struct device *)params; struct ipm_sedi_context *ipm = dev->data; uint32_t drbl_in = 0, len; LOG_DBG("dev: %u, event: %u", device, event); switch (event) { case SEDI_IPC_EVENT_MSG_IN: if (ipm->rx_msg_notify_cb != NULL) { set_ipm_dev_busy(dev, false); sedi_ipc_read_dbl(device, &drbl_in); len = IPC_HEADER_GET_LENGTH(drbl_in); sedi_ipc_read_msg(device, ipm->incoming_data_buf, len); ipm->rx_msg_notify_cb(dev, ipm->rx_msg_notify_cb_data, drbl_in, ipm->incoming_data_buf); } else { LOG_WRN("no handler for ipm new msg"); } break; case SEDI_IPC_EVENT_MSG_PEER_ACKED: if (atomic_test_bit(&ipm->status, IPM_WRITE_IN_PROC_BIT)) { k_sem_give(&ipm->device_write_msg_sem); } else { LOG_WRN("no sending in progress, got an ack"); } break; default: return; } } static int ipm_init(const struct device *dev) { /* allocate resource and context*/ const struct ipm_sedi_config_t *info = dev->config; 
sedi_ipc_t device = info->ipc_device; struct ipm_sedi_context *ipm = dev->data; info->irq_config(); k_sem_init(&ipm->device_write_msg_sem, 0, 1); k_mutex_init(&ipm->device_write_lock); ipm->status = 0; sedi_ipc_init(device, ipm_event_dispose, (void *)dev); atomic_set_bit(&ipm->status, IPM_PEER_READY_BIT); LOG_DBG("ipm driver initialized on device: %p", dev); return 0; } static int ipm_send_isr(const struct device *dev, uint32_t drbl, const void *msg, int msg_size) { const struct ipm_sedi_config_t *info = dev->config; sedi_ipc_t device = info->ipc_device; uint32_t drbl_acked = 0; sedi_ipc_write_msg(device, (uint8_t *)msg, (uint32_t)msg_size); sedi_ipc_write_dbl(device, drbl); do { sedi_ipc_read_ack_drbl(device, &drbl_acked); } while ((drbl_acked & BIT(IPC_BUSY_BIT)) == 0); return 0; } static int ipm_sedi_send(const struct device *dev, int wait, uint32_t drbl, const void *msg, int msg_size) { __ASSERT((dev != NULL), "bad params\n"); const struct ipm_sedi_config_t *info = dev->config; struct ipm_sedi_context *ipm = dev->data; sedi_ipc_t device = info->ipc_device; int ret, sedi_ret; /* check params, check status */ if ((msg_size > IPC_DATA_LEN_MAX) || ((msg_size > 0) && (msg == NULL)) || ((drbl & BIT(IPC_BUSY_BIT)) == 0)) { LOG_ERR("bad params when sending ipm msg on device: %p", dev); return -EINVAL; } if (wait == 0) { LOG_ERR("not support no wait mode when sending ipm msg"); return -ENOTSUP; } if (k_is_in_isr()) { return ipm_send_isr(dev, drbl, msg, msg_size); } k_mutex_lock(&ipm->device_write_lock, K_FOREVER); set_ipm_dev_busy(dev, true); if (!atomic_test_bit(&ipm->status, IPM_PEER_READY_BIT)) { LOG_WRN("peer is not ready"); ret = -EBUSY; goto write_err; } /* write data regs */ if (msg_size > 0) { sedi_ret = sedi_ipc_write_msg(device, (uint8_t *)msg, (uint32_t)msg_size); if (sedi_ret != SEDI_DRIVER_OK) { LOG_ERR("ipm write data fail on device: %p", dev); ret = -EBUSY; goto write_err; } } atomic_set_bit(&ipm->status, IPM_WRITE_IN_PROC_BIT); /* write drbl regs to 
interrupt peer*/ sedi_ret = sedi_ipc_write_dbl(device, drbl); if (sedi_ret != SEDI_DRIVER_OK) { LOG_ERR("ipm write doorbell fail on device: %p", dev); ret = -EBUSY; goto func_out; } /* wait for busy-bit-consumed interrupt */ ret = k_sem_take(&ipm->device_write_msg_sem, K_MSEC(IPM_TIMEOUT_MS)); if (ret) { LOG_WRN("ipm write timeout on device: %p", dev); sedi_ipc_write_dbl(device, 0); } func_out: atomic_clear_bit(&ipm->status, IPM_WRITE_IN_PROC_BIT); write_err: clear_ipm_dev_busy(dev, true); k_mutex_unlock(&ipm->device_write_lock); if (ret == 0) { LOG_DBG("ipm wrote a new message on device: %p, drbl=%08x", dev, drbl); } return ret; } static void ipm_sedi_register_callback(const struct device *dev, ipm_callback_t cb, void *user_data) { __ASSERT((dev != NULL), "bad params\n"); struct ipm_sedi_context *ipm = dev->data; if (cb == NULL) { LOG_ERR("bad params when add ipm callback on device: %p", dev); return; } if (ipm->rx_msg_notify_cb == NULL) { ipm->rx_msg_notify_cb = cb; ipm->rx_msg_notify_cb_data = user_data; } else { LOG_ERR("ipm rx callback already exists on device: %p", dev); } } static void ipm_sedi_complete(const struct device *dev) { int ret; __ASSERT((dev != NULL), "bad params\n"); const struct ipm_sedi_config_t *info = dev->config; sedi_ipc_t device = info->ipc_device; ret = sedi_ipc_send_ack_drbl(device, 0); if (ret != SEDI_DRIVER_OK) { LOG_ERR("ipm send ack drl fail on device: %p", dev); } clear_ipm_dev_busy(dev, false); } static int ipm_sedi_get_max_data_size(const struct device *ipmdev) { ARG_UNUSED(ipmdev); return IPC_DATA_LEN_MAX; } static uint32_t ipm_sedi_get_max_id(const struct device *ipmdev) { ARG_UNUSED(ipmdev); return UINT32_MAX; } static int ipm_sedi_set_enable(const struct device *dev, int enable) { __ASSERT((dev != NULL), "bad params\n"); const struct ipm_sedi_config_t *info = dev->config; if (enable) { irq_enable(info->irq_num); } else { irq_disable(info->irq_num); } return 0; } #if defined(CONFIG_PM_DEVICE) static int ipm_power_ctrl(const 
struct device *dev, enum pm_device_action action) { return 0; } #endif static const struct ipm_driver_api ipm_funcs = { .send = ipm_sedi_send, .register_callback = ipm_sedi_register_callback, .max_data_size_get = ipm_sedi_get_max_data_size, .max_id_val_get = ipm_sedi_get_max_id, .complete = ipm_sedi_complete, .set_enabled = ipm_sedi_set_enable }; #define IPM_SEDI_DEV_DEFINE(n) \ static struct ipm_sedi_context ipm_data_##n; \ static void ipm_##n##_irq_config(void); \ static const struct ipm_sedi_config_t ipm_config_##n = { \ .ipc_device = DT_INST_PROP(n, peripheral_id), \ .irq_num = DT_INST_IRQN(n), \ .irq_config = ipm_##n##_irq_config, \ }; \ static void ipm_##n##_irq_config(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), sedi_ipc_isr, \ DT_INST_PROP(n, peripheral_id), \ DT_INST_IRQ(n, sense)); \ } \ PM_DEVICE_DT_DEFINE(DT_NODELABEL(ipm##n), ipm_power_ctrl); \ DEVICE_DT_INST_DEFINE(n, \ &ipm_init, \ PM_DEVICE_DT_GET(DT_NODELABEL(ipm##n)), \ &ipm_data_##n, \ &ipm_config_##n, \ POST_KERNEL, \ 0, \ &ipm_funcs); DT_INST_FOREACH_STATUS_OKAY(IPM_SEDI_DEV_DEFINE) ```
/content/code_sandbox/drivers/ipm/ipm_sedi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,324
```c /* * */ #define DT_DRV_COMPAT nxp_imx_mu #include <errno.h> #include <string.h> #include <zephyr/device.h> #include <soc.h> #include <zephyr/drivers/ipm.h> #include <zephyr/irq.h> #include <zephyr/sys/barrier.h> #ifdef CONFIG_HAS_MCUX /* MCUX HAL uses a different header file than the i.MX HAL for this IP block */ #include "fsl_mu.h" #else #include <mu_imx.h> #endif #define MU(config) ((MU_Type *)config->base) #if ((CONFIG_IPM_IMX_MAX_DATA_SIZE % 4) != 0) #error CONFIG_IPM_IMX_MAX_DATA_SIZE is invalid #endif #define IMX_IPM_DATA_REGS (CONFIG_IPM_IMX_MAX_DATA_SIZE / 4) struct imx_mu_config { MU_Type *base; void (*irq_config_func)(const struct device *dev); }; struct imx_mu_data { ipm_callback_t callback; void *user_data; }; #if defined(CONFIG_HAS_MCUX) /*! * @brief Check RX full status. * * This function checks the specific receive register full status. * * @param base Register base address for the module. * @param index RX register index to check. * @retval true RX register is full. * @retval false RX register is not full. */ static inline bool MU_IsRxFull(MU_Type *base, uint32_t index) { switch (index) { case 0: return (bool)(MU_GetStatusFlags(base) & kMU_Rx0FullFlag); case 1: return (bool)(MU_GetStatusFlags(base) & kMU_Rx1FullFlag); case 2: return (bool)(MU_GetStatusFlags(base) & kMU_Rx2FullFlag); case 3: return (bool)(MU_GetStatusFlags(base) & kMU_Rx3FullFlag); default: /* This shouldn't happen */ assert(false); return false; } } /*! * @brief Check TX empty status. * * This function checks the specific transmit register empty status. * * @param base Register base address for the module. * @param index TX register index to check. * @retval true TX register is empty. * @retval false TX register is not empty. 
*/ static inline bool MU_IsTxEmpty(MU_Type *base, uint32_t index) { switch (index) { case 0: return (bool)(MU_GetStatusFlags(base) & kMU_Tx0EmptyFlag); case 1: return (bool)(MU_GetStatusFlags(base) & kMU_Tx1EmptyFlag); case 2: return (bool)(MU_GetStatusFlags(base) & kMU_Tx2EmptyFlag); case 3: return (bool)(MU_GetStatusFlags(base) & kMU_Tx3EmptyFlag); default: /* This shouldn't happen */ assert(false); return false; } } #endif static void imx_mu_isr(const struct device *dev) { const struct imx_mu_config *config = dev->config; MU_Type *base = MU(config); struct imx_mu_data *data = dev->data; uint32_t data32[IMX_IPM_DATA_REGS]; uint32_t status_reg; int32_t id; int32_t i; bool all_registers_full; status_reg = base->SR >>= MU_SR_RFn_SHIFT; for (id = CONFIG_IPM_IMX_MAX_ID_VAL; id >= 0; id--) { if (status_reg & 0x1U) { /* * Check if all receive registers are full. If not, * it is violation of the protocol (status register * are set earlier than all receive registers). * Do not read any of the registers in such situation. */ all_registers_full = true; for (i = 0; i < IMX_IPM_DATA_REGS; i++) { if (!MU_IsRxFull(base, (id * IMX_IPM_DATA_REGS) + i)) { all_registers_full = false; break; } } if (all_registers_full) { for (i = 0; i < IMX_IPM_DATA_REGS; i++) { #if defined(CONFIG_HAS_MCUX) data32[i] = MU_ReceiveMsg(base, (id * IMX_IPM_DATA_REGS) + i); #else MU_ReceiveMsg(base, (id * IMX_IPM_DATA_REGS) + i, &data32[i]); #endif } if (data->callback) { data->callback(dev, data->user_data, (uint32_t)id, &data32[0]); } } } status_reg >>= IMX_IPM_DATA_REGS; } /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F * Store immediate overlapping exception return operation * might vector to incorrect interrupt. For Cortex-M7, if * core speed much faster than peripheral register write * speed, the peripheral interrupt flags may be still set * after exiting ISR, this results to the same error similar * with errata 838869. 
*/ #if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) barrier_dsync_fence_full(); #endif } static int imx_mu_ipm_send(const struct device *dev, int wait, uint32_t id, const void *data, int size) { const struct imx_mu_config *config = dev->config; MU_Type *base = MU(config); uint32_t data32[IMX_IPM_DATA_REGS] = {0}; #if !defined(CONFIG_HAS_MCUX) mu_status_t status; #endif int i; if (id > CONFIG_IPM_IMX_MAX_ID_VAL) { return -EINVAL; } if ((size < 0) || (size > CONFIG_IPM_IMX_MAX_DATA_SIZE)) { return -EMSGSIZE; } /* Actual message is passing using 32 bits registers */ memcpy(data32, data, size); #if defined(CONFIG_HAS_MCUX) if (wait) { for (i = 0; i < IMX_IPM_DATA_REGS; i++) { MU_SendMsgNonBlocking(base, id * IMX_IPM_DATA_REGS + i, data32[i]); } while (!MU_IsTxEmpty(base, (id * IMX_IPM_DATA_REGS) + IMX_IPM_DATA_REGS - 1)) { } } else { for (i = 0; i < IMX_IPM_DATA_REGS; i++) { if (MU_IsTxEmpty(base, id * IMX_IPM_DATA_REGS + i)) { MU_SendMsg(base, id * IMX_IPM_DATA_REGS + i, data32[i]); } else { return -EBUSY; } } } #else for (i = 0; i < IMX_IPM_DATA_REGS; i++) { status = MU_TrySendMsg(base, id * IMX_IPM_DATA_REGS + i, data32[i]); if (status == kStatus_MU_TxNotEmpty) { return -EBUSY; } } if (wait) { while (!MU_IsTxEmpty(base, (id * IMX_IPM_DATA_REGS) + IMX_IPM_DATA_REGS - 1)) { } } #endif return 0; } static int imx_mu_ipm_max_data_size_get(const struct device *dev) { ARG_UNUSED(dev); return CONFIG_IPM_IMX_MAX_DATA_SIZE; } static uint32_t imx_mu_ipm_max_id_val_get(const struct device *dev) { ARG_UNUSED(dev); return CONFIG_IPM_IMX_MAX_ID_VAL; } static void imx_mu_ipm_register_callback(const struct device *dev, ipm_callback_t cb, void *user_data) { struct imx_mu_data *driver_data = dev->data; driver_data->callback = cb; driver_data->user_data = user_data; } static int imx_mu_ipm_set_enabled(const struct device *dev, int enable) { const struct imx_mu_config *config = dev->config; MU_Type *base = MU(config); #if defined(CONFIG_HAS_MCUX) #if 
CONFIG_IPM_IMX_MAX_DATA_SIZE_4 if (enable) { MU_EnableInterrupts(base, kMU_Rx0FullInterruptEnable); MU_EnableInterrupts(base, kMU_Rx1FullInterruptEnable); MU_EnableInterrupts(base, kMU_Rx2FullInterruptEnable); MU_EnableInterrupts(base, kMU_Rx3FullInterruptEnable); } else { MU_DisableInterrupts(base, kMU_Rx0FullInterruptEnable); MU_DisableInterrupts(base, kMU_Rx1FullInterruptEnable); MU_DisableInterrupts(base, kMU_Rx2FullInterruptEnable); MU_DisableInterrupts(base, kMU_Rx3FullInterruptEnable); } #elif CONFIG_IPM_IMX_MAX_DATA_SIZE_8 if (enable) { MU_EnableInterrupts(base, kMU_Rx1FullInterruptEnable); MU_EnableInterrupts(base, kMU_Rx3FullInterruptEnable); } else { MU_DisableInterrupts(base, kMU_Rx1FullInterruptEnable); MU_DisableInterrupts(base, kMU_Rx3FullInterruptEnable); } #elif CONFIG_IPM_IMX_MAX_DATA_SIZE_16 if (enable) { MU_EnableInterrupts(base, kMU_Rx3FullInterruptEnable); } else { MU_DisableInterrupts(base, kMU_Rx3FullInterruptEnable); } #else #error "CONFIG_IPM_IMX_MAX_DATA_SIZE_n is not set" #endif #else #if CONFIG_IPM_IMX_MAX_DATA_SIZE_4 if (enable) { MU_EnableRxFullInt(base, 0U); MU_EnableRxFullInt(base, 1U); MU_EnableRxFullInt(base, 2U); MU_EnableRxFullInt(base, 3U); } else { MU_DisableRxFullInt(base, 0U); MU_DisableRxFullInt(base, 1U); MU_DisableRxFullInt(base, 2U); MU_DisableRxFullInt(base, 3U); } #elif CONFIG_IPM_IMX_MAX_DATA_SIZE_8 if (enable) { MU_EnableRxFullInt(base, 1U); MU_EnableRxFullInt(base, 3U); } else { MU_DisableRxFullInt(base, 1U); MU_DisableRxFullInt(base, 3U); } #elif CONFIG_IPM_IMX_MAX_DATA_SIZE_16 if (enable) { MU_EnableRxFullInt(base, 3U); } else { MU_DisableRxFullInt(base, 3U); } #else #error "CONFIG_IPM_IMX_MAX_DATA_SIZE_n is not set" #endif #endif return 0; } static int imx_mu_init(const struct device *dev) { const struct imx_mu_config *config = dev->config; MU_Init(MU(config)); config->irq_config_func(dev); #if defined(CONFIG_IPM_IMX_FW_READY_REPLY) /* Send FW_READY reply message - this is used on host side, * for handshake 
communication. * * An example is in Linux, imx_dsp_rproc driver, where * after starting the remote processor, the host is waiting for a * FW_READY reply. */ MU_Type * base = MU(config); MU_TriggerInterrupts(base, kMU_GenInt0InterruptTrigger | kMU_GenInt1InterruptTrigger | kMU_GenInt2InterruptTrigger | kMU_GenInt3InterruptTrigger); #endif return 0; } static const struct ipm_driver_api imx_mu_driver_api = { .send = imx_mu_ipm_send, .register_callback = imx_mu_ipm_register_callback, .max_data_size_get = imx_mu_ipm_max_data_size_get, .max_id_val_get = imx_mu_ipm_max_id_val_get, .set_enabled = imx_mu_ipm_set_enabled }; /* Config MU */ static void imx_mu_config_func_b(const struct device *dev); static const struct imx_mu_config imx_mu_b_config = { .base = (MU_Type *)DT_INST_REG_ADDR(0), .irq_config_func = imx_mu_config_func_b, }; static struct imx_mu_data imx_mu_b_data; DEVICE_DT_INST_DEFINE(0, &imx_mu_init, NULL, &imx_mu_b_data, &imx_mu_b_config, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &imx_mu_driver_api); static void imx_mu_config_func_b(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), imx_mu_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } ```
/content/code_sandbox/drivers/ipm/ipm_imx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,877
```c /* * */ #define DT_DRV_COMPAT nordic_nrf_ipc #include <string.h> #include <zephyr/drivers/ipm.h> #include <nrfx_ipc.h> #include "ipm_nrfx_ipc.h" #define LOG_LEVEL CONFIG_IPM_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(ipm_nrfx_ipc); struct ipm_nrf_data { ipm_callback_t callback; void *user_data; }; static struct ipm_nrf_data nrfx_ipm_data; static void gipm_init(void); static void gipm_send(uint32_t id); #if defined(CONFIG_IPM_NRF_SINGLE_INSTANCE) static void nrfx_ipc_handler(uint8_t event_idx, void *p_context) { if (nrfx_ipm_data.callback) { __ASSERT(event_idx < NRFX_IPC_ID_MAX_VALUE, "Illegal event_idx: %d", event_idx); nrfx_ipm_data.callback(DEVICE_DT_INST_GET(0), nrfx_ipm_data.user_data, event_idx, NULL); } } static int ipm_nrf_send(const struct device *dev, int wait, uint32_t id, const void *data, int size) { if (id > NRFX_IPC_ID_MAX_VALUE) { return -EINVAL; } if (size > 0) { LOG_WRN("nRF driver does not support sending data over IPM"); } gipm_send(id); return 0; } static int ipm_nrf_max_data_size_get(const struct device *dev) { ARG_UNUSED(dev); return 0; } static uint32_t ipm_nrf_max_id_val_get(const struct device *dev) { ARG_UNUSED(dev); return NRFX_IPC_ID_MAX_VALUE; } static void ipm_nrf_register_callback(const struct device *dev, ipm_callback_t cb, void *user_data) { nrfx_ipm_data.callback = cb; nrfx_ipm_data.user_data = user_data; } static int ipm_nrf_set_enabled(const struct device *dev, int enable) { /* Enable configured channels */ if (enable) { irq_enable(DT_INST_IRQN(0)); nrfx_ipc_receive_event_group_enable((uint32_t)IPC_EVENT_BITS); } else { irq_disable(DT_INST_IRQN(0)); nrfx_ipc_receive_event_group_disable((uint32_t)IPC_EVENT_BITS); } return 0; } static int ipm_nrf_init(const struct device *dev) { gipm_init(); return 0; } static const struct ipm_driver_api ipm_nrf_driver_api = { .send = ipm_nrf_send, .register_callback = ipm_nrf_register_callback, .max_data_size_get = ipm_nrf_max_data_size_get, 
.max_id_val_get = ipm_nrf_max_id_val_get, .set_enabled = ipm_nrf_set_enabled }; DEVICE_DT_INST_DEFINE(0, ipm_nrf_init, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &ipm_nrf_driver_api); #else struct vipm_nrf_data { ipm_callback_t callback[NRFX_IPC_ID_MAX_VALUE]; void *user_data[NRFX_IPC_ID_MAX_VALUE]; const struct device *ipm_device[NRFX_IPC_ID_MAX_VALUE]; bool ipm_init; }; static struct vipm_nrf_data nrfx_vipm_data; static void vipm_dispatcher(uint8_t event_idx, void *p_context) { __ASSERT(event_idx < NRFX_IPC_ID_MAX_VALUE, "Illegal event_idx: %d", event_idx); if (nrfx_vipm_data.callback[event_idx] != NULL) { nrfx_vipm_data.callback[event_idx] (nrfx_vipm_data.ipm_device[event_idx], nrfx_vipm_data.user_data[event_idx], 0, NULL); } } static int vipm_nrf_max_data_size_get(const struct device *dev) { return ipm_max_data_size_get(dev); } static uint32_t vipm_nrf_max_id_val_get(const struct device *dev) { ARG_UNUSED(dev); return 0; } static int vipm_nrf_init(const struct device *dev) { if (!nrfx_vipm_data.ipm_init) { gipm_init(); nrfx_vipm_data.ipm_init = true; } return 0; } #define VIPM_DEVICE_1(_idx) \ static int vipm_nrf_##_idx##_send(const struct device *dev, int wait, \ uint32_t id, const void *data, int size) \ { \ if (!IS_ENABLED(CONFIG_IPM_MSG_CH_##_idx##_TX)) { \ LOG_ERR("IPM_" #_idx " is RX message channel"); \ return -EINVAL; \ } \ \ if (id > NRFX_IPC_ID_MAX_VALUE) { \ return -EINVAL; \ } \ \ if (id != 0) { \ LOG_WRN("Passing message ID to IPM with" \ "predefined message ID"); \ } \ \ if (size > 0) { \ LOG_WRN("nRF driver does not support" \ "sending data over IPM"); \ } \ \ gipm_send(_idx); \ return 0; \ } \ \ static void vipm_nrf_##_idx##_register_callback(const struct device *dev, \ ipm_callback_t cb, \ void *user_data) \ { \ if (IS_ENABLED(CONFIG_IPM_MSG_CH_##_idx##_RX)) { \ nrfx_vipm_data.callback[_idx] = cb; \ nrfx_vipm_data.user_data[_idx] = user_data; \ nrfx_vipm_data.ipm_device[_idx] = dev; \ } else { \ LOG_WRN("Trying to 
register a callback" \ "for TX channel IPM_" #_idx); \ } \ } \ \ static int vipm_nrf_##_idx##_set_enabled(const struct device *dev, int enable)\ { \ if (!IS_ENABLED(CONFIG_IPM_MSG_CH_##_idx##_RX)) { \ LOG_ERR("IPM_" #_idx " is TX message channel"); \ return -EINVAL; \ } else if (enable) { \ irq_enable(DT_INST_IRQN(0)); \ nrfx_ipc_receive_event_enable(_idx); \ } else if (!enable) { \ nrfx_ipc_receive_event_disable(_idx); \ } \ return 0; \ } \ \ static const struct ipm_driver_api vipm_nrf_##_idx##_driver_api = { \ .send = vipm_nrf_##_idx##_send, \ .register_callback = vipm_nrf_##_idx##_register_callback, \ .max_data_size_get = vipm_nrf_max_data_size_get, \ .max_id_val_get = vipm_nrf_max_id_val_get, \ .set_enabled = vipm_nrf_##_idx##_set_enabled \ }; \ \ DEVICE_DEFINE(vipm_nrf_##_idx, "IPM_"#_idx, \ vipm_nrf_init, NULL, NULL, NULL, \ PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \ &vipm_nrf_##_idx##_driver_api) #define VIPM_DEVICE(_idx, _) \ IF_ENABLED(CONFIG_IPM_MSG_CH_##_idx##_ENABLE, (VIPM_DEVICE_1(_idx))) LISTIFY(NRFX_IPC_ID_MAX_VALUE, VIPM_DEVICE, (;), _); #endif static void gipm_init(void) { /* Init IPC */ #if defined(CONFIG_IPM_NRF_SINGLE_INSTANCE) nrfx_ipc_init(0, nrfx_ipc_handler, (void *)&nrfx_ipm_data); #else nrfx_ipc_init(0, vipm_dispatcher, (void *)&nrfx_ipm_data); #endif IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), nrfx_isr, nrfx_ipc_irq_handler, 0); /* Set up signals and channels */ nrfx_ipc_config_load(&ipc_cfg); } static void gipm_send(uint32_t id) { nrfx_ipc_signal(id); } ```
/content/code_sandbox/drivers/ipm/ipm_nrfx_ipc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,936
```unknown config IPM_STM32_IPCC bool "STM32 IPCC controller" default y depends on DT_HAS_ST_STM32_IPCC_MAILBOX_ENABLED select USE_STM32_LL_IPCC help Driver for stm32 IPCC mailboxes config IPM_STM32_IPCC_PROCID int "STM32 IPCC Processor ID" default 2 range 1 2 depends on IPM_STM32_IPCC help use to define the Processor ID for IPCC access config IPM_STM32_HSEM bool "STM32 HSEM controller" default y depends on DT_HAS_ST_STM32_HSEM_MAILBOX_ENABLED help Driver for stm32 HSEM mailbox config IPM_STM32_HSEM_CPU int "HSEM CPU ID" default 1 if "$(dt_nodelabel_enabled,cpu0)" default 2 if "$(dt_nodelabel_enabled,cpu1)" range 1 2 depends on IPM_STM32_HSEM help use to define the CPU ID used by HSEM ```
/content/code_sandbox/drivers/ipm/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
231
```unknown # nRF IPM driver channel configuration menu "IPM Message Channel [$(nrfx_ipc_num)] configuration" config IPM_MSG_CH_$(nrfx_ipc_num)_ENABLE bool "IPM Message Channel $(nrfx_ipc_num)" config IPM_MSG_CH_$(nrfx_ipc_num)_RX bool "IPM Message RX Channel" depends on IPM_MSG_CH_$(nrfx_ipc_num)_ENABLE config IPM_MSG_CH_$(nrfx_ipc_num)_TX bool "IPM Message TX Channel" depends on IPM_MSG_CH_$(nrfx_ipc_num)_ENABLE default ! IPM_MSG_CH_$(nrfx_ipc_num)_RX endmenu ```
/content/code_sandbox/drivers/ipm/Kconfig.nrfx_ipc_channel
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
153
```c /* * */ #define DT_DRV_COMPAT xlnx_zynqmp_ipi_mailbox #include "ipm_xlnx_ipi.h" #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/ipm.h> #include <zephyr/irq.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ipm_xlnx_ipi, CONFIG_IPM_LOG_LEVEL); #define XLNX_IPI_MAX_BUF_SIZE_BYTES 32 struct xlnx_ipi_data { size_t len; void *user_data; uint8_t data[]; }; struct xlnx_ipi_reg_info { uint32_t ipi_ch_bit; }; static const struct xlnx_ipi_reg_info xlnx_ipi_reg_info_zynqmp[] = { {.ipi_ch_bit = IPI_CH0_BIT}, /* IPI CH ID 0 - Default APU */ {.ipi_ch_bit = IPI_CH1_BIT}, /* IPI CH ID 1 - Default RPU0 */ {.ipi_ch_bit = IPI_CH2_BIT}, /* IPI CH ID 2 - Default RPU1 */ {.ipi_ch_bit = IPI_CH3_BIT}, /* IPI CH ID 3 - Default PMU0 */ {.ipi_ch_bit = IPI_CH4_BIT}, /* IPI CH ID 4 - Default PMU1 */ {.ipi_ch_bit = IPI_CH5_BIT}, /* IPI CH ID 5 - Default PMU2 */ {.ipi_ch_bit = IPI_CH6_BIT}, /* IPI CH ID 6 - Default PMU3 */ {.ipi_ch_bit = IPI_CH7_BIT}, /* IPI CH ID 7 - Default PL0 */ {.ipi_ch_bit = IPI_CH8_BIT}, /* IPI CH ID 8 - Default PL1 */ {.ipi_ch_bit = IPI_CH9_BIT}, /* IPI CH ID 9 - Default PL2 */ {.ipi_ch_bit = IPI_CH10_BIT}, /* IPI CH ID 10 - Default PL3 */ }; struct xlnx_ipi_config { uint32_t ipi_ch_bit; uint32_t host_ipi_reg; int (*xlnx_ipi_config_func)(const struct device *dev); const struct device **cdev_list; int num_cdev; }; struct xlnx_ipi_child_data { bool enabled; ipm_callback_t ipm_callback; void *user_data; }; struct xlnx_ipi_child_config { const char *node_id; uint32_t local_request_region; uint32_t local_response_region; uint32_t remote_request_region; uint32_t remote_response_region; uint32_t host_ipi_reg; uint32_t remote_ipi_id; uint32_t remote_ipi_ch_bit; }; static void xlnx_mailbox_rx_isr(const struct device *dev) { const struct xlnx_ipi_config *config; const struct device **cdev_list; const struct xlnx_ipi_child_config *cdev_conf; const struct xlnx_ipi_child_data *cdev_data; uint8_t ipi_buf[XLNX_IPI_MAX_BUF_SIZE_BYTES + sizeof(struct 
xlnx_ipi_data)]; int num_cdev; struct xlnx_ipi_data *msg; const struct device *cdev; uint32_t remote_ipi_ch_bit; int i, j; config = dev->config; cdev_list = config->cdev_list; num_cdev = config->num_cdev; msg = (struct xlnx_ipi_data *)ipi_buf; for (i = 0; i < num_cdev; i++) { cdev = cdev_list[i]; cdev_conf = cdev->config; cdev_data = cdev->data; if (!cdev_data->enabled) { continue; } remote_ipi_ch_bit = cdev_conf->remote_ipi_ch_bit; if (!sys_test_bit(config->host_ipi_reg + IPI_ISR, remote_ipi_ch_bit)) { continue; } msg->len = XLNX_IPI_MAX_BUF_SIZE_BYTES; msg->user_data = cdev_data->user_data; for (j = 0; j < XLNX_IPI_MAX_BUF_SIZE_BYTES; j++) { msg->data[j] = sys_read8(cdev_conf->remote_request_region + j); } if (cdev_data->ipm_callback) { cdev_data->ipm_callback(cdev, cdev_data->user_data, cdev_conf->remote_ipi_id, msg); } sys_set_bit(config->host_ipi_reg + IPI_ISR, remote_ipi_ch_bit); } } static int xlnx_ipi_send(const struct device *ipmdev, int wait, uint32_t id, const void *data, int size) { const uint8_t *msg = (uint8_t *)data; const struct xlnx_ipi_child_config *config = ipmdev->config; unsigned int key; int i, obs_bit; ARG_UNUSED(id); if (size > XLNX_IPI_MAX_BUF_SIZE_BYTES) { return -EMSGSIZE; } key = irq_lock(); if (msg) { /* Write buffer to send data */ for (i = 0; i < size; i++) { sys_write8(msg[i], config->local_request_region + i); } } irq_unlock(key); sys_set_bit(config->host_ipi_reg + IPI_TRIG, config->remote_ipi_ch_bit); obs_bit = 0; do { obs_bit = sys_test_bit(config->host_ipi_reg + IPI_OBS, config->remote_ipi_ch_bit); } while (obs_bit && wait); return 0; } static void xlnx_ipi_register_callback(const struct device *port, ipm_callback_t cb, void *user_data) { struct xlnx_ipi_child_data *data = port->data; data->ipm_callback = cb; data->user_data = user_data; } static int xlnx_ipi_max_data_size_get(const struct device *ipmdev) { return XLNX_IPI_MAX_BUF_SIZE_BYTES; } static uint32_t xlnx_ipi_max_id_val_get(const struct device *ipmdev) { return 
UINT32_MAX; } static int xlnx_ipi_set_enabled(const struct device *ipmdev, int enable) { const struct xlnx_ipi_child_config *config = ipmdev->config; struct xlnx_ipi_child_data *data = ipmdev->data; if (enable) { sys_set_bit(config->host_ipi_reg + IPI_IER, config->remote_ipi_ch_bit); } else { sys_set_bit(config->host_ipi_reg + IPI_IDR, config->remote_ipi_ch_bit); } /* If IPI channel bit in IPI Mask Register is not set, then interrupt is enabled */ if (!sys_test_bit(config->host_ipi_reg + IPI_IMR, config->remote_ipi_ch_bit)) { data->enabled = enable; return 0; } return -EINVAL; } static int xlnx_ipi_init(const struct device *dev) { const struct xlnx_ipi_config *conf = dev->config; /* disable all the interrupts */ sys_write32(0xFFFFFFFF, conf->host_ipi_reg + IPI_IDR); /* clear status of any previous interrupts */ sys_write32(0xFFFFFFFF, conf->host_ipi_reg + IPI_ISR); conf->xlnx_ipi_config_func(dev); return 0; } static struct ipm_driver_api xlnx_ipi_api = { .send = xlnx_ipi_send, .register_callback = xlnx_ipi_register_callback, .max_data_size_get = xlnx_ipi_max_data_size_get, .max_id_val_get = xlnx_ipi_max_id_val_get, .set_enabled = xlnx_ipi_set_enabled, }; #define GET_CHILD_DEV(node_id) DEVICE_DT_GET(node_id), #define XLNX_IPI_CHILD(ch_node) \ struct xlnx_ipi_child_data xlnx_ipi_child_data##ch_node = { \ .enabled = false, \ .ipm_callback = NULL, \ }; \ struct xlnx_ipi_child_config xlnx_ipi_child_config##ch_node = { \ .local_request_region = DT_REG_ADDR_BY_NAME(ch_node, local_request_region), \ .local_response_region = DT_REG_ADDR_BY_NAME(ch_node, local_response_region), \ .remote_request_region = DT_REG_ADDR_BY_NAME(ch_node, remote_request_region), \ .remote_response_region = DT_REG_ADDR_BY_NAME(ch_node, remote_response_region), \ .remote_ipi_id = DT_PROP(ch_node, remote_ipi_id), \ .remote_ipi_ch_bit = \ xlnx_ipi_reg_info_zynqmp[DT_PROP(ch_node, remote_ipi_id)].ipi_ch_bit, \ .host_ipi_reg = DT_REG_ADDR_BY_NAME(DT_PARENT(ch_node), host_ipi_reg), \ }; \ 
DEVICE_DT_DEFINE(ch_node, NULL, NULL, &xlnx_ipi_child_data##ch_node, \ &xlnx_ipi_child_config##ch_node, POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &xlnx_ipi_api); #define XLNX_IPI(inst) \ DT_INST_FOREACH_CHILD_STATUS_OKAY(inst, XLNX_IPI_CHILD); \ static const struct device *cdev##inst[] = { \ DT_INST_FOREACH_CHILD_STATUS_OKAY(inst, GET_CHILD_DEV)}; \ static int xlnx_ipi_config_func##inst(const struct device *dev); \ struct xlnx_ipi_config xlnx_ipi_config##inst = { \ .host_ipi_reg = DT_INST_REG_ADDR_BY_NAME(inst, host_ipi_reg), \ .xlnx_ipi_config_func = xlnx_ipi_config_func##inst, \ .cdev_list = cdev##inst, \ .num_cdev = ARRAY_SIZE(cdev##inst), \ }; \ DEVICE_DT_INST_DEFINE(inst, &xlnx_ipi_init, NULL, NULL, /* data */ \ &xlnx_ipi_config##inst, /* conf */ \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, NULL); \ static int xlnx_ipi_config_func##inst(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), xlnx_mailbox_rx_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQN(inst)); \ LOG_DBG("irq %d is enabled: %s\n", DT_INST_IRQN(inst), \ irq_is_enabled(DT_INST_IRQN(inst)) ? "true" : "false"); \ return 0; \ } DT_INST_FOREACH_STATUS_OKAY(XLNX_IPI) ```
/content/code_sandbox/drivers/ipm/ipm_xlnx_ipi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,426
```c */ #include <zephyr/kernel.h> #include <zephyr/drivers/ipm.h> #include <adsp_memory.h> #include <adsp_shim.h> #include <intel_adsp_ipc.h> #include <mem_window.h> #include <zephyr/cache.h> /* Matches SOF_IPC_MSG_MAX_SIZE, though in practice nothing anywhere * near that big is ever sent. Should maybe consider making this a * kconfig to avoid waste. */ #define MAX_MSG 384 /* Note: these addresses aren't flexible! We require that they match * current SOF ipc3/4 layout, which means that: * * + Buffer addresses are 4k-aligned (this is a hardware requirement) * + Inbuf must be 4k after outbuf, with no use of the intervening memory * + Outbuf must be 4k after the start of win0 (this is where the host driver looks) * * One side effect is that the word "before" MSG_INBUF is owned by our * code too, and can be used for a nice trick below. */ /* host windows */ #define DMWBA(win_base) (win_base + 0x0) #define DMWLO(win_base) (win_base + 0x4) struct ipm_cavs_host_data { ipm_callback_t callback; void *user_data; bool enabled; }; /* Note: this call is unsynchronized. The IPM docs are silent as to * whether this is required, and the SOF code that will be using this * is externally synchronized already. 
*/ static int send(const struct device *dev, int wait, uint32_t id, const void *data, int size) { const struct device *mw0 = DEVICE_DT_GET(DT_NODELABEL(mem_window0)); if (!device_is_ready(mw0)) { return -ENODEV; } const struct mem_win_config *mw0_config = mw0->config; uint32_t *buf = (uint32_t *)sys_cache_uncached_ptr_get( (void *)((uint32_t)mw0_config->mem_base + CONFIG_IPM_CAVS_HOST_OUTBOX_OFFSET)); if (!intel_adsp_ipc_is_complete(INTEL_ADSP_IPC_HOST_DEV)) { return -EBUSY; } if ((size < 0) || (size > MAX_MSG)) { return -EMSGSIZE; } if ((id & 0xc0000000) != 0) { /* cAVS IDR register has only 30 usable bits */ return -EINVAL; } uint32_t ext_data = 0; /* Protocol variant (used by SOF "ipc4"): store the first word * of the message in the IPC scratch registers */ if (IS_ENABLED(CONFIG_IPM_CAVS_HOST_REGWORD) && size >= 4) { ext_data = ((uint32_t *)data)[0]; data = &((const uint32_t *)data)[1]; size -= 4; } memcpy(buf, data, size); int ret = intel_adsp_ipc_send_message(INTEL_ADSP_IPC_HOST_DEV, id, ext_data); /* The IPM docs call for "busy waiting" here, but in fact * there's a blocking synchronous call available that might be * better. But then we'd have to check whether we're in * interrupt context, and it's not clear to me that SOF would * benefit anyway as all its usage is async. This is OK for * now. 
*/ if (ret == -EBUSY && wait) { while (!intel_adsp_ipc_is_complete(INTEL_ADSP_IPC_HOST_DEV)) { k_busy_wait(1); } } return ret; } static bool ipc_handler(const struct device *dev, void *arg, uint32_t data, uint32_t ext_data) { ARG_UNUSED(arg); struct device *ipmdev = arg; struct ipm_cavs_host_data *devdata = ipmdev->data; const struct device *mw1 = DEVICE_DT_GET(DT_NODELABEL(mem_window1)); if (!device_is_ready(mw1)) { return -ENODEV; } const struct mem_win_config *mw1_config = mw1->config; uint32_t *msg = sys_cache_uncached_ptr_get((void *)mw1_config->mem_base); /* We play tricks to leave one word available before the * beginning of the SRAM window, this way the host can see the * same offsets it does with the original ipc4 protocol * implementation, but here in the firmware we see a single * contiguous buffer. See above. */ if (IS_ENABLED(CONFIG_IPM_CAVS_HOST_REGWORD)) { msg = &msg[-1]; msg[0] = ext_data; } if (devdata->enabled && (devdata->callback != NULL)) { devdata->callback(ipmdev, devdata->user_data, data & 0x3fffffff, msg); } /* Return false for async handling */ return !IS_ENABLED(IPM_CALLBACK_ASYNC); } static int max_data_size_get(const struct device *ipmdev) { return MAX_MSG; } static uint32_t max_id_val_get(const struct device *ipmdev) { /* 30 user-writable bits in cAVS IDR register */ return 0x3fffffff; } static void register_callback(const struct device *port, ipm_callback_t cb, void *user_data) { struct ipm_cavs_host_data *data = port->data; data->callback = cb; data->user_data = user_data; } static int set_enabled(const struct device *ipmdev, int enable) { /* This protocol doesn't support any kind of queuing, and in * fact will stall if a message goes unacknowledged. Support * it as best we can by gating the callbacks only. That will * allow the DONE notifications to proceed as normal, at the * cost of dropping any messages received while not "enabled" * of course. 
*/ struct ipm_cavs_host_data *data = ipmdev->data; data->enabled = enable; return 0; } static void complete(const struct device *ipmdev) { intel_adsp_ipc_complete(INTEL_ADSP_IPC_HOST_DEV); } static int init(const struct device *dev) { struct ipm_cavs_host_data *data = dev->data; const struct device *mw1 = DEVICE_DT_GET(DT_NODELABEL(mem_window1)); if (!device_is_ready(mw1)) { return -ENODEV; } const struct mem_win_config *mw1_config = mw1->config; /* Initialize hardware SRAM window. SOF will give the host 8k * here, let's limit it to just the memory we're using for * futureproofing. */ sys_write32(ROUND_UP(MAX_MSG, 8) | 0x7, DMWLO(mw1_config->base_addr)); sys_write32((mw1_config->mem_base | ADSP_DMWBA_ENABLE), DMWBA(mw1_config->base_addr)); intel_adsp_ipc_set_message_handler(INTEL_ADSP_IPC_HOST_DEV, ipc_handler, (void *)dev); data->enabled = true; return 0; } static const struct ipm_driver_api api = { .send = send, .max_data_size_get = max_data_size_get, .max_id_val_get = max_id_val_get, .register_callback = register_callback, .set_enabled = set_enabled, .complete = complete, }; static struct ipm_cavs_host_data data; DEVICE_DEFINE(ipm_cavs_host, "ipm_cavs_host", init, NULL, &data, NULL, PRE_KERNEL_2, 1, &api); ```
/content/code_sandbox/drivers/ipm/ipm_cavs_host.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,683
```unknown config IPM_SEDI bool "Intel SEDI IPM Driver" default y if DT_HAS_INTEL_SEDI_IPM_ENABLED select IPM_CALLBACK_ASYNC help This option enables the Intel SEDI IPM(IPC) driver. This driver is simply a shim driver built upon the SEDI bare metal IPC driver in the hal-intel module ```
/content/code_sandbox/drivers/ipm/Kconfig.sedi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
79
```objective-c /* * */ #ifndef __DRIVERS_IPM_SEDI_H #define __DRIVERS_IPM_SEDI_H #ifdef __cplusplus extern "C" { #endif #include "sedi_driver_common.h" #include "sedi_driver_ipc.h" #include <zephyr/sys/atomic.h> /* * bit 31 indicates whether message is valid, and could generate interrupt * while set/clear */ #define IPC_BUSY_BIT 31 #define IPM_WRITE_IN_PROC_BIT 0 #define IPM_WRITE_BUSY_BIT 1 #define IPM_READ_BUSY_BIT 2 #define IPM_PEER_READY_BIT 3 #define IPM_TIMEOUT_MS 1000 struct ipm_sedi_config_t { sedi_ipc_t ipc_device; int32_t irq_num; void (*irq_config)(void); }; struct ipm_sedi_context { ipm_callback_t rx_msg_notify_cb; void *rx_msg_notify_cb_data; uint8_t incoming_data_buf[IPC_DATA_LEN_MAX]; struct k_sem device_write_msg_sem; struct k_mutex device_write_lock; atomic_t status; uint32_t power_status; }; #ifdef __cplusplus } #endif #endif /* __DRIVERS_IPM_SEDI_H */ ```
/content/code_sandbox/drivers/ipm/ipm_sedi.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
251
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_IPM_IPM_MHU_H_ #define ZEPHYR_DRIVERS_IPM_IPM_MHU_H_ #include <zephyr/kernel.h> #include <zephyr/drivers/ipm.h> #include <zephyr/device.h> #ifdef __cplusplus extern "C" { #endif #define IPM_MHU_MAX_DATA_SIZE 1 #define IPM_MHU_MAX_ID_VAL 0 #define SSE_200_CPU_ID_UNIT_OFFSET ((0x1F000UL)) #define SSE_200_DEVICE_BASE_REG_MSK (0xF0000000UL) /* SSE 200 MHU register map structure */ struct ipm_mhu_reg_map_t { /* (R/ ) CPU 0 Interrupt Status Register */ volatile uint32_t cpu0intr_stat; volatile uint32_t cpu0intr_set; /* ( /W) CPU 0 Interrupt Set Register */ volatile uint32_t cpu0intr_clr; /* ( /W) CPU 0 Interrupt Clear Register */ volatile uint32_t reserved0; /* (R/ ) CPU 1 Interrupt Status Register */ volatile uint32_t cpu1intr_stat; volatile uint32_t cpu1intr_set; /* ( /W) CPU 1 Interrupt Set Register */ volatile uint32_t cpu1intr_clr; /* ( /W) CPU 1 Interrupt Clear Register */ volatile uint32_t reserved1[1004]; volatile uint32_t pidr4; /* ( /W) Peripheral ID 4 */ volatile uint32_t reserved2[3]; volatile uint32_t pidr0; /* ( /W) Peripheral ID 0 */ volatile uint32_t pidr1; /* ( /W) Peripheral ID 1 */ volatile uint32_t pidr2; /* ( /W) Peripheral ID 2 */ volatile uint32_t pidr3; /* ( /W) Peripheral ID 3 */ volatile uint32_t cidr0; /* ( /W) Component ID 0 */ volatile uint32_t cidr1; /* ( /W) Component ID 1 */ volatile uint32_t cidr2; /* ( /W) Component ID 2 */ volatile uint32_t cidr3; /* ( /W) Component ID 3 */ }; /* MHU enumeration types */ enum ipm_mhu_error_t { IPM_MHU_ERR_NONE = 0, /* No error */ IPM_MHU_ERR_INVALID_ARG, /* Invalid argument */ }; /* MHU enumeration types */ enum ipm_mhu_cpu_id_t { IPM_MHU_CPU0 = 0, IPM_MHU_CPU1, IPM_MHU_CPU_MAX, }; struct ipm_mhu_device_config { uint8_t *base; void (*irq_config_func)(const struct device *d); }; /* Device data structure */ struct ipm_mhu_data { ipm_callback_t callback; void *user_data; }; #ifdef __cplusplus } #endif #endif /* ZEPHYR_DRIVERS_IPM_IPM_MHU_H_ */ ```
/content/code_sandbox/drivers/ipm/ipm_mhu.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
642
```c /* * */ #define DT_DRV_COMPAT st_stm32_hsem_mailbox #include <zephyr/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/ipm.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include "stm32_hsem.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(ipm_stm32_hsem, CONFIG_IPM_LOG_LEVEL); #define HSEM_CPU1 1 #define HSEM_CPU2 2 #if CONFIG_IPM_STM32_HSEM_CPU == HSEM_CPU1 #define ll_hsem_enableit_cier LL_HSEM_EnableIT_C1IER #define ll_hsem_disableit_cier LL_HSEM_DisableIT_C1IER #define ll_hsem_clearflag_cicr LL_HSEM_ClearFlag_C1ICR #define ll_hsem_isactiveflag_cmisr LL_HSEM_IsActiveFlag_C1MISR #else /* HSEM_CPU2 */ #define ll_hsem_enableit_cier LL_HSEM_EnableIT_C2IER #define ll_hsem_disableit_cier LL_HSEM_DisableIT_C2IER #define ll_hsem_clearflag_cicr LL_HSEM_ClearFlag_C2ICR #define ll_hsem_isactiveflag_cmisr LL_HSEM_IsActiveFlag_C2MISR #endif /* CONFIG_IPM_STM32_HSEM_CPU */ struct stm32_hsem_mailbox_config { void (*irq_config_func)(const struct device *dev); struct stm32_pclken pclken; }; struct stm32_hsem_mailbox_data { uint32_t tx_semid; uint32_t rx_semid; ipm_callback_t callback; void *user_data; }; static struct stm32_hsem_mailbox_data stm32_hsem_mailbox_0_data; void stm32_hsem_mailbox_ipm_rx_isr(const struct device *dev) { struct stm32_hsem_mailbox_data *data = dev->data; uint32_t mask_semid = (1U << data->rx_semid); /* Check semaphore rx_semid interrupt status */ if (!ll_hsem_isactiveflag_cmisr(HSEM, mask_semid)) { return; } /* Notify user with NULL data pointer */ if (data->callback) { data->callback(dev, data->user_data, 0, NULL); } /* Clear semaphore rx_semid interrupt status and masked status */ ll_hsem_clearflag_cicr(HSEM, mask_semid); } static void stm32_hsem_mailbox_irq_config_func(const struct device *dev) { ARG_UNUSED(dev); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), stm32_hsem_mailbox_ipm_rx_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } int 
stm32_hsem_mailbox_ipm_send(const struct device *dev, int wait, uint32_t id, const void *buff, int size) { struct stm32_hsem_mailbox_data *data = dev->data; ARG_UNUSED(wait); ARG_UNUSED(buff); if (size) { LOG_WRN("stm32 HSEM not support data transfer"); return -EMSGSIZE; } if (id) { LOG_WRN("stm32 HSEM only support a single instance of mailbox"); return -EINVAL; } /* Lock the semaphore tx_semid */ z_stm32_hsem_lock(data->tx_semid, HSEM_LOCK_DEFAULT_RETRY); /** * Release the semaphore tx_semid. * This will trigger a HSEMx interrupt on another CPU. */ z_stm32_hsem_unlock(data->tx_semid); return 0; } void stm32_hsem_mailbox_ipm_register_callback(const struct device *dev, ipm_callback_t cb, void *user_data) { struct stm32_hsem_mailbox_data *data = dev->data; data->callback = cb; data->user_data = user_data; } int stm32_hsem_mailbox_ipm_max_data_size_get(const struct device *dev) { ARG_UNUSED(dev); /* stm32 HSEM not support data transfer */ return 0; } uint32_t stm32_hsem_mailbox_ipm_max_id_val_get(const struct device *dev) { ARG_UNUSED(dev); /* stm32 HSEM only support a single instance of mailbox */ return 0; } int stm32_hsem_mailbox_ipm_set_enabled(const struct device *dev, int enable) { struct stm32_hsem_mailbox_data *data = dev->data; uint32_t mask_semid = (1U << data->rx_semid); if (enable) { /* Clear semaphore rx_semid interrupt status and masked status */ ll_hsem_clearflag_cicr(HSEM, mask_semid); /* Enable semaphore rx_semid on HESMx interrupt */ ll_hsem_enableit_cier(HSEM, mask_semid); } else { /* Disable semaphore rx_semid on HSEMx interrupt */ ll_hsem_disableit_cier(HSEM, mask_semid); } return 0; } static int stm32_hsem_mailbox_init(const struct device *dev) { struct stm32_hsem_mailbox_data *data = dev->data; const struct stm32_hsem_mailbox_config *cfg = dev->config; const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); /* Config transfer semaphore */ switch (CONFIG_IPM_STM32_HSEM_CPU) { case HSEM_CPU1: if (!device_is_ready(clk)) { 
LOG_ERR("clock control device not ready"); return -ENODEV; } /* Enable clock */ if (clock_control_on(clk, (clock_control_subsys_t)&cfg->pclken) != 0) { LOG_WRN("Failed to enable clock"); return -EIO; } data->tx_semid = CFG_HW_IPM_CPU2_SEMID; data->rx_semid = CFG_HW_IPM_CPU1_SEMID; break; case HSEM_CPU2: data->tx_semid = CFG_HW_IPM_CPU1_SEMID; data->rx_semid = CFG_HW_IPM_CPU2_SEMID; break; } cfg->irq_config_func(dev); return 0; } static const struct ipm_driver_api stm32_hsem_mailbox_ipm_dirver_api = { .send = stm32_hsem_mailbox_ipm_send, .register_callback = stm32_hsem_mailbox_ipm_register_callback, .max_data_size_get = stm32_hsem_mailbox_ipm_max_data_size_get, .max_id_val_get = stm32_hsem_mailbox_ipm_max_id_val_get, .set_enabled = stm32_hsem_mailbox_ipm_set_enabled, }; static const struct stm32_hsem_mailbox_config stm32_hsem_mailbox_0_config = { .irq_config_func = stm32_hsem_mailbox_irq_config_func, .pclken = { .bus = DT_INST_CLOCKS_CELL(0, bus), .enr = DT_INST_CLOCKS_CELL(0, bits) }, }; /* * STM32 HSEM has its own LL_HSEM(low-level HSEM) API provided by the hal_stm32 module. * The ipm_stm32_hsem driver only picks up two semaphore IDs from stm32_hsem.h to simulate * a virtual mailbox device. So there will have only one instance. */ #define IPM_STM32_HSEM_INIT(inst) \ BUILD_ASSERT((inst) == 0, \ "multiple instances not supported"); \ DEVICE_DT_INST_DEFINE(0, \ &stm32_hsem_mailbox_init, \ NULL, \ &stm32_hsem_mailbox_0_data, \ &stm32_hsem_mailbox_0_config, \ POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \ &stm32_hsem_mailbox_ipm_dirver_api); \ DT_INST_FOREACH_STATUS_OKAY(IPM_STM32_HSEM_INIT) ```
/content/code_sandbox/drivers/ipm/ipm_stm32_hsem.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,778
```unknown config IPM_CALLBACK_ASYNC bool "Deliver callbacks asynchronously" default y if IPM_CAVS_HOST help When selected, the driver supports "asynchronous" command delivery. Commands will stay active after the ISR returns, until the application expressly "completes" the command later. config IPM_CAVS_HOST bool "cAVS DSP/host communication" select INTEL_ADSP_IPC help Driver for host/DSP communication on intel_adsp devices if IPM_CAVS_HOST config IPM_CAVS_HOST_INBOX_OFFSET hex "Byte offset of cAVS inbox window" depends on INTEL_ADSP_IPC default 0x6000 help Location of the host-writable inbox window within the HP_SRAM_RESERVE region. This location must be synchronized with host driver and SOF source code (must match SRAM_INBOX_BASE). Be careful. config IPM_CAVS_HOST_OUTBOX_OFFSET hex "Byte offset of cAVS outbox memory" depends on INTEL_ADSP_IPC default 0x1000 help Location of the "outbox" region for SOF IPC3/4 message within the pre-existing window 0 (this is not the same as the HP_SRAM_RESERVE region used for INBOX_OFFSET). This location must be synchronized with host driver and SOF source code (where it must equal SRAM_SW_REG_SIZE). Be careful. config IPM_CAVS_HOST_REGWORD bool "Store first 4 bytes in IPC register" depends on INTEL_ADSP_IPC help Protocol variant. When true, the first four bytes of a message are passed in the cAVS IDR/TDR register pair instead of in the SRAM window. Only available on cAVS 1.8+. endif # IPM_CAVS_HOST ```
/content/code_sandbox/drivers/ipm/Kconfig.intel_adsp
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
430
```c /* * */ #include <zephyr/internal/syscall_handler.h> #include <zephyr/drivers/ipm.h> static inline int z_vrfy_ipm_send(const struct device *dev, int wait, uint32_t id, const void *data, int size) { K_OOPS(K_SYSCALL_DRIVER_IPM(dev, send)); K_OOPS(K_SYSCALL_MEMORY_READ(data, size)); return z_impl_ipm_send((const struct device *)dev, wait, id, (const void *)data, size); } #include <zephyr/syscalls/ipm_send_mrsh.c> static inline int z_vrfy_ipm_max_data_size_get(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_IPM(dev, max_data_size_get)); return z_impl_ipm_max_data_size_get((const struct device *)dev); } #include <zephyr/syscalls/ipm_max_data_size_get_mrsh.c> static inline uint32_t z_vrfy_ipm_max_id_val_get(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_IPM(dev, max_id_val_get)); return z_impl_ipm_max_id_val_get((const struct device *)dev); } #include <zephyr/syscalls/ipm_max_id_val_get_mrsh.c> static inline int z_vrfy_ipm_set_enabled(const struct device *dev, int enable) { K_OOPS(K_SYSCALL_DRIVER_IPM(dev, set_enabled)); return z_impl_ipm_set_enabled((const struct device *)dev, enable); } #include <zephyr/syscalls/ipm_set_enabled_mrsh.c> ```
/content/code_sandbox/drivers/ipm/ipm_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
335
```unknown # nRF IPM driver configuration if IPM_NRFX nrfx_ipc_num = 0 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 1 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 2 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 3 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 4 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 5 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 6 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 7 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 8 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 9 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 10 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 11 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 12 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 13 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 14 rsource "Kconfig.nrfx_ipc_channel" nrfx_ipc_num = 15 rsource "Kconfig.nrfx_ipc_channel" endif # IPM_NRFX ```
/content/code_sandbox/drivers/ipm/Kconfig.nrfx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
342
```objective-c /* * */ #include <nrfx_ipc.h> #define NRFX_IPC_ID_MAX_VALUE IPC_CONF_NUM /* * Group IPC signals, events and channels into message channels. * Message channels are one-way connections between cores. * * For example Message Channel 0 is configured as TX on core 0 * and as RX on core 1: * * [C0] [C1] * SIGNAL0 -> CHANNEL0 -> EVENT0 * * Message Channel 1 is configured as RX on core 0 and as TX * on core 1: * [C0] [C1] * EVENT1 <- CHANNEL1 <- SIGNAL1 */ #define IPC_EVENT_BIT(idx) \ ((IS_ENABLED(CONFIG_IPM_MSG_CH_##idx##_RX)) << idx) #define IPC_EVENT_BITS \ ( \ IPC_EVENT_BIT(0) | \ IPC_EVENT_BIT(1) | \ IPC_EVENT_BIT(2) | \ IPC_EVENT_BIT(3) | \ IPC_EVENT_BIT(4) | \ IPC_EVENT_BIT(5) | \ IPC_EVENT_BIT(6) | \ IPC_EVENT_BIT(7) | \ IPC_EVENT_BIT(8) | \ IPC_EVENT_BIT(9) | \ IPC_EVENT_BIT(10) | \ IPC_EVENT_BIT(11) | \ IPC_EVENT_BIT(12) | \ IPC_EVENT_BIT(13) | \ IPC_EVENT_BIT(14) | \ IPC_EVENT_BIT(15) \ ) static const nrfx_ipc_config_t ipc_cfg = { .send_task_config = { [0] = BIT(0), [1] = BIT(1), [2] = BIT(2), [3] = BIT(3), [4] = BIT(4), [5] = BIT(5), [6] = BIT(6), [7] = BIT(7), [8] = BIT(8), [9] = BIT(9), [10] = BIT(10), [11] = BIT(11), [12] = BIT(12), [13] = BIT(13), [14] = BIT(14), [15] = BIT(15), }, .receive_event_config = { [0] = BIT(0), [1] = BIT(1), [2] = BIT(2), [3] = BIT(3), [4] = BIT(4), [5] = BIT(5), [6] = BIT(6), [7] = BIT(7), [8] = BIT(8), [9] = BIT(9), [10] = BIT(10), [11] = BIT(11), [12] = BIT(12), [13] = BIT(13), [14] = BIT(14), [15] = BIT(15), }, .receive_events_enabled = IPC_EVENT_BITS, }; ```
/content/code_sandbox/drivers/ipm/ipm_nrfx_ipc.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
705
```unknown config IPM_MCUX bool "MCUX IPM driver" default y depends on DT_HAS_NXP_LPC_MAILBOX_ENABLED select RESET help Driver for MCUX mailbox config IPM_IMX bool "IMX IPM driver" default y depends on DT_HAS_NXP_IMX_MU_ENABLED help Driver for NXP i.MX messaging unit if IPM_IMX choice prompt "IMX IPM max data size" default IPM_IMX_MAX_DATA_SIZE_16 help Select maximum message size for NXP i.MX messaging unit. config IPM_IMX_MAX_DATA_SIZE_4 bool "4 bytes" help There will be four message types with ids 0, 1, 2 or 3 and a maximum size of 4 bytes each. config IPM_IMX_MAX_DATA_SIZE_8 bool "8 bytes" help There will be two message types with ids 0 or 1 and a maximum size of 8 bytes each. config IPM_IMX_MAX_DATA_SIZE_16 bool "16 bytes" help There will be a single message type with id 0 and a maximum size of 16 bytes. endchoice config IPM_IMX_MAX_DATA_SIZE int range 4 16 default 4 if IPM_IMX_MAX_DATA_SIZE_4 default 8 if IPM_IMX_MAX_DATA_SIZE_8 default 16 if IPM_IMX_MAX_DATA_SIZE_16 config IPM_IMX_MAX_ID_VAL int range 0 3 default 3 if IPM_IMX_MAX_DATA_SIZE_4 default 1 if IPM_IMX_MAX_DATA_SIZE_8 default 0 if IPM_IMX_MAX_DATA_SIZE_16 config IPM_IMX_FW_READY_REPLY bool "Send FW_READY reply message" help Send FW_READY reply to check for FW boot completion endif # IPM_IMX ```
/content/code_sandbox/drivers/ipm/Kconfig.imx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
427
```unknown menuconfig IPM bool "Inter-Processor Mailbox (IPM) drivers" help Include interrupt-based inter-processor mailboxes drivers in system configuration if IPM config IPM_MHU bool "IPM MHU driver" default y depends on DT_HAS_ARM_MHU_ENABLED help Driver for SSE 200 MHU (Message Handling Unit) config IPM_NRFX bool "IPM NRF driver" default y depends on DT_HAS_NORDIC_NRF_IPC_ENABLED select NRFX_IPC help Driver for Nordic nRF messaging unit, based on nRF IPC peripheral HW. config IPM_NRF_SINGLE_INSTANCE bool "Single instance of IPM device" help Enable this option if the IPM device should have a single instance, instead of one per IPC message channel. config ESP32_SOFT_IPM bool "ESP32 Software IPM driver" default y depends on DT_HAS_ESPRESSIF_ESP32_IPM_ENABLED help Interprocessor driver for ESP32 when using AMP. config IPM_IVSHMEM bool "IPM driver based on IVSHMEM-Doorbell" default y depends on DT_HAS_LINARO_IVSHMEM_IPM_ENABLED depends on IVSHMEM depends on IVSHMEM_DOORBELL help Interprocessor driver using IVSHMEM Doorbell mechanism. config XLNX_IPI bool "AMD-Xilinx IPM driver" default y depends on DT_HAS_XLNX_ZYNQMP_IPI_MAILBOX_ENABLED help Inter Processor Interrupt driver for AMD-Xilinx platforms such as ZynqMP Ultrascale+. source "drivers/ipm/Kconfig.nrfx" source "drivers/ipm/Kconfig.imx" source "drivers/ipm/Kconfig.stm32" source "drivers/ipm/Kconfig.intel_adsp" source "drivers/ipm/Kconfig.ivshmem" source "drivers/ipm/Kconfig.sedi" module = IPM module-str = ipm source "subsys/logging/Kconfig.template.log_config" endif #IPM ```
/content/code_sandbox/drivers/ipm/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
458
```c /* * */ #define DT_DRV_COMPAT nxp_lpc_mailbox #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/ipm.h> #include <fsl_mailbox.h> #include <fsl_clock.h> #include <soc.h> #include <zephyr/irq.h> #include <zephyr/sys/barrier.h> #include <zephyr/drivers/reset.h> #include <zephyr/sys/util_macro.h> #define MCUX_IPM_DATA_REGS 1 #define MCUX_IPM_MAX_ID_VAL 0 #if (defined(LPC55S69_cm33_core0_SERIES) || defined(LPC55S69_cm33_core1_SERIES)) #ifdef LPC55S69_cm33_core0_SERIES #define MAILBOX_ID_THIS_CPU kMAILBOX_CM33_Core0 #define MAILBOX_ID_OTHER_CPU kMAILBOX_CM33_Core1 #else #define MAILBOX_ID_THIS_CPU kMAILBOX_CM33_Core1 #define MAILBOX_ID_OTHER_CPU kMAILBOX_CM33_Core0 #endif #else #if defined(__CM4_CMSIS_VERSION) #define MAILBOX_ID_THIS_CPU kMAILBOX_CM4 #define MAILBOX_ID_OTHER_CPU kMAILBOX_CM0Plus #else #define MAILBOX_ID_THIS_CPU kMAILBOX_CM0Plus #define MAILBOX_ID_OTHER_CPU kMAILBOX_CM4 #endif #endif #define MAILBOX_USES_RESET COND_CODE_1(DT_ANY_INST_HAS_PROP_STATUS_OKAY(resets), (true), (false)) struct mcux_mailbox_config { MAILBOX_Type *base; void (*irq_config_func)(const struct device *dev); const struct reset_dt_spec reset; }; struct mcux_mailbox_data { ipm_callback_t callback; void *callback_ctx; }; static void mcux_mailbox_isr(const struct device *dev) { struct mcux_mailbox_data *data = dev->data; const struct mcux_mailbox_config *config = dev->config; mailbox_cpu_id_t cpu_id; cpu_id = MAILBOX_ID_THIS_CPU; volatile uint32_t value = MAILBOX_GetValue(config->base, cpu_id); __ASSERT(value, "spurious MAILBOX interrupt"); /* Clear or the interrupt gets called intermittently */ MAILBOX_ClearValueBits(config->base, cpu_id, value); if (data->callback) { /* Only one MAILBOX, id is unused and set to 0 */ data->callback(dev, data->callback_ctx, 0, &value); } /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F * Store immediate overlapping exception return operation * might vector to incorrect interrupt */ #if defined __CORTEX_M && (__CORTEX_M 
== 4U) barrier_dsync_fence_full(); #endif } static int mcux_mailbox_ipm_send(const struct device *d, int wait, uint32_t id, const void *data, int size) { const struct mcux_mailbox_config *config = d->config; MAILBOX_Type *base = config->base; /* Until we change API to uint32_t array */ uint32_t data32[MCUX_IPM_DATA_REGS] = {0}; unsigned int flags; int i; ARG_UNUSED(wait); if (id > MCUX_IPM_MAX_ID_VAL) { return -EINVAL; } if ((size < 0) || (size > MCUX_IPM_DATA_REGS * sizeof(uint32_t))) { return -EMSGSIZE; } flags = irq_lock(); /* Actual message is passing using 32 bits registers */ memcpy(data32, data, size); for (i = 0; i < ARRAY_SIZE(data32); ++i) { MAILBOX_SetValueBits(base, MAILBOX_ID_OTHER_CPU, data32[i]); } irq_unlock(flags); return 0; } static int mcux_mailbox_ipm_max_data_size_get(const struct device *d) { ARG_UNUSED(d); /* Only a single 32-bit register available */ return MCUX_IPM_DATA_REGS*sizeof(uint32_t); } static uint32_t mcux_mailbox_ipm_max_id_val_get(const struct device *d) { ARG_UNUSED(d); /* Only a single instance of MAILBOX available for this platform */ return MCUX_IPM_MAX_ID_VAL; } static void mcux_mailbox_ipm_register_callback(const struct device *d, ipm_callback_t cb, void *context) { struct mcux_mailbox_data *driver_data = d->data; driver_data->callback = cb; driver_data->callback_ctx = context; } static int mcux_mailbox_ipm_set_enabled(const struct device *d, int enable) { /* For now: nothing to be done */ return 0; } static inline int mcux_mailbox_reset(const struct device *dev) { const struct mcux_mailbox_config *config = dev->config; int ret = 0; /* on some platforms, explicit reset is not needed or possible for the mailbox */ if (!MAILBOX_USES_RESET) { return 0; } if (!device_is_ready(config->reset.dev)) { ret = -ENODEV; } else { ret = reset_line_toggle(config->reset.dev, config->reset.id); } return ret; } static int mcux_mailbox_init(const struct device *dev) { const struct mcux_mailbox_config *config = dev->config; int ret = 0; ret = 
mcux_mailbox_reset(dev); if (ret) { return ret; } MAILBOX_Init(config->base); config->irq_config_func(dev); return 0; } static const struct ipm_driver_api mcux_mailbox_driver_api = { .send = mcux_mailbox_ipm_send, .register_callback = mcux_mailbox_ipm_register_callback, .max_data_size_get = mcux_mailbox_ipm_max_data_size_get, .max_id_val_get = mcux_mailbox_ipm_max_id_val_get, .set_enabled = mcux_mailbox_ipm_set_enabled }; /* Config MAILBOX 0 */ static void mcux_mailbox_config_func_0(const struct device *dev); static const struct mcux_mailbox_config mcux_mailbox_0_config = { .base = (MAILBOX_Type *)DT_INST_REG_ADDR(0), .irq_config_func = mcux_mailbox_config_func_0, .reset = RESET_DT_SPEC_INST_GET_OR(0, {0}), }; static struct mcux_mailbox_data mcux_mailbox_0_data; DEVICE_DT_INST_DEFINE(0, &mcux_mailbox_init, NULL, &mcux_mailbox_0_data, &mcux_mailbox_0_config, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &mcux_mailbox_driver_api); static void mcux_mailbox_config_func_0(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), mcux_mailbox_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } ```
/content/code_sandbox/drivers/ipm/ipm_mcux.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,508
```c /* * */ #define DT_DRV_COMPAT arm_mhu #include <errno.h> #include <zephyr/device.h> #include <soc.h> #include <zephyr/irq.h> #include "ipm_mhu.h" #define IPM_MHU_REGS(dev) \ ((volatile struct ipm_mhu_reg_map_t *) \ (((const struct ipm_mhu_device_config * const)(dev)->config)->base)) static enum ipm_mhu_cpu_id_t ipm_mhu_get_cpu_id(const struct device *d) { volatile uint32_t *p_mhu_dev_base; volatile uint32_t *p_cpu_id; p_mhu_dev_base = (volatile uint32_t *)IPM_MHU_REGS(d); p_cpu_id = (volatile uint32_t *)(((uint32_t)p_mhu_dev_base & SSE_200_DEVICE_BASE_REG_MSK) + SSE_200_CPU_ID_UNIT_OFFSET); return (enum ipm_mhu_cpu_id_t)*p_cpu_id; } static uint32_t ipm_mhu_get_status(const struct device *d, enum ipm_mhu_cpu_id_t cpu_id, uint32_t *status) { struct ipm_mhu_reg_map_t *p_mhu_dev; if (status == NULL) { return IPM_MHU_ERR_INVALID_ARG; } p_mhu_dev = (struct ipm_mhu_reg_map_t *)IPM_MHU_REGS(d); switch (cpu_id) { case IPM_MHU_CPU1: *status = p_mhu_dev->cpu1intr_stat; break; case IPM_MHU_CPU0: default: *status = p_mhu_dev->cpu0intr_stat; break; } return IPM_MHU_ERR_NONE; } static int ipm_mhu_send(const struct device *d, int wait, uint32_t cpu_id, const void *data, int size) { ARG_UNUSED(wait); ARG_UNUSED(data); const uint32_t set_val = 0x01; struct ipm_mhu_reg_map_t *p_mhu_dev; if (cpu_id >= IPM_MHU_CPU_MAX) { return -EINVAL; } if (size > IPM_MHU_MAX_DATA_SIZE) { return -EMSGSIZE; } p_mhu_dev = (struct ipm_mhu_reg_map_t *)IPM_MHU_REGS(d); switch (cpu_id) { case IPM_MHU_CPU1: p_mhu_dev->cpu1intr_set = set_val; break; case IPM_MHU_CPU0: default: p_mhu_dev->cpu0intr_set = set_val; break; } return 0; } static void ipm_mhu_clear_val(const struct device *d, enum ipm_mhu_cpu_id_t cpu_id, uint32_t clear_val) { struct ipm_mhu_reg_map_t *p_mhu_dev; p_mhu_dev = (struct ipm_mhu_reg_map_t *)IPM_MHU_REGS(d); switch (cpu_id) { case IPM_MHU_CPU1: p_mhu_dev->cpu1intr_clr = clear_val; break; case IPM_MHU_CPU0: default: p_mhu_dev->cpu0intr_clr = clear_val; break; } } static uint32_t 
ipm_mhu_max_id_val_get(const struct device *d) { ARG_UNUSED(d); return IPM_MHU_MAX_ID_VAL; } static int ipm_mhu_init(const struct device *d) { const struct ipm_mhu_device_config *config = d->config; config->irq_config_func(d); return 0; } static void ipm_mhu_isr(const struct device *d) { struct ipm_mhu_data *driver_data = d->data; enum ipm_mhu_cpu_id_t cpu_id; uint32_t ipm_mhu_status; cpu_id = ipm_mhu_get_cpu_id(d); ipm_mhu_get_status(d, cpu_id, &ipm_mhu_status); ipm_mhu_clear_val(d, cpu_id, ipm_mhu_status); if (driver_data->callback) { driver_data->callback(d, driver_data->user_data, cpu_id, &ipm_mhu_status); } } static int ipm_mhu_set_enabled(const struct device *d, int enable) { ARG_UNUSED(d); ARG_UNUSED(enable); return 0; } static int ipm_mhu_max_data_size_get(const struct device *d) { ARG_UNUSED(d); return IPM_MHU_MAX_DATA_SIZE; } static void ipm_mhu_register_cb(const struct device *d, ipm_callback_t cb, void *user_data) { struct ipm_mhu_data *driver_data = d->data; driver_data->callback = cb; driver_data->user_data = user_data; } static const struct ipm_driver_api ipm_mhu_driver_api = { .send = ipm_mhu_send, .register_callback = ipm_mhu_register_cb, .max_data_size_get = ipm_mhu_max_data_size_get, .max_id_val_get = ipm_mhu_max_id_val_get, .set_enabled = ipm_mhu_set_enabled, }; static void ipm_mhu_irq_config_func_0(const struct device *d); static const struct ipm_mhu_device_config ipm_mhu_cfg_0 = { .base = (uint8_t *)DT_INST_REG_ADDR(0), .irq_config_func = ipm_mhu_irq_config_func_0, }; static struct ipm_mhu_data ipm_mhu_data_0 = { .callback = NULL, .user_data = NULL, }; DEVICE_DT_INST_DEFINE(0, &ipm_mhu_init, NULL, &ipm_mhu_data_0, &ipm_mhu_cfg_0, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &ipm_mhu_driver_api); static void ipm_mhu_irq_config_func_0(const struct device *d) { ARG_UNUSED(d); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), ipm_mhu_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } static void 
ipm_mhu_irq_config_func_1(const struct device *d); static const struct ipm_mhu_device_config ipm_mhu_cfg_1 = { .base = (uint8_t *)DT_INST_REG_ADDR(1), .irq_config_func = ipm_mhu_irq_config_func_1, }; static struct ipm_mhu_data ipm_mhu_data_1 = { .callback = NULL, .user_data = NULL, }; DEVICE_DT_INST_DEFINE(1, &ipm_mhu_init, NULL, &ipm_mhu_data_1, &ipm_mhu_cfg_1, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &ipm_mhu_driver_api); static void ipm_mhu_irq_config_func_1(const struct device *d) { ARG_UNUSED(d); IRQ_CONNECT(DT_INST_IRQN(1), DT_INST_IRQ(1, priority), ipm_mhu_isr, DEVICE_DT_INST_GET(1), 0); irq_enable(DT_INST_IRQN(1)); } ```
/content/code_sandbox/drivers/ipm/ipm_mhu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,573
```unknown if IPM_IVSHMEM config IPM_IVSHMEM_EVENT_LOOP_STACK_SIZE int "Stack size in bytes of IVSHMEM IPM Event loop task" default 8192 help Adjust the stack size, in bytes of the ivshmem event loop task. config IPM_IVSHMEM_EVENT_LOOP_PRIO int "Priority of IVSHMEM IPM Event loop task" default 2 help Adjust the priority of the ivshmem event loop task. endif ```
/content/code_sandbox/drivers/ipm/Kconfig.ivshmem
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
105
```c /* * */ #define DT_DRV_COMPAT st_stm32_ipcc_mailbox #include <zephyr/drivers/clock_control.h> #include <zephyr/device.h> #include <errno.h> #include <zephyr/drivers/ipm.h> #include <soc.h> #include <stm32_ll_ipcc.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(ipm_stm32_ipcc, CONFIG_IPM_LOG_LEVEL); #define MBX_STRUCT(dev) \ ((IPCC_TypeDef *) \ ((const struct stm32_ipcc_mailbox_config * const)(dev)->config)->uconf.base) #define IPCC_ALL_MR_TXF_CH_MASK 0xFFFF0000 #define IPCC_ALL_MR_RXO_CH_MASK 0x0000FFFF #define IPCC_ALL_SR_CH_MASK 0x0000FFFF #if (CONFIG_IPM_STM32_IPCC_PROCID == 1) #define IPCC_EnableIT_TXF(hipcc) LL_C1_IPCC_EnableIT_TXF(hipcc) #define IPCC_DisableIT_TXF(hipcc) LL_C1_IPCC_DisableIT_TXF(hipcc) #define IPCC_EnableIT_RXO(hipcc) LL_C1_IPCC_EnableIT_RXO(hipcc) #define IPCC_DisableIT_RXO(hipcc) LL_C1_IPCC_DisableIT_RXO(hipcc) #define IPCC_EnableReceiveChannel(hipcc, ch) \ LL_C1_IPCC_EnableReceiveChannel(hipcc, 1 << ch) #define IPCC_EnableTransmitChannel(hipcc, ch) \ LL_C1_IPCC_EnableTransmitChannel(hipcc, 1 << ch) #define IPCC_DisableReceiveChannel(hipcc, ch) \ LL_C2_IPCC_DisableReceiveChannel(hipcc, 1 << ch) #define IPCC_DisableTransmitChannel(hipcc, ch) \ LL_C1_IPCC_DisableTransmitChannel(hipcc, 1 << ch) #define IPCC_ClearFlag_CHx(hipcc, ch) LL_C1_IPCC_ClearFlag_CHx(hipcc, 1 << ch) #define IPCC_SetFlag_CHx(hipcc, ch) LL_C1_IPCC_SetFlag_CHx(hipcc, 1 << ch) #define IPCC_IsActiveFlag_CHx(hipcc, ch) \ LL_C1_IPCC_IsActiveFlag_CHx(hipcc, 1 << ch) #define IPCC_ReadReg(hipcc, reg) READ_REG(hipcc->C1##reg) #define IPCC_ReadReg_SR(hipcc) READ_REG(hipcc->C1TOC2SR) #define IPCC_ReadOtherInstReg_SR(hipcc) READ_REG(hipcc->C2TOC1SR) #else #define IPCC_EnableIT_TXF(hipcc) LL_C2_IPCC_EnableIT_TXF(hipcc) #define IPCC_DisableIT_TXF(hipcc) LL_C2_IPCC_DisableIT_TXF(hipcc) #define IPCC_EnableIT_RXO(hipcc) LL_C2_IPCC_EnableIT_RXO(hipcc) #define IPCC_DisableIT_RXO(hipcc) 
LL_C2_IPCC_DisableIT_RXO(hipcc) #define IPCC_EnableReceiveChannel(hipcc, ch) \ LL_C2_IPCC_EnableReceiveChannel(hipcc, 1 << ch) #define IPCC_EnableTransmitChannel(hipcc, ch) \ LL_C2_IPCC_EnableTransmitChannel(hipcc, 1 << ch) #define IPCC_DisableReceiveChannel(hipcc, ch) \ LL_C2_IPCC_DisableReceiveChannel(hipcc, 1 << ch) #define IPCC_DisableTransmitChannel(hipcc, ch) \ LL_C2_IPCC_DisableTransmitChannel(hipcc, 1 << ch) #define IPCC_ClearFlag_CHx(hipcc, ch) LL_C2_IPCC_ClearFlag_CHx(hipcc, 1 << ch) #define IPCC_SetFlag_CHx(hipcc, ch) LL_C2_IPCC_SetFlag_CHx(hipcc, 1 << ch) #define IPCC_IsActiveFlag_CHx(hipcc, ch) \ LL_C2_IPCC_IsActiveFlag_CHx(hipcc, 1 << ch) #define IPCC_ReadReg(hipcc, reg) READ_REG(hipcc->C2##reg) #define IPCC_ReadReg_SR(hipcc) READ_REG(hipcc->C2TOC1SR) #define IPCC_ReadOtherInstReg_SR(hipcc) READ_REG(hipcc->C1TOC2SR) #endif struct stm32_ipcc_mailbox_config { void (*irq_config_func)(const struct device *dev); IPCC_TypeDef *ipcc; struct stm32_pclken pclken; }; struct stm32_ipcc_mbx_data { uint32_t num_ch; ipm_callback_t callback; void *user_data; }; static struct stm32_ipcc_mbx_data stm32_IPCC_data; static void stm32_ipcc_mailbox_rx_isr(const struct device *dev) { struct stm32_ipcc_mbx_data *data = dev->data; const struct stm32_ipcc_mailbox_config *cfg = dev->config; unsigned int value = 0; uint32_t mask, i; mask = (~IPCC_ReadReg(cfg->ipcc, MR)) & IPCC_ALL_MR_RXO_CH_MASK; mask &= IPCC_ReadOtherInstReg_SR(cfg->ipcc) & IPCC_ALL_SR_CH_MASK; for (i = 0; i < data->num_ch; i++) { if (!((1 << i) & mask)) { continue; } LOG_DBG("%s channel = %x\r\n", __func__, i); /* mask the channel Free interrupt */ IPCC_DisableReceiveChannel(cfg->ipcc, i); if (data->callback) { /* Only one MAILBOX, id is unused and set to 0 */ data->callback(dev, data->user_data, i, &value); } /* clear status to acknowledge message reception */ IPCC_ClearFlag_CHx(cfg->ipcc, i); IPCC_EnableReceiveChannel(cfg->ipcc, i); } } static void stm32_ipcc_mailbox_tx_isr(const struct device *dev) { struct 
stm32_ipcc_mbx_data *data = dev->data; const struct stm32_ipcc_mailbox_config *cfg = dev->config; uint32_t mask, i; mask = (~IPCC_ReadReg(cfg->ipcc, MR)) & IPCC_ALL_MR_TXF_CH_MASK; mask = mask >> IPCC_C1MR_CH1FM_Pos; mask &= ~IPCC_ReadReg_SR(cfg->ipcc) & IPCC_ALL_SR_CH_MASK; for (i = 0; i < data->num_ch; i++) { if (!((1 << i) & mask)) { continue; } LOG_DBG("%s channel = %x\r\n", __func__, i); /* mask the channel Free interrupt */ IPCC_DisableTransmitChannel(cfg->ipcc, i); } } static int stm32_ipcc_mailbox_ipm_send(const struct device *dev, int wait, uint32_t id, const void *buff, int size) { struct stm32_ipcc_mbx_data *data = dev->data; const struct stm32_ipcc_mailbox_config *cfg = dev->config; ARG_UNUSED(wait); ARG_UNUSED(buff); /* No data transmission, only doorbell */ if (size) { return -EMSGSIZE; } if (id >= data->num_ch) { LOG_ERR("invalid id (%d)\r\n", id); return -EINVAL; } LOG_DBG("Send msg on channel %d\r\n", id); /* Check that the channel is free (otherwise wait) */ if (IPCC_IsActiveFlag_CHx(cfg->ipcc, id)) { LOG_DBG("Waiting for channel to be freed\r\n"); while (IPCC_IsActiveFlag_CHx(cfg->ipcc, id)) { ; } } IPCC_EnableTransmitChannel(cfg->ipcc, id); IPCC_SetFlag_CHx(cfg->ipcc, id); return 0; } static int stm32_ipcc_mailbox_ipm_max_data_size_get(const struct device *dev) { ARG_UNUSED(dev); /* no data transfer capability */ return 0; } static uint32_t stm32_ipcc_mailbox_ipm_max_id_val_get(const struct device *d) { struct stm32_ipcc_mbx_data *data = d->data; return data->num_ch - 1; } static void stm32_ipcc_mailbox_ipm_register_callback(const struct device *d, ipm_callback_t cb, void *user_data) { struct stm32_ipcc_mbx_data *data = d->data; data->callback = cb; data->user_data = user_data; } static int stm32_ipcc_mailbox_ipm_set_enabled(const struct device *dev, int enable) { struct stm32_ipcc_mbx_data *data = dev->data; const struct stm32_ipcc_mailbox_config *cfg = dev->config; uint32_t i; /* For now: nothing to be done */ LOG_DBG("%s %s mailbox\r\n", 
__func__, enable ? "enable" : "disable"); if (enable) { /* Enable RX and TX interrupts */ IPCC_EnableIT_TXF(cfg->ipcc); IPCC_EnableIT_RXO(cfg->ipcc); for (i = 0; i < data->num_ch; i++) { IPCC_EnableReceiveChannel(cfg->ipcc, i); } } else { /* Disable RX and TX interrupts */ IPCC_DisableIT_TXF(cfg->ipcc); IPCC_DisableIT_RXO(cfg->ipcc); for (i = 0; i < data->num_ch; i++) { IPCC_DisableReceiveChannel(cfg->ipcc, i); } } return 0; } static int stm32_ipcc_mailbox_init(const struct device *dev) { struct stm32_ipcc_mbx_data *data = dev->data; const struct stm32_ipcc_mailbox_config *cfg = dev->config; const struct device *clk; uint32_t i; clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* enable clock */ if (clock_control_on(clk, (clock_control_subsys_t)&cfg->pclken) != 0) { return -EIO; } /* Disable RX and TX interrupts */ IPCC_DisableIT_TXF(cfg->ipcc); IPCC_DisableIT_RXO(cfg->ipcc); data->num_ch = LL_IPCC_GetChannelConfig(cfg->ipcc); for (i = 0; i < data->num_ch; i++) { /* Clear RX status */ IPCC_ClearFlag_CHx(cfg->ipcc, i); /* mask RX and TX interrupts */ IPCC_DisableReceiveChannel(cfg->ipcc, i); IPCC_DisableTransmitChannel(cfg->ipcc, i); } cfg->irq_config_func(dev); return 0; } static const struct ipm_driver_api stm32_ipcc_mailbox_driver_api = { .send = stm32_ipcc_mailbox_ipm_send, .register_callback = stm32_ipcc_mailbox_ipm_register_callback, .max_data_size_get = stm32_ipcc_mailbox_ipm_max_data_size_get, .max_id_val_get = stm32_ipcc_mailbox_ipm_max_id_val_get, .set_enabled = stm32_ipcc_mailbox_ipm_set_enabled, }; static void stm32_ipcc_mailbox_config_func(const struct device *dev); /* Config MAILBOX 0 */ static const struct stm32_ipcc_mailbox_config stm32_ipcc_mailbox_0_config = { .irq_config_func = stm32_ipcc_mailbox_config_func, .ipcc = (IPCC_TypeDef *)DT_INST_REG_ADDR(0), .pclken = { .bus = DT_INST_CLOCKS_CELL(0, bus), .enr = DT_INST_CLOCKS_CELL(0, bits) }, }; 
DEVICE_DT_INST_DEFINE(0, &stm32_ipcc_mailbox_init, NULL, &stm32_IPCC_data, &stm32_ipcc_mailbox_0_config, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &stm32_ipcc_mailbox_driver_api); static void stm32_ipcc_mailbox_config_func(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, rxo, irq), DT_INST_IRQ_BY_NAME(0, rxo, priority), stm32_ipcc_mailbox_rx_isr, DEVICE_DT_INST_GET(0), 0); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, txf, irq), DT_INST_IRQ_BY_NAME(0, txf, priority), stm32_ipcc_mailbox_tx_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, rxo, irq)); irq_enable(DT_INST_IRQ_BY_NAME(0, txf, irq)); } ```
/content/code_sandbox/drivers/ipm/ipm_stm32_ipcc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,897
```objective-c /* * */ #ifndef _IPM_XLNX_IPI_H_ #define _IPM_XLNX_IPI_H_ /* IPI Channel ID bits */ #define IPI_CH0_BIT 0 #define IPI_CH1_BIT 8 #define IPI_CH2_BIT 9 #define IPI_CH3_BIT 16 #define IPI_CH4_BIT 17 #define IPI_CH5_BIT 18 #define IPI_CH6_BIT 19 #define IPI_CH7_BIT 24 #define IPI_CH8_BIT 25 #define IPI_CH9_BIT 26 #define IPI_CH10_BIT 27 /* Register offsets */ #define IPI_TRIG 0x00 #define IPI_OBS 0x04 #define IPI_ISR 0x10 #define IPI_IMR 0x14 #define IPI_IER 0x18 #define IPI_IDR 0x1C #endif /* _IPM_XLNX_IPI_H_ */ ```
/content/code_sandbox/drivers/ipm/ipm_xlnx_ipi.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
223
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/internal/syscall_handler.h> #include <zephyr/drivers/i2s.h> static inline int z_vrfy_i2s_configure(const struct device *dev, enum i2s_dir dir, const struct i2s_config *cfg_ptr) { struct i2s_config config; int ret = -EINVAL; if (K_SYSCALL_DRIVER_I2S(dev, configure)) { goto out; } K_OOPS(k_usermode_from_copy(&config, (const void *)cfg_ptr, sizeof(struct i2s_config))); /* Check that the k_mem_slab provided is a valid pointer and that * the caller has permission on it */ if (K_SYSCALL_OBJ(config.mem_slab, K_OBJ_MEM_SLAB)) { goto out; } /* Ensure that the k_mem_slab's slabs are large enough for the * specified block size */ if (config.block_size > config.mem_slab->info.block_size) { goto out; } ret = z_impl_i2s_configure((const struct device *)dev, dir, &config); out: return ret; } #include <zephyr/syscalls/i2s_configure_mrsh.c> static inline int z_vrfy_i2s_buf_read(const struct device *dev, void *buf, size_t *size) { void *mem_block; size_t data_size; int ret; K_OOPS(K_SYSCALL_DRIVER_I2S(dev, read)); ret = i2s_read((const struct device *)dev, &mem_block, &data_size); if (!ret) { const struct i2s_config *rx_cfg; int copy_success; /* Presumed to be configured otherwise the i2s_read() call * would have failed. 
*/ rx_cfg = i2s_config_get((const struct device *)dev, I2S_DIR_RX); copy_success = k_usermode_to_copy((void *)buf, mem_block, data_size); k_mem_slab_free(rx_cfg->mem_slab, mem_block); K_OOPS(copy_success); K_OOPS(k_usermode_to_copy((void *)size, &data_size, sizeof(data_size))); } return ret; } #include <zephyr/syscalls/i2s_buf_read_mrsh.c> static inline int z_vrfy_i2s_buf_write(const struct device *dev, void *buf, size_t size) { int ret; const struct i2s_config *tx_cfg; void *mem_block; K_OOPS(K_SYSCALL_DRIVER_I2S(dev, write)); tx_cfg = i2s_config_get((const struct device *)dev, I2S_DIR_TX); if (!tx_cfg) { return -EIO; } if (size > tx_cfg->block_size) { return -EINVAL; } ret = k_mem_slab_alloc(tx_cfg->mem_slab, &mem_block, K_FOREVER); if (ret < 0) { return -ENOMEM; } ret = k_usermode_from_copy(mem_block, (void *)buf, size); if (ret) { k_mem_slab_free(tx_cfg->mem_slab, mem_block); K_OOPS(ret); } ret = i2s_write((const struct device *)dev, mem_block, size); if (ret != 0) { k_mem_slab_free(tx_cfg->mem_slab, mem_block); } return ret; } #include <zephyr/syscalls/i2s_buf_write_mrsh.c> static inline int z_vrfy_i2s_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd) { K_OOPS(K_SYSCALL_DRIVER_I2S(dev, trigger)); return z_impl_i2s_trigger((const struct device *)dev, dir, cmd); } #include <zephyr/syscalls/i2s_trigger_mrsh.c> ```
/content/code_sandbox/drivers/i2s/i2s_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
849
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_ipm #include "soc/dport_reg.h" #include "soc/gpio_periph.h" #include <stdint.h> #include <string.h> #include <zephyr/device.h> #include <zephyr/drivers/ipm.h> #include <zephyr/drivers/interrupt_controller/intc_esp32.h> #include <soc.h> #include <zephyr/sys/atomic.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ipm_esp32, CONFIG_IPM_LOG_LEVEL); #define ESP32_IPM_LOCK_FREE_VAL 0xB33FFFFF #define ESP32_IPM_NOOP_VAL 0xFF __packed struct esp32_ipm_control { uint16_t dest_cpu_msg_id[2]; atomic_val_t lock; }; struct esp32_ipm_memory { uint8_t *pro_cpu_shm; uint8_t *app_cpu_shm; }; struct esp32_ipm_config { uint32_t irq_source_pro_cpu; uint32_t irq_source_app_cpu; }; struct esp32_ipm_data { ipm_callback_t cb; void *user_data; uint32_t this_core_id; uint32_t other_core_id; uint32_t shm_size; struct esp32_ipm_memory shm; struct esp32_ipm_control *control; }; IRAM_ATTR static void esp32_ipm_isr(const struct device *dev) { struct esp32_ipm_data *dev_data = (struct esp32_ipm_data *)dev->data; uint32_t core_id = dev_data->this_core_id; /* clear interrupt flag */ if (core_id == 0) { #if defined(CONFIG_SOC_SERIES_ESP32) DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0); #elif defined(CONFIG_SOC_SERIES_ESP32S3) WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, 0); #endif } else { #if defined(CONFIG_SOC_SERIES_ESP32) DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0); #elif defined(CONFIG_SOC_SERIES_ESP32S3) WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, 0); #endif } /* first of all take the own of the shared memory */ while (!atomic_cas(&dev_data->control->lock, ESP32_IPM_LOCK_FREE_VAL, dev_data->this_core_id)) { ; } if (dev_data->cb) { volatile void *shm = dev_data->shm.pro_cpu_shm; if (core_id != 0) { shm = dev_data->shm.app_cpu_shm; } dev_data->cb(dev, dev_data->user_data, dev_data->control->dest_cpu_msg_id[core_id], shm); } /* unlock the shared memory */ atomic_set(&dev_data->control->lock, 
ESP32_IPM_LOCK_FREE_VAL); } static int esp32_ipm_send(const struct device *dev, int wait, uint32_t id, const void *data, int size) { struct esp32_ipm_data *dev_data = (struct esp32_ipm_data *)dev->data; if (size > 0 && data == NULL) { LOG_ERR("Invalid data source"); return -EINVAL; } if (id > 0xFFFF) { LOG_ERR("Invalid message ID format"); return -EINVAL; } if (dev_data->shm_size < size) { LOG_ERR("Not enough memory in IPM channel"); return -ENOMEM; } uint32_t key = irq_lock(); /* try to lock the shared memory */ while (!atomic_cas(&dev_data->control->lock, ESP32_IPM_LOCK_FREE_VAL, dev_data->this_core_id)) { k_busy_wait(1); if ((wait != -1) && (wait > 0)) { /* lock could not be held this time, return */ wait--; if (wait == 0) { irq_unlock(key); return -ETIMEDOUT; } } } /* Only the lower 16bits of id are used */ dev_data->control->dest_cpu_msg_id[dev_data->other_core_id] = (uint16_t)(id & 0xFFFF); /* data copied, set the id and, generate interrupt in the remote core */ if (dev_data->this_core_id == 0) { memcpy(dev_data->shm.app_cpu_shm, data, size); atomic_set(&dev_data->control->lock, ESP32_IPM_LOCK_FREE_VAL); LOG_DBG("Generating interrupt on remote CPU 1 from CPU 0"); #if defined(CONFIG_SOC_SERIES_ESP32) DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1); #elif defined(CONFIG_SOC_SERIES_ESP32S3) WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, SYSTEM_CPU_INTR_FROM_CPU_1); #endif } else { memcpy(dev_data->shm.pro_cpu_shm, data, size); atomic_set(&dev_data->control->lock, ESP32_IPM_LOCK_FREE_VAL); LOG_DBG("Generating interrupt on remote CPU 0 from CPU 1"); #if defined(CONFIG_SOC_SERIES_ESP32) DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0); #elif defined(CONFIG_SOC_SERIES_ESP32S3) WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, SYSTEM_CPU_INTR_FROM_CPU_0); #endif } irq_unlock(key); return 0; } static void esp32_ipm_register_callback(const struct device *dev, ipm_callback_t cb, void *user_data) { struct 
esp32_ipm_data *data = (struct esp32_ipm_data *)dev->data; uint32_t key = irq_lock(); data->cb = cb; data->user_data = user_data; irq_unlock(key); } static int esp32_ipm_max_data_size_get(const struct device *dev) { struct esp32_ipm_data *data = (struct esp32_ipm_data *)dev->data; return data->shm_size; } static uint32_t esp_32_ipm_max_id_val_get(const struct device *dev) { ARG_UNUSED(dev); return 0xFFFF; } static int esp_32_ipm_set_enabled(const struct device *dev, int enable) { /* The esp32 IPM is always enabled * but rpmsg backend needs IPM set enabled to be * implemented so just return success here */ ARG_UNUSED(dev); ARG_UNUSED(enable); return 0; } static int esp32_ipm_init(const struct device *dev) { struct esp32_ipm_data *data = (struct esp32_ipm_data *)dev->data; struct esp32_ipm_config *cfg = (struct esp32_ipm_config *)dev->config; data->this_core_id = esp_core_id(); data->other_core_id = (data->this_core_id == 0) ? 1 : 0; LOG_DBG("Size of IPM shared memory: %d", data->shm_size); LOG_DBG("Address of PRO_CPU IPM shared memory: %p", data->shm.pro_cpu_shm); LOG_DBG("Address of APP_CPU IPM shared memory: %p", data->shm.app_cpu_shm); LOG_DBG("Address of IPM control structure: %p", data->control); /* pro_cpu is responsible to initialize the lock of shared memory */ if (data->this_core_id == 0) { esp_intr_alloc(cfg->irq_source_pro_cpu, ESP_INTR_FLAG_IRAM, (intr_handler_t)esp32_ipm_isr, (void *)dev, NULL); atomic_set(&data->control->lock, ESP32_IPM_LOCK_FREE_VAL); } else { /* app_cpu wait for initialization from pro_cpu, then takes it, * after that releases */ esp_intr_alloc(cfg->irq_source_app_cpu, ESP_INTR_FLAG_IRAM, (intr_handler_t)esp32_ipm_isr, (void *)dev, NULL); LOG_DBG("Waiting CPU0 to sync"); while (!atomic_cas(&data->control->lock, ESP32_IPM_LOCK_FREE_VAL, data->this_core_id)) { ; } atomic_set(&data->control->lock, ESP32_IPM_LOCK_FREE_VAL); LOG_DBG("Synchronization done"); } return 0; } static const struct ipm_driver_api esp32_ipm_driver_api = { .send = 
esp32_ipm_send, .register_callback = esp32_ipm_register_callback, .max_data_size_get = esp32_ipm_max_data_size_get, .max_id_val_get = esp_32_ipm_max_id_val_get, .set_enabled = esp_32_ipm_set_enabled }; #define ESP32_IPM_SHM_SIZE_BY_IDX(idx) \ DT_INST_PROP(idx, shared_memory_size) \ #define ESP32_IPM_SHM_ADDR_BY_IDX(idx) \ DT_REG_ADDR(DT_PHANDLE(DT_DRV_INST(idx), shared_memory)) \ #define ESP32_IPM_INIT(idx) \ \ static struct esp32_ipm_config esp32_ipm_device_cfg_##idx = { \ .irq_source_pro_cpu = DT_INST_IRQN(idx), \ .irq_source_app_cpu = DT_INST_IRQN(idx) + 1, \ }; \ \ static struct esp32_ipm_data esp32_ipm_device_data_##idx = { \ .shm_size = ESP32_IPM_SHM_SIZE_BY_IDX(idx), \ .shm.pro_cpu_shm = (uint8_t *)ESP32_IPM_SHM_ADDR_BY_IDX(idx), \ .shm.app_cpu_shm = (uint8_t *)ESP32_IPM_SHM_ADDR_BY_IDX(idx) + \ ESP32_IPM_SHM_SIZE_BY_IDX(idx)/2, \ .control = (struct esp32_ipm_control *)DT_INST_REG_ADDR(idx), \ }; \ \ DEVICE_DT_INST_DEFINE(idx, &esp32_ipm_init, NULL, \ &esp32_ipm_device_data_##idx, &esp32_ipm_device_cfg_##idx, \ PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \ &esp32_ipm_driver_api); \ DT_INST_FOREACH_STATUS_OKAY(ESP32_IPM_INIT); ```
/content/code_sandbox/drivers/ipm/ipm_esp32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,277
```unknown # # # menuconfig I2S_LITEX bool "Litex I2S driver" default y depends on DT_HAS_LITEX_I2S_ENABLED help Enable Litex Inter Sound (I2S) bus driver. if I2S_LITEX config I2S_LITEX_RX_BLOCK_COUNT int "RX queue length" default 500 config I2S_LITEX_TX_BLOCK_COUNT int "TX queue length" default 500 config I2S_LITEX_CHANNELS_CONCATENATED bool "Channels placed without padding in fifo" config I2S_LITEX_DATA_BIG_ENDIAN bool "Received data will be stored as big endian" endif ```
/content/code_sandbox/drivers/i2s/Kconfig.litex
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
151
```unknown # MCUX I2S driver configuration options menuconfig I2S_MCUX_SAI bool "NXP MCUX I2S controller driver" default y depends on DT_HAS_NXP_MCUX_I2S_ENABLED select DMA select PINCTRL help Enable I2S support on the I.MX family of processors. if I2S_MCUX_SAI config I2S_RX_BLOCK_COUNT int "RX queue length" default 4 config I2S_TX_BLOCK_COUNT int "TX queue length" default 4 config I2S_EDMA_BURST_SIZE int "I2S EDMA BURST SIZE" default 2 help I2S EDMA burst size in bytes. endif # I2S_MCUX_SAI ```
/content/code_sandbox/drivers/i2s/Kconfig.mcux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
169
```unknown # STM32 I2S driver configuration options menuconfig I2S_STM32 bool "STM32 MCU I2S controller driver" default y depends on DT_HAS_ST_STM32_I2S_ENABLED select CACHE_MANAGEMENT if CPU_HAS_DCACHE select DMA help Enable I2S support on the STM32 family of processors. (Tested on the STM32F4 & STM32H7 series) if I2S_STM32 config I2S_STM32_RX_BLOCK_COUNT int "RX queue length" default 4 config I2S_STM32_TX_BLOCK_COUNT int "TX queue length" default 4 endif # I2S_STM32 ```
/content/code_sandbox/drivers/i2s/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
153
```c /* * */ #include <zephyr/kernel.h> #include <string.h> #include <zephyr/drivers/i2s.h> int z_impl_i2s_buf_read(const struct device *dev, void *buf, size_t *size) { void *mem_block; int ret; ret = i2s_read((const struct device *)dev, &mem_block, size); if (!ret) { const struct i2s_config *rx_cfg; rx_cfg = i2s_config_get((const struct device *)dev, I2S_DIR_RX); memcpy(buf, mem_block, *size); k_mem_slab_free(rx_cfg->mem_slab, mem_block); } return ret; } int z_impl_i2s_buf_write(const struct device *dev, void *buf, size_t size) { int ret; const struct i2s_config *tx_cfg; void *mem_block; tx_cfg = i2s_config_get((const struct device *)dev, I2S_DIR_TX); if (!tx_cfg) { return -EIO; } if (size > tx_cfg->block_size) { return -EINVAL; } ret = k_mem_slab_alloc(tx_cfg->mem_slab, &mem_block, K_FOREVER); if (ret < 0) { return -ENOMEM; } memcpy(mem_block, (void *)buf, size); ret = i2s_write((const struct device *)dev, mem_block, size); if (ret != 0) { k_mem_slab_free(tx_cfg->mem_slab, mem_block); } return ret; } ```
/content/code_sandbox/drivers/i2s/i2s_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
340
```c /*
 * I2S bus driver for the LiteX I2S core (RX and TX directions are
 * separate devicetree nodes, each instantiated via I2S_INIT() below).
 */

#include <string.h>
#include <zephyr/drivers/i2s.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <soc.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include "i2s_litex.h"
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(i2s_litex);

/* Advance a ring-buffer index with wrap-around.
 * NOTE(review): 'val' and 'max' are evaluated more than once — callers must
 * pass side-effect-free arguments (all current callers do).
 */
#define MODULO_INC(val, max) \
	{ \
		val = (val == max - 1) ? 0 : val + 1; \
	}

/**
 * @brief Enable i2s device
 *
 * @param reg base register of device
 */
static void i2s_enable(uintptr_t reg)
{
	uint8_t reg_data = litex_read8(reg + I2S_CONTROL_OFFSET);

	litex_write8(reg_data | I2S_ENABLE, reg + I2S_CONTROL_OFFSET);
}

/**
 * @brief Disable i2s device
 *
 * @param reg base register of device
 */
static void i2s_disable(uintptr_t reg)
{
	uint8_t reg_data = litex_read8(reg + I2S_CONTROL_OFFSET);

	litex_write8(reg_data & ~(I2S_ENABLE), reg + I2S_CONTROL_OFFSET);
}

/**
 * @brief Reset i2s fifo
 *
 * @param reg base register of device
 */
static void i2s_reset_fifo(uintptr_t reg)
{
	uint8_t reg_data = litex_read8(reg + I2S_CONTROL_OFFSET);

	litex_write8(reg_data | I2S_FIFO_RESET, reg + I2S_CONTROL_OFFSET);
}

/**
 * @brief Get i2s format handled by device
 *
 * NOTE(review): identifier is misspelled ("foramt" for "format"); it is a
 * file-local static, so renaming would be safe but is not done here.
 * The declared return type is i2s_fmt_t yet -EINVAL is returned on an
 * unrecognized hardware format — callers compare the result against
 * i2s_cfg->format, so the sentinel simply never matches; confirm i2s_fmt_t
 * is wide enough to round-trip the negative value on this platform.
 *
 * @param reg base register of device
 *
 * @return currently supported format or error
 *	   when format can't be handled
 */
static i2s_fmt_t i2s_get_foramt(uintptr_t reg)
{
	uint8_t reg_data = litex_read32(reg + I2S_CONFIG_OFFSET);

	reg_data &= I2S_CONF_FORMAT_MASK;
	if (reg_data == LITEX_I2S_STANDARD) {
		return I2S_FMT_DATA_FORMAT_I2S;
	} else if (reg_data == LITEX_I2S_LEFT_JUSTIFIED) {
		return I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED;
	}
	return -EINVAL;
}

/**
 * @brief Get i2s sample width handled by device
 *
 * @param reg base register of device
 *
 * @return i2s sample width in bits
 */
static uint32_t i2s_get_sample_width(uintptr_t reg)
{
	uint32_t reg_data = litex_read32(reg + I2S_CONFIG_OFFSET);

	reg_data &= I2S_CONF_SAMPLE_WIDTH_MASK;
	return reg_data >> I2S_CONF_SAMPLE_WIDTH_OFFSET;
}

/**
 * @brief Get i2s audio sampling rate handled by device
 *
 * @param reg base register of device
 *
 * @return audio sampling rate in Hz
 */
static uint32_t i2s_get_audio_freq(uintptr_t reg)
{
	uint32_t reg_data = litex_read32(reg + I2S_CONFIG_OFFSET);

	reg_data &= I2S_CONF_LRCK_MASK;
	return reg_data >> I2S_CONF_LRCK_FREQ_OFFSET;
}

/**
 * @brief Enable i2s interrupt in event register
 *
 * @param reg base register of device
 * @param irq_type irq type to be enabled one of I2S_EV_READY or I2S_EV_ERROR
 */
static void i2s_irq_enable(uintptr_t reg, int irq_type)
{
	__ASSERT_NO_MSG(irq_type == I2S_EV_READY || irq_type == I2S_EV_ERROR);

	uint8_t reg_data = litex_read8(reg + I2S_EV_ENABLE_OFFSET);

	litex_write8(reg_data | irq_type, reg + I2S_EV_ENABLE_OFFSET);
}

/**
 * @brief Disable i2s interrupt in event register
 *
 * @param reg base register of device
 * @param irq_type irq type to be disabled one of I2S_EV_READY or I2S_EV_ERROR
 */
static void i2s_irq_disable(uintptr_t reg, int irq_type)
{
	__ASSERT_NO_MSG(irq_type == I2S_EV_READY || irq_type == I2S_EV_ERROR);

	uint8_t reg_data = litex_read8(reg + I2S_EV_ENABLE_OFFSET);

	litex_write8(reg_data & ~(irq_type), reg + I2S_EV_ENABLE_OFFSET);
}

/**
 * @brief Clear all pending irqs
 *
 * Writing back the pending bits acknowledges them (write-1-to-clear).
 *
 * @param reg base register of device
 */
static void i2s_clear_pending_irq(uintptr_t reg)
{
	uint8_t reg_data = litex_read8(reg + I2S_EV_PENDING_OFFSET);

	litex_write8(reg_data, reg + I2S_EV_PENDING_OFFSET);
}

/**
 * @brief Fast data copy function
 *
 * Each operation copies 32 bit data chunks
 * This function copies data from fifo into user buffer
 *
 * @param dst memory destination where fifo data will be copied to
 * @param size amount of data to be copied
 * @param sample_width width of single sample in bits
 * @param channels number of received channels
 */
static void i2s_copy_from_fifo(uint8_t *dst, size_t size, int sample_width,
			       int channels)
{
	uint32_t data;
	int chan_size = sample_width / 8;
#if CONFIG_I2S_LITEX_CHANNELS_CONCATENATED
	if (channels == 2) {
		for (size_t i = 0; i < size / chan_size; i += 4) {
			/* using sys_read function, as fifo is not a csr,
			 * but a contiguous memory space
			 */
			/* NOTE(review): dst is uint8_t*, so this stores only
			 * the low byte of the 32-bit FIFO word — looks like
			 * it should be *(uint32_t *)(dst + i); confirm
			 * against hardware/upstream before relying on this
			 * path.
			 */
			*(dst + i) = sys_read32(I2S_RX_FIFO_ADDR);
		}
	} else {
		for (size_t i = 0; i < size / chan_size; i += 2) {
			data = sys_read32(I2S_RX_FIFO_ADDR);
			*((uint16_t *)(dst + i)) = data & 0xffff;
		}
	}
#else
	int max_off = chan_size - 1;

	for (size_t i = 0; i < size / chan_size; ++i) {
		data = sys_read32(I2S_RX_FIFO_ADDR);
		/* unpack one FIFO word byte-by-byte, honoring the configured
		 * endianness
		 */
		for (int off = max_off; off >= 0; off--) {
#if CONFIG_I2S_LITEX_DATA_BIG_ENDIAN
			*(dst + i * chan_size + (max_off - off)) =
				data >> 8 * off;
#else
			*(dst + i * chan_size + off) = data >> 8 * off;
#endif
		}
		/* if mono, copy every left channel
		 * right channel is discarded
		 */
		if (channels == 1) {
			sys_read32(I2S_RX_FIFO_ADDR);
		}
	}
#endif
}

/**
 * @brief Fast data copy function
 *
 * Each operation copies 32 bit data chunks
 * This function copies data from user buffer into fifo
 *
 * @param src memory from which data will be copied to fifo
 * @param size amount of data to be copied in bytes
 * @param sample_width width of single sample in bits
 * @param channels number of received channels
 */
static void i2s_copy_to_fifo(uint8_t *src, size_t size, int sample_width,
			     int channels)
{
	int chan_size = sample_width / 8;
#if CONFIG_I2S_LITEX_CHANNELS_CONCATENATED
	if (channels == 2) {
		for (size_t i = 0; i < size / chan_size; i += 4) {
			/* using sys_write function, as fifo is not a csr,
			 * but a contiguous memory space
			 */
			/* NOTE(review): src is uint8_t*, so only one byte is
			 * written per 32-bit FIFO word here — mirror of the
			 * RX-side concern above; verify intended access
			 * width.
			 */
			sys_write32(*(src + i), I2S_TX_FIFO_ADDR);
		}
	} else {
		for (size_t i = 0; i < size / chan_size; i += 2) {
			sys_write32(*((uint16_t *)(src + i)),
				    I2S_TX_FIFO_ADDR);
		}
	}
#else
	int max_off = chan_size - 1;
	uint32_t data;
	uint8_t *d_ptr = (uint8_t *)&data;

	for (size_t i = 0; i < size / chan_size; ++i) {
		/* pack one sample into a 32-bit word, honoring the
		 * configured endianness
		 */
		for (int off = max_off; off >= 0; off--) {
#if CONFIG_I2S_LITEX_DATA_BIG_ENDIAN
			*(d_ptr + off) = *(src + i * chan_size + (max_off - off));
#else
			*(d_ptr + off) = *(src + i * chan_size + off);
#endif
		}
		sys_write32(data, I2S_TX_FIFO_ADDR);
		/* if mono send every left channel
		 * right channel will be same as left
		 */
		if (channels == 1) {
			sys_write32(data, I2S_TX_FIFO_ADDR);
		}
	}
#endif
}

/*
 * Get data from the queue
 *
 * Returns 0 on success, -ENOMEM when the ring buffer is empty.
 * Interrupt-safe: protected by irq_lock().
 */
static int queue_get(struct ring_buf *rb, void **mem_block, size_t *size)
{
	unsigned int key;

	key = irq_lock();

	if (rb->tail == rb->head) {
		/* Ring buffer is empty */
		irq_unlock(key);
		return -ENOMEM;
	}

	*mem_block = rb->buf[rb->tail].mem_block;
	*size = rb->buf[rb->tail].size;
	MODULO_INC(rb->tail, rb->len);

	irq_unlock(key);

	return 0;
}

/*
 * Put data in the queue
 *
 * Returns 0 on success, -ENOMEM when the ring buffer is full
 * (one slot is always kept empty to distinguish full from empty).
 * Interrupt-safe: protected by irq_lock().
 */
static int queue_put(struct ring_buf *rb, void *mem_block, size_t size)
{
	uint16_t head_next;
	unsigned int key;

	key = irq_lock();

	head_next = rb->head;
	MODULO_INC(head_next, rb->len);

	if (head_next == rb->tail) {
		/* Ring buffer is full */
		irq_unlock(key);
		return -ENOMEM;
	}

	rb->buf[rb->head].mem_block = mem_block;
	rb->buf[rb->head].size = size;
	rb->head = head_next;

	irq_unlock(key);

	return 0;
}

/*
 * Driver init: set up stream semaphores and hook the IRQ.
 * RX semaphore starts empty (given by the RX ISR per received block);
 * TX semaphore starts at count-1 (writers consume free slots).
 */
static int i2s_litex_initialize(const struct device *dev)
{
	const struct i2s_litex_cfg *cfg = dev->config;
	struct i2s_litex_data *const dev_data = dev->data;

	k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_LITEX_RX_BLOCK_COUNT);
	k_sem_init(&dev_data->tx.sem, CONFIG_I2S_LITEX_TX_BLOCK_COUNT - 1,
		   CONFIG_I2S_LITEX_TX_BLOCK_COUNT);

	cfg->irq_config(dev);
	return 0;
}

/*
 * Validate the requested configuration against what the (fixed-function)
 * LiteX core reports, then store it as the stream configuration.
 * block_size is forced to exactly one FIFO's worth of data.
 */
static int i2s_litex_configure(const struct device *dev, enum i2s_dir dir,
			       const struct i2s_config *i2s_cfg)
{
	struct i2s_litex_data *const dev_data = dev->data;
	const struct i2s_litex_cfg *const cfg = dev->config;
	struct stream *stream;
	int channels_concatenated = litex_read8(cfg->base + I2S_STATUS_OFFSET);
	int dev_audio_freq = i2s_get_audio_freq(cfg->base);
	int channel_div;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
		channels_concatenated &= I2S_RX_STAT_CHANNEL_CONCATENATED_MASK;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
		channels_concatenated &= I2S_TX_STAT_CHANNEL_CONCATENATED_MASK;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("either RX or TX direction must be selected");
		return -EINVAL;
	}

	if (stream->state != I2S_STATE_NOT_READY &&
	    stream->state != I2S_STATE_READY) {
		LOG_ERR("invalid state");
		return -EINVAL;
	}

	if (i2s_cfg->options & I2S_OPT_BIT_CLK_GATED) {
		LOG_ERR("invalid operating mode");
		return -EINVAL;
	}

	/* Sample rate is fixed in gateware; mismatch is only warned about. */
	if (i2s_cfg->frame_clk_freq != dev_audio_freq) {
		LOG_WRN("invalid audio frequency sampling rate");
	}

	/* mono uses half of each hardware frame, so it needs half the space */
	if (i2s_cfg->channels == 1) {
		channel_div = 2;
	} else if (i2s_cfg->channels == 2) {
		channel_div = 1;
	} else {
		LOG_ERR("invalid channels number");
		return -EINVAL;
	}

	int req_buf_s =
		(cfg->fifo_depth * (i2s_cfg->word_size / 8)) / channel_div;
	if (i2s_cfg->block_size < req_buf_s) {
		LOG_ERR("not enough space to allocate single buffer");
		LOG_ERR("fifo requires at least %i bytes", req_buf_s);
		return -EINVAL;
	} else if (i2s_cfg->block_size != req_buf_s) {
		LOG_WRN("the buffer is greater than required,"
			"only %"
			"i bytes of data are valid ",
			req_buf_s);
		/* The block_size field will be corrected to req_buf_s in the
		 * structure copied as stream configuration (see below).
		 */
	}

	int dev_sample_width = i2s_get_sample_width(cfg->base);

	/* NOTE(review): this &&-chain rejects only a word size that is BOTH
	 * non-standard AND different from the device width; e.g. 8 passes
	 * even when the core is configured for 16 bits. The intended check
	 * looks like (not one of 8/16/24/32) || (!= dev_sample_width) —
	 * confirm before changing.
	 */
	if (i2s_cfg->word_size != 8U && i2s_cfg->word_size != 16U &&
	    i2s_cfg->word_size != 24U && i2s_cfg->word_size != 32U &&
	    i2s_cfg->word_size != dev_sample_width) {
		LOG_ERR("invalid word size");
		return -EINVAL;
	}

	int dev_format = i2s_get_foramt(cfg->base);

	if (dev_format != i2s_cfg->format) {
		LOG_ERR("unsupported I2S data format");
		return -EINVAL;
	}
#if CONFIG_I2S_LITEX_CHANNELS_CONCATENATED
#if CONFIG_I2S_LITEX_DATA_BIG_ENDIAN
	LOG_ERR("Big endian is not uspported "
		"when channels are conncatenated");
	return -EINVAL;
#endif
	if (channels_concatenated == 0) {
		LOG_ERR("invalid state. "
			"Your device is configured to send "
			"channels with padding. "
			"Please reconfigure driver");
		return -EINVAL;
	}

	if (i2s_cfg->word_size != 16) {
		LOG_ERR("invalid word size");
		return -EINVAL;
	}
#endif
	memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config));
	stream->cfg.block_size = req_buf_s;
	stream->state = I2S_STATE_READY;
	return 0;
}

/*
 * i2s_read API: block (up to cfg.timeout ms) until the RX ISR has queued a
 * buffer, then hand ownership of that mem-slab block to the caller.
 * NOTE(review): returns -ENOMEM for the not-ready state; the i2s API
 * convention elsewhere in this file uses -EIO for state errors — verify.
 */
static int i2s_litex_read(const struct device *dev, void **mem_block,
			  size_t *size)
{
	struct i2s_litex_data *const dev_data = dev->data;
	int ret;

	if (dev_data->rx.state == I2S_STATE_NOT_READY) {
		LOG_DBG("invalid state");
		return -ENOMEM;
	}
	/* just to implement timeout*/
	ret = k_sem_take(&dev_data->rx.sem,
			 SYS_TIMEOUT_MS(dev_data->rx.cfg.timeout));
	if (ret < 0) {
		return ret;
	}
	/* Get data from the beginning of RX queue */
	return queue_get(&dev_data->rx.mem_block_queue, mem_block, size);
}

/*
 * i2s_write API: queue a caller-owned mem-slab block for transmission and,
 * on the first write after READY, kick the TX path by enabling the READY
 * interrupt (the TX ISR then drains the queue).
 */
static int i2s_litex_write(const struct device *dev, void *mem_block,
			   size_t size)
{
	struct i2s_litex_data *const dev_data = dev->data;
	const struct i2s_litex_cfg *cfg = dev->config;
	int ret;

	if (dev_data->tx.state != I2S_STATE_RUNNING &&
	    dev_data->tx.state != I2S_STATE_READY) {
		LOG_DBG("invalid state");
		return -EIO;
	}
	/* just to implement timeout */
	ret = k_sem_take(&dev_data->tx.sem,
			 SYS_TIMEOUT_MS(dev_data->tx.cfg.timeout));
	if (ret < 0) {
		return ret;
	}

	/* Add data to the end of the TX queue */
	ret = queue_put(&dev_data->tx.mem_block_queue, mem_block, size);
	if (ret < 0) {
		return ret;
	}
	if (dev_data->tx.state == I2S_STATE_READY) {
		i2s_irq_enable(cfg->base, I2S_EV_READY);
		dev_data->tx.state = I2S_STATE_RUNNING;
	}
	return ret;
}

/*
 * i2s_trigger API: only START and STOP are supported
 * (no DRAIN/DROP/PREPARE).
 */
static int i2s_litex_trigger(const struct device *dev, enum i2s_dir dir,
			     enum i2s_trigger_cmd cmd)
{
	struct i2s_litex_data *const dev_data = dev->data;
	const struct i2s_litex_cfg *const cfg = dev->config;
	struct stream *stream;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("either RX or TX direction must be selected");
		return -EINVAL;
	}

	switch (cmd) {
	case I2S_TRIGGER_START:
		if (stream->state != I2S_STATE_READY) {
			LOG_ERR("START trigger: invalid state %d",
				stream->state);
			return -EIO;
		}
		__ASSERT_NO_MSG(stream->mem_block == NULL);
		i2s_reset_fifo(cfg->base);
		i2s_enable(cfg->base);
		i2s_irq_enable(cfg->base, I2S_EV_READY);
		stream->state = I2S_STATE_RUNNING;
		break;
	case I2S_TRIGGER_STOP:
		if (stream->state != I2S_STATE_RUNNING &&
		    stream->state != I2S_STATE_READY) {
			LOG_ERR("STOP trigger: invalid state");
			return -EIO;
		}
		i2s_disable(cfg->base);
		i2s_irq_disable(cfg->base, I2S_EV_READY);
		stream->state = I2S_STATE_READY;
		break;
	default:
		LOG_ERR("unsupported trigger command");
		return -EINVAL;
	}
	return 0;
}

/*
 * Drain the whole RX FIFO (discarding the data) and acknowledge the
 * interrupt — used when no buffer could be allocated for the incoming block.
 */
static inline void clear_rx_fifo(const struct i2s_litex_cfg *cfg)
{
	for (int i = 0; i < I2S_RX_FIFO_DEPTH; i++) {
		sys_read32(I2S_RX_FIFO_ADDR);
	}
	i2s_clear_pending_irq(cfg->base);
}

/*
 * RX interrupt: one full block is available in the FIFO. Allocate a slab
 * block, copy the FIFO into it, queue it for i2s_read() and signal the
 * semaphore. On allocation failure or a full queue the frame is dropped.
 * NOTE(review): on queue_put() failure the freshly allocated mem_block is
 * not freed here — confirm whether this leaks a slab block per drop.
 */
static void i2s_litex_isr_rx(void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct i2s_litex_cfg *cfg = dev->config;
	struct i2s_litex_data *data = dev->data;
	struct stream *stream = &data->rx;
	int ret;

	/* Prepare to receive the next data block */
	ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block,
			       K_NO_WAIT);
	if (ret < 0) {
		clear_rx_fifo(cfg);
		return;
	}

	i2s_copy_from_fifo((uint8_t *)stream->mem_block,
			   stream->cfg.block_size, stream->cfg.word_size,
			   stream->cfg.channels);
	i2s_clear_pending_irq(cfg->base);

	ret = queue_put(&stream->mem_block_queue, stream->mem_block,
			stream->cfg.block_size);
	if (ret < 0) {
		LOG_WRN("Couldn't copy data "
			"from RX fifo to the ring "
			"buffer (no space left) - "
			"dropping a frame");
		return;
	}
	k_sem_give(&stream->sem);
}

/*
 * TX interrupt: the FIFO wants another block. Dequeue the next buffer,
 * push it into the FIFO, free it, and release one writer slot. When the
 * queue is empty the interrupt is disabled and the stream returns to READY.
 */
static void i2s_litex_isr_tx(void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct i2s_litex_cfg *cfg = dev->config;
	struct i2s_litex_data *data = dev->data;
	size_t mem_block_size;
	struct stream *stream = &data->tx;
	int ret;

	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
			&mem_block_size);
	if (ret < 0) {
		i2s_irq_disable(cfg->base, I2S_EV_READY);
		stream->state = I2S_STATE_READY;
		return;
	}
	k_sem_give(&stream->sem);

	i2s_copy_to_fifo((uint8_t *)stream->mem_block, mem_block_size,
			 stream->cfg.word_size, stream->cfg.channels);
	i2s_clear_pending_irq(cfg->base);

	k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
}

static const struct i2s_driver_api i2s_litex_driver_api = {
	.configure = i2s_litex_configure,
	.read = i2s_litex_read,
	.write = i2s_litex_write,
	.trigger = i2s_litex_trigger,
};

/* Instantiate one direction (rx or tx) from its devicetree node.
 * NOTE(review): rx_ring_buf/tx_ring_buf are declared without the dir##
 * prefix, so expanding I2S_INIT for both rx and tx in one translation unit
 * would redefine both arrays — verify against boards that enable both nodes.
 */
#define I2S_INIT(dir)                                                          \
									       \
	static struct queue_item rx_ring_buf[CONFIG_I2S_LITEX_RX_BLOCK_COUNT]; \
	static struct queue_item tx_ring_buf[CONFIG_I2S_LITEX_TX_BLOCK_COUNT]; \
									       \
	static struct i2s_litex_data i2s_litex_data_##dir = {                  \
		.dir.mem_block_queue.buf = dir##_ring_buf,                     \
		.dir.mem_block_queue.len =                                     \
			sizeof(dir##_ring_buf) / sizeof(struct queue_item),    \
	};                                                                     \
									       \
	static void i2s_litex_irq_config_func_##dir(const struct device *dev); \
									       \
	static struct i2s_litex_cfg i2s_litex_cfg_##dir = {                    \
		.base = DT_REG_ADDR(DT_NODELABEL(i2s_##dir)),                  \
		.fifo_base =                                                   \
			DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_##dir), fifo),    \
		.fifo_depth = DT_PROP(DT_NODELABEL(i2s_##dir), fifo_depth),    \
		.irq_config = i2s_litex_irq_config_func_##dir                  \
	};                                                                     \
	DEVICE_DT_DEFINE(DT_NODELABEL(i2s_##dir), i2s_litex_initialize,        \
			 NULL, &i2s_litex_data_##dir,                          \
			 &i2s_litex_cfg_##dir, POST_KERNEL,                    \
			 CONFIG_I2S_INIT_PRIORITY,                             \
			 &i2s_litex_driver_api);                               \
									       \
	static void i2s_litex_irq_config_func_##dir(const struct device *dev)  \
	{                                                                      \
		IRQ_CONNECT(DT_IRQN(DT_NODELABEL(i2s_##dir)),                  \
			    DT_IRQ(DT_NODELABEL(i2s_##dir),                    \
			    priority),                                         \
			    i2s_litex_isr_##dir,                               \
			    DEVICE_DT_GET(DT_NODELABEL(i2s_##dir)), 0);        \
		irq_enable(DT_IRQN(DT_NODELABEL(i2s_##dir)));                  \
	}

#if DT_NODE_HAS_STATUS(DT_NODELABEL(i2s_rx), okay)
I2S_INIT(rx);
#endif

#if DT_NODE_HAS_STATUS(DT_NODELABEL(i2s_tx), okay)
I2S_INIT(tx);
#endif
```
/content/code_sandbox/drivers/i2s/i2s_litex.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,052
```c /* * */ #define DT_DRV_COMPAT atmel_sam_ssc /** @file * @brief I2S bus (SSC) driver for Atmel SAM MCU family. * * Limitations: * - TX and RX path share a common bit clock divider and as a result they cannot * be configured independently. If RX and TX path are set to different bit * clock frequencies the latter setting will quietly override the former. * We should return an error in such a case. * - DMA is used in simple single block transfer mode and as such is not able * to handle high speed data. To support higher transfer speeds the DMA * linked list mode should be used. */ #include <errno.h> #include <string.h> #include <zephyr/sys/__assert.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/i2s.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include <soc.h> #define LOG_DOMAIN dev_i2s_sam_ssc #define LOG_LEVEL CONFIG_I2S_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(LOG_DOMAIN); #if __DCACHE_PRESENT == 1 #define DCACHE_INVALIDATE(addr, size) \ SCB_InvalidateDCache_by_Addr((uint32_t *)addr, size) #define DCACHE_CLEAN(addr, size) \ SCB_CleanDCache_by_Addr((uint32_t *)addr, size) #else #define DCACHE_INVALIDATE(addr, size) {; } #define DCACHE_CLEAN(addr, size) {; } #endif #define SAM_SSC_WORD_SIZE_BITS_MIN 2 #define SAM_SSC_WORD_SIZE_BITS_MAX 32 #define SAM_SSC_WORD_PER_FRAME_MIN 1 #define SAM_SSC_WORD_PER_FRAME_MAX 16 struct queue_item { void *mem_block; size_t size; }; /* Minimal ring buffer implementation */ struct ring_buf { struct queue_item *buf; uint16_t len; uint16_t head; uint16_t tail; }; /* Device constant configuration parameters */ struct i2s_sam_dev_cfg { const struct device *dev_dma; Ssc *regs; void (*irq_config)(void); const struct atmel_sam_pmc_config clock_cfg; const struct pinctrl_dev_config *pcfg; uint8_t irq_id; }; struct stream { int32_t state; struct k_sem sem; 
uint32_t dma_channel; uint8_t dma_perid; uint8_t word_size_bytes; bool last_block; struct i2s_config cfg; struct ring_buf mem_block_queue; void *mem_block; int (*stream_start)(struct stream *, Ssc *const, const struct device *); void (*stream_disable)(struct stream *, Ssc *const, const struct device *); void (*queue_drop)(struct stream *); int (*set_data_format)(const struct i2s_sam_dev_cfg *const, const struct i2s_config *); }; /* Device run time data */ struct i2s_sam_dev_data { struct stream rx; struct stream tx; }; #define MODULO_INC(val, max) { val = (++val < max) ? val : 0; } static const struct device *get_dev_from_dma_channel(uint32_t dma_channel); static void dma_rx_callback(const struct device *, void *, uint32_t, int); static void dma_tx_callback(const struct device *, void *, uint32_t, int); static void rx_stream_disable(struct stream *, Ssc *const, const struct device *); static void tx_stream_disable(struct stream *, Ssc *const, const struct device *); /* * Get data from the queue */ static int queue_get(struct ring_buf *rb, void **mem_block, size_t *size) { unsigned int key; key = irq_lock(); if (rb->tail == rb->head) { /* Ring buffer is empty */ irq_unlock(key); return -ENOMEM; } *mem_block = rb->buf[rb->tail].mem_block; *size = rb->buf[rb->tail].size; MODULO_INC(rb->tail, rb->len); irq_unlock(key); return 0; } /* * Put data in the queue */ static int queue_put(struct ring_buf *rb, void *mem_block, size_t size) { uint16_t head_next; unsigned int key; key = irq_lock(); head_next = rb->head; MODULO_INC(head_next, rb->len); if (head_next == rb->tail) { /* Ring buffer is full */ irq_unlock(key); return -ENOMEM; } rb->buf[rb->head].mem_block = mem_block; rb->buf[rb->head].size = size; rb->head = head_next; irq_unlock(key); return 0; } static int reload_dma(const struct device *dev_dma, uint32_t channel, void *src, void *dst, size_t size) { int ret; ret = dma_reload(dev_dma, channel, (uint32_t)src, (uint32_t)dst, size); if (ret < 0) { return ret; } ret = 
dma_start(dev_dma, channel); return ret; } static int start_dma(const struct device *dev_dma, uint32_t channel, struct dma_config *cfg, void *src, void *dst, uint32_t blk_size) { struct dma_block_config blk_cfg; int ret; (void)memset(&blk_cfg, 0, sizeof(blk_cfg)); blk_cfg.block_size = blk_size; blk_cfg.source_address = (uint32_t)src; blk_cfg.dest_address = (uint32_t)dst; cfg->head_block = &blk_cfg; ret = dma_config(dev_dma, channel, cfg); if (ret < 0) { return ret; } ret = dma_start(dev_dma, channel); return ret; } /* This function is executed in the interrupt context */ static void dma_rx_callback(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *dev = get_dev_from_dma_channel(channel); const struct i2s_sam_dev_cfg *const dev_cfg = dev->config; struct i2s_sam_dev_data *const dev_data = dev->data; Ssc *const ssc = dev_cfg->regs; struct stream *stream = &dev_data->rx; int ret; ARG_UNUSED(user_data); __ASSERT_NO_MSG(stream->mem_block != NULL); /* Stop reception if there was an error */ if (stream->state == I2S_STATE_ERROR) { goto rx_disable; } /* All block data received */ ret = queue_put(&stream->mem_block_queue, stream->mem_block, stream->cfg.block_size); if (ret < 0) { stream->state = I2S_STATE_ERROR; goto rx_disable; } stream->mem_block = NULL; k_sem_give(&stream->sem); /* Stop reception if we were requested */ if (stream->state == I2S_STATE_STOPPING) { stream->state = I2S_STATE_READY; goto rx_disable; } /* Prepare to receive the next data block */ ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block, K_NO_WAIT); if (ret < 0) { stream->state = I2S_STATE_ERROR; goto rx_disable; } /* Assure cache coherency before DMA write operation */ DCACHE_INVALIDATE(stream->mem_block, stream->cfg.block_size); ret = reload_dma(dev_cfg->dev_dma, stream->dma_channel, (void *)&(ssc->SSC_RHR), stream->mem_block, stream->cfg.block_size); if (ret < 0) { LOG_DBG("Failed to reload RX DMA transfer: %d", ret); goto rx_disable; } 
return; rx_disable: rx_stream_disable(stream, ssc, dev_cfg->dev_dma); } /* This function is executed in the interrupt context */ static void dma_tx_callback(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *dev = get_dev_from_dma_channel(channel); const struct i2s_sam_dev_cfg *const dev_cfg = dev->config; struct i2s_sam_dev_data *const dev_data = dev->data; Ssc *const ssc = dev_cfg->regs; struct stream *stream = &dev_data->tx; size_t mem_block_size; int ret; ARG_UNUSED(user_data); __ASSERT_NO_MSG(stream->mem_block != NULL); /* All block data sent */ k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block); stream->mem_block = NULL; /* Stop transmission if there was an error */ if (stream->state == I2S_STATE_ERROR) { LOG_DBG("TX error detected"); goto tx_disable; } /* Stop transmission if we were requested */ if (stream->last_block) { stream->state = I2S_STATE_READY; goto tx_disable; } /* Prepare to send the next data block */ ret = queue_get(&stream->mem_block_queue, &stream->mem_block, &mem_block_size); if (ret < 0) { if (stream->state == I2S_STATE_STOPPING) { stream->state = I2S_STATE_READY; } else { stream->state = I2S_STATE_ERROR; } goto tx_disable; } k_sem_give(&stream->sem); /* Assure cache coherency before DMA read operation */ DCACHE_CLEAN(stream->mem_block, mem_block_size); ret = reload_dma(dev_cfg->dev_dma, stream->dma_channel, stream->mem_block, (void *)&(ssc->SSC_THR), mem_block_size); if (ret < 0) { LOG_DBG("Failed to reload TX DMA transfer: %d", ret); goto tx_disable; } return; tx_disable: tx_stream_disable(stream, ssc, dev_cfg->dev_dma); } static int set_rx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg, const struct i2s_config *i2s_cfg) { Ssc *const ssc = dev_cfg->regs; const bool pin_rk_en = IS_ENABLED(CONFIG_I2S_SAM_SSC_0_PIN_RK_EN); const bool pin_rf_en = IS_ENABLED(CONFIG_I2S_SAM_SSC_0_PIN_RF_EN); uint8_t word_size_bits = i2s_cfg->word_size; uint8_t num_words = i2s_cfg->channels; uint8_t 
fslen = 0U; uint32_t ssc_rcmr = 0U; uint32_t ssc_rfmr = 0U; bool frame_clk_master = !(i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE); switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) { case I2S_FMT_DATA_FORMAT_I2S: num_words = 2U; fslen = word_size_bits - 1; ssc_rcmr = SSC_RCMR_CKI | (pin_rf_en ? SSC_RCMR_START_RF_FALLING : 0) | SSC_RCMR_STTDLY(1); ssc_rfmr = (pin_rf_en && frame_clk_master ? SSC_RFMR_FSOS_NEGATIVE : SSC_RFMR_FSOS_NONE); break; case I2S_FMT_DATA_FORMAT_PCM_SHORT: ssc_rcmr = (pin_rf_en ? SSC_RCMR_START_RF_FALLING : 0) | SSC_RCMR_STTDLY(0); ssc_rfmr = (pin_rf_en && frame_clk_master ? SSC_RFMR_FSOS_POSITIVE : SSC_RFMR_FSOS_NONE); break; case I2S_FMT_DATA_FORMAT_PCM_LONG: fslen = num_words * word_size_bits / 2U - 1; ssc_rcmr = (pin_rf_en ? SSC_RCMR_START_RF_RISING : 0) | SSC_RCMR_STTDLY(0); ssc_rfmr = (pin_rf_en && frame_clk_master ? SSC_RFMR_FSOS_POSITIVE : SSC_RFMR_FSOS_NONE); break; case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED: fslen = num_words * word_size_bits / 2U - 1; ssc_rcmr = SSC_RCMR_CKI | (pin_rf_en ? SSC_RCMR_START_RF_RISING : 0) | SSC_RCMR_STTDLY(0); ssc_rfmr = (pin_rf_en && frame_clk_master ? SSC_RFMR_FSOS_POSITIVE : SSC_RFMR_FSOS_NONE); break; default: LOG_ERR("Unsupported I2S data format"); return -EINVAL; } if (pin_rk_en) { ssc_rcmr |= ((i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) ? SSC_RCMR_CKS_RK : SSC_RCMR_CKS_MCK) | ((i2s_cfg->options & I2S_OPT_BIT_CLK_GATED) ? SSC_RCMR_CKO_TRANSFER : SSC_RCMR_CKO_CONTINUOUS); } else { ssc_rcmr |= SSC_RCMR_CKS_TK | SSC_RCMR_CKO_NONE; } /* SSC_RCMR.PERIOD bit filed does not support setting the * frame period with one bit resolution. In case the required * frame period is an odd number set it to be one bit longer. */ ssc_rcmr |= (pin_rf_en ? 0 : SSC_RCMR_START_TRANSMIT) | SSC_RCMR_PERIOD((num_words * word_size_bits + 1) / 2U - 1); /* Receive Clock Mode Register */ ssc->SSC_RCMR = ssc_rcmr; ssc_rfmr |= SSC_RFMR_DATLEN(word_size_bits - 1) | ((i2s_cfg->format & I2S_FMT_DATA_ORDER_LSB) ? 
0 : SSC_RFMR_MSBF) | SSC_RFMR_DATNB(num_words - 1) | SSC_RFMR_FSLEN(fslen) | SSC_RFMR_FSLEN_EXT(fslen >> 4); /* Receive Frame Mode Register */ ssc->SSC_RFMR = ssc_rfmr; return 0; } static int set_tx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg, const struct i2s_config *i2s_cfg) { Ssc *const ssc = dev_cfg->regs; uint8_t word_size_bits = i2s_cfg->word_size; uint8_t num_words = i2s_cfg->channels; uint8_t fslen = 0U; uint32_t ssc_tcmr = 0U; uint32_t ssc_tfmr = 0U; switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) { case I2S_FMT_DATA_FORMAT_I2S: num_words = 2U; fslen = word_size_bits - 1; ssc_tcmr = SSC_TCMR_START_TF_FALLING | SSC_TCMR_STTDLY(1); ssc_tfmr = SSC_TFMR_FSOS_NEGATIVE; break; case I2S_FMT_DATA_FORMAT_PCM_SHORT: ssc_tcmr = SSC_TCMR_CKI | SSC_TCMR_START_TF_FALLING | SSC_TCMR_STTDLY(0); ssc_tfmr = SSC_TFMR_FSOS_POSITIVE; break; case I2S_FMT_DATA_FORMAT_PCM_LONG: fslen = num_words * word_size_bits / 2U - 1; ssc_tcmr = SSC_TCMR_CKI | SSC_TCMR_START_TF_RISING | SSC_TCMR_STTDLY(0); ssc_tfmr = SSC_TFMR_FSOS_POSITIVE; break; case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED: fslen = num_words * word_size_bits / 2U - 1; ssc_tcmr = SSC_TCMR_START_TF_RISING | SSC_TCMR_STTDLY(0); ssc_tfmr = SSC_TFMR_FSOS_POSITIVE; break; default: LOG_ERR("Unsupported I2S data format"); return -EINVAL; } /* SSC_TCMR.PERIOD bit filed does not support setting the * frame period with one bit resolution. In case the required * frame period is an odd number set it to be one bit longer. */ ssc_tcmr |= ((i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) ? SSC_TCMR_CKS_TK : SSC_TCMR_CKS_MCK) | ((i2s_cfg->options & I2S_OPT_BIT_CLK_GATED) ? 
SSC_TCMR_CKO_TRANSFER : SSC_TCMR_CKO_CONTINUOUS) | SSC_TCMR_PERIOD((num_words * word_size_bits + 1) / 2U - 1); /* Transmit Clock Mode Register */ ssc->SSC_TCMR = ssc_tcmr; if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE) { ssc_tfmr &= ~SSC_TFMR_FSOS_Msk; ssc_tfmr |= SSC_TFMR_FSOS_NONE; } ssc_tfmr |= SSC_TFMR_DATLEN(word_size_bits - 1) | ((i2s_cfg->format & I2S_FMT_DATA_ORDER_LSB) ? 0 : SSC_TFMR_MSBF) | SSC_TFMR_DATNB(num_words - 1) | SSC_TFMR_FSLEN(fslen) | SSC_TFMR_FSLEN_EXT(fslen >> 4); /* Transmit Frame Mode Register */ ssc->SSC_TFMR = ssc_tfmr; return 0; } /* Calculate number of bytes required to store a word of bit_size length */ static uint8_t get_word_size_bytes(uint8_t bit_size) { uint8_t byte_size_min = (bit_size + 7) / 8U; uint8_t byte_size; byte_size = (byte_size_min == 3U) ? 4 : byte_size_min; return byte_size; } static int bit_clock_set(Ssc *const ssc, uint32_t bit_clk_freq) { uint32_t clk_div = SOC_ATMEL_SAM_MCK_FREQ_HZ / bit_clk_freq / 2U; if (clk_div == 0U || clk_div >= (1 << 12)) { LOG_ERR("Invalid bit clock frequency"); return -EINVAL; } ssc->SSC_CMR = clk_div; LOG_DBG("freq = %d", bit_clk_freq); return 0; } static const struct i2s_config *i2s_sam_config_get(const struct device *dev, enum i2s_dir dir) { struct i2s_sam_dev_data *const dev_data = dev->data; struct stream *stream; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; } else { stream = &dev_data->tx; } if (stream->state == I2S_STATE_NOT_READY) { return NULL; } return &stream->cfg; } static int i2s_sam_configure(const struct device *dev, enum i2s_dir dir, const struct i2s_config *i2s_cfg) { const struct i2s_sam_dev_cfg *const dev_cfg = dev->config; struct i2s_sam_dev_data *const dev_data = dev->data; Ssc *const ssc = dev_cfg->regs; uint8_t num_words = i2s_cfg->channels; uint8_t word_size_bits = i2s_cfg->word_size; uint32_t bit_clk_freq; struct stream *stream; int ret; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; } else if (dir == I2S_DIR_TX) { stream = &dev_data->tx; } else if (dir == 
I2S_DIR_BOTH) { return -ENOSYS; } else { LOG_ERR("Either RX or TX direction must be selected"); return -EINVAL; } if (stream->state != I2S_STATE_NOT_READY && stream->state != I2S_STATE_READY) { LOG_ERR("invalid state"); return -EINVAL; } if (i2s_cfg->frame_clk_freq == 0U) { stream->queue_drop(stream); (void)memset(&stream->cfg, 0, sizeof(struct i2s_config)); stream->state = I2S_STATE_NOT_READY; return 0; } if (i2s_cfg->format & I2S_FMT_FRAME_CLK_INV) { LOG_ERR("Frame clock inversion is not implemented"); LOG_ERR("Please submit a patch"); return -EINVAL; } if (i2s_cfg->format & I2S_FMT_BIT_CLK_INV) { LOG_ERR("Bit clock inversion is not implemented"); LOG_ERR("Please submit a patch"); return -EINVAL; } if (word_size_bits < SAM_SSC_WORD_SIZE_BITS_MIN || word_size_bits > SAM_SSC_WORD_SIZE_BITS_MAX) { LOG_ERR("Unsupported I2S word size"); return -EINVAL; } if (num_words < SAM_SSC_WORD_PER_FRAME_MIN || num_words > SAM_SSC_WORD_PER_FRAME_MAX) { LOG_ERR("Unsupported words per frame number"); return -EINVAL; } memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config)); bit_clk_freq = i2s_cfg->frame_clk_freq * word_size_bits * num_words; ret = bit_clock_set(ssc, bit_clk_freq); if (ret < 0) { return ret; } ret = stream->set_data_format(dev_cfg, i2s_cfg); if (ret < 0) { return ret; } /* Set up DMA channel parameters */ stream->word_size_bytes = get_word_size_bytes(word_size_bits); if (i2s_cfg->options & I2S_OPT_LOOPBACK) { ssc->SSC_RFMR |= SSC_RFMR_LOOP; } stream->state = I2S_STATE_READY; return 0; } static int rx_stream_start(struct stream *stream, Ssc *const ssc, const struct device *dev_dma) { int ret; ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block, K_NO_WAIT); if (ret < 0) { return ret; } /* Workaround for a hardware bug: DMA engine will read first data * item even if SSC_SR.RXEN (Receive Enable) is not set. An extra read * before enabling DMA engine sets hardware FSM in the correct state. 
*/ (void)ssc->SSC_RHR; struct dma_config dma_cfg = { .source_data_size = stream->word_size_bytes, .dest_data_size = stream->word_size_bytes, .block_count = 1, .dma_slot = stream->dma_perid, .channel_direction = PERIPHERAL_TO_MEMORY, .source_burst_length = 1, .dest_burst_length = 1, .dma_callback = dma_rx_callback, }; ret = start_dma(dev_dma, stream->dma_channel, &dma_cfg, (void *)&(ssc->SSC_RHR), stream->mem_block, stream->cfg.block_size); if (ret < 0) { LOG_ERR("Failed to start RX DMA transfer: %d", ret); return ret; } /* Clear status register */ (void)ssc->SSC_SR; ssc->SSC_IER = SSC_IER_OVRUN; ssc->SSC_CR = SSC_CR_RXEN; return 0; } static int tx_stream_start(struct stream *stream, Ssc *const ssc, const struct device *dev_dma) { size_t mem_block_size; int ret; ret = queue_get(&stream->mem_block_queue, &stream->mem_block, &mem_block_size); if (ret < 0) { return ret; } k_sem_give(&stream->sem); /* Workaround for a hardware bug: DMA engine will transfer first data * item even if SSC_SR.TXEN (Transmit Enable) is not set. An extra write * before enabling DMA engine sets hardware FSM in the correct state. * This data item will not be output on I2S interface. 
*/ ssc->SSC_THR = 0; struct dma_config dma_cfg = { .source_data_size = stream->word_size_bytes, .dest_data_size = stream->word_size_bytes, .block_count = 1, .dma_slot = stream->dma_perid, .channel_direction = MEMORY_TO_PERIPHERAL, .source_burst_length = 1, .dest_burst_length = 1, .dma_callback = dma_tx_callback, }; /* Assure cache coherency before DMA read operation */ DCACHE_CLEAN(stream->mem_block, mem_block_size); ret = start_dma(dev_dma, stream->dma_channel, &dma_cfg, stream->mem_block, (void *)&(ssc->SSC_THR), mem_block_size); if (ret < 0) { LOG_ERR("Failed to start TX DMA transfer: %d", ret); return ret; } /* Clear status register */ (void)ssc->SSC_SR; ssc->SSC_IER = SSC_IER_TXEMPTY; ssc->SSC_CR = SSC_CR_TXEN; return 0; } static void rx_stream_disable(struct stream *stream, Ssc *const ssc, const struct device *dev_dma) { ssc->SSC_CR = SSC_CR_RXDIS; ssc->SSC_IDR = SSC_IDR_OVRUN; dma_stop(dev_dma, stream->dma_channel); if (stream->mem_block != NULL) { k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block); stream->mem_block = NULL; } } static void tx_stream_disable(struct stream *stream, Ssc *const ssc, const struct device *dev_dma) { ssc->SSC_CR = SSC_CR_TXDIS; ssc->SSC_IDR = SSC_IDR_TXEMPTY; dma_stop(dev_dma, stream->dma_channel); if (stream->mem_block != NULL) { k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block); stream->mem_block = NULL; } } static void rx_queue_drop(struct stream *stream) { size_t size; void *mem_block; while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) { k_mem_slab_free(stream->cfg.mem_slab, mem_block); } k_sem_reset(&stream->sem); } static void tx_queue_drop(struct stream *stream) { size_t size; void *mem_block; unsigned int n = 0U; while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) { k_mem_slab_free(stream->cfg.mem_slab, mem_block); n++; } for (; n > 0; n--) { k_sem_give(&stream->sem); } } static int i2s_sam_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd) { const 
struct i2s_sam_dev_cfg *const dev_cfg = dev->config; struct i2s_sam_dev_data *const dev_data = dev->data; Ssc *const ssc = dev_cfg->regs; struct stream *stream; unsigned int key; int ret; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; } else if (dir == I2S_DIR_TX) { stream = &dev_data->tx; } else if (dir == I2S_DIR_BOTH) { return -ENOSYS; } else { LOG_ERR("Either RX or TX direction must be selected"); return -EINVAL; } switch (cmd) { case I2S_TRIGGER_START: if (stream->state != I2S_STATE_READY) { LOG_DBG("START trigger: invalid state"); return -EIO; } __ASSERT_NO_MSG(stream->mem_block == NULL); ret = stream->stream_start(stream, ssc, dev_cfg->dev_dma); if (ret < 0) { LOG_DBG("START trigger failed %d", ret); return ret; } stream->state = I2S_STATE_RUNNING; stream->last_block = false; break; case I2S_TRIGGER_STOP: key = irq_lock(); if (stream->state != I2S_STATE_RUNNING) { irq_unlock(key); LOG_DBG("STOP trigger: invalid state"); return -EIO; } stream->state = I2S_STATE_STOPPING; irq_unlock(key); stream->last_block = true; break; case I2S_TRIGGER_DRAIN: key = irq_lock(); if (stream->state != I2S_STATE_RUNNING) { irq_unlock(key); LOG_DBG("DRAIN trigger: invalid state"); return -EIO; } stream->state = I2S_STATE_STOPPING; irq_unlock(key); break; case I2S_TRIGGER_DROP: if (stream->state == I2S_STATE_NOT_READY) { LOG_DBG("DROP trigger: invalid state"); return -EIO; } stream->stream_disable(stream, ssc, dev_cfg->dev_dma); stream->queue_drop(stream); stream->state = I2S_STATE_READY; break; case I2S_TRIGGER_PREPARE: if (stream->state != I2S_STATE_ERROR) { LOG_DBG("PREPARE trigger: invalid state"); return -EIO; } stream->state = I2S_STATE_READY; stream->queue_drop(stream); break; default: LOG_ERR("Unsupported trigger command"); return -EINVAL; } return 0; } static int i2s_sam_read(const struct device *dev, void **mem_block, size_t *size) { struct i2s_sam_dev_data *const dev_data = dev->data; int ret; if (dev_data->rx.state == I2S_STATE_NOT_READY) { LOG_DBG("invalid state"); 
return -EIO; } if (dev_data->rx.state != I2S_STATE_ERROR) { ret = k_sem_take(&dev_data->rx.sem, SYS_TIMEOUT_MS(dev_data->rx.cfg.timeout)); if (ret < 0) { return ret; } } /* Get data from the beginning of RX queue */ ret = queue_get(&dev_data->rx.mem_block_queue, mem_block, size); if (ret < 0) { return -EIO; } return 0; } static int i2s_sam_write(const struct device *dev, void *mem_block, size_t size) { struct i2s_sam_dev_data *const dev_data = dev->data; int ret; if (dev_data->tx.state != I2S_STATE_RUNNING && dev_data->tx.state != I2S_STATE_READY) { LOG_DBG("invalid state"); return -EIO; } ret = k_sem_take(&dev_data->tx.sem, SYS_TIMEOUT_MS(dev_data->tx.cfg.timeout)); if (ret < 0) { return ret; } /* Add data to the end of the TX queue */ queue_put(&dev_data->tx.mem_block_queue, mem_block, size); return 0; } static void i2s_sam_isr(const struct device *dev) { const struct i2s_sam_dev_cfg *const dev_cfg = dev->config; struct i2s_sam_dev_data *const dev_data = dev->data; Ssc *const ssc = dev_cfg->regs; uint32_t isr_status; /* Retrieve interrupt status */ isr_status = ssc->SSC_SR & ssc->SSC_IMR; /* Check for RX buffer overrun */ if (isr_status & SSC_SR_OVRUN) { dev_data->rx.state = I2S_STATE_ERROR; /* Disable interrupt */ ssc->SSC_IDR = SSC_IDR_OVRUN; LOG_DBG("RX buffer overrun error"); } /* Check for TX buffer underrun */ if (isr_status & SSC_SR_TXEMPTY) { dev_data->tx.state = I2S_STATE_ERROR; /* Disable interrupt */ ssc->SSC_IDR = SSC_IDR_TXEMPTY; LOG_DBG("TX buffer underrun error"); } } static int i2s_sam_initialize(const struct device *dev) { const struct i2s_sam_dev_cfg *const dev_cfg = dev->config; struct i2s_sam_dev_data *const dev_data = dev->data; Ssc *const ssc = dev_cfg->regs; int ret; /* Configure interrupts */ dev_cfg->irq_config(); /* Initialize semaphores */ k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_SAM_SSC_RX_BLOCK_COUNT); k_sem_init(&dev_data->tx.sem, CONFIG_I2S_SAM_SSC_TX_BLOCK_COUNT, CONFIG_I2S_SAM_SSC_TX_BLOCK_COUNT); if 
(!device_is_ready(dev_cfg->dev_dma)) { LOG_ERR("%s device not ready", dev_cfg->dev_dma->name); return -ENODEV; } /* Connect pins to the peripheral */ ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Enable SSC clock in PMC */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&dev_cfg->clock_cfg); /* Reset the module, disable receiver & transmitter */ ssc->SSC_CR = SSC_CR_RXDIS | SSC_CR_TXDIS | SSC_CR_SWRST; /* Enable module's IRQ */ irq_enable(dev_cfg->irq_id); LOG_INF("Device %s initialized", dev->name); return 0; } static const struct i2s_driver_api i2s_sam_driver_api = { .configure = i2s_sam_configure, .config_get = i2s_sam_config_get, .read = i2s_sam_read, .write = i2s_sam_write, .trigger = i2s_sam_trigger, }; /* I2S0 */ static const struct device *get_dev_from_dma_channel(uint32_t dma_channel) { return &DEVICE_DT_NAME_GET(DT_DRV_INST(0)); } static void i2s0_sam_irq_config(void) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), i2s_sam_isr, DEVICE_DT_INST_GET(0), 0); } PINCTRL_DT_INST_DEFINE(0); static const struct i2s_sam_dev_cfg i2s0_sam_config = { .dev_dma = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(0, tx)), .regs = (Ssc *)DT_INST_REG_ADDR(0), .irq_config = i2s0_sam_irq_config, .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0), .irq_id = DT_INST_IRQN(0), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; struct queue_item rx_0_ring_buf[CONFIG_I2S_SAM_SSC_RX_BLOCK_COUNT + 1]; struct queue_item tx_0_ring_buf[CONFIG_I2S_SAM_SSC_TX_BLOCK_COUNT + 1]; static struct i2s_sam_dev_data i2s0_sam_data = { .rx = { .dma_channel = DT_INST_DMAS_CELL_BY_NAME(0, rx, channel), .dma_perid = DT_INST_DMAS_CELL_BY_NAME(0, rx, perid), .mem_block_queue.buf = rx_0_ring_buf, .mem_block_queue.len = ARRAY_SIZE(rx_0_ring_buf), .stream_start = rx_stream_start, .stream_disable = rx_stream_disable, .queue_drop = rx_queue_drop, .set_data_format = set_rx_data_format, }, .tx = { .dma_channel = DT_INST_DMAS_CELL_BY_NAME(0, tx, 
channel), .dma_perid = DT_INST_DMAS_CELL_BY_NAME(0, tx, perid), .mem_block_queue.buf = tx_0_ring_buf, .mem_block_queue.len = ARRAY_SIZE(tx_0_ring_buf), .stream_start = tx_stream_start, .stream_disable = tx_stream_disable, .queue_drop = tx_queue_drop, .set_data_format = set_tx_data_format, }, }; DEVICE_DT_INST_DEFINE(0, &i2s_sam_initialize, NULL, &i2s0_sam_data, &i2s0_sam_config, POST_KERNEL, CONFIG_I2S_INIT_PRIORITY, &i2s_sam_driver_api); ```
/content/code_sandbox/drivers/i2s/i2s_sam_ssc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,161
```objective-c /* * All rights reserved. * */ #ifndef ZEPHYR_DRIVERS_I2S_MCUX_H_ #define ZEPHYR_DRIVERS_I2S_MCUX_H_ #ifdef __cplusplus extern "C" { #endif #include <zephyr/device.h> #include <fsl_sai.h> #include <fsl_edma.h> #define SAI_WORD_SIZE_BITS_MIN 8 #define SAI_WORD_SIZE_BITS_MAX 32 #define SAI_WORD_PER_FRAME_MIN 0 #define SAI_WORD_PER_FRAME_MAX 32 #ifdef __cplusplus } #endif #endif /* ZEPHYR_DRIVERS_I2S_MCUX_H_ */ ```
/content/code_sandbox/drivers/i2s/i2s_mcux_sai.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
135
```c /* * All rights reserved. * */ /** @file * @brief I2S bus (SAI) driver for NXP i.MX RT series. */ #include <errno.h> #include <string.h> #include <zephyr/sys/__assert.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/i2s.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/dt-bindings/clock/imx_ccm.h> #include <zephyr/sys/barrier.h> #include <soc.h> #include "i2s_mcux_sai.h" #define LOG_DOMAIN dev_i2s_mcux #define LOG_LEVEL CONFIG_I2S_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(LOG_DOMAIN); #define DT_DRV_COMPAT nxp_mcux_i2s #define NUM_DMA_BLOCKS_RX_PREP 3 #define MAX_TX_DMA_BLOCKS CONFIG_DMA_TCD_QUEUE_SIZE #if (NUM_DMA_BLOCKS_RX_PREP >= CONFIG_DMA_TCD_QUEUE_SIZE) #error NUM_DMA_BLOCKS_RX_PREP must be < CONFIG_DMA_TCD_QUEUE_SIZE #endif #if defined(CONFIG_DMA_MCUX_EDMA) && (NUM_DMA_BLOCKS_RX_PREP < 3) #error eDMA avoids TCD coherency issue if NUM_DMA_BLOCKS_RX_PREP >= 3 #endif /* * SAI driver uses source_gather_en/dest_scatter_en feature of DMA, and relies * on DMA driver managing circular list of DMA blocks. Like eDMA driver links * Transfer Control Descriptors (TCDs) in list, and manages the tcdpool. * Calling dma_reload() adds new DMA block to DMA channel already configured, * into the DMA driver's circular list of blocks. * This indicates the Tx/Rx stream. * * in_queue and out_queue are used as follows * transmit stream: * application provided buffer is queued to in_queue until loaded to DMA. * when DMA channel is idle, buffer is retrieved from in_queue and loaded * to DMA and queued to out_queue. when DMA completes, buffer is retrieved * from out_queue and freed. 
* * receive stream: * driver allocates buffer from slab and loads DMA buffer is queued to * in_queue when DMA completes, buffer is retrieved from in_queue * and queued to out_queue when application reads, buffer is read * (may optionally block) from out_queue and presented to application. */ struct stream { int32_t state; uint32_t dma_channel; uint32_t start_channel; void (*irq_call_back)(void); struct i2s_config cfg; struct dma_config dma_cfg; struct dma_block_config dma_block; uint8_t free_tx_dma_blocks; bool last_block; struct k_msgq in_queue; struct k_msgq out_queue; }; struct i2s_mcux_config { I2S_Type *base; uint32_t clk_src; uint32_t clk_pre_div; uint32_t clk_src_div; uint32_t pll_src; uint32_t pll_lp; uint32_t pll_pd; uint32_t pll_num; uint32_t pll_den; uint32_t mclk_pin_mask; uint32_t mclk_pin_offset; uint32_t tx_channel; clock_control_subsys_t clk_sub_sys; const struct device *ccm_dev; const struct pinctrl_dev_config *pinctrl; void (*irq_connect)(const struct device *dev); bool rx_sync_mode; bool tx_sync_mode; }; /* Device run time data */ struct i2s_dev_data { const struct device *dev_dma; struct stream tx; void *tx_in_msgs[CONFIG_I2S_TX_BLOCK_COUNT]; void *tx_out_msgs[CONFIG_I2S_TX_BLOCK_COUNT]; struct stream rx; void *rx_in_msgs[CONFIG_I2S_RX_BLOCK_COUNT]; void *rx_out_msgs[CONFIG_I2S_RX_BLOCK_COUNT]; }; static void i2s_dma_tx_callback(const struct device *, void *, uint32_t, int); static void i2s_tx_stream_disable(const struct device *, bool drop); static void i2s_rx_stream_disable(const struct device *, bool in_drop, bool out_drop); static inline void i2s_purge_stream_buffers(struct stream *strm, struct k_mem_slab *mem_slab, bool in_drop, bool out_drop) { void *buffer; if (in_drop) { while (k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT) == 0) { k_mem_slab_free(mem_slab, buffer); } } if (out_drop) { while (k_msgq_get(&strm->out_queue, &buffer, K_NO_WAIT) == 0) { k_mem_slab_free(mem_slab, buffer); } } } static void i2s_tx_stream_disable(const struct 
device *dev, bool drop) { struct i2s_dev_data *dev_data = dev->data; struct stream *strm = &dev_data->tx; const struct device *dev_dma = dev_data->dev_dma; const struct i2s_mcux_config *dev_cfg = dev->config; LOG_DBG("Stopping DMA channel %u for TX stream", strm->dma_channel); /* Disable FIFO DMA request */ SAI_TxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, false); dma_stop(dev_dma, strm->dma_channel); /* wait for TX FIFO to drain before disabling */ while ((dev_cfg->base->TCSR & I2S_TCSR_FWF_MASK) == 0) { ; } /* Disable the channel FIFO */ dev_cfg->base->TCR3 &= ~I2S_TCR3_TCE_MASK; /* Disable Tx */ SAI_TxEnable(dev_cfg->base, false); /* If Tx is disabled, reset the FIFO pointer, clear error flags */ if ((dev_cfg->base->TCSR & I2S_TCSR_TE_MASK) == 0UL) { dev_cfg->base->TCSR |= (I2S_TCSR_FR_MASK | I2S_TCSR_SR_MASK); dev_cfg->base->TCSR &= ~I2S_TCSR_SR_MASK; } /* purge buffers queued in the stream */ if (drop) { i2s_purge_stream_buffers(strm, dev_data->tx.cfg.mem_slab, true, true); } } static void i2s_rx_stream_disable(const struct device *dev, bool in_drop, bool out_drop) { struct i2s_dev_data *dev_data = dev->data; struct stream *strm = &dev_data->rx; const struct device *dev_dma = dev_data->dev_dma; const struct i2s_mcux_config *dev_cfg = dev->config; LOG_DBG("Stopping RX stream & DMA channel %u", strm->dma_channel); dma_stop(dev_dma, strm->dma_channel); /* Disable the channel FIFO */ dev_cfg->base->RCR3 &= ~I2S_RCR3_RCE_MASK; /* Disable DMA enable bit */ SAI_RxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, false); /* Disable Rx */ SAI_RxEnable(dev_cfg->base, false); /* wait for Receiver to disable */ while (dev_cfg->base->RCSR & I2S_RCSR_RE_MASK) { ; } /* reset the FIFO pointer and clear error flags */ dev_cfg->base->RCSR |= (I2S_RCSR_FR_MASK | I2S_RCSR_SR_MASK); dev_cfg->base->RCSR &= ~I2S_RCSR_SR_MASK; /* purge buffers queued in the stream */ if (in_drop || out_drop) { i2s_purge_stream_buffers(strm, dev_data->rx.cfg.mem_slab, in_drop, out_drop); } } 
static int i2s_tx_reload_multiple_dma_blocks(const struct device *dev, uint8_t *blocks_queued) { struct i2s_dev_data *dev_data = dev->data; const struct i2s_mcux_config *dev_cfg = dev->config; I2S_Type *base = (I2S_Type *)dev_cfg->base; struct stream *strm = &dev_data->tx; void *buffer = NULL; int ret = 0; unsigned int key; *blocks_queued = 0; key = irq_lock(); /* queue additional blocks to DMA if in_queue and DMA has free blocks */ while (strm->free_tx_dma_blocks) { /* get the next buffer from queue */ ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT); if (ret) { /* in_queue is empty, no more blocks to send to DMA */ ret = 0; break; } /* reload the DMA */ ret = dma_reload(dev_data->dev_dma, strm->dma_channel, (uint32_t)buffer, (uint32_t)&base->TDR[strm->start_channel], strm->cfg.block_size); if (ret != 0) { LOG_ERR("dma_reload() failed with error 0x%x", ret); break; } (strm->free_tx_dma_blocks)--; ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("buffer %p -> out %p err %d", buffer, &strm->out_queue, ret); break; } (*blocks_queued)++; } irq_unlock(key); return ret; } /* This function is executed in the interrupt context */ static void i2s_dma_tx_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status) { const struct device *dev = (struct device *)arg; struct i2s_dev_data *dev_data = dev->data; struct stream *strm = &dev_data->tx; void *buffer = NULL; int ret; uint8_t blocks_queued; LOG_DBG("tx cb"); ret = k_msgq_get(&strm->out_queue, &buffer, K_NO_WAIT); if (ret == 0) { /* transmission complete. 
free the buffer */ k_mem_slab_free(strm->cfg.mem_slab, buffer); (strm->free_tx_dma_blocks)++; } else { LOG_ERR("no buf in out_queue for channel %u", channel); } if (strm->free_tx_dma_blocks > MAX_TX_DMA_BLOCKS) { strm->state = I2S_STATE_ERROR; LOG_ERR("free_tx_dma_blocks exceeded maximum, now %d", strm->free_tx_dma_blocks); goto disabled_exit_no_drop; } /* Received a STOP trigger, terminate TX immediately */ if (strm->last_block) { strm->state = I2S_STATE_READY; LOG_DBG("TX STOPPED last_block set"); goto disabled_exit_no_drop; } if (ret) { /* k_msgq_get() returned error, and was not last_block */ strm->state = I2S_STATE_ERROR; goto disabled_exit_no_drop; } switch (strm->state) { case I2S_STATE_RUNNING: case I2S_STATE_STOPPING: ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued); if (ret) { strm->state = I2S_STATE_ERROR; goto disabled_exit_no_drop; } dma_start(dev_data->dev_dma, strm->dma_channel); if (blocks_queued || (strm->free_tx_dma_blocks < MAX_TX_DMA_BLOCKS)) { goto enabled_exit; } else { /* all DMA blocks are free but no blocks were queued */ if (strm->state == I2S_STATE_STOPPING) { /* TX queue has drained */ strm->state = I2S_STATE_READY; LOG_DBG("TX stream has stopped"); } else { strm->state = I2S_STATE_ERROR; LOG_ERR("TX Failed to reload DMA"); } goto disabled_exit_no_drop; } case I2S_STATE_ERROR: default: goto disabled_exit_drop; } disabled_exit_no_drop: i2s_tx_stream_disable(dev, false); return; disabled_exit_drop: i2s_tx_stream_disable(dev, true); return; enabled_exit: return; } static void i2s_dma_rx_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status) { struct device *dev = (struct device *)arg; const struct i2s_mcux_config *dev_cfg = dev->config; I2S_Type *base = (I2S_Type *)dev_cfg->base; struct i2s_dev_data *dev_data = dev->data; struct stream *strm = &dev_data->rx; void *buffer; int ret; LOG_DBG("RX cb"); switch (strm->state) { case I2S_STATE_STOPPING: case I2S_STATE_RUNNING: /* retrieve buffer from input queue 
*/ ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT); __ASSERT_NO_MSG(ret == 0); /* put buffer to output queue */ ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("buffer %p -> out_queue %p err %d", buffer, &strm->out_queue, ret); i2s_rx_stream_disable(dev, false, false); strm->state = I2S_STATE_ERROR; return; } if (strm->state == I2S_STATE_RUNNING) { /* allocate new buffer for next audio frame */ ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("buffer alloc from slab %p err %d", strm->cfg.mem_slab, ret); i2s_rx_stream_disable(dev, false, false); strm->state = I2S_STATE_ERROR; } else { uint32_t data_path = strm->start_channel; ret = dma_reload(dev_data->dev_dma, strm->dma_channel, (uint32_t)&base->RDR[data_path], (uint32_t)buffer, strm->cfg.block_size); if (ret != 0) { LOG_ERR("dma_reload() failed with error 0x%x", ret); i2s_rx_stream_disable(dev, false, false); strm->state = I2S_STATE_ERROR; return; } /* put buffer in input queue */ ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("%p -> in_queue %p err %d", buffer, &strm->in_queue, ret); } dma_start(dev_data->dev_dma, strm->dma_channel); } } else { i2s_rx_stream_disable(dev, true, false); /* Received a STOP/DRAIN trigger */ strm->state = I2S_STATE_READY; } break; case I2S_STATE_ERROR: i2s_rx_stream_disable(dev, true, true); break; } } static void enable_mclk_direction(const struct device *dev, bool dir) { const struct i2s_mcux_config *dev_cfg = dev->config; uint32_t offset = dev_cfg->mclk_pin_offset; uint32_t mask = dev_cfg->mclk_pin_mask; uint32_t *gpr = (uint32_t *) (DT_REG_ADDR(DT_NODELABEL(iomuxcgpr)) + offset); if (dir) { *gpr |= mask; } else { *gpr &= ~mask; } } static void get_mclk_rate(const struct device *dev, uint32_t *mclk) { const struct i2s_mcux_config *dev_cfg = dev->config; const struct device *ccm_dev = dev_cfg->ccm_dev; clock_control_subsys_t clk_sub_sys = dev_cfg->clk_sub_sys; uint32_t rate = 
0; if (device_is_ready(ccm_dev)) { clock_control_get_rate(ccm_dev, clk_sub_sys, &rate); } else { LOG_ERR("CCM driver is not installed"); *mclk = rate; return; } *mclk = rate; } static int i2s_mcux_config(const struct device *dev, enum i2s_dir dir, const struct i2s_config *i2s_cfg) { const struct i2s_mcux_config *dev_cfg = dev->config; I2S_Type *base = (I2S_Type *)dev_cfg->base; struct i2s_dev_data *dev_data = dev->data; sai_transceiver_t config; uint32_t mclk; /*num_words is frame size*/ uint8_t num_words = i2s_cfg->channels; uint8_t word_size_bits = i2s_cfg->word_size; if ((dev_data->tx.state != I2S_STATE_NOT_READY) && (dev_data->tx.state != I2S_STATE_READY) && (dev_data->rx.state != I2S_STATE_NOT_READY) && (dev_data->rx.state != I2S_STATE_READY)) { LOG_ERR("invalid state tx(%u) rx(%u)", dev_data->tx.state, dev_data->rx.state); if (dir == I2S_DIR_TX) { dev_data->tx.state = I2S_STATE_NOT_READY; } else { dev_data->rx.state = I2S_STATE_NOT_READY; } return -EINVAL; } if (i2s_cfg->frame_clk_freq == 0U) { LOG_ERR("Invalid frame_clk_freq %u", i2s_cfg->frame_clk_freq); if (dir == I2S_DIR_TX) { dev_data->tx.state = I2S_STATE_NOT_READY; } else { dev_data->rx.state = I2S_STATE_NOT_READY; } return 0; } if (word_size_bits < SAI_WORD_SIZE_BITS_MIN || word_size_bits > SAI_WORD_SIZE_BITS_MAX) { LOG_ERR("Unsupported I2S word size %u", word_size_bits); if (dir == I2S_DIR_TX) { dev_data->tx.state = I2S_STATE_NOT_READY; } else { dev_data->rx.state = I2S_STATE_NOT_READY; } return -EINVAL; } if (num_words < SAI_WORD_PER_FRAME_MIN || num_words > SAI_WORD_PER_FRAME_MAX) { LOG_ERR("Unsupported words length %u", num_words); if (dir == I2S_DIR_TX) { dev_data->tx.state = I2S_STATE_NOT_READY; } else { dev_data->rx.state = I2S_STATE_NOT_READY; } return -EINVAL; } if ((i2s_cfg->options & I2S_OPT_PINGPONG) == I2S_OPT_PINGPONG) { LOG_ERR("Ping-pong mode not supported"); if (dir == I2S_DIR_TX) { dev_data->tx.state = I2S_STATE_NOT_READY; } else { dev_data->rx.state = I2S_STATE_NOT_READY; } return 
-ENOTSUP; } memset(&config, 0, sizeof(config)); const bool is_mclk_slave = i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE; enable_mclk_direction(dev, !is_mclk_slave); get_mclk_rate(dev, &mclk); LOG_DBG("mclk is %d", mclk); /* bit clock source is MCLK */ config.bitClock.bclkSource = kSAI_BclkSourceMclkDiv; /* * additional settings for bclk * read the SDK header file for more details */ config.bitClock.bclkInputDelay = false; /* frame sync default configurations */ #if defined(FSL_FEATURE_SAI_HAS_ON_DEMAND_MODE) && \ FSL_FEATURE_SAI_HAS_ON_DEMAND_MODE config.frameSync.frameSyncGenerateOnDemand = false; #endif /* serial data default configurations */ #if defined(FSL_FEATURE_SAI_HAS_CHANNEL_MODE) && \ FSL_FEATURE_SAI_HAS_CHANNEL_MODE config.serialData.dataMode = kSAI_DataPinStateOutputZero; #endif config.frameSync.frameSyncPolarity = kSAI_PolarityActiveLow; config.bitClock.bclkSrcSwap = false; /* format */ switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) { case I2S_FMT_DATA_FORMAT_I2S: SAI_GetClassicI2SConfig(&config, word_size_bits, kSAI_Stereo, dev_cfg->tx_channel); break; case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED: SAI_GetLeftJustifiedConfig(&config, word_size_bits, kSAI_Stereo, dev_cfg->tx_channel); break; case I2S_FMT_DATA_FORMAT_PCM_SHORT: SAI_GetDSPConfig(&config, kSAI_FrameSyncLenOneBitClk, word_size_bits, kSAI_Stereo, dev_cfg->tx_channel); /* We need to set the data word count manually, since the HAL * function does not */ config.serialData.dataWordNum = num_words; config.frameSync.frameSyncEarly = true; config.bitClock.bclkPolarity = kSAI_SampleOnFallingEdge; break; case I2S_FMT_DATA_FORMAT_PCM_LONG: SAI_GetTDMConfig(&config, kSAI_FrameSyncLenPerWordWidth, word_size_bits, num_words, dev_cfg->tx_channel); config.bitClock.bclkPolarity = kSAI_SampleOnFallingEdge; break; default: LOG_ERR("Unsupported I2S data format"); if (dir == I2S_DIR_TX) { dev_data->tx.state = I2S_STATE_NOT_READY; } else { dev_data->rx.state = I2S_STATE_NOT_READY; } return -EINVAL; } /* sync mode 
configurations */ if (dir == I2S_DIR_TX) { /* TX */ if (dev_cfg->tx_sync_mode) { config.syncMode = kSAI_ModeSync; } else { config.syncMode = kSAI_ModeAsync; } } else { /* RX */ if (dev_cfg->rx_sync_mode) { config.syncMode = kSAI_ModeSync; } else { config.syncMode = kSAI_ModeAsync; } } if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE) { if (i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) { config.masterSlave = kSAI_Slave; } else { config.masterSlave = kSAI_Bclk_Master_FrameSync_Slave; } } else { if (i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) { config.masterSlave = kSAI_Bclk_Slave_FrameSync_Master; } else { config.masterSlave = kSAI_Master; } } /* clock signal polarity */ switch (i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) { case I2S_FMT_CLK_NF_NB: /* No action required, leave the configuration untouched */ break; case I2S_FMT_CLK_NF_IB: /* Swap bclk polarity */ config.bitClock.bclkPolarity = (config.bitClock.bclkPolarity == kSAI_SampleOnFallingEdge) ? kSAI_SampleOnRisingEdge : kSAI_SampleOnFallingEdge; break; case I2S_FMT_CLK_IF_NB: /* Swap frame sync polarity */ config.frameSync.frameSyncPolarity = (config.frameSync.frameSyncPolarity == kSAI_PolarityActiveHigh) ? kSAI_PolarityActiveLow : kSAI_PolarityActiveHigh; break; case I2S_FMT_CLK_IF_IB: /* Swap frame sync and bclk polarity */ config.frameSync.frameSyncPolarity = (config.frameSync.frameSyncPolarity == kSAI_PolarityActiveHigh) ? kSAI_PolarityActiveLow : kSAI_PolarityActiveHigh; config.bitClock.bclkPolarity = (config.bitClock.bclkPolarity == kSAI_SampleOnFallingEdge) ? 
kSAI_SampleOnRisingEdge : kSAI_SampleOnFallingEdge; break; } /* PCM short format always requires that WS be one BCLK cycle */ if ((i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) != I2S_FMT_DATA_FORMAT_PCM_SHORT) { config.frameSync.frameSyncWidth = (uint8_t)word_size_bits; } if (dir == I2S_DIR_TX) { memcpy(&dev_data->tx.cfg, i2s_cfg, sizeof(struct i2s_config)); LOG_DBG("tx slab free_list = 0x%x", (uint32_t)i2s_cfg->mem_slab->free_list); LOG_DBG("tx slab num_blocks = %d", (uint32_t)i2s_cfg->mem_slab->info.num_blocks); LOG_DBG("tx slab block_size = %d", (uint32_t)i2s_cfg->mem_slab->info.block_size); LOG_DBG("tx slab buffer = 0x%x", (uint32_t)i2s_cfg->mem_slab->buffer); /* set bit clock divider */ SAI_TxSetConfig(base, &config); dev_data->tx.start_channel = config.startChannel; /* Disable the channel FIFO */ base->TCR3 &= ~I2S_TCR3_TCE_MASK; SAI_TxSetBitClockRate(base, mclk, i2s_cfg->frame_clk_freq, word_size_bits, i2s_cfg->channels); LOG_DBG("tx start_channel = %d", dev_data->tx.start_channel); /*set up dma settings*/ dev_data->tx.dma_cfg.source_data_size = word_size_bits / 8; dev_data->tx.dma_cfg.dest_data_size = word_size_bits / 8; dev_data->tx.dma_cfg.source_burst_length = i2s_cfg->word_size / 8; dev_data->tx.dma_cfg.dest_burst_length = i2s_cfg->word_size / 8; dev_data->tx.dma_cfg.user_data = (void *)dev; dev_data->tx.state = I2S_STATE_READY; } else { /* For RX, DMA reads from FIFO whenever data present */ config.fifo.fifoWatermark = 0; memcpy(&dev_data->rx.cfg, i2s_cfg, sizeof(struct i2s_config)); LOG_DBG("rx slab free_list = 0x%x", (uint32_t)i2s_cfg->mem_slab->free_list); LOG_DBG("rx slab num_blocks = %d", (uint32_t)i2s_cfg->mem_slab->info.num_blocks); LOG_DBG("rx slab block_size = %d", (uint32_t)i2s_cfg->mem_slab->info.block_size); LOG_DBG("rx slab buffer = 0x%x", (uint32_t)i2s_cfg->mem_slab->buffer); /* set bit clock divider */ SAI_RxSetConfig(base, &config); dev_data->rx.start_channel = config.startChannel; SAI_RxSetBitClockRate(base, mclk, 
i2s_cfg->frame_clk_freq, word_size_bits, i2s_cfg->channels); LOG_DBG("rx start_channel = %d", dev_data->rx.start_channel); /*set up dma settings*/ dev_data->rx.dma_cfg.source_data_size = word_size_bits / 8; dev_data->rx.dma_cfg.dest_data_size = word_size_bits / 8; dev_data->rx.dma_cfg.source_burst_length = i2s_cfg->word_size / 8; dev_data->rx.dma_cfg.dest_burst_length = i2s_cfg->word_size / 8; dev_data->rx.dma_cfg.user_data = (void *)dev; dev_data->rx.state = I2S_STATE_READY; } return 0; } const struct i2s_config *i2s_mcux_config_get(const struct device *dev, enum i2s_dir dir) { struct i2s_dev_data *dev_data = dev->data; if (dir == I2S_DIR_RX) { return &dev_data->rx.cfg; } return &dev_data->tx.cfg; } static int i2s_tx_stream_start(const struct device *dev) { int ret = 0; void *buffer; struct i2s_dev_data *dev_data = dev->data; struct stream *strm = &dev_data->tx; const struct device *dev_dma = dev_data->dev_dma; const struct i2s_mcux_config *dev_cfg = dev->config; I2S_Type *base = (I2S_Type *)dev_cfg->base; /* retrieve buffer from input queue */ ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("No buffer in input queue to start"); return -EIO; } LOG_DBG("tx stream start"); /* Driver keeps track of how many DMA blocks can be loaded to the DMA */ strm->free_tx_dma_blocks = MAX_TX_DMA_BLOCKS; /* Configure the DMA with the first TX block */ struct dma_block_config *blk_cfg = &strm->dma_block; memset(blk_cfg, 0, sizeof(struct dma_block_config)); uint32_t data_path = strm->start_channel; blk_cfg->dest_address = (uint32_t)&base->TDR[data_path]; blk_cfg->source_address = (uint32_t)buffer; blk_cfg->block_size = strm->cfg.block_size; blk_cfg->dest_scatter_en = 1; strm->dma_cfg.block_count = 1; strm->dma_cfg.head_block = &strm->dma_block; strm->dma_cfg.user_data = (void *)dev; (strm->free_tx_dma_blocks)--; dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg); /* put buffer in output queue */ ret = k_msgq_put(&strm->out_queue, &buffer, 
K_NO_WAIT); if (ret != 0) { LOG_ERR("failed to put buffer in output queue"); return ret; } uint8_t blocks_queued; ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued); if (ret) { LOG_ERR("i2s_tx_reload_multiple_dma_blocks() failed (%d)", ret); return ret; } ret = dma_start(dev_dma, strm->dma_channel); if (ret < 0) { LOG_ERR("dma_start failed (%d)", ret); return ret; } /* Enable DMA enable bit */ SAI_TxEnableDMA(base, kSAI_FIFORequestDMAEnable, true); /* Enable the channel FIFO */ base->TCR3 |= I2S_TCR3_TCE(1UL << strm->start_channel); /* Enable SAI Tx clock */ SAI_TxEnable(base, true); return 0; } static int i2s_rx_stream_start(const struct device *dev) { int ret = 0; void *buffer; struct i2s_dev_data *dev_data = dev->data; struct stream *strm = &dev_data->rx; const struct device *dev_dma = dev_data->dev_dma; const struct i2s_mcux_config *dev_cfg = dev->config; I2S_Type *base = (I2S_Type *)dev_cfg->base; uint8_t num_of_bufs; num_of_bufs = k_mem_slab_num_free_get(strm->cfg.mem_slab); /* * Need at least NUM_DMA_BLOCKS_RX_PREP buffers on the RX memory slab * for reliable DMA reception. 
*/ if (num_of_bufs < NUM_DMA_BLOCKS_RX_PREP) { return -EINVAL; } /* allocate 1st receive buffer from SLAB */ ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT); if (ret != 0) { LOG_DBG("buffer alloc from mem_slab failed (%d)", ret); return ret; } /* Configure DMA block */ struct dma_block_config *blk_cfg = &strm->dma_block; memset(blk_cfg, 0, sizeof(struct dma_block_config)); uint32_t data_path = strm->start_channel; blk_cfg->dest_address = (uint32_t)buffer; blk_cfg->source_address = (uint32_t)&base->RDR[data_path]; blk_cfg->block_size = strm->cfg.block_size; blk_cfg->source_gather_en = 1; strm->dma_cfg.block_count = 1; strm->dma_cfg.head_block = &strm->dma_block; strm->dma_cfg.user_data = (void *)dev; dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg); /* put buffer in input queue */ ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("failed to put buffer in input queue, ret1 %d", ret); return ret; } /* prep DMA for each of remaining (NUM_DMA_BLOCKS_RX_PREP-1) buffers */ for (int i = 0; i < NUM_DMA_BLOCKS_RX_PREP - 1; i++) { /* allocate receive buffer from SLAB */ ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("buffer alloc from mem_slab failed (%d)", ret); return ret; } ret = dma_reload(dev_dma, strm->dma_channel, (uint32_t)&base->RDR[data_path], (uint32_t)buffer, blk_cfg->block_size); if (ret != 0) { LOG_ERR("dma_reload() failed with error 0x%x", ret); return ret; } /* put buffer in input queue */ ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("failed to put buffer in input queue, ret2 %d", ret); return ret; } } LOG_DBG("Starting DMA Ch%u", strm->dma_channel); ret = dma_start(dev_dma, strm->dma_channel); if (ret < 0) { LOG_ERR("Failed to start DMA Ch%d (%d)", strm->dma_channel, ret); return ret; } /* Enable DMA enable bit */ SAI_RxEnableDMA(base, kSAI_FIFORequestDMAEnable, true); /* Enable the channel FIFO */ base->RCR3 |= I2S_RCR3_RCE(1UL << 
strm->start_channel); /* Enable SAI Rx clock */ SAI_RxEnable(base, true); return 0; } static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd) { struct i2s_dev_data *dev_data = dev->data; struct stream *strm; unsigned int key; int ret = 0; if (dir == I2S_DIR_BOTH) { return -ENOSYS; } strm = (dir == I2S_DIR_TX) ? &dev_data->tx : &dev_data->rx; key = irq_lock(); switch (cmd) { case I2S_TRIGGER_START: if (strm->state != I2S_STATE_READY) { LOG_ERR("START trigger: invalid state %u", strm->state); ret = -EIO; break; } if (dir == I2S_DIR_TX) { ret = i2s_tx_stream_start(dev); } else { ret = i2s_rx_stream_start(dev); } if (ret < 0) { LOG_DBG("START trigger failed %d", ret); ret = -EIO; break; } strm->state = I2S_STATE_RUNNING; strm->last_block = false; break; case I2S_TRIGGER_DROP: if (strm->state == I2S_STATE_NOT_READY) { LOG_ERR("DROP trigger: invalid state %d", strm->state); ret = -EIO; break; } strm->state = I2S_STATE_READY; if (dir == I2S_DIR_TX) { i2s_tx_stream_disable(dev, true); } else { i2s_rx_stream_disable(dev, true, true); } break; case I2S_TRIGGER_STOP: if (strm->state != I2S_STATE_RUNNING) { LOG_ERR("STOP trigger: invalid state %d", strm->state); ret = -EIO; break; } strm->state = I2S_STATE_STOPPING; strm->last_block = true; break; case I2S_TRIGGER_DRAIN: if (strm->state != I2S_STATE_RUNNING) { LOG_ERR("DRAIN/STOP trigger: invalid state %d", strm->state); ret = -EIO; break; } strm->state = I2S_STATE_STOPPING; break; case I2S_TRIGGER_PREPARE: if (strm->state != I2S_STATE_ERROR) { LOG_ERR("PREPARE trigger: invalid state %d", strm->state); ret = -EIO; break; } strm->state = I2S_STATE_READY; if (dir == I2S_DIR_TX) { i2s_tx_stream_disable(dev, true); } else { i2s_rx_stream_disable(dev, true, true); } break; default: LOG_ERR("Unsupported trigger command"); ret = -EINVAL; } irq_unlock(key); return ret; } static int i2s_mcux_read(const struct device *dev, void **mem_block, size_t *size) { struct i2s_dev_data *dev_data = 
dev->data; struct stream *strm = &dev_data->rx; void *buffer; int status, ret = 0; LOG_DBG("i2s_mcux_read"); if (strm->state == I2S_STATE_NOT_READY) { LOG_ERR("invalid state %d", strm->state); return -EIO; } status = k_msgq_get(&strm->out_queue, &buffer, SYS_TIMEOUT_MS(strm->cfg.timeout)); if (status != 0) { if (strm->state == I2S_STATE_ERROR) { ret = -EIO; } else { LOG_DBG("need retry"); ret = -EAGAIN; } return ret; } *mem_block = buffer; *size = strm->cfg.block_size; return 0; } static int i2s_mcux_write(const struct device *dev, void *mem_block, size_t size) { struct i2s_dev_data *dev_data = dev->data; struct stream *strm = &dev_data->tx; int ret; LOG_DBG("i2s_mcux_write"); if (strm->state != I2S_STATE_RUNNING && strm->state != I2S_STATE_READY) { LOG_ERR("invalid state (%d)", strm->state); return -EIO; } ret = k_msgq_put(&strm->in_queue, &mem_block, SYS_TIMEOUT_MS(strm->cfg.timeout)); if (ret) { LOG_DBG("k_msgq_put returned code %d", ret); return ret; } return ret; } static void sai_driver_irq(const struct device *dev) { const struct i2s_mcux_config *dev_cfg = dev->config; I2S_Type *base = (I2S_Type *)dev_cfg->base; if ((base->TCSR & I2S_TCSR_FEF_MASK) == I2S_TCSR_FEF_MASK) { /* Clear FIFO error flag to continue transfer */ SAI_TxClearStatusFlags(base, I2S_TCSR_FEF_MASK); /* Reset FIFO for safety */ SAI_TxSoftwareReset(base, kSAI_ResetTypeFIFO); LOG_DBG("sai tx error occurred"); } if ((base->RCSR & I2S_RCSR_FEF_MASK) == I2S_RCSR_FEF_MASK) { /* Clear FIFO error flag to continue transfer */ SAI_RxClearStatusFlags(base, I2S_RCSR_FEF_MASK); /* Reset FIFO for safety */ SAI_RxSoftwareReset(base, kSAI_ResetTypeFIFO); LOG_DBG("sai rx error occurred"); } } /* clear IRQ sources atm */ static void i2s_mcux_isr(void *arg) { struct device *dev = (struct device *)arg; const struct i2s_mcux_config *dev_cfg = dev->config; I2S_Type *base = (I2S_Type *)dev_cfg->base; if ((base->RCSR & I2S_TCSR_FEF_MASK) == I2S_TCSR_FEF_MASK) { sai_driver_irq(dev); } if ((base->TCSR & 
I2S_RCSR_FEF_MASK) == I2S_RCSR_FEF_MASK) { sai_driver_irq(dev); } /* * Add for ARM errata 838869, affects Cortex-M4, * Cortex-M4F Store immediate overlapping exception return operation * might vector to incorrect interrupt */ #if defined __CORTEX_M && (__CORTEX_M == 4U) barrier_dsync_fence_full(); #endif } static void audio_clock_settings(const struct device *dev) { clock_audio_pll_config_t audioPllConfig; const struct i2s_mcux_config *dev_cfg = dev->config; uint32_t clock_name = (uint32_t) dev_cfg->clk_sub_sys; /*Clock setting for SAI*/ imxrt_audio_codec_pll_init(clock_name, dev_cfg->clk_src, dev_cfg->clk_pre_div, dev_cfg->clk_src_div); #ifdef CONFIG_SOC_SERIES_IMXRT11XX audioPllConfig.loopDivider = dev_cfg->pll_lp; audioPllConfig.postDivider = dev_cfg->pll_pd; audioPllConfig.numerator = dev_cfg->pll_num; audioPllConfig.denominator = dev_cfg->pll_den; audioPllConfig.ssEnable = false; #elif defined CONFIG_SOC_SERIES_IMXRT10XX audioPllConfig.src = dev_cfg->pll_src; audioPllConfig.loopDivider = dev_cfg->pll_lp; audioPllConfig.postDivider = dev_cfg->pll_pd; audioPllConfig.numerator = dev_cfg->pll_num; audioPllConfig.denominator = dev_cfg->pll_den; #else #error Initialize SOC Series-specific clock_audio_pll_config_t #endif /* CONFIG_SOC_SERIES */ CLOCK_InitAudioPll(&audioPllConfig); } static int i2s_mcux_initialize(const struct device *dev) { const struct i2s_mcux_config *dev_cfg = dev->config; I2S_Type *base = (I2S_Type *)dev_cfg->base; struct i2s_dev_data *dev_data = dev->data; uint32_t mclk; int err; if (!dev_data->dev_dma) { LOG_ERR("DMA device not found"); return -ENODEV; } /* Initialize the buffer queues */ k_msgq_init(&dev_data->tx.in_queue, (char *)dev_data->tx_in_msgs, sizeof(void *), CONFIG_I2S_TX_BLOCK_COUNT); k_msgq_init(&dev_data->rx.in_queue, (char *)dev_data->rx_in_msgs, sizeof(void *), CONFIG_I2S_RX_BLOCK_COUNT); k_msgq_init(&dev_data->tx.out_queue, (char *)dev_data->tx_out_msgs, sizeof(void *), CONFIG_I2S_TX_BLOCK_COUNT); 
k_msgq_init(&dev_data->rx.out_queue, (char *)dev_data->rx_out_msgs, sizeof(void *), CONFIG_I2S_RX_BLOCK_COUNT); /* register ISR */ dev_cfg->irq_connect(dev); /* pinctrl */ err = pinctrl_apply_state(dev_cfg->pinctrl, PINCTRL_STATE_DEFAULT); if (err) { LOG_ERR("mclk pinctrl setup failed (%d)", err); return err; } /*clock configuration*/ audio_clock_settings(dev); SAI_Init(base); dev_data->tx.state = I2S_STATE_NOT_READY; dev_data->rx.state = I2S_STATE_NOT_READY; #if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \ (defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \ (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER)) sai_master_clock_t mclkConfig = { #if defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR) .mclkOutputEnable = true, #if !(defined(FSL_FEATURE_SAI_HAS_NO_MCR_MICS) && \ (FSL_FEATURE_SAI_HAS_NO_MCR_MICS)) .mclkSource = kSAI_MclkSourceSysclk, #endif #endif }; #endif get_mclk_rate(dev, &mclk); /* master clock configurations */ #if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \ (defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \ (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER)) #if defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \ (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) mclkConfig.mclkHz = mclk; mclkConfig.mclkSourceClkHz = mclk; #endif SAI_SetMasterClockConfig(base, &mclkConfig); #endif LOG_INF("Device %s initialized", dev->name); return 0; } static const struct i2s_driver_api i2s_mcux_driver_api = { .configure = i2s_mcux_config, .read = i2s_mcux_read, .write = i2s_mcux_write, .config_get = i2s_mcux_config_get, .trigger = i2s_mcux_trigger, }; #define I2S_MCUX_INIT(i2s_id) \ static void i2s_irq_connect_##i2s_id(const struct device *dev); \ \ PINCTRL_DT_INST_DEFINE(i2s_id); \ \ static const struct i2s_mcux_config i2s_##i2s_id##_config = { \ .base = (I2S_Type *)DT_INST_REG_ADDR(i2s_id), \ .clk_src = DT_INST_PROP(i2s_id, clock_mux), \ .clk_pre_div = DT_INST_PROP(i2s_id, pre_div), \ .clk_src_div = DT_INST_PROP(i2s_id, podf), \ .pll_src 
= \ DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), \ pll_clocks, src, value), \ .pll_lp = \ DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), \ pll_clocks, lp, value), \ .pll_pd = \ DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), \ pll_clocks, pd, value), \ .pll_num = \ DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), \ pll_clocks, num, value), \ .pll_den = \ DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), \ pll_clocks, den, value), \ .mclk_pin_mask = \ DT_PHA_BY_IDX(DT_DRV_INST(i2s_id), \ pinmuxes, 0, function), \ .mclk_pin_offset = \ DT_PHA_BY_IDX(DT_DRV_INST(i2s_id), \ pinmuxes, 0, pin), \ .clk_sub_sys = (clock_control_subsys_t) \ DT_INST_CLOCKS_CELL_BY_IDX(i2s_id, 0, name), \ .ccm_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(i2s_id)), \ .irq_connect = i2s_irq_connect_##i2s_id, \ .pinctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(i2s_id), \ .tx_sync_mode = \ DT_INST_PROP(i2s_id, nxp_tx_sync_mode), \ .rx_sync_mode = \ DT_INST_PROP(i2s_id, nxp_rx_sync_mode), \ .tx_channel = DT_INST_PROP(i2s_id, nxp_tx_channel), \ }; \ \ static struct i2s_dev_data i2s_##i2s_id##_data = { \ .dev_dma = DEVICE_DT_GET( \ DT_INST_DMAS_CTLR_BY_NAME(i2s_id, rx)), \ .tx = { \ .dma_channel = \ DT_INST_PROP(i2s_id, nxp_tx_dma_channel), \ .dma_cfg = { \ .source_burst_length = \ CONFIG_I2S_EDMA_BURST_SIZE, \ .dest_burst_length = \ CONFIG_I2S_EDMA_BURST_SIZE, \ .dma_callback = i2s_dma_tx_callback, \ .complete_callback_en = 1, \ .error_callback_dis = 1, \ .block_count = 1, \ .head_block = \ &i2s_##i2s_id##_data.tx.dma_block, \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_slot = \ DT_INST_DMAS_CELL_BY_NAME(i2s_id, \ tx, source), \ }, \ }, \ .rx = { \ .dma_channel = \ DT_INST_PROP(i2s_id, nxp_rx_dma_channel), \ .dma_cfg = { \ .source_burst_length = \ CONFIG_I2S_EDMA_BURST_SIZE, \ .dest_burst_length = \ CONFIG_I2S_EDMA_BURST_SIZE, \ .dma_callback = i2s_dma_rx_callback, \ .complete_callback_en = 1, \ .error_callback_dis = 1, \ .block_count = 1, \ .head_block = \ &i2s_##i2s_id##_data.rx.dma_block, \ .channel_direction = PERIPHERAL_TO_MEMORY, \ .dma_slot = \ 
DT_INST_DMAS_CELL_BY_NAME(i2s_id, \ rx, source), \ }, \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(i2s_id, &i2s_mcux_initialize, NULL, \ &i2s_##i2s_id##_data, &i2s_##i2s_id##_config, \ POST_KERNEL, \ CONFIG_I2S_INIT_PRIORITY, &i2s_mcux_driver_api); \ \ static void i2s_irq_connect_##i2s_id(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(i2s_id, 0, irq), \ DT_INST_IRQ_BY_IDX(i2s_id, 0, priority), \ i2s_mcux_isr, \ DEVICE_DT_INST_GET(i2s_id), 0); \ irq_enable(DT_INST_IRQN(i2s_id)); \ } DT_INST_FOREACH_STATUS_OKAY(I2S_MCUX_INIT) ```
/content/code_sandbox/drivers/i2s/i2s_mcux_sai.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,693
```unknown # Atmel SAM I2S bus driver configuration options menuconfig I2S_SAM_SSC bool "Atmel SAM MCU family I2S (SSC) Bus Driver" default y depends on DT_HAS_ATMEL_SAM_SSC_ENABLED select DMA help Enable the Inter-IC Sound (I2S) bus driver for the Atmel SAM MCU family, based on the Synchronous Serial Controller (SSC) module. if I2S_SAM_SSC config I2S_SAM_SSC_RX_BLOCK_COUNT int "RX queue length" default 4 config I2S_SAM_SSC_TX_BLOCK_COUNT int "TX queue length" default 4 config I2S_SAM_SSC_0_PIN_RF_EN bool "RF pin enabled" default y help If enabled RF signal is connected to RF pin. It will be configured as an output or an input depending on whether the receiver is working in master or slave mode. If disabled RF signal is disconnected from RF pin and connected internally to TF (Transmitter Frame Synchro signal). config I2S_SAM_SSC_0_PIN_RK_EN bool "RK pin enabled" default y help If enabled RK signal is connected to RK pin. It will be configured as an output or an input depending on whether the receiver is working in master or slave mode. If disabled RK signal is disconnected from RK pin and connected internally to TK (Transmitter Clock signal). endif # I2S_SAM_SSC ```
/content/code_sandbox/drivers/i2s/Kconfig.sam_ssc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
332
```unknown menuconfig I2S_NRFX bool "nRF I2S nrfx driver" default y depends on DT_HAS_NORDIC_NRF_I2S_ENABLED select NRFX_I2S0 if HAS_HW_NRF_I2S0 select NRFX_I2S20 if HAS_HW_NRF_I2S20 select PINCTRL help Enable support for the nrfx I2S driver for the nRF MCU series. if I2S_NRFX config I2S_NRFX_RX_BLOCK_COUNT int "RX queue length" default 4 config I2S_NRFX_TX_BLOCK_COUNT int "TX queue length" default 4 endif # I2S_NRFX ```
/content/code_sandbox/drivers/i2s/Kconfig.nrfx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
152
```c /* * */ #define DT_DRV_COMPAT st_stm32_i2s #include <string.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/i2s.h> #include <zephyr/drivers/dma/dma_stm32.h> #include <soc.h> #include <stm32_ll_rcc.h> #include <stm32_ll_spi.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/cache.h> #include "i2s_ll_stm32.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(i2s_ll_stm32); #define MODULO_INC(val, max) { val = (++val < max) ? val : 0; } static unsigned int div_round_closest(uint32_t dividend, uint32_t divisor) { return (dividend + (divisor / 2U)) / divisor; } static bool queue_is_empty(struct ring_buf *rb) { unsigned int key; key = irq_lock(); if (rb->tail != rb->head) { /* Ring buffer is not empty */ irq_unlock(key); return false; } irq_unlock(key); return true; } /* * Get data from the queue */ static int queue_get(struct ring_buf *rb, void **mem_block, size_t *size) { unsigned int key; key = irq_lock(); if (queue_is_empty(rb) == true) { irq_unlock(key); return -ENOMEM; } *mem_block = rb->buf[rb->tail].mem_block; *size = rb->buf[rb->tail].size; MODULO_INC(rb->tail, rb->len); irq_unlock(key); return 0; } /* * Put data in the queue */ static int queue_put(struct ring_buf *rb, void *mem_block, size_t size) { uint16_t head_next; unsigned int key; key = irq_lock(); head_next = rb->head; MODULO_INC(head_next, rb->len); if (head_next == rb->tail) { /* Ring buffer is full */ irq_unlock(key); return -ENOMEM; } rb->buf[rb->head].mem_block = mem_block; rb->buf[rb->head].size = size; rb->head = head_next; irq_unlock(key); return 0; } static int i2s_stm32_enable_clock(const struct device *dev) { const struct i2s_stm32_cfg *cfg = dev->config; const struct device *clk; int ret; clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } ret = 
clock_control_on(clk, (clock_control_subsys_t)&cfg->pclken[0]); if (ret != 0) { LOG_ERR("Could not enable I2S clock"); return -EIO; } if (cfg->pclk_len > 1) { /* Enable I2S clock source */ ret = clock_control_configure(clk, (clock_control_subsys_t)&cfg->pclken[1], NULL); if (ret < 0) { LOG_ERR("Could not configure I2S domain clock"); return -EIO; } } return 0; } static int i2s_stm32_set_clock(const struct device *dev, uint32_t bit_clk_freq) { const struct i2s_stm32_cfg *cfg = dev->config; uint32_t freq_in = 0U; uint8_t i2s_div, i2s_odd; if (cfg->pclk_len > 1) { if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), (clock_control_subsys_t)&cfg->pclken[1], &freq_in) < 0) { LOG_ERR("Failed call clock_control_get_rate(pclken[1])"); return -EIO; } } /* * The ratio between input clock (I2SxClk) and output * clock on the pad (I2S_CK) is obtained using the * following formula: * (i2s_div * 2) + i2s_odd */ i2s_div = div_round_closest(freq_in, bit_clk_freq); i2s_odd = (i2s_div & 0x1) ? 1 : 0; i2s_div >>= 1; /* i2s_div == 0 || i2s_div == 1 are forbidden */ if (i2s_div < 2U) { LOG_ERR("The linear prescaler value is unsupported"); return -EINVAL; } LOG_DBG("i2s_div: %d - i2s_odd: %d", i2s_div, i2s_odd); LL_I2S_SetPrescalerLinear(cfg->i2s, i2s_div); LL_I2S_SetPrescalerParity(cfg->i2s, i2s_odd); return 0; } static int i2s_stm32_configure(const struct device *dev, enum i2s_dir dir, const struct i2s_config *i2s_cfg) { const struct i2s_stm32_cfg *const cfg = dev->config; struct i2s_stm32_data *const dev_data = dev->data; /* For words greater than 16-bit the channel length is considered 32-bit */ const uint32_t channel_length = i2s_cfg->word_size > 16U ? 32U : 16U; /* * comply with the i2s_config driver remark: * When I2S data format is selected parameter channels is ignored, * number of words in a frame is always 2. */ const uint32_t num_channels = i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK ? 
2U : i2s_cfg->channels; struct stream *stream; uint32_t bit_clk_freq; bool enable_mck; int ret; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; } else if (dir == I2S_DIR_TX) { stream = &dev_data->tx; } else if (dir == I2S_DIR_BOTH) { return -ENOSYS; } else { LOG_ERR("Either RX or TX direction must be selected"); return -EINVAL; } if (stream->state != I2S_STATE_NOT_READY && stream->state != I2S_STATE_READY) { LOG_ERR("invalid state"); return -EINVAL; } stream->master = true; if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE || i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) { stream->master = false; } if (i2s_cfg->frame_clk_freq == 0U) { stream->queue_drop(stream); memset(&stream->cfg, 0, sizeof(struct i2s_config)); stream->state = I2S_STATE_NOT_READY; return 0; } memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config)); /* conditions to enable master clock output */ enable_mck = stream->master && cfg->master_clk_sel; /* set I2S bitclock */ bit_clk_freq = i2s_cfg->frame_clk_freq * channel_length * num_channels; if (enable_mck) { /* * Compensate for the master clock dividers. * MCK = N * CK, where N: * 8 when the channel frame is 16-bit wide * 4 when the channel frame is 32-bit wide */ bit_clk_freq *= channel_length == 16U ? 
4U * 2U : 4U; } ret = i2s_stm32_set_clock(dev, bit_clk_freq); if (ret < 0) { return ret; } /* set I2S Master Clock output in the MCK pin, enabled in the DT */ if (enable_mck) { LL_I2S_EnableMasterClock(cfg->i2s); } else { LL_I2S_DisableMasterClock(cfg->i2s); } /* * set I2S Data Format * 16-bit data extended on 32-bit channel length excluded */ if (i2s_cfg->word_size == 16U) { LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_16B); } else if (i2s_cfg->word_size == 24U) { LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_24B); } else if (i2s_cfg->word_size == 32U) { LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_32B); } else { LOG_ERR("invalid word size"); return -EINVAL; } /* set I2S Standard */ switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) { case I2S_FMT_DATA_FORMAT_I2S: LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PHILIPS); break; case I2S_FMT_DATA_FORMAT_PCM_SHORT: LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PCM_SHORT); break; case I2S_FMT_DATA_FORMAT_PCM_LONG: LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PCM_LONG); break; case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED: LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_MSB); break; case I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED: LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_LSB); break; default: LOG_ERR("Unsupported I2S data format"); return -EINVAL; } /* set I2S clock polarity */ if ((i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) == I2S_FMT_BIT_CLK_INV) LL_I2S_SetClockPolarity(cfg->i2s, LL_I2S_POLARITY_HIGH); else LL_I2S_SetClockPolarity(cfg->i2s, LL_I2S_POLARITY_LOW); stream->state = I2S_STATE_READY; return 0; } static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd) { struct i2s_stm32_data *const dev_data = dev->data; const struct i2s_stm32_cfg *const cfg = dev->config; struct stream *stream; unsigned int key; int ret; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; } else if (dir == I2S_DIR_TX) { stream = &dev_data->tx; } else if (dir == I2S_DIR_BOTH) { return -ENOSYS; } else { 
LOG_ERR("Either RX or TX direction must be selected"); return -EINVAL; } switch (cmd) { case I2S_TRIGGER_START: if (stream->state != I2S_STATE_READY) { LOG_ERR("START trigger: invalid state %d", stream->state); return -EIO; } __ASSERT_NO_MSG(stream->mem_block == NULL); ret = stream->stream_start(stream, dev); if (ret < 0) { LOG_ERR("START trigger failed %d", ret); return ret; } stream->state = I2S_STATE_RUNNING; stream->last_block = false; break; case I2S_TRIGGER_STOP: key = irq_lock(); if (stream->state != I2S_STATE_RUNNING) { irq_unlock(key); LOG_ERR("STOP trigger: invalid state"); return -EIO; } do_trigger_stop: if (ll_func_i2s_dma_busy(cfg->i2s)) { stream->state = I2S_STATE_STOPPING; /* * Indicate that the transition to I2S_STATE_STOPPING * is triggered by STOP command */ stream->tx_stop_for_drain = false; } else { stream->stream_disable(stream, dev); stream->state = I2S_STATE_READY; stream->last_block = true; } irq_unlock(key); break; case I2S_TRIGGER_DRAIN: key = irq_lock(); if (stream->state != I2S_STATE_RUNNING) { irq_unlock(key); LOG_ERR("DRAIN trigger: invalid state"); return -EIO; } if (dir == I2S_DIR_TX) { if ((queue_is_empty(&stream->mem_block_queue) == false) || (ll_func_i2s_dma_busy(cfg->i2s))) { stream->state = I2S_STATE_STOPPING; /* * Indicate that the transition to I2S_STATE_STOPPING * is triggered by DRAIN command */ stream->tx_stop_for_drain = true; } else { stream->stream_disable(stream, dev); stream->state = I2S_STATE_READY; } } else if (dir == I2S_DIR_RX) { goto do_trigger_stop; } else { LOG_ERR("Unavailable direction"); return -EINVAL; } irq_unlock(key); break; case I2S_TRIGGER_DROP: if (stream->state == I2S_STATE_NOT_READY) { LOG_ERR("DROP trigger: invalid state"); return -EIO; } stream->stream_disable(stream, dev); stream->queue_drop(stream); stream->state = I2S_STATE_READY; break; case I2S_TRIGGER_PREPARE: if (stream->state != I2S_STATE_ERROR) { LOG_ERR("PREPARE trigger: invalid state"); return -EIO; } stream->state = I2S_STATE_READY; 
stream->queue_drop(stream); break; default: LOG_ERR("Unsupported trigger command"); return -EINVAL; } return 0; } static int i2s_stm32_read(const struct device *dev, void **mem_block, size_t *size) { struct i2s_stm32_data *const dev_data = dev->data; int ret; if (dev_data->rx.state == I2S_STATE_NOT_READY) { LOG_DBG("invalid state"); return -EIO; } if (dev_data->rx.state != I2S_STATE_ERROR) { ret = k_sem_take(&dev_data->rx.sem, SYS_TIMEOUT_MS(dev_data->rx.cfg.timeout)); if (ret < 0) { return ret; } } /* Get data from the beginning of RX queue */ ret = queue_get(&dev_data->rx.mem_block_queue, mem_block, size); if (ret < 0) { return -EIO; } return 0; } static int i2s_stm32_write(const struct device *dev, void *mem_block, size_t size) { struct i2s_stm32_data *const dev_data = dev->data; int ret; if (dev_data->tx.state != I2S_STATE_RUNNING && dev_data->tx.state != I2S_STATE_READY) { LOG_DBG("invalid state"); return -EIO; } ret = k_sem_take(&dev_data->tx.sem, SYS_TIMEOUT_MS(dev_data->tx.cfg.timeout)); if (ret < 0) { return ret; } /* Add data to the end of the TX queue */ queue_put(&dev_data->tx.mem_block_queue, mem_block, size); return 0; } static const struct i2s_driver_api i2s_stm32_driver_api = { .configure = i2s_stm32_configure, .read = i2s_stm32_read, .write = i2s_stm32_write, .trigger = i2s_stm32_trigger, }; #define STM32_DMA_NUM_CHANNELS 8 static const struct device *active_dma_rx_channel[STM32_DMA_NUM_CHANNELS]; static const struct device *active_dma_tx_channel[STM32_DMA_NUM_CHANNELS]; static int reload_dma(const struct device *dev_dma, uint32_t channel, struct dma_config *dcfg, void *src, void *dst, uint32_t blk_size) { int ret; ret = dma_reload(dev_dma, channel, (uint32_t)src, (uint32_t)dst, blk_size); if (ret < 0) { return ret; } ret = dma_start(dev_dma, channel); return ret; } static int start_dma(const struct device *dev_dma, uint32_t channel, struct dma_config *dcfg, void *src, bool src_addr_increment, void *dst, bool dst_addr_increment, uint8_t 
fifo_threshold, uint32_t blk_size) { struct dma_block_config blk_cfg; int ret; memset(&blk_cfg, 0, sizeof(blk_cfg)); blk_cfg.block_size = blk_size; blk_cfg.source_address = (uint32_t)src; blk_cfg.dest_address = (uint32_t)dst; if (src_addr_increment) { blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } if (dst_addr_increment) { blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } blk_cfg.fifo_mode_control = fifo_threshold; dcfg->head_block = &blk_cfg; ret = dma_config(dev_dma, channel, dcfg); if (ret < 0) { return ret; } ret = dma_start(dev_dma, channel); return ret; } static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel); static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel); static void rx_stream_disable(struct stream *stream, const struct device *dev); static void tx_stream_disable(struct stream *stream, const struct device *dev); /* This function is executed in the interrupt context */ static void dma_rx_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status) { const struct device *dev = get_dev_from_rx_dma_channel(channel); const struct i2s_stm32_cfg *cfg = dev->config; struct i2s_stm32_data *const dev_data = dev->data; struct stream *stream = &dev_data->rx; void *mblk_tmp; int ret; if (status < 0) { ret = -EIO; stream->state = I2S_STATE_ERROR; goto rx_disable; } __ASSERT_NO_MSG(stream->mem_block != NULL); /* Stop reception if there was an error */ if (stream->state == I2S_STATE_ERROR) { goto rx_disable; } mblk_tmp = stream->mem_block; /* Prepare to receive the next data block */ ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block, K_NO_WAIT); if (ret < 0) { stream->state = I2S_STATE_ERROR; goto rx_disable; } ret = reload_dma(stream->dev_dma, stream->dma_channel, &stream->dma_cfg, #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s) (void 
*)LL_SPI_DMA_GetRxRegAddr(cfg->i2s), #else (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), #endif stream->mem_block, stream->cfg.block_size); if (ret < 0) { LOG_DBG("Failed to start RX DMA transfer: %d", ret); goto rx_disable; } /* Assure cache coherency after DMA write operation */ sys_cache_data_invd_range(mblk_tmp, stream->cfg.block_size); /* All block data received */ ret = queue_put(&stream->mem_block_queue, mblk_tmp, stream->cfg.block_size); if (ret < 0) { stream->state = I2S_STATE_ERROR; goto rx_disable; } k_sem_give(&stream->sem); /* Stop reception if we were requested */ if (stream->state == I2S_STATE_STOPPING) { stream->state = I2S_STATE_READY; goto rx_disable; } return; rx_disable: rx_stream_disable(stream, dev); } static void dma_tx_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status) { const struct device *dev = get_dev_from_tx_dma_channel(channel); const struct i2s_stm32_cfg *cfg = dev->config; struct i2s_stm32_data *const dev_data = dev->data; struct stream *stream = &dev_data->tx; size_t mem_block_size; int ret; if (status < 0) { ret = -EIO; stream->state = I2S_STATE_ERROR; goto tx_disable; } __ASSERT_NO_MSG(stream->mem_block != NULL); /* All block data sent */ k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block); stream->mem_block = NULL; /* Stop transmission if there was an error */ if (stream->state == I2S_STATE_ERROR) { LOG_ERR("TX error detected"); goto tx_disable; } /* Check if we finished transferring one block and stopping is requested */ if ((stream->state == I2S_STATE_STOPPING) && (status == DMA_STATUS_COMPLETE)) { /* * Check if all tx samples have been completely handled * as stated in zephyr i2s specification, in case of DRAIN command * send all data in the transmit queue and stop the transmission. 
*/ if (queue_is_empty(&stream->mem_block_queue) == true) { stream->queue_drop(stream); stream->state = I2S_STATE_READY; goto tx_disable; } else if (stream->tx_stop_for_drain == false) { /* * In case of STOP command, just stop the transmission * at the current. The transmission can be resumed. */ stream->state = I2S_STATE_READY; goto tx_disable; } /* else: DRAIN trigger -> continue TX normally until queue is empty */ } /* Stop transmission if we were requested */ if (stream->last_block) { stream->state = I2S_STATE_READY; goto tx_disable; } /* Prepare to send the next data block */ ret = queue_get(&stream->mem_block_queue, &stream->mem_block, &mem_block_size); if (ret < 0) { if (stream->state == I2S_STATE_STOPPING) { stream->state = I2S_STATE_READY; } else { stream->state = I2S_STATE_ERROR; } goto tx_disable; } k_sem_give(&stream->sem); /* Assure cache coherency before DMA read operation */ sys_cache_data_flush_range(stream->mem_block, mem_block_size); ret = reload_dma(stream->dev_dma, stream->dma_channel, &stream->dma_cfg, stream->mem_block, #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s) (void *)LL_SPI_DMA_GetTxRegAddr(cfg->i2s), #else (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), #endif mem_block_size); if (ret < 0) { LOG_DBG("Failed to start TX DMA transfer: %d", ret); goto tx_disable; } return; tx_disable: tx_stream_disable(stream, dev); } static uint32_t i2s_stm32_irq_count; static uint32_t i2s_stm32_irq_ovr_count; static uint32_t i2s_stm32_irq_udr_count; static void i2s_stm32_isr(const struct device *dev) { const struct i2s_stm32_cfg *cfg = dev->config; /* OVR error must be explicitly cleared */ if (LL_I2S_IsActiveFlag_OVR(cfg->i2s)) { i2s_stm32_irq_ovr_count++; LL_I2S_ClearFlag_OVR(cfg->i2s); } /* NOTE: UDR error must be explicitly cleared on STM32H7 */ if (LL_I2S_IsActiveFlag_UDR(cfg->i2s)) { i2s_stm32_irq_udr_count++; LL_I2S_ClearFlag_UDR(cfg->i2s); } i2s_stm32_irq_count++; } static int i2s_stm32_initialize(const struct device *dev) { const struct i2s_stm32_cfg 
*cfg = dev->config; struct i2s_stm32_data *const dev_data = dev->data; struct stream *stream = &dev_data->tx; int ret, i; /* Initialize the variable used to handle the TX */ stream->tx_stop_for_drain = false; /* Enable I2S clock propagation */ ret = i2s_stm32_enable_clock(dev); if (ret < 0) { LOG_ERR("%s: clock enabling failed: %d", __func__, ret); return -EIO; } /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("I2S pinctrl setup failed (%d)", ret); return ret; } cfg->irq_config(dev); k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_STM32_RX_BLOCK_COUNT); k_sem_init(&dev_data->tx.sem, CONFIG_I2S_STM32_TX_BLOCK_COUNT, CONFIG_I2S_STM32_TX_BLOCK_COUNT); for (i = 0; i < STM32_DMA_NUM_CHANNELS; i++) { active_dma_rx_channel[i] = NULL; active_dma_tx_channel[i] = NULL; } /* Get the binding to the DMA device */ if (!device_is_ready(dev_data->tx.dev_dma)) { LOG_ERR("%s device not ready", dev_data->tx.dev_dma->name); return -ENODEV; } if (!device_is_ready(dev_data->rx.dev_dma)) { LOG_ERR("%s device not ready", dev_data->rx.dev_dma->name); return -ENODEV; } LOG_INF("%s inited", dev->name); return 0; } static int rx_stream_start(struct stream *stream, const struct device *dev) { const struct i2s_stm32_cfg *cfg = dev->config; int ret; ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block, K_NO_WAIT); if (ret < 0) { return ret; } if (stream->master) { LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_RX); } else { LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_RX); } /* remember active RX DMA channel (used in callback) */ active_dma_rx_channel[stream->dma_channel] = dev; ret = start_dma(stream->dev_dma, stream->dma_channel, &stream->dma_cfg, #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s) (void *)LL_SPI_DMA_GetRxRegAddr(cfg->i2s), #else (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), #endif stream->src_addr_increment, stream->mem_block, stream->dst_addr_increment, 
stream->fifo_threshold, stream->cfg.block_size); if (ret < 0) { LOG_ERR("Failed to start RX DMA transfer: %d", ret); return ret; } LL_I2S_EnableDMAReq_RX(cfg->i2s); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s) LL_I2S_EnableIT_OVR(cfg->i2s); LL_I2S_EnableIT_UDR(cfg->i2s); LL_I2S_EnableIT_FRE(cfg->i2s); LL_I2S_Enable(cfg->i2s); LL_SPI_StartMasterTransfer(cfg->i2s); #else LL_I2S_EnableIT_ERR(cfg->i2s); LL_I2S_Enable(cfg->i2s); #endif return 0; } static int tx_stream_start(struct stream *stream, const struct device *dev) { const struct i2s_stm32_cfg *cfg = dev->config; size_t mem_block_size; int ret; ret = queue_get(&stream->mem_block_queue, &stream->mem_block, &mem_block_size); if (ret < 0) { return ret; } k_sem_give(&stream->sem); /* Assure cache coherency before DMA read operation */ sys_cache_data_flush_range(stream->mem_block, mem_block_size); if (stream->master) { LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_TX); } else { LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_TX); } /* remember active TX DMA channel (used in callback) */ active_dma_tx_channel[stream->dma_channel] = dev; ret = start_dma(stream->dev_dma, stream->dma_channel, &stream->dma_cfg, stream->mem_block, stream->src_addr_increment, #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s) (void *)LL_SPI_DMA_GetTxRegAddr(cfg->i2s), #else (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), #endif stream->dst_addr_increment, stream->fifo_threshold, stream->cfg.block_size); if (ret < 0) { LOG_ERR("Failed to start TX DMA transfer: %d", ret); return ret; } LL_I2S_EnableDMAReq_TX(cfg->i2s); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s) LL_I2S_EnableIT_OVR(cfg->i2s); LL_I2S_EnableIT_UDR(cfg->i2s); LL_I2S_EnableIT_FRE(cfg->i2s); LL_I2S_Enable(cfg->i2s); LL_SPI_StartMasterTransfer(cfg->i2s); #else LL_I2S_EnableIT_ERR(cfg->i2s); LL_I2S_Enable(cfg->i2s); #endif return 0; } static void rx_stream_disable(struct stream *stream, const struct device *dev) { const struct i2s_stm32_cfg *cfg = dev->config; 
LL_I2S_DisableDMAReq_RX(cfg->i2s); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s) LL_I2S_DisableIT_OVR(cfg->i2s); LL_I2S_DisableIT_UDR(cfg->i2s); LL_I2S_DisableIT_FRE(cfg->i2s); #else LL_I2S_DisableIT_ERR(cfg->i2s); #endif dma_stop(stream->dev_dma, stream->dma_channel); if (stream->mem_block != NULL) { k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block); stream->mem_block = NULL; } LL_I2S_Disable(cfg->i2s); active_dma_rx_channel[stream->dma_channel] = NULL; } static void tx_stream_disable(struct stream *stream, const struct device *dev) { const struct i2s_stm32_cfg *cfg = dev->config; LL_I2S_DisableDMAReq_TX(cfg->i2s); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s) LL_I2S_DisableIT_OVR(cfg->i2s); LL_I2S_DisableIT_UDR(cfg->i2s); LL_I2S_DisableIT_FRE(cfg->i2s); #else LL_I2S_DisableIT_ERR(cfg->i2s); #endif dma_stop(stream->dev_dma, stream->dma_channel); if (stream->mem_block != NULL) { k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block); stream->mem_block = NULL; } /* Wait for TX queue to drain before disabling */ k_busy_wait(100); LL_I2S_Disable(cfg->i2s); active_dma_tx_channel[stream->dma_channel] = NULL; } static void rx_queue_drop(struct stream *stream) { size_t size; void *mem_block; while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) { k_mem_slab_free(stream->cfg.mem_slab, mem_block); } k_sem_reset(&stream->sem); } static void tx_queue_drop(struct stream *stream) { size_t size; void *mem_block; unsigned int n = 0U; while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) { k_mem_slab_free(stream->cfg.mem_slab, mem_block); n++; } for (; n > 0; n--) { k_sem_give(&stream->sem); } } static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel) { return active_dma_rx_channel[dma_channel]; } static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel) { return active_dma_tx_channel[dma_channel]; } /* src_dev and dest_dev should be 'MEMORY' or 'PERIPHERAL'. 
*/ #define I2S_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev) \ .dir = { \ .dev_dma = DEVICE_DT_GET(STM32_DMA_CTLR(index, dir)), \ .dma_channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel), \ .dma_cfg = { \ .block_count = 2, \ .dma_slot = STM32_DMA_SLOT(index, dir, slot),\ .channel_direction = src_dev##_TO_##dest_dev, \ .source_data_size = 2, /* 16bit default */ \ .dest_data_size = 2, /* 16bit default */ \ .source_burst_length = 1, /* SINGLE transfer */ \ .dest_burst_length = 1, \ .channel_priority = STM32_DMA_CONFIG_PRIORITY( \ STM32_DMA_CHANNEL_CONFIG(index, dir)),\ .dma_callback = dma_##dir##_callback, \ }, \ .src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC( \ STM32_DMA_CHANNEL_CONFIG(index, dir)), \ .dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC( \ STM32_DMA_CHANNEL_CONFIG(index, dir)), \ .fifo_threshold = STM32_DMA_FEATURES_FIFO_THRESHOLD( \ STM32_DMA_FEATURES(index, dir)), \ .stream_start = dir##_stream_start, \ .stream_disable = dir##_stream_disable, \ .queue_drop = dir##_queue_drop, \ .mem_block_queue.buf = dir##_##index##_ring_buf, \ .mem_block_queue.len = ARRAY_SIZE(dir##_##index##_ring_buf) \ } #define I2S_STM32_INIT(index) \ \ static void i2s_stm32_irq_config_func_##index(const struct device *dev);\ \ PINCTRL_DT_INST_DEFINE(index); \ \ static const struct stm32_pclken clk_##index[] = \ STM32_DT_INST_CLOCKS(index); \ \ static const struct i2s_stm32_cfg i2s_stm32_config_##index = { \ .i2s = (SPI_TypeDef *)DT_INST_REG_ADDR(index), \ .pclken = clk_##index, \ .pclk_len = DT_INST_NUM_CLOCKS(index), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index), \ .irq_config = i2s_stm32_irq_config_func_##index, \ .master_clk_sel = DT_INST_PROP(index, mck_enabled) \ }; \ \ struct queue_item rx_##index##_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1];\ struct queue_item tx_##index##_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1];\ \ static struct i2s_stm32_data i2s_stm32_data_##index = { \ UTIL_AND(DT_INST_DMAS_HAS_NAME(index, rx), \ 
I2S_DMA_CHANNEL_INIT(index, rx, RX, PERIPHERAL, MEMORY)),\ UTIL_AND(DT_INST_DMAS_HAS_NAME(index, tx), \ I2S_DMA_CHANNEL_INIT(index, tx, TX, MEMORY, PERIPHERAL)),\ }; \ DEVICE_DT_INST_DEFINE(index, \ &i2s_stm32_initialize, NULL, \ &i2s_stm32_data_##index, \ &i2s_stm32_config_##index, POST_KERNEL, \ CONFIG_I2S_INIT_PRIORITY, &i2s_stm32_driver_api); \ \ static void i2s_stm32_irq_config_func_##index(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(index), \ DT_INST_IRQ(index, priority), \ i2s_stm32_isr, DEVICE_DT_INST_GET(index), 0); \ irq_enable(DT_INST_IRQN(index)); \ } DT_INST_FOREACH_STATUS_OKAY(I2S_STM32_INIT) ```
/content/code_sandbox/drivers/i2s/i2s_ll_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,379
```unknown
# I2S (Inter-IC Sound) driver configuration options

#
# I2S Drivers
#
menuconfig I2S
	bool "Inter-IC Sound (I2S) bus drivers"
	help
	  Enable support for the I2S (Inter-IC Sound) hardware bus.

if I2S

config I2S_INIT_PRIORITY
	int "Init priority"
	default KERNEL_INIT_PRIORITY_DEVICE
	help
	  Device driver initialization priority.

# Register the "i2s" log module using the shared logging template.
module = I2S
module-str = i2s
source "subsys/logging/Kconfig.template.log_config"

comment "Device Drivers"

# Pull in every per-controller Kconfig fragment in this directory.
source "drivers/i2s/Kconfig.*"

endif # I2S
```
/content/code_sandbox/drivers/i2s/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
138
```unknown
config I2S_MCUX_FLEXCOMM
	bool "MCUX FLEXCOMM I2S driver"
	default y
	depends on DT_HAS_NXP_LPC_I2S_ENABLED
	select DMA
	help
	  Enable support for mcux flexcomm i2s driver.

if I2S_MCUX_FLEXCOMM

config I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT
	int "RX queue length"
	default 4

config I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT
	int "TX queue length"
	default 4

endif # I2S_MCUX_FLEXCOMM
```
/content/code_sandbox/drivers/i2s/Kconfig.mcux_flexcomm
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
126
```objective-c
/*
 *
 */

#ifndef _STM32_I2S_H_
#define _STM32_I2S_H_

/* One queued audio buffer: slab-allocated memory block plus its byte size. */
struct queue_item {
	void *mem_block;
	size_t size;
};

/* Minimal ring buffer implementation */
struct ring_buf {
	struct queue_item *buf;	/* backing array of queue items */
	uint16_t len;		/* capacity of buf (one slot kept unused) */
	uint16_t head;		/* producer index */
	uint16_t tail;		/* consumer index */
};

/* Device constant configuration parameters */
struct i2s_stm32_cfg {
	SPI_TypeDef *i2s;			/* SPI/I2S peripheral registers */
	const struct stm32_pclken *pclken;	/* clock entries from devicetree */
	size_t pclk_len;			/* number of pclken entries */
	const struct pinctrl_dev_config *pcfg;
	void (*irq_config)(const struct device *dev);
	bool master_clk_sel;			/* enable MCK output pin */
};

/* Per-direction (RX or TX) stream state. */
struct stream {
	int32_t state;			/* i2s_state of this direction */
	struct k_sem sem;		/* counts available queue slots/blocks */

	const struct device *dev_dma;
	uint32_t dma_channel;
	struct dma_config dma_cfg;
	uint8_t priority;
	bool src_addr_increment;
	bool dst_addr_increment;
	uint8_t fifo_threshold;
	/* true when STOPPING was entered via DRAIN (finish queue first) */
	bool tx_stop_for_drain;

	struct i2s_config cfg;
	struct ring_buf mem_block_queue;
	void *mem_block;		/* block currently owned by the DMA */
	bool last_block;
	bool master;
	int (*stream_start)(struct stream *, const struct device *dev);
	void (*stream_disable)(struct stream *, const struct device *dev);
	void (*queue_drop)(struct stream *);
};

/* Device run time data */
struct i2s_stm32_data {
	struct stream rx;
	struct stream tx;
};

/* checks that DMA Tx packet is fully transmitted over the I2S */
static inline uint32_t ll_func_i2s_dma_busy(SPI_TypeDef *i2s)
{
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	return LL_SPI_IsActiveFlag_TXC(i2s) == 0;
#else
	/* the I2S Tx empty and busy flags are needed */
	return (LL_SPI_IsActiveFlag_TXE(i2s) &&
		!LL_SPI_IsActiveFlag_BSY(i2s));
#endif
}

#endif	/* _STM32_I2S_H_ */
```
/content/code_sandbox/drivers/i2s/i2s_ll_stm32.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
411
```c /* * */ #include <stdlib.h> #include <zephyr/drivers/i2s.h> #include <zephyr/drivers/clock_control/nrf_clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <soc.h> #include <nrfx_i2s.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(i2s_nrfx, CONFIG_I2S_LOG_LEVEL); struct stream_cfg { struct i2s_config cfg; nrfx_i2s_config_t nrfx_cfg; }; struct i2s_buf { void *mem_block; size_t size; }; struct i2s_nrfx_drv_data { struct onoff_manager *clk_mgr; struct onoff_client clk_cli; struct stream_cfg tx; struct k_msgq tx_queue; struct stream_cfg rx; struct k_msgq rx_queue; const nrfx_i2s_t *p_i2s; const uint32_t *last_tx_buffer; enum i2s_state state; enum i2s_dir active_dir; bool stop; /* stop after the current (TX or RX) block */ bool discard_rx; /* discard further RX blocks */ volatile bool next_tx_buffer_needed; bool tx_configured : 1; bool rx_configured : 1; bool request_clock : 1; }; struct i2s_nrfx_drv_cfg { nrfx_i2s_data_handler_t data_handler; nrfx_i2s_t i2s; nrfx_i2s_config_t nrfx_def_cfg; const struct pinctrl_dev_config *pcfg; enum clock_source { PCLK32M, PCLK32M_HFXO, ACLK } clk_src; }; /* Finds the clock settings that give the frame clock frequency closest to * the one requested, taking into account the hardware limitations. 
*/ static void find_suitable_clock(const struct i2s_nrfx_drv_cfg *drv_cfg, nrfx_i2s_config_t *config, const struct i2s_config *i2s_cfg) { static const struct { uint16_t ratio_val; nrf_i2s_ratio_t ratio_enum; } ratios[] = { { 32, NRF_I2S_RATIO_32X }, { 48, NRF_I2S_RATIO_48X }, { 64, NRF_I2S_RATIO_64X }, { 96, NRF_I2S_RATIO_96X }, { 128, NRF_I2S_RATIO_128X }, { 192, NRF_I2S_RATIO_192X }, { 256, NRF_I2S_RATIO_256X }, { 384, NRF_I2S_RATIO_384X }, { 512, NRF_I2S_RATIO_512X } }; const uint32_t src_freq = (NRF_I2S_HAS_CLKCONFIG && drv_cfg->clk_src == ACLK) /* The I2S_NRFX_DEVICE() macro contains build assertions that * make sure that the ACLK clock source is only used when it is * available and only with the "hfclkaudio-frequency" property * defined, but the default value of 0 here needs to be used to * prevent compilation errors when the property is not defined * (this expression will be eventually optimized away then). */ ? DT_PROP_OR(DT_NODELABEL(clock), hfclkaudio_frequency, 0) : 32*1000*1000UL; uint32_t bits_per_frame = 2 * i2s_cfg->word_size; uint32_t best_diff = UINT32_MAX; uint8_t r, best_r = 0; nrf_i2s_mck_t best_mck_cfg = 0; uint32_t best_mck = 0; for (r = 0; (best_diff != 0) && (r < ARRAY_SIZE(ratios)); ++r) { /* Only multiples of the frame width can be used as ratios. 
*/ if ((ratios[r].ratio_val % bits_per_frame) != 0) { continue; } if (IS_ENABLED(CONFIG_SOC_SERIES_NRF53X) || IS_ENABLED(CONFIG_SOC_SERIES_NRF54LX)) { uint32_t requested_mck = i2s_cfg->frame_clk_freq * ratios[r].ratio_val; /* As specified in the nRF5340 PS: * * MCKFREQ = 4096 * floor(f_MCK * 1048576 / * (f_source + f_MCK / 2)) * f_actual = f_source / * floor(1048576 * 4096 / MCKFREQ) */ enum { MCKCONST = 1048576 }; uint32_t mck_factor = (uint32_t)(((uint64_t)requested_mck * MCKCONST) / (src_freq + requested_mck / 2)); /* skip cases when mck_factor is too big for dividing */ if (mck_factor > MCKCONST) { continue; } uint32_t actual_mck = src_freq / (MCKCONST / mck_factor); uint32_t lrck_freq = actual_mck / ratios[r].ratio_val; uint32_t diff = lrck_freq >= i2s_cfg->frame_clk_freq ? (lrck_freq - i2s_cfg->frame_clk_freq) : (i2s_cfg->frame_clk_freq - lrck_freq); if (diff < best_diff) { best_mck_cfg = mck_factor * 4096; best_mck = actual_mck; best_r = r; best_diff = diff; } } else { static const struct { uint8_t divider_val; nrf_i2s_mck_t divider_enum; } dividers[] = { { 8, NRF_I2S_MCK_32MDIV8 }, { 10, NRF_I2S_MCK_32MDIV10 }, { 11, NRF_I2S_MCK_32MDIV11 }, { 15, NRF_I2S_MCK_32MDIV15 }, { 16, NRF_I2S_MCK_32MDIV16 }, { 21, NRF_I2S_MCK_32MDIV21 }, { 23, NRF_I2S_MCK_32MDIV23 }, { 30, NRF_I2S_MCK_32MDIV30 }, { 31, NRF_I2S_MCK_32MDIV31 }, { 32, NRF_I2S_MCK_32MDIV32 }, { 42, NRF_I2S_MCK_32MDIV42 }, { 63, NRF_I2S_MCK_32MDIV63 }, { 125, NRF_I2S_MCK_32MDIV125 } }; for (uint8_t d = 0; (best_diff != 0) && (d < ARRAY_SIZE(dividers)); ++d) { uint32_t mck_freq = src_freq / dividers[d].divider_val; uint32_t lrck_freq = mck_freq / ratios[r].ratio_val; uint32_t diff = lrck_freq >= i2s_cfg->frame_clk_freq ? 
(lrck_freq - i2s_cfg->frame_clk_freq) : (i2s_cfg->frame_clk_freq - lrck_freq); if (diff < best_diff) { best_mck_cfg = dividers[d].divider_enum; best_mck = mck_freq; best_r = r; best_diff = diff; } /* Since dividers are in ascending order, stop * checking next ones for the current ratio * after resulting LRCK frequency falls below * the one requested. */ if (lrck_freq < i2s_cfg->frame_clk_freq) { break; } } } } config->mck_setup = best_mck_cfg; config->ratio = ratios[best_r].ratio_enum; LOG_INF("I2S MCK frequency: %u, actual PCM rate: %u", best_mck, best_mck / ratios[best_r].ratio_val); } static bool get_next_tx_buffer(struct i2s_nrfx_drv_data *drv_data, nrfx_i2s_buffers_t *buffers) { struct i2s_buf buf; int ret = k_msgq_get(&drv_data->tx_queue, &buf, K_NO_WAIT); if (ret == 0) { buffers->p_tx_buffer = buf.mem_block; buffers->buffer_size = buf.size / sizeof(uint32_t); } return (ret == 0); } static bool get_next_rx_buffer(struct i2s_nrfx_drv_data *drv_data, nrfx_i2s_buffers_t *buffers) { int ret = k_mem_slab_alloc(drv_data->rx.cfg.mem_slab, (void **)&buffers->p_rx_buffer, K_NO_WAIT); if (ret < 0) { LOG_ERR("Failed to allocate next RX buffer: %d", ret); return false; } return true; } static void free_tx_buffer(struct i2s_nrfx_drv_data *drv_data, const void *buffer) { k_mem_slab_free(drv_data->tx.cfg.mem_slab, (void *)buffer); LOG_DBG("Freed TX %p", buffer); } static void free_rx_buffer(struct i2s_nrfx_drv_data *drv_data, void *buffer) { k_mem_slab_free(drv_data->rx.cfg.mem_slab, buffer); LOG_DBG("Freed RX %p", buffer); } static bool supply_next_buffers(struct i2s_nrfx_drv_data *drv_data, nrfx_i2s_buffers_t *next) { if (drv_data->active_dir != I2S_DIR_TX) { /* -> RX active */ if (!get_next_rx_buffer(drv_data, next)) { drv_data->state = I2S_STATE_ERROR; nrfx_i2s_stop(drv_data->p_i2s); return false; } /* Set buffer size if there is no TX buffer (which effectively * controls how many bytes will be received). 
*/ if (drv_data->active_dir == I2S_DIR_RX) { next->buffer_size = drv_data->rx.cfg.block_size / sizeof(uint32_t); } } drv_data->last_tx_buffer = next->p_tx_buffer; LOG_DBG("Next buffers: %p/%p", next->p_tx_buffer, next->p_rx_buffer); nrfx_i2s_next_buffers_set(drv_data->p_i2s, next); return true; } static void data_handler(const struct device *dev, const nrfx_i2s_buffers_t *released, uint32_t status) { struct i2s_nrfx_drv_data *drv_data = dev->data; bool stop_transfer = false; if (status & NRFX_I2S_STATUS_TRANSFER_STOPPED) { if (drv_data->state == I2S_STATE_STOPPING) { drv_data->state = I2S_STATE_READY; } if (drv_data->last_tx_buffer) { /* Usually, these pointers are equal, i.e. the last TX * buffer that were to be transferred is released by the * driver after it stops. The last TX buffer pointer is * then set to NULL here so that the buffer can be freed * below, just as any other TX buffer released by the * driver. However, it may happen that the buffer is not * released this way, for example, when the transfer * ends with an error because an RX buffer allocation * fails. In such case, the last TX buffer needs to be * freed here. */ if (drv_data->last_tx_buffer != released->p_tx_buffer) { free_tx_buffer(drv_data, drv_data->last_tx_buffer); } drv_data->last_tx_buffer = NULL; } nrfx_i2s_uninit(drv_data->p_i2s); if (drv_data->request_clock) { (void)onoff_release(drv_data->clk_mgr); } } if (released == NULL) { /* This means that buffers for the next part of the transfer * were not supplied and the previous ones cannot be released * yet, as pointers to them were latched in the I2S registers. * It is not an error when the transfer is to be stopped (those * buffers will be released after the transfer actually stops). 
*/ if (drv_data->state != I2S_STATE_STOPPING) { LOG_ERR("Next buffers not supplied on time"); drv_data->state = I2S_STATE_ERROR; } nrfx_i2s_stop(drv_data->p_i2s); return; } if (released->p_rx_buffer) { if (drv_data->discard_rx) { free_rx_buffer(drv_data, released->p_rx_buffer); } else { struct i2s_buf buf = { .mem_block = released->p_rx_buffer, .size = released->buffer_size * sizeof(uint32_t) }; int ret = k_msgq_put(&drv_data->rx_queue, &buf, K_NO_WAIT); if (ret < 0) { LOG_ERR("No room in RX queue"); drv_data->state = I2S_STATE_ERROR; stop_transfer = true; free_rx_buffer(drv_data, released->p_rx_buffer); } else { LOG_DBG("Queued RX %p", released->p_rx_buffer); /* If the TX direction is not active and * the transfer should be stopped after * the current block, stop the reception. */ if (drv_data->active_dir == I2S_DIR_RX && drv_data->stop) { drv_data->discard_rx = true; stop_transfer = true; } } } } if (released->p_tx_buffer) { /* If the last buffer that was to be transferred has just been * released, it is time to stop the transfer. */ if (released->p_tx_buffer == drv_data->last_tx_buffer) { drv_data->discard_rx = true; stop_transfer = true; } else { free_tx_buffer(drv_data, released->p_tx_buffer); } } if (stop_transfer) { nrfx_i2s_stop(drv_data->p_i2s); } else if (status & NRFX_I2S_STATUS_NEXT_BUFFERS_NEEDED) { nrfx_i2s_buffers_t next = { 0 }; if (drv_data->active_dir != I2S_DIR_RX) { /* -> TX active */ if (drv_data->stop) { /* If the stream is to be stopped, don't get * the next TX buffer from the queue, instead * supply the one used last time (it won't be * transferred, the stream will stop right * before this buffer would be started again). */ next.p_tx_buffer = drv_data->last_tx_buffer; next.buffer_size = 1; } else if (get_next_tx_buffer(drv_data, &next)) { /* Next TX buffer successfully retrieved from * the queue, nothing more to do here. 
*/ } else if (drv_data->state == I2S_STATE_STOPPING) { /* If there are no more TX blocks queued and * the current state is STOPPING (so the DRAIN * command was triggered) it is time to finish * the transfer. */ drv_data->stop = true; /* Supply the same buffer as last time; it will * not be transferred anyway, as the transfer * will be stopped earlier. */ next.p_tx_buffer = drv_data->last_tx_buffer; next.buffer_size = 1; } else { /* Next TX buffer cannot be supplied now. * Defer it to when the user writes more data. */ drv_data->next_tx_buffer_needed = true; return; } } (void)supply_next_buffers(drv_data, &next); } } static void purge_queue(const struct device *dev, enum i2s_dir dir) { struct i2s_nrfx_drv_data *drv_data = dev->data; struct i2s_buf buf; if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) { while (k_msgq_get(&drv_data->tx_queue, &buf, K_NO_WAIT) == 0) { free_tx_buffer(drv_data, buf.mem_block); } } if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) { while (k_msgq_get(&drv_data->rx_queue, &buf, K_NO_WAIT) == 0) { free_rx_buffer(drv_data, buf.mem_block); } } } static int i2s_nrfx_configure(const struct device *dev, enum i2s_dir dir, const struct i2s_config *i2s_cfg) { struct i2s_nrfx_drv_data *drv_data = dev->data; const struct i2s_nrfx_drv_cfg *drv_cfg = dev->config; nrfx_i2s_config_t nrfx_cfg; if (drv_data->state != I2S_STATE_READY) { LOG_ERR("Cannot configure in state: %d", drv_data->state); return -EINVAL; } if (i2s_cfg->frame_clk_freq == 0) { /* -> reset state */ purge_queue(dev, dir); if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) { drv_data->tx_configured = false; memset(&drv_data->tx, 0, sizeof(drv_data->tx)); } if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) { drv_data->rx_configured = false; memset(&drv_data->rx, 0, sizeof(drv_data->rx)); } return 0; } __ASSERT_NO_MSG(i2s_cfg->mem_slab != NULL && i2s_cfg->block_size != 0); if ((i2s_cfg->block_size % sizeof(uint32_t)) != 0) { LOG_ERR("This device can transfer only full 32-bit words"); return -EINVAL; } 
nrfx_cfg = drv_cfg->nrfx_def_cfg; switch (i2s_cfg->word_size) { case 8: nrfx_cfg.sample_width = NRF_I2S_SWIDTH_8BIT; break; case 16: nrfx_cfg.sample_width = NRF_I2S_SWIDTH_16BIT; break; case 24: nrfx_cfg.sample_width = NRF_I2S_SWIDTH_24BIT; break; #if defined(I2S_CONFIG_SWIDTH_SWIDTH_32Bit) case 32: nrfx_cfg.sample_width = NRF_I2S_SWIDTH_32BIT; break; #endif default: LOG_ERR("Unsupported word size: %u", i2s_cfg->word_size); return -EINVAL; } switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) { case I2S_FMT_DATA_FORMAT_I2S: nrfx_cfg.alignment = NRF_I2S_ALIGN_LEFT; nrfx_cfg.format = NRF_I2S_FORMAT_I2S; break; case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED: nrfx_cfg.alignment = NRF_I2S_ALIGN_LEFT; nrfx_cfg.format = NRF_I2S_FORMAT_ALIGNED; break; case I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED: nrfx_cfg.alignment = NRF_I2S_ALIGN_RIGHT; nrfx_cfg.format = NRF_I2S_FORMAT_ALIGNED; break; default: LOG_ERR("Unsupported data format: 0x%02x", i2s_cfg->format); return -EINVAL; } if ((i2s_cfg->format & I2S_FMT_DATA_ORDER_LSB) || (i2s_cfg->format & I2S_FMT_BIT_CLK_INV) || (i2s_cfg->format & I2S_FMT_FRAME_CLK_INV)) { LOG_ERR("Unsupported stream format: 0x%02x", i2s_cfg->format); return -EINVAL; } if (i2s_cfg->channels == 2) { nrfx_cfg.channels = NRF_I2S_CHANNELS_STEREO; } else if (i2s_cfg->channels == 1) { nrfx_cfg.channels = NRF_I2S_CHANNELS_LEFT; } else { LOG_ERR("Unsupported number of channels: %u", i2s_cfg->channels); return -EINVAL; } if ((i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) && (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE)) { nrfx_cfg.mode = NRF_I2S_MODE_SLAVE; } else if (!(i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) && !(i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE)) { nrfx_cfg.mode = NRF_I2S_MODE_MASTER; } else { LOG_ERR("Unsupported operation mode: 0x%02x", i2s_cfg->options); return -EINVAL; } /* If the master clock generator is needed (i.e. in Master mode or when * the MCK output is used), find a suitable clock configuration for it. 
*/ if (nrfx_cfg.mode == NRF_I2S_MODE_MASTER || (nrf_i2s_mck_pin_get(drv_cfg->i2s.p_reg) & I2S_PSEL_MCK_CONNECT_Msk) == I2S_PSEL_MCK_CONNECT_Connected << I2S_PSEL_MCK_CONNECT_Pos) { find_suitable_clock(drv_cfg, &nrfx_cfg, i2s_cfg); /* Unless the PCLK32M source is used with the HFINT oscillator * (which is always available without any additional actions), * it is required to request the proper clock to be running * before starting the transfer itself. */ drv_data->request_clock = (drv_cfg->clk_src != PCLK32M); } else { nrfx_cfg.mck_setup = NRF_I2S_MCK_DISABLED; drv_data->request_clock = false; } if ((i2s_cfg->options & I2S_OPT_LOOPBACK) || (i2s_cfg->options & I2S_OPT_PINGPONG)) { LOG_ERR("Unsupported options: 0x%02x", i2s_cfg->options); return -EINVAL; } if (dir == I2S_DIR_TX || dir == I2S_DIR_BOTH) { drv_data->tx.cfg = *i2s_cfg; drv_data->tx.nrfx_cfg = nrfx_cfg; drv_data->tx_configured = true; } if (dir == I2S_DIR_RX || dir == I2S_DIR_BOTH) { drv_data->rx.cfg = *i2s_cfg; drv_data->rx.nrfx_cfg = nrfx_cfg; drv_data->rx_configured = true; } return 0; } static const struct i2s_config *i2s_nrfx_config_get(const struct device *dev, enum i2s_dir dir) { struct i2s_nrfx_drv_data *drv_data = dev->data; if (dir == I2S_DIR_TX && drv_data->tx_configured) { return &drv_data->tx.cfg; } if (dir == I2S_DIR_RX && drv_data->rx_configured) { return &drv_data->rx.cfg; } return NULL; } static int i2s_nrfx_read(const struct device *dev, void **mem_block, size_t *size) { struct i2s_nrfx_drv_data *drv_data = dev->data; struct i2s_buf buf; int ret; if (!drv_data->rx_configured) { LOG_ERR("Device is not configured"); return -EIO; } ret = k_msgq_get(&drv_data->rx_queue, &buf, (drv_data->state == I2S_STATE_ERROR) ? 
K_NO_WAIT : SYS_TIMEOUT_MS(drv_data->rx.cfg.timeout)); if (ret == -ENOMSG) { return -EIO; } LOG_DBG("Released RX %p", buf.mem_block); if (ret == 0) { *mem_block = buf.mem_block; *size = buf.size; } return ret; } static int i2s_nrfx_write(const struct device *dev, void *mem_block, size_t size) { struct i2s_nrfx_drv_data *drv_data = dev->data; struct i2s_buf buf = { .mem_block = mem_block, .size = size }; int ret; if (!drv_data->tx_configured) { LOG_ERR("Device is not configured"); return -EIO; } if (drv_data->state != I2S_STATE_RUNNING && drv_data->state != I2S_STATE_READY) { LOG_ERR("Cannot write in state: %d", drv_data->state); return -EIO; } if (size > drv_data->tx.cfg.block_size || size < sizeof(uint32_t)) { LOG_ERR("This device can only write blocks up to %u bytes", drv_data->tx.cfg.block_size); return -EIO; } ret = k_msgq_put(&drv_data->tx_queue, &buf, SYS_TIMEOUT_MS(drv_data->tx.cfg.timeout)); if (ret < 0) { return ret; } LOG_DBG("Queued TX %p", mem_block); /* Check if interrupt wanted to get next TX buffer before current buffer * was queued. Do not move this check before queuing because doing so * opens the possibility for a race condition between this function and * data_handler() that is called in interrupt context. */ if (drv_data->state == I2S_STATE_RUNNING && drv_data->next_tx_buffer_needed) { nrfx_i2s_buffers_t next = { 0 }; if (!get_next_tx_buffer(drv_data, &next)) { /* Log error because this is definitely unexpected. * Do not return error because the caller is no longer * responsible for releasing the buffer. 
*/ LOG_ERR("Cannot reacquire queued buffer"); return 0; } drv_data->next_tx_buffer_needed = false; LOG_DBG("Next TX %p", next.p_tx_buffer); if (!supply_next_buffers(drv_data, &next)) { return -EIO; } } return 0; } static int start_transfer(struct i2s_nrfx_drv_data *drv_data) { nrfx_i2s_buffers_t initial_buffers = { 0 }; int ret; if (drv_data->active_dir != I2S_DIR_RX && /* -> TX to be started */ !get_next_tx_buffer(drv_data, &initial_buffers)) { LOG_ERR("No TX buffer available"); ret = -ENOMEM; } else if (drv_data->active_dir != I2S_DIR_TX && /* -> RX to be started */ !get_next_rx_buffer(drv_data, &initial_buffers)) { /* Failed to allocate next RX buffer */ ret = -ENOMEM; } else { nrfx_err_t err; /* It is necessary to set buffer size here only for I2S_DIR_RX, * because only then the get_next_tx_buffer() call in the if * condition above gets short-circuited. */ if (drv_data->active_dir == I2S_DIR_RX) { initial_buffers.buffer_size = drv_data->rx.cfg.block_size / sizeof(uint32_t); } drv_data->last_tx_buffer = initial_buffers.p_tx_buffer; err = nrfx_i2s_start(drv_data->p_i2s, &initial_buffers, 0); if (err == NRFX_SUCCESS) { return 0; } LOG_ERR("Failed to start I2S transfer: 0x%08x", err); ret = -EIO; } nrfx_i2s_uninit(drv_data->p_i2s); if (drv_data->request_clock) { (void)onoff_release(drv_data->clk_mgr); } if (initial_buffers.p_tx_buffer) { free_tx_buffer(drv_data, initial_buffers.p_tx_buffer); } if (initial_buffers.p_rx_buffer) { free_rx_buffer(drv_data, initial_buffers.p_rx_buffer); } drv_data->state = I2S_STATE_ERROR; return ret; } static void clock_started_callback(struct onoff_manager *mgr, struct onoff_client *cli, uint32_t state, int res) { struct i2s_nrfx_drv_data *drv_data = CONTAINER_OF(cli, struct i2s_nrfx_drv_data, clk_cli); /* The driver state can be set back to READY at this point if the DROP * command was triggered before the clock has started. Do not start * the actual transfer in such case. 
*/ if (drv_data->state == I2S_STATE_READY) { nrfx_i2s_uninit(drv_data->p_i2s); (void)onoff_release(drv_data->clk_mgr); } else { (void)start_transfer(drv_data); } } static int trigger_start(const struct device *dev) { struct i2s_nrfx_drv_data *drv_data = dev->data; const struct i2s_nrfx_drv_cfg *drv_cfg = dev->config; nrfx_err_t err; int ret; const nrfx_i2s_config_t *nrfx_cfg = (drv_data->active_dir == I2S_DIR_TX) ? &drv_data->tx.nrfx_cfg : &drv_data->rx.nrfx_cfg; err = nrfx_i2s_init(drv_data->p_i2s, nrfx_cfg, drv_cfg->data_handler); if (err != NRFX_SUCCESS) { LOG_ERR("Failed to initialize I2S: 0x%08x", err); return -EIO; } drv_data->state = I2S_STATE_RUNNING; #if NRF_I2S_HAS_CLKCONFIG nrf_i2s_clk_configure(drv_cfg->i2s.p_reg, drv_cfg->clk_src == ACLK ? NRF_I2S_CLKSRC_ACLK : NRF_I2S_CLKSRC_PCLK32M, false); #endif /* If it is required to use certain HF clock, request it to be running * first. If not, start the transfer directly. */ if (drv_data->request_clock) { sys_notify_init_callback(&drv_data->clk_cli.notify, clock_started_callback); ret = onoff_request(drv_data->clk_mgr, &drv_data->clk_cli); if (ret < 0) { nrfx_i2s_uninit(drv_data->p_i2s); drv_data->state = I2S_STATE_READY; LOG_ERR("Failed to request clock: %d", ret); return -EIO; } } else { ret = start_transfer(drv_data); if (ret < 0) { return ret; } } return 0; } static int i2s_nrfx_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd) { struct i2s_nrfx_drv_data *drv_data = dev->data; bool configured = false; bool cmd_allowed; /* This driver does not use the I2S_STATE_NOT_READY value. * Instead, if a given stream is not configured, the respective * flag (tx_configured or rx_configured) is cleared. 
*/ if (dir == I2S_DIR_BOTH) { configured = drv_data->tx_configured && drv_data->rx_configured; } else if (dir == I2S_DIR_TX) { configured = drv_data->tx_configured; } else if (dir == I2S_DIR_RX) { configured = drv_data->rx_configured; } if (!configured) { LOG_ERR("Device is not configured"); return -EIO; } if (dir == I2S_DIR_BOTH && (memcmp(&drv_data->tx.nrfx_cfg, &drv_data->rx.nrfx_cfg, sizeof(drv_data->rx.nrfx_cfg)) != 0 || (drv_data->tx.cfg.block_size != drv_data->rx.cfg.block_size))) { LOG_ERR("TX and RX configurations are different"); return -EIO; } switch (cmd) { case I2S_TRIGGER_START: cmd_allowed = (drv_data->state == I2S_STATE_READY); break; case I2S_TRIGGER_STOP: case I2S_TRIGGER_DRAIN: cmd_allowed = (drv_data->state == I2S_STATE_RUNNING); break; case I2S_TRIGGER_DROP: cmd_allowed = configured; break; case I2S_TRIGGER_PREPARE: cmd_allowed = (drv_data->state == I2S_STATE_ERROR); break; default: LOG_ERR("Invalid trigger: %d", cmd); return -EINVAL; } if (!cmd_allowed) { return -EIO; } /* For triggers applicable to the RUNNING state (i.e. STOP, DRAIN, * and DROP), ensure that the command is applied to the streams * that are currently active (this device cannot e.g. stop only TX * without stopping RX). */ if (drv_data->state == I2S_STATE_RUNNING && drv_data->active_dir != dir) { LOG_ERR("Inappropriate trigger (%d/%d), active stream(s): %d", cmd, dir, drv_data->active_dir); return -EINVAL; } switch (cmd) { case I2S_TRIGGER_START: drv_data->stop = false; drv_data->discard_rx = false; drv_data->active_dir = dir; drv_data->next_tx_buffer_needed = false; return trigger_start(dev); case I2S_TRIGGER_STOP: drv_data->state = I2S_STATE_STOPPING; drv_data->stop = true; return 0; case I2S_TRIGGER_DRAIN: drv_data->state = I2S_STATE_STOPPING; /* If only RX is active, DRAIN is equivalent to STOP. 
*/ drv_data->stop = (drv_data->active_dir == I2S_DIR_RX); return 0; case I2S_TRIGGER_DROP: if (drv_data->state != I2S_STATE_READY) { drv_data->discard_rx = true; nrfx_i2s_stop(drv_data->p_i2s); } purge_queue(dev, dir); drv_data->state = I2S_STATE_READY; return 0; case I2S_TRIGGER_PREPARE: purge_queue(dev, dir); drv_data->state = I2S_STATE_READY; return 0; default: LOG_ERR("Invalid trigger: %d", cmd); return -EINVAL; } } static void init_clock_manager(const struct device *dev) { struct i2s_nrfx_drv_data *drv_data = dev->data; clock_control_subsys_t subsys; #if NRF_CLOCK_HAS_HFCLKAUDIO const struct i2s_nrfx_drv_cfg *drv_cfg = dev->config; if (drv_cfg->clk_src == ACLK) { subsys = CLOCK_CONTROL_NRF_SUBSYS_HFAUDIO; } else #endif { subsys = CLOCK_CONTROL_NRF_SUBSYS_HF; } drv_data->clk_mgr = z_nrf_clock_control_get_onoff(subsys); __ASSERT_NO_MSG(drv_data->clk_mgr != NULL); } static const struct i2s_driver_api i2s_nrf_drv_api = { .configure = i2s_nrfx_configure, .config_get = i2s_nrfx_config_get, .read = i2s_nrfx_read, .write = i2s_nrfx_write, .trigger = i2s_nrfx_trigger, }; #define I2S(idx) DT_NODELABEL(i2s##idx) #define I2S_CLK_SRC(idx) DT_STRING_TOKEN(I2S(idx), clock_source) #define I2S_NRFX_DEVICE(idx) \ static struct i2s_buf tx_msgs##idx[CONFIG_I2S_NRFX_TX_BLOCK_COUNT]; \ static struct i2s_buf rx_msgs##idx[CONFIG_I2S_NRFX_RX_BLOCK_COUNT]; \ static void data_handler##idx(nrfx_i2s_buffers_t const *p_released, \ uint32_t status) \ { \ data_handler(DEVICE_DT_GET(I2S(idx)), p_released, status); \ } \ PINCTRL_DT_DEFINE(I2S(idx)); \ static const struct i2s_nrfx_drv_cfg i2s_nrfx_cfg##idx = { \ .data_handler = data_handler##idx, \ .i2s = NRFX_I2S_INSTANCE(idx), \ .nrfx_def_cfg = NRFX_I2S_DEFAULT_CONFIG( \ NRF_I2S_PIN_NOT_CONNECTED, \ NRF_I2S_PIN_NOT_CONNECTED, \ NRF_I2S_PIN_NOT_CONNECTED, \ NRF_I2S_PIN_NOT_CONNECTED, \ NRF_I2S_PIN_NOT_CONNECTED), \ .nrfx_def_cfg.skip_gpio_cfg = true, \ .nrfx_def_cfg.skip_psel_cfg = true, \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(I2S(idx)), \ 
.clk_src = I2S_CLK_SRC(idx), \ }; \ static struct i2s_nrfx_drv_data i2s_nrfx_data##idx = { \ .state = I2S_STATE_READY, \ .p_i2s = &i2s_nrfx_cfg##idx.i2s \ }; \ static int i2s_nrfx_init##idx(const struct device *dev) \ { \ IRQ_CONNECT(DT_IRQN(I2S(idx)), DT_IRQ(I2S(idx), priority), \ nrfx_isr, nrfx_i2s_##idx##_irq_handler, 0); \ const struct i2s_nrfx_drv_cfg *drv_cfg = dev->config; \ int err = pinctrl_apply_state(drv_cfg->pcfg, \ PINCTRL_STATE_DEFAULT); \ if (err < 0) { \ return err; \ } \ k_msgq_init(&i2s_nrfx_data##idx.tx_queue, \ (char *)tx_msgs##idx, sizeof(struct i2s_buf), \ ARRAY_SIZE(tx_msgs##idx)); \ k_msgq_init(&i2s_nrfx_data##idx.rx_queue, \ (char *)rx_msgs##idx, sizeof(struct i2s_buf), \ ARRAY_SIZE(rx_msgs##idx)); \ init_clock_manager(dev); \ return 0; \ } \ BUILD_ASSERT(I2S_CLK_SRC(idx) != ACLK || NRF_I2S_HAS_CLKCONFIG, \ "Clock source ACLK is not available."); \ BUILD_ASSERT(I2S_CLK_SRC(idx) != ACLK || \ DT_NODE_HAS_PROP(DT_NODELABEL(clock), \ hfclkaudio_frequency), \ "Clock source ACLK requires the hfclkaudio-frequency " \ "property to be defined in the nordic,nrf-clock node."); \ DEVICE_DT_DEFINE(I2S(idx), i2s_nrfx_init##idx, NULL, \ &i2s_nrfx_data##idx, &i2s_nrfx_cfg##idx, \ POST_KERNEL, CONFIG_I2S_INIT_PRIORITY, \ &i2s_nrf_drv_api); #ifdef CONFIG_HAS_HW_NRF_I2S0 I2S_NRFX_DEVICE(0); #endif #ifdef CONFIG_HAS_HW_NRF_I2S20 I2S_NRFX_DEVICE(20); #endif ```
/content/code_sandbox/drivers/i2s/i2s_nrfx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,033
```objective-c /* * */ #ifndef _I2S_LITEI2S__H #define _I2S_LITEI2S__H #include <zephyr/device.h> #include <zephyr/drivers/i2s.h> #include <zephyr/devicetree.h> #include <zephyr/kernel.h> /* i2s configuration mask*/ #define I2S_CONF_FORMAT_OFFSET 0 #define I2S_CONF_SAMPLE_WIDTH_OFFSET 2 #define I2S_CONF_LRCK_FREQ_OFFSET 8 #define I2S_CONF_FORMAT_MASK (0x3 << I2S_CONF_FORMAT_OFFSET) #define I2S_CONF_SAMPLE_WIDTH_MASK (0x3f << I2S_CONF_SAMPLE_WIDTH_OFFSET) #define I2S_CONF_LRCK_MASK (0xffffff << I2S_CONF_LRCK_FREQ_OFFSET) /* i2s control register options*/ #define I2S_ENABLE (1 << 0) #define I2S_FIFO_RESET (1 << 1) /* i2s event*/ #define I2S_EV_ENABLE (1 << 0) /* i2s event types*/ #define I2S_EV_READY (1 << 0) #define I2S_EV_ERROR (1 << 1) /* i2s rx*/ #define I2S_RX_STAT_CHANNEL_CONCATENATED_OFFSET 31 #define I2S_RX_STAT_CHANNEL_CONCATENATED_MASK \ (0x1 << I2S_RX_STAT_CHANNEL_CONCATENATED_OFFSET) #define I2S_RX_FIFO_ADDR DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_rx), fifo) #define I2S_RX_FIFO_DEPTH DT_PROP(DT_NODELABEL(i2s_rx), fifo_depth) /* i2s tx*/ #define I2S_TX_STAT_CHANNEL_CONCATENATED_OFFSET 24 #define I2S_TX_STAT_CHANNEL_CONCATENATED_MASK \ (0x1 << I2S_TX_STAT_CHANNEL_CONCATENATED_OFFSET) #define I2S_TX_FIFO_ADDR DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_tx), fifo) #define I2S_TX_FIFO_DEPTH DT_PROP(DT_NODELABEL(i2s_tx), fifo_depth) /* i2s register offsets (they are the same for all i2s nodes, both rx and tx) */ #define I2S_BASE_ADDR DT_REG_ADDR(DT_NODELABEL(i2s_rx)) #define I2S_EV_STATUS_OFFSET (DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_rx), ev_status) \ - I2S_BASE_ADDR) #define I2S_EV_PENDING_OFFSET (DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_rx), ev_pending) \ - I2S_BASE_ADDR) #define I2S_EV_ENABLE_OFFSET (DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_rx), ev_enable) \ - I2S_BASE_ADDR) #define I2S_CONTROL_OFFSET (DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_rx), rx_ctl) \ - I2S_BASE_ADDR) #define I2S_STATUS_OFFSET (DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_rx), rx_stat) \ - I2S_BASE_ADDR) #define 
I2S_CONFIG_OFFSET (DT_REG_ADDR_BY_NAME(DT_NODELABEL(i2s_rx), rx_conf) \ - I2S_BASE_ADDR) enum litex_i2s_fmt { LITEX_I2S_STANDARD = 1, LITEX_I2S_LEFT_JUSTIFIED = 2, }; struct queue_item { void *mem_block; size_t size; }; /* Minimal ring buffer implementation */ struct ring_buf { struct queue_item *buf; uint16_t len; uint16_t head; uint16_t tail; }; struct stream { int32_t state; struct k_sem sem; struct i2s_config cfg; struct ring_buf mem_block_queue; void *mem_block; }; /* Device run time data */ struct i2s_litex_data { struct stream rx; struct stream tx; }; /* Device const configuration */ struct i2s_litex_cfg { uint32_t base; uint32_t fifo_base; uint16_t fifo_depth; void (*irq_config)(const struct device *dev); }; #endif /* _I2S_LITEI2S__H */ ```
/content/code_sandbox/drivers/i2s/i2s_litex.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
893
```unknown
# Kconfig Andes mbox configuration options
#
#
#

config MBOX_ANDES_PLIC_SW
	bool "MBOX Andes PLIC-SW driver"
	default y
	depends on DT_HAS_ANDESTECH_PLIC_SW_ENABLED
	help
	  Enable driver for the Andes IPM mailbox controller.
	  Say n if not sure.
```
/content/code_sandbox/drivers/mbox/Kconfig.andes
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
72
```unknown config MBOX_NRF_VEVIF_EVENT_RX bool "nRF VEVIF event RX driver" depends on DT_HAS_NORDIC_NRF_VEVIF_EVENT_RX_ENABLED default y help Mailbox driver for receiving events triggered by VPR config MBOX_NRF_VEVIF_EVENT_TX bool "nRF VEVIF event TX driver" depends on DT_HAS_NORDIC_NRF_VEVIF_EVENT_TX_ENABLED default y help Mailbox driver for transmitting events from VPR to a remote core ```
/content/code_sandbox/drivers/mbox/Kconfig.nrf_vevif_event
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
117
```c /* */ #define DT_DRV_COMPAT nordic_nrf_bellboard_tx #include <zephyr/devicetree.h> #include <zephyr/drivers/mbox.h> #include <haly/nrfy_bellboard.h> struct mbox_bellboard_tx_conf { NRF_BELLBOARD_Type *bellboard; }; static int bellboard_tx_send(const struct device *dev, uint32_t id, const struct mbox_msg *msg) { const struct mbox_bellboard_tx_conf *config = dev->config; if (id >= BELLBOARD_TASKS_TRIGGER_MaxCount) { return -EINVAL; } if (msg != NULL) { return -ENOTSUP; } nrfy_bellboard_task_trigger(config->bellboard, nrf_bellboard_trigger_task_get(id)); return 0; } static int bellboard_tx_mtu_get(const struct device *dev) { ARG_UNUSED(dev); return 0; } static uint32_t bellboard_tx_max_channels_get(const struct device *dev) { ARG_UNUSED(dev); return BELLBOARD_TASKS_TRIGGER_MaxCount; } static const struct mbox_driver_api bellboard_tx_driver_api = { .send = bellboard_tx_send, .mtu_get = bellboard_tx_mtu_get, .max_channels_get = bellboard_tx_max_channels_get, }; #define BELLBOARD_REMOTE_DEFINE(inst) \ static const struct mbox_bellboard_tx_conf conf##inst = { \ .bellboard = (NRF_BELLBOARD_Type *)DT_INST_REG_ADDR(inst), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, NULL, NULL, NULL, &conf##inst, POST_KERNEL, \ CONFIG_MBOX_INIT_PRIORITY, &bellboard_tx_driver_api); DT_INST_FOREACH_STATUS_OKAY(BELLBOARD_REMOTE_DEFINE) ```
/content/code_sandbox/drivers/mbox/mbox_nrf_bellboard_tx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
382
```c /* * * * Wrapper of the i.MX Message Unit driver into Zephyr's MBOX model. */ #include <zephyr/devicetree.h> #include <zephyr/drivers/mbox.h> #include <zephyr/irq.h> #include <zephyr/sys/util_macro.h> #include <fsl_mu.h> #define LOG_LEVEL CONFIG_MBOX_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nxp_mbox_imx_mu); #define DT_DRV_COMPAT nxp_mbox_imx_mu #define MU_MAX_CHANNELS 4 #define MU_MBOX_SIZE sizeof(uint32_t) struct nxp_imx_mu_data { mbox_callback_t cb[MU_MAX_CHANNELS]; void *user_data[MU_MAX_CHANNELS]; uint32_t received_data; }; struct nxp_imx_mu_config { MU_Type *base; }; static int nxp_imx_mu_send(const struct device *dev, uint32_t channel, const struct mbox_msg *msg) { uint32_t __aligned(4) data32; const struct nxp_imx_mu_config *cfg = dev->config; if (channel >= MU_MAX_CHANNELS) { return -EINVAL; } /* Signalling mode. */ if (msg == NULL) { return MU_TriggerInterrupts(cfg->base, kMU_GenInt0InterruptTrigger >> channel); } /* Data transfer mode. */ if (msg->size != MU_MBOX_SIZE) { /* We can only send this many bytes at a time. */ return -EMSGSIZE; } /* memcpy to avoid issues when msg->data is not word-aligned. 
*/ memcpy(&data32, msg->data, msg->size); MU_SendMsg(cfg->base, channel, data32); return 0; } static int nxp_imx_mu_register_callback(const struct device *dev, uint32_t channel, mbox_callback_t cb, void *user_data) { struct nxp_imx_mu_data *data = dev->data; if (channel >= MU_MAX_CHANNELS) { return -EINVAL; } data->cb[channel] = cb; data->user_data[channel] = user_data; return 0; } static int nxp_imx_mu_mtu_get(const struct device *dev) { ARG_UNUSED(dev); return MU_MBOX_SIZE; } static uint32_t nxp_imx_mu_max_channels_get(const struct device *dev) { ARG_UNUSED(dev); return MU_MAX_CHANNELS; } static int nxp_imx_mu_set_enabled(const struct device *dev, uint32_t channel, bool enable) { struct nxp_imx_mu_data *data = dev->data; const struct nxp_imx_mu_config *cfg = dev->config; if (channel >= MU_MAX_CHANNELS) { return -EINVAL; } if (enable) { if (data->cb[channel] == NULL) { LOG_WRN("Enabling channel without a registered callback"); } MU_EnableInterrupts( cfg->base, kMU_GenInt0InterruptEnable | kMU_GenInt1InterruptEnable | kMU_GenInt2InterruptEnable | kMU_GenInt3InterruptEnable | kMU_Rx0FullInterruptEnable | kMU_Rx1FullInterruptEnable | kMU_Rx2FullInterruptEnable | kMU_Rx3FullInterruptEnable); } else { MU_DisableInterrupts( cfg->base, kMU_GenInt0InterruptEnable | kMU_GenInt1InterruptEnable | kMU_GenInt2InterruptEnable | kMU_GenInt3InterruptEnable | kMU_Rx0FullInterruptEnable | kMU_Rx1FullInterruptEnable | kMU_Rx2FullInterruptEnable | kMU_Rx3FullInterruptEnable); } return 0; } static const struct mbox_driver_api nxp_imx_mu_driver_api = { .send = nxp_imx_mu_send, .register_callback = nxp_imx_mu_register_callback, .mtu_get = nxp_imx_mu_mtu_get, .max_channels_get = nxp_imx_mu_max_channels_get, .set_enabled = nxp_imx_mu_set_enabled, }; static void handle_irq(const struct device *dev); #define MU_INSTANCE_DEFINE(idx) \ static struct nxp_imx_mu_data nxp_imx_mu_##idx##_data; \ const static struct nxp_imx_mu_config nxp_imx_mu_##idx##_config = { \ .base = (MU_Type 
*)DT_INST_REG_ADDR(idx), \ }; \ void MU_##idx##_IRQHandler(void); \ static int nxp_imx_mu_##idx##_init(const struct device *dev) \ { \ ARG_UNUSED(dev); \ MU_Init(nxp_imx_mu_##idx##_config.base); \ IRQ_CONNECT(DT_INST_IRQN(idx), DT_INST_IRQ(idx, priority), MU_##idx##_IRQHandler, \ NULL, 0); \ irq_enable(DT_INST_IRQN(idx)); \ return 0; \ } \ DEVICE_DT_INST_DEFINE(idx, nxp_imx_mu_##idx##_init, NULL, &nxp_imx_mu_##idx##_data, \ &nxp_imx_mu_##idx##_config, POST_KERNEL, CONFIG_MBOX_INIT_PRIORITY, \ &nxp_imx_mu_driver_api) #define MU_IRQ_HANDLER(idx) \ void MU_##idx##_IRQHandler(void) \ { \ const struct device *dev = DEVICE_DT_INST_GET(idx); \ handle_irq(dev); \ } #define MU_INST(idx) \ MU_INSTANCE_DEFINE(idx); \ MU_IRQ_HANDLER(idx); DT_INST_FOREACH_STATUS_OKAY(MU_INST) static void handle_irq(const struct device *dev) { struct nxp_imx_mu_data *data = dev->data; const struct nxp_imx_mu_config *config = dev->config; const uint32_t flag = MU_GetStatusFlags(config->base); for (int i_channel = 0; i_channel < MU_MAX_CHANNELS; i_channel++) { if ((flag & (kMU_Rx0FullFlag >> i_channel)) == (kMU_Rx0FullFlag >> i_channel)) { data->received_data = MU_ReceiveMsgNonBlocking(config->base, i_channel); struct mbox_msg msg = {(const void *)&data->received_data, MU_MBOX_SIZE}; if (data->cb[i_channel]) { data->cb[i_channel](dev, i_channel, data->user_data[i_channel], &msg); } } else if ((flag & (kMU_GenInt0Flag >> i_channel)) == (kMU_GenInt0Flag >> i_channel)) { MU_ClearStatusFlags(config->base, (kMU_GenInt0Flag >> i_channel)); if (data->cb[i_channel]) { data->cb[i_channel](dev, i_channel, data->user_data[i_channel], NULL); } } } } ```
/content/code_sandbox/drivers/mbox/mbox_nxp_imx_mu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,510
```unknown config MBOX_NRF_BELLBOARD_RX bool "nRF BELLBOARD RX driver" depends on DT_HAS_NORDIC_NRF_BELLBOARD_RX_ENABLED default y help Mailbox driver for RX Nordic nRF BELLBOARD config MBOX_NRF_BELLBOARD_TX bool "nRF BELLBOARD TX driver" depends on DT_HAS_NORDIC_NRF_BELLBOARD_TX_ENABLED default y help Mailbox driver for TX Nordic nRF BELLBOARD ```
/content/code_sandbox/drivers/mbox/Kconfig.nrf_bellboard
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
110
```c /* */ #define DT_DRV_COMPAT nordic_nrf_vevif_event_tx #include <zephyr/devicetree.h> #include <zephyr/drivers/mbox.h> #include <hal/nrf_vpr.h> #include <hal/nrf_vpr_csr.h> #include <hal/nrf_vpr_csr_vevif.h> #if defined(CONFIG_SOC_NRF54L15_ENGA_CPUFLPR) #define EVENTS_IDX_MAX 17U #else #define EVENTS_IDX_MAX NRF_VPR_EVENTS_TRIGGERED_MAX #endif #define VEVIF_EVENTS_NUM DT_INST_PROP(0, nordic_events) #define VEVIF_EVENTS_MASK DT_INST_PROP(0, nordic_events_mask) BUILD_ASSERT(DT_INST_PROP(0, nordic_events) <= NRF_VPR_EVENTS_TRIGGERED_COUNT, "Number of events exceeds maximum"); static inline bool vevif_event_tx_is_valid(uint32_t id) { return (id < EVENTS_IDX_MAX) && ((VEVIF_EVENTS_MASK & BIT(id)) != 0U); } static int vevif_event_tx_send(const struct device *dev, uint32_t id, const struct mbox_msg *msg) { ARG_UNUSED(dev); if (!vevif_event_tx_is_valid(id)) { return -EINVAL; } if (msg != NULL) { return -ENOTSUP; } nrf_vpr_csr_vevif_events_trigger(BIT(id)); return 0; } static int vevif_event_tx_mtu_get(const struct device *dev) { ARG_UNUSED(dev); return 0; } static uint32_t vevif_event_tx_max_channels_get(const struct device *dev) { ARG_UNUSED(dev); return VEVIF_EVENTS_NUM; } static const struct mbox_driver_api vevif_event_tx_driver_api = { .send = vevif_event_tx_send, .mtu_get = vevif_event_tx_mtu_get, .max_channels_get = vevif_event_tx_max_channels_get, }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, POST_KERNEL, CONFIG_MBOX_INIT_PRIORITY, &vevif_event_tx_driver_api); ```
/content/code_sandbox/drivers/mbox/mbox_nrf_vevif_event_tx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
450
```c /* * */ #define DT_DRV_COMPAT nxp_lpc_i2s #include <string.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/i2s.h> #include <zephyr/drivers/clock_control.h> #include <fsl_i2s.h> #include <fsl_dma.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/drivers/pinctrl.h> LOG_MODULE_REGISTER(i2s_mcux_flexcomm); #define NUM_RX_DMA_BLOCKS 2 /* Device constant configuration parameters */ struct i2s_mcux_config { I2S_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; void (*irq_config)(const struct device *dev); const struct pinctrl_dev_config *pincfg; }; struct stream { int32_t state; const struct device *dev_dma; uint32_t channel; /* stores the channel for dma */ struct i2s_config cfg; struct dma_config dma_cfg; bool last_block; struct k_msgq in_queue; struct k_msgq out_queue; }; struct i2s_txq_entry { void *mem_block; size_t size; }; struct i2s_mcux_data { struct stream rx; void *rx_in_msgs[CONFIG_I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT]; void *rx_out_msgs[CONFIG_I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT]; struct dma_block_config rx_dma_blocks[NUM_RX_DMA_BLOCKS]; struct stream tx; /* For tx, the in queue is for requests generated by * the i2s_write() API call, and size must be tracked * separate from the buffer size. * The out_queue is for tracking buffers that should * be freed once the DMA is done transferring it. */ struct i2s_txq_entry tx_in_msgs[CONFIG_I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT]; void *tx_out_msgs[CONFIG_I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT]; struct dma_block_config tx_dma_block; }; static int i2s_mcux_flexcomm_cfg_convert(uint32_t base_frequency, enum i2s_dir dir, const struct i2s_config *i2s_cfg, i2s_config_t *fsl_cfg) { if (dir == I2S_DIR_RX) { I2S_RxGetDefaultConfig(fsl_cfg); } else if (dir == I2S_DIR_TX) { I2S_TxGetDefaultConfig(fsl_cfg); } fsl_cfg->dataLength = i2s_cfg->word_size; if ((i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) == I2S_FMT_DATA_FORMAT_I2S) { /* Classic I2S. 
We always use 2 channels */ fsl_cfg->frameLength = 2 * i2s_cfg->word_size; } else { fsl_cfg->frameLength = i2s_cfg->channels * i2s_cfg->word_size; } if (fsl_cfg->dataLength < 4 || fsl_cfg->dataLength > 32) { LOG_ERR("Unsupported data length"); return -EINVAL; } if (fsl_cfg->frameLength < 4 || fsl_cfg->frameLength > 2048) { LOG_ERR("Unsupported frame length"); return -EINVAL; } /* Set master/slave configuration */ switch (i2s_cfg->options & (I2S_OPT_BIT_CLK_SLAVE | I2S_OPT_FRAME_CLK_SLAVE)) { case I2S_OPT_BIT_CLK_MASTER | I2S_OPT_FRAME_CLK_MASTER: fsl_cfg->masterSlave = kI2S_MasterSlaveNormalMaster; break; case I2S_OPT_BIT_CLK_SLAVE | I2S_OPT_FRAME_CLK_SLAVE: fsl_cfg->masterSlave = kI2S_MasterSlaveNormalSlave; break; case I2S_OPT_BIT_CLK_SLAVE | I2S_OPT_FRAME_CLK_MASTER: /* Master using external CLK */ fsl_cfg->masterSlave = kI2S_MasterSlaveExtSckMaster; break; case I2S_OPT_BIT_CLK_MASTER | I2S_OPT_FRAME_CLK_SLAVE: /* WS synchronized master */ fsl_cfg->masterSlave = kI2S_MasterSlaveWsSyncMaster; break; } switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) { case I2S_FMT_DATA_FORMAT_I2S: fsl_cfg->mode = kI2S_ModeI2sClassic; break; case I2S_FMT_DATA_FORMAT_PCM_SHORT: fsl_cfg->mode = kI2S_ModeDspWsShort; fsl_cfg->wsPol = true; break; case I2S_FMT_DATA_FORMAT_PCM_LONG: fsl_cfg->mode = kI2S_ModeDspWsLong; fsl_cfg->wsPol = true; break; case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED: fsl_cfg->mode = kI2S_ModeDspWs50; fsl_cfg->wsPol = true; break; default: LOG_ERR("Unsupported I2S data format"); return -EINVAL; } if (fsl_cfg->masterSlave == kI2S_MasterSlaveNormalMaster || fsl_cfg->masterSlave == kI2S_MasterSlaveWsSyncMaster) { fsl_cfg->divider = base_frequency / i2s_cfg->frame_clk_freq / fsl_cfg->frameLength; } /* * Set frame and bit clock polarity according to * inversion flags. 
*/ switch (i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) { case I2S_FMT_CLK_NF_NB: break; case I2S_FMT_CLK_NF_IB: fsl_cfg->sckPol = !fsl_cfg->sckPol; break; case I2S_FMT_CLK_IF_NB: fsl_cfg->wsPol = !fsl_cfg->wsPol; break; case I2S_FMT_CLK_IF_IB: fsl_cfg->sckPol = !fsl_cfg->sckPol; fsl_cfg->wsPol = !fsl_cfg->wsPol; break; default: LOG_ERR("Unsupported clocks polarity"); return -EINVAL; } return 0; } static const struct i2s_config *i2s_mcux_config_get(const struct device *dev, enum i2s_dir dir) { struct i2s_mcux_data *dev_data = dev->data; struct stream *stream; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; } else { stream = &dev_data->tx; } if (stream->state == I2S_STATE_NOT_READY) { return NULL; } return &stream->cfg; } static int i2s_mcux_configure(const struct device *dev, enum i2s_dir dir, const struct i2s_config *i2s_cfg) { const struct i2s_mcux_config *cfg = dev->config; struct i2s_mcux_data *dev_data = dev->data; struct stream *stream; uint32_t base_frequency; i2s_config_t fsl_cfg; int result; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; } else if (dir == I2S_DIR_TX) { stream = &dev_data->tx; } else if (dir == I2S_DIR_BOTH) { return -ENOSYS; } else { LOG_ERR("Either RX or TX direction must be selected"); return -EINVAL; } if (stream->state != I2S_STATE_NOT_READY && stream->state != I2S_STATE_READY) { LOG_ERR("invalid state"); return -EINVAL; } if (i2s_cfg->frame_clk_freq == 0U) { stream->state = I2S_STATE_NOT_READY; return 0; } /* * The memory block passed by the user to the i2s_write function is * tightly packed next to each other. * However for 8-bit word_size the I2S hardware expects the data * to be in 2bytes which does not match what is passed by the user. * This will be addressed in a separate PR once the zephyr API committee * finalizes on an I2S API for the user to probe hardware variations. 
*/ if (i2s_cfg->word_size <= 8) { return -ENOTSUP; } if (!device_is_ready(cfg->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* Figure out function base clock */ if (clock_control_get_rate(cfg->clock_dev, cfg->clock_subsys, &base_frequency)) { return -EINVAL; } /* * Validate the configuration by converting it to SDK * format. */ result = i2s_mcux_flexcomm_cfg_convert(base_frequency, dir, i2s_cfg, &fsl_cfg); if (result != 0) { return result; } /* Apply the configuration */ if (dir == I2S_DIR_RX) { I2S_RxInit(cfg->base, &fsl_cfg); } else { I2S_TxInit(cfg->base, &fsl_cfg); } if ((i2s_cfg->channels > 2) && (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) != I2S_FMT_DATA_FORMAT_I2S) { /* * More than 2 channels are enabled, so we need to enable * secondary channel pairs. */ #if (defined(FSL_FEATURE_I2S_SUPPORT_SECONDARY_CHANNEL) && \ FSL_FEATURE_I2S_SUPPORT_SECONDARY_CHANNEL) for (uint32_t slot = 1; slot < i2s_cfg->channels / 2; slot++) { /* Position must be set so that data does not overlap * with previous channel pair. Each channel pair * will occupy slots of "word_size" bits. */ I2S_EnableSecondaryChannel(cfg->base, slot - 1, false, i2s_cfg->word_size * 2 * slot); } #else /* No support */ return -ENOTSUP; #endif } /* * I2S API definition specifies that a "16 bit word will occupy 2 bytes, * a 24 or 32 bit word will occupy 4 bytes". Therefore, we will assume * that "odd" word sizes will be aligned to 16 or 32 bit boundaries. * * FIFO depth is controlled by the number of bits per word (DATALEN). * Per the RM: * If the data length is 4-16, the FIFO should be filled * with two 16 bit values (one for left, one for right channel) * * If the data length is 17-24, the FIFO should be filled with 2 24 bit * values (one for left, one for right channel). We can just transfer * 4 bytes, since the I2S API specifies 24 bit values would be aligned * to a 32 bit boundary. * * If the data length is 25-32, the FIFO should be filled * with one 32 bit value. 
First value is left channel, second is right. * * All this is to say that we can always use 4 byte transfer widths * with the DMA engine, regardless of the data length. */ stream->dma_cfg.dest_data_size = 4U; stream->dma_cfg.source_data_size = 4U; /* Save configuration for get_config */ memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config)); stream->state = I2S_STATE_READY; return 0; } static inline void i2s_purge_stream_buffers(struct stream *stream, struct k_mem_slab *mem_slab, bool tx) { void *buffer; if (tx) { struct i2s_txq_entry queue_entry; while (k_msgq_get(&stream->in_queue, &queue_entry, K_NO_WAIT) == 0) { k_mem_slab_free(mem_slab, queue_entry.mem_block); } } else { while (k_msgq_get(&stream->in_queue, &buffer, K_NO_WAIT) == 0) { k_mem_slab_free(mem_slab, buffer); } } while (k_msgq_get(&stream->out_queue, &buffer, K_NO_WAIT) == 0) { k_mem_slab_free(mem_slab, buffer); } } static void i2s_mcux_tx_stream_disable(const struct device *dev, bool drop) { const struct i2s_mcux_config *cfg = dev->config; struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->tx; I2S_Type *base = cfg->base; LOG_DBG("Stopping DMA channel %u for TX stream", stream->channel); dma_stop(stream->dev_dma, stream->channel); /* Clear TX error interrupt flag */ base->FIFOSTAT = I2S_FIFOSTAT_TXERR(1U); I2S_DisableInterrupts(base, (uint32_t)kI2S_TxErrorFlag); if (base->CFG1 & I2S_CFG1_MAINENABLE_MASK) { /* Wait until all transmitted data get out of FIFO */ while ((base->FIFOSTAT & I2S_FIFOSTAT_TXEMPTY_MASK) == 0U) { } /* * The last piece of valid data can be still being transmitted from * I2S at this moment */ /* Write additional data to FIFO */ base->FIFOWR = 0U; while ((base->FIFOSTAT & I2S_FIFOSTAT_TXEMPTY_MASK) == 0U) { } /* At this moment the additional data is out of FIFO, we can stop I2S */ /* Disable TX DMA */ base->FIFOCFG &= (~I2S_FIFOCFG_DMATX_MASK); base->FIFOCFG |= I2S_FIFOCFG_EMPTYTX_MASK; I2S_Disable(base); } /* purge buffers queued in the stream */ if 
(drop) { i2s_purge_stream_buffers(stream, stream->cfg.mem_slab, true); } } static void i2s_mcux_rx_stream_disable(const struct device *dev, bool drop) { const struct i2s_mcux_config *cfg = dev->config; struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->rx; I2S_Type *base = cfg->base; LOG_DBG("Stopping DMA channel %u for RX stream", stream->channel); dma_stop(stream->dev_dma, stream->channel); /* Clear RX error interrupt flag */ base->FIFOSTAT = I2S_FIFOSTAT_RXERR(1U); I2S_DisableInterrupts(base, (uint32_t)kI2S_RxErrorFlag); /* stop transfer */ /* Disable Rx DMA */ base->FIFOCFG &= (~I2S_FIFOCFG_DMARX_MASK); base->FIFOCFG |= I2S_FIFOCFG_EMPTYRX_MASK; I2S_Disable(base); /* purge buffers queued in the stream */ if (drop) { i2s_purge_stream_buffers(stream, stream->cfg.mem_slab, false); } } static void i2s_mcux_config_dma_blocks(const struct device *dev, enum i2s_dir dir, uint32_t *buffer, size_t block_size) { const struct i2s_mcux_config *cfg = dev->config; struct i2s_mcux_data *dev_data = dev->data; I2S_Type *base = cfg->base; struct dma_block_config *blk_cfg; struct stream *stream; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; blk_cfg = &dev_data->rx_dma_blocks[0]; memset(blk_cfg, 0, sizeof(dev_data->rx_dma_blocks)); } else { stream = &dev_data->tx; blk_cfg = &dev_data->tx_dma_block; memset(blk_cfg, 0, sizeof(dev_data->tx_dma_block)); } stream->dma_cfg.head_block = blk_cfg; if (dir == I2S_DIR_RX) { blk_cfg->source_address = (uint32_t)&base->FIFORD; blk_cfg->dest_address = (uint32_t)buffer[0]; blk_cfg->block_size = block_size; blk_cfg->next_block = &dev_data->rx_dma_blocks[1]; blk_cfg->dest_reload_en = 1; blk_cfg = &dev_data->rx_dma_blocks[1]; blk_cfg->source_address = (uint32_t)&base->FIFORD; blk_cfg->dest_address = (uint32_t)buffer[1]; blk_cfg->block_size = block_size; } else { blk_cfg->dest_address = (uint32_t)&base->FIFOWR; blk_cfg->source_address = (uint32_t)buffer; blk_cfg->block_size = block_size; } stream->dma_cfg.user_data = 
(void *)dev; dma_config(stream->dev_dma, stream->channel, &stream->dma_cfg); LOG_DBG("dma_slot is %d", stream->dma_cfg.dma_slot); LOG_DBG("channel_direction is %d", stream->dma_cfg.channel_direction); LOG_DBG("complete_callback_en is %d", stream->dma_cfg.complete_callback_en); LOG_DBG("error_callback_dis is %d", stream->dma_cfg.error_callback_dis); LOG_DBG("source_handshake is %d", stream->dma_cfg.source_handshake); LOG_DBG("dest_handshake is %d", stream->dma_cfg.dest_handshake); LOG_DBG("channel_priority is %d", stream->dma_cfg.channel_priority); LOG_DBG("source_chaining_en is %d", stream->dma_cfg.source_chaining_en); LOG_DBG("dest_chaining_en is %d", stream->dma_cfg.dest_chaining_en); LOG_DBG("linked_channel is %d", stream->dma_cfg.linked_channel); LOG_DBG("source_data_size is %d", stream->dma_cfg.source_data_size); LOG_DBG("dest_data_size is %d", stream->dma_cfg.dest_data_size); LOG_DBG("source_burst_length is %d", stream->dma_cfg.source_burst_length); LOG_DBG("dest_burst_length is %d", stream->dma_cfg.dest_burst_length); LOG_DBG("block_count is %d", stream->dma_cfg.block_count); } /* This function is executed in the interrupt context */ static void i2s_mcux_dma_tx_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status) { const struct device *dev = (const struct device *)arg; struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->tx; struct i2s_txq_entry queue_entry; int ret; LOG_DBG("tx cb: %d", stream->state); ret = k_msgq_get(&stream->out_queue, &queue_entry.mem_block, K_NO_WAIT); if (ret == 0) { /* transmission complete. 
free the buffer */ k_mem_slab_free(stream->cfg.mem_slab, queue_entry.mem_block); } else { LOG_ERR("no buffer in output queue for channel %u", channel); } /* Received a STOP trigger, terminate TX immediately */ if (stream->last_block) { stream->state = I2S_STATE_READY; i2s_mcux_tx_stream_disable(dev, false); LOG_DBG("TX STOPPED"); return; } switch (stream->state) { case I2S_STATE_RUNNING: case I2S_STATE_STOPPING: /* get the next buffer from queue */ ret = k_msgq_get(&stream->in_queue, &queue_entry, K_NO_WAIT); if (ret == 0) { /* config the DMA */ i2s_mcux_config_dma_blocks(dev, I2S_DIR_TX, (uint32_t *)queue_entry.mem_block, queue_entry.size); k_msgq_put(&stream->out_queue, &queue_entry.mem_block, K_NO_WAIT); dma_start(stream->dev_dma, stream->channel); } if (ret || status < 0) { /* * DMA encountered an error (status < 0) * or * No buffers in input queue */ LOG_DBG("DMA status %08x channel %u k_msgq_get ret %d", status, channel, ret); if (stream->state == I2S_STATE_STOPPING) { stream->state = I2S_STATE_READY; } else { stream->state = I2S_STATE_ERROR; } i2s_mcux_tx_stream_disable(dev, false); } break; case I2S_STATE_ERROR: i2s_mcux_tx_stream_disable(dev, true); break; } } static void i2s_mcux_dma_rx_callback(const struct device *dma_dev, void *arg, uint32_t channel, int status) { const struct device *dev = (const struct device *)arg; struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->rx; void *buffer; int ret; LOG_DBG("rx cb: %d", stream->state); if (status < 0) { stream->state = I2S_STATE_ERROR; i2s_mcux_rx_stream_disable(dev, false); return; } switch (stream->state) { case I2S_STATE_STOPPING: case I2S_STATE_RUNNING: /* retrieve buffer from input queue */ ret = k_msgq_get(&stream->in_queue, &buffer, K_NO_WAIT); __ASSERT_NO_MSG(ret == 0); /* put buffer to output queue */ ret = k_msgq_put(&stream->out_queue, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("buffer %p -> out_queue %p err %d", buffer, &stream->out_queue, ret); 
i2s_mcux_rx_stream_disable(dev, false); stream->state = I2S_STATE_ERROR; } if (stream->state == I2S_STATE_RUNNING) { /* allocate new buffer for next audio frame */ ret = k_mem_slab_alloc(stream->cfg.mem_slab, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("buffer alloc from slab %p err %d", stream->cfg.mem_slab, ret); i2s_mcux_rx_stream_disable(dev, false); stream->state = I2S_STATE_ERROR; } else { const struct i2s_mcux_config *cfg = dev->config; I2S_Type *base = cfg->base; dma_reload(stream->dev_dma, stream->channel, (uint32_t)&base->FIFORD, (uint32_t)buffer, stream->cfg.block_size); /* put buffer in input queue */ ret = k_msgq_put(&stream->in_queue, &buffer, K_NO_WAIT); if (ret != 0) { LOG_ERR("buffer %p -> in_queue %p err %d", buffer, &stream->in_queue, ret); } dma_start(stream->dev_dma, stream->channel); } } else { /* Received a STOP/DRAIN trigger */ i2s_mcux_rx_stream_disable(dev, true); stream->state = I2S_STATE_READY; } break; case I2S_STATE_ERROR: i2s_mcux_rx_stream_disable(dev, true); break; } } static int i2s_mcux_tx_stream_start(const struct device *dev) { int ret = 0; const struct i2s_mcux_config *cfg = dev->config; struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->tx; I2S_Type *base = cfg->base; struct i2s_txq_entry queue_entry; /* retrieve buffer from input queue */ ret = k_msgq_get(&stream->in_queue, &queue_entry, K_NO_WAIT); if (ret != 0) { LOG_ERR("No buffer in input queue to start transmission"); return ret; } i2s_mcux_config_dma_blocks(dev, I2S_DIR_TX, (uint32_t *)queue_entry.mem_block, queue_entry.size); /* put buffer in output queue */ ret = k_msgq_put(&stream->out_queue, &queue_entry.mem_block, K_NO_WAIT); if (ret != 0) { LOG_ERR("failed to put buffer in output queue"); return ret; } /* Enable TX DMA */ base->FIFOCFG |= I2S_FIFOCFG_DMATX_MASK; ret = dma_start(stream->dev_dma, stream->channel); if (ret < 0) { LOG_ERR("dma_start failed (%d)", ret); return ret; } I2S_Enable(base); I2S_EnableInterrupts(base, 
(uint32_t)kI2S_TxErrorFlag); return 0; } static int i2s_mcux_rx_stream_start(const struct device *dev) { int ret = 0; void *buffer[NUM_RX_DMA_BLOCKS]; const struct i2s_mcux_config *cfg = dev->config; struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->rx; I2S_Type *base = cfg->base; uint8_t num_of_bufs; num_of_bufs = k_mem_slab_num_free_get(stream->cfg.mem_slab); /* * Need at least two buffers on the RX memory slab for * reliable DMA reception. */ if (num_of_bufs <= 1) { return -EINVAL; } for (int i = 0; i < NUM_RX_DMA_BLOCKS; i++) { ret = k_mem_slab_alloc(stream->cfg.mem_slab, &buffer[i], K_NO_WAIT); if (ret != 0) { LOG_ERR("buffer alloc from mem_slab failed (%d)", ret); return ret; } } i2s_mcux_config_dma_blocks(dev, I2S_DIR_RX, (uint32_t *)buffer, stream->cfg.block_size); /* put buffers in input queue */ for (int i = 0; i < NUM_RX_DMA_BLOCKS; i++) { ret = k_msgq_put(&stream->in_queue, &buffer[i], K_NO_WAIT); if (ret != 0) { LOG_ERR("failed to put buffer in input queue"); return ret; } } /* Enable RX DMA */ base->FIFOCFG |= I2S_FIFOCFG_DMARX_MASK; ret = dma_start(stream->dev_dma, stream->channel); if (ret < 0) { LOG_ERR("Failed to start DMA Ch%d (%d)", stream->channel, ret); return ret; } I2S_Enable(base); I2S_EnableInterrupts(base, (uint32_t)kI2S_RxErrorFlag); return 0; } static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd) { struct i2s_mcux_data *dev_data = dev->data; struct stream *stream; unsigned int key; int ret = 0; if (dir == I2S_DIR_RX) { stream = &dev_data->rx; } else if (dir == I2S_DIR_TX) { stream = &dev_data->tx; } else if (dir == I2S_DIR_BOTH) { return -ENOSYS; } else { LOG_ERR("Either RX or TX direction must be selected"); return -EINVAL; } key = irq_lock(); switch (cmd) { case I2S_TRIGGER_START: if (stream->state != I2S_STATE_READY) { LOG_ERR("START trigger: invalid state %d", stream->state); ret = -EIO; break; } if (dir == I2S_DIR_TX) { ret = i2s_mcux_tx_stream_start(dev); } 
else { ret = i2s_mcux_rx_stream_start(dev); } if (ret < 0) { LOG_ERR("START trigger failed %d", ret); break; } stream->state = I2S_STATE_RUNNING; stream->last_block = false; break; case I2S_TRIGGER_STOP: if (stream->state != I2S_STATE_RUNNING) { LOG_ERR("STOP trigger: invalid state %d", stream->state); ret = -EIO; break; } stream->state = I2S_STATE_STOPPING; stream->last_block = true; break; case I2S_TRIGGER_DRAIN: if (stream->state != I2S_STATE_RUNNING) { LOG_ERR("DRAIN trigger: invalid state %d", stream->state); ret = -EIO; break; } stream->state = I2S_STATE_STOPPING; break; case I2S_TRIGGER_DROP: if (stream->state == I2S_STATE_NOT_READY) { LOG_ERR("DROP trigger: invalid state %d", stream->state); ret = -EIO; break; } stream->state = I2S_STATE_READY; if (dir == I2S_DIR_TX) { i2s_mcux_tx_stream_disable(dev, true); } else { i2s_mcux_rx_stream_disable(dev, true); } break; case I2S_TRIGGER_PREPARE: if (stream->state != I2S_STATE_ERROR) { LOG_ERR("PREPARE trigger: invalid state %d", stream->state); ret = -EIO; break; } stream->state = I2S_STATE_READY; if (dir == I2S_DIR_TX) { i2s_mcux_tx_stream_disable(dev, true); } else { i2s_mcux_rx_stream_disable(dev, true); } break; default: LOG_ERR("Unsupported trigger command"); ret = -EINVAL; } irq_unlock(key); return ret; } static int i2s_mcux_read(const struct device *dev, void **mem_block, size_t *size) { struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->rx; void *buffer; int ret = 0; if (stream->state == I2S_STATE_NOT_READY) { LOG_ERR("invalid state %d", stream->state); return -EIO; } ret = k_msgq_get(&stream->out_queue, &buffer, SYS_TIMEOUT_MS(stream->cfg.timeout)); if (ret != 0) { if (stream->state == I2S_STATE_ERROR) { return -EIO; } else { return -EAGAIN; } } *mem_block = buffer; *size = stream->cfg.block_size; return 0; } static int i2s_mcux_write(const struct device *dev, void *mem_block, size_t size) { struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->tx; int 
ret; struct i2s_txq_entry queue_entry = { .mem_block = mem_block, .size = size, }; if (stream->state != I2S_STATE_RUNNING && stream->state != I2S_STATE_READY) { LOG_ERR("invalid state (%d)", stream->state); return -EIO; } ret = k_msgq_put(&stream->in_queue, &queue_entry, SYS_TIMEOUT_MS(stream->cfg.timeout)); if (ret) { LOG_ERR("k_msgq_put failed %d", ret); return ret; } return ret; } static const struct i2s_driver_api i2s_mcux_driver_api = { .configure = i2s_mcux_configure, .config_get = i2s_mcux_config_get, .read = i2s_mcux_read, .write = i2s_mcux_write, .trigger = i2s_mcux_trigger, }; static void i2s_mcux_isr(const struct device *dev) { const struct i2s_mcux_config *cfg = dev->config; struct i2s_mcux_data *dev_data = dev->data; struct stream *stream = &dev_data->tx; I2S_Type *base = cfg->base; uint32_t intstat = base->FIFOINTSTAT; if ((intstat & I2S_FIFOINTSTAT_TXERR_MASK) != 0UL) { /* Clear TX error interrupt flag */ base->FIFOSTAT = I2S_FIFOSTAT_TXERR(1U); stream = &dev_data->tx; stream->state = I2S_STATE_ERROR; } if ((intstat & I2S_FIFOINTSTAT_RXERR_MASK) != 0UL) { /* Clear RX error interrupt flag */ base->FIFOSTAT = I2S_FIFOSTAT_RXERR(1U); stream = &dev_data->rx; stream->state = I2S_STATE_ERROR; } } static int i2s_mcux_init(const struct device *dev) { const struct i2s_mcux_config *cfg = dev->config; struct i2s_mcux_data *const data = dev->data; int err; err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } cfg->irq_config(dev); /* Initialize the buffer queues */ k_msgq_init(&data->tx.in_queue, (char *)data->tx_in_msgs, sizeof(struct i2s_txq_entry), CONFIG_I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT); k_msgq_init(&data->rx.in_queue, (char *)data->rx_in_msgs, sizeof(void *), CONFIG_I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT); k_msgq_init(&data->tx.out_queue, (char *)data->tx_out_msgs, sizeof(void *), CONFIG_I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT); k_msgq_init(&data->rx.out_queue, (char *)data->rx_out_msgs, sizeof(void *), 
CONFIG_I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT); if (data->tx.dev_dma != NULL) { if (!device_is_ready(data->tx.dev_dma)) { LOG_ERR("%s device not ready", data->tx.dev_dma->name); return -ENODEV; } } if (data->rx.dev_dma != NULL) { if (!device_is_ready(data->rx.dev_dma)) { LOG_ERR("%s device not ready", data->rx.dev_dma->name); return -ENODEV; } } data->tx.state = I2S_STATE_NOT_READY; data->rx.state = I2S_STATE_NOT_READY; LOG_DBG("Device %s inited", dev->name); return 0; } #define I2S_DMA_CHANNELS(id) \ .tx = { \ .dev_dma = UTIL_AND( \ DT_INST_DMAS_HAS_NAME(id, tx), \ DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx))), \ .channel = UTIL_AND( \ DT_INST_DMAS_HAS_NAME(id, tx), \ DT_INST_DMAS_CELL_BY_NAME(id, tx, channel)), \ .dma_cfg = { \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_callback = i2s_mcux_dma_tx_callback, \ .block_count = 1, \ } \ }, \ .rx = { \ .dev_dma = UTIL_AND( \ DT_INST_DMAS_HAS_NAME(id, rx), \ DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx))), \ .channel = UTIL_AND( \ DT_INST_DMAS_HAS_NAME(id, rx), \ DT_INST_DMAS_CELL_BY_NAME(id, rx, channel)), \ .dma_cfg = { \ .channel_direction = PERIPHERAL_TO_MEMORY, \ .dma_callback = i2s_mcux_dma_rx_callback, \ .complete_callback_en = true, \ .block_count = NUM_RX_DMA_BLOCKS, \ } \ } #define I2S_MCUX_FLEXCOMM_DEVICE(id) \ PINCTRL_DT_INST_DEFINE(id); \ static void i2s_mcux_config_func_##id(const struct device *dev); \ static const struct i2s_mcux_config i2s_mcux_config_##id = { \ .base = \ (I2S_Type *)DT_INST_REG_ADDR(id), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name),\ .irq_config = i2s_mcux_config_func_##id, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ }; \ static struct i2s_mcux_data i2s_mcux_data_##id = { \ I2S_DMA_CHANNELS(id) \ }; \ DEVICE_DT_INST_DEFINE(id, \ &i2s_mcux_init, \ NULL, \ &i2s_mcux_data_##id, \ &i2s_mcux_config_##id, \ POST_KERNEL, \ CONFIG_I2S_INIT_PRIORITY, \ &i2s_mcux_driver_api); \ static void 
i2s_mcux_config_func_##id(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(id), \ DT_INST_IRQ(id, priority), \ i2s_mcux_isr, \ DEVICE_DT_INST_GET(id), \ 0); \ irq_enable(DT_INST_IRQN(id)); \ } DT_INST_FOREACH_STATUS_OKAY(I2S_MCUX_FLEXCOMM_DEVICE) ```
/content/code_sandbox/drivers/i2s/i2s_mcux_flexcomm.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,528
```unknown config MBOX_NXP_S32_MRU bool "NXP S32 Message Receive Unit (MRU) driver" default y depends on DT_HAS_NXP_S32_MRU_ENABLED select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT help Driver for NXP S32 Message Receive Unit (MRU). ```
/content/code_sandbox/drivers/mbox/Kconfig.nxp_s32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
72
```unknown config MBOX_NXP_MAILBOX bool "NXP Mailbox driver for MBOX" default y depends on DT_HAS_NXP_MBOX_MAILBOX_ENABLED help Driver for NXP Mailbox Unit around MBOX. ```
/content/code_sandbox/drivers/mbox/Kconfig.nxp_mailbox
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
51
```unknown config MBOX_ESP32 bool "ESP32 Soft mailbox driver" default y depends on DT_HAS_ESPRESSIF_MBOX_ESP32_ENABLED help Driver for ESP32 mailbox based on its doorbell mechanism. ```
/content/code_sandbox/drivers/mbox/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
50
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/mbox.h> #include <zephyr/irq.h> #include <zephyr/logging/log.h> #include "stm32_hsem.h" LOG_MODULE_REGISTER(mbox_stm32_hsem_ipc, CONFIG_MBOX_LOG_LEVEL); #define DT_DRV_COMPAT st_mbox_stm32_hsem #define HSEM_CPU1 1 #define HSEM_CPU2 2 #if DT_NODE_EXISTS(DT_NODELABEL(cpu0)) #define HSEM_CPU_ID HSEM_CPU1 #elif DT_NODE_EXISTS(DT_NODELABEL(cpu1)) #define HSEM_CPU_ID HSEM_CPU2 #else #error "Neither cpu0 nor cpu1 defined!" #endif #if HSEM_CPU_ID == HSEM_CPU1 #define MBOX_TX_HSEM_ID CFG_HW_IPM_CPU2_SEMID #define MBOX_RX_HSEM_ID CFG_HW_IPM_CPU1_SEMID #else /* HSEM_CPU2 */ #define MBOX_TX_HSEM_ID CFG_HW_IPM_CPU1_SEMID #define MBOX_RX_HSEM_ID CFG_HW_IPM_CPU2_SEMID #endif /* HSEM_CPU_ID */ #define MAX_CHANNELS 2 struct mbox_stm32_hsem_data { const struct device *dev; mbox_callback_t cb; void *user_data; }; static struct mbox_stm32_hsem_data stm32_hsem_mbox_data; static struct mbox_stm32_hsem_conf { struct stm32_pclken pclken; } stm32_hsem_mbox_conf = { .pclken = { .bus = DT_INST_CLOCKS_CELL(0, bus), .enr = DT_INST_CLOCKS_CELL(0, bits) }, }; static inline void stm32_hsem_enable_rx_interrupt(void) { const uint32_t mask_hsem_id = BIT(MBOX_RX_HSEM_ID); #if HSEM_CPU_ID == HSEM_CPU1 LL_HSEM_EnableIT_C1IER(HSEM, mask_hsem_id); #else /* HSEM_CPU2 */ LL_HSEM_EnableIT_C2IER(HSEM, mask_hsem_id); #endif /* HSEM_CPU_ID */ } static inline void stm32_hsem_disable_rx_interrupt(void) { const uint32_t mask_hsem_id = BIT(MBOX_RX_HSEM_ID); #if HSEM_CPU_ID == HSEM_CPU1 LL_HSEM_DisableIT_C1IER(HSEM, mask_hsem_id); #else /* HSEM_CPU2 */ LL_HSEM_DisableIT_C2IER(HSEM, mask_hsem_id); #endif /* HSEM_CPU_ID */ } static inline void stm32_hsem_clear_rx_interrupt(void) { const uint32_t mask_hsem_id = BIT(MBOX_RX_HSEM_ID); #if HSEM_CPU_ID == HSEM_CPU1 LL_HSEM_ClearFlag_C1ICR(HSEM, mask_hsem_id); #else /* HSEM_CPU2 */ 
LL_HSEM_ClearFlag_C2ICR(HSEM, mask_hsem_id); #endif /* HSEM_CPU_ID */ } static inline uint32_t stm32_hsem_is_rx_interrupt_active(void) { const uint32_t mask_hsem_id = BIT(MBOX_RX_HSEM_ID); #if HSEM_CPU_ID == HSEM_CPU1 return LL_HSEM_IsActiveFlag_C1ISR(HSEM, mask_hsem_id); #else /* HSEM_CPU2 */ return LL_HSEM_IsActiveFlag_C2ISR(HSEM, mask_hsem_id); #endif /* HSEM_CPU_ID */ } static inline bool is_rx_channel_valid(const struct device *dev, uint32_t ch) { /* Only support one RX channel */ return (ch == MBOX_RX_HSEM_ID); } static inline bool is_tx_channel_valid(const struct device *dev, uint32_t ch) { /* Only support one TX channel */ return (ch == MBOX_TX_HSEM_ID); } static void mbox_dispatcher(const struct device *dev) { struct mbox_stm32_hsem_data *data = dev->data; /* Check semaphore rx_semid interrupt status */ if (!stm32_hsem_is_rx_interrupt_active()) { return; } if (data->cb != NULL) { data->cb(dev, MBOX_RX_HSEM_ID, data->user_data, NULL); } /* Clear semaphore rx_semid interrupt status and masked status */ stm32_hsem_clear_rx_interrupt(); } static int mbox_stm32_hsem_send(const struct device *dev, uint32_t channel, const struct mbox_msg *msg) { if (msg) { LOG_ERR("Sending data not supported."); return -EINVAL; } if (!is_tx_channel_valid(dev, channel)) { return -EINVAL; } /* * Locking and unlocking the hardware semaphore * causes an interrupt on the receiving side. 
*/ z_stm32_hsem_lock(MBOX_TX_HSEM_ID, HSEM_LOCK_DEFAULT_RETRY); z_stm32_hsem_unlock(MBOX_TX_HSEM_ID); return 0; } static int mbox_stm32_hsem_register_callback(const struct device *dev, uint32_t channel, mbox_callback_t cb, void *user_data) { struct mbox_stm32_hsem_data *data = dev->data; if (!(is_rx_channel_valid(dev, channel))) { return -EINVAL; } data->cb = cb; data->user_data = user_data; return 0; } static int mbox_stm32_hsem_mtu_get(const struct device *dev) { ARG_UNUSED(dev); /* We only support signalling */ return 0; } static uint32_t mbox_stm32_hsem_max_channels_get(const struct device *dev) { ARG_UNUSED(dev); /* Only two channels supported, one RX and one TX */ return MAX_CHANNELS; } static int mbox_stm32_hsem_set_enabled(const struct device *dev, uint32_t channel, bool enable) { if (!is_rx_channel_valid(dev, channel)) { return -EINVAL; } if (enable) { stm32_hsem_clear_rx_interrupt(); stm32_hsem_enable_rx_interrupt(); } else { stm32_hsem_disable_rx_interrupt(); } return 0; } #if HSEM_CPU_ID == HSEM_CPU1 static int mbox_stm32_clock_init(const struct device *dev) { const struct mbox_stm32_hsem_conf *cfg = dev->config; const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { LOG_ERR("Clock control device not ready."); return -ENODEV; } if (clock_control_on(clk, (clock_control_subsys_t *)&cfg->pclken) != 0) { LOG_WRN("Failed to enable clock."); return -EIO; } return 0; } #endif /* HSEM_CPU_ID */ static int mbox_stm32_hsem_init(const struct device *dev) { struct mbox_stm32_hsem_data *data = dev->data; int ret = 0; data->dev = dev; #if HSEM_CPU_ID == HSEM_CPU1 ret = mbox_stm32_clock_init(dev); if (ret != 0) { return ret; } #endif /* HSEM_CPU_ID */ /* Configure interrupt service routine */ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), mbox_dispatcher, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); return ret; } static const struct mbox_driver_api mbox_stm32_hsem_driver_api = { .send = 
mbox_stm32_hsem_send, .register_callback = mbox_stm32_hsem_register_callback, .mtu_get = mbox_stm32_hsem_mtu_get, .max_channels_get = mbox_stm32_hsem_max_channels_get, .set_enabled = mbox_stm32_hsem_set_enabled, }; DEVICE_DT_INST_DEFINE( 0, mbox_stm32_hsem_init, NULL, &stm32_hsem_mbox_data, &stm32_hsem_mbox_conf, POST_KERNEL, CONFIG_MBOX_INIT_PRIORITY, &mbox_stm32_hsem_driver_api); ```
/content/code_sandbox/drivers/mbox/mbox_stm32_hsem.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,748
```c /* * */ #include <zephyr/drivers/mbox.h> #include <nrfx_ipc.h> #define LOG_LEVEL CONFIG_MBOX_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(mbox_nrfx_ipc); #define DT_DRV_COMPAT nordic_mbox_nrf_ipc struct mbox_nrf_data { mbox_callback_t cb[IPC_CONF_NUM]; void *user_data[IPC_CONF_NUM]; const struct device *dev; uint32_t enabled_mask; }; static struct mbox_nrf_data nrfx_mbox_data; static struct mbox_nrf_conf { uint32_t rx_mask; uint32_t tx_mask; } nrfx_mbox_conf = { .rx_mask = DT_INST_PROP(0, rx_mask), .tx_mask = DT_INST_PROP(0, tx_mask), }; static inline bool is_rx_channel_valid(const struct device *dev, uint32_t ch) { const struct mbox_nrf_conf *conf = dev->config; return ((ch < IPC_CONF_NUM) && (conf->rx_mask & BIT(ch))); } static inline bool is_tx_channel_valid(const struct device *dev, uint32_t ch) { const struct mbox_nrf_conf *conf = dev->config; return ((ch < IPC_CONF_NUM) && (conf->tx_mask & BIT(ch))); } static void mbox_dispatcher(uint8_t event_idx, void *p_context) { struct mbox_nrf_data *data = (struct mbox_nrf_data *) p_context; const struct device *dev = data->dev; uint32_t channel = event_idx; if (!is_rx_channel_valid(dev, channel)) { LOG_WRN("RX event on illegal channel"); } if (!(data->enabled_mask & BIT(channel))) { LOG_WRN("RX event on disabled channel"); } if (data->cb[channel] != NULL) { data->cb[channel](dev, channel, data->user_data[channel], NULL); } } static int mbox_nrf_send(const struct device *dev, uint32_t channel, const struct mbox_msg *msg) { if (msg) { LOG_WRN("Sending data not supported"); } if (!is_tx_channel_valid(dev, channel)) { return -EINVAL; } nrfx_ipc_signal(channel); return 0; } static int mbox_nrf_register_callback(const struct device *dev, uint32_t channel, mbox_callback_t cb, void *user_data) { struct mbox_nrf_data *data = dev->data; if (channel >= IPC_CONF_NUM) { return -EINVAL; } data->cb[channel] = cb; data->user_data[channel] = user_data; return 0; } static int 
mbox_nrf_mtu_get(const struct device *dev) { /* We only support signalling */ return 0; } static uint32_t mbox_nrf_max_channels_get(const struct device *dev) { return IPC_CONF_NUM; } static int mbox_nrf_set_enabled(const struct device *dev, uint32_t channel, bool enable) { struct mbox_nrf_data *data = dev->data; if (!is_rx_channel_valid(dev, channel)) { return -EINVAL; } if ((enable == 0 && (!(data->enabled_mask & BIT(channel)))) || (enable != 0 && (data->enabled_mask & BIT(channel)))) { return -EALREADY; } if (enable && (data->cb[channel] == NULL)) { LOG_WRN("Enabling channel without a registered callback\n"); } if (enable && data->enabled_mask == 0) { irq_enable(DT_INST_IRQN(0)); } if (enable) { data->enabled_mask |= BIT(channel); compiler_barrier(); nrfx_ipc_receive_event_enable(channel); } else { nrfx_ipc_receive_event_disable(channel); compiler_barrier(); data->enabled_mask &= ~BIT(channel); } if (data->enabled_mask == 0) { irq_disable(DT_INST_IRQN(0)); } return 0; } static void enable_dt_channels(const struct device *dev) { const struct mbox_nrf_conf *conf = dev->config; nrfx_ipc_config_t ch_config = { 0 }; if (conf->tx_mask >= BIT(IPC_CONF_NUM)) { LOG_WRN("tx_mask too big (or IPC_CONF_NUM too small)"); } if (conf->rx_mask >= BIT(IPC_CONF_NUM)) { LOG_WRN("rx_mask too big (or IPC_CONF_NUM too small)"); } /* Enable the interrupts on .set_enabled() only */ ch_config.receive_events_enabled = 0; for (size_t ch = 0; ch < IPC_CONF_NUM; ch++) { if (conf->tx_mask & BIT(ch)) { ch_config.send_task_config[ch] = BIT(ch); } if (conf->rx_mask & BIT(ch)) { ch_config.receive_event_config[ch] = BIT(ch); } } nrfx_ipc_config_load(&ch_config); } static int mbox_nrf_init(const struct device *dev) { struct mbox_nrf_data *data = dev->data; data->dev = dev; nrfx_ipc_init(0, mbox_dispatcher, (void *) data); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), nrfx_isr, nrfx_ipc_irq_handler, 0); enable_dt_channels(dev); return 0; } static const struct mbox_driver_api 
mbox_nrf_driver_api = { .send = mbox_nrf_send, .register_callback = mbox_nrf_register_callback, .mtu_get = mbox_nrf_mtu_get, .max_channels_get = mbox_nrf_max_channels_get, .set_enabled = mbox_nrf_set_enabled, }; DEVICE_DT_INST_DEFINE(0, mbox_nrf_init, NULL, &nrfx_mbox_data, &nrfx_mbox_conf, POST_KERNEL, CONFIG_MBOX_INIT_PRIORITY, &mbox_nrf_driver_api); ```
/content/code_sandbox/drivers/mbox/mbox_nrfx_ipc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,276
```c
/*
 *
 *
 * Wrapper of NXP Mailbox driver for Zephyr's MBOX model.
 */

#include <zephyr/devicetree.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util_macro.h>

#include <fsl_mailbox.h>

#define LOG_LEVEL CONFIG_MBOX_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(nxp_mbox_mailbox);

#define DT_DRV_COMPAT nxp_mbox_mailbox

#define MAILBOX_MAX_CHANNELS 4
/* Payload bytes per message: the low 24 bits of the mailbox register. */
#define MAILBOX_MBOX_SIZE 3

/* Pick this/other core IDs from the build target (LPC55S69 dual CM33,
 * otherwise CM4/CM0+ pairs).
 */
#if (defined(LPC55S69_cm33_core0_SERIES) || defined(LPC55S69_cm33_core1_SERIES))
#ifdef LPC55S69_cm33_core0_SERIES
#define MAILBOX_ID_THIS_CPU kMAILBOX_CM33_Core0
#define MAILBOX_ID_OTHER_CPU kMAILBOX_CM33_Core1
#else
#define MAILBOX_ID_THIS_CPU kMAILBOX_CM33_Core1
#define MAILBOX_ID_OTHER_CPU kMAILBOX_CM33_Core0
#endif
#else
#if defined(__CM4_CMSIS_VERSION)
#define MAILBOX_ID_THIS_CPU kMAILBOX_CM4
#define MAILBOX_ID_OTHER_CPU kMAILBOX_CM0Plus
#else
#define MAILBOX_ID_THIS_CPU kMAILBOX_CM0Plus
#define MAILBOX_ID_OTHER_CPU kMAILBOX_CM4
#endif
#endif

/* Top 4 bits of the mailbox register signal "general" (no-data) interrupts;
 * channel N maps to bit (31 - N).
 */
#define GENIRQ_SHIFT (28U)
#define GEN0_IRQ_TRIGGER BIT(GENIRQ_SHIFT + 3U) /*!< General interrupt 3. */
#define GEN1_IRQ_TRIGGER BIT(GENIRQ_SHIFT + 2U) /*!< General interrupt 2. */
#define GEN2_IRQ_TRIGGER BIT(GENIRQ_SHIFT + 1U) /*!< General interrupt 1. */
#define GEN3_IRQ_TRIGGER BIT(GENIRQ_SHIFT + 0U) /*!< General interrupt 0. */

#define DATA_MASK BIT_MASK(24U)

/* Bits 24..27 signal "data" interrupts; payload rides in the low 24 bits. */
#define DATAIRQ_SHIFT (24U)
#define DATA0_IRQ_TRIGGER BIT(DATAIRQ_SHIFT + 3U) /*!< Data interrupt 3. */
#define DATA1_IRQ_TRIGGER BIT(DATAIRQ_SHIFT + 2U) /*!< Data interrupt 2. */
#define DATA2_IRQ_TRIGGER BIT(DATAIRQ_SHIFT + 1U) /*!< Data interrupt 1. */
#define DATA3_IRQ_TRIGGER BIT(DATAIRQ_SHIFT + 0U) /*!< Data interrupt 0. */

struct nxp_mailbox_data {
	mbox_callback_t cb[MAILBOX_MAX_CHANNELS];
	void *user_data[MAILBOX_MAX_CHANNELS];
	bool channel_enable[MAILBOX_MAX_CHANNELS];
	uint32_t received_data;
};

struct nxp_mailbox_config {
	MAILBOX_Type *base;
};

/* ISR: read this core's mailbox register, decode which channel(s) fired
 * (data vs. general flag bits), and invoke the per-channel callbacks.
 */
static void mailbox_isr(const struct device *dev)
{
	struct nxp_mailbox_data *data = dev->data;
	const struct nxp_mailbox_config *config = dev->config;
	mailbox_cpu_id_t cpu_id;

	cpu_id = MAILBOX_ID_THIS_CPU;

	volatile uint32_t mailbox_value = MAILBOX_GetValue(config->base, cpu_id);
	/* Flag bits only; the low 24 bits are payload. */
	uint32_t flags = mailbox_value & (~DATA_MASK);

	/* Clear or the interrupt gets called intermittently */
	MAILBOX_ClearValueBits(config->base, cpu_id, mailbox_value);

	for (int i_channel = 0; i_channel < MAILBOX_MAX_CHANNELS; i_channel++) {
		/* Continue to next channel if channel is not enabled */
		if (!data->channel_enable[i_channel]) {
			continue;
		}

		if ((flags & (DATA0_IRQ_TRIGGER >> i_channel))) {
			/* Data transfer: hand the 24-bit payload to the callback. */
			data->received_data = mailbox_value & DATA_MASK;
			struct mbox_msg msg = {(const void *)&data->received_data,
					       MAILBOX_MBOX_SIZE};

			if (data->cb[i_channel]) {
				data->cb[i_channel](dev, i_channel,
						    data->user_data[i_channel], &msg);
			}
		} else if ((flags & (GEN0_IRQ_TRIGGER >> i_channel))) {
			/* Signalling only: no payload. */
			if (data->cb[i_channel]) {
				data->cb[i_channel](dev, i_channel,
						    data->user_data[i_channel], NULL);
			}
		}
	}

/* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F
 * Store immediate overlapping exception return operation
 * might vector to incorrect interrupt
 */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
	barrier_dsync_fence_full();
#endif
}

/* Send either a bare signal (msg == NULL) or a MAILBOX_MBOX_SIZE-byte
 * payload to the other core by setting bits in its mailbox register.
 */
static int nxp_mailbox_send(const struct device *dev, uint32_t channel,
			    const struct mbox_msg *msg)
{
	uint32_t __aligned(4) data32;
	const struct nxp_mailbox_config *cfg = dev->config;

	if (channel >= MAILBOX_MAX_CHANNELS) {
		return -EINVAL;
	}

	/* Signalling mode. */
	if (msg == NULL) {
		MAILBOX_SetValueBits(cfg->base, MAILBOX_ID_OTHER_CPU,
				     GEN0_IRQ_TRIGGER >> channel);
		return 0;
	}

	/* Data transfer mode. */
	if (msg->size != MAILBOX_MBOX_SIZE) {
		/* We can only send this many bytes at a time. */
		return -EMSGSIZE;
	}

	/* memcpy to avoid issues when msg->data is not word-aligned. */
	memcpy(&data32, msg->data, msg->size);

	MAILBOX_SetValueBits(cfg->base, MAILBOX_ID_OTHER_CPU,
			     (DATA0_IRQ_TRIGGER >> channel) | (data32 & DATA_MASK));

	return 0;
}

static int nxp_mailbox_register_callback(const struct device *dev, uint32_t channel,
					 mbox_callback_t cb, void *user_data)
{
	struct nxp_mailbox_data *data = dev->data;

	if (channel >= MAILBOX_MAX_CHANNELS) {
		return -EINVAL;
	}

	data->cb[channel] = cb;
	data->user_data[channel] = user_data;

	return 0;
}

static int nxp_mailbox_mtu_get(const struct device *dev)
{
	ARG_UNUSED(dev);
	return MAILBOX_MBOX_SIZE;
}

static uint32_t nxp_mailbox_max_channels_get(const struct device *dev)
{
	ARG_UNUSED(dev);
	return MAILBOX_MAX_CHANNELS;
}

/* Enable/disable is a pure software filter checked in the ISR; the hardware
 * interrupt itself stays armed from init.
 */
static int nxp_mailbox_set_enabled(const struct device *dev, uint32_t channel, bool enable)
{
	struct nxp_mailbox_data *data = dev->data;

	if (channel >= MAILBOX_MAX_CHANNELS) {
		return -EINVAL;
	}

	data->channel_enable[channel] = enable;

	return 0;
}

static const struct mbox_driver_api nxp_mailbox_driver_api = {
	.send = nxp_mailbox_send,
	.register_callback = nxp_mailbox_register_callback,
	.mtu_get = nxp_mailbox_mtu_get,
	.max_channels_get = nxp_mailbox_max_channels_get,
	.set_enabled = nxp_mailbox_set_enabled,
};

/* Per-instance data/config/init boilerplate generated from devicetree. */
#define MAILBOX_INSTANCE_DEFINE(idx)                                                          \
	static struct nxp_mailbox_data nxp_mailbox_##idx##_data;                              \
	const static struct nxp_mailbox_config nxp_mailbox_##idx##_config = {                 \
		.base = (MAILBOX_Type *)DT_INST_REG_ADDR(idx),                                \
	};                                                                                    \
	static int nxp_mailbox_##idx##_init(const struct device *dev)                         \
	{                                                                                     \
		ARG_UNUSED(dev);                                                              \
		MAILBOX_Init(nxp_mailbox_##idx##_config.base);                                \
		IRQ_CONNECT(DT_INST_IRQN(idx), DT_INST_IRQ(idx, priority), mailbox_isr,       \
			    DEVICE_DT_INST_GET(idx), 0);                                      \
		irq_enable(DT_INST_IRQN(idx));                                                \
		return 0;                                                                     \
	}                                                                                     \
	DEVICE_DT_INST_DEFINE(idx, nxp_mailbox_##idx##_init, NULL, &nxp_mailbox_##idx##_data, \
			      &nxp_mailbox_##idx##_config, POST_KERNEL,                       \
			      CONFIG_MBOX_INIT_PRIORITY, &nxp_mailbox_driver_api)

#define MAILBOX_INST(idx) MAILBOX_INSTANCE_DEFINE(idx);

DT_INST_FOREACH_STATUS_OKAY(MAILBOX_INST)
```
/content/code_sandbox/drivers/mbox/mbox_nxp_mailbox.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,698
```c
/*
 *
 */

#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/mbox.h>

/* Syscall verification wrapper for mbox_send(): confirm the driver
 * implements .send and that the caller-supplied message struct and its
 * payload are readable from user space before calling the implementation.
 * NOTE(review): a NULL msg (pure signalling mode) would fail the first
 * memory check and oops the caller — confirm that is intended.
 */
static inline int z_vrfy_mbox_send(const struct device *dev, mbox_channel_id_t channel_id,
				   const struct mbox_msg *msg)
{
	K_OOPS(K_SYSCALL_DRIVER_MBOX(dev, send));
	K_OOPS(K_SYSCALL_MEMORY_READ(msg, sizeof(struct mbox_msg)));
	K_OOPS(K_SYSCALL_MEMORY_READ(msg->data, msg->size));

	return z_impl_mbox_send(dev, channel_id, msg);
}
#include <zephyr/syscalls/mbox_send_mrsh.c>

/* Verification wrapper for mbox_mtu_get(): driver capability check only. */
static inline int z_vrfy_mbox_mtu_get(const struct device *dev)
{
	K_OOPS(K_SYSCALL_DRIVER_MBOX(dev, mtu_get));

	return z_impl_mbox_mtu_get(dev);
}
#include <zephyr/syscalls/mbox_mtu_get_mrsh.c>

/* Verification wrapper for mbox_max_channels_get(). */
static inline uint32_t z_vrfy_mbox_max_channels_get(const struct device *dev)
{
	K_OOPS(K_SYSCALL_DRIVER_MBOX(dev, max_channels_get));

	return z_impl_mbox_max_channels_get(dev);
}
#include <zephyr/syscalls/mbox_max_channels_get_mrsh.c>

/* Verification wrapper for mbox_set_enabled(). */
static inline int z_vrfy_mbox_set_enabled(const struct device *dev,
					  mbox_channel_id_t channel_id, bool enabled)
{
	K_OOPS(K_SYSCALL_DRIVER_MBOX(dev, set_enabled));

	return z_impl_mbox_set_enabled(dev, channel_id, enabled);
}
#include <zephyr/syscalls/mbox_set_enabled_mrsh.c>
```
/content/code_sandbox/drivers/mbox/mbox_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
328
```c
/*
 */

#define DT_DRV_COMPAT nordic_nrf_vevif_task_tx

#include <zephyr/devicetree.h>
#include <zephyr/drivers/mbox.h>

#include <haly/nrfy_vpr.h>

/* On nRF54L15 EngA application core only task indexes up to 17 exist. */
#if defined(CONFIG_SOC_NRF54L15_ENGA_CPUAPP)
#define TASKS_IDX_MAX 17U
#else
#define TASKS_IDX_MAX NRF_VPR_TASKS_TRIGGER_MAX
#endif

struct mbox_vevif_task_tx_conf {
	NRF_VPR_Type *vpr;
	uint32_t tasks_mask;
	uint8_t tasks;
};

/* A channel is valid when in range and allowed by the DT tasks mask. */
static inline bool vevif_task_tx_is_valid(const struct device *dev, uint32_t id)
{
	const struct mbox_vevif_task_tx_conf *config = dev->config;

	return ((id <= TASKS_IDX_MAX) && ((config->tasks_mask & BIT(id)) != 0U));
}

/* Signalling-only send: trigger the VPR task for the given channel.
 * A non-NULL msg is rejected because VEVIF tasks carry no data.
 */
static int vevif_task_tx_send(const struct device *dev, uint32_t id,
			      const struct mbox_msg *msg)
{
	const struct mbox_vevif_task_tx_conf *config = dev->config;

	if (!vevif_task_tx_is_valid(dev, id)) {
		return -EINVAL;
	}

	if (msg != NULL) {
		return -ENOTSUP;
	}

	nrfy_vpr_task_trigger(config->vpr, nrfy_vpr_trigger_task_get(id));

	return 0;
}

/* No payload support, so the MTU is zero. */
static int vevif_task_tx_mtu_get(const struct device *dev)
{
	ARG_UNUSED(dev);
	return 0;
}

static uint32_t vevif_task_tx_max_channels_get(const struct device *dev)
{
	const struct mbox_vevif_task_tx_conf *config = dev->config;

	return config->tasks;
}

static const struct mbox_driver_api vevif_task_tx_driver_api = {
	.send = vevif_task_tx_send,
	.mtu_get = vevif_task_tx_mtu_get,
	.max_channels_get = vevif_task_tx_max_channels_get,
};

#define VEVIF_TASK_TX_DEFINE(inst)                                                   \
	BUILD_ASSERT(DT_INST_PROP(inst, nordic_tasks) <= VPR_TASKS_TRIGGER_MaxCount, \
		     "Number of tasks exceeds maximum");                             \
                                                                                     \
	static const struct mbox_vevif_task_tx_conf conf##inst = {                   \
		.vpr = (NRF_VPR_Type *)DT_INST_REG_ADDR(inst),                       \
		.tasks = DT_INST_PROP(inst, nordic_tasks),                           \
		.tasks_mask = DT_INST_PROP(inst, nordic_tasks_mask),                 \
	};                                                                           \
                                                                                     \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, NULL, &conf##inst, POST_KERNEL,      \
			      CONFIG_MBOX_INIT_PRIORITY, &vevif_task_tx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(VEVIF_TASK_TX_DEFINE)
```
/content/code_sandbox/drivers/mbox/mbox_nrf_vevif_task_tx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
589
```c /* * */ #define DT_DRV_COMPAT espressif_mbox_esp32 #include "soc/dport_reg.h" #include "soc/gpio_periph.h" #include <stdint.h> #include <string.h> #include <zephyr/device.h> #include <zephyr/drivers/mbox.h> #include <zephyr/drivers/interrupt_controller/intc_esp32.h> #include <soc.h> #include <zephyr/sys/atomic.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mbox_esp32, CONFIG_MBOX_LOG_LEVEL); #define ESP32_MBOX_LOCK_FREE_VAL 0xB33FFFFF #define ESP32_MBOX_NOOP_VAL 0xFF __packed struct esp32_mbox_control { uint16_t dest_cpu_msg_id[2]; atomic_t lock; }; struct esp32_mbox_memory { volatile uint8_t *pro_cpu_shm; volatile uint8_t *app_cpu_shm; }; struct esp32_mbox_config { uint32_t irq_source_pro_cpu; uint32_t irq_source_app_cpu; }; struct esp32_mbox_data { mbox_callback_t cb; void *user_data; uint32_t this_core_id; uint32_t other_core_id; uint32_t shm_size; struct esp32_mbox_memory shm; struct esp32_mbox_control *control; }; IRAM_ATTR static void esp32_mbox_isr(const struct device *dev) { struct esp32_mbox_data *dev_data = (struct esp32_mbox_data *)dev->data; uint32_t core_id = dev_data->this_core_id; /* clear interrupt flag */ if (core_id == 0) { #if defined(CONFIG_SOC_SERIES_ESP32) DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0); #elif defined(CONFIG_SOC_SERIES_ESP32S3) WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, 0); #endif } else { #if defined(CONFIG_SOC_SERIES_ESP32) DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0); #elif defined(CONFIG_SOC_SERIES_ESP32S3) WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, 0); #endif } /* first of all take the ownership of the shared memory */ while (!atomic_cas(&dev_data->control->lock, ESP32_MBOX_LOCK_FREE_VAL, dev_data->this_core_id)) ; if (dev_data->cb) { volatile void *shm = dev_data->shm.pro_cpu_shm; if (core_id != 0) { shm = dev_data->shm.app_cpu_shm; } } /* unlock the shared memory */ atomic_set(&dev_data->control->lock, ESP32_MBOX_LOCK_FREE_VAL); } static int esp32_mbox_send(const struct device *dev, 
mbox_channel_id_t channel, const struct mbox_msg *msg) { ARG_UNUSED(msg); struct esp32_mbox_data *dev_data = (struct esp32_mbox_data *)dev->data; if (channel > 0xFFFF) { LOG_ERR("Invalid channel"); return -EINVAL; } uint32_t key = irq_lock(); /* try to lock the shared memory */ while (!atomic_cas(&dev_data->control->lock, ESP32_MBOX_LOCK_FREE_VAL, dev_data->this_core_id)) { k_msleep(1); } /* Only the lower 16bits of id are used */ dev_data->control->dest_cpu_msg_id[dev_data->other_core_id] = (uint16_t)(channel & 0xFFFF); /* Generate interrupt in the remote core */ if (dev_data->this_core_id == 0) { atomic_set(&dev_data->control->lock, ESP32_MBOX_LOCK_FREE_VAL); LOG_DBG("Generating interrupt on remote CPU 1 from CPU 0"); #if defined(CONFIG_SOC_SERIES_ESP32) DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1); #elif defined(CONFIG_SOC_SERIES_ESP32S3) WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, SYSTEM_CPU_INTR_FROM_CPU_1); #endif } else { atomic_set(&dev_data->control->lock, ESP32_MBOX_LOCK_FREE_VAL); LOG_DBG("Generating interrupt on remote CPU 0 from CPU 1"); #if defined(CONFIG_SOC_SERIES_ESP32) DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0); #elif defined(CONFIG_SOC_SERIES_ESP32S3) WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, SYSTEM_CPU_INTR_FROM_CPU_0); #endif } irq_unlock(key); return 0; } static int esp32_mbox_register_callback(const struct device *dev, mbox_channel_id_t channel, mbox_callback_t cb, void *user_data) { ARG_UNUSED(channel); struct esp32_mbox_data *data = (struct esp32_mbox_data *)dev->data; if (!cb) { LOG_ERR("Must provide callback"); return -EINVAL; } uint32_t key = irq_lock(); data->cb = cb; data->user_data = user_data; irq_unlock(key); return 0; } static int esp32_mbox_mtu_get(const struct device *dev) { struct esp32_mbox_data *data = (struct esp32_mbox_data *)dev->data; return data->shm_size; } static uint32_t esp32_mbox_max_channels_get(const struct device *dev) { ARG_UNUSED(dev); 
return 1; } static int esp32_mbox_set_enabled(const struct device *dev, mbox_channel_id_t channel, bool enable) { /* The esp32 MBOX is always enabled * but rpmsg backend needs MBOX set enabled to be * implemented so just return success here */ ARG_UNUSED(dev); ARG_UNUSED(enable); ARG_UNUSED(channel); return 0; } static int esp32_mbox_init(const struct device *dev) { struct esp32_mbox_data *data = (struct esp32_mbox_data *)dev->data; struct esp32_mbox_config *cfg = (struct esp32_mbox_config *)dev->config; data->this_core_id = esp_core_id(); data->other_core_id = (data->this_core_id == 0) ? 1 : 0; LOG_DBG("Size of MBOX shared memory: %d", data->shm_size); LOG_DBG("Address of PRO_CPU MBOX shared memory: %p", data->shm.pro_cpu_shm); LOG_DBG("Address of APP_CPU MBOX shared memory: %p", data->shm.app_cpu_shm); LOG_DBG("Address of MBOX control structure: %p", data->control); /* pro_cpu is responsible to initialize the lock of shared memory */ if (data->this_core_id == 0) { esp_intr_alloc(cfg->irq_source_pro_cpu, ESP_INTR_FLAG_IRAM, (intr_handler_t)esp32_mbox_isr, (void *)dev, NULL); atomic_set(&data->control->lock, ESP32_MBOX_LOCK_FREE_VAL); } else { /* app_cpu wait for initialization from pro_cpu, then takes it, * after that releases */ esp_intr_alloc(cfg->irq_source_app_cpu, ESP_INTR_FLAG_IRAM, (intr_handler_t)esp32_mbox_isr, (void *)dev, NULL); LOG_DBG("Waiting CPU0 to sync"); while (!atomic_cas(&data->control->lock, ESP32_MBOX_LOCK_FREE_VAL, data->this_core_id)) ; atomic_set(&data->control->lock, ESP32_MBOX_LOCK_FREE_VAL); LOG_DBG("Synchronization done"); } return 0; } static const struct mbox_driver_api esp32_mbox_driver_api = { .send = esp32_mbox_send, .register_callback = esp32_mbox_register_callback, .mtu_get = esp32_mbox_mtu_get, .max_channels_get = esp32_mbox_max_channels_get, .set_enabled = esp32_mbox_set_enabled, }; #define ESP32_MBOX_SHM_SIZE_BY_IDX(idx) \ DT_INST_PROP(idx, shared_memory_size) \ #define ESP32_MBOX_SHM_ADDR_BY_IDX(idx) \ 
DT_REG_ADDR(DT_PHANDLE(DT_DRV_INST(idx), shared_memory)) \ #define ESP32_MBOX_INIT(idx) \ \ static struct esp32_mbox_config esp32_mbox_device_cfg_##idx = { \ .irq_source_pro_cpu = DT_INST_IRQN(idx), \ .irq_source_app_cpu = DT_INST_IRQN(idx) + 1, \ }; \ \ static struct esp32_mbox_data esp32_mbox_device_data_##idx = { \ .shm_size = ESP32_MBOX_SHM_SIZE_BY_IDX(idx), \ .shm.pro_cpu_shm = (uint8_t *)ESP32_MBOX_SHM_ADDR_BY_IDX(idx), \ .shm.app_cpu_shm = (uint8_t *)ESP32_MBOX_SHM_ADDR_BY_IDX(idx) + \ ESP32_MBOX_SHM_SIZE_BY_IDX(idx)/2, \ .control = (struct esp32_mbox_control *)DT_INST_REG_ADDR(idx), \ }; \ \ DEVICE_DT_INST_DEFINE(idx, &esp32_mbox_init, NULL, \ &esp32_mbox_device_data_##idx, &esp32_mbox_device_cfg_##idx, \ PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \ &esp32_mbox_driver_api); \ DT_INST_FOREACH_STATUS_OKAY(ESP32_MBOX_INIT); ```
/content/code_sandbox/drivers/mbox/mbox_esp32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,115
```c
/*
 */

#define DT_DRV_COMPAT nordic_nrf_vevif_event_rx

#include <zephyr/devicetree.h>
#include <zephyr/drivers/mbox.h>

#include <haly/nrfy_vpr.h>

/* On nRF54L15 EngA application core only event indexes 11..17 exist. */
#if defined(CONFIG_SOC_NRF54L15_ENGA_CPUAPP)
#define EVENTS_IDX_MIN 11U
#define EVENTS_IDX_MAX 17U
#else
#define EVENTS_IDX_MIN NRF_VPR_EVENTS_TRIGGERED_MIN
#define EVENTS_IDX_MAX NRF_VPR_EVENTS_TRIGGERED_MAX
#endif

/* callbacks */
struct mbox_vevif_event_rx_cbs {
	mbox_callback_t cb[EVENTS_IDX_MAX - EVENTS_IDX_MIN + 1U];
	void *user_data[EVENTS_IDX_MAX - EVENTS_IDX_MIN + 1U];
	uint32_t enabled_mask;
};

struct mbox_vevif_event_rx_conf {
	NRF_VPR_Type *vpr;
	uint32_t events_mask;
	uint8_t events;
	void (*irq_connect)(void);
};

/* ISR: scan all triggered events, clear each pending one, and invoke the
 * registered callback for events that are both enabled and registered.
 * Channel ids are the absolute event indexes; callback arrays are indexed
 * relative to EVENTS_IDX_MIN.
 */
static void vevif_event_rx_isr(const void *device)
{
	const struct device *dev = (struct device *)device;
	const struct mbox_vevif_event_rx_conf *config = dev->config;
	struct mbox_vevif_event_rx_cbs *cbs = dev->data;

	for (uint8_t id = EVENTS_IDX_MIN; id < EVENTS_IDX_MAX + 1U; id++) {
		nrf_vpr_event_t event = nrfy_vpr_triggered_event_get(id);

		if (nrfy_vpr_event_check(config->vpr, event)) {
			nrfy_vpr_event_clear(config->vpr, event);

			uint8_t idx = id - EVENTS_IDX_MIN;

			if ((cbs->enabled_mask & BIT(id)) && (cbs->cb[idx] != NULL)) {
				cbs->cb[idx](dev, id, cbs->user_data[idx], NULL);
			}
		}
	}
}

/* An event id is valid when in range and allowed by the DT events mask. */
static inline bool vevif_event_rx_event_is_valid(uint32_t events_mask, uint32_t id)
{
	return ((id <= EVENTS_IDX_MAX) && ((events_mask & BIT(id)) != 0U));
}

static uint32_t vevif_event_rx_max_channels_get(const struct device *dev)
{
	const struct mbox_vevif_event_rx_conf *config = dev->config;

	return config->events;
}

static int vevif_event_rx_register_callback(const struct device *dev, uint32_t id,
					    mbox_callback_t cb, void *user_data)
{
	const struct mbox_vevif_event_rx_conf *config = dev->config;
	struct mbox_vevif_event_rx_cbs *cbs = dev->data;
	/* idx computed before validation but only used after it passes. */
	uint8_t idx = id - EVENTS_IDX_MIN;

	if (!vevif_event_rx_event_is_valid(config->events_mask, id)) {
		return -EINVAL;
	}

	cbs->cb[idx] = cb;
	cbs->user_data[idx] = user_data;

	return 0;
}

/* Enable/disable the VPR interrupt for one event; -EALREADY when the
 * requested state is already in effect.
 */
static int vevif_event_rx_set_enabled(const struct device *dev, uint32_t id, bool enable)
{
	const struct mbox_vevif_event_rx_conf *config = dev->config;
	struct mbox_vevif_event_rx_cbs *cbs = dev->data;

	if (!vevif_event_rx_event_is_valid(config->events_mask, id)) {
		return -EINVAL;
	}

	if (enable) {
		if ((cbs->enabled_mask & BIT(id)) != 0U) {
			return -EALREADY;
		}

		cbs->enabled_mask |= BIT(id);
		nrfy_vpr_int_enable(config->vpr, BIT(id));
	} else {
		if ((cbs->enabled_mask & BIT(id)) == 0U) {
			return -EALREADY;
		}

		cbs->enabled_mask &= ~BIT(id);
		nrfy_vpr_int_disable(config->vpr, BIT(id));
	}

	return 0;
}

static const struct mbox_driver_api vevif_event_rx_driver_api = {
	.max_channels_get = vevif_event_rx_max_channels_get,
	.register_callback = vevif_event_rx_register_callback,
	.set_enabled = vevif_event_rx_set_enabled,
};

static int vevif_event_rx_init(const struct device *dev)
{
	const struct mbox_vevif_event_rx_conf *config = dev->config;

	config->irq_connect();

	return 0;
}

#define VEVIF_EVENT_RX_DEFINE(inst)                                                           \
	BUILD_ASSERT(DT_INST_PROP(inst, nordic_events) <= NRF_VPR_EVENTS_TRIGGERED_COUNT,     \
		     "Number of events exceeds maximum");                                     \
												\
	static void irq_connect##inst(void)                                                   \
	{                                                                                     \
		IRQ_CONNECT(DT_IRQN(DT_DRV_INST(inst)), DT_IRQ(DT_DRV_INST(inst), priority),  \
			    vevif_event_rx_isr, (const void *)DEVICE_DT_GET(DT_DRV_INST(inst)),\
			    0);                                                               \
		irq_enable(DT_IRQN(DT_DRV_INST(inst)));                                       \
	};                                                                                    \
												\
	static struct mbox_vevif_event_rx_cbs data##inst = {                                  \
		.enabled_mask = 0,                                                            \
	};                                                                                    \
	static const struct mbox_vevif_event_rx_conf conf##inst = {                           \
		.vpr = (NRF_VPR_Type *)DT_INST_REG_ADDR(inst),                                \
		.events = DT_INST_PROP(inst, nordic_events),                                  \
		.events_mask = DT_INST_PROP(inst, nordic_events_mask),                        \
		.irq_connect = irq_connect##inst,                                             \
	};                                                                                    \
												\
	DEVICE_DT_INST_DEFINE(inst, vevif_event_rx_init, NULL, &data##inst, &conf##inst,      \
			      POST_KERNEL, CONFIG_MBOX_INIT_PRIORITY, &vevif_event_rx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(VEVIF_EVENT_RX_DEFINE)
```
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,240
```unknown config MBOX_NRF_VEVIF_TASK_RX bool "nRF VEVIF task RX driver" depends on DT_HAS_NORDIC_NRF_VEVIF_TASK_RX_ENABLED default y help Mailbox driver for receiving VEVIF tasks on VPR as CLIC interrupts config MBOX_NRF_VEVIF_TASK_TX bool "nRF VEVIF task TX driver" depends on DT_HAS_NORDIC_NRF_VEVIF_TASK_TX_ENABLED default y help Mailbox driver for transmitting VEVIF tasks to VPR as CLIC interrupts ```
/content/code_sandbox/drivers/mbox/Kconfig.nrf_vevif_task
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
126
```c
/*
 */

#define DT_DRV_COMPAT nordic_nrf_vevif_task_rx

#include <zephyr/devicetree.h>
#include <zephyr/drivers/mbox.h>

#include <hal/nrf_vpr.h>
#include <hal/nrf_vpr_csr.h>
#include <hal/nrf_vpr_csr_vevif.h>

/* On nRF54L15 EngA FLPR core only task indexes 11..17 exist. */
#if defined(CONFIG_SOC_NRF54L15_ENGA_CPUFLPR)
#define TASKS_IDX_MIN 11U
#define TASKS_IDX_MAX 17U
#else
#define TASKS_IDX_MIN NRF_VPR_TASKS_TRIGGER_MIN
#define TASKS_IDX_MAX NRF_VPR_TASKS_TRIGGER_MAX
#endif

#define VEVIF_TASKS_NUM  DT_INST_PROP(0, nordic_tasks)
#define VEVIF_TASKS_MASK DT_INST_PROP(0, nordic_tasks_mask)

BUILD_ASSERT(VEVIF_TASKS_NUM <= VPR_TASKS_TRIGGER_MaxCount, "Number of tasks exceeds maximum");
BUILD_ASSERT(VEVIF_TASKS_NUM == DT_NUM_IRQS(DT_DRV_INST(0)), "# IRQs != # tasks");

/* callbacks */
struct mbox_vevif_task_rx_cbs {
	mbox_callback_t cb[TASKS_IDX_MAX - TASKS_IDX_MIN + 1U];
	void *user_data[TASKS_IDX_MAX - TASKS_IDX_MIN + 1U];
	uint32_t enabled_mask;
};

static struct mbox_vevif_task_rx_cbs cbs;

/* IRQ list */
#define VEVIF_IRQN(idx, _) DT_INST_IRQ_BY_IDX(0, idx, irq)

static const uint8_t vevif_irqs[VEVIF_TASKS_NUM] = {
	LISTIFY(DT_NUM_IRQS(DT_DRV_INST(0)), VEVIF_IRQN, (,))
};

/* Per-task ISR: parameter points into vevif_irqs[], giving the absolute
 * channel; callback arrays are indexed relative to TASKS_IDX_MIN.
 */
static void vevif_task_rx_isr(const void *parameter)
{
	uint8_t channel = *(uint8_t *)parameter;
	uint8_t idx = channel - TASKS_IDX_MIN;

	nrf_vpr_csr_vevif_tasks_clear(BIT(channel));

	if (cbs.cb[idx] != NULL) {
		cbs.cb[idx](DEVICE_DT_INST_GET(0), channel, cbs.user_data[idx], NULL);
	}
}

/* A task id is valid when in range and allowed by the DT tasks mask. */
static inline bool vevif_task_rx_is_task_valid(uint32_t id)
{
	return ((id <= TASKS_IDX_MAX) && ((VEVIF_TASKS_MASK & BIT(id)) != 0U));
}

static uint32_t vevif_task_rx_max_channels_get(const struct device *dev)
{
	ARG_UNUSED(dev);
	return VEVIF_TASKS_NUM;
}

static int vevif_task_rx_register_callback(const struct device *dev, uint32_t id,
					   mbox_callback_t cb, void *user_data)
{
	ARG_UNUSED(dev);
	/* idx computed before validation but only used after it passes. */
	uint8_t idx = id - TASKS_IDX_MIN;

	if (!vevif_task_rx_is_task_valid(id)) {
		return -EINVAL;
	}

	cbs.cb[idx] = cb;
	cbs.user_data[idx] = user_data;

	return 0;
}

/* Enable/disable the per-task CLIC interrupt; -EALREADY when the requested
 * state is already in effect.
 */
static int vevif_task_rx_set_enabled(const struct device *dev, uint32_t id, bool enable)
{
	ARG_UNUSED(dev);
	uint8_t idx = id - TASKS_IDX_MIN;

	if (!vevif_task_rx_is_task_valid(id)) {
		return -EINVAL;
	}

	if (enable) {
		if ((cbs.enabled_mask & BIT(id)) != 0U) {
			return -EALREADY;
		}

		cbs.enabled_mask |= BIT(id);
		irq_enable(vevif_irqs[idx]);
	} else {
		if ((cbs.enabled_mask & BIT(id)) == 0U) {
			return -EALREADY;
		}

		cbs.enabled_mask &= ~BIT(id);
		irq_disable(vevif_irqs[idx]);
	}

	return 0;
}

static const struct mbox_driver_api vevif_task_rx_driver_api = {
	.max_channels_get = vevif_task_rx_max_channels_get,
	.register_callback = vevif_task_rx_register_callback,
	.set_enabled = vevif_task_rx_set_enabled,
};

#define VEVIF_IRQ_CONNECT(idx, _)                                                           \
	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, idx, irq), DT_INST_IRQ_BY_IDX(0, idx, priority),  \
		    vevif_task_rx_isr, &vevif_irqs[idx], 0)

static int vevif_task_rx_init(const struct device *dev)
{
	/* Drop any tasks already pending before interrupts are wired up. */
	nrf_vpr_csr_vevif_tasks_clear(NRF_VPR_TASK_TRIGGER_ALL_MASK);

	LISTIFY(DT_NUM_IRQS(DT_DRV_INST(0)), VEVIF_IRQ_CONNECT, (;));

	return 0;
}

DEVICE_DT_INST_DEFINE(0, vevif_task_rx_init, NULL, NULL, NULL, POST_KERNEL,
		      CONFIG_MBOX_INIT_PRIORITY, &vevif_task_rx_driver_api);
```
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,014
```unknown config MBOX_NXP_IMX_MU bool "NXP i.MX Message Unit (MU) driver" default y depends on DT_HAS_NXP_MBOX_IMX_MU_ENABLED help Driver for NXP i.MX Message Unit. ```
/content/code_sandbox/drivers/mbox/Kconfig.nxp_imx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
57
```unknown config MBOX_STM32_HSEM bool "MBOX STM32 HSEM driver" depends on DT_HAS_ST_MBOX_STM32_HSEM_ENABLED help MBOX Driver for STM32 hardware semaphore ```
/content/code_sandbox/drivers/mbox/Kconfig.stm32_hsem
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
48
```unknown config MBOX_NRFX_IPC bool "MBOX NRF IPC driver" depends on DT_HAS_NORDIC_MBOX_NRF_IPC_ENABLED default y select NRFX_IPC help Driver for Nordic nRF messaging unit, based on nRF IPC peripheral HW. ```
/content/code_sandbox/drivers/mbox/Kconfig.nrfx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
64
```unknown menuconfig MBOX bool "Multi-Channel Inter-Processor Mailbox (MBOX) drivers" help Include multi-channel interrupt-based inter-processor mailboxes drivers in system configuration if MBOX # Include these first so that any properties (e.g. defaults) below can be # overridden (by defining symbols in multiple locations) source "drivers/mbox/Kconfig.nrfx" source "drivers/mbox/Kconfig.nxp_s32" source "drivers/mbox/Kconfig.nxp_imx" source "drivers/mbox/Kconfig.nxp_mailbox" source "drivers/mbox/Kconfig.andes" source "drivers/mbox/Kconfig.nrf_vevif_task" source "drivers/mbox/Kconfig.nrf_vevif_event" source "drivers/mbox/Kconfig.nrf_bellboard" source "drivers/mbox/Kconfig.stm32_hsem" source "drivers/mbox/Kconfig.esp32" config MBOX_INIT_PRIORITY int "MBOX init priority" default 40 help MBOX driver device initialization priority. module = MBOX module-str = mbox source "subsys/logging/Kconfig.template.log_config" endif # MBOX ```
/content/code_sandbox/drivers/mbox/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
249
```c
/*
 */

#define DT_DRV_COMPAT nordic_nrf_bellboard_rx

#include <zephyr/devicetree.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>

#include <hal/nrf_bellboard.h>

#define BELLBOARD_NUM_IRQS 4U

BUILD_ASSERT(DT_NUM_IRQS(DT_DRV_INST(0)) <= BELLBOARD_NUM_IRQS, "# interrupt exceeds maximum");

BUILD_ASSERT((DT_INST_PROP_LEN(0, nordic_interrupt_mapping) % 2) == 0,
	     "# interrupt mappings not specified in pairs");

/* BELLBOARD event mappings: the DT property is a flat list of
 * (event-mask, irq-index) pairs, folded here into one mask per IRQ line.
 */
#define EVT_MAPPING_ITEM(idx) DT_INST_PROP_BY_IDX(0, nordic_interrupt_mapping, idx)
#define BELLBOARD_GET_EVT_MAPPING(idx, _)                                                     \
	COND_CODE_1(                                                                          \
		DT_INST_PROP_HAS_IDX(0, nordic_interrupt_mapping, UTIL_INC(UTIL_X2(idx))),    \
		([EVT_MAPPING_ITEM(UTIL_INC(UTIL_X2(idx)))] = EVT_MAPPING_ITEM(UTIL_X2(idx)),), \
		())

static const uint32_t evt_mappings[BELLBOARD_NUM_IRQS] = {
	LISTIFY(DT_NUM_IRQS(DT_DRV_INST(0)), BELLBOARD_GET_EVT_MAPPING, ())};

/* BELLBOARD instance */
static NRF_BELLBOARD_Type *bellboard = (NRF_BELLBOARD_Type *)DT_INST_REG_ADDR(0);

/* BELLBOARD runtime resources */
static mbox_callback_t cbs[NRF_BELLBOARD_EVENTS_TRIGGERED_COUNT];
static void *cbs_ctx[NRF_BELLBOARD_EVENTS_TRIGGERED_COUNT];
static uint32_t evt_enabled_masks[BELLBOARD_NUM_IRQS];

/* ISR: parameter carries the IRQ line index; only events pending in that
 * line's INTPEND snapshot are cleared and dispatched.
 */
static void bellboard_rx_isr(const void *parameter)
{
	uint8_t irq_idx = (uint8_t)(uintptr_t)parameter;
	uint32_t int_pend;

	int_pend = nrf_bellboard_int_pending_get(bellboard, irq_idx);

	for (uint8_t i = 0U; i < NRF_BELLBOARD_EVENTS_TRIGGERED_COUNT; i++) {
		nrf_bellboard_event_t event = nrf_bellboard_triggered_event_get(i);

		if ((int_pend & BIT(i)) != 0U) {
			/* Only clear those events that have their corresponding bit set
			 * in INTPEND at the time we read it. Otherwise, if two (or more)
			 * events are generated in quick succession, INTPEND may be set for
			 * only one of events, but we clear the EVENTS_TRIGGERED bit for
			 * all of them, thus losing them.
			 *
			 * Assume nrf_bellboard_event_check() is true for the event
			 * that raised this interrupt.
			 */
			__ASSERT_NO_MSG(nrf_bellboard_event_check(bellboard, event));
			nrf_bellboard_event_clear(bellboard, event);

			if (cbs[i] != NULL) {
				cbs[i](DEVICE_DT_INST_GET(0), i, cbs_ctx[i], NULL);
			}
		}
	}
}

static uint32_t bellboard_rx_max_channels_get(const struct device *dev)
{
	ARG_UNUSED(dev);
	return NRF_BELLBOARD_EVENTS_TRIGGERED_COUNT;
}

static int bellboard_rx_register_callback(const struct device *dev, uint32_t id,
					  mbox_callback_t cb, void *user_data)
{
	ARG_UNUSED(dev);

	if (id >= NRF_BELLBOARD_EVENTS_TRIGGERED_COUNT) {
		return -EINVAL;
	}

	cbs[id] = cb;
	cbs_ctx[id] = user_data;

	return 0;
}

/* Enable/disable an event on every IRQ line whose DT mapping contains it;
 * -EINVAL if no line maps the event, -EALREADY on redundant transitions.
 */
static int bellboard_rx_set_enabled(const struct device *dev, uint32_t id, bool enable)
{
	bool valid_found = false;

	ARG_UNUSED(dev);

	if (id >= NRF_BELLBOARD_EVENTS_TRIGGERED_COUNT) {
		return -EINVAL;
	}

	for (uint8_t i = 0U; i < BELLBOARD_NUM_IRQS; i++) {
		uint32_t *evt_enabled_mask;

		if ((evt_mappings[i] == 0U) || ((evt_mappings[i] & BIT(id)) == 0U)) {
			continue;
		}

		valid_found = true;
		evt_enabled_mask = &evt_enabled_masks[i];

		if (enable) {
			if ((*evt_enabled_mask & BIT(id)) != 0U) {
				return -EALREADY;
			}

			*evt_enabled_mask |= BIT(id);
			nrf_bellboard_int_enable(bellboard, i, BIT(id));
		} else {
			if ((*evt_enabled_mask & BIT(id)) == 0U) {
				return -EALREADY;
			}

			*evt_enabled_mask &= ~BIT(id);
			nrf_bellboard_int_disable(bellboard, i, BIT(id));
		}
	}

	if (!valid_found) {
		return -EINVAL;
	}

	return 0;
}

static const struct mbox_driver_api bellboard_rx_driver_api = {
	.max_channels_get = bellboard_rx_max_channels_get,
	.register_callback = bellboard_rx_register_callback,
	.set_enabled = bellboard_rx_set_enabled,
};

#define BELLBOARD_IRQ_CONFIGURE(name, idx)                                                  \
	COND_CODE_1(DT_INST_IRQ_HAS_NAME(0, name),                                          \
		    (IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, name, irq),                         \
				 DT_INST_IRQ_BY_NAME(0, name, priority), bellboard_rx_isr,  \
				 (const void *)idx, 0);                                     \
		     irq_enable(DT_INST_IRQ_BY_NAME(0, name, irq));),                       \
		    ())

/* Init: mask all mapped interrupts, clear stale events, then connect and
 * enable whichever of the up-to-four IRQ lines devicetree names.
 */
static int bellboard_rx_init(const struct device *dev)
{
	uint32_t evt_all_mappings =
		evt_mappings[0] | evt_mappings[1] | evt_mappings[2] | evt_mappings[3];

	ARG_UNUSED(dev);

	nrf_bellboard_int_disable(bellboard, 0, evt_mappings[0]);
	nrf_bellboard_int_disable(bellboard, 1, evt_mappings[1]);
	nrf_bellboard_int_disable(bellboard, 2, evt_mappings[2]);
	nrf_bellboard_int_disable(bellboard, 3, evt_mappings[3]);

	for (uint8_t i = 0U; i < NRF_BELLBOARD_EVENTS_TRIGGERED_COUNT; i++) {
		if ((evt_all_mappings & BIT(i)) != 0U) {
			nrf_bellboard_event_clear(bellboard, nrf_bellboard_triggered_event_get(i));
		}
	}

	BELLBOARD_IRQ_CONFIGURE(irq0, 0);
	BELLBOARD_IRQ_CONFIGURE(irq1, 1);
	BELLBOARD_IRQ_CONFIGURE(irq2, 2);
	BELLBOARD_IRQ_CONFIGURE(irq3, 3);

	return 0;
}

DEVICE_DT_INST_DEFINE(0, bellboard_rx_init, NULL, NULL, NULL, POST_KERNEL,
		      CONFIG_MBOX_INIT_PRIORITY, &bellboard_rx_driver_api);
```
/content/code_sandbox/drivers/mbox/mbox_nrf_bellboard_rx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,487
```c /* * */ #include <zephyr/drivers/mbox.h> #define LOG_LEVEL CONFIG_MBOX_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/spinlock.h> LOG_MODULE_REGISTER(mbox_andes_plic_sw); #define DT_DRV_COMPAT andestech_plic_sw #define IRQ_REG(n) (n >> 5) #define PLIC_BASE(dev) \ ((const struct mbox_andes_conf * const)(dev)->config)->base #define REG_PRIORITY(dev, irq) \ (PLIC_BASE(dev) + 0x0 + (irq << 2)) #define REG_PENDING(dev, irq) \ (PLIC_BASE(dev) + 0x1000 + (IRQ_REG(irq) << 2)) #define REG_ENABLE(dev, hart, irq) \ (PLIC_BASE(dev) + 0x2000 + (hart << 7) + IRQ_REG(irq)) #define REG_CLAIM(dev, hart) \ (PLIC_BASE(dev) + 0x200004 + (hart << 12)) #define IPI_NUM DT_INST_PROP(0, channel_max) static struct mbox_andes_data { mbox_callback_t cb[IPI_NUM]; void *user_data[IPI_NUM]; uint32_t enabled_channel[CONFIG_MP_MAX_NUM_CPUS]; #ifdef CONFIG_SCHED_IPI_SUPPORTED uint32_t reg_cb_channel; uint32_t ipi_channel; #endif } andes_mbox_data; static struct mbox_andes_conf { uint32_t base; uint32_t channel_max; } andes_mbox_conf = { .base = DT_INST_REG_ADDR(0), .channel_max = IPI_NUM, }; static struct k_spinlock mbox_syn; static void plic_sw_irq_set_pending(const struct device *dev, uint32_t irq) { uint32_t pend; k_spinlock_key_t key = k_spin_lock(&mbox_syn); pend = sys_read32(REG_PENDING(dev, irq)); pend |= BIT(irq); sys_write32(pend, REG_PENDING(dev, irq)); k_spin_unlock(&mbox_syn, key); } static inline bool is_channel_valid(const struct device *dev, uint32_t ch) { const struct mbox_andes_conf *conf = dev->config; return (ch <= conf->channel_max); } static int mbox_andes_send(const struct device *dev, uint32_t ch, const struct mbox_msg *msg) { if (msg) { LOG_WRN("Sending data not supported"); } if (!is_channel_valid(dev, ch)) { return -EINVAL; } /* Send IPI by triggering the pending register of PLIC SW. 
*/ plic_sw_irq_set_pending(dev, ch + 1); return 0; } static int mbox_andes_register_callback(const struct device *dev, uint32_t ch, mbox_callback_t cb, void *user_data) { struct mbox_andes_data *data = dev->data; const struct mbox_andes_conf *conf = dev->config; int ret = 0; k_spinlock_key_t key = k_spin_lock(&mbox_syn); if (ch > conf->channel_max) { ret = -EINVAL; goto out; } #ifdef CONFIG_SCHED_IPI_SUPPORTED if (ch & data->ipi_channel & data->reg_cb_channel) { ret = -EALREADY; goto out; } data->reg_cb_channel |= BIT(ch); #endif data->cb[ch] = cb; data->user_data[ch] = user_data; out: k_spin_unlock(&mbox_syn, key); return 0; } static int mbox_andes_mtu_get(const struct device *dev) { /* We only support signalling */ return 0; } static uint32_t mbox_andes_max_channels_get(const struct device *dev) { const struct mbox_andes_conf *conf = dev->config; return conf->channel_max; } static int mbox_andes_set_enabled(const struct device *dev, uint32_t ch, bool enable) { uint32_t en, is_enabled_ch, hartid, cpu_id, irq; struct mbox_andes_data *data = dev->data; int ret = 0; k_spinlock_key_t key = k_spin_lock(&mbox_syn); if (!is_channel_valid(dev, ch)) { ret = -EINVAL; goto out; } irq = ch + 1; hartid = arch_proc_id(); cpu_id = _current_cpu->id; is_enabled_ch = data->enabled_channel[cpu_id] & BIT(ch); if ((!enable && !is_enabled_ch) || (enable && is_enabled_ch)) { ret = -EALREADY; goto out; } if (enable && !(data->cb[ch])) { LOG_WRN("Enabling channel without a registered callback\n"); } en = sys_read32(REG_ENABLE(dev, hartid, irq)); if (enable) { data->enabled_channel[cpu_id] |= BIT(ch); sys_write32(1, REG_PRIORITY(dev, irq)); en |= BIT(irq); } else { data->enabled_channel[cpu_id] &= ~BIT(ch); en &= ~BIT(irq); } sys_write32(en, REG_ENABLE(dev, hartid, irq)); out: k_spin_unlock(&mbox_syn, key); return ret; } static void andes_plic_sw_irq_handler(const struct device *dev) { struct mbox_andes_data *data = dev->data; uint32_t irq, ch, hartid; hartid = arch_proc_id(); /* PLIC 
claim: Get the SW IRQ number generating the interrupt. */ irq = sys_read32(REG_CLAIM(dev, hartid)); ch = irq - 1; if (irq) { sys_write32(irq, REG_CLAIM(dev, hartid)); if (data->cb[ch]) { /* Only one MAILBOX, id is unused and set to 0 */ data->cb[ch](dev, ch, data->user_data[ch], NULL); } } } static int mbox_andes_init(const struct device *dev) { /* Setup IRQ handler for PLIC SW driver */ IRQ_CONNECT(RISCV_IRQ_MSOFT, 1, andes_plic_sw_irq_handler, DEVICE_DT_INST_GET(0), 0); #ifndef CONFIG_SMP irq_enable(RISCV_IRQ_MSOFT); #endif return 0; } static const struct mbox_driver_api mbox_andes_driver_api = { .send = mbox_andes_send, .register_callback = mbox_andes_register_callback, .mtu_get = mbox_andes_mtu_get, .max_channels_get = mbox_andes_max_channels_get, .set_enabled = mbox_andes_set_enabled, }; DEVICE_DT_INST_DEFINE(0, mbox_andes_init, NULL, &andes_mbox_data, &andes_mbox_conf, PRE_KERNEL_1, CONFIG_MBOX_INIT_PRIORITY, &mbox_andes_driver_api); ```
/content/code_sandbox/drivers/mbox/mbox_andes_plic_sw.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,475
```unknown # Atmel SAM GPIO configuration options config GPIO_SAM bool "Atmel SAM GPIO (PORT) driver" default y depends on DT_HAS_ATMEL_SAM_GPIO_ENABLED help Enable support for the Atmel SAM 'PORT' GPIO controllers. config GPIO_SAM4L bool "Atmel SAM4L GPIO (PORT) driver" default y depends on DT_HAS_ATMEL_SAM4L_GPIO_ENABLED help Enable support for the Atmel SAM4L 'PORT' GPIO controllers. ```
/content/code_sandbox/drivers/gpio/Kconfig.sam
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
113
```unknown # Organisation (CSIRO) ABN 41 687 119 230. # Hidden option for turning on the dummy driver for vnd,gpio devices # used in testing. config GPIO_TEST def_bool DT_HAS_VND_GPIO_ENABLED depends on DT_HAS_VND_GPIO_ENABLED ```
/content/code_sandbox/drivers/gpio/Kconfig.test
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
62
```c /* * */ #define DT_DRV_COMPAT nuvoton_nct38xx_gpio #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/gpio/gpio_nct38xx.h> #include <zephyr/drivers/mfd/nct38xx.h> #include <zephyr/kernel.h> #include <zephyr/sys/util_macro.h> #include "gpio_nct38xx.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(gpio_ntc38xx, CONFIG_GPIO_LOG_LEVEL); /* Driver config */ struct gpio_nct38xx_config { /* Multi-function device, parent to the NCT38xx GPIO controller */ const struct device *mfd; /* GPIO ports */ const struct device **sub_gpio_dev; uint8_t sub_gpio_port_num; /* Alert handler */ const struct device *alert_dev; }; /* Driver data */ struct gpio_nct38xx_data { /* NCT38XX device */ const struct device *dev; /* lock NCT38xx register access */ struct k_sem *lock; /* I2C device for the MFD parent */ const struct i2c_dt_spec *i2c_dev; }; void nct38xx_gpio_alert_handler(const struct device *dev) { const struct gpio_nct38xx_config *const config = dev->config; for (int i = 0; i < config->sub_gpio_port_num; i++) { gpio_nct38xx_dispatch_port_isr(config->sub_gpio_dev[i]); } } static int nct38xx_init_interrupt(const struct device *dev) { uint16_t alert, alert_mask = 0; int ret = 0; struct gpio_nct38xx_data *data = dev->data; k_sem_take(data->lock, K_FOREVER); /* Disable all interrupt */ if (i2c_burst_write_dt(data->i2c_dev, NCT38XX_REG_ALERT_MASK, (uint8_t *)&alert_mask, sizeof(alert_mask))) { ret = -EIO; goto unlock; } /* Enable vendor-defined alert for GPIO. 
*/ alert_mask |= BIT(NCT38XX_REG_ALERT_MASK_VENDOR_DEFINDED_ALERT); /* Clear alert */ if (i2c_burst_read_dt(data->i2c_dev, NCT38XX_REG_ALERT, (uint8_t *)&alert, sizeof(alert))) { ret = -EIO; goto unlock; } alert &= alert_mask; if (alert) { if (i2c_burst_write_dt(data->i2c_dev, NCT38XX_REG_ALERT, (uint8_t *)&alert, sizeof(alert))) { ret = -EIO; goto unlock; } } if (i2c_burst_write_dt(data->i2c_dev, NCT38XX_REG_ALERT_MASK, (uint8_t *)&alert_mask, sizeof(alert_mask))) { ret = -EIO; goto unlock; } unlock: k_sem_give(data->lock); return ret; } static int nct38xx_gpio_init(const struct device *dev) { const struct gpio_nct38xx_config *const config = dev->config; struct gpio_nct38xx_data *data = dev->data; /* Verify multi-function parent is ready */ if (!device_is_ready(config->mfd)) { LOG_ERR("%s device not ready", config->mfd->name); return -ENODEV; } data->lock = mfd_nct38xx_get_lock_reference(config->mfd); data->i2c_dev = mfd_nct38xx_get_i2c_dt_spec(config->mfd); if (IS_ENABLED(CONFIG_GPIO_NCT38XX_ALERT)) { nct38xx_init_interrupt(dev); } return 0; } #define GPIO_NCT38XX_DEVICE_INSTANCE(inst) \ static const struct device *sub_gpio_dev_##inst[] = { \ DT_INST_FOREACH_CHILD_STATUS_OKAY_SEP(inst, DEVICE_DT_GET, (,)) \ }; \ static const struct gpio_nct38xx_config gpio_nct38xx_cfg_##inst = { \ .mfd = DEVICE_DT_GET(DT_INST_PARENT(inst)), \ .sub_gpio_dev = sub_gpio_dev_##inst, \ .sub_gpio_port_num = ARRAY_SIZE(sub_gpio_dev_##inst), \ }; \ static struct gpio_nct38xx_data gpio_nct38xx_data_##inst = { \ .dev = DEVICE_DT_INST_GET(inst), \ }; \ DEVICE_DT_INST_DEFINE(inst, nct38xx_gpio_init, NULL, &gpio_nct38xx_data_##inst, \ &gpio_nct38xx_cfg_##inst, POST_KERNEL, \ CONFIG_GPIO_NCT38XX_INIT_PRIORITY, NULL); DT_INST_FOREACH_STATUS_OKAY(GPIO_NCT38XX_DEVICE_INSTANCE) /* The nct38xx MFD parent must be initialized before this driver */ BUILD_ASSERT(CONFIG_GPIO_NCT38XX_INIT_PRIORITY > CONFIG_MFD_INIT_PRIORITY); ```
/content/code_sandbox/drivers/gpio/gpio_nct38xx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,065
```c /* * */ #define DT_DRV_COMPAT nxp_s32_mru #include <zephyr/drivers/mbox.h> #include <zephyr/irq.h> #include <zephyr/sys/util_macro.h> #include <Mru_Ip.h> #define LOG_LEVEL CONFIG_MBOX_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nxp_s32_mru); #define MRU_MAX_INT_GROUPS 2 #define MRU_MAX_CHANNELS 12 #define MRU_MAX_MBOX_PER_CHAN 1 #define MRU_MBOX_SIZE 4 #define MRU_CHANNEL_OFFSET 0x1000 /* Utility macros to convert from GIC index to interrupt group index */ #define _MRU_IRQ_17 MRU_IP_INT_GROUP_0 #define _MRU_IRQ_18 MRU_IP_INT_GROUP_1 #define MRU_INT_GROUP(irq) _CONCAT(_MRU_IRQ_, irq) struct nxp_s32_mru_data { mbox_callback_t cb[MRU_MAX_CHANNELS]; void *user_data[MRU_MAX_CHANNELS]; }; struct nxp_s32_mru_config { RTU_MRU_Type *base; Mru_Ip_ConfigType hw_cfg; void (*config_irq)(void); uint8_t irq_group; }; static inline bool is_rx_channel_valid(const struct device *dev, uint32_t ch) { const struct nxp_s32_mru_config *cfg = dev->config; return ((ch < MRU_MAX_CHANNELS) && (ch < cfg->hw_cfg.NumChannel)); } /* Get a channel's mailbox address, no boundaries validation */ static inline uintptr_t get_mbox_addr(const struct device *dev, uint32_t channel, uint32_t mbox) { const struct nxp_s32_mru_config *cfg = dev->config; return ((uintptr_t)cfg->base + (channel + 1) * MRU_CHANNEL_OFFSET + mbox * MRU_MBOX_SIZE); } static int nxp_s32_mru_send(const struct device *dev, uint32_t channel, const struct mbox_msg *msg) { const struct nxp_s32_mru_config *cfg = dev->config; uint32_t *tx_mbox_addr[MRU_MAX_MBOX_PER_CHAN]; Mru_Ip_TransmitChannelType tx_cfg; Mru_Ip_StatusType status; if (channel >= MRU_MAX_CHANNELS) { return -EINVAL; } if (msg == NULL) { return -EINVAL; } else if (msg->size > (MRU_MBOX_SIZE * MRU_MAX_MBOX_PER_CHAN)) { return -EMSGSIZE; } for (int i = 0; i < MRU_MAX_MBOX_PER_CHAN; i++) { tx_mbox_addr[i] = (uint32_t *)get_mbox_addr(dev, channel, i); } tx_cfg.NumTxMB = MRU_MAX_MBOX_PER_CHAN; tx_cfg.LastTxMBIndex = MRU_MAX_MBOX_PER_CHAN - 1; 
tx_cfg.MBAddList = (volatile uint32 * const *)tx_mbox_addr; tx_cfg.ChMBSTATAdd = (volatile uint32 *)&cfg->base->CHXCONFIG[channel].CH_MBSTAT; status = Mru_Ip_Transmit(&tx_cfg, (const uint32 *)msg->data); return (status == MRU_IP_STATUS_SUCCESS ? 0 : -EBUSY); } static int nxp_s32_mru_register_callback(const struct device *dev, uint32_t channel, mbox_callback_t cb, void *user_data) { struct nxp_s32_mru_data *data = dev->data; if (!is_rx_channel_valid(dev, channel)) { return -EINVAL; } data->cb[channel] = cb; data->user_data[channel] = user_data; return 0; } static int nxp_s32_mru_mtu_get(const struct device *dev) { return (MRU_MBOX_SIZE * MRU_MAX_MBOX_PER_CHAN); } static uint32_t nxp_s32_mru_max_channels_get(const struct device *dev) { return MRU_MAX_CHANNELS; } static int nxp_s32_mru_set_enabled(const struct device *dev, uint32_t channel, bool enable) { struct nxp_s32_mru_data *data = dev->data; const struct nxp_s32_mru_config *cfg = dev->config; const Mru_Ip_ChannelCfgType *ch_cfg = cfg->hw_cfg.ChannelCfg; if (!is_rx_channel_valid(dev, channel)) { return -EINVAL; } if (enable && (data->cb[channel] == NULL)) { LOG_WRN("Enabling channel without a registered callback\n"); } if (enable) { /* * Make the channel's registers writable and then once again after * enabling interrupts and mailboxes so remote can transmit */ *ch_cfg[channel].ChCFG0Add = RTU_MRU_CH_CFG0_CHE(1); *ch_cfg[channel].ChCFG0Add = RTU_MRU_CH_CFG0_IE(1) | RTU_MRU_CH_CFG0_MBE0(1) | RTU_MRU_CH_CFG0_CHE(1); } else { /* * Disable interrupts and mailboxes on this channel, making * the channel's registers not writable afterwards */ *ch_cfg[channel].ChCFG0Add = RTU_MRU_CH_CFG0_IE(0) | RTU_MRU_CH_CFG0_MBE0(0); } return 0; } static int nxp_s32_mru_init(const struct device *dev) { const struct nxp_s32_mru_config *cfg = dev->config; if (cfg->hw_cfg.NumChannel == 0) { /* Nothing to do if no Rx channels are configured */ return 0; } /* All configured Rx channels will be disabled after this call */ 
Mru_Ip_Init(&cfg->hw_cfg); /* * Configure and enable interrupt group, but channel's interrupt are * disabled until calling .set_enabled() */ cfg->config_irq(); return 0; } void nxp_s32_mru_isr(const struct device *dev) { const struct nxp_s32_mru_config *config = dev->config; Mru_Ip_IrqHandler(config->hw_cfg.InstanceId, config->irq_group); } static const struct mbox_driver_api nxp_s32_mru_driver_api = { .send = nxp_s32_mru_send, .register_callback = nxp_s32_mru_register_callback, .mtu_get = nxp_s32_mru_mtu_get, .max_channels_get = nxp_s32_mru_max_channels_get, .set_enabled = nxp_s32_mru_set_enabled, }; #define MRU_BASE(n) ((RTU_MRU_Type *)DT_INST_REG_ADDR(n)) #define MRU_RX_CHANNELS(n) DT_INST_PROP_OR(n, rx_channels, 0) #define MRU_MBOX_ADDR(n, ch, mb) \ (DT_INST_REG_ADDR(n) + ((ch + 1) * MRU_CHANNEL_OFFSET) + (MRU_MBOX_SIZE * mb)) #define MRU_HW_INSTANCE_CHECK(i, n) \ ((DT_INST_REG_ADDR(n) == IP_MRU_##i##_BASE) ? i : 0) #define MRU_HW_INSTANCE(n) \ LISTIFY(__DEBRACKET RTU_MRU_INSTANCE_COUNT, MRU_HW_INSTANCE_CHECK, (|), n) #define MRU_INIT_IRQ_FUNC(n) \ static void nxp_s32_mru_##n##_init_irq(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ nxp_s32_mru_isr, \ DEVICE_DT_INST_GET(n), \ DT_INST_IRQ(n, flags)); \ irq_enable(DT_INST_IRQN(n)); \ } #define MRU_CH_RX_CFG(i, n) \ static volatile const uint32 * const \ nxp_s32_mru_##n##_ch_##i##_rx_mbox_addr[MRU_MAX_MBOX_PER_CHAN] = { \ (uint32 *const)MRU_MBOX_ADDR(n, i, 0), \ }; \ static uint32 nxp_s32_mru_##n##_ch_##i##_buf[MRU_MAX_MBOX_PER_CHAN]; \ static const Mru_Ip_ReceiveChannelType nxp_s32_mru_##n##_ch_##i##_rx_cfg = { \ .ChannelId = i, \ .ChannelIndex = i, \ .NumRxMB = MRU_MAX_MBOX_PER_CHAN, \ .MBAddList = nxp_s32_mru_##n##_ch_##i##_rx_mbox_addr, \ .RxBuffer = nxp_s32_mru_##n##_ch_##i##_buf, \ .ReceiveNotification = nxp_s32_mru_##n##_cb \ } #define MRU_CH_RX_LINK_CFG_MBOX(i, n, chan, intgroup) \ { \ [intgroup] = { &nxp_s32_mru_##n##_ch_##chan##_rx_cfg } \ } #define MRU_CH_RX_LINK_CFG(i, n) \ 
static const Mru_Ip_MBLinkReceiveChannelType \ nxp_s32_mru_##n##_ch_##i##_rx_link_cfg[MRU_MAX_MBOX_PER_CHAN][MRU_MAX_INT_GROUPS] = {\ MRU_CH_RX_LINK_CFG_MBOX(0, n, i, MRU_INT_GROUP(DT_INST_IRQN(n))) \ } #define MRU_CH_CFG(i, n) \ { \ .ChCFG0Add = (volatile uint32 *)&MRU_BASE(n)->CHXCONFIG[i].CH_CFG0, \ .ChCFG0 = RTU_MRU_CH_CFG0_IE(0) | RTU_MRU_CH_CFG0_MBE0(0), \ .ChCFG1Add = (volatile uint32 *)&MRU_BASE(n)->CHXCONFIG[i].CH_CFG1, \ .ChCFG1 = RTU_MRU_CH_CFG1_MBIC0(MRU_INT_GROUP(DT_INST_IRQN(n))), \ .ChMBSTATAdd = (volatile uint32 *)&MRU_BASE(n)->CHXCONFIG[i].CH_MBSTAT, \ .NumMailbox = MRU_MAX_MBOX_PER_CHAN, \ .MBLinkReceiveChCfg = nxp_s32_mru_##n##_ch_##i##_rx_link_cfg \ } /* Callback wrapper to adapt MRU's baremetal driver callback to Zephyr's mbox driver callback */ #define MRU_CALLBACK_WRAPPER_FUNC(n) \ void nxp_s32_mru_##n##_cb(uint8_t channel, const uint32 *buf, uint8_t mbox_count) \ { \ const struct device *dev = DEVICE_DT_INST_GET(n); \ struct nxp_s32_mru_data *data = dev->data; \ \ if (is_rx_channel_valid(dev, channel)) { \ if (data->cb[channel] != NULL) { \ struct mbox_msg msg = { \ .data = (const void *)buf, \ .size = mbox_count * MRU_MBOX_SIZE \ }; \ data->cb[channel](dev, channel, data->user_data[channel], &msg);\ } \ } \ } #define MRU_CH_RX_DEFINITIONS(n) \ MRU_CALLBACK_WRAPPER_FUNC(n) \ MRU_INIT_IRQ_FUNC(n) \ LISTIFY(MRU_RX_CHANNELS(n), MRU_CH_RX_CFG, (;), n); \ LISTIFY(MRU_RX_CHANNELS(n), MRU_CH_RX_LINK_CFG, (;), n); \ static const Mru_Ip_ChannelCfgType nxp_s32_mru_##n##_ch_cfg[] = { \ LISTIFY(MRU_RX_CHANNELS(n), MRU_CH_CFG, (,), n) \ } #define MRU_INSTANCE_DEFINE(n) \ COND_CODE_0(MRU_RX_CHANNELS(n), (EMPTY), (MRU_CH_RX_DEFINITIONS(n))); \ static struct nxp_s32_mru_data nxp_s32_mru_##n##_data; \ static struct nxp_s32_mru_config nxp_s32_mru_##n##_config = { \ .base = MRU_BASE(n), \ .hw_cfg = { \ .InstanceId = MRU_HW_INSTANCE(n), \ .StateIndex = n, \ .NumChannel = MRU_RX_CHANNELS(n), \ .ChannelCfg = COND_CODE_0(MRU_RX_CHANNELS(n), \ (NULL), 
(nxp_s32_mru_##n##_ch_cfg)), \ .NOTIFYAdd = { \ (const volatile uint32 *)&MRU_BASE(n)->NOTIFY[0], \ (const volatile uint32 *)&MRU_BASE(n)->NOTIFY[1] \ }, \ }, \ .irq_group = MRU_INT_GROUP(DT_INST_IRQN(n)), \ .config_irq = COND_CODE_0(MRU_RX_CHANNELS(n), \ (NULL), (nxp_s32_mru_##n##_init_irq)), \ }; \ \ DEVICE_DT_INST_DEFINE(n, nxp_s32_mru_init, NULL, \ &nxp_s32_mru_##n##_data, &nxp_s32_mru_##n##_config, \ POST_KERNEL, CONFIG_MBOX_INIT_PRIORITY, \ &nxp_s32_mru_driver_api); DT_INST_FOREACH_STATUS_OKAY(MRU_INSTANCE_DEFINE) ```
/content/code_sandbox/drivers/mbox/mbox_nxp_s32_mru.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,038
```unknown menuconfig GPIO_FXL6408 bool "FXL6408 I2C-based GPIO chip" default y depends on DT_HAS_FCS_FXL6408_ENABLED depends on I2C help Enable driver for FXL6408 I2C-based GPIO chip. if GPIO_FXL6408 config GPIO_FXL6408_INIT_PRIORITY int "Init priority" default 80 help Device driver initialization priority. module = FXL6408 module-str = fxl6408 source "subsys/logging/Kconfig.template.log_config" endif # GPIO_FXL6408 ```
/content/code_sandbox/drivers/gpio/Kconfig.fxl6408
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
130
```c /* * */ #define DT_DRV_COMPAT x_powers_axp192_gpio #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/gpio/gpio_utils.h> #include <zephyr/drivers/i2c.h> #include <zephyr/kernel.h> #include <zephyr/sys/util_macro.h> #include <zephyr/toolchain.h> #include <zephyr/logging/log.h> #include <zephyr/drivers/mfd/axp192.h> LOG_MODULE_REGISTER(gpio_axp192, CONFIG_GPIO_LOG_LEVEL); struct gpio_axp192_config { struct gpio_driver_config common; struct i2c_dt_spec i2c; const struct device *mfd; uint32_t ngpios; }; struct gpio_axp192_data { struct gpio_driver_data common; struct k_mutex mutex; sys_slist_t cb_list_gpio; }; static int gpio_axp192_port_get_raw(const struct device *dev, uint32_t *value) { int ret; uint8_t port_val; const struct gpio_axp192_config *config = dev->config; if (k_is_in_isr()) { return -EWOULDBLOCK; } ret = mfd_axp192_gpio_read_port(config->mfd, &port_val); if (ret == 0) { *value = port_val; } return ret; } static int gpio_axp192_port_set_masked_raw(const struct device *dev, gpio_port_pins_t mask, gpio_port_value_t value) { int ret; const struct gpio_axp192_config *config = dev->config; if (k_is_in_isr()) { return -EWOULDBLOCK; } ret = mfd_axp192_gpio_write_port(config->mfd, value, mask); return ret; } static int gpio_axp192_port_set_bits_raw(const struct device *dev, gpio_port_pins_t pins) { return gpio_axp192_port_set_masked_raw(dev, pins, pins); } static int gpio_axp192_port_clear_bits_raw(const struct device *dev, gpio_port_pins_t pins) { return gpio_axp192_port_set_masked_raw(dev, pins, 0); } static int gpio_axp192_configure(const struct device *dev, gpio_pin_t pin, gpio_flags_t flags) { const struct gpio_axp192_config *config = dev->config; int ret; enum axp192_gpio_func func; if (pin >= config->ngpios) { LOG_ERR("Invalid gpio pin (%d)", pin); return -EINVAL; } if (k_is_in_isr()) { return -EWOULDBLOCK; } /* Configure pin */ LOG_DBG("Pin: %d / flags=0x%x", pin, flags); if ((flags & GPIO_OUTPUT) != 0) { /* Initialize 
output function */ func = AXP192_GPIO_FUNC_OUTPUT_LOW; if ((flags & GPIO_OPEN_DRAIN) != 0) { func = AXP192_GPIO_FUNC_OUTPUT_OD; } ret = mfd_axp192_gpio_func_ctrl(config->mfd, dev, pin, func); if (ret != 0) { return ret; } /* Set init value */ if ((flags & GPIO_OUTPUT_INIT_LOW) != 0) { ret = mfd_axp192_gpio_write_port(config->mfd, BIT(pin), 0); } else if ((flags & GPIO_OUTPUT_INIT_HIGH) != 0) { ret = mfd_axp192_gpio_write_port(config->mfd, BIT(pin), BIT(pin)); } } else if ((flags & GPIO_INPUT) != 0) { /* Initialize input function */ func = AXP192_GPIO_FUNC_INPUT; ret = mfd_axp192_gpio_func_ctrl(config->mfd, dev, pin, func); if (ret != 0) { return ret; } /* Configure pull-down */ if ((flags & GPIO_PULL_UP) != 0) { /* not supported */ LOG_ERR("Pull-Up not supported"); ret = -ENOTSUP; } else if ((flags & GPIO_PULL_DOWN) != 0) { /* out = 0 means pull-down*/ ret = mfd_axp192_gpio_pd_ctrl(config->mfd, pin, true); } else { ret = mfd_axp192_gpio_pd_ctrl(config->mfd, pin, false); } } else { /* Neither input nor output mode is selected */ LOG_INF("No valid gpio mode selected"); ret = -ENOTSUP; } return ret; } static int gpio_axp192_port_toggle_bits(const struct device *dev, gpio_port_pins_t pins) { struct gpio_axp192_data *data = dev->data; int ret; uint32_t value; k_mutex_lock(&data->mutex, K_FOREVER); ret = gpio_axp192_port_get_raw(dev, &value); if (ret == 0) { ret = gpio_axp192_port_set_masked_raw(dev, pins, ~value); } k_mutex_unlock(&data->mutex); return ret; } static int gpio_axp192_pin_interrupt_configure(const struct device *dev, gpio_pin_t pin, enum gpio_int_mode mode, enum gpio_int_trig trig) { ARG_UNUSED(dev); ARG_UNUSED(pin); ARG_UNUSED(mode); ARG_UNUSED(trig); return -ENOTSUP; } #if defined(CONFIG_GPIO_GET_CONFIG) || defined(CONFIG_GPIO_GET_DIRECTION) static int gpio_axp192_get_config(const struct device *dev, gpio_pin_t pin, gpio_flags_t *out_flags) { const struct gpio_axp192_config *config = dev->config; enum axp192_gpio_func func; bool pd_enabled; int ret; if 
(k_is_in_isr()) { return -EWOULDBLOCK; } ret = mfd_axp192_gpio_func_get(config->mfd, pin, &func); if (ret != 0) { return ret; } /* Set OUTPUT/INPUT flags */ *out_flags = 0; switch (func) { case AXP192_GPIO_FUNC_INPUT: *out_flags |= GPIO_INPUT; break; case AXP192_GPIO_FUNC_OUTPUT_OD: *out_flags |= GPIO_OUTPUT | GPIO_OPEN_DRAIN; break; case AXP192_GPIO_FUNC_OUTPUT_LOW: *out_flags |= GPIO_OUTPUT; break; case AXP192_GPIO_FUNC_LDO: __fallthrough; case AXP192_GPIO_FUNC_ADC: __fallthrough; case AXP192_GPIO_FUNC_FLOAT: __fallthrough; default: LOG_DBG("Pin %d not configured as GPIO", pin); break; } /* Query pull-down config status */ ret = mfd_axp192_gpio_pd_get(config->mfd, pin, &pd_enabled); if (ret != 0) { return ret; } if (pd_enabled) { *out_flags |= GPIO_PULL_DOWN; } return 0; } #endif /* CONFIG_GPIO_GET_CONFIG */ #ifdef CONFIG_GPIO_GET_DIRECTION static int gpio_axp192_port_get_direction(const struct device *dev, gpio_port_pins_t map, gpio_port_pins_t *inputs, gpio_port_pins_t *outputs) { const struct gpio_axp192_config *config = dev->config; gpio_flags_t flags; int ret; /* reset output variables */ *inputs = 0; *outputs = 0; /* loop through all */ for (gpio_pin_t gpio = 0; gpio < config->ngpios; gpio++) { if ((map & (1u << gpio)) != 0) { /* use internal get_config method to get gpio flags */ ret = gpio_axp192_get_config(dev, gpio, &flags); if (ret != 0) { return ret; } /* Set output and input flags */ if ((flags & GPIO_OUTPUT) != 0) { *outputs |= (1u << gpio); } else if (0 != (flags & GPIO_INPUT)) { *inputs |= (1u << gpio); } } } return 0; } #endif /* CONFIG_GPIO_GET_DIRECTION */ static int gpio_axp192_manage_callback(const struct device *dev, struct gpio_callback *callback, bool set) { struct gpio_axp192_data *const data = dev->data; return gpio_manage_callback(&data->cb_list_gpio, callback, set); } static const struct gpio_driver_api gpio_axp192_api = { .pin_configure = gpio_axp192_configure, .port_get_raw = gpio_axp192_port_get_raw, .port_set_masked_raw = 
gpio_axp192_port_set_masked_raw, .port_set_bits_raw = gpio_axp192_port_set_bits_raw, .port_clear_bits_raw = gpio_axp192_port_clear_bits_raw, .port_toggle_bits = gpio_axp192_port_toggle_bits, .pin_interrupt_configure = gpio_axp192_pin_interrupt_configure, .manage_callback = gpio_axp192_manage_callback, #ifdef CONFIG_GPIO_GET_DIRECTION .port_get_direction = gpio_axp192_port_get_direction, #endif /* CONFIG_GPIO_GET_DIRECTION */ #ifdef CONFIG_GPIO_GET_CONFIG .pin_get_config = gpio_axp192_get_config, #endif }; static int gpio_axp192_init(const struct device *dev) { const struct gpio_axp192_config *config = dev->config; struct gpio_axp192_data *data = dev->data; LOG_DBG("Initializing"); if (!i2c_is_ready_dt(&config->i2c)) { LOG_ERR("device not ready"); return -ENODEV; } return k_mutex_init(&data->mutex); } #define GPIO_AXP192_DEFINE(inst) \ static const struct gpio_axp192_config gpio_axp192_config##inst = { \ .common = \ { \ .port_pin_mask = GPIO_PORT_PIN_MASK_FROM_DT_INST(inst), \ }, \ .i2c = I2C_DT_SPEC_GET(DT_INST_PARENT(inst)), \ .mfd = DEVICE_DT_GET(DT_INST_PARENT(inst)), \ .ngpios = DT_INST_PROP(inst, ngpios), \ }; \ \ static struct gpio_axp192_data gpio_axp192_data##inst; \ \ DEVICE_DT_INST_DEFINE(inst, gpio_axp192_init, NULL, &gpio_axp192_data##inst, \ &gpio_axp192_config##inst, POST_KERNEL, \ CONFIG_GPIO_AXP192_INIT_PRIORITY, &gpio_axp192_api); DT_INST_FOREACH_STATUS_OKAY(GPIO_AXP192_DEFINE) ```
/content/code_sandbox/drivers/gpio/gpio_axp192.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,277
```unknown # MCUX GPIO configuration options config GPIO_MCUX bool "MCUX GPIO driver" default y depends on DT_HAS_NXP_KINETIS_GPIO_ENABLED help Enable the MCUX pinmux driver. ```
/content/code_sandbox/drivers/gpio/Kconfig.mcux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
47
```c /* * */ #define DT_DRV_COMPAT xlnx_xps_gpio_1_00_a #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/irq.h> #include <zephyr/sys/sys_io.h> #include <zephyr/drivers/gpio/gpio_utils.h> /* AXI GPIO v2 register offsets (See Xilinx PG144 for details) */ #define GPIO_DATA_OFFSET 0x0000 #define GPIO_TRI_OFFSET 0x0004 #define GPIO2_OFFSET 0x0008 #define GPIO2_DATA_OFFSET 0x0008 #define GPIO2_TRI_OFFSET 0x000c #define GIER_OFFSET 0x011c #define IPISR_OFFSET 0x0120 #define IPIER_OFFSET 0x0128 /* GIER bit definitions */ #define GIER_GIE BIT(31) /* IPISR and IPIER bit definitions */ #define IPIXX_CH1_IE BIT(0) #define IPIXX_CH2_IE BIT(1) /* Maximum number of GPIOs supported per channel */ #define MAX_GPIOS 32 struct gpio_xlnx_axi_config { /* gpio_driver_config needs to be first */ struct gpio_driver_config common; mm_reg_t base; uint8_t channel; bool all_inputs: 1; bool all_outputs: 1; bool interrupts_available: 1; #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) void (*irq_config_func)(const struct device *dev); #endif }; struct gpio_xlnx_axi_data { /* gpio_driver_data needs to be first */ struct gpio_driver_data common; /* Shadow registers for data out and tristate */ uint32_t dout; uint32_t tri; #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) uint32_t previous_data_reading; sys_slist_t callbacks; uint32_t rising_edge_interrupts; uint32_t falling_edge_interrupts; /* Workaround to handle channel 2 interrupts from channel 1*/ const struct device *other_channel_device; #endif }; static inline uint32_t gpio_xlnx_axi_read_data(const struct device *dev) { const struct gpio_xlnx_axi_config *config = dev->config; return sys_read32(config->base + (config->channel * GPIO2_OFFSET) + GPIO_DATA_OFFSET); } static inline void gpio_xlnx_axi_write_data(const struct device *dev, uint32_t val) { const struct gpio_xlnx_axi_config *config = dev->config; sys_write32(val, config->base + (config->channel * GPIO2_OFFSET) + GPIO_DATA_OFFSET); } static inline void 
gpio_xlnx_axi_write_tri(const struct device *dev, uint32_t val) { const struct gpio_xlnx_axi_config *config = dev->config; sys_write32(val, config->base + (config->channel * GPIO2_OFFSET) + GPIO_TRI_OFFSET); } static int gpio_xlnx_axi_pin_configure(const struct device *dev, gpio_pin_t pin, gpio_flags_t flags) { const struct gpio_xlnx_axi_config *config = dev->config; struct gpio_xlnx_axi_data *data = dev->data; unsigned int key; if (!(BIT(pin) & config->common.port_pin_mask)) { return -EINVAL; } if (((flags & GPIO_INPUT) != 0) && ((flags & GPIO_OUTPUT) != 0)) { return -ENOTSUP; } if ((flags & GPIO_SINGLE_ENDED) != 0) { return -ENOTSUP; } if ((flags & (GPIO_PULL_UP | GPIO_PULL_DOWN)) != 0) { return -ENOTSUP; } if (((flags & GPIO_INPUT) != 0) && config->all_outputs) { return -ENOTSUP; } if (((flags & GPIO_OUTPUT) != 0) && config->all_inputs) { return -ENOTSUP; } key = irq_lock(); switch (flags & GPIO_DIR_MASK) { case GPIO_INPUT: data->tri |= BIT(pin); break; case GPIO_OUTPUT: if ((flags & GPIO_OUTPUT_INIT_HIGH) != 0) { data->dout |= BIT(pin); } else if ((flags & GPIO_OUTPUT_INIT_LOW) != 0) { data->dout &= ~BIT(pin); } data->tri &= ~BIT(pin); break; default: return -ENOTSUP; } gpio_xlnx_axi_write_data(dev, data->dout); gpio_xlnx_axi_write_tri(dev, data->tri); irq_unlock(key); return 0; } static int gpio_xlnx_axi_port_get_raw(const struct device *dev, gpio_port_value_t *value) { *value = gpio_xlnx_axi_read_data(dev); return 0; } static int gpio_xlnx_axi_port_set_masked_raw(const struct device *dev, gpio_port_pins_t mask, gpio_port_value_t value) { struct gpio_xlnx_axi_data *data = dev->data; unsigned int key; key = irq_lock(); data->dout = (data->dout & ~mask) | (mask & value); gpio_xlnx_axi_write_data(dev, data->dout); irq_unlock(key); return 0; } static int gpio_xlnx_axi_port_set_bits_raw(const struct device *dev, gpio_port_pins_t pins) { struct gpio_xlnx_axi_data *data = dev->data; unsigned int key; key = irq_lock(); data->dout |= pins; gpio_xlnx_axi_write_data(dev, 
data->dout); irq_unlock(key); return 0; } static int gpio_xlnx_axi_port_clear_bits_raw(const struct device *dev, gpio_port_pins_t pins) { struct gpio_xlnx_axi_data *data = dev->data; unsigned int key; key = irq_lock(); data->dout &= ~pins; gpio_xlnx_axi_write_data(dev, data->dout); irq_unlock(key); return 0; } static int gpio_xlnx_axi_port_toggle_bits(const struct device *dev, gpio_port_pins_t pins) { struct gpio_xlnx_axi_data *data = dev->data; unsigned int key; key = irq_lock(); data->dout ^= pins; gpio_xlnx_axi_write_data(dev, data->dout); irq_unlock(key); return 0; } #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) /** * Enables interrupts for the given pins on the channel * The axi gpio can only enable interrupts for an entire port, so we need to track * the pins and modes ourselves. */ static int gpio_xlnx_axi_pin_interrupt_configure(const struct device *dev, gpio_pin_t pin, enum gpio_int_mode mode, enum gpio_int_trig trig) { const struct gpio_xlnx_axi_config *config = dev->config; struct gpio_xlnx_axi_data *data = dev->data; const uint32_t pin_mask = BIT(pin); const uint32_t chan_mask = BIT(config->channel); unsigned int key; uint32_t enabled_interrupts; if (!config->interrupts_available) { return -ENOTSUP; } if ((mode & GPIO_INT_ENABLE) && !(mode & GPIO_INT_EDGE)) { /* only edge detection is supported */ return -ENOTSUP; } key = irq_lock(); data->rising_edge_interrupts &= ~pin_mask; data->falling_edge_interrupts &= ~pin_mask; if (mode & GPIO_INT_ENABLE) { if (trig & GPIO_INT_HIGH_1) { data->rising_edge_interrupts |= pin_mask; } if (trig & GPIO_INT_LOW_0) { data->falling_edge_interrupts |= pin_mask; } } /* if there's at least one pin interrupt enabled on the channel, enable the interrupts * for that entire channel without changing the other channel */ enabled_interrupts = sys_read32(config->base + IPIER_OFFSET); if (data->rising_edge_interrupts || data->falling_edge_interrupts) { if (!(enabled_interrupts & chan_mask)) { /* Clear any pending interrupts and 
update last state before enabling * interrupt */ if (sys_read32(config->base + IPISR_OFFSET) & chan_mask) { sys_write32(chan_mask, config->base + IPISR_OFFSET); } data->previous_data_reading = gpio_xlnx_axi_read_data(dev); enabled_interrupts |= chan_mask; } } else { enabled_interrupts &= ~chan_mask; } sys_write32(enabled_interrupts, config->base + IPIER_OFFSET); irq_unlock(key); return 0; } #endif #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) static int gpio_xlnx_axi_manage_callback(const struct device *dev, struct gpio_callback *callback, bool set) { struct gpio_xlnx_axi_data *data = dev->data; return gpio_manage_callback(&data->callbacks, callback, set); } #endif #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) /** * Returns the pins on this devices channel which changed and also have an interrupt enabled on that * pin. Also clears the pending interrupt for that channel. */ static uint32_t gpio_xlnx_axi_get_pending_int(const struct device *dev) { const struct gpio_xlnx_axi_config *config = dev->config; struct gpio_xlnx_axi_data *data = dev->data; const uint32_t chan_mask = BIT(config->channel); unsigned int key; uint32_t interrupt_flags; uint32_t current_data; uint32_t changed_pins; uint32_t changed_and_rising_edge; uint32_t changed_and_falling_edge; uint32_t interrupts; key = irq_lock(); /* make sure interrupt was for this channel */ interrupt_flags = sys_read32(config->base + IPISR_OFFSET); if (!(interrupt_flags & chan_mask)) { irq_unlock(key); return 0; } /* clear pending interrupt for the whole channel */ sys_write32(chan_mask, config->base + IPISR_OFFSET); /* find which pins changed and also have an interrupt enabled */ current_data = gpio_xlnx_axi_read_data(dev); changed_pins = current_data ^ data->previous_data_reading; data->previous_data_reading = current_data; changed_and_rising_edge = (changed_pins & current_data); changed_and_falling_edge = (changed_pins & ~current_data); interrupts = (changed_and_rising_edge & data->rising_edge_interrupts) | 
(changed_and_falling_edge & data->falling_edge_interrupts); irq_unlock(key); return interrupts; } #endif #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) static void gpio_xlnx_axi_isr(const struct device *dev) { struct gpio_xlnx_axi_data *data = dev->data; gpio_fire_callbacks(&data->callbacks, dev, gpio_xlnx_axi_get_pending_int(dev)); /* Since both channels use the same interrupt, only the first channel registers the ISR. * If the second channel is also enabled, then check for any events on it as well. */ if (data->other_channel_device) { struct gpio_xlnx_axi_data *other_data = data->other_channel_device->data; gpio_fire_callbacks(&other_data->callbacks, data->other_channel_device, gpio_xlnx_axi_get_pending_int(data->other_channel_device)); } } #endif static int gpio_xlnx_axi_init(const struct device *dev) { struct gpio_xlnx_axi_data *data = dev->data; gpio_xlnx_axi_write_data(dev, data->dout); gpio_xlnx_axi_write_tri(dev, data->tri); #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) const struct gpio_xlnx_axi_config *config = dev->config; if (config->irq_config_func != NULL) { /* Note: This is only called for the first channel, even if the second is enabled. * Need to perform the setup for both channels. * Disable all interrupts. 
*/ sys_write32(0x0, config->base + IPIER_OFFSET); /* Clear all pending interrupts */ sys_write32(sys_read32(config->base + IPISR_OFFSET), config->base + IPISR_OFFSET); /* Enable global interrupts for this gpio device */ sys_write32(GIER_GIE, config->base + GIER_OFFSET); config->irq_config_func(dev); } #endif return 0; } static const struct gpio_driver_api gpio_xlnx_axi_driver_api = { .pin_configure = gpio_xlnx_axi_pin_configure, .port_get_raw = gpio_xlnx_axi_port_get_raw, .port_set_masked_raw = gpio_xlnx_axi_port_set_masked_raw, .port_set_bits_raw = gpio_xlnx_axi_port_set_bits_raw, .port_clear_bits_raw = gpio_xlnx_axi_port_clear_bits_raw, .port_toggle_bits = gpio_xlnx_axi_port_toggle_bits, #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) .pin_interrupt_configure = gpio_xlnx_axi_pin_interrupt_configure, .manage_callback = gpio_xlnx_axi_manage_callback, .get_pending_int = gpio_xlnx_axi_get_pending_int, #endif }; #define GPIO_XLNX_AXI_GPIO2_HAS_COMPAT_STATUS_OKAY(n) \ UTIL_AND(DT_NODE_HAS_COMPAT(DT_INST_CHILD(n, gpio2), xlnx_xps_gpio_1_00_a_gpio2), \ DT_NODE_HAS_STATUS(DT_INST_CHILD(n, gpio2), okay)) #define GPIO_XLNX_AXI_GPIO2_COND_INIT(n) \ IF_ENABLED(UTIL_AND(DT_INST_PROP_OR(n, xlnx_is_dual, 1), \ GPIO_XLNX_AXI_GPIO2_HAS_COMPAT_STATUS_OKAY(n)), \ (GPIO_XLNX_AXI_GPIO2_INIT(n))); #define GPIO_XLNX_AXI_GPIO2_INIT(n) \ static struct gpio_xlnx_axi_data gpio_xlnx_axi_##n##_2_data = { \ .dout = DT_INST_PROP_OR(n, xlnx_dout_default_2, 0), \ .tri = DT_INST_PROP_OR(n, xlnx_tri_default_2, GENMASK(MAX_GPIOS - 1, 0)), \ }; \ \ static const struct gpio_xlnx_axi_config gpio_xlnx_axi_##n##_2_config = { \ .common = \ { \ .port_pin_mask = GPIO_PORT_PIN_MASK_FROM_NGPIOS( \ DT_INST_PROP_OR(n, xlnx_gpio2_width, MAX_GPIOS)), \ }, \ .base = DT_INST_REG_ADDR(n), \ .channel = 1, \ .all_inputs = DT_INST_PROP_OR(n, xlnx_all_inputs_2, 0), \ .all_outputs = DT_INST_PROP_OR(n, xlnx_all_outputs_2, 0), \ .interrupts_available = DT_INST_NODE_HAS_PROP(n, interrupts)}; \ \ 
DEVICE_DT_DEFINE(DT_INST_CHILD(n, gpio2), &gpio_xlnx_axi_init, NULL, \ &gpio_xlnx_axi_##n##_2_data, &gpio_xlnx_axi_##n##_2_config, PRE_KERNEL_1, \ CONFIG_GPIO_INIT_PRIORITY, &gpio_xlnx_axi_driver_api); #define GPIO_XLNX_AXI_INIT(n) \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, interrupts), \ (static void gpio_xlnx_axi_##n##_irq_config(const struct device *dev);)) \ \ GPIO_XLNX_AXI_GPIO2_COND_INIT(n); \ \ static struct gpio_xlnx_axi_data gpio_xlnx_axi_##n##_data = { \ .dout = DT_INST_PROP_OR(n, xlnx_dout_default, 0), \ .tri = DT_INST_PROP_OR(n, xlnx_tri_default, GENMASK(MAX_GPIOS - 1, 0)), \ IF_ENABLED(UTIL_AND(UTIL_AND(DT_INST_NODE_HAS_PROP(n, interrupts), \ DT_INST_PROP_OR(n, xlnx_is_dual, 1)), \ GPIO_XLNX_AXI_GPIO2_HAS_COMPAT_STATUS_OKAY(n)), \ (.other_channel_device = DEVICE_DT_GET(DT_INST_CHILD(n, gpio2))))}; \ \ static const struct gpio_xlnx_axi_config gpio_xlnx_axi_##n##_config = { \ .common = \ { \ .port_pin_mask = GPIO_PORT_PIN_MASK_FROM_NGPIOS( \ DT_INST_PROP_OR(n, xlnx_gpio_width, MAX_GPIOS)), \ }, \ .base = DT_INST_REG_ADDR(n), \ .channel = 0, \ .all_inputs = DT_INST_PROP_OR(n, xlnx_all_inputs, 0), \ .all_outputs = DT_INST_PROP_OR(n, xlnx_all_outputs, 0), \ .interrupts_available = DT_INST_NODE_HAS_PROP(n, interrupts), \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, interrupts), \ (.irq_config_func = gpio_xlnx_axi_##n##_irq_config))}; \ \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, interrupts), \ (static void gpio_xlnx_axi_##n##_irq_config(const struct device *dev) \ { \ ARG_UNUSED(dev); \ \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ gpio_xlnx_axi_isr, DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ })) \ \ DEVICE_DT_INST_DEFINE(n, gpio_xlnx_axi_init, NULL, &gpio_xlnx_axi_##n##_data, \ &gpio_xlnx_axi_##n##_config, PRE_KERNEL_1, \ CONFIG_GPIO_INIT_PRIORITY, &gpio_xlnx_axi_driver_api); DT_INST_FOREACH_STATUS_OKAY(GPIO_XLNX_AXI_INIT) ```
/content/code_sandbox/drivers/gpio/gpio_xlnx_axi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,010
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_GPIO_RT1718S_H_ #define ZEPHYR_DRIVERS_GPIO_RT1718S_H_ #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/i2c.h> #include <zephyr/kernel.h> #define RT1718S_GPIO_NUM 3 #define RT1718S_REG_ALERT 0x10 #define RT1718S_REG_ALERT_VENDOR_DEFINED_ALERT BIT(15) #define RT1718S_REG_ALERT_MASK 0x12 #define RT1718S_REG_ALERT_MASK_VENDOR_DEFINED_ALERT BIT(15) #define RT1718S_REG_RT_MASK8 0xA6 #define RT1718S_REG_RT_MASK8_GPIO1_R BIT(0) #define RT1718S_REG_RT_MASK8_GPIO2_R BIT(1) #define RT1718S_REG_RT_MASK8_GPIO3_R BIT(2) #define RT1718S_REG_RT_MASK8_GPIO1_F BIT(4) #define RT1718S_REG_RT_MASK8_GPIO2_F BIT(5) #define RT1718S_REG_RT_MASK8_GPIO3_F BIT(6) #define RT1718S_REG_RT_INT8 0xA8 #define RT1718S_REG_RT_INT8_GPIO1_R BIT(0) #define RT1718S_REG_RT_INT8_GPIO2_R BIT(1) #define RT1718S_REG_RT_INT8_GPIO3_R BIT(2) #define RT1718S_REG_RT_INT8_GPIO1_F BIT(4) #define RT1718S_REG_RT_INT8_GPIO2_F BIT(5) #define RT1718S_REG_RT_INT8_GPIO3_F BIT(6) #define RT1718S_GPIO_INT_MASK \ (RT1718S_REG_RT_INT8_GPIO1_R | RT1718S_REG_RT_INT8_GPIO2_R | RT1718S_REG_RT_INT8_GPIO3_R | \ RT1718S_REG_RT_INT8_GPIO1_F | RT1718S_REG_RT_INT8_GPIO2_F | RT1718S_REG_RT_INT8_GPIO3_F) #define RT1718S_REG_RT_ST8 0xAA #define RT1718S_REG_RT_ST8_GPIO1_I BIT(0) #define RT1718S_REG_RT_ST8_GPIO2_I BIT(1) #define RT1718S_REG_RT_ST8_GPIO3_I BIT(2) #define RT1718S_REG_GPIO_CTRL(pin) (0xED + pin) #define RT1718S_REG_GPIO_CTRL_PU BIT(5) #define RT1718S_REG_GPIO_CTRL_PD BIT(4) #define RT1718S_REG_GPIO_CTRL_OD_N BIT(3) #define RT1718S_REG_GPIO_CTRL_OE BIT(2) #define RT1718S_REG_GPIO_CTRL_O BIT(1) #define RT1718S_REG_GPIO_CTRL_I BIT(0) /* RT1718S chip driver config */ struct rt1718s_config { /* I2C device */ const struct i2c_dt_spec i2c_dev; /* Alert GPIO pin */ const struct gpio_dt_spec irq_gpio; /* GPIO port device */ const struct device *gpio_port_dev; }; /* RT1718S chip driver data */ struct rt1718s_data { /* RT1718S device */ const struct device 
*dev; /* lock TCPCI registers access */ struct k_sem lock_tcpci; /* Alert pin callback */ struct gpio_callback gpio_cb; /* Alert worker */ struct k_work alert_worker; }; /** * @brief Read a RT1718S register * * @param dev RT1718S device * @param reg_addr Register address * @param val A pointer to a buffer for the data to return * * @return 0 if successful, otherwise failed. */ static inline int rt1718s_reg_read_byte(const struct device *dev, uint8_t reg_addr, uint8_t *val) { const struct rt1718s_config *const config = (const struct rt1718s_config *)dev->config; return i2c_reg_read_byte_dt(&config->i2c_dev, reg_addr, val); } /** * @brief Read a sequence of RT1718S registers * * @param dev RT1718S device * @param start_addr The register start address * @param buf A pointer to a buffer for the data to return * @param num_bytes Number of data to read * * @return 0 if successful, otherwise failed. */ static inline int rt1718s_reg_burst_read(const struct device *dev, uint8_t start_addr, uint8_t *buf, uint32_t num_bytes) { const struct rt1718s_config *const config = (const struct rt1718s_config *)dev->config; return i2c_burst_read_dt(&config->i2c_dev, start_addr, buf, num_bytes); } /** * @brief Write a RT1718S register * * @param dev RT1718S device * @param reg_addr Register address * @param val Data to write * * @return 0 if successful, otherwise failed. */ static inline int rt1718s_reg_write_byte(const struct device *dev, uint8_t reg_addr, uint8_t val) { const struct rt1718s_config *const config = (const struct rt1718s_config *)dev->config; return i2c_reg_write_byte_dt(&config->i2c_dev, reg_addr, val); } /** * @brief Write a sequence of RT1718S registers * * @param dev RT1718S device * @param start_addr The register start address * @param buf A pointer to a buffer for the data to write * @param num_bytes Number of data to write * * @return 0 if successful, otherwise failed. 
*/ static inline int rt1718s_reg_burst_write(const struct device *dev, uint8_t start_addr, uint8_t *buf, uint32_t num_bytes) { const struct rt1718s_config *const config = (const struct rt1718s_config *)dev->config; return i2c_burst_write_dt(&config->i2c_dev, start_addr, buf, num_bytes); } /** * @brief Compare data & write a RT1718S register * * @param dev RT1718S device * @param reg_addr Register address * @param reg_val Old register data * @param new_val New register data * * @return 0 if successful, otherwise failed. */ static inline int rt1718s_reg_update(const struct device *dev, uint8_t reg_addr, uint8_t reg_val, uint8_t new_val) { if (reg_val == new_val) { return 0; } return rt1718s_reg_write_byte(dev, reg_addr, new_val); } /** * @brief Dispatch GPIO port alert * * @param dev RT1718S device */ void rt1718s_gpio_alert_handler(const struct device *dev); #endif /* ZEPHYR_DRIVERS_GPIO_RT1718S_H_*/ ```
/content/code_sandbox/drivers/gpio/gpio_rt1718s.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,503
```unknown # MCP23XXX GPIO configuration options config GPIO_MCP23XXX bool help Enable support for the Microchip 23xxx I2C/SPI IO expanders. menuconfig GPIO_MCP230XX bool "MCP230XX I2C-based GPIO chip" default y depends on DT_HAS_MICROCHIP_MCP23008_ENABLED || \ DT_HAS_MICROCHIP_MCP23009_ENABLED || \ DT_HAS_MICROCHIP_MCP23016_ENABLED || \ DT_HAS_MICROCHIP_MCP23017_ENABLED || \ DT_HAS_MICROCHIP_MCP23018_ENABLED depends on I2C select GPIO_MCP23XXX help Enable driver for MCP230XX I2C-based GPIO chip. if GPIO_MCP230XX config GPIO_MCP230XX_INIT_PRIORITY int "MCP230XX GPIO expander init priority" default 75 help Device driver initialization priority. endif #GPIO_MCP230XX menuconfig GPIO_MCP23SXX bool "MCP23SXX SPI-based GPIO chip" default y depends on DT_HAS_MICROCHIP_MCP23S08_ENABLED || \ DT_HAS_MICROCHIP_MCP23S09_ENABLED || \ DT_HAS_MICROCHIP_MCP23S17_ENABLED || \ DT_HAS_MICROCHIP_MCP23S18_ENABLED depends on SPI select GPIO_MCP23SXX help Enable driver for MCP23SXX SPI-based GPIO chip. if GPIO_MCP23SXX config GPIO_MCP23SXX_INIT_PRIORITY int "MCP23SXX GPIO expander init priority" default 75 help Device driver initialization priority. endif #GPIO_MCP23SXX ```
/content/code_sandbox/drivers/gpio/Kconfig.mcp23xxx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
370
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_GPIO_GPIO_GRGPIO_H_ #define ZEPHYR_DRIVERS_GPIO_GPIO_GRGPIO_H_ struct grgpio_regs { uint32_t data; /* 0x00 I/O port data register */ uint32_t output; /* 0x04 I/O port output register */ uint32_t dir; /* 0x08 I/O port direction register */ uint32_t imask; /* 0x0C Interrupt mask register */ uint32_t ipol; /* 0x10 Interrupt polarity register */ uint32_t iedge; /* 0x14 Interrupt edge register */ uint32_t bypass; /* 0x18 Bypass register */ uint32_t cap; /* 0x1C Capability register */ uint32_t irqmap[4]; /* 0x20 - 0x2C Interrupt map registers */ uint32_t res_30; /* 0x30 Reserved */ uint32_t res_34; /* 0x34 Reserved */ uint32_t res_38; /* 0x38 Reserved */ uint32_t res_3C; /* 0x3C Reserved */ uint32_t iavail; /* 0x40 Interrupt available register */ uint32_t iflag; /* 0x44 Interrupt flag register */ uint32_t res_48; /* 0x48 Reserved */ uint32_t pulse; /* 0x4C Pulse register */ uint32_t res_50; /* 0x50 Reserved */ uint32_t output_or; /* 0x54 I/O port output register, logical-OR */ uint32_t dir_or; /* 0x58 I/O port dir. register, logical-OR */ uint32_t imask_or; /* 0x5C Interrupt mask register, logical-OR */ uint32_t res_60; /* 0x60 Reserved */ uint32_t output_and; /* 0x64 I/O port output register, logical-AND */ uint32_t dir_and; /* 0x68 I/O port dir. register, logical-AND */ uint32_t imask_and; /* 0x6C Interrupt mask register, logical-AND */ uint32_t res_70; /* 0x70 Reserved */ uint32_t output_xor; /* 0x74 I/O port output register, logical-XOR */ uint32_t dir_xor; /* 0x78 I/O port dir. 
register, logical-XOR */ uint32_t imask_xor; /* 0x7C Interrupt mask register, logical-XOR */ }; #define GRGPIO_CAP_PU_BIT 18 #define GRGPIO_CAP_IER_BIT 17 #define GRGPIO_CAP_IFL_BIT 16 #define GRGPIO_CAP_IRQGEN_BIT 8 #define GRGPIO_CAP_NLINES_BIT 0 #define GRGPIO_CAP_PU (0x1 << GRGPIO_CAP_PU_BIT) #define GRGPIO_CAP_IER (0x1 << GRGPIO_CAP_IER_BIT) #define GRGPIO_CAP_IFL (0x1 << GRGPIO_CAP_IFL_BIT) #define GRGPIO_CAP_IRQGEN (0x1f << GRGPIO_CAP_IRQGEN_BIT) #define GRGPIO_CAP_NLINES (0x1f << GRGPIO_CAP_NLINES_BIT) #endif /* ZEPHYR_DRIVERS_GPIO_GPIO_GRGPIO_H_ */ ```
/content/code_sandbox/drivers/gpio/gpio_grgpio.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
730
```unknown # GPIO emulation using SDL keyboard events # config GPIO_EMUL_SDL bool "SDL GPIO emulation" default y depends on DT_HAS_ZEPHYR_GPIO_EMUL_SDL_ENABLED depends on GPIO_EMUL depends on HAS_SDL help Enable GPIO emulation using SDL keyboard events. ```
/content/code_sandbox/drivers/gpio/Kconfig.emul_sdl
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```unknown # Ambiq SDK GPIO # # # config GPIO_AMBIQ bool "AMBIQ GPIO driver" default y depends on DT_HAS_AMBIQ_GPIO_ENABLED select AMBIQ_HAL help Enable driver for Ambiq GPIO. ```
/content/code_sandbox/drivers/gpio/Kconfig.ambiq
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
54
```c /* * */ #define DT_DRV_COMPAT ene_kb1200_gpio #include <zephyr/drivers/gpio.h> #include <zephyr/kernel.h> #include <zephyr/drivers/gpio/gpio_utils.h> #include <zephyr/sys/util_macro.h> #include <reg/gpio.h> #include <reg/gptd.h> struct gpio_kb1200_data { /* gpio_driver_data needs to be first */ struct gpio_driver_data common; sys_slist_t cb; }; struct gpio_kb1200_config { /* gpio_driver_config needs to be first */ struct gpio_driver_config common; /* base address of GPIO port */ struct gpio_regs *gpio_regs; struct gptd_regs *gptd_regs; }; static void gpio_kb1200_isr(const struct device *dev) { const struct gpio_kb1200_config *config = dev->config; struct gpio_kb1200_data *context = dev->data; uint32_t pending_flag = config->gptd_regs->GPTDPF; gpio_fire_callbacks(&context->cb, dev, pending_flag); config->gptd_regs->GPTDPF |= pending_flag; } static int kb1200_gpio_pin_configure(const struct device *dev, gpio_pin_t pin, gpio_flags_t flags) { const struct gpio_kb1200_config *config = dev->config; WRITE_BIT(config->gpio_regs->GPIOFS, pin, 0); if ((flags & GPIO_OUTPUT) != 0) { WRITE_BIT(config->gpio_regs->GPIOIE, pin, 1); if ((flags & GPIO_SINGLE_ENDED) != 0) { if (flags & GPIO_LINE_OPEN_DRAIN) { WRITE_BIT(config->gpio_regs->GPIOOD, pin, 1); } } else { WRITE_BIT(config->gpio_regs->GPIOOD, pin, 0); } if (flags & GPIO_PULL_UP) { WRITE_BIT(config->gpio_regs->GPIOPU, pin, 1); } else { WRITE_BIT(config->gpio_regs->GPIOPU, pin, 0); } if ((flags & GPIO_OUTPUT_INIT_HIGH) != 0) { WRITE_BIT(config->gpio_regs->GPIOD, pin, 1); } else if ((flags & GPIO_OUTPUT_INIT_LOW) != 0) { WRITE_BIT(config->gpio_regs->GPIOD, pin, 0); } WRITE_BIT(config->gpio_regs->GPIOOE, pin, 1); } else { WRITE_BIT(config->gpio_regs->GPIOOE, pin, 0); if (flags & GPIO_PULL_UP) { WRITE_BIT(config->gpio_regs->GPIOPU, pin, 1); } else { WRITE_BIT(config->gpio_regs->GPIOPU, pin, 0); } WRITE_BIT(config->gpio_regs->GPIOIE, pin, 1); } return 0; } static int kb1200_gpio_port_get_raw(const struct device *dev, 
gpio_port_value_t *value) { const struct gpio_kb1200_config *config = dev->config; *value = config->gpio_regs->GPIOIN; return 0; } static int kb1200_gpio_port_set_masked_raw(const struct device *dev, gpio_port_pins_t mask, gpio_port_value_t value) { const struct gpio_kb1200_config *config = dev->config; config->gpio_regs->GPIOD |= (value & mask); return 0; } static int kb1200_gpio_port_set_bits_raw(const struct device *dev, gpio_port_pins_t pins) { const struct gpio_kb1200_config *config = dev->config; config->gpio_regs->GPIOD |= pins; return 0; } static int kb1200_gpio_port_clear_bits_raw(const struct device *dev, gpio_port_pins_t pins) { const struct gpio_kb1200_config *config = dev->config; config->gpio_regs->GPIOD &= ~pins; return 0; } static int kb1200_gpio_port_toggle_bits(const struct device *dev, gpio_port_pins_t pins) { const struct gpio_kb1200_config *config = dev->config; config->gpio_regs->GPIOD ^= pins; return 0; } static int kb1200_gpio_pin_interrupt_configure(const struct device *dev, gpio_pin_t pin, enum gpio_int_mode mode, enum gpio_int_trig trig) { const struct gpio_kb1200_config *config = dev->config; /* Check if GPIO port needs interrupt support */ if ((mode & GPIO_INT_DISABLE) || (mode & GPIO_INT_ENABLE) == 0) { /* Set the mask to disable the interrupt */ WRITE_BIT(config->gptd_regs->GPTDIE, pin, 0); } else { if (mode & GPIO_INT_EDGE) { WRITE_BIT(config->gptd_regs->GPTDEL, pin, 0); if (trig & GPIO_INT_HIGH_1) { if (trig & GPIO_INT_LOW_0) { /* Falling & Rising edge trigger */ /* Enable toggle trigger */ WRITE_BIT(config->gptd_regs->GPTDCHG, pin, 1); } else { /* Rising edge */ /* Disable toggle trigger */ WRITE_BIT(config->gptd_regs->GPTDCHG, pin, 0); WRITE_BIT(config->gptd_regs->GPTDPS, pin, 1); } } else { /* Falling edge */ /* Disable Toggle trigger */ WRITE_BIT(config->gptd_regs->GPTDCHG, pin, 0); WRITE_BIT(config->gptd_regs->GPTDPS, pin, 0); } } else { WRITE_BIT(config->gptd_regs->GPTDEL, pin, 1); /* Disable Toggle trigger */ 
WRITE_BIT(config->gptd_regs->GPTDCHG, pin, 0); if (trig & GPIO_INT_HIGH_1) { WRITE_BIT(config->gptd_regs->GPTDPS, pin, 1); } else { WRITE_BIT(config->gptd_regs->GPTDPS, pin, 0); } } /* clear pending flag */ WRITE_BIT(config->gptd_regs->GPTDPF, pin, 1); /* Enable the interrupt */ WRITE_BIT(config->gptd_regs->GPTDIE, pin, 1); } return 0; } static int kb1200_gpio_manage_callback(const struct device *dev, struct gpio_callback *cb, bool set) { struct gpio_kb1200_data *context = dev->data; gpio_manage_callback(&context->cb, cb, set); return 0; } static uint32_t kb1200_gpio_get_pending_int(const struct device *dev) { const struct gpio_kb1200_config *const config = dev->config; return config->gptd_regs->GPTDPF; } static const struct gpio_driver_api kb1200_gpio_api = { .pin_configure = kb1200_gpio_pin_configure, .port_get_raw = kb1200_gpio_port_get_raw, .port_set_masked_raw = kb1200_gpio_port_set_masked_raw, .port_set_bits_raw = kb1200_gpio_port_set_bits_raw, .port_clear_bits_raw = kb1200_gpio_port_clear_bits_raw, .port_toggle_bits = kb1200_gpio_port_toggle_bits, .pin_interrupt_configure = kb1200_gpio_pin_interrupt_configure, .manage_callback = kb1200_gpio_manage_callback, .get_pending_int = kb1200_gpio_get_pending_int, }; #define KB1200_GPIO_INIT(n) \ static int kb1200_gpio_##n##_init(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, 0, irq), DT_INST_IRQ_BY_IDX(n, 0, priority), \ gpio_kb1200_isr, DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(n, 0, irq)); \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, 1, irq), DT_INST_IRQ_BY_IDX(n, 1, priority), \ gpio_kb1200_isr, DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(n, 1, irq)); \ return 0; \ }; \ static const struct gpio_kb1200_config port_##n##_kb1200_config = { \ .common = {.port_pin_mask = GPIO_PORT_PIN_MASK_FROM_DT_INST(n)}, \ .gpio_regs = (struct gpio_regs *)DT_INST_REG_ADDR_BY_IDX(n, 0), \ .gptd_regs = (struct gptd_regs *)DT_INST_REG_ADDR_BY_IDX(n, 1), \ }; \ static struct gpio_kb1200_data 
gpio_kb1200_##n##_data; \ DEVICE_DT_INST_DEFINE(n, &kb1200_gpio_##n##_init, NULL, &gpio_kb1200_##n##_data, \ &port_##n##_kb1200_config, POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &kb1200_gpio_api); DT_INST_FOREACH_STATUS_OKAY(KB1200_GPIO_INIT) ```
/content/code_sandbox/drivers/gpio/gpio_ene_kb1200.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,034
```unknown # TLE9104 GPIO configuration options menuconfig GPIO_TLE9104 bool "TLE9104 SPI-based powertrain switch" default y depends on DT_HAS_INFINEON_TLE9104_GPIO_ENABLED select MFD help Enable GPIO driver for TLE9104 SPI-based powertrain switch. config GPIO_TLE9104_INIT_PRIORITY int "Init priority" default 75 depends on GPIO_TLE9104 help Device driver initialization priority. ```
/content/code_sandbox/drivers/gpio/Kconfig.tle9104
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
104
```c /* * */ #define DT_DRV_COMPAT snps_designware_gpio #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/dt-bindings/gpio/snps-designware-gpio.h> #include "gpio_dw.h" #include <zephyr/drivers/gpio/gpio_utils.h> #include <zephyr/pm/device.h> #include <zephyr/sys/sys_io.h> #include <zephyr/init.h> #include <zephyr/sys/util.h> #include <zephyr/sys/__assert.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/irq.h> #ifdef CONFIG_IOAPIC #include <zephyr/drivers/interrupt_controller/ioapic.h> #endif static int gpio_dw_port_set_bits_raw(const struct device *port, uint32_t mask); static int gpio_dw_port_clear_bits_raw(const struct device *port, uint32_t mask); /* * ARC architecture configure IP through IO auxiliary registers. * Other architectures as ARM and x86 configure IP through MMIO registers */ #ifdef GPIO_DW_IO_ACCESS static inline uint32_t dw_read(uint32_t base_addr, uint32_t offset) { return sys_in32(base_addr + offset); } static inline void dw_write(uint32_t base_addr, uint32_t offset, uint32_t val) { sys_out32(val, base_addr + offset); } static void dw_set_bit(uint32_t base_addr, uint32_t offset, uint32_t bit, bool value) { if (!value) { sys_io_clear_bit(base_addr + offset, bit); } else { sys_io_set_bit(base_addr + offset, bit); } } #else static inline uint32_t dw_read(uint32_t base_addr, uint32_t offset) { return sys_read32(base_addr + offset); } static inline void dw_write(uint32_t base_addr, uint32_t offset, uint32_t val) { sys_write32(val, base_addr + offset); } static void dw_set_bit(uint32_t base_addr, uint32_t offset, uint32_t bit, bool value) { if (!value) { sys_clear_bit(base_addr + offset, bit); } else { sys_set_bit(base_addr + offset, bit); } } #endif static inline int dw_base_to_block_base(uint32_t base_addr) { return (base_addr & 0xFFFFFFC0); } static inline int dw_derive_port_from_base(uint32_t base_addr) { uint32_t port = (base_addr & 0x3f) / 12U; return port; } static inline int 
dw_interrupt_support(const struct gpio_dw_config *config) { return ((int)(config->irq_num) > 0U); } static inline uint32_t dw_get_ext_port(uint32_t base_addr) { uint32_t ext_port; /* 4-port GPIO implementation translates from base address to port */ switch (dw_derive_port_from_base(base_addr)) { case 1: ext_port = EXT_PORTB; break; case 2: ext_port = EXT_PORTC; break; case 3: ext_port = EXT_PORTD; break; case 0: default: ext_port = EXT_PORTA; break; } return ext_port; } static inline uint32_t dw_get_data_port(uint32_t base_addr) { uint32_t dr_port; /* 4-port GPIO implementation translates from base address to port */ switch (dw_derive_port_from_base(base_addr)) { case 1: dr_port = SWPORTB_DR; break; case 2: dr_port = SWPORTC_DR; break; case 3: dr_port = SWPORTD_DR; break; case 0: default: dr_port = SWPORTA_DR; break; } return dr_port; } static inline uint32_t dw_get_dir_port(uint32_t base_addr) { uint32_t ddr_port; /* 4-port GPIO implementation translates from base address to port */ switch (dw_derive_port_from_base(base_addr)) { case 1: ddr_port = SWPORTB_DDR; break; case 2: ddr_port = SWPORTC_DDR; break; case 3: ddr_port = SWPORTD_DDR; break; case 0: default: ddr_port = SWPORTA_DDR; break; } return ddr_port; } static int gpio_dw_pin_interrupt_configure(const struct device *port, gpio_pin_t pin, enum gpio_int_mode mode, enum gpio_int_trig trig) { struct gpio_dw_runtime *context = port->data; const struct gpio_dw_config *config = port->config; uint32_t base_addr = dw_base_to_block_base(context->base_addr); uint32_t port_base_addr = context->base_addr; uint32_t dir_port = dw_get_dir_port(port_base_addr); uint32_t data_port = dw_get_data_port(port_base_addr); uint32_t dir_reg; /* Check for invalid pin number */ if (pin >= config->ngpios) { return -EINVAL; } /* Only PORT-A supports interrupts */ if (data_port != SWPORTA_DR) { return -ENOTSUP; } if (mode != GPIO_INT_MODE_DISABLED) { /* Check if GPIO port supports interrupts */ if (!dw_interrupt_support(config)) { 
return -ENOTSUP; } /* Interrupt to be enabled but pin is not set to input */ dir_reg = dw_read(base_addr, dir_port) & BIT(pin); if (dir_reg != 0U) { return -EINVAL; } } /* Does not support both edges */ if ((mode == GPIO_INT_MODE_EDGE) && (trig == GPIO_INT_TRIG_BOTH)) { return -ENOTSUP; } /* Clear interrupt enable */ dw_set_bit(base_addr, INTEN, pin, false); /* Mask and clear interrupt */ dw_set_bit(base_addr, INTMASK, pin, true); dw_write(base_addr, PORTA_EOI, BIT(pin)); if (mode != GPIO_INT_MODE_DISABLED) { /* level (0) or edge (1) */ dw_set_bit(base_addr, INTTYPE_LEVEL, pin, (mode == GPIO_INT_MODE_EDGE)); /* Active low/high */ dw_set_bit(base_addr, INT_POLARITY, pin, (trig == GPIO_INT_TRIG_HIGH)); /* Finally enabling interrupt */ dw_set_bit(base_addr, INTEN, pin, true); dw_set_bit(base_addr, INTMASK, pin, false); } return 0; } static inline void dw_pin_config(const struct device *port, uint32_t pin, int flags) { struct gpio_dw_runtime *context = port->data; const struct gpio_dw_config *config = port->config; uint32_t base_addr = dw_base_to_block_base(context->base_addr); uint32_t port_base_addr = context->base_addr; uint32_t dir_port = dw_get_dir_port(port_base_addr); bool pin_is_output, need_debounce; /* Set init value then direction */ pin_is_output = (flags & GPIO_OUTPUT) != 0U; dw_set_bit(base_addr, dir_port, pin, pin_is_output); if (pin_is_output) { if ((flags & GPIO_OUTPUT_INIT_HIGH) != 0U) { gpio_dw_port_set_bits_raw(port, BIT(pin)); } else if ((flags & GPIO_OUTPUT_INIT_LOW) != 0U) { gpio_dw_port_clear_bits_raw(port, BIT(pin)); } } /* Use built-in debounce. * Note debounce circuit is only available if also supporting * interrupts according to datasheet. 
*/ if (dw_interrupt_support(config) && (dir_port == SWPORTA_DDR)) { need_debounce = (flags & DW_GPIO_DEBOUNCE); dw_set_bit(base_addr, PORTA_DEBOUNCE, pin, need_debounce); } } static inline int gpio_dw_config(const struct device *port, gpio_pin_t pin, gpio_flags_t flags) { const struct gpio_dw_config *config = port->config; uint32_t io_flags; /* Check for invalid pin number */ if (pin >= config->ngpios) { return -EINVAL; } /* Does not support disconnected pin, and * not supporting both input/output at same time. */ io_flags = flags & (GPIO_INPUT | GPIO_OUTPUT); if ((io_flags == GPIO_DISCONNECTED) || (io_flags == (GPIO_INPUT | GPIO_OUTPUT))) { return -ENOTSUP; } /* No open-drain support */ if ((flags & GPIO_SINGLE_ENDED) != 0U) { return -ENOTSUP; } /* Does not support pull-up/pull-down */ if ((flags & (GPIO_PULL_UP | GPIO_PULL_DOWN)) != 0U) { return -ENOTSUP; } dw_pin_config(port, pin, flags); return 0; } static int gpio_dw_port_get_raw(const struct device *port, uint32_t *value) { struct gpio_dw_runtime *context = port->data; uint32_t base_addr = dw_base_to_block_base(context->base_addr); uint32_t port_base_addr = context->base_addr; uint32_t ext_port = dw_get_ext_port(port_base_addr); *value = dw_read(base_addr, ext_port); return 0; } static int gpio_dw_port_set_masked_raw(const struct device *port, uint32_t mask, uint32_t value) { struct gpio_dw_runtime *context = port->data; uint32_t base_addr = dw_base_to_block_base(context->base_addr); uint32_t port_base_addr = context->base_addr; uint32_t data_port = dw_get_data_port(port_base_addr); uint32_t pins; pins = dw_read(base_addr, data_port); pins = (pins & ~mask) | (mask & value); dw_write(base_addr, data_port, pins); return 0; } static int gpio_dw_port_set_bits_raw(const struct device *port, uint32_t mask) { struct gpio_dw_runtime *context = port->data; uint32_t base_addr = dw_base_to_block_base(context->base_addr); uint32_t port_base_addr = context->base_addr; uint32_t data_port = dw_get_data_port(port_base_addr); 
uint32_t pins; pins = dw_read(base_addr, data_port); pins |= mask; dw_write(base_addr, data_port, pins); return 0; } static int gpio_dw_port_clear_bits_raw(const struct device *port, uint32_t mask) { struct gpio_dw_runtime *context = port->data; uint32_t base_addr = dw_base_to_block_base(context->base_addr); uint32_t port_base_addr = context->base_addr; uint32_t data_port = dw_get_data_port(port_base_addr); uint32_t pins; pins = dw_read(base_addr, data_port); pins &= ~mask; dw_write(base_addr, data_port, pins); return 0; } static int gpio_dw_port_toggle_bits(const struct device *port, uint32_t mask) { struct gpio_dw_runtime *context = port->data; uint32_t base_addr = dw_base_to_block_base(context->base_addr); uint32_t port_base_addr = context->base_addr; uint32_t data_port = dw_get_data_port(port_base_addr); uint32_t pins; pins = dw_read(base_addr, data_port); pins ^= mask; dw_write(base_addr, data_port, pins); return 0; } static inline int gpio_dw_manage_callback(const struct device *port, struct gpio_callback *callback, bool set) { struct gpio_dw_runtime *context = port->data; return gpio_manage_callback(&context->callbacks, callback, set); } #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) static void gpio_dw_isr(const struct device *port) { struct gpio_dw_runtime *context = port->data; uint32_t base_addr = dw_base_to_block_base(context->base_addr); uint32_t int_status; int_status = dw_read(base_addr, INTSTATUS); dw_write(base_addr, PORTA_EOI, int_status); gpio_fire_callbacks(&context->callbacks, port, int_status); } #endif /* DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts) */ static const struct gpio_driver_api api_funcs = { .pin_configure = gpio_dw_config, .port_get_raw = gpio_dw_port_get_raw, .port_set_masked_raw = gpio_dw_port_set_masked_raw, .port_set_bits_raw = gpio_dw_port_set_bits_raw, .port_clear_bits_raw = gpio_dw_port_clear_bits_raw, .port_toggle_bits = gpio_dw_port_toggle_bits, .pin_interrupt_configure = gpio_dw_pin_interrupt_configure, .manage_callback 
= gpio_dw_manage_callback, }; static int gpio_dw_initialize(const struct device *port) { struct gpio_dw_runtime *context = port->data; const struct gpio_dw_config *config = port->config; uint32_t base_addr; if (dw_interrupt_support(config)) { base_addr = dw_base_to_block_base(context->base_addr); /* interrupts in sync with system clock */ dw_set_bit(base_addr, INT_CLOCK_SYNC, LS_SYNC_POS, 1); /* mask and disable interrupts */ dw_write(base_addr, INTMASK, ~(0)); dw_write(base_addr, INTEN, 0); dw_write(base_addr, PORTA_EOI, ~(0)); config->config_func(port); } return 0; } /* Bindings to the platform */ #define INST_IRQ_FLAGS(n) \ COND_CODE_1(DT_INST_IRQ_HAS_CELL(n, flags), (DT_INST_IRQ(n, flags)), (0)) #define GPIO_CFG_IRQ(idx, n) \ IRQ_CONNECT(DT_INST_IRQN_BY_IDX(n, idx), \ DT_INST_IRQ(n, priority), gpio_dw_isr, \ DEVICE_DT_INST_GET(n), INST_IRQ_FLAGS(n)); \ irq_enable(DT_INST_IRQN_BY_IDX(n, idx)); \ #define GPIO_DW_INIT(n) \ static void gpio_config_##n##_irq(const struct device *port) \ { \ ARG_UNUSED(port); \ LISTIFY(DT_NUM_IRQS(DT_DRV_INST(n)), GPIO_CFG_IRQ, (), n) \ } \ \ static const struct gpio_dw_config gpio_dw_config_##n = { \ .common = { \ .port_pin_mask = GPIO_PORT_PIN_MASK_FROM_DT_INST(n), \ }, \ .irq_num = COND_CODE_1(DT_INST_IRQ_HAS_IDX(n, 0), (DT_INST_IRQN(n)), (0)), \ .ngpios = DT_INST_PROP(n, ngpios), \ .config_func = gpio_config_##n##_irq, \ }; \ \ static struct gpio_dw_runtime gpio_##n##_runtime = { \ .base_addr = DT_INST_REG_ADDR(n), \ }; \ \ DEVICE_DT_INST_DEFINE(n, gpio_dw_initialize, NULL, &gpio_##n##_runtime, \ &gpio_dw_config_##n, PRE_KERNEL_1, \ CONFIG_GPIO_INIT_PRIORITY, &api_funcs); \ DT_INST_FOREACH_STATUS_OKAY(GPIO_DW_INIT) ```
/content/code_sandbox/drivers/gpio/gpio_dw.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,326
```unknown # LPC11U6x GPIO configuration options config GPIO_LPC11U6X bool "LPC11U6x GPIO driver" default y depends on DT_HAS_NXP_LPC11U6X_GPIO_ENABLED depends on CLOCK_CONTROL_LPC11U6X help Enable GPIO driver for LPC11U6x MCUs. ```
/content/code_sandbox/drivers/gpio/Kconfig.lpc11u6x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
77
```c /* * */ #define DT_DRV_COMPAT infineon_tle9104_gpio #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/gpio/gpio_utils.h> #include <zephyr/drivers/mfd/tle9104.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(gpio_tle9104, CONFIG_GPIO_LOG_LEVEL); struct tle9104_gpio_config { /* gpio_driver_config needs to be first */ struct gpio_driver_config common; /* parent MFD */ const struct device *parent; }; struct tle9104_gpio_data { /* gpio_driver_data needs to be first */ struct gpio_driver_data common; /* each bit is one output channel, bit 0 = OUT1, ... */ uint8_t state; /* each bit defines if the output channel is configured, see state */ uint8_t configured; struct k_mutex lock; }; static int tle9104_gpio_pin_configure(const struct device *dev, gpio_pin_t pin, gpio_flags_t flags) { const struct tle9104_gpio_config *config = dev->config; struct tle9104_gpio_data *data = dev->data; int result; /* cannot execute a bus operation in an ISR context */ if (k_is_in_isr()) { return -EWOULDBLOCK; } if (pin >= TLE9104_GPIO_COUNT) { LOG_ERR("invalid pin number %i", pin); return -EINVAL; } if ((flags & GPIO_INPUT) != 0) { LOG_ERR("cannot configure pin as input"); return -ENOTSUP; } if ((flags & GPIO_OUTPUT) == 0) { LOG_ERR("pin must be configured as an output"); return -ENOTSUP; } if ((flags & GPIO_SINGLE_ENDED) == 0) { LOG_ERR("pin must be configured as single ended"); return -ENOTSUP; } if ((flags & GPIO_LINE_OPEN_DRAIN) == 0) { LOG_ERR("pin must be configured as open drain"); return -ENOTSUP; } if ((flags & GPIO_PULL_UP) != 0) { LOG_ERR("pin cannot have a pull up configured"); return -ENOTSUP; } if ((flags & GPIO_PULL_DOWN) != 0) { LOG_ERR("pin cannot have a pull down configured"); return -ENOTSUP; } k_mutex_lock(&data->lock, K_FOREVER); if ((flags & GPIO_OUTPUT_INIT_LOW) != 0) { WRITE_BIT(data->state, pin, 0); } else if ((flags & GPIO_OUTPUT_INIT_HIGH) != 0) { 
WRITE_BIT(data->state, pin, 1); } WRITE_BIT(data->configured, pin, 1); result = tle9104_write_state(config->parent, data->state); k_mutex_unlock(&data->lock); return result; } static int tle9104_gpio_port_get_raw(const struct device *dev, uint32_t *value) { ARG_UNUSED(dev); ARG_UNUSED(value); LOG_ERR("input pins are not available"); return -ENOTSUP; } static int tle9104_gpio_port_set_masked_raw(const struct device *dev, uint32_t mask, uint32_t value) { const struct tle9104_gpio_config *config = dev->config; struct tle9104_gpio_data *data = dev->data; int result; /* cannot execute a bus operation in an ISR context */ if (k_is_in_isr()) { return -EWOULDBLOCK; } k_mutex_lock(&data->lock, K_FOREVER); data->state = (data->state & ~mask) | (mask & value); result = tle9104_write_state(config->parent, data->state); k_mutex_unlock(&data->lock); return result; } static int tle9104_gpio_port_set_bits_raw(const struct device *dev, uint32_t mask) { return tle9104_gpio_port_set_masked_raw(dev, mask, mask); } static int tle9104_gpio_port_clear_bits_raw(const struct device *dev, uint32_t mask) { return tle9104_gpio_port_set_masked_raw(dev, mask, 0); } static int tle9104_gpio_port_toggle_bits(const struct device *dev, uint32_t mask) { const struct tle9104_gpio_config *config = dev->config; struct tle9104_gpio_data *data = dev->data; int result; /* cannot execute a bus operation in an ISR context */ if (k_is_in_isr()) { return -EWOULDBLOCK; } k_mutex_lock(&data->lock, K_FOREVER); data->state ^= mask; result = tle9104_write_state(config->parent, data->state); k_mutex_unlock(&data->lock); return result; } static int tle9104_gpio_pin_interrupt_configure(const struct device *dev, gpio_pin_t pin, enum gpio_int_mode mode, enum gpio_int_trig trig) { ARG_UNUSED(dev); ARG_UNUSED(pin); ARG_UNUSED(mode); ARG_UNUSED(trig); return -ENOTSUP; } static const struct gpio_driver_api api_table = { .pin_configure = tle9104_gpio_pin_configure, .port_get_raw = tle9104_gpio_port_get_raw, 
.port_set_masked_raw = tle9104_gpio_port_set_masked_raw, .port_set_bits_raw = tle9104_gpio_port_set_bits_raw, .port_clear_bits_raw = tle9104_gpio_port_clear_bits_raw, .port_toggle_bits = tle9104_gpio_port_toggle_bits, .pin_interrupt_configure = tle9104_gpio_pin_interrupt_configure, }; static int tle9104_gpio_init(const struct device *dev) { const struct tle9104_gpio_config *config = dev->config; struct tle9104_gpio_data *data = dev->data; LOG_DBG("initialize TLE9104 GPIO instance %s", dev->name); if (!device_is_ready(config->parent)) { LOG_ERR("%s: parent MFD is not ready", dev->name); return -EINVAL; } int result = k_mutex_init(&data->lock); if (result != 0) { LOG_ERR("unable to initialize mutex"); return result; } return 0; } #define TLE9104_GPIO_INIT(inst) \ static const struct tle9104_gpio_config tle9104_gpio_##inst##_config = { \ .common = { \ .port_pin_mask = GPIO_PORT_PIN_MASK_FROM_DT_INST(inst), \ }, \ .parent = DEVICE_DT_GET(DT_PARENT(DT_DRV_INST(inst))), \ }; \ \ static struct tle9104_gpio_data tle9104_gpio_##inst##_drvdata; \ \ /* This has to be initialized after the SPI peripheral. */ \ DEVICE_DT_INST_DEFINE(inst, tle9104_gpio_init, NULL, &tle9104_gpio_##inst##_drvdata, \ &tle9104_gpio_##inst##_config, POST_KERNEL, \ CONFIG_GPIO_TLE9104_INIT_PRIORITY, &api_table); DT_INST_FOREACH_STATUS_OKAY(TLE9104_GPIO_INIT) ```
/content/code_sandbox/drivers/gpio/gpio_tle9104.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,573