```c
/*
 *
 */

/**
 * @brief Common part of DMA drivers for the NXP i.MX RT series.
 */

#include <errno.h>
#include <soc.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/devicetree.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/barrier.h>
#include "dma_mcux_edma.h"
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>

#ifdef CONFIG_DMA_MCUX_EDMA
#define DT_DRV_COMPAT nxp_mcux_edma
#elif CONFIG_DMA_MCUX_EDMA_V3
#define DT_DRV_COMPAT nxp_mcux_edma_v3
#elif CONFIG_DMA_MCUX_EDMA_V4
#define DT_DRV_COMPAT nxp_mcux_edma_v4
#endif

LOG_MODULE_REGISTER(dma_mcux_edma, CONFIG_DMA_LOG_LEVEL);

#define HAS_CHANNEL_GAP(n)		DT_INST_NODE_HAS_PROP(n, channel_gap) ||
#define DMA_MCUX_HAS_CHANNEL_GAP	(DT_INST_FOREACH_STATUS_OKAY(HAS_CHANNEL_GAP) 0)

struct dma_mcux_edma_config {
	DMA_Type *base;
#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	DMAMUX_Type **dmamux_base;
#endif
	uint8_t channels_per_mux;
	uint8_t dmamux_reg_offset;
	int dma_channels; /* number of channels */
#if DMA_MCUX_HAS_CHANNEL_GAP
	uint32_t channel_gap[2];
#endif
	void (*irq_config_func)(const struct device *dev);
};

#ifdef CONFIG_HAS_MCUX_CACHE

#ifdef CONFIG_DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS

#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
#define EDMA_TCDPOOL_CACHE_ATTR __dtcm_noinit_section
#else /* DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay) */
#error Selected DTCM for MCUX DMA descriptors but no DTCM section.
#endif /* DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay) */

#elif defined(CONFIG_NOCACHE_MEMORY)
#define EDMA_TCDPOOL_CACHE_ATTR __nocache
#else
/*
 * Note: the TCD pool *must* be in non cacheable memory. All of the NXP SOCs
 * that support caching memory have their default SRAM regions defined as a
 * non cached memory region, but if the default SRAM region is changed EDMA
 * TCD pools would be moved to cacheable memory, resulting in DMA cache
 * coherency issues.
 */
#define EDMA_TCDPOOL_CACHE_ATTR
#endif /* CONFIG_DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS */

#else /* CONFIG_HAS_MCUX_CACHE */
#define EDMA_TCDPOOL_CACHE_ATTR
#endif /* CONFIG_HAS_MCUX_CACHE */

static __aligned(32) EDMA_TCDPOOL_CACHE_ATTR edma_tcd_t
	tcdpool[DT_INST_PROP(0, dma_channels)][CONFIG_DMA_TCD_QUEUE_SIZE];

struct dma_mcux_channel_transfer_edma_settings {
	uint32_t source_data_size;
	uint32_t dest_data_size;
	uint32_t source_burst_length;
	uint32_t dest_burst_length;
	enum dma_channel_direction direction;
	edma_transfer_type_t transfer_type;
	bool valid;
};

struct call_back {
	edma_transfer_config_t transferConfig;
	edma_handle_t edma_handle;
	const struct device *dev;
	void *user_data;
	dma_callback_t dma_callback;
	struct dma_mcux_channel_transfer_edma_settings transfer_settings;
	bool busy;
};

struct dma_mcux_edma_data {
	struct dma_context dma_ctx;
	struct call_back data_cb[DT_INST_PROP(0, dma_channels)];
	ATOMIC_DEFINE(channels_atomic, DT_INST_PROP(0, dma_channels));
};

#define DEV_CFG(dev) \
	((const struct dma_mcux_edma_config *const)dev->config)
#define DEV_DATA(dev) ((struct dma_mcux_edma_data *)dev->data)
#define DEV_BASE(dev) ((DMA_Type *)DEV_CFG(dev)->base)

#define DEV_CHANNEL_DATA(dev, ch) \
	((struct call_back *)(&(DEV_DATA(dev)->data_cb[ch])))

#define DEV_EDMA_HANDLE(dev, ch) \
	((edma_handle_t *)(&(DEV_CHANNEL_DATA(dev, ch)->edma_handle)))

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
#define DEV_DMAMUX_BASE(dev, idx) ((DMAMUX_Type *)DEV_CFG(dev)->dmamux_base[idx])
#define DEV_DMAMUX_IDX(dev, ch)	(ch / DEV_CFG(dev)->channels_per_mux)

#define DEV_DMAMUX_CHANNEL(dev, ch) \
	(ch % DEV_CFG(dev)->channels_per_mux) ^ (DEV_CFG(dev)->dmamux_reg_offset)
#endif

/*
 * The hardware channel (which takes the gap into account) is used when
 * accessing DMA registers. Data structures in the shim driver still use
 * the plain (gapless) channel number.
 */
static ALWAYS_INLINE uint32_t dma_mcux_edma_add_channel_gap(const struct device *dev,
							    uint32_t channel)
{
#if DMA_MCUX_HAS_CHANNEL_GAP
	const struct dma_mcux_edma_config *config = DEV_CFG(dev);

	return (channel < config->channel_gap[0]) ? channel :
		(channel + 1 + config->channel_gap[1] - config->channel_gap[0]);
#else
	ARG_UNUSED(dev);
	return channel;
#endif
}

static ALWAYS_INLINE uint32_t dma_mcux_edma_remove_channel_gap(const struct device *dev,
							       uint32_t channel)
{
#if DMA_MCUX_HAS_CHANNEL_GAP
	const struct dma_mcux_edma_config *config = DEV_CFG(dev);

	return (channel < config->channel_gap[0]) ? channel :
		(channel + config->channel_gap[0] - config->channel_gap[1] - 1);
#else
	ARG_UNUSED(dev);
	return channel;
#endif
}

static bool data_size_valid(const size_t data_size)
{
	return (data_size == 4U || data_size == 2U ||
		data_size == 1U || data_size == 8U ||
		data_size == 16U || data_size == 32U
#if defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
		|| data_size == 64U
#endif
		);
}

static void nxp_edma_callback(edma_handle_t *handle, void *param, bool transferDone,
			      uint32_t tcds)
{
	int ret = -EIO;
	struct call_back *data = (struct call_back *)param;
	uint32_t channel = dma_mcux_edma_remove_channel_gap(data->dev, handle->channel);

	if (transferDone) {
		/* DMA is no longer busy when there are no remaining TCDs to transfer */
		data->busy = (handle->tcdPool != NULL) && (handle->tcdUsed > 0);
		ret = DMA_STATUS_COMPLETE;
	}
	LOG_DBG("transfer %d", tcds);
	data->dma_callback(data->dev, data->user_data, channel, ret);
}

static void dma_mcux_edma_irq_handler(const struct device *dev, uint32_t channel)
{
	uint32_t hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);
	uint32_t flag = EDMA_GetChannelStatusFlags(DEV_BASE(dev), hw_channel);

	if (flag & kEDMA_InterruptFlag) {
		LOG_DBG("IRQ OCCURRED");
		/* EDMA interrupt flag is cleared here */
		EDMA_HandleIRQ(DEV_EDMA_HANDLE(dev, channel));
		LOG_DBG("IRQ DONE");
	}
#if DT_INST_PROP(0, no_error_irq)
	/* Channel shares the same irq for error and transfer complete */
	else if (flag & kEDMA_ErrorFlag) {
		EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel, 0xFFFFFFFF);
		EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, channel));
		DEV_CHANNEL_DATA(dev, channel)->busy = false;
		LOG_INF("channel %d error status is 0x%x", channel, flag);
	}
#endif
}

#if !DT_INST_PROP(0, no_error_irq)
static void dma_mcux_edma_error_irq_handler(const struct device *dev)
{
	int i = 0;
	uint32_t flag = 0;
	uint32_t hw_channel;

	for (i = 0; i < DEV_CFG(dev)->dma_channels; i++) {
		if (DEV_CHANNEL_DATA(dev, i)->busy) {
			hw_channel = dma_mcux_edma_add_channel_gap(dev, i);
			flag = EDMA_GetChannelStatusFlags(DEV_BASE(dev), hw_channel);
			EDMA_ClearChannelStatusFlags(DEV_BASE(dev), hw_channel, 0xFFFFFFFF);
			EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, i));
			DEV_CHANNEL_DATA(dev, i)->busy = false;
			LOG_INF("channel %d error status is 0x%x", hw_channel, flag);
		}
	}

#if defined(CONFIG_CPU_CORTEX_M4)
	barrier_dsync_fence_full();
#endif
}
#endif

/* Configure a channel */
static int dma_mcux_edma_configure(const struct device *dev, uint32_t channel,
				   struct dma_config *config)
{
	/* Check for invalid parameters before dereferencing them. */
	if (NULL == dev || NULL == config) {
		return -EINVAL;
	}

	edma_handle_t *p_handle = DEV_EDMA_HANDLE(dev, channel);
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
	struct dma_block_config *block_config = config->head_block;
	uint32_t slot = config->dma_slot;
	uint32_t hw_channel;
	edma_transfer_type_t transfer_type;
	unsigned int key;
	int ret = 0;

	if (slot >= DT_INST_PROP(0, dma_requests)) {
		LOG_ERR("source number is out of scope %d", slot);
		return -ENOTSUP;
	}

	if (channel >= DT_INST_PROP(0, dma_channels)) {
		LOG_ERR("out of DMA channel %d", channel);
		return -EINVAL;
	}

	hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);
#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	uint8_t dmamux_idx, dmamux_channel;

	dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
	dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);
#endif
	data->transfer_settings.valid = false;

	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
		transfer_type = kEDMA_MemoryToMemory;
		break;
	case MEMORY_TO_PERIPHERAL:
		transfer_type = kEDMA_MemoryToPeripheral;
		break;
	case PERIPHERAL_TO_MEMORY:
		transfer_type = kEDMA_PeripheralToMemory;
		break;
	case PERIPHERAL_TO_PERIPHERAL:
		transfer_type = kEDMA_PeripheralToPeripheral;
		break;
	default:
		LOG_ERR("unsupported transfer direction");
		return -EINVAL;
	}

	if (!data_size_valid(config->source_data_size)) {
		LOG_ERR("Source unit size error, %d", config->source_data_size);
		return -EINVAL;
	}

	if (!data_size_valid(config->dest_data_size)) {
		LOG_ERR("Dest unit size error, %d", config->dest_data_size);
		return -EINVAL;
	}

	if (block_config->source_gather_en || block_config->dest_scatter_en) {
		if (config->block_count > CONFIG_DMA_TCD_QUEUE_SIZE) {
			LOG_ERR("please configure DMA_TCD_QUEUE_SIZE as %d", config->block_count);
			return -EINVAL;
		}
	}

	data->transfer_settings.source_data_size = config->source_data_size;
	data->transfer_settings.dest_data_size = config->dest_data_size;
	data->transfer_settings.source_burst_length = config->source_burst_length;
	data->transfer_settings.dest_burst_length = config->dest_burst_length;
	data->transfer_settings.direction = config->channel_direction;
	data->transfer_settings.transfer_type = transfer_type;
	data->transfer_settings.valid = true;

	/* Lock and page in the channel configuration */
	key = irq_lock();

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT

#if DT_INST_PROP(0, nxp_a_on)
	if (config->source_handshake || config->dest_handshake ||
	    transfer_type == kEDMA_MemoryToMemory) {
		/* A software trigger makes the channel always on */
		LOG_DBG("ALWAYS ON");
		DMAMUX_EnableAlwaysOn(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, true);
	} else {
		DMAMUX_SetSource(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, slot);
	}
#else
	DMAMUX_SetSource(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, slot);
#endif

	/* dam_imx_rt_set_channel_priority(dev, channel, config); */
	DMAMUX_EnableChannel(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel);
#endif

	if (data->busy) {
		EDMA_AbortTransfer(p_handle);
	}
	EDMA_ResetChannel(DEV_BASE(dev), hw_channel);
	EDMA_CreateHandle(p_handle, DEV_BASE(dev), hw_channel);
	EDMA_SetCallback(p_handle, nxp_edma_callback, (void *)data);

#if defined(FSL_FEATURE_EDMA_HAS_CHANNEL_MUX) && FSL_FEATURE_EDMA_HAS_CHANNEL_MUX
	/* First release any peripheral previously associated with this channel */
	EDMA_SetChannelMux(DEV_BASE(dev), hw_channel, 0);
	EDMA_SetChannelMux(DEV_BASE(dev), hw_channel, slot);
#endif

	LOG_DBG("channel is %d", channel);
	EDMA_EnableChannelInterrupts(DEV_BASE(dev), hw_channel, kEDMA_ErrorInterruptEnable);

	if (block_config->source_gather_en || block_config->dest_scatter_en) {
		EDMA_InstallTCDMemory(p_handle, tcdpool[channel], CONFIG_DMA_TCD_QUEUE_SIZE);
		while (block_config != NULL) {
			EDMA_PrepareTransfer(
				&(data->transferConfig),
				(void *)block_config->source_address,
				config->source_data_size,
				(void *)block_config->dest_address,
				config->dest_data_size,
				config->source_burst_length,
				block_config->block_size, transfer_type);

			const status_t submit_status =
				EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
			if (submit_status != kStatus_Success) {
				LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
				ret = -EFAULT;
			}
			block_config = block_config->next_block;
		}
	} else {
		/* block_count shall be 1 */
		LOG_DBG("block size is: %d", block_config->block_size);
		EDMA_PrepareTransfer(&(data->transferConfig),
				     (void *)block_config->source_address,
				     config->source_data_size,
				     (void *)block_config->dest_address,
				     config->dest_data_size,
				     config->source_burst_length,
				     block_config->block_size, transfer_type);

		const status_t submit_status =
			EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
		if (submit_status != kStatus_Success) {
			LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
			ret = -EFAULT;
		}
#if defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
		LOG_DBG("DMA TCD_CSR 0x%x", DEV_BASE(dev)->CH[hw_channel].TCD_CSR);
#else
		LOG_DBG("data csr is 0x%x", DEV_BASE(dev)->TCD[hw_channel].CSR);
#endif
	}

	if (config->dest_chaining_en) {
		LOG_DBG("link major channel %d", config->linked_channel);
		EDMA_SetChannelLink(DEV_BASE(dev), channel, kEDMA_MajorLink,
				    config->linked_channel);
	}
	if (config->source_chaining_en) {
		LOG_DBG("link minor channel %d", config->linked_channel);
		EDMA_SetChannelLink(DEV_BASE(dev), channel, kEDMA_MinorLink,
				    config->linked_channel);
	}

	data->busy = false;
	if (config->dma_callback) {
		LOG_DBG("INSTALL call back on channel %d", channel);
		data->user_data = config->user_data;
		data->dma_callback = config->dma_callback;
		data->dev = dev;
	}

	irq_unlock(key);

	return ret;
}

static int dma_mcux_edma_start(const struct device *dev, uint32_t channel)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);

	LOG_DBG("START TRANSFER");

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	uint8_t dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
	uint8_t dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);

	LOG_DBG("DMAMUX CHCFG 0x%x", DEV_DMAMUX_BASE(dev, dmamux_idx)->CHCFG[dmamux_channel]);
#endif

#if !defined(CONFIG_DMA_MCUX_EDMA_V3) && !defined(CONFIG_DMA_MCUX_EDMA_V4)
	LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CR);
#endif
	data->busy = true;
	EDMA_StartTransfer(DEV_EDMA_HANDLE(dev, channel));
	return 0;
}

static int dma_mcux_edma_stop(const struct device *dev, uint32_t channel)
{
	struct dma_mcux_edma_data *data = DEV_DATA(dev);
	uint32_t hw_channel;

	hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);

	data->data_cb[channel].transfer_settings.valid = false;

	if (!data->data_cb[channel].busy) {
		return 0;
	}

	EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, channel));
	EDMA_ClearChannelStatusFlags(DEV_BASE(dev), hw_channel,
				     kEDMA_DoneFlag | kEDMA_ErrorFlag | kEDMA_InterruptFlag);
	EDMA_ResetChannel(DEV_BASE(dev), hw_channel);

	data->data_cb[channel].busy = false;
	return 0;
}

static int dma_mcux_edma_suspend(const struct device *dev, uint32_t channel)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);

	if (!data->busy) {
		return -EINVAL;
	}
	EDMA_StopTransfer(DEV_EDMA_HANDLE(dev, channel));
	return 0;
}

static int dma_mcux_edma_resume(const struct device *dev, uint32_t channel)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);

	if (!data->busy) {
		return -EINVAL;
	}
	EDMA_StartTransfer(DEV_EDMA_HANDLE(dev, channel));
	return 0;
}

static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
				uint32_t src, uint32_t dst, size_t size)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
	/* Lock the channel configuration */
	const unsigned int key = irq_lock();
	int ret = 0;

	if (!data->transfer_settings.valid) {
		LOG_ERR("Invalid EDMA settings on initial config. Configure DMA before reload.");
		ret = -EFAULT;
		goto cleanup;
	}

	/* If the tcdPool is not in use (no s/g) then only a single TCD can be active at once. */
	if (data->busy && data->edma_handle.tcdPool == NULL) {
		LOG_ERR("EDMA busy. Wait until the transfer completes before reloading.");
		ret = -EBUSY;
		goto cleanup;
	}

	EDMA_PrepareTransfer(
		&(data->transferConfig),
		(void *)src,
		data->transfer_settings.source_data_size,
		(void *)dst,
		data->transfer_settings.dest_data_size,
		data->transfer_settings.source_burst_length,
		size,
		data->transfer_settings.transfer_type);

	const status_t submit_status =
		EDMA_SubmitTransfer(DEV_EDMA_HANDLE(dev, channel), &(data->transferConfig));

	if (submit_status != kStatus_Success) {
		LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
		ret = -EFAULT;
	}

cleanup:
	irq_unlock(key);
	return ret;
}

static int dma_mcux_edma_get_status(const struct device *dev, uint32_t channel,
				    struct dma_status *status)
{
	uint32_t hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);

	if (DEV_CHANNEL_DATA(dev, channel)->busy) {
		status->busy = true;
		status->pending_length =
			EDMA_GetRemainingMajorLoopCount(DEV_BASE(dev), hw_channel);
	} else {
		status->busy = false;
		status->pending_length = 0;
	}
	status->dir = DEV_CHANNEL_DATA(dev, channel)->transfer_settings.direction;

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	uint8_t dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
	uint8_t dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);

	LOG_DBG("DMAMUX CHCFG 0x%x", DEV_DMAMUX_BASE(dev, dmamux_idx)->CHCFG[dmamux_channel]);
#endif

#if defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
	LOG_DBG("DMA MP_CSR 0x%x", DEV_BASE(dev)->MP_CSR);
	LOG_DBG("DMA MP_ES 0x%x", DEV_BASE(dev)->MP_ES);
	LOG_DBG("DMA CHx_ES 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_ES);
	LOG_DBG("DMA CHx_CSR 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_CSR);
	LOG_DBG("DMA CHx_ES 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_ES);
	LOG_DBG("DMA CHx_INT 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_INT);
	LOG_DBG("DMA TCD_CSR 0x%x", DEV_BASE(dev)->CH[hw_channel].TCD_CSR);
#else
	LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CR);
	LOG_DBG("DMA INT 0x%x", DEV_BASE(dev)->INT);
	LOG_DBG("DMA ERQ 0x%x", DEV_BASE(dev)->ERQ);
	LOG_DBG("DMA ES 0x%x", DEV_BASE(dev)->ES);
	LOG_DBG("DMA ERR 0x%x", DEV_BASE(dev)->ERR);
	LOG_DBG("DMA HRS 0x%x", DEV_BASE(dev)->HRS);
	LOG_DBG("data csr is 0x%x", DEV_BASE(dev)->TCD[hw_channel].CSR);
#endif
	return 0;
}

static bool dma_mcux_edma_channel_filter(const struct device *dev,
					 int channel_id, void *param)
{
	enum dma_channel_filter *filter = (enum dma_channel_filter *)param;

	if (filter && *filter == DMA_CHANNEL_PERIODIC) {
		if (channel_id > 3) {
			return false;
		}
	}
	return true;
}

static const struct dma_driver_api dma_mcux_edma_api = {
	.reload = dma_mcux_edma_reload,
	.config = dma_mcux_edma_configure,
	.start = dma_mcux_edma_start,
	.stop = dma_mcux_edma_stop,
	.suspend = dma_mcux_edma_suspend,
	.resume = dma_mcux_edma_resume,
	.get_status = dma_mcux_edma_get_status,
	.chan_filter = dma_mcux_edma_channel_filter,
};

static int dma_mcux_edma_init(const struct device *dev)
{
	const struct dma_mcux_edma_config *config = dev->config;
	struct dma_mcux_edma_data *data = dev->data;

	edma_config_t userConfig = { 0 };

	LOG_DBG("INIT NXP EDMA");

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	uint8_t i;

	for (i = 0; i < config->dma_channels / config->channels_per_mux; i++) {
		DMAMUX_Init(DEV_DMAMUX_BASE(dev, i));
	}
#endif

	EDMA_GetDefaultConfig(&userConfig);
	EDMA_Init(DEV_BASE(dev), &userConfig);
#ifdef CONFIG_DMA_MCUX_EDMA_V3
	/* Channel linking available and will be controlled by each channel's link settings */
	EDMA_EnableAllChannelLink(DEV_BASE(dev), true);
#endif
	config->irq_config_func(dev);
	memset(dev->data, 0, sizeof(struct dma_mcux_edma_data));
	memset(tcdpool, 0, sizeof(tcdpool));
	data->dma_ctx.magic = DMA_MAGIC;
	data->dma_ctx.dma_channels = config->dma_channels;
	data->dma_ctx.atomic = data->channels_atomic;
	return 0;
}

/* The shared error interrupt (if present) must be declared as the last element in devicetree */
#if !DT_INST_PROP(0, no_error_irq)
#define NUM_IRQS_WITHOUT_ERROR_IRQ(n)	UTIL_DEC(DT_NUM_IRQS(DT_DRV_INST(n)))
#else
#define NUM_IRQS_WITHOUT_ERROR_IRQ(n)	DT_NUM_IRQS(DT_DRV_INST(n))
#endif

#define IRQ_CONFIG(n, idx, fn)						\
	{								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, idx, irq),		\
			    DT_INST_IRQ_BY_IDX(n, idx, priority),	\
			    fn,						\
			    DEVICE_DT_INST_GET(n), 0);			\
		irq_enable(DT_INST_IRQ_BY_IDX(n, idx, irq));		\
	}

#define DMA_MCUX_EDMA_IRQ_DEFINE(idx, n)					\
	static void dma_mcux_edma_##n##_irq_##idx(const struct device *dev)	\
	{									\
		dma_mcux_edma_irq_handler(dev, idx);				\
										\
		IF_ENABLED(UTIL_BOOL(DT_INST_PROP(n, irq_shared_offset)),	\
			   (dma_mcux_edma_irq_handler(dev,			\
				idx + DT_INST_PROP(n, irq_shared_offset));))	\
										\
		IF_ENABLED(CONFIG_CPU_CORTEX_M4, (barrier_dsync_fence_full();))	\
	}

#define DMA_MCUX_EDMA_IRQ_CONFIG(idx, n)				\
	IRQ_CONFIG(n, idx, dma_mcux_edma_##n##_irq_##idx)

#define DMA_MCUX_EDMA_CONFIG_FUNC(n)					\
	LISTIFY(NUM_IRQS_WITHOUT_ERROR_IRQ(n), DMA_MCUX_EDMA_IRQ_DEFINE, (), n)	\
	static void dma_imx_config_func_##n(const struct device *dev)	\
	{								\
		ARG_UNUSED(dev);					\
									\
		LISTIFY(NUM_IRQS_WITHOUT_ERROR_IRQ(n),			\
			DMA_MCUX_EDMA_IRQ_CONFIG, (;), n)		\
									\
		IF_ENABLED(UTIL_NOT(DT_INST_NODE_HAS_PROP(n, no_error_irq)),	\
			   (IRQ_CONFIG(n, NUM_IRQS_WITHOUT_ERROR_IRQ(n),	\
				       dma_mcux_edma_error_irq_handler)))	\
									\
		LOG_DBG("install irq done");				\
	}

#if DMA_MCUX_HAS_CHANNEL_GAP
#define DMA_MCUX_EDMA_CHANNEL_GAP(n)					\
	.channel_gap = DT_INST_PROP_OR(n, channel_gap,			\
				       {[0 ... 1] = DT_INST_PROP(n, dma_channels)}),
#else
#define DMA_MCUX_EDMA_CHANNEL_GAP(n)
#endif

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
#define DMA_MCUX_EDMA_MUX(idx, n)					\
	(DMAMUX_Type *)DT_INST_REG_ADDR_BY_IDX(n, UTIL_INC(idx))

#define DMAMUX_BASE_INIT_DEFINE(n)					\
	static DMAMUX_Type *dmamux_base_##n[] = {			\
		LISTIFY(UTIL_DEC(DT_NUM_REGS(DT_DRV_INST(n))),		\
			DMA_MCUX_EDMA_MUX, (,), n)			\
	};

#define DMAMUX_BASE_INIT(n)	.dmamux_base = &dmamux_base_##n[0],
#define CHANNELS_PER_MUX(n)	.channels_per_mux = DT_INST_PROP(n, dma_channels) /	\
						    ARRAY_SIZE(dmamux_base_##n),
#else
#define DMAMUX_BASE_INIT_DEFINE(n)
#define DMAMUX_BASE_INIT(n)
#define CHANNELS_PER_MUX(n)
#endif

/*
 * define the dma
 */
#define DMA_INIT(n)							\
	DMAMUX_BASE_INIT_DEFINE(n)					\
	static void dma_imx_config_func_##n(const struct device *dev);	\
	static const struct dma_mcux_edma_config dma_config_##n = {	\
		.base = (DMA_Type *)DT_INST_REG_ADDR(n),		\
		DMAMUX_BASE_INIT(n)					\
		.dma_channels = DT_INST_PROP(n, dma_channels),		\
		CHANNELS_PER_MUX(n)					\
		.irq_config_func = dma_imx_config_func_##n,		\
		.dmamux_reg_offset = DT_INST_PROP(n, dmamux_reg_offset),\
		DMA_MCUX_EDMA_CHANNEL_GAP(n)				\
	};								\
									\
	struct dma_mcux_edma_data dma_data_##n;				\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			      &dma_mcux_edma_init, NULL,		\
			      &dma_data_##n, &dma_config_##n,		\
			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,	\
			      &dma_mcux_edma_api);			\
									\
	DMA_MCUX_EDMA_CONFIG_FUNC(n);

DT_INST_FOREACH_STATUS_OKAY(DMA_INIT)
```
/content/code_sandbox/drivers/dma/dma_mcux_edma.c (zephyrproject-rtos/zephyr, C)
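For context, a minimal sketch of how application code would drive this eDMA shim through Zephyr's generic DMA API (`dma_config()`/`dma_start()`). The `edma0` node label, the choice of channel 0, and the buffer sizes are assumptions for illustration, not part of the driver above.

```c
/* Hypothetical usage sketch for the eDMA shim, assuming a devicetree node
 * labeled edma0 and a board where channel 0 is free.
 */
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static uint8_t src_buf[64]; /* data to copy */
static uint8_t dst_buf[64];

static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	/* status is DMA_STATUS_COMPLETE on success, negative errno on error */
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);
	ARG_UNUSED(channel);
	ARG_UNUSED(status);
}

int copy_with_edma(void)
{
	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(edma0)); /* assumed label */
	struct dma_block_config block = {
		.source_address = (uint32_t)(uintptr_t)src_buf,
		.dest_address = (uint32_t)(uintptr_t)dst_buf,
		.block_size = sizeof(src_buf),
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,
		.dest_data_size = 4,
		.source_burst_length = 4,
		.dest_burst_length = 4,
		.block_count = 1,
		.head_block = &block,
		.dma_callback = xfer_done,
	};
	int ret = dma_config(dma, 0, &cfg);

	if (ret == 0) {
		ret = dma_start(dma, 0); /* ends up in dma_mcux_edma_start() */
	}
	return ret;
}
```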
```c
/*
 *
 */

#define DT_DRV_COMPAT microchip_xec_dmac

#include <soc.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
#include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h>
#include <zephyr/pm/device.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(dma_mchp_xec, CONFIG_DMA_LOG_LEVEL);

#define XEC_DMA_DEBUG 1
#ifdef XEC_DMA_DEBUG
#include <string.h>
#endif

#define XEC_DMA_ABORT_WAIT_LOOPS	32

#define XEC_DMA_MAIN_REGS_SIZE		0x40
#define XEC_DMA_CHAN_REGS_SIZE		0x40

#define XEC_DMA_CHAN_REGS_ADDR(base, channel)			\
	(((uintptr_t)(base) + (XEC_DMA_MAIN_REGS_SIZE)) +	\
	 ((uintptr_t)(channel) * XEC_DMA_CHAN_REGS_SIZE))

/* main control */
#define XEC_DMA_MAIN_CTRL_REG_MSK	0x3u
#define XEC_DMA_MAIN_CTRL_EN_POS	0
#define XEC_DMA_MAIN_CTRL_SRST_POS	1

/* channel activate register */
#define XEC_DMA_CHAN_ACTV_EN_POS	0

/* channel control register */
#define XEC_DMA_CHAN_CTRL_REG_MSK		0x037fff27u
#define XEC_DMA_CHAN_CTRL_HWFL_RUN_POS		0
#define XEC_DMA_CHAN_CTRL_REQ_POS		1
#define XEC_DMA_CHAN_CTRL_DONE_POS		2
#define XEC_DMA_CHAN_CTRL_BUSY_POS		5
#define XEC_DMA_CHAN_CTRL_M2D_POS		8
#define XEC_DMA_CHAN_CTRL_HWFL_DEV_POS		9
#define XEC_DMA_CHAN_CTRL_HWFL_DEV_MSK		0xfe00u
#define XEC_DMA_CHAN_CTRL_HWFL_DEV_MSK0		0x7fu
#define XEC_DMA_CHAN_CTRL_INCR_MEM_POS		16
#define XEC_DMA_CHAN_CTRL_INCR_DEV_POS		17
#define XEC_DMA_CHAN_CTRL_LOCK_ARB_POS		18
#define XEC_DMA_CHAN_CTRL_DIS_HWFL_POS		19
#define XEC_DMA_CHAN_CTRL_XFR_UNIT_POS		20
#define XEC_DMA_CHAN_CTRL_XFR_UNIT_MSK		0x700000u
#define XEC_DMA_CHAN_CTRL_XFR_UNIT_MSK0		0x7u
#define XEC_DMA_CHAN_CTRL_SWFL_GO_POS		24
#define XEC_DMA_CHAN_CTRL_ABORT_POS		25

/* channel interrupt status and enable registers */
#define XEC_DMA_CHAN_IES_REG_MSK		0xfu
#define XEC_DMA_CHAN_IES_BERR_POS		0
#define XEC_DMA_CHAN_IES_OVFL_ERR_POS		1
#define XEC_DMA_CHAN_IES_DONE_POS		2
#define XEC_DMA_CHAN_IES_DEV_TERM_POS		3

/* channel fsm (RO) */
#define XEC_DMA_CHAN_FSM_REG_MSK		0xffffu
#define XEC_DMA_CHAN_FSM_ARB_STATE_POS		0
#define XEC_DMA_CHAN_FSM_ARB_STATE_MSK		0xffu
#define XEC_DMA_CHAN_FSM_CTRL_STATE_POS		8
#define XEC_DMA_CHAN_FSM_CTRL_STATE_MSK		0xff00u
#define XEC_DMA_CHAN_FSM_CTRL_STATE_IDLE	0
#define XEC_DMA_CHAN_FSM_CTRL_STATE_ARB_REQ	0x100u
#define XEC_DMA_CHAN_FSM_CTRL_STATE_RD_ACT	0x200u
#define XEC_DMA_CHAN_FSM_CTRL_STATE_WR_ACT	0x300u
#define XEC_DMA_CHAN_FSM_CTRL_STATE_WAIT_DONE	0x400u

#define XEC_DMA_HWFL_DEV_VAL(d)						\
	(((uint32_t)(d) & XEC_DMA_CHAN_CTRL_HWFL_DEV_MSK0) << XEC_DMA_CHAN_CTRL_HWFL_DEV_POS)

#define XEC_DMA_CHAN_CTRL_UNIT_VAL(u)					\
	(((uint32_t)(u) & XEC_DMA_CHAN_CTRL_XFR_UNIT_MSK0) << XEC_DMA_CHAN_CTRL_XFR_UNIT_POS)

struct dma_xec_chan_regs {
	volatile uint32_t actv;
	volatile uint32_t mem_addr;
	volatile uint32_t mem_addr_end;
	volatile uint32_t dev_addr;
	volatile uint32_t control;
	volatile uint32_t istatus;
	volatile uint32_t ienable;
	volatile uint32_t fsm;
	uint32_t rsvd_20_3f[8];
};

struct dma_xec_regs {
	volatile uint32_t mctrl;
	volatile uint32_t mpkt;
	uint32_t rsvd_08_3f[14];
};

struct dma_xec_irq_info {
	uint8_t gid;	/* GIRQ id [8, 26] */
	uint8_t gpos;	/* bit position in GIRQ [0, 31] */
	uint8_t anid;	/* aggregated external NVIC input */
	uint8_t dnid;	/* direct NVIC input */
};

struct dma_xec_config {
	struct dma_xec_regs *regs;
	uint8_t dma_channels;
	uint8_t dma_requests;
	uint8_t pcr_idx;
	uint8_t pcr_pos;
	int irq_info_size;
	const struct dma_xec_irq_info *irq_info_list;
	void (*irq_connect)(void);
};

struct dma_xec_channel {
	uint32_t control;
	uint32_t mstart;
	uint32_t mend;
	uint32_t dstart;
	uint32_t isr_hw_status;
	uint32_t block_count;
	uint8_t unit_size;
	uint8_t dir;
	uint8_t flags;
	uint8_t rsvd[1];
	struct dma_block_config *head;
	struct dma_block_config *curr;
	dma_callback_t cb;
	void *user_data;
	uint32_t total_req_xfr_len;
	uint32_t total_curr_xfr_len;
};

#define DMA_XEC_CHAN_FLAGS_CB_EOB_POS		0
#define DMA_XEC_CHAN_FLAGS_CB_ERR_DIS_POS	1

struct dma_xec_data {
	struct dma_context ctx;
	struct dma_xec_channel *channels;
};

#ifdef XEC_DMA_DEBUG
static void xec_dma_debug_clean(void);
#endif

static inline struct dma_xec_chan_regs *xec_chan_regs(struct dma_xec_regs *regs, uint32_t chan)
{
	uint8_t *pregs = (uint8_t *)regs + XEC_DMA_MAIN_REGS_SIZE;

	pregs += (chan * (XEC_DMA_CHAN_REGS_SIZE));

	return (struct dma_xec_chan_regs *)pregs;
}

static inline struct dma_xec_irq_info const *xec_chan_irq_info(const struct dma_xec_config *devcfg,
							       uint32_t channel)
{
	return &devcfg->irq_info_list[channel];
}

static int is_dma_data_size_valid(uint32_t datasz)
{
	if ((datasz == 1U) || (datasz == 2U) || (datasz == 4U)) {
		return 1;
	}

	return 0;
}

/* HW requires that if the unit size is 2 or 4 bytes, the source/destination
 * addresses be aligned to at least 2 or 4 bytes respectively.
 */
static int is_data_aligned(uint32_t src, uint32_t dest, uint32_t unitsz)
{
	if (unitsz == 1) {
		return 1;
	}

	if ((src | dest) & (unitsz - 1U)) {
		return 0;
	}

	return 1;
}

static void xec_dma_chan_clr(struct dma_xec_chan_regs * const chregs,
			     const struct dma_xec_irq_info *info)
{
	chregs->actv = 0;
	chregs->control = 0;
	chregs->mem_addr = 0;
	chregs->mem_addr_end = 0;
	chregs->dev_addr = 0;
	chregs->control = 0;
	chregs->ienable = 0;
	chregs->istatus = 0xffu;
	mchp_xec_ecia_girq_src_clr(info->gid, info->gpos);
}

static int is_dma_config_valid(const struct device *dev, struct dma_config *config)
{
	const struct dma_xec_config * const devcfg = dev->config;

	if (config->dma_slot >= (uint32_t)devcfg->dma_requests) {
		LOG_ERR("XEC DMA config dma slot exceeds number of request lines");
		return 0;
	}

	if (config->source_data_size != config->dest_data_size) {
		LOG_ERR("XEC DMA requires source and dest data size identical");
		return 0;
	}

	if (!((config->channel_direction == MEMORY_TO_MEMORY) ||
	      (config->channel_direction == MEMORY_TO_PERIPHERAL) ||
	      (config->channel_direction == PERIPHERAL_TO_MEMORY))) {
		LOG_ERR("XEC DMA only supports M2M, M2P, P2M");
		return 0;
	}

	if (!is_dma_data_size_valid(config->source_data_size)) {
		LOG_ERR("XEC DMA requires xfr unit size of 1, 2 or 4 bytes");
		return 0;
	}

	if (config->block_count != 1) {
		LOG_ERR("XEC DMA block count != 1");
		return 0;
	}

	return 1;
}

static int check_blocks(struct dma_xec_channel *chdata, struct dma_block_config *block,
			uint32_t block_count, uint32_t unit_size)
{
	if (!block || !chdata) {
		LOG_ERR("bad pointer");
		return -EINVAL;
	}

	chdata->total_req_xfr_len = 0;

	for (uint32_t i = 0; i < block_count; i++) {
		if ((block->source_addr_adj == DMA_ADDR_ADJ_DECREMENT) ||
		    (block->dest_addr_adj == DMA_ADDR_ADJ_DECREMENT)) {
			LOG_ERR("XEC DMA HW does not support address decrement. Block index %u", i);
			return -EINVAL;
		}

		if (!is_data_aligned(block->source_address, block->dest_address, unit_size)) {
			LOG_ERR("XEC DMA block at index %u violates source/dest unit size", i);
			return -EINVAL;
		}

		chdata->total_req_xfr_len += block->block_size;
	}

	return 0;
}

/*
 * struct dma_config flags
 * dma_slot - peripheral source/target ID. Not used for Mem2Mem
 * channel_direction - HW supports Mem2Mem, Mem2Periph, and Periph2Mem
 * complete_callback_en - if true invoke callback on completion (no error)
 * error_callback_dis - if true disable callback on error
 * source_handshake - 0=HW, 1=SW
 * dest_handshake - 0=HW, 1=SW
 * channel_priority - 4-bit field. HW implements round-robin only.
 * source_chaining_en - chaining channels together
 * dest_chaining_en - HW does not support channel chaining.
 * linked_channel - HW does not support
 * cyclic - HW does not support cyclic buffers. Would have to emulate with SW.
 * source_data_size - unit size of source data. HW supports 1, 2, or 4 bytes
 * dest_data_size - unit size of dest data. HW requires same as source_data_size
 * source_burst_length - HW does not support
 * dest_burst_length - HW does not support
 * block_count -
 * user_data -
 * dma_callback -
 * head_block - pointer to struct dma_block_config
 *
 * struct dma_block_config
 * source_address -
 * source_gather_interval - N/A
 * dest_address -
 * dest_scatter_interval - N/A
 * dest_scatter_count - N/A
 * source_gather_count - N/A
 * block_size
 * config - flags
 *	source_gather_en - N/A
 *	dest_scatter_en - N/A
 *	source_addr_adj - 0(increment), 1(decrement), 2(no change)
 *	dest_addr_adj - 0(increment), 1(decrement), 2(no change)
 *	source_reload_en - reload source address at end of block
 *	dest_reload_en - reload destination address at end of block
 *	fifo_mode_control - N/A
 *	flow_control_mode - 0(source req service on data available) HW does this
 *			    1(source req postponed until dest req happens) N/A
 *
 * The DMA channel implements memory start address, memory end address,
 * and peripheral address registers. There is no peripheral end address.
 * A transfer ends when the memory start address increments and reaches
 * the memory end address.
 *
 * Memory to Memory: copy from source_address to dest_address
 *	chan direction = Mem2Dev. chan.control b[8]=1
 *	chan mem_addr = source_address
 *	chan mem_addr_end = source_address + block_size
 *	chan dev_addr = dest_address
 *
 * Memory to Peripheral: copy from source_address(memory) to dest_address(peripheral)
 *	chan direction = Mem2Dev. chan.control b[8]=1
 *	chan mem_addr = source_address
 *	chan mem_addr_end = chan mem_addr + block_size
 *	chan dev_addr = dest_address
 *
 * Peripheral to Memory:
 *	chan direction = Dev2Mem. chan.control b[8]=0
 *	chan mem_addr = dest_address
 *	chan mem_addr_end = chan mem_addr + block_size
 *	chan dev_addr = source_address
 */
static int dma_xec_configure(const struct device *dev, uint32_t channel,
			     struct dma_config *config)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;
	struct dma_xec_data * const data = dev->data;
	uint32_t ctrl, mstart, mend, dstart, unit_size;
	int ret;

	if (!config || (channel >= (uint32_t)devcfg->dma_channels)) {
		return -EINVAL;
	}

#ifdef XEC_DMA_DEBUG
	xec_dma_debug_clean();
#endif

	const struct dma_xec_irq_info *info = xec_chan_irq_info(devcfg, channel);
	struct dma_xec_chan_regs * const chregs = xec_chan_regs(regs, channel);
	struct dma_xec_channel *chdata = &data->channels[channel];

	chdata->total_req_xfr_len = 0;
	chdata->total_curr_xfr_len = 0;

	xec_dma_chan_clr(chregs, info);

	if (!is_dma_config_valid(dev, config)) {
		return -EINVAL;
	}

	struct dma_block_config *block = config->head_block;

	ret = check_blocks(chdata, block, config->block_count, config->source_data_size);
	if (ret) {
		return ret;
	}

	unit_size = config->source_data_size;
	chdata->unit_size = unit_size;
	chdata->head = block;
	chdata->curr = block;
	chdata->block_count = config->block_count;
	chdata->dir = config->channel_direction;

	chdata->flags = 0;
	chdata->cb = config->dma_callback;
	chdata->user_data = config->user_data;

	/* invoke callback on completion of each block instead of all blocks? */
	if (config->complete_callback_en) {
		chdata->flags |= BIT(DMA_XEC_CHAN_FLAGS_CB_EOB_POS);
	}
	if (config->error_callback_dis) { /* disable callback on errors? */
		chdata->flags |= BIT(DMA_XEC_CHAN_FLAGS_CB_ERR_DIS_POS);
	}

	/* Use the control member of struct dma_xec_channel to
	 * store the control register value containing fields invariant
	 * for all buffers: HW flow control device, direction, unit size, ...
	 * derived from struct dma_config
	 */
	ctrl = XEC_DMA_CHAN_CTRL_UNIT_VAL(unit_size);
	if (config->channel_direction == MEMORY_TO_MEMORY) {
		ctrl |= BIT(XEC_DMA_CHAN_CTRL_DIS_HWFL_POS);
	} else {
		ctrl |= XEC_DMA_HWFL_DEV_VAL(config->dma_slot);
	}

	if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
		mstart = block->dest_address;
		mend = block->dest_address + block->block_size;
		dstart = block->source_address;
		if (block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
			ctrl |= BIT(XEC_DMA_CHAN_CTRL_INCR_DEV_POS);
		}
		if (block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
			ctrl |= BIT(XEC_DMA_CHAN_CTRL_INCR_MEM_POS);
		}
	} else {
		mstart = block->source_address;
		mend = block->source_address + block->block_size;
		dstart = block->dest_address;
		ctrl |= BIT(XEC_DMA_CHAN_CTRL_M2D_POS);
		if (block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
			ctrl |= BIT(XEC_DMA_CHAN_CTRL_INCR_MEM_POS);
		}
		if (block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
			ctrl |= BIT(XEC_DMA_CHAN_CTRL_INCR_DEV_POS);
		}
	}

	chdata->control = ctrl;
	chdata->mstart = mstart;
	chdata->mend = mend;
	chdata->dstart = dstart;

	chregs->actv &= ~BIT(XEC_DMA_CHAN_ACTV_EN_POS);
	chregs->mem_addr = mstart;
	chregs->mem_addr_end = mend;
	chregs->dev_addr = dstart;

	chregs->control = ctrl;
	chregs->ienable = BIT(XEC_DMA_CHAN_IES_BERR_POS) | BIT(XEC_DMA_CHAN_IES_DONE_POS);
	chregs->actv |= BIT(XEC_DMA_CHAN_ACTV_EN_POS);

	return 0;
}

/* Update a previously configured DMA channel with a new data source address,
 * data destination address, and size in bytes.
 * src = source address for DMA transfer
 * dst = destination address for DMA transfer
 * size = size of DMA transfer. Assumed to be in bytes.
 * We assume the caller will pass src, dst, and size that match
 * the unit size from the previous configure call.
 */
static int dma_xec_reload(const struct device *dev, uint32_t channel,
			  uint32_t src, uint32_t dst, size_t size)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_data * const data = dev->data;
	struct dma_xec_regs * const regs = devcfg->regs;
	uint32_t ctrl;

	if (channel >= (uint32_t)devcfg->dma_channels) {
		return -EINVAL;
	}

	struct dma_xec_channel *chdata = &data->channels[channel];
	struct dma_xec_chan_regs *chregs = xec_chan_regs(regs, channel);

	if (chregs->control & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS)) {
		return -EBUSY;
	}

	ctrl = chregs->control & ~(BIT(XEC_DMA_CHAN_CTRL_HWFL_RUN_POS) |
				   BIT(XEC_DMA_CHAN_CTRL_SWFL_GO_POS));
	chregs->ienable = 0;
	chregs->control = 0;
	chregs->istatus = 0xffu;

	if (ctrl & BIT(XEC_DMA_CHAN_CTRL_M2D_POS)) { /* Memory to Device */
		chdata->mstart = src;
		chdata->dstart = dst;
	} else {
		chdata->mstart = dst;
		chdata->dstart = src;
	}

	chdata->mend = chdata->mstart + size;
	chdata->total_req_xfr_len = size;
	chdata->total_curr_xfr_len = 0;

	chregs->mem_addr = chdata->mstart;
	chregs->mem_addr_end = chdata->mend;
	chregs->dev_addr = chdata->dstart;
	chregs->control = ctrl;

	return 0;
}

static int dma_xec_start(const struct device *dev, uint32_t channel)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;
	uint32_t chan_ctrl = 0U;

	if (channel >= (uint32_t)devcfg->dma_channels) {
		return -EINVAL;
	}

	struct dma_xec_chan_regs *chregs = xec_chan_regs(regs, channel);

	if (chregs->control & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS)) {
		return -EBUSY;
	}

	chregs->ienable = 0u;
	chregs->istatus = 0xffu;
	chan_ctrl = chregs->control;

	if (chan_ctrl & BIT(XEC_DMA_CHAN_CTRL_DIS_HWFL_POS)) {
		chan_ctrl |= BIT(XEC_DMA_CHAN_CTRL_SWFL_GO_POS);
	} else {
		chan_ctrl |= BIT(XEC_DMA_CHAN_CTRL_HWFL_RUN_POS);
	}

	chregs->ienable = BIT(XEC_DMA_CHAN_IES_BERR_POS) | BIT(XEC_DMA_CHAN_IES_DONE_POS);
	chregs->control = chan_ctrl;
	chregs->actv |= BIT(XEC_DMA_CHAN_ACTV_EN_POS);

	return 0;
}

static int dma_xec_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;
	int wait_loops = XEC_DMA_ABORT_WAIT_LOOPS;

	if (channel >= (uint32_t)devcfg->dma_channels) {
		return -EINVAL;
	}

	struct dma_xec_chan_regs *chregs = xec_chan_regs(regs, channel);

	chregs->ienable = 0;

	if (chregs->control & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS)) {
		chregs->ienable = 0;
		chregs->control |= BIT(XEC_DMA_CHAN_CTRL_ABORT_POS);
		/* HW stops on the next unit boundary (1, 2, or 4 bytes) */
		do {
			if (!(chregs->control & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS))) {
				break;
			}
		} while (wait_loops--);
	}

	chregs->mem_addr = chregs->mem_addr_end;
	chregs->fsm = 0; /* delay */
	chregs->control = 0;
	chregs->istatus = 0xffu;
	chregs->actv = 0;

	return 0;
}

/* Get DMA transfer status.
 * HW supports: MEMORY_TO_MEMORY, MEMORY_TO_PERIPHERAL, or PERIPHERAL_TO_MEMORY
 * current DMA runtime status structure
 *
 * busy			- is the current DMA transfer busy or idle
 * dir			- DMA transfer direction
 * pending_length	- data length pending to be transferred in bytes
 *			  or platform dependent.
 * We don't implement a circular buffer
 * free			- free buffer space
 * write_position	- write position in a circular dma buffer
 * read_position	- read position in a circular dma buffer
 */
static int dma_xec_get_status(const struct device *dev, uint32_t channel,
			      struct dma_status *status)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_data * const data = dev->data;
	struct dma_xec_regs * const regs = devcfg->regs;
	uint32_t chan_ctrl = 0U;

	if ((channel >= (uint32_t)devcfg->dma_channels) || (!status)) {
		LOG_ERR("unsupported channel");
		return -EINVAL;
	}

	struct dma_xec_channel *chan_data = &data->channels[channel];
	struct dma_xec_chan_regs *chregs = xec_chan_regs(regs, channel);

	chan_ctrl = chregs->control;

	if (chan_ctrl & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS)) {
		status->busy = true;
		/* number of bytes remaining in the channel */
		status->pending_length = chan_data->total_req_xfr_len -
					 (chregs->mem_addr_end - chregs->mem_addr);
	} else {
		status->pending_length = chan_data->total_req_xfr_len -
					 chan_data->total_curr_xfr_len;
		status->busy = false;
	}

	if (chan_ctrl & BIT(XEC_DMA_CHAN_CTRL_DIS_HWFL_POS)) {
		status->dir = MEMORY_TO_MEMORY;
	} else if (chan_ctrl & BIT(XEC_DMA_CHAN_CTRL_M2D_POS)) {
		status->dir = MEMORY_TO_PERIPHERAL;
	} else {
		status->dir = PERIPHERAL_TO_MEMORY;
	}

	status->total_copied = chan_data->total_curr_xfr_len;

	return 0;
}

int xec_dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
	if ((type == DMA_ATTR_MAX_BLOCK_COUNT) && value) {
		*value = 1;
		return 0;
	}

	return -EINVAL;
}

/* returns true if filter matched otherwise returns false */
static bool dma_xec_chan_filter(const struct device *dev, int ch, void *filter_param)
{
	const struct dma_xec_config * const devcfg = dev->config;
	uint32_t filter = 0u;

	if (!filter_param && devcfg->dma_channels) {
		filter = GENMASK(devcfg->dma_channels - 1u, 0);
	} else {
		filter = *((uint32_t *)filter_param);
	}

	return (filter & BIT(ch));
}

/* API - HW does not support suspend/resume */
static const struct dma_driver_api dma_xec_api = {
	.config = dma_xec_configure,
	.reload = dma_xec_reload,
	.start = dma_xec_start,
	.stop = dma_xec_stop,
	.get_status = dma_xec_get_status,
	.chan_filter = dma_xec_chan_filter,
	.get_attribute = xec_dma_get_attribute,
};

#ifdef CONFIG_PM_DEVICE
/* TODO - The DMA block has one PCR SLP_EN and one CLK_REQ.
 * If any channel is running, the block's CLK_REQ is asserted.
 * CLK_REQ will not clear until all channels are done or disabled.
 * Clearing the DMA Main activate bit would kill DMA transactions, resulting
 * in possible data corruption and HW flow control device malfunctions.
 */
static int dmac_xec_pm_action(const struct device *dev, enum pm_device_action action)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;
	int ret = 0;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		regs->mctrl |= BIT(XEC_DMA_MAIN_CTRL_EN_POS);
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		/* regs->mctrl &= ~BIT(XEC_DMA_MAIN_CTRL_EN_POS); */
		break;
	default:
		ret = -ENOTSUP;
	}

	return ret;
}
#endif /* CONFIG_PM_DEVICE */

/* DMA channel interrupt handler called by the ISR.
 * Callback flags in struct dma_config:
 * complete_callback_en
 *	0 = invoke at completion of all blocks
 *	1 = invoke at completion of each block
 * error_callback_dis
 *	0 = invoke on all errors
 *	1 = disabled, do not invoke on errors
 */
/* DEBUG */
#ifdef XEC_DMA_DEBUG
static volatile uint8_t channel_isr_idx[16];
static volatile uint8_t channel_isr_sts[16][16];
static volatile uint32_t channel_isr_ctrl[16][16];

static void xec_dma_debug_clean(void)
{
	memset((void *)channel_isr_idx, 0, sizeof(channel_isr_idx));
	memset((void *)channel_isr_sts, 0, sizeof(channel_isr_sts));
	memset((void *)channel_isr_ctrl, 0, sizeof(channel_isr_ctrl));
}
#endif

static void dma_xec_irq_handler(const struct device *dev, uint32_t channel)
{
	const struct dma_xec_config * const devcfg = dev->config;
	const struct dma_xec_irq_info *info = devcfg->irq_info_list;
	struct dma_xec_data * const data = dev->data;
	struct dma_xec_channel *chan_data = &data->channels[channel];
	struct dma_xec_chan_regs * const regs = xec_chan_regs(devcfg->regs, channel);
	uint32_t sts = regs->istatus;
	int cb_status = 0;

#ifdef XEC_DMA_DEBUG
	uint8_t idx = channel_isr_idx[channel];

	if (idx < 16) {
		channel_isr_sts[channel][idx] = sts;
		channel_isr_ctrl[channel][idx] = regs->control;
		channel_isr_idx[channel] = ++idx;
	}
#endif
	LOG_DBG("maddr=0x%08x mend=0x%08x daddr=0x%08x ctrl=0x%08x sts=0x%02x",
		regs->mem_addr, regs->mem_addr_end, regs->dev_addr, regs->control, sts);

	regs->ienable = 0u;
	regs->istatus = 0xffu;
	mchp_xec_ecia_girq_src_clr(info[channel].gid, info[channel].gpos);

	chan_data->isr_hw_status = sts;
	chan_data->total_curr_xfr_len += (regs->mem_addr - chan_data->mstart);

	if (sts & BIT(XEC_DMA_CHAN_IES_BERR_POS)) { /* Bus Error? */
		if (!(chan_data->flags & BIT(DMA_XEC_CHAN_FLAGS_CB_ERR_DIS_POS))) {
			cb_status = -EIO;
		}
	}

	if (chan_data->cb) {
		chan_data->cb(dev, chan_data->user_data, channel, cb_status);
	}
}

static int dma_xec_init(const struct device *dev)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;

	LOG_DBG("driver init");

	z_mchp_xec_pcr_periph_sleep(devcfg->pcr_idx, devcfg->pcr_pos, 0);

	/* soft reset, self-clearing */
	regs->mctrl = BIT(XEC_DMA_MAIN_CTRL_SRST_POS);
	regs->mpkt = 0u; /* I/O delay, write to read-only register */
	regs->mctrl = BIT(XEC_DMA_MAIN_CTRL_EN_POS);

	devcfg->irq_connect();

	return 0;
}

/* n = node-id, p = property, i = index */
#define DMA_XEC_GID(n, p, i)	MCHP_XEC_ECIA_GIRQ(DT_PROP_BY_IDX(n, p, i))
#define DMA_XEC_GPOS(n, p, i)	MCHP_XEC_ECIA_GIRQ_POS(DT_PROP_BY_IDX(n, p, i))

#define DMA_XEC_GIRQ_INFO(n, p, i)					\
	{								\
		.gid = DMA_XEC_GID(n, p, i),				\
		.gpos = DMA_XEC_GPOS(n, p, i),				\
		.anid = MCHP_XEC_ECIA_NVIC_AGGR(DT_PROP_BY_IDX(n, p, i)),	\
		.dnid = MCHP_XEC_ECIA_NVIC_DIRECT(DT_PROP_BY_IDX(n, p, i)),	\
	},

/* n = node-id, p = property, i = index(channel?) */
#define DMA_XEC_IRQ_DECLARE(node_id, p, i)				\
	static void dma_xec_chan_##i##_isr(const struct device *dev)	\
	{								\
		dma_xec_irq_handler(dev, i);				\
	}								\

#define DMA_XEC_IRQ_CONNECT_SUB(node_id, p, i)				\
	IRQ_CONNECT(DT_IRQ_BY_IDX(node_id, i, irq),			\
		    DT_IRQ_BY_IDX(node_id, i, priority),		\
		    dma_xec_chan_##i##_isr,				\
		    DEVICE_DT_GET(node_id), 0);				\
	irq_enable(DT_IRQ_BY_IDX(node_id, i, irq));			\
	mchp_xec_ecia_enable(DMA_XEC_GID(node_id, p, i), DMA_XEC_GPOS(node_id, p, i));

/* i = instance number of DMA controller */
#define DMA_XEC_IRQ_CONNECT(inst)					\
	DT_INST_FOREACH_PROP_ELEM(inst, girqs, DMA_XEC_IRQ_DECLARE)	\
	void dma_xec_irq_connect##inst(void)				\
	{								\
		DT_INST_FOREACH_PROP_ELEM(inst, girqs, DMA_XEC_IRQ_CONNECT_SUB)	\
	}

#define DMA_XEC_DEVICE(i)						\
	BUILD_ASSERT(DT_INST_PROP(i, dma_channels) <= 16, "XEC DMA dma-channels > 16");	\
	BUILD_ASSERT(DT_INST_PROP(i, dma_requests) <= 16, "XEC DMA dma-requests > 16");	\
									\
	static struct dma_xec_channel					\
		dma_xec_ctrl##i##_chans[DT_INST_PROP(i, dma_channels)];	\
	ATOMIC_DEFINE(dma_xec_atomic##i, DT_INST_PROP(i, dma_channels));	\
									\
	static struct dma_xec_data dma_xec_data##i = {			\
		.ctx.magic = DMA_MAGIC,					\
		.ctx.dma_channels = DT_INST_PROP(i, dma_channels),	\
		.ctx.atomic = dma_xec_atomic##i,			\
		.channels = dma_xec_ctrl##i##_chans,			\
	};								\
									\
	DMA_XEC_IRQ_CONNECT(i)						\
									\
	static const struct dma_xec_irq_info dma_xec_irqi##i[] = {	\
		DT_INST_FOREACH_PROP_ELEM(i, girqs, DMA_XEC_GIRQ_INFO)	\
	};								\
	static const struct dma_xec_config dma_xec_cfg##i = {		\
		.regs = (struct dma_xec_regs *)DT_INST_REG_ADDR(i),	\
		.dma_channels = DT_INST_PROP(i, dma_channels),		\
		.dma_requests = DT_INST_PROP(i, dma_requests),		\
		.pcr_idx = DT_INST_PROP_BY_IDX(i, pcrs, 0),		\
		.pcr_pos = DT_INST_PROP_BY_IDX(i, pcrs, 1),		\
		.irq_info_size = ARRAY_SIZE(dma_xec_irqi##i),		\
		.irq_info_list = dma_xec_irqi##i,			\
		.irq_connect = dma_xec_irq_connect##i,			\
	};								\
	PM_DEVICE_DT_DEFINE(i, dmac_xec_pm_action);			\
	DEVICE_DT_INST_DEFINE(i, &dma_xec_init,				\
			      PM_DEVICE_DT_GET(i),			\
			      &dma_xec_data##i, &dma_xec_cfg##i,	\
			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,	\
			      &dma_xec_api);

DT_INST_FOREACH_STATUS_OKAY(DMA_XEC_DEVICE)
```
/content/code_sandbox/drivers/dma/dma_mchp_xec.c (zephyrproject-rtos/zephyr, C)
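A small standalone sketch of the address arithmetic this XEC driver relies on: channel register blocks sit at fixed 0x40-byte strides past the 0x40-byte main block, and transfer progress is tracked purely by how far mem_addr has advanced toward mem_addr_end. The names mirror the driver above; the base address and transfer state in main() are made-up values for illustration only.

```c
#include <stdint.h>
#include <stdio.h>

#define XEC_DMA_MAIN_REGS_SIZE 0x40u
#define XEC_DMA_CHAN_REGS_SIZE 0x40u

/* Channel N's register block starts right after the main block, at a fixed
 * 0x40-byte stride (mirrors XEC_DMA_CHAN_REGS_ADDR / xec_chan_regs above).
 */
static uintptr_t chan_regs_addr(uintptr_t base, uint32_t channel)
{
	return base + XEC_DMA_MAIN_REGS_SIZE +
	       (uintptr_t)channel * XEC_DMA_CHAN_REGS_SIZE;
}

/* Mirrors the pending_length expression in dma_xec_get_status() while the
 * channel is busy: the HW stops when mem_addr reaches mem_addr_end, so
 * (mem_addr_end - mem_addr) is what remains of the current window.
 */
static uint32_t pending_bytes(uint32_t total_req, uint32_t mem_addr, uint32_t mem_addr_end)
{
	return total_req - (mem_addr_end - mem_addr);
}

int main(void)
{
	/* made-up base address and transfer state for illustration */
	printf("chan 3 regs at 0x%lx\n", (unsigned long)chan_regs_addr(0x40002400u, 3));
	printf("pending value: %u\n", pending_bytes(256u, 0x20000080u, 0x20000100u));
	return 0;
}
```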
```c
/*
 *
 */

#include <zephyr/device.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/dt-bindings/dma/rpi_pico_dma.h>
#include <hardware/dma.h>

#define DT_DRV_COMPAT raspberrypi_pico_dma

#define DMA_INT_ERROR_FLAGS						\
	(DMA_CH0_CTRL_TRIG_AHB_ERROR_BITS | DMA_CH0_CTRL_TRIG_READ_ERROR_BITS |	\
	 DMA_CH0_CTRL_TRIG_WRITE_ERROR_BITS)

LOG_MODULE_REGISTER(dma_rpi_pico, CONFIG_DMA_LOG_LEVEL);

struct dma_rpi_pico_config {
	uint32_t reg;
	uint32_t channels;
	struct reset_dt_spec reset;
	void (*irq_configure)(void);
	uint32_t *irq0_channels;
	size_t irq0_channels_size;
};

struct dma_rpi_pico_channel {
	dma_callback_t callback;
	void *user_data;
	uint32_t direction;
	dma_channel_config config;
	void *source_address;
	void *dest_address;
	size_t block_size;
};

struct dma_rpi_pico_data {
	struct dma_context ctx;
	struct dma_rpi_pico_channel *channels;
};

/*
 * Register access functions
 */

static inline void rpi_pico_dma_channel_clear_error_flags(const struct device *dev,
							  uint32_t channel)
{
	const struct dma_rpi_pico_config *cfg = dev->config;

	((dma_hw_t *)cfg->reg)->ch[channel].al1_ctrl &= ~DMA_INT_ERROR_FLAGS;
}

static inline uint32_t rpi_pico_dma_channel_get_error_flags(const struct device *dev,
							    uint32_t channel)
{
	const struct dma_rpi_pico_config *cfg = dev->config;

	return ((dma_hw_t *)cfg->reg)->ch[channel].al1_ctrl & DMA_INT_ERROR_FLAGS;
}

static inline void rpi_pico_dma_channel_abort(const struct device *dev, uint32_t channel)
{
	const struct dma_rpi_pico_config *cfg = dev->config;

	((dma_hw_t *)cfg->reg)->abort = BIT(channel);
}

/*
 * Utility functions
 */

static inline uint32_t dma_rpi_pico_transfer_size(uint32_t width)
{
	switch (width) {
	case 4:
		return DMA_SIZE_32;
	case 2:
		return DMA_SIZE_16;
	default:
		return DMA_SIZE_8;
	}
}

static inline uint32_t dma_rpi_pico_channel_irq(const struct device *dev, uint32_t channel)
{
	const struct dma_rpi_pico_config *cfg = dev->config;

	for (size_t i = 0; i < cfg->irq0_channels_size; i++) {
		if (cfg->irq0_channels[i] == channel) {
			return 0;
		}
	}

	return 1;
}

/*
 * API functions
 */

static int dma_rpi_pico_config(const struct device *dev, uint32_t channel,
			       struct dma_config *dma_cfg)
{
	const struct dma_rpi_pico_config *cfg = dev->config;
	struct dma_rpi_pico_data *data = dev->data;

	if (channel >= cfg->channels) {
		LOG_ERR("channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, channel);
		return -EINVAL;
	}

	if (dma_cfg->block_count != 1) {
		LOG_ERR("chained block transfer not supported.");
		return -ENOTSUP;
	}

	if (dma_cfg->channel_priority > 3) {
		LOG_ERR("channel_priority must be < 4 (%" PRIu32 ")", dma_cfg->channel_priority);
		return -EINVAL;
	}

	if (dma_cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_DECREMENT) {
		LOG_ERR("source_addr_adj not supported DMA_ADDR_ADJ_DECREMENT");
		return -ENOTSUP;
	}

	if (dma_cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_DECREMENT) {
		LOG_ERR("dest_addr_adj not supported DMA_ADDR_ADJ_DECREMENT");
		return -ENOTSUP;
	}

	if (dma_cfg->head_block->source_addr_adj != DMA_ADDR_ADJ_INCREMENT &&
	    dma_cfg->head_block->source_addr_adj != DMA_ADDR_ADJ_NO_CHANGE) {
		LOG_ERR("invalid source_addr_adj %" PRIu16, dma_cfg->head_block->source_addr_adj);
		return -ENOTSUP;
	}
	if (dma_cfg->head_block->dest_addr_adj != DMA_ADDR_ADJ_INCREMENT &&
	    dma_cfg->head_block->dest_addr_adj != DMA_ADDR_ADJ_NO_CHANGE) {
		LOG_ERR("invalid dest_addr_adj %" PRIu16, dma_cfg->head_block->dest_addr_adj);
		return -ENOTSUP;
	}

	if (dma_cfg->source_data_size != 1 && dma_cfg->source_data_size != 2 &&
	    dma_cfg->source_data_size != 4) {
		LOG_ERR("source_data_size must be 1, 2, or 4 (%" PRIu32 ")",
			dma_cfg->source_data_size);
		return -EINVAL;
	}

	if (dma_cfg->source_data_size != dma_cfg->dest_data_size) {
		return -EINVAL;
	}

	if (dma_cfg->dest_data_size != 1 && dma_cfg->dest_data_size != 2 &&
	    dma_cfg->dest_data_size != 4) {
		LOG_ERR("dest_data_size must be 1, 2, or 4 (%" PRIu32 ")",
			dma_cfg->dest_data_size);
		return -EINVAL;
	}

	if (dma_cfg->channel_direction > PERIPHERAL_TO_MEMORY) {
		LOG_ERR("channel_direction must be MEMORY_TO_MEMORY, "
			"MEMORY_TO_PERIPHERAL or PERIPHERAL_TO_MEMORY (%" PRIu32 ")",
			dma_cfg->channel_direction);
		return -ENOTSUP;
	}

	data->channels[channel].config = dma_channel_get_default_config(channel);

	data->channels[channel].source_address = (void *)dma_cfg->head_block->source_address;
	data->channels[channel].dest_address = (void *)dma_cfg->head_block->dest_address;
	data->channels[channel].block_size = dma_cfg->head_block->block_size;

	channel_config_set_read_increment(&data->channels[channel].config,
					  dma_cfg->head_block->source_addr_adj ==
						  DMA_ADDR_ADJ_INCREMENT);
	channel_config_set_write_increment(&data->channels[channel].config,
					   dma_cfg->head_block->dest_addr_adj ==
						   DMA_ADDR_ADJ_INCREMENT);
	channel_config_set_transfer_data_size(
		&data->channels[channel].config,
		dma_rpi_pico_transfer_size(dma_cfg->source_data_size));
	channel_config_set_dreq(&data->channels[channel].config,
				RPI_PICO_DMA_SLOT_TO_DREQ(dma_cfg->dma_slot));
	channel_config_set_high_priority(&data->channels[channel].config,
					 !!(dma_cfg->channel_priority));

	data->channels[channel].callback = dma_cfg->dma_callback;
	data->channels[channel].user_data = dma_cfg->user_data;
	data->channels[channel].direction = dma_cfg->channel_direction;

	return 0;
}

static int dma_rpi_pico_reload(const struct device *dev, uint32_t ch,
			       uint32_t src, uint32_t dst, size_t size)
{
	const struct dma_rpi_pico_config *cfg = dev->config;
	struct dma_rpi_pico_data *data = dev->data;

	if (ch >= cfg->channels) {
		LOG_ERR("reload channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, ch);
		return -EINVAL;
	}

	if (dma_channel_is_busy(ch)) {
		return -EBUSY;
	}

	data->channels[ch].source_address = (void *)src;
	data->channels[ch].dest_address = (void *)dst;
	data->channels[ch].block_size = size;

	dma_channel_configure(ch, &data->channels[ch].config,
			      data->channels[ch].dest_address,
			      data->channels[ch].source_address,
			      data->channels[ch].block_size, true);

	return 0;
}

static int dma_rpi_pico_start(const struct device *dev, uint32_t ch)
{
	const struct dma_rpi_pico_config *cfg = dev->config;
	struct dma_rpi_pico_data *data = dev->data;

	if (ch >= cfg->channels) {
		LOG_ERR("start channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, ch);
		return -EINVAL;
	}

	dma_irqn_acknowledge_channel(dma_rpi_pico_channel_irq(dev, ch), ch);
	dma_irqn_set_channel_enabled(dma_rpi_pico_channel_irq(dev, ch), ch, true);

	dma_channel_configure(ch, &data->channels[ch].config,
			      data->channels[ch].dest_address,
			      data->channels[ch].source_address,
			      data->channels[ch].block_size, true);

	return 0;
}

static int dma_rpi_pico_stop(const struct device *dev, uint32_t ch)
{
	const struct dma_rpi_pico_config *cfg = dev->config;

	if (ch >= cfg->channels) {
		LOG_ERR("stop channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, ch);
		return -EINVAL;
	}

	dma_irqn_set_channel_enabled(dma_rpi_pico_channel_irq(dev, ch), ch, false);
	rpi_pico_dma_channel_clear_error_flags(dev, ch);

	/*
	 * Considering the possibility of being called in an interrupt context,
	 * this does not wait until the abort bit becomes clear. Ensure the busy
	 * status is canceled with dma_get_status before the next transfer starts.
	 */
	rpi_pico_dma_channel_abort(dev, ch);

	return 0;
}

static int dma_rpi_pico_get_status(const struct device *dev, uint32_t ch,
				   struct dma_status *stat)
{
	const struct dma_rpi_pico_config *cfg = dev->config;
	struct dma_rpi_pico_data *data = dev->data;

	if (ch >= cfg->channels) {
		LOG_ERR("channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, ch);
		return -EINVAL;
	}

	stat->pending_length = 0;
	stat->dir = data->channels[ch].direction;
	stat->busy = dma_channel_is_busy(ch);

	return 0;
}

static bool dma_rpi_pico_api_chan_filter(const struct device *dev, int ch, void *filter_param)
{
	uint32_t filter;

	if (!filter_param) {
		LOG_ERR("filter_param must not be NULL");
		return false;
	}

	filter = *((uint32_t *)filter_param);

	return (filter & BIT(ch));
}

static int dma_rpi_pico_init(const struct device *dev)
{
	const struct dma_rpi_pico_config *cfg = dev->config;

	(void)reset_line_toggle_dt(&cfg->reset);

	cfg->irq_configure();

	return 0;
}

static void dma_rpi_pico_isr(const struct device *dev)
{
	const struct dma_rpi_pico_config *cfg = dev->config;
	struct dma_rpi_pico_data *data = dev->data;
	int err = 0;

	for (uint32_t i = 0; i < cfg->channels; i++) {
		if (!dma_irqn_get_channel_status(dma_rpi_pico_channel_irq(dev, i), i)) {
			continue;
		}

		if (rpi_pico_dma_channel_get_error_flags(dev, i)) {
			err = -EIO;
		}

		dma_irqn_acknowledge_channel(dma_rpi_pico_channel_irq(dev, i), i);
		dma_irqn_set_channel_enabled(dma_rpi_pico_channel_irq(dev, i), i, false);
		rpi_pico_dma_channel_clear_error_flags(dev, i);

		if (data->channels[i].callback) {
			data->channels[i].callback(dev, data->channels[i].user_data, i, err);
		}
	}
}

static const struct dma_driver_api dma_rpi_pico_driver_api = {
	.config = dma_rpi_pico_config,
	.reload = dma_rpi_pico_reload,
	.start = dma_rpi_pico_start,
	.stop = dma_rpi_pico_stop,
	.get_status = dma_rpi_pico_get_status,
	.chan_filter = dma_rpi_pico_api_chan_filter,
};

#define IRQ_CONFIGURE(n, inst)						\
	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, n, irq),			\
		    DT_INST_IRQ_BY_IDX(inst, n, priority),		\
		    dma_rpi_pico_isr, DEVICE_DT_INST_GET(inst), 0);	\
	irq_enable(DT_INST_IRQ_BY_IDX(inst, n, irq));

#define CONFIGURE_ALL_IRQS(inst, n) LISTIFY(n, IRQ_CONFIGURE, (), inst)

#define RPI_PICO_DMA_INIT(inst)						\
	static void dma_rpi_pico##inst##_irq_configure(void)		\
	{								\
		CONFIGURE_ALL_IRQS(inst, DT_NUM_IRQS(DT_DRV_INST(inst)));	\
	}								\
	static uint32_t dma_rpi_pico##inst##_irq0_channels[] =		\
		DT_INST_PROP_OR(inst, irq0_channels, {0});		\
	static const struct dma_rpi_pico_config dma_rpi_pico##inst##_config = {	\
		.reg = DT_INST_REG_ADDR(inst),				\
		.channels = DT_INST_PROP(inst, dma_channels),		\
		.reset = RESET_DT_SPEC_INST_GET(inst),			\
		.irq_configure = dma_rpi_pico##inst##_irq_configure,	\
		.irq0_channels = dma_rpi_pico##inst##_irq0_channels,	\
		.irq0_channels_size = ARRAY_SIZE(dma_rpi_pico##inst##_irq0_channels),	\
	};								\
	static struct dma_rpi_pico_channel				\
		dma_rpi_pico##inst##_channels[DT_INST_PROP(inst, dma_channels)];	\
	ATOMIC_DEFINE(dma_rpi_pico_atomic##inst, DT_INST_PROP(inst, dma_channels));	\
	static struct dma_rpi_pico_data dma_rpi_pico##inst##_data = {	\
		.ctx =							\
			{						\
				.magic = DMA_MAGIC,			\
				.atomic = dma_rpi_pico_atomic##inst,	\
				.dma_channels = DT_INST_PROP(inst, dma_channels),	\
			},						\
		.channels = dma_rpi_pico##inst##_channels,		\
	};								\
									\
	DEVICE_DT_INST_DEFINE(inst, &dma_rpi_pico_init, NULL, &dma_rpi_pico##inst##_data,	\
			      &dma_rpi_pico##inst##_config, POST_KERNEL,	\
			      CONFIG_DMA_INIT_PRIORITY,			\
			      &dma_rpi_pico_driver_api);

DT_INST_FOREACH_STATUS_OKAY(RPI_PICO_DMA_INIT)
```
/content/code_sandbox/drivers/dma/dma_rpi_pico.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,203
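The RP2040 driver above implements Zephyr's generic DMA API, so applications drive it through `dma_config()`/`dma_start()` rather than the Pico SDK. Below is a minimal usage sketch of a memory-to-memory transfer against that API; the device pointer, channel number 0, word-sized beats, and buffer names are assumptions chosen for illustration, not part of the driver source.
```c
/*
 * Hypothetical usage sketch for the generic Zephyr DMA API as implemented
 * by dma_rpi_pico.c. Channel 0 and the 4-byte data size are assumptions.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	/* status is 0 on completion, a negative errno on error */
}

static int start_mem_to_mem(const struct device *dma_dev,
			    uint32_t *src, uint32_t *dst, size_t len)
{
	struct dma_block_config block = {
		.source_address = (uint32_t)(uintptr_t)src,
		.dest_address = (uint32_t)(uintptr_t)dst,
		.block_size = len, /* in bytes */
		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,
		.dest_data_size = 4,
		.block_count = 1,
		.head_block = &block,
		.dma_callback = xfer_done,
	};
	int ret = dma_config(dma_dev, 0, &cfg);

	return (ret == 0) ? dma_start(dma_dev, 0) : ret;
}
```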
```c /* * */ #define DT_DRV_COMPAT atmel_sam0_dmac #include <zephyr/device.h> #include <soc.h> #include <zephyr/drivers/dma.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_sam0, CONFIG_DMA_LOG_LEVEL); #define DMA_REGS ((Dmac *)DT_INST_REG_ADDR(0)) struct dma_sam0_channel { dma_callback_t cb; void *user_data; }; struct dma_sam0_data { __aligned(16) DmacDescriptor descriptors[DMAC_CH_NUM]; __aligned(16) DmacDescriptor descriptors_wb[DMAC_CH_NUM]; struct dma_sam0_channel channels[DMAC_CH_NUM]; }; /* Handles DMA interrupts and dispatches to the individual channel */ static void dma_sam0_isr(const struct device *dev) { struct dma_sam0_data *data = dev->data; struct dma_sam0_channel *chdata; uint16_t pend = DMA_REGS->INTPEND.reg; uint32_t channel; /* Acknowledge all interrupts for the channel in pend */ DMA_REGS->INTPEND.reg = pend; channel = (pend & DMAC_INTPEND_ID_Msk) >> DMAC_INTPEND_ID_Pos; chdata = &data->channels[channel]; if (pend & DMAC_INTPEND_TERR) { if (chdata->cb) { chdata->cb(dev, chdata->user_data, channel, -DMAC_INTPEND_TERR); } } else if (pend & DMAC_INTPEND_TCMPL) { if (chdata->cb) { chdata->cb(dev, chdata->user_data, channel, 0); } } /* * If more than one channel is pending, we'll just immediately * interrupt again and handle it through a different INTPEND value. */ } /* Configure a channel */ static int dma_sam0_config(const struct device *dev, uint32_t channel, struct dma_config *config) { struct dma_sam0_data *data = dev->data; DmacDescriptor *desc = &data->descriptors[channel]; struct dma_block_config *block = config->head_block; struct dma_sam0_channel *channel_control; DMAC_BTCTRL_Type btctrl = { .reg = 0 }; unsigned int key; if (channel >= DMAC_CH_NUM) { LOG_ERR("Unsupported channel"); return -EINVAL; } if (config->block_count > 1) { LOG_ERR("Chained transfers not supported"); /* TODO: add support for chained transfers. */ return -ENOTSUP; } if (config->dma_slot >= DMAC_TRIG_NUM) { LOG_ERR("Invalid trigger number"); return -EINVAL; } /* Lock and page in the channel configuration */ key = irq_lock(); /* * The "bigger" DMAC on some SAM0 chips (e.g. SAMD5x) has * independently accessible registers for each channel, while * the other ones require an indirect channel selection before * accessing shared registers. The simplest way to detect the * difference is the presence of the DMAC_CHID_ID macro from the * ASF HAL (i.e. it's only defined if indirect access is required). 
*/ #ifdef DMAC_CHID_ID /* Select the channel for configuration */ DMA_REGS->CHID.reg = DMAC_CHID_ID(channel); DMA_REGS->CHCTRLA.reg = 0; /* Connect the peripheral trigger */ if (config->channel_direction == MEMORY_TO_MEMORY) { /* * A single software trigger will start the * transfer */ DMA_REGS->CHCTRLB.reg = DMAC_CHCTRLB_TRIGACT_TRANSACTION | DMAC_CHCTRLB_TRIGSRC(config->dma_slot); } else { /* One peripheral trigger per beat */ DMA_REGS->CHCTRLB.reg = DMAC_CHCTRLB_TRIGACT_BEAT | DMAC_CHCTRLB_TRIGSRC(config->dma_slot); } /* Set the priority */ if (config->channel_priority >= DMAC_LVL_NUM) { LOG_ERR("Invalid priority"); goto inval; } DMA_REGS->CHCTRLB.bit.LVL = config->channel_priority; /* Enable the interrupts */ DMA_REGS->CHINTENSET.reg = DMAC_CHINTENSET_TCMPL; if (!config->error_callback_dis) { DMA_REGS->CHINTENSET.reg = DMAC_CHINTENSET_TERR; } else { DMA_REGS->CHINTENCLR.reg = DMAC_CHINTENSET_TERR; } DMA_REGS->CHINTFLAG.reg = DMAC_CHINTFLAG_TERR | DMAC_CHINTFLAG_TCMPL; #else /* Channels have separate configuration registers */ DmacChannel * chcfg = &DMA_REGS->Channel[channel]; if (config->channel_direction == MEMORY_TO_MEMORY) { /* * A single software trigger will start the * transfer */ chcfg->CHCTRLA.reg = DMAC_CHCTRLA_TRIGACT_TRANSACTION | DMAC_CHCTRLA_TRIGSRC(config->dma_slot); } else if ((config->channel_direction == MEMORY_TO_PERIPHERAL) || (config->channel_direction == PERIPHERAL_TO_MEMORY)) { /* One peripheral trigger per beat */ chcfg->CHCTRLA.reg = DMAC_CHCTRLA_TRIGACT_BURST | DMAC_CHCTRLA_TRIGSRC(config->dma_slot); } else { LOG_ERR("Direction error. %d", config->channel_direction); goto inval; } /* Set the priority */ if (config->channel_priority >= DMAC_LVL_NUM) { LOG_ERR("Invalid priority"); goto inval; } chcfg->CHPRILVL.bit.PRILVL = config->channel_priority; /* Set the burst length */ if (config->source_burst_length != config->dest_burst_length) { LOG_ERR("Source and destination burst lengths must be equal"); goto inval; } if (config->source_burst_length > 16U) { LOG_ERR("Invalid burst length"); goto inval; } if (config->source_burst_length > 0U) { chcfg->CHCTRLA.reg |= DMAC_CHCTRLA_BURSTLEN( config->source_burst_length - 1U); } /* Enable the interrupts */ chcfg->CHINTENSET.reg = DMAC_CHINTENSET_TCMPL; if (!config->error_callback_dis) { chcfg->CHINTENSET.reg = DMAC_CHINTENSET_TERR; } else { chcfg->CHINTENCLR.reg = DMAC_CHINTENSET_TERR; } chcfg->CHINTFLAG.reg = DMAC_CHINTFLAG_TERR | DMAC_CHINTFLAG_TCMPL; #endif /* Set the beat (single transfer) size */ if (config->source_data_size != config->dest_data_size) { LOG_ERR("Source and destination data sizes must be equal"); goto inval; } switch (config->source_data_size) { case 1: btctrl.bit.BEATSIZE = DMAC_BTCTRL_BEATSIZE_BYTE_Val; break; case 2: btctrl.bit.BEATSIZE = DMAC_BTCTRL_BEATSIZE_HWORD_Val; break; case 4: btctrl.bit.BEATSIZE = DMAC_BTCTRL_BEATSIZE_WORD_Val; break; default: LOG_ERR("Invalid data size"); goto inval; } /* Set up the one and only block */ desc->BTCNT.reg = block->block_size / config->source_data_size; desc->DESCADDR.reg = 0; /* Set the automatic source / dest increment */ switch (block->source_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: desc->SRCADDR.reg = block->source_address + block->block_size; btctrl.bit.SRCINC = 1; break; case DMA_ADDR_ADJ_NO_CHANGE: desc->SRCADDR.reg = block->source_address; break; default: LOG_ERR("Invalid source increment"); goto inval; } switch (block->dest_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: desc->DSTADDR.reg = block->dest_address + block->block_size; btctrl.bit.DSTINC = 1; 
break; case DMA_ADDR_ADJ_NO_CHANGE: desc->DSTADDR.reg = block->dest_address; break; default: LOG_ERR("Invalid destination increment"); goto inval; } btctrl.bit.VALID = 1; desc->BTCTRL = btctrl; channel_control = &data->channels[channel]; channel_control->cb = config->dma_callback; channel_control->user_data = config->user_data; LOG_DBG("Configured channel %d for %08X to %08X (%u)", channel, block->source_address, block->dest_address, block->block_size); irq_unlock(key); return 0; inval: irq_unlock(key); return -EINVAL; } static int dma_sam0_start(const struct device *dev, uint32_t channel) { unsigned int key = irq_lock(); ARG_UNUSED(dev); #ifdef DMAC_CHID_ID DMA_REGS->CHID.reg = channel; DMA_REGS->CHCTRLA.reg = DMAC_CHCTRLA_ENABLE; if (DMA_REGS->CHCTRLB.bit.TRIGSRC == 0) { /* Trigger via software */ DMA_REGS->SWTRIGCTRL.reg = 1U << channel; } #else DmacChannel * chcfg = &DMA_REGS->Channel[channel]; chcfg->CHCTRLA.bit.ENABLE = 1; if (chcfg->CHCTRLA.bit.TRIGSRC == 0) { /* Trigger via software */ DMA_REGS->SWTRIGCTRL.reg = 1U << channel; } #endif irq_unlock(key); return 0; } static int dma_sam0_stop(const struct device *dev, uint32_t channel) { unsigned int key = irq_lock(); ARG_UNUSED(dev); #ifdef DMAC_CHID_ID DMA_REGS->CHID.reg = channel; DMA_REGS->CHCTRLA.reg = 0; #else DmacChannel * chcfg = &DMA_REGS->Channel[channel]; chcfg->CHCTRLA.bit.ENABLE = 0; #endif irq_unlock(key); return 0; } static int dma_sam0_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { struct dma_sam0_data *data = dev->data; DmacDescriptor *desc = &data->descriptors[channel]; unsigned int key = irq_lock(); switch (desc->BTCTRL.bit.BEATSIZE) { case DMAC_BTCTRL_BEATSIZE_BYTE_Val: desc->BTCNT.reg = size; break; case DMAC_BTCTRL_BEATSIZE_HWORD_Val: desc->BTCNT.reg = size / 2U; break; case DMAC_BTCTRL_BEATSIZE_WORD_Val: desc->BTCNT.reg = size / 4U; break; default: goto inval; } if (desc->BTCTRL.bit.SRCINC) { desc->SRCADDR.reg = src + size; } else { desc->SRCADDR.reg = src; } if (desc->BTCTRL.bit.DSTINC) { desc->DSTADDR.reg = dst + size; } else { desc->DSTADDR.reg = dst; } LOG_DBG("Reloaded channel %d for %08X to %08X (%u)", channel, src, dst, size); irq_unlock(key); return 0; inval: irq_unlock(key); return -EINVAL; } static int dma_sam0_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { struct dma_sam0_data *data = dev->data; uint32_t act; if (channel >= DMAC_CH_NUM || stat == NULL) { return -EINVAL; } act = DMA_REGS->ACTIVE.reg; if ((act & DMAC_ACTIVE_ABUSY) && ((act & DMAC_ACTIVE_ID_Msk) >> DMAC_ACTIVE_ID_Pos) == channel) { stat->busy = true; stat->pending_length = (act & DMAC_ACTIVE_BTCNT_Msk) >> DMAC_ACTIVE_BTCNT_Pos; } else { stat->busy = false; stat->pending_length = data->descriptors_wb[channel].BTCNT.reg; } switch (data->descriptors[channel].BTCTRL.bit.BEATSIZE) { case DMAC_BTCTRL_BEATSIZE_BYTE_Val: break; case DMAC_BTCTRL_BEATSIZE_HWORD_Val: stat->pending_length *= 2U; break; case DMAC_BTCTRL_BEATSIZE_WORD_Val: stat->pending_length *= 4U; break; default: return -EINVAL; } return 0; } #define DMA_SAM0_IRQ_CONNECT(n) \ do { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, n, irq), \ DT_INST_IRQ_BY_IDX(0, n, priority), \ dma_sam0_isr, DEVICE_DT_INST_GET(0), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(0, n, irq)); \ } while (false) static int dma_sam0_init(const struct device *dev) { struct dma_sam0_data *data = dev->data; /* Enable clocks. 
*/ #ifdef MCLK MCLK->AHBMASK.bit.DMAC_ = 1; #else PM->AHBMASK.bit.DMAC_ = 1; PM->APBBMASK.bit.DMAC_ = 1; #endif /* Set up the descriptor and write back addresses */ DMA_REGS->BASEADDR.reg = (uintptr_t)&data->descriptors; DMA_REGS->WRBADDR.reg = (uintptr_t)&data->descriptors_wb; /* Statically map each level to the same numeric priority */ DMA_REGS->PRICTRL0.reg = DMAC_PRICTRL0_LVLPRI0(0) | DMAC_PRICTRL0_LVLPRI1(1) | DMAC_PRICTRL0_LVLPRI2(2) | DMAC_PRICTRL0_LVLPRI3(3); /* Enable the unit and enable all priorities */ DMA_REGS->CTRL.reg = DMAC_CTRL_DMAENABLE | DMAC_CTRL_LVLEN(0x0F); #if DT_INST_IRQ_HAS_CELL(0, irq) DMA_SAM0_IRQ_CONNECT(0); #endif #if DT_INST_IRQ_HAS_IDX(0, 1) DMA_SAM0_IRQ_CONNECT(1); #endif #if DT_INST_IRQ_HAS_IDX(0, 2) DMA_SAM0_IRQ_CONNECT(2); #endif #if DT_INST_IRQ_HAS_IDX(0, 3) DMA_SAM0_IRQ_CONNECT(3); #endif #if DT_INST_IRQ_HAS_IDX(0, 4) DMA_SAM0_IRQ_CONNECT(4); #endif return 0; } static struct dma_sam0_data dmac_data; static const struct dma_driver_api dma_sam0_api = { .config = dma_sam0_config, .start = dma_sam0_start, .stop = dma_sam0_stop, .reload = dma_sam0_reload, .get_status = dma_sam0_get_status, }; DEVICE_DT_INST_DEFINE(0, &dma_sam0_init, NULL, &dmac_data, NULL, PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &dma_sam0_api); ```
/content/code_sandbox/drivers/dma/dma_sam0.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,407
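Note how `dma_sam0_reload()` above converts the byte count into beats by dividing by the configured BEATSIZE, so reload sizes should remain multiples of the beat size. A hedged sketch of re-arming an already-configured channel follows; the device pointer, channel number, and function name are illustrative assumptions.
```c
/*
 * Illustrative only: reuse a SAM0 DMA channel that was configured earlier
 * with dma_config(). dma_reload() takes the size in bytes; the driver
 * divides it by the configured beat size when programming BTCNT.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static int resend(const struct device *dma_dev, uint32_t channel,
		  const uint8_t *src, uint8_t *dst, size_t len)
{
	int ret = dma_reload(dma_dev, channel,
			     (uint32_t)(uintptr_t)src,
			     (uint32_t)(uintptr_t)dst, len);

	return (ret == 0) ? dma_start(dma_dev, channel) : ret;
}
```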
```c /* * */ #define DT_DRV_COMPAT brcm_iproc_pax_dma_v1 #include <zephyr/arch/cpu.h> #include <zephyr/cache.h> #include <errno.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/linker/sections.h> #include <soc.h> #include <string.h> #include <zephyr/toolchain.h> #include <zephyr/types.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/pcie/endpoint/pcie_ep.h> #include "dma_iproc_pax_v1.h" #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_iproc_pax); /* Driver runtime data for PAX DMA and RM */ static struct dma_iproc_pax_data pax_dma_data; static inline uint32_t reset_pkt_id(struct dma_iproc_pax_ring_data *ring) { return ring->pkt_id = 0x0; } /** * @brief Opaque/packet id allocator, range 0 to 31 */ static inline uint32_t alloc_pkt_id(struct dma_iproc_pax_ring_data *ring) { ring->pkt_id = (ring->pkt_id + 1) % 32; return ring->pkt_id; } static inline uint32_t curr_pkt_id(struct dma_iproc_pax_ring_data *ring) { return ring->pkt_id; } static inline uint32_t curr_toggle_val(struct dma_iproc_pax_ring_data *ring) { return ring->curr.toggle; } /** * @brief Populate header descriptor */ static inline void rm_write_header_desc(void *desc, uint32_t toggle, uint32_t opq, uint32_t bdcount) { struct rm_header *r = (struct rm_header *)desc; r->opq = opq; /* DMA descriptor count init value */ r->bdcount = bdcount; r->prot = 0x0; /* No packet extension, start and end set to '1' */ r->start = 1; r->end = 1; r->toggle = toggle; /* RM header type */ r->type = PAX_DMA_TYPE_RM_HEADER; } /** * @brief Fill RM header descriptor for next transfer * with invalid toggle */ static inline void rm_write_header_next_desc(void *desc, struct dma_iproc_pax_ring_data *r, uint32_t opq, uint32_t bdcount) { /* Toggle bit is invalid until next payload configured */ rm_write_header_desc(desc, (r->curr.toggle == 0) ? 1 : 0, opq, bdcount); } static inline void rm_header_set_bd_count(void *desc, uint32_t bdcount) { struct rm_header *r = (struct rm_header *)desc; /* DMA descriptor count */ r->bdcount = bdcount; } static inline void rm_header_set_toggle(void *desc, uint32_t toggle) { struct rm_header *r = (struct rm_header *)desc; r->toggle = toggle; } /** * @brief Populate dma header descriptor */ static inline void rm_write_dma_header_desc(void *desc, struct dma_iproc_pax_payload *pl) { struct dma_header_desc *hdr = (struct dma_header_desc *)desc; hdr->length = pl->xfer_sz; hdr->opcode = pl->direction; /* DMA header type */ hdr->type = PAX_DMA_TYPE_DMA_DESC; } /** * @brief Populate axi address descriptor */ static inline void rm_write_axi_addr_desc(void *desc, struct dma_iproc_pax_payload *pl) { struct axi_addr_desc *axi = (struct axi_addr_desc *)desc; axi->axi_addr = pl->axi_addr; axi->type = PAX_DMA_TYPE_DMA_DESC; } /** * @brief Populate pci address descriptor */ static inline void rm_write_pci_addr_desc(void *desc, struct dma_iproc_pax_payload *pl) { struct pci_addr_desc *pci = (struct pci_addr_desc *)desc; pci->pcie_addr = pl->pci_addr >> PAX_DMA_PCI_ADDR_ALIGNMT_SHIFT; pci->type = PAX_DMA_TYPE_DMA_DESC; } /** * @brief Return's pointer to the descriptor memory to be written next, * skip next pointer descriptor address. 
*/ static void *next_desc_addr(struct dma_iproc_pax_ring_data *ring) { struct next_ptr_desc *nxt; uintptr_t curr; curr = (uintptr_t)ring->curr.write_ptr + PAX_DMA_RM_DESC_BDWIDTH; /* if hit next table ptr, skip to next location, flip toggle */ nxt = (struct next_ptr_desc *)curr; if (nxt->type == PAX_DMA_TYPE_NEXT_PTR) { LOG_DBG("hit next_ptr@0x%lx:T%d, next_table@0x%lx\n", curr, nxt->toggle, (uintptr_t)nxt->addr); uintptr_t last = (uintptr_t)ring->bd + PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS; ring->curr.toggle = (ring->curr.toggle == 0) ? 1 : 0; /* move to next addr, wrap around if hits end */ curr += PAX_DMA_RM_DESC_BDWIDTH; if (curr == last) { curr = (uintptr_t)ring->bd; LOG_DBG("hit end of desc:0x%lx, wrap to 0x%lx\n", last, curr); } } ring->curr.write_ptr = (void *)curr; return (void *)curr; } /** * @brief Populate next ptr descriptor */ static void rm_write_next_table_desc(void *desc, void *next_ptr, uint32_t toggle) { struct next_ptr_desc *nxt = (struct next_ptr_desc *)desc; nxt->addr = (uintptr_t)next_ptr; nxt->type = PAX_DMA_TYPE_NEXT_PTR; nxt->toggle = toggle; } static void prepare_ring(struct dma_iproc_pax_ring_data *ring) { uintptr_t curr, next, last; uint32_t toggle; int buff_count = PAX_DMA_NUM_BD_BUFFS; /* zero out descriptor area */ memset(ring->bd, 0x0, PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS); memset(ring->cmpl, 0x0, PAX_DMA_RM_CMPL_RING_SIZE); /* opaque/packet id value */ rm_write_header_desc(ring->bd, 0x0, reset_pkt_id(ring), PAX_DMA_RM_DESC_BDCOUNT); /* start with first buffer, valid toggle is 0x1 */ toggle = 0x1; curr = (uintptr_t)ring->bd; next = curr + PAX_DMA_RM_DESC_RING_SIZE; last = curr + PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS; do { /* Place next_table desc as last BD entry on each buffer */ rm_write_next_table_desc(PAX_DMA_NEXT_TBL_ADDR((void *)curr), (void *)next, toggle); /* valid toggle flips for each buffer */ toggle = toggle ? 
0x0 : 0x1; curr += PAX_DMA_RM_DESC_RING_SIZE; next += PAX_DMA_RM_DESC_RING_SIZE; /* last entry, chain back to first buffer */ if (next == last) { next = (uintptr_t)ring->bd; } } while (--buff_count); dma_mb(); /* start programming from first RM header */ ring->curr.write_ptr = ring->bd; /* valid toggle starts with 1 after reset */ ring->curr.toggle = 1; /* completion read offset */ ring->curr.cmpl_rd_offs = 0; /* init sync data for the ring */ ring->curr.sync_data.signature = PAX_DMA_WRITE_SYNC_SIGNATURE; ring->curr.sync_data.ring = ring->idx; /* pkt id for active dma xfer */ ring->curr.sync_data.opaque = 0x0; /* pkt count for active dma xfer */ ring->curr.sync_data.total_pkts = 0x0; } static int init_rm(struct dma_iproc_pax_data *pd) { int ret = -ETIMEDOUT, timeout = 1000; k_mutex_lock(&pd->dma_lock, K_FOREVER); /* Wait for Ring Manager ready */ do { LOG_DBG("Waiting for RM HW init\n"); if ((sys_read32(RM_COMM_REG(pd, RM_COMM_MAIN_HW_INIT_DONE)) & RM_COMM_MAIN_HW_INIT_DONE_MASK)) { ret = 0; break; } k_sleep(K_MSEC(1)); } while (--timeout); k_mutex_unlock(&pd->dma_lock); if (!timeout) { LOG_WRN("RM HW Init timedout!\n"); } else { LOG_INF("PAX DMA RM HW Init Done\n"); } return ret; } static void rm_cfg_start(struct dma_iproc_pax_data *pd) { uint32_t val; k_mutex_lock(&pd->dma_lock, K_FOREVER); /* set config done 0, enable toggle mode */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val &= ~RM_COMM_CONTROL_CONFIG_DONE; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); val &= ~(RM_COMM_CONTROL_MODE_MASK << RM_COMM_CONTROL_MODE_SHIFT); val |= (RM_COMM_CONTROL_MODE_TOGGLE << RM_COMM_CONTROL_MODE_SHIFT); sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); /* Disable MSI */ sys_write32(RM_COMM_MSI_DISABLE_VAL, RM_COMM_REG(pd, RM_COMM_MSI_DISABLE)); /* Enable Line interrupt */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val |= RM_COMM_CONTROL_LINE_INTR_EN; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); /* Enable AE_TIMEOUT */ sys_write32(RM_COMM_AE_TIMEOUT_VAL, RM_COMM_REG(pd, RM_COMM_AE_TIMEOUT)); val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val |= RM_COMM_CONTROL_AE_TIMEOUT_EN; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); /* AE (Acceleration Engine) grouping to group '0' */ val = sys_read32(RM_COMM_REG(pd, RM_AE0_AE_CONTROL)); val &= ~RM_AE_CTRL_AE_GROUP_MASK; sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL)); val |= RM_AE_CONTROL_ACTIVE; sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL)); /* AXI read/write channel enable */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_AXI_CONTROL)); val |= (RM_COMM_AXI_CONTROL_RD_CH_EN | RM_COMM_AXI_CONTROL_WR_CH_EN); sys_write32(val, RM_COMM_REG(pd, RM_COMM_AXI_CONTROL)); /* Tune RM control programming for 4 rings */ sys_write32(RM_COMM_TIMER_CONTROL0_VAL, RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_0)); sys_write32(RM_COMM_TIMER_CONTROL1_VAL, RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_1)); sys_write32(RM_COMM_RM_BURST_LENGTH, RM_COMM_REG(pd, RM_COMM_RM_BURST_LENGTH)); /* Set Sequence max count to the max supported value */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT)); val = (val | RING_MASK_SEQ_MAX_COUNT_MASK); sys_write32(val, RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT)); k_mutex_unlock(&pd->dma_lock); } static void rm_ring_clear_stats(struct dma_iproc_pax_data *pd, enum ring_idx idx) { /* Read ring Tx, Rx, and Outstanding counts to clear */ sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_LS)); sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_MS)); sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_LS)); 
sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_MS)); sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_OUTSTAND)); } static void rm_cfg_finish(struct dma_iproc_pax_data *pd) { uint32_t val; k_mutex_lock(&pd->dma_lock, K_FOREVER); /* set Ring config done */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val |= RM_COMM_CONTROL_CONFIG_DONE; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); k_mutex_unlock(&pd->dma_lock); } /* Activate/Deactivate rings */ static inline void set_ring_active(struct dma_iproc_pax_data *pd, enum ring_idx idx, bool active) { uint32_t val; val = sys_read32(RM_RING_REG(pd, idx, RING_CONTROL)); if (active) { val |= RING_CONTROL_ACTIVE; } else { val &= ~RING_CONTROL_ACTIVE; } sys_write32(val, RM_RING_REG(pd, idx, RING_CONTROL)); } static int init_ring(struct dma_iproc_pax_data *pd, enum ring_idx idx) { uint32_t val; uintptr_t desc = (uintptr_t)pd->ring[idx].bd; uintptr_t cmpl = (uintptr_t)pd->ring[idx].cmpl; int timeout = 5000, ret = 0; k_mutex_lock(&pd->dma_lock, K_FOREVER); /* Read cmpl write ptr incase previous dma stopped */ sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR)); /* Inactivate ring */ sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL)); /* Flush ring before loading new descriptor */ sys_write32(RING_CONTROL_FLUSH, RM_RING_REG(pd, idx, RING_CONTROL)); do { if (sys_read32(RM_RING_REG(pd, idx, RING_FLUSH_DONE)) & RING_FLUSH_DONE_MASK) { break; } k_busy_wait(1); } while (--timeout); if (!timeout) { LOG_WRN("Ring %d flush timedout!\n", idx); ret = -ETIMEDOUT; goto err; } /* clear ring after flush */ sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL)); /* ring group id set to '0' */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx))); val &= ~RING_COMM_CTRL_AE_GROUP_MASK; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx))); /* DDR update control, set timeout value */ val = RING_DDR_CONTROL_COUNT(RING_DDR_CONTROL_COUNT_VAL) | RING_DDR_CONTROL_TIMER(RING_DDR_CONTROL_TIMER_VAL) | RING_DDR_CONTROL_ENABLE; sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL)); val = (uint32_t)((uintptr_t)desc >> PAX_DMA_RING_BD_ALIGN_ORDER); sys_write32(val, RM_RING_REG(pd, idx, RING_BD_START_ADDR)); val = (uint32_t)((uintptr_t)cmpl >> PAX_DMA_RING_CMPL_ALIGN_ORDER); sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_START_ADDR)); val = sys_read32(RM_RING_REG(pd, idx, RING_BD_READ_PTR)); /* keep ring inactive after init to avoid BD poll */ set_ring_active(pd, idx, false); rm_ring_clear_stats(pd, idx); err: k_mutex_unlock(&pd->dma_lock); return ret; } static int poll_on_write_sync(const struct device *dev, struct dma_iproc_pax_ring_data *ring) { const struct dma_iproc_pax_cfg *cfg = dev->config; struct dma_iproc_pax_write_sync_data sync_rd, *recv, *sent; uint64_t pci_addr; uint32_t *pci32, *axi32; uint32_t zero_init = 0, timeout = PAX_DMA_MAX_SYNC_WAIT; int ret; recv = &sync_rd; sent = &(ring->curr.sync_data); /* form host pci sync address */ pci32 = (uint32_t *)&pci_addr; pci32[0] = ring->sync_pci.addr_lo; pci32[1] = ring->sync_pci.addr_hi; axi32 = (uint32_t *)&sync_rd; do { ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, (uintptr_t *)axi32, 4, PCIE_OB_LOWMEM, HOST_TO_DEVICE); if (memcmp((void *)recv, (void *)sent, 4) == 0) { /* clear the sync word */ ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, (uintptr_t *)&zero_init, 4, PCIE_OB_LOWMEM, DEVICE_TO_HOST); dma_mb(); ret = 0; break; } k_busy_wait(1); } while (--timeout); if (!timeout) { LOG_DBG("[ring %d]: not recvd write sync!\n", ring->idx); ret = -ETIMEDOUT; } return ret; } 
static int process_cmpl_event(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; uint32_t wr_offs, rd_offs, ret = DMA_STATUS_COMPLETE; struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); struct cmpl_pkt *c; uint32_t is_outstanding; /* cmpl read offset, unprocessed cmpl location */ rd_offs = ring->curr.cmpl_rd_offs; wr_offs = sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR)); /* Update read ptr to "processed" */ ring->curr.cmpl_rd_offs = wr_offs; /* * Ensure consistency of completion descriptor * The completion desc is updated by RM via AXI stream * CPU need to ensure the memory operations are completed * before reading cmpl area, by a "dsb" * If Dcache enabled, need to invalidate the cachelines to * read updated cmpl desc. The cache API also issues dsb. */ dma_mb(); /* Decode cmpl pkt id to verify */ c = (struct cmpl_pkt *)((uintptr_t)ring->cmpl + PAX_DMA_CMPL_DESC_SIZE * PAX_DMA_CURR_CMPL_IDX(wr_offs)); LOG_DBG("RING%d WR_PTR:%d opq:%d, rm_status:%x dma_status:%x\n", idx, wr_offs, c->opq, c->rm_status, c->dma_status); is_outstanding = sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_OUTSTAND)); if ((ring->curr.opq != c->opq) && (is_outstanding != 0)) { LOG_ERR("RING%d: pkt id should be %d, rcvd %d outst=%d\n", idx, ring->curr.opq, c->opq, is_outstanding); ret = -EIO; } /* check for completion AE timeout */ if (c->rm_status == RM_COMPLETION_AE_TIMEOUT) { LOG_ERR("RING%d WR_PTR:%d rm_status:%x AE Timeout!\n", idx, wr_offs, c->rm_status); /* TBD: Issue full card reset to restore operations */ LOG_ERR("Needs Card Reset to recover!\n"); ret = -ETIMEDOUT; } if (ring->dma_callback) { ring->dma_callback(dev, ring->callback_arg, idx, ret); } return ret; } #ifdef CONFIG_DMA_IPROC_PAX_POLL_MODE static int peek_ring_cmpl(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; uint32_t wr_offs, rd_offs, timeout = PAX_DMA_MAX_POLL_WAIT; struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); /* cmpl read offset, unprocessed cmpl location */ rd_offs = ring->curr.cmpl_rd_offs; /* poll write_ptr until cmpl received for all buffers */ do { wr_offs = sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR)); if (PAX_DMA_GET_CMPL_COUNT(wr_offs, rd_offs) >= pl_len) break; k_busy_wait(1); } while (--timeout); if (timeout == 0) { LOG_ERR("RING%d timeout, rcvd %d, expected %d!\n", idx, PAX_DMA_GET_CMPL_COUNT(wr_offs, rd_offs), pl_len); /* More debug info on current dma instance */ LOG_ERR("WR_PTR:%x RD_PTR%x\n", wr_offs, rd_offs); return -ETIMEDOUT; } return process_cmpl_event(dev, idx, pl_len); } #else static void rm_isr(const struct device *dev) { uint32_t status, err_stat, idx; struct dma_iproc_pax_data *pd = dev->data; /* read and clear interrupt status */ status = sys_read32(RM_COMM_REG(pd, RM_COMM_MSI_INTR_INTERRUPT_STATUS)); sys_write32(status, RM_COMM_REG(pd, RM_COMM_MSI_INTERRUPT_STATUS_CLEAR)); /* read and clear DME/AE error interrupts */ err_stat = sys_read32(RM_COMM_REG(pd, RM_COMM_DME_INTERRUPT_STATUS_MASK)); sys_write32(err_stat, RM_COMM_REG(pd, RM_COMM_DME_INTERRUPT_STATUS_CLEAR)); err_stat = sys_read32(RM_COMM_REG(pd, RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_MASK)); sys_write32(err_stat, RM_COMM_REG(pd, RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_CLEAR)); /* alert waiting thread to process, for each completed ring */ for (idx = PAX_DMA_RING0; idx < PAX_DMA_RINGS_MAX; idx++) { if (status & (0x1 << idx)) { k_sem_give(&pd->ring[idx].alert); } } } #endif static int dma_iproc_pax_init(const struct 
device *dev) { const struct dma_iproc_pax_cfg *cfg = dev->config; struct dma_iproc_pax_data *pd = dev->data; int r; uintptr_t mem_aligned; if (!device_is_ready(cfg->pcie_dev)) { LOG_ERR("PCIe device not ready"); return -ENODEV; } pd->dma_base = cfg->dma_base; pd->rm_comm_base = cfg->rm_comm_base; pd->used_rings = (cfg->use_rings < PAX_DMA_RINGS_MAX) ? cfg->use_rings : PAX_DMA_RINGS_MAX; LOG_DBG("dma base:0x%x, rm comm base:0x%x, needed rings %d\n", pd->dma_base, pd->rm_comm_base, pd->used_rings); /* dma/rm access lock */ k_mutex_init(&pd->dma_lock); /* Ring Manager H/W init */ if (init_rm(pd)) { return -ETIMEDOUT; } /* common rm config */ rm_cfg_start(pd); /* individual ring config */ for (r = 0; r < pd->used_rings; r++) { /* per-ring mutex lock */ k_mutex_init(&pd->ring[r].lock); /* Init alerts */ k_sem_init(&pd->ring[r].alert, 0, 1); pd->ring[r].idx = r; pd->ring[r].ring_base = cfg->rm_base + PAX_DMA_RING_ADDR_OFFSET(r); LOG_DBG("RING%d,VERSION:0x%x\n", pd->ring[r].idx, sys_read32(RM_RING_REG(pd, r, RING_VER))); /* Allocate for 2 BD buffers + cmpl buffer + payload struct */ pd->ring[r].ring_mem = (void *)((uintptr_t)cfg->bd_memory_base + r * PAX_DMA_PER_RING_ALLOC_SIZE); if (!pd->ring[r].ring_mem) { LOG_ERR("RING%d failed to alloc desc memory!\n", r); return -ENOMEM; } /* Find 8K aligned address within allocated region */ mem_aligned = ((uintptr_t)pd->ring[r].ring_mem + PAX_DMA_RING_ALIGN - 1) & ~(PAX_DMA_RING_ALIGN - 1); pd->ring[r].cmpl = (void *)mem_aligned; pd->ring[r].bd = (void *)(mem_aligned + PAX_DMA_RM_CMPL_RING_SIZE); pd->ring[r].payload = (void *)((uintptr_t)pd->ring[r].bd + PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS); LOG_DBG("Ring%d,allocated Mem:0x%p Size %d\n", pd->ring[r].idx, pd->ring[r].ring_mem, PAX_DMA_PER_RING_ALLOC_SIZE); LOG_DBG("Ring%d,BD:0x%p, CMPL:0x%p, PL:0x%p\n", pd->ring[r].idx, pd->ring[r].bd, pd->ring[r].cmpl, pd->ring[r].payload); /* Prepare ring desc table */ prepare_ring(&(pd->ring[r])); /* initialize ring */ init_ring(pd, r); } /* set ring config done */ rm_cfg_finish(pd); #ifndef CONFIG_DMA_IPROC_PAX_POLL_MODE /* Register and enable RM interrupt */ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), rm_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); #else LOG_INF("%s PAX DMA rings in poll mode!\n", dev->name); #endif LOG_INF("%s RM setup %d rings\n", dev->name, pd->used_rings); return 0; } #ifdef CONFIG_DMA_IPROC_PAX_POLL_MODE static void set_pkt_count(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { /* Nothing needs to be programmed here in poll mode */ } static int wait_for_pkt_completion(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { /* poll for completion */ return peek_ring_cmpl(dev, idx, pl_len + 1); } #else static void set_pkt_count(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; uint32_t val; /* program packet count for interrupt assertion */ val = sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL)); val &= ~RING_DDR_CONTROL_COUNT_MASK; val |= RING_DDR_CONTROL_COUNT(pl_len); sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL)); } static int wait_for_pkt_completion(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; struct dma_iproc_pax_ring_data *ring; ring = &(pd->ring[idx]); /* wait for sg dma completion alert */ if (k_sem_take(&ring->alert, K_MSEC(PAX_DMA_TIMEOUT)) != 0) { LOG_ERR("PAX DMA [ring %d] Timeout!\n", idx); return -ETIMEDOUT; } return 
process_cmpl_event(dev, idx, pl_len); } #endif static int dma_iproc_pax_do_xfer(const struct device *dev, enum ring_idx idx, struct dma_iproc_pax_payload *pl, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; const struct dma_iproc_pax_cfg *cfg = dev->config; int ret = 0, cnt; struct dma_iproc_pax_ring_data *ring; void *hdr; uint32_t toggle_bit; struct dma_iproc_pax_payload sync_pl; struct dma_iproc_pax_addr64 sync; ring = &(pd->ring[idx]); pl = ring->payload; /* * Host sync buffer isn't ready at zephyr/driver init-time * Read the host address location once at first DMA write * on that ring. */ if ((ring->sync_pci.addr_lo == 0x0) && (ring->sync_pci.addr_hi == 0x0)) { /* populate sync data location */ LOG_DBG("sync addr loc 0x%x\n", cfg->scr_addr_loc); sync.addr_lo = sys_read32(cfg->scr_addr_loc + 4); sync.addr_hi = sys_read32(cfg->scr_addr_loc); ring->sync_pci.addr_lo = sync.addr_lo + idx * 4; ring->sync_pci.addr_hi = sync.addr_hi; LOG_DBG("ring:%d,sync addr:0x%x.0x%x\n", idx, ring->sync_pci.addr_hi, ring->sync_pci.addr_lo); } /* account extra sync packet */ ring->curr.sync_data.opaque = ring->curr.opq; ring->curr.sync_data.total_pkts = pl_len; memcpy((void *)&ring->sync_loc, (void *)&(ring->curr.sync_data), 4); sync_pl.pci_addr = ring->sync_pci.addr_lo | (uint64_t)ring->sync_pci.addr_hi << 32; sync_pl.axi_addr = (uintptr_t)&ring->sync_loc; sync_pl.xfer_sz = 4; /* 4-bytes */ sync_pl.direction = CARD_TO_HOST; /* Get descriptor write pointer for first header */ hdr = (void *)ring->curr.write_ptr; /* current toggle bit */ toggle_bit = ring->curr.toggle; /* current opq value for cmpl check */ ring->curr.opq = curr_pkt_id(ring); /* DMA desc count for first payload */ rm_header_set_bd_count(hdr, PAX_DMA_RM_DESC_BDCOUNT); /* Form dma descriptors for total sg payload */ for (cnt = 0; cnt < pl_len; cnt++) { rm_write_dma_header_desc(next_desc_addr(ring), pl + cnt); rm_write_axi_addr_desc(next_desc_addr(ring), pl + cnt); rm_write_pci_addr_desc(next_desc_addr(ring), pl + cnt); /* Toggle may flip, program updated toggle value */ rm_write_header_desc(next_desc_addr(ring), curr_toggle_val(ring), curr_pkt_id(ring), PAX_DMA_RM_DESC_BDCOUNT); } /* Append write sync payload descriptors */ rm_write_dma_header_desc(next_desc_addr(ring), &sync_pl); rm_write_axi_addr_desc(next_desc_addr(ring), &sync_pl); rm_write_pci_addr_desc(next_desc_addr(ring), &sync_pl); /* RM header for next transfer, RM wait on (invalid) toggle bit */ rm_write_header_next_desc(next_desc_addr(ring), ring, alloc_pkt_id(ring), PAX_DMA_RM_DESC_BDCOUNT); set_pkt_count(dev, idx, pl_len + 1); /* Ensure memory write before toggle flip */ dma_mb(); /* set toggle to valid in first header */ rm_header_set_toggle(hdr, toggle_bit); /* activate the ring */ set_ring_active(pd, idx, true); ret = wait_for_pkt_completion(dev, idx, pl_len + 1); if (ret) { goto err_ret; } ret = poll_on_write_sync(dev, ring); k_mutex_lock(&ring->lock, K_FOREVER); ring->ring_active = 0; k_mutex_unlock(&ring->lock); err_ret: ring->ring_active = 0; /* deactivate the ring until next active transfer */ set_ring_active(pd, idx, false); return ret; } static int dma_iproc_pax_configure(const struct device *dev, uint32_t channel, struct dma_config *cfg) { struct dma_iproc_pax_data *pd = dev->data; struct dma_iproc_pax_ring_data *ring; uint32_t xfer_sz; int ret = 0; #ifdef CONFIG_DMA_IPROC_PAX_DEBUG uint32_t *pci_addr32; uint32_t *axi_addr32; #endif if (channel >= PAX_DMA_RINGS_MAX) { LOG_ERR("Invalid ring/channel %d\n", channel); return -EINVAL; } ring = 
&(pd->ring[channel]); k_mutex_lock(&ring->lock, K_FOREVER); if (cfg->block_count > 1) { /* Scatter/gather list handling is not supported */ ret = -ENOTSUP; goto err; } if (ring->ring_active) { ret = -EBUSY; goto err; } ring->ring_active = 1; if (cfg->channel_direction == MEMORY_TO_PERIPHERAL) { #ifdef CONFIG_DMA_IPROC_PAX_DEBUG axi_addr32 = (uint32_t *)&cfg->head_block->source_address; pci_addr32 = (uint32_t *)&cfg->head_block->dest_address; #endif ring->payload->direction = CARD_TO_HOST; ring->payload->pci_addr = cfg->head_block->dest_address; ring->payload->axi_addr = cfg->head_block->source_address; } else if (cfg->channel_direction == PERIPHERAL_TO_MEMORY) { #ifdef CONFIG_DMA_IPROC_PAX_DEBUG axi_addr32 = (uint32_t *)&cfg->head_block->dest_address; pci_addr32 = (uint32_t *)&cfg->head_block->source_address; #endif ring->payload->direction = HOST_TO_CARD; ring->payload->pci_addr = cfg->head_block->source_address; ring->payload->axi_addr = cfg->head_block->dest_address; } else { ring->ring_active = 0; ret = -ENOTSUP; goto err; } xfer_sz = cfg->head_block->block_size; #ifdef CONFIG_DMA_IPROC_PAX_DEBUG if (xfer_sz > PAX_DMA_MAX_SIZE) { LOG_ERR("Unsupported size: %d\n", xfer_sz); ring->ring_active = 0; ret = -EINVAL; goto err; } if (xfer_sz % PAX_DMA_MIN_SIZE) { LOG_ERR("Unaligned size 0x%x\n", xfer_sz); ring->ring_active = 0; ret = -EINVAL; goto err; } if (pci_addr32[0] % PAX_DMA_ADDR_ALIGN) { LOG_ERR("Unaligned Host addr: 0x%x.0x%x\n", pci_addr32[1], pci_addr32[0]); ring->ring_active = 0; ret = -EINVAL; goto err; } if (axi_addr32[0] % PAX_DMA_ADDR_ALIGN) { LOG_ERR("Unaligned Card addr: 0x%x.0x%x\n", axi_addr32[1], axi_addr32[0]); ring->ring_active = 0; ret = -EINVAL; goto err; } #endif ring->payload->xfer_sz = xfer_sz; ring->dma_callback = cfg->dma_callback; ring->callback_arg = cfg->user_data; err: k_mutex_unlock(&ring->lock); return ret; } static int dma_iproc_pax_transfer_start(const struct device *dev, uint32_t channel) { int ret = 0; struct dma_iproc_pax_data *pd = dev->data; struct dma_iproc_pax_ring_data *ring; if (channel >= PAX_DMA_RINGS_MAX) { LOG_ERR("Invalid ring %d\n", channel); return -EINVAL; } ring = &(pd->ring[channel]); /* do dma transfer of single buffer */ ret = dma_iproc_pax_do_xfer(dev, channel, ring->payload, 1); return ret; } static int dma_iproc_pax_transfer_stop(const struct device *dev, uint32_t channel) { return 0; } static const struct dma_driver_api pax_dma_driver_api = { .config = dma_iproc_pax_configure, .start = dma_iproc_pax_transfer_start, .stop = dma_iproc_pax_transfer_stop, }; static const struct dma_iproc_pax_cfg pax_dma_cfg = { .dma_base = DT_INST_REG_ADDR_BY_NAME(0, dme_regs), .rm_base = DT_INST_REG_ADDR_BY_NAME(0, rm_ring_regs), .rm_comm_base = DT_INST_REG_ADDR_BY_NAME(0, rm_comm_regs), .use_rings = DT_INST_PROP(0, dma_channels), .bd_memory_base = (void *)DT_INST_PROP_BY_IDX(0, bd_memory, 0), .scr_addr_loc = DT_INST_PROP(0, scr_addr_loc), .pcie_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, pcie_ep)), }; DEVICE_DT_INST_DEFINE(0, &dma_iproc_pax_init, NULL, &pax_dma_data, &pax_dma_cfg, POST_KERNEL, CONFIG_DMA_INIT_PRIORITY, &pax_dma_driver_api); ```
/content/code_sandbox/drivers/dma/dma_iproc_pax_v1.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,214
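`dma_iproc_pax_init()` above locates an 8 KiB-aligned window inside each ring's allocation using the standard power-of-two align-up idiom. A standalone sketch of that computation follows; the function name is mine, not the driver's.
```c
/*
 * Generic align-up idiom used by the ring setup above; align must be a
 * power of two for the mask trick to hold.
 */
#include <stdint.h>

static inline uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
	/* round addr up to the next multiple of align */
	return (addr + align - 1) & ~(align - 1);
}

/* e.g. align_up(0x20001234, 0x2000) == 0x20002000 */
```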
```unknown # DMA configuration options config DMA_MCUX_EDMA bool "MCUX DMA driver" default y depends on DT_HAS_NXP_MCUX_EDMA_ENABLED imply NOCACHE_MEMORY if HAS_MCUX_CACHE help DMA driver for MCUX series SoCs. config DMA_MCUX_EDMA_V3 bool "MCUX DMA v3 driver" default y depends on DT_HAS_NXP_MCUX_EDMA_V3_ENABLED help DMA version 3 driver for MCUX series SoCs. config DMA_MCUX_EDMA_V4 bool "MCUX DMA v4 driver" default y depends on DT_HAS_NXP_MCUX_EDMA_V4_ENABLED help DMA version 4 driver for MCUX series SoCs. if DMA_MCUX_EDMA || DMA_MCUX_EDMA_V3 || DMA_MCUX_EDMA_V4 config DMA_TCD_QUEUE_SIZE int "Number of TCDs in a queue for SG mode" default 2 help Number of transfer control descriptors (TCDs) queued per channel when scatter/gather (SG) mode is used. config DMA_MCUX_TEST_SLOT_START int "Test slot start number" depends on (SOC_SERIES_KINETIS_K6X || SOC_SERIES_KINETIS_KE1XF || SOC_SERIES_S32K3) default 58 if SOC_SERIES_KINETIS_K6X default 60 if SOC_SERIES_KINETIS_KE1XF default 62 if SOC_SERIES_S32K3 help Starting DMA slot number used by DMA test cases. config DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS bool "Use DTCM for DMA descriptors" help When this option is activated, the descriptors for DMA transfer are located in the DTCM (Data Tightly Coupled Memory). endif # DMA_MCUX_EDMA || DMA_MCUX_EDMA_V3 || DMA_MCUX_EDMA_V4 ```
/content/code_sandbox/drivers/dma/Kconfig.mcux_edma
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
398
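DMA_TCD_QUEUE_SIZE above bounds the transfer control descriptors queued per channel for scatter/gather transfers. From the application side, a scatter/gather request is expressed as a chain of `dma_block_config` entries; the sketch below assumes each chained block consumes one queued TCD on the eDMA driver, and the addresses, channel, and function name are illustrative.
```c
/*
 * Hypothetical two-block scatter/gather request through the generic DMA
 * API; block_count is assumed to need to stay within what
 * CONFIG_DMA_TCD_QUEUE_SIZE allows on the eDMA driver.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static int sg_two_blocks(const struct device *dma_dev, uint32_t channel,
			 uint32_t src1, uint32_t src2, uint32_t dst,
			 size_t len)
{
	struct dma_block_config blk2 = {
		.source_address = src2,
		.dest_address = dst + len,
		.block_size = len,
	};
	struct dma_block_config blk1 = {
		.source_address = src1,
		.dest_address = dst,
		.block_size = len,
		.next_block = &blk2, /* chain the second block */
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,
		.dest_data_size = 4,
		.block_count = 2,
		.head_block = &blk1,
	};

	return dma_config(dma_dev, channel, &cfg);
}
```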
```c /* * */ /** * @brief Common part of DMA drivers for stm32U5. * @note Functions named with stm32_dma_* are SoCs related functions * */ #include "dma_stm32.h" #include <zephyr/init.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/dma/dma_stm32.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_stm32, CONFIG_DMA_LOG_LEVEL); #define DT_DRV_COMPAT st_stm32u5_dma static const uint32_t table_src_size[] = { LL_DMA_SRC_DATAWIDTH_BYTE, LL_DMA_SRC_DATAWIDTH_HALFWORD, LL_DMA_SRC_DATAWIDTH_WORD, }; static const uint32_t table_dst_size[] = { LL_DMA_DEST_DATAWIDTH_BYTE, LL_DMA_DEST_DATAWIDTH_HALFWORD, LL_DMA_DEST_DATAWIDTH_WORD, }; static const uint32_t table_priority[4] = { LL_DMA_LOW_PRIORITY_LOW_WEIGHT, LL_DMA_LOW_PRIORITY_MID_WEIGHT, LL_DMA_LOW_PRIORITY_HIGH_WEIGHT, LL_DMA_HIGH_PRIORITY, }; static void dma_stm32_dump_stream_irq(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); stm32_dma_dump_stream_irq(dma, id); } static void dma_stm32_clear_stream_irq(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); dma_stm32_clear_tc(dma, id); dma_stm32_clear_ht(dma, id); stm32_dma_clear_stream_irq(dma, id); } uint32_t dma_stm32_id_to_stream(uint32_t id) { static const uint32_t stream_nr[] = { LL_DMA_CHANNEL_0, LL_DMA_CHANNEL_1, LL_DMA_CHANNEL_2, LL_DMA_CHANNEL_3, LL_DMA_CHANNEL_4, LL_DMA_CHANNEL_5, LL_DMA_CHANNEL_6, LL_DMA_CHANNEL_7, LL_DMA_CHANNEL_8, LL_DMA_CHANNEL_9, LL_DMA_CHANNEL_10, LL_DMA_CHANNEL_11, LL_DMA_CHANNEL_12, LL_DMA_CHANNEL_13, LL_DMA_CHANNEL_14, LL_DMA_CHANNEL_15, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(stream_nr)); return stream_nr[id]; } bool dma_stm32_is_tc_active(DMA_TypeDef *DMAx, uint32_t id) { return LL_DMA_IsActiveFlag_TC(DMAx, dma_stm32_id_to_stream(id)); } void dma_stm32_clear_tc(DMA_TypeDef *DMAx, uint32_t id) { LL_DMA_ClearFlag_TC(DMAx, dma_stm32_id_to_stream(id)); } /* data transfer error */ static inline bool dma_stm32_is_dte_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsActiveFlag_DTE(dma, dma_stm32_id_to_stream(id)); } /* link transfer error */ static inline bool dma_stm32_is_ule_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsActiveFlag_ULE(dma, dma_stm32_id_to_stream(id)); } /* user setting error */ static inline bool dma_stm32_is_use_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsActiveFlag_USE(dma, dma_stm32_id_to_stream(id)); } /* transfer error either a data or user or link error */ bool dma_stm32_is_te_active(DMA_TypeDef *DMAx, uint32_t id) { return ( LL_DMA_IsActiveFlag_DTE(DMAx, dma_stm32_id_to_stream(id)) || LL_DMA_IsActiveFlag_ULE(DMAx, dma_stm32_id_to_stream(id)) || LL_DMA_IsActiveFlag_USE(DMAx, dma_stm32_id_to_stream(id)) ); } /* clear transfer error either a data or user or link error */ void dma_stm32_clear_te(DMA_TypeDef *DMAx, uint32_t id) { LL_DMA_ClearFlag_DTE(DMAx, dma_stm32_id_to_stream(id)); LL_DMA_ClearFlag_ULE(DMAx, dma_stm32_id_to_stream(id)); LL_DMA_ClearFlag_USE(DMAx, dma_stm32_id_to_stream(id)); } bool dma_stm32_is_ht_active(DMA_TypeDef *DMAx, uint32_t id) { return LL_DMA_IsActiveFlag_HT(DMAx, dma_stm32_id_to_stream(id)); } void dma_stm32_clear_ht(DMA_TypeDef *DMAx, uint32_t id) { LL_DMA_ClearFlag_HT(DMAx, dma_stm32_id_to_stream(id)); } void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id) { LOG_INF("tc: %d, ht: %d, dte: %d, ule: %d, use: %d", dma_stm32_is_tc_active(dma, id), 
dma_stm32_is_ht_active(dma, id), dma_stm32_is_dte_active(dma, id), dma_stm32_is_ule_active(dma, id), dma_stm32_is_use_active(dma, id) ); } /* Check if nsecure masked interrupt is active on channel */ bool stm32_dma_is_tc_irq_active(DMA_TypeDef *dma, uint32_t id) { return (LL_DMA_IsEnabledIT_TC(dma, dma_stm32_id_to_stream(id)) && LL_DMA_IsActiveFlag_TC(dma, dma_stm32_id_to_stream(id))); } bool stm32_dma_is_ht_irq_active(DMA_TypeDef *dma, uint32_t id) { return (LL_DMA_IsEnabledIT_HT(dma, dma_stm32_id_to_stream(id)) && LL_DMA_IsActiveFlag_HT(dma, dma_stm32_id_to_stream(id))); } static inline bool stm32_dma_is_te_irq_active(DMA_TypeDef *dma, uint32_t id) { return ( (LL_DMA_IsEnabledIT_DTE(dma, dma_stm32_id_to_stream(id)) && LL_DMA_IsActiveFlag_DTE(dma, dma_stm32_id_to_stream(id))) || (LL_DMA_IsEnabledIT_ULE(dma, dma_stm32_id_to_stream(id)) && LL_DMA_IsActiveFlag_ULE(dma, dma_stm32_id_to_stream(id))) || (LL_DMA_IsEnabledIT_USE(dma, dma_stm32_id_to_stream(id)) && LL_DMA_IsActiveFlag_USE(dma, dma_stm32_id_to_stream(id))) ); } /* check if and irq of any type occurred on the channel */ #define stm32_dma_is_irq_active LL_DMA_IsActiveFlag_MIS void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id) { dma_stm32_clear_te(dma, id); LL_DMA_ClearFlag_TO(dma, dma_stm32_id_to_stream(id)); LL_DMA_ClearFlag_SUSP(dma, dma_stm32_id_to_stream(id)); } bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, uint32_t id) { if (dma_stm32_is_te_active(dma, id)) { return true; } return false; } void stm32_dma_enable_stream(DMA_TypeDef *dma, uint32_t id) { LL_DMA_EnableChannel(dma, dma_stm32_id_to_stream(id)); } bool stm32_dma_is_enabled_stream(DMA_TypeDef *dma, uint32_t id) { if (LL_DMA_IsEnabledChannel(dma, dma_stm32_id_to_stream(id)) == 1) { return true; } return false; } int stm32_dma_disable_stream(DMA_TypeDef *dma, uint32_t id) { /* GPDMA channel abort sequence */ LL_DMA_SuspendChannel(dma, dma_stm32_id_to_stream(id)); /* reset the channel will disable it */ LL_DMA_ResetChannel(dma, dma_stm32_id_to_stream(id)); if (!stm32_dma_is_enabled_stream(dma, id)) { return 0; } return -EAGAIN; } void stm32_dma_set_mem_periph_address(DMA_TypeDef *dma, uint32_t channel, uint32_t src_addr, uint32_t dest_addr) { LL_DMA_ConfigAddresses(dma, channel, src_addr, dest_addr); } /* same function to set periph/mem addresses */ void stm32_dma_set_periph_mem_address(DMA_TypeDef *dma, uint32_t channel, uint32_t src_addr, uint32_t dest_addr) { LL_DMA_ConfigAddresses(dma, channel, src_addr, dest_addr); } static void dma_stm32_irq_handler(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); struct dma_stm32_stream *stream; uint32_t callback_arg; __ASSERT_NO_MSG(id < config->max_streams); stream = &config->streams[id]; /* The busy channel is pertinent if not overridden by the HAL */ if ((stream->hal_override != true) && (stream->busy == false)) { /* * When DMA channel is not overridden by HAL, * ignore irq if the channel is not busy anymore */ dma_stm32_clear_stream_irq(dev, id); return; } callback_arg = id + STM32_DMA_STREAM_OFFSET; stream->busy = false; /* The dma stream id is in range from STM32_DMA_STREAM_OFFSET..<dma-requests> */ if (stm32_dma_is_ht_irq_active(dma, id)) { /* Let HAL DMA handle flags on its own */ if (!stream->hal_override) { dma_stm32_clear_ht(dma, id); } stream->dma_callback(dev, stream->user_data, callback_arg, DMA_STATUS_BLOCK); } else if (stm32_dma_is_tc_irq_active(dma, id)) { /* Let HAL DMA handle flags on its own */ if 
(!stream->hal_override) { dma_stm32_clear_tc(dma, id); } stream->dma_callback(dev, stream->user_data, callback_arg, DMA_STATUS_COMPLETE); } else { LOG_ERR("Transfer Error."); dma_stm32_dump_stream_irq(dev, id); dma_stm32_clear_stream_irq(dev, id); stream->dma_callback(dev, stream->user_data, callback_arg, -EIO); } } static int dma_stm32_get_priority(uint8_t priority, uint32_t *ll_priority) { if (priority >= ARRAY_SIZE(table_priority)) { LOG_ERR("Priority error. %d", priority); return -EINVAL; } *ll_priority = table_priority[priority]; return 0; } static int dma_stm32_get_direction(enum dma_channel_direction direction, uint32_t *ll_direction) { switch (direction) { case MEMORY_TO_MEMORY: *ll_direction = LL_DMA_DIRECTION_MEMORY_TO_MEMORY; break; case MEMORY_TO_PERIPHERAL: *ll_direction = LL_DMA_DIRECTION_MEMORY_TO_PERIPH; break; case PERIPHERAL_TO_MEMORY: *ll_direction = LL_DMA_DIRECTION_PERIPH_TO_MEMORY; break; default: LOG_ERR("Direction error. %d", direction); return -EINVAL; } return 0; } static int dma_stm32_disable_stream(DMA_TypeDef *dma, uint32_t id) { int count = 0; for (;;) { if (stm32_dma_disable_stream(dma, id) == 0) { return 0; } /* After trying for 5 seconds, give up */ if (count++ > (5 * 1000)) { return -EBUSY; } k_sleep(K_MSEC(1)); } return 0; } static int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config) { const struct dma_stm32_config *dev_config = dev->config; struct dma_stm32_stream *stream = &dev_config->streams[id - STM32_DMA_STREAM_OFFSET]; DMA_TypeDef *dma = (DMA_TypeDef *)dev_config->base; LL_DMA_InitTypeDef DMA_InitStruct; int ret; LL_DMA_StructInit(&DMA_InitStruct); /* Give channel from index 0 */ id = id - STM32_DMA_STREAM_OFFSET; if (id >= dev_config->max_streams) { LOG_ERR("cannot configure the dma stream %d.", id); return -EINVAL; } if (stream->busy) { LOG_ERR("dma stream %d is busy.", id); return -EBUSY; } if (dma_stm32_disable_stream(dma, id) != 0) { LOG_ERR("could not disable dma stream %d.", id); return -EBUSY; } dma_stm32_clear_stream_irq(dev, id); /* Check potential DMA override (if id parameters and stream are valid) */ if (config->linked_channel == STM32_DMA_HAL_OVERRIDE) { /* DMA channel is overridden by HAL DMA * Retain that the channel is busy and proceed to the minimal * configuration to properly route the IRQ */ stream->busy = true; stream->hal_override = true; stream->dma_callback = config->dma_callback; stream->user_data = config->user_data; return 0; } if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) { LOG_ERR("Data size too big: %d\n", config->head_block->block_size); return -EINVAL; } /* Support only the same data width for source and dest */ if (config->dest_data_size != config->source_data_size) { LOG_ERR("source and dest data size differ."); return -EINVAL; } if (config->source_data_size != 4U && config->source_data_size != 2U && config->source_data_size != 1U) { LOG_ERR("source and dest unit size error, %d", config->source_data_size); return -EINVAL; } /* * STM32's circular mode will auto reset both source address * counter and destination address counter. 
*/ if (config->head_block->source_reload_en != config->head_block->dest_reload_en) { LOG_ERR("source_reload_en and dest_reload_en must " "be the same."); return -EINVAL; } stream->busy = true; stream->dma_callback = config->dma_callback; stream->direction = config->channel_direction; stream->user_data = config->user_data; stream->src_size = config->source_data_size; stream->dst_size = config->dest_data_size; /* Check dest or source memory address, warn if 0 */ if (config->head_block->source_address == 0) { LOG_WRN("source_buffer address is null."); } if (config->head_block->dest_address == 0) { LOG_WRN("dest_buffer address is null."); } DMA_InitStruct.SrcAddress = config->head_block->source_address; DMA_InitStruct.DestAddress = config->head_block->dest_address; DMA_InitStruct.BlkHWRequest = LL_DMA_HWREQUEST_SINGLEBURST; DMA_InitStruct.DataAlignment = LL_DMA_DATA_ALIGN_ZEROPADD; ret = dma_stm32_get_priority(config->channel_priority, &DMA_InitStruct.Priority); if (ret < 0) { return ret; } ret = dma_stm32_get_direction(config->channel_direction, &DMA_InitStruct.Direction); if (ret < 0) { return ret; } /* This part is for source */ switch (config->head_block->source_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: DMA_InitStruct.SrcIncMode = LL_DMA_SRC_INCREMENT; break; case DMA_ADDR_ADJ_NO_CHANGE: DMA_InitStruct.SrcIncMode = LL_DMA_SRC_FIXED; break; case DMA_ADDR_ADJ_DECREMENT: return -ENOTSUP; default: LOG_ERR("Memory increment error. %d", config->head_block->source_addr_adj); return -EINVAL; } LOG_DBG("Channel (%d) src inc (%x).", id, DMA_InitStruct.SrcIncMode); /* This part is for dest */ switch (config->head_block->dest_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: DMA_InitStruct.DestIncMode = LL_DMA_DEST_INCREMENT; break; case DMA_ADDR_ADJ_NO_CHANGE: DMA_InitStruct.DestIncMode = LL_DMA_DEST_FIXED; break; case DMA_ADDR_ADJ_DECREMENT: return -ENOTSUP; default: LOG_ERR("Periph increment error. 
%d", config->head_block->dest_addr_adj); return -EINVAL; } LOG_DBG("Channel (%d) dest inc (%x).", id, DMA_InitStruct.DestIncMode); stream->source_periph = (stream->direction == PERIPHERAL_TO_MEMORY); /* Set the data width, when source_data_size equals dest_data_size */ int index = find_lsb_set(config->source_data_size) - 1; DMA_InitStruct.SrcDataWidth = table_src_size[index]; index = find_lsb_set(config->dest_data_size) - 1; DMA_InitStruct.DestDataWidth = table_dst_size[index]; DMA_InitStruct.BlkDataLength = config->head_block->block_size; /* The request ID is stored in the dma_slot */ DMA_InitStruct.Request = config->dma_slot; LL_DMA_Init(dma, dma_stm32_id_to_stream(id), &DMA_InitStruct); LL_DMA_EnableIT_TC(dma, dma_stm32_id_to_stream(id)); LL_DMA_EnableIT_USE(dma, dma_stm32_id_to_stream(id)); LL_DMA_EnableIT_ULE(dma, dma_stm32_id_to_stream(id)); LL_DMA_EnableIT_DTE(dma, dma_stm32_id_to_stream(id)); /* Enable Half-Transfer irq if circular mode is enabled */ if (config->head_block->source_reload_en) { LL_DMA_EnableIT_HT(dma, dma_stm32_id_to_stream(id)); } return ret; } static int dma_stm32_reload(const struct device *dev, uint32_t id, uint32_t src, uint32_t dst, size_t size) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); struct dma_stm32_stream *stream; /* Give channel from index 0 */ id = id - STM32_DMA_STREAM_OFFSET; if (id >= config->max_streams) { return -EINVAL; } stream = &config->streams[id]; if (dma_stm32_disable_stream(dma, id) != 0) { return -EBUSY; } if (stream->direction > PERIPHERAL_TO_MEMORY) { return -EINVAL; } LL_DMA_ConfigAddresses(dma, dma_stm32_id_to_stream(id), src, dst); LL_DMA_SetBlkDataLength(dma, dma_stm32_id_to_stream(id), size); /* When reloading the dma, the stream is busy again before enabling */ stream->busy = true; stm32_dma_enable_stream(dma, id); return 0; } static int dma_stm32_start(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); struct dma_stm32_stream *stream; /* Give channel from index 0 */ id = id - STM32_DMA_STREAM_OFFSET; /* Only M2P or M2M mode can be started manually. 
*/ if (id >= config->max_streams) { return -EINVAL; } /* Repeated start : return now if channel is already started */ if (stm32_dma_is_enabled_stream(dma, id)) { return 0; } /* When starting the dma, the stream is busy before enabling */ stream = &config->streams[id]; stream->busy = true; dma_stm32_clear_stream_irq(dev, id); stm32_dma_enable_stream(dma, id); return 0; } static int dma_stm32_suspend(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); /* Give channel from index 0 */ id = id - STM32_DMA_STREAM_OFFSET; if (id >= config->max_streams) { return -EINVAL; } /* Suspend the channel and wait for suspend Flag set */ LL_DMA_SuspendChannel(dma, dma_stm32_id_to_stream(id)); /* It's not enough to wait for the SUSPF bit with LL_DMA_IsActiveFlag_SUSP */ do { k_msleep(1); /* A delay is needed (1ms is valid) */ } while (LL_DMA_IsActiveFlag_SUSP(dma, dma_stm32_id_to_stream(id)) != 1); /* Do not Reset the channel to allow resuming later */ return 0; } static int dma_stm32_resume(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); /* Give channel from index 0 */ id = id - STM32_DMA_STREAM_OFFSET; if (id >= config->max_streams) { return -EINVAL; } /* Resume the channel : it's enough after suspend */ LL_DMA_ResumeChannel(dma, dma_stm32_id_to_stream(id)); return 0; } static int dma_stm32_stop(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; struct dma_stm32_stream *stream = &config->streams[id - STM32_DMA_STREAM_OFFSET]; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); /* Give channel from index 0 */ id = id - STM32_DMA_STREAM_OFFSET; if (id >= config->max_streams) { return -EINVAL; } /* Repeated stop : return now if channel is already stopped */ if (!stm32_dma_is_enabled_stream(dma, id)) { return 0; } LL_DMA_DisableIT_TC(dma, dma_stm32_id_to_stream(id)); LL_DMA_DisableIT_USE(dma, dma_stm32_id_to_stream(id)); LL_DMA_DisableIT_ULE(dma, dma_stm32_id_to_stream(id)); LL_DMA_DisableIT_DTE(dma, dma_stm32_id_to_stream(id)); dma_stm32_clear_stream_irq(dev, id); dma_stm32_disable_stream(dma, id); /* Finally, flag stream as free */ stream->busy = false; return 0; } static int dma_stm32_init(const struct device *dev) { const struct dma_stm32_config *config = dev->config; const struct device *clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (clock_control_on(clk, (clock_control_subsys_t) &config->pclken) != 0) { LOG_ERR("clock op failed\n"); return -EIO; } config->config_irq(dev); for (uint32_t i = 0; i < config->max_streams; i++) { config->streams[i].busy = false; } ((struct dma_stm32_data *)dev->data)->dma_ctx.magic = 0; ((struct dma_stm32_data *)dev->data)->dma_ctx.dma_channels = 0; ((struct dma_stm32_data *)dev->data)->dma_ctx.atomic = 0; return 0; } static int dma_stm32_get_status(const struct device *dev, uint32_t id, struct dma_status *stat) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); struct dma_stm32_stream *stream; /* Give channel from index 0 */ id = id - STM32_DMA_STREAM_OFFSET; if (id >= config->max_streams) { return -EINVAL; } stream = &config->streams[id]; stat->pending_length = LL_DMA_GetBlkDataLength(dma, dma_stm32_id_to_stream(id)); stat->dir = stream->direction; stat->busy = stream->busy; return 0; } static const struct dma_driver_api dma_funcs = { .reload = dma_stm32_reload, .config = dma_stm32_configure, 
.start = dma_stm32_start, .stop = dma_stm32_stop, .get_status = dma_stm32_get_status, .suspend = dma_stm32_suspend, .resume = dma_stm32_resume, }; /* * Macro to CONNECT and enable each irq (order is given by the 'listify') * chan: channel of the DMA instance (assuming one irq per channel) * stm32U5x has 16 channels * dma : dma instance (one GPDMA instance on stm32U5x) */ #define DMA_STM32_IRQ_CONNECT_CHANNEL(chan, dma) \ do { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(dma, chan, irq), \ DT_INST_IRQ_BY_IDX(dma, chan, priority), \ dma_stm32_irq_##dma##_##chan, \ DEVICE_DT_INST_GET(dma), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(dma, chan, irq)); \ } while (0) /* * Macro to configure the irq for each dma instance (index) * Loop to CONNECT and enable each irq for each channel * Expecting as many irq as property <dma_channels> */ #define DMA_STM32_IRQ_CONNECT(index) \ static void dma_stm32_config_irq_##index(const struct device *dev) \ { \ ARG_UNUSED(dev); \ \ LISTIFY(DT_INST_PROP(index, dma_channels), \ DMA_STM32_IRQ_CONNECT_CHANNEL, (;), index); \ } /* * Macro to instanciate the irq handler (order is given by the 'listify') * chan: channel of the DMA instance (assuming one irq per channel) * stm32U5x has 16 channels * dma : dma instance (one GPDMA instance on stm32U5x) */ #define DMA_STM32_DEFINE_IRQ_HANDLER(chan, dma) \ static void dma_stm32_irq_##dma##_##chan(const struct device *dev) \ { \ dma_stm32_irq_handler(dev, chan); \ } #define DMA_STM32_INIT_DEV(index) \ BUILD_ASSERT(DT_INST_PROP(index, dma_channels) \ == DT_NUM_IRQS(DT_DRV_INST(index)), \ "Nb of Channels and IRQ mismatch"); \ \ LISTIFY(DT_INST_PROP(index, dma_channels), \ DMA_STM32_DEFINE_IRQ_HANDLER, (;), index); \ \ DMA_STM32_IRQ_CONNECT(index); \ \ static struct dma_stm32_stream \ dma_stm32_streams_##index[DT_INST_PROP_OR(index, dma_channels, \ DT_NUM_IRQS(DT_DRV_INST(index)))]; \ \ const struct dma_stm32_config dma_stm32_config_##index = { \ .pclken = { .bus = DT_INST_CLOCKS_CELL(index, bus), \ .enr = DT_INST_CLOCKS_CELL(index, bits) }, \ .config_irq = dma_stm32_config_irq_##index, \ .base = DT_INST_REG_ADDR(index), \ .max_streams = DT_INST_PROP_OR(index, dma_channels, \ DT_NUM_IRQS(DT_DRV_INST(index)) \ ), \ .streams = dma_stm32_streams_##index, \ }; \ \ static struct dma_stm32_data dma_stm32_data_##index = { \ }; \ \ DEVICE_DT_INST_DEFINE(index, \ &dma_stm32_init, \ NULL, \ &dma_stm32_data_##index, &dma_stm32_config_##index, \ PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, \ &dma_funcs); DT_INST_FOREACH_STATUS_OKAY(DMA_STM32_INIT_DEV) ```
/content/code_sandbox/drivers/dma/dma_stm32u5.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,224
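The STM32U5 driver above is consumed through Zephyr's generic DMA API, so a caller never touches the GPDMA registers directly. A minimal sketch of a memory-to-memory transfer, assuming a hypothetical `gpdma1` node label and channel index; the real label and channel numbering come from the board devicetree (the driver subtracts STM32_DMA_STREAM_OFFSET internally before indexing its streams):

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Hypothetical DT label; use the GPDMA node your board actually defines. */
static const struct device *const dma_dev = DEVICE_DT_GET(DT_NODELABEL(gpdma1));

static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	/* status is 0 (DMA_STATUS_COMPLETE) on success, negative on error */
}

static int copy_m2m(void *dst, const void *src, size_t len, uint32_t channel)
{
	struct dma_block_config blk = {
		.source_address = (uint32_t)(uintptr_t)src,
		.dest_address = (uint32_t)(uintptr_t)dst,
		.block_size = len,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 1,	/* byte-wide accesses */
		.dest_data_size = 1,
		.block_count = 1,
		.head_block = &blk,
		.dma_callback = xfer_done,
	};
	int ret = dma_config(dma_dev, channel, &cfg);

	return ret ? ret : dma_start(dma_dev, channel);
}
```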
```unknown # DMA configuration options config DMA_STM32 bool "STM32 DMA driver" select USE_STM32_LL_DMA default y depends on DT_HAS_ST_STM32_DMA_V1_ENABLED \ || DT_HAS_ST_STM32_DMA_V2_ENABLED \ || DT_HAS_ST_STM32_DMA_V2BIS_ENABLED \ || DT_HAS_ST_STM32_BDMA_ENABLED help Driver for STM32 DMA V1, V2, V2bis and BDMA types. config DMA_STM32U5 bool "STM32U5 series DMA driver" select USE_STM32_LL_DMA default y depends on DT_HAS_ST_STM32U5_DMA_ENABLED help Enable DMA support for the STM32U5 family. It differs from the DMA_STM32 driver due to the GPDMA peripheral. if DMA_STM32 config DMA_STM32_V1 bool default y depends on DT_HAS_ST_STM32_DMA_V1_ENABLED help Enable DMA V1 support. config DMA_STM32_V2 bool default y depends on DT_HAS_ST_STM32_DMA_V2_ENABLED || DT_HAS_ST_STM32_DMA_V2BIS_ENABLED help Enable DMA V2 or DMA V2bis support. With version V2 of DMA, the peripheral request must be specified in the dma slot of the dma cell. With version V2bis of DMA, the peripheral request (slot) is not a parameter of the dma-cell. config DMAMUX_STM32 bool default y depends on DT_HAS_ST_STM32_DMAMUX_ENABLED help Enable DMAMUX support. config DMA_STM32_SHARED_IRQS bool default y depends on SOC_SERIES_STM32C0X || SOC_SERIES_STM32F0X || \ SOC_SERIES_STM32G0X || SOC_SERIES_STM32L0X help Enable shared IRQ support on devices where channels share one IRQ. config DMA_STM32_BDMA bool "STM32 BDMA driver" default y select USE_STM32_LL_BDMA depends on DT_HAS_ST_STM32_BDMA_ENABLED help BDMA driver for STM32H7 series SoCs. config DMAMUX_STM32_INIT_PRIORITY int "STM32 DMAMUX init priority" depends on DT_HAS_ST_STM32_DMAMUX_ENABLED default 41 help The DMAMUX driver device must be initialized after the DMA (CONFIG_DMA_INIT_PRIORITY), so the DMAMUX initialization priority must be greater than the DMA's. endif # DMA_STM32 ```
/content/code_sandbox/drivers/dma/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
577
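The V2-versus-V2bis note above is about what a peripheral's `dmas` devicetree cells carry. A hedged sketch of how a client driver would pull those cells out in C; the `channel` and `slot` cell names follow the st,stm32-dma-v2 binding, and `my_uart` is a placeholder node label:

```c
#include <zephyr/device.h>
#include <zephyr/devicetree.h>

#define MY_UART_NODE DT_NODELABEL(my_uart) /* placeholder node */

/* DMA controller referenced by the node's `dmas = <&dma1 ...>;` entry */
static const struct device *const dma_ctlr =
	DEVICE_DT_GET(DT_DMAS_CTLR_BY_NAME(MY_UART_NODE, tx));

/* On DMA V2 the peripheral request line travels in the `slot` cell and ends
 * up in struct dma_config.dma_slot; on V2bis there is no such cell. */
static const uint32_t tx_channel = DT_DMAS_CELL_BY_NAME(MY_UART_NODE, tx, channel);
static const uint32_t tx_slot = DT_DMAS_CELL_BY_NAME(MY_UART_NODE, tx, slot);
```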
```unknown config DMA_IPROC_PAX bool prompt "Broadcom PAX(PCIE<->AXI) DMA driver" default y depends on DT_HAS_BRCM_IPROC_PAX_DMA_V1_ENABLED depends on PCIE_EP_IPROC config DMA_IPROC_PAX_V2 bool prompt "Broadcom PAX(PCIE<->AXI) DMA driver version 2" default y depends on DT_HAS_BRCM_IPROC_PAX_DMA_V2_ENABLED depends on PCIE_EP_IPROC_V2 config DMA_IPROC_PAX_V2_INIT_PRIORITY int "Broadcom PAX v2 initialization priority" default 51 depends on DMA_IPROC_PAX_V2 help Broadcom PAX v2 initialization priority. if DMA_IPROC_PAX || DMA_IPROC_PAX_V2 config DMA_IPROC_PAX_DEBUG bool "PAX DMA paranoid debug checks" help Add paranoid checks for buffer address/size alignments for each dma packet. choice prompt "PAX DMA API modes" default DMA_IPROC_PAX_IRQ_SYNC config DMA_IPROC_PAX_POLL_MODE bool "PAX DMA API in polling mode" help PAX DMA API polls for dma completions config DMA_IPROC_PAX_IRQ_SYNC bool "PAX DMA synchronous API with interrupt support" help PAX DMA API blocks until dma completion alert is signalled from ring interrupt handler. endchoice choice prompt "PAX DMA Ring operation mode" default DMA_IPROC_PAX_TOGGLE_MODE config DMA_IPROC_PAX_TOGGLE_MODE bool "PAX DMA Ring toggle mode" help PAX DMA hardware ring operation in toggle mode config DMA_IPROC_PAX_DOORBELL_MODE bool "PAX DMA Ring door bell mode" help PAX DMA hardware ring operation in doorbell mode endchoice endif ```
/content/code_sandbox/drivers/dma/Kconfig.iproc_pax
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
407
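The DMA_IPROC_PAX_IRQ_SYNC choice above describes a blocking API built on the completion interrupt. As a generic illustration of that pattern (this is plain Zephyr DMA API code with a semaphore, not the PAX driver's internal mechanism):

```c
#include <zephyr/kernel.h>
#include <zephyr/drivers/dma.h>

static K_SEM_DEFINE(xfer_done_sem, 0, 1);

static void xfer_done_cb(const struct device *dev, void *user_data,
			 uint32_t channel, int status)
{
	k_sem_give(&xfer_done_sem);
}

/* Configure, start, then block until the completion callback fires. */
static int dma_transfer_sync(const struct device *dev, uint32_t channel,
			     struct dma_config *cfg)
{
	int ret;

	cfg->dma_callback = xfer_done_cb;
	ret = dma_config(dev, channel, cfg);
	if (ret == 0) {
		ret = dma_start(dev, channel);
	}
	if (ret == 0) {
		k_sem_take(&xfer_done_sem, K_FOREVER);
	}
	return ret;
}
```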
```c /* * */ #define DT_DRV_COMPAT intel_lpss #include <errno.h> #include <stdio.h> #include <string.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/dma/dma_intel_lpss.h> #include "dma_dw_common.h" #include <soc.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_intel_lpss, CONFIG_DMA_LOG_LEVEL); struct dma_intel_lpss_cfg { struct dw_dma_dev_cfg dw_cfg; }; int dma_intel_lpss_setup(const struct device *dev) { struct dma_intel_lpss_cfg *dev_cfg = (struct dma_intel_lpss_cfg *)dev->config; if (dev_cfg->dw_cfg.base != 0) { return dw_dma_setup(dev); } return 0; } void dma_intel_lpss_set_base(const struct device *dev, uintptr_t base) { struct dma_intel_lpss_cfg *dev_cfg = (struct dma_intel_lpss_cfg *)dev->config; dev_cfg->dw_cfg.base = base; } #ifdef CONFIG_DMA_64BIT int dma_intel_lpss_reload(const struct device *dev, uint32_t channel, uint64_t src, uint64_t dst, size_t size) #else int dma_intel_lpss_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) #endif { struct dw_dma_dev_data *const dev_data = dev->data; struct dma_intel_lpss_cfg *lpss_dev_cfg = (struct dma_intel_lpss_cfg *)dev->config; struct dw_dma_dev_cfg *const dev_cfg = &lpss_dev_cfg->dw_cfg; struct dw_dma_chan_data *chan_data; uint32_t ctrl_hi = 0; if (channel >= DW_CHAN_COUNT) { return -EINVAL; } chan_data = &dev_data->chan[channel]; chan_data->lli_current->sar = src; chan_data->lli_current->dar = dst; chan_data->ptr_data.current_ptr = dst; chan_data->ptr_data.buffer_bytes = size; ctrl_hi = dw_read(dev_cfg->base, DW_CTRL_HIGH(channel)); ctrl_hi &= ~(DW_CTLH_DONE(1) | DW_CTLH_BLOCK_TS_MASK); ctrl_hi |= size & DW_CTLH_BLOCK_TS_MASK; chan_data->lli_current->ctrl_hi = ctrl_hi; chan_data->ptr_data.start_ptr = DW_DMA_LLI_ADDRESS(chan_data->lli_current, chan_data->direction); chan_data->ptr_data.end_ptr = chan_data->ptr_data.start_ptr + chan_data->ptr_data.buffer_bytes; chan_data->ptr_data.hw_ptr = chan_data->ptr_data.start_ptr; chan_data->state = DW_DMA_PREPARED; return 0; } int dma_intel_lpss_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { struct dma_intel_lpss_cfg *lpss_dev_cfg = (struct dma_intel_lpss_cfg *)dev->config; struct dw_dma_dev_cfg *const dev_cfg = &lpss_dev_cfg->dw_cfg; struct dw_dma_dev_data *const dev_data = dev->data; struct dw_dma_chan_data *chan_data; uint32_t ctrl_hi; size_t current_length; bool done; if (channel >= DW_CHAN_COUNT) { return -EINVAL; } chan_data = &dev_data->chan[channel]; ctrl_hi = dw_read(dev_cfg->base, DW_CTRL_HIGH(channel)); current_length = ctrl_hi & DW_CTLH_BLOCK_TS_MASK; done = ctrl_hi & DW_CTLH_DONE(1); if (!(dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel))) { stat->busy = false; stat->pending_length = chan_data->ptr_data.buffer_bytes; return 0; } stat->busy = true; if (done) { stat->pending_length = 0; } else if (current_length == chan_data->ptr_data.buffer_bytes) { stat->pending_length = chan_data->ptr_data.buffer_bytes; } else { stat->pending_length = chan_data->ptr_data.buffer_bytes - current_length; } return 0; } void dma_intel_lpss_isr(const struct device *dev) { dw_dma_isr(dev); } static const struct dma_driver_api dma_intel_lpss_driver_api = { .config = dw_dma_config, .start = dw_dma_start, .reload = dma_intel_lpss_reload, .get_status = dma_intel_lpss_get_status, .stop = dw_dma_stop, }; #define DMA_INTEL_LPSS_INIT(n) \ \ static struct dw_drv_plat_data dma_intel_lpss##n = { \ .chan[0] = 
{ \ .class = 6, \ .weight = 0, \ }, \ .chan[1] = { \ .class = 6, \ .weight = 0, \ }, \ }; \ \ \ static struct dma_intel_lpss_cfg dma_intel_lpss##n##_config = { \ .dw_cfg = { \ .base = 0, \ }, \ }; \ \ static struct dw_dma_dev_data dma_intel_lpss##n##_data = { \ .channel_data = &dma_intel_lpss##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ NULL, \ NULL, \ &dma_intel_lpss##n##_data, \ &dma_intel_lpss##n##_config, PRE_KERNEL_1, \ CONFIG_DMA_INIT_PRIORITY, \ &dma_intel_lpss_driver_api); \ DT_INST_FOREACH_STATUS_OKAY(DMA_INTEL_LPSS_INIT) ```
/content/code_sandbox/drivers/dma/dma_intel_lpss.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,327
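Unlike most DMA devices in this directory, the LPSS shim above is defined with a zero register base and a NULL init function; the base is patched in later by whichever LPSS peripheral owns the MMIO window. A hedged sketch of that handoff, where `mmio` and `dma_offset` are placeholders for whatever the parent device actually maps:

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma/dma_intel_lpss.h>

/* Called by the owning LPSS device once its PCIe BAR is mapped; only after
 * this does dw_dma_setup() run against real registers. */
static int attach_lpss_dma(const struct device *dma_dev,
			   uintptr_t mmio, uintptr_t dma_offset)
{
	dma_intel_lpss_set_base(dma_dev, mmio + dma_offset);
	return dma_intel_lpss_setup(dma_dev);
}
```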
```unknown config DMA_MAX32 bool "MAX32 MCU DMA driver" default y depends on DT_HAS_ADI_MAX32_DMA_ENABLED help Enable DMA support on the MAX32 family of processors. ```
/content/code_sandbox/drivers/dma/Kconfig.max32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
44
```unknown # DMA configuration options config DMA_INTEL_ADSP_GPDMA bool "Intel ADSP General Purpose Direct Memory Access driver" default y depends on DT_HAS_INTEL_ADSP_GPDMA_ENABLED help Intel ADSP DMA driver. if DMA_INTEL_ADSP_GPDMA config DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP bool default y help Hidden option to indicate that the driver needs to request dma controller ownership from the host. config DMA_INTEL_ADSP_GPDMA_HAS_LLP bool "Intel ADSP GPDMA Linear Link Position Feature" default y if SOC_SERIES_INTEL_ADSP_ACE help Intel ADSP GPDMA may optionally have a linear link position feature. config DMA_INTEL_ADSP_GPDMA_DEBUG bool "Debug dump for IP registers" help Dump Intel ADSP GPDMA registers for debug source "drivers/dma/Kconfig.dw_common" endif # DMA_INTEL_ADSP_GPDMA ```
/content/code_sandbox/drivers/dma/Kconfig.intel_adsp_gpdma
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
220
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_DMA_INTEL_ADSP_HDA_COMMON_H_ #define ZEPHYR_DRIVERS_DMA_INTEL_ADSP_HDA_COMMON_H_ #define INTEL_ADSP_HDA_MAX_CHANNELS DT_PROP(DT_NODELABEL(hda_host_out), dma_channels) /* Minimum recommended FPI increment */ #define INTEL_HDA_MIN_FPI_INCREMENT_FOR_INTERRUPT 32 #include <zephyr/drivers/dma.h> #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> struct intel_adsp_hda_dma_data { struct dma_context ctx; ATOMIC_DEFINE(channels_atomic, INTEL_ADSP_HDA_MAX_CHANNELS); }; struct intel_adsp_hda_dma_cfg { uint32_t base; uint32_t regblock_size; uint32_t dma_channels; enum dma_channel_direction direction; void (*irq_config)(void); }; int intel_adsp_hda_dma_host_in_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg); int intel_adsp_hda_dma_host_out_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg); int intel_adsp_hda_dma_link_in_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg); int intel_adsp_hda_dma_link_out_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg); int intel_adsp_hda_dma_link_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size); int intel_adsp_hda_dma_host_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size); int intel_adsp_hda_dma_status(const struct device *dev, uint32_t channel, struct dma_status *stat); bool intel_adsp_hda_dma_chan_filter(const struct device *dev, int channel, void *filter_param); int intel_adsp_hda_dma_start(const struct device *dev, uint32_t channel); int intel_adsp_hda_dma_stop(const struct device *dev, uint32_t channel); int intel_adsp_hda_dma_init(const struct device *dev); int intel_adsp_hda_dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value); void intel_adsp_hda_dma_isr(void); #ifdef CONFIG_PM_DEVICE int intel_adsp_hda_dma_pm_action(const struct device *dev, enum pm_device_action action); #endif #endif /* ZEPHYR_DRIVERS_DMA_INTEL_ADSP_HDA_COMMON_H_ */ ```
/content/code_sandbox/drivers/dma/dma_intel_adsp_hda.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
558
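The header exposes a channel filter and an attribute query alongside the usual hooks, which is enough for a user to grab a free channel and learn the controller's buffer alignment through the generic DMA API. A sketch, assuming a NULL filter parameter is accepted by the HDA channel filter:

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static int pick_hda_channel(void)
{
	/* hda_host_out mirrors the node label the header above keys its
	 * channel count on. */
	const struct device *const hda = DEVICE_DT_GET(DT_NODELABEL(hda_host_out));
	uint32_t align = 0;
	int chan = dma_request_channel(hda, NULL); /* NULL: any free channel (assumption) */

	if (chan < 0) {
		return chan;
	}
	if (dma_get_attribute(hda, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT, &align) == 0) {
		/* allocate the transfer buffer aligned to `align` bytes;
		 * release with dma_release_channel(hda, chan) when done */
	}
	return chan;
}
```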
```c /* * */ #define DT_DRV_COMPAT intel_adsp_hda_link_in #include <zephyr/drivers/dma.h> #include "dma_intel_adsp_hda.h" #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_intel_adsp_hda_dma_link_in); static const struct dma_driver_api intel_adsp_hda_dma_link_in_api = { .config = intel_adsp_hda_dma_link_in_config, .reload = intel_adsp_hda_dma_link_reload, .start = intel_adsp_hda_dma_start, .stop = intel_adsp_hda_dma_stop, .suspend = intel_adsp_hda_dma_stop, .get_status = intel_adsp_hda_dma_status, .get_attribute = intel_adsp_hda_dma_get_attribute, .chan_filter = intel_adsp_hda_dma_chan_filter, }; #define INTEL_ADSP_HDA_DMA_LINK_IN_INIT(inst) \ static const struct intel_adsp_hda_dma_cfg intel_adsp_hda_dma##inst##_config = { \ .base = DT_INST_REG_ADDR(inst), \ .regblock_size = DT_INST_REG_SIZE(inst), \ .dma_channels = DT_INST_PROP(inst, dma_channels), \ .direction = PERIPHERAL_TO_MEMORY, \ .irq_config = NULL \ }; \ \ static struct intel_adsp_hda_dma_data intel_adsp_hda_dma##inst##_data = {}; \ \ PM_DEVICE_DT_INST_DEFINE(inst, intel_adsp_hda_dma_pm_action); \ \ DEVICE_DT_INST_DEFINE(inst, &intel_adsp_hda_dma_init, \ PM_DEVICE_DT_INST_GET(inst), \ &intel_adsp_hda_dma##inst##_data, \ &intel_adsp_hda_dma##inst##_config, POST_KERNEL, \ CONFIG_DMA_INIT_PRIORITY, \ &intel_adsp_hda_dma_link_in_api); DT_INST_FOREACH_STATUS_OKAY(INTEL_ADSP_HDA_DMA_LINK_IN_INIT) ```
/content/code_sandbox/drivers/dma/dma_intel_adsp_hda_link_in.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
442
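This file is one of four thin shims (host in/out, link in/out) over the common code declared in dma_intel_adsp_hda.h, differing mainly in compatible, transfer direction, and config hook. A sketch of how the link-out counterpart's API table would look, assumed symmetrical with the file above (the actual dma_intel_adsp_hda_link_out.c is not reproduced here):

```c
#include <zephyr/drivers/dma.h>
#include "dma_intel_adsp_hda.h"

/* Assumed mirror of the link-in table: same common hooks, link-out config,
 * with the instance config's .direction set to MEMORY_TO_PERIPHERAL. */
static const struct dma_driver_api intel_adsp_hda_dma_link_out_api = {
	.config = intel_adsp_hda_dma_link_out_config,
	.reload = intel_adsp_hda_dma_link_reload,
	.start = intel_adsp_hda_dma_start,
	.stop = intel_adsp_hda_dma_stop,
	.suspend = intel_adsp_hda_dma_stop,
	.get_status = intel_adsp_hda_dma_status,
	.get_attribute = intel_adsp_hda_dma_get_attribute,
	.chan_filter = intel_adsp_hda_dma_chan_filter,
};
```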
```c /* * */ /** * @brief Common part of DMAMUX drivers for stm32. * @note api functions named dmamux_stm32_ * are calling the dma_stm32 corresponding function * implemented in dma_stm32.c */ #include <soc.h> #include <stm32_ll_dmamux.h> #include <zephyr/init.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include "dma_stm32.h" #ifdef CONFIG_DMA_STM32_BDMA #include "dma_stm32_bdma.h" #endif #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dmamux_stm32, CONFIG_DMA_LOG_LEVEL); #define DT_DRV_COMPAT st_stm32_dmamux /* this is the configuration of one dmamux channel */ struct dmamux_stm32_channel { /* pointer to the associated dma instance */ const struct device *dev_dma; /* ref of the associated dma stream for this instance */ uint8_t dma_id; }; /* the table of all the dmamux channel */ struct dmamux_stm32_data { void *callback_arg; void (*dmamux_callback)(void *arg, uint32_t id, int error_code); }; /* this is the configuration of the dmamux IP */ struct dmamux_stm32_config { #if DT_INST_NODE_HAS_PROP(0, clocks) struct stm32_pclken pclken; #endif uint32_t base; uint8_t channel_nb; /* total nb of channels */ uint8_t gen_nb; /* total nb of Request generator */ uint8_t req_nb; /* total nb of Peripheral Request inputs */ const struct dmamux_stm32_channel *mux_channels; }; /* * LISTIFY is used to generate arrays with function pointers to check * and clear interrupt flags using LL functions */ #define DMAMUX_CHANNEL(i, _) LL_DMAMUX_CHANNEL_ ## i #define IS_ACTIVE_FLAG_SOX(i, _) LL_DMAMUX_IsActiveFlag_SO ## i #define CLEAR_FLAG_SOX(i, _) LL_DMAMUX_ClearFlag_SO ## i #define IS_ACTIVE_FLAG_RGOX(i, _) LL_DMAMUX_IsActiveFlag_RGO ## i #define CLEAR_FLAG_RGOX(i, _) LL_DMAMUX_ClearFlag_RGO ## i uint32_t table_ll_channel[] = { LISTIFY(DT_INST_PROP(0, dma_channels), DMAMUX_CHANNEL, (,)) }; #if !defined(CONFIG_SOC_SERIES_STM32G0X) #define dmamux_channel_typedef DMAMUX_Channel_TypeDef #else #define dmamux_channel_typedef const DMAMUX_Channel_TypeDef #endif uint32_t (*func_ll_is_active_so[])(dmamux_channel_typedef * DMAMUXx) = { LISTIFY(DT_INST_PROP(0, dma_channels), IS_ACTIVE_FLAG_SOX, (,)) }; void (*func_ll_clear_so[])(dmamux_channel_typedef * DMAMUXx) = { LISTIFY(DT_INST_PROP(0, dma_channels), CLEAR_FLAG_SOX, (,)) }; uint32_t (*func_ll_is_active_rgo[])(dmamux_channel_typedef * DMAMUXx) = { LISTIFY(DT_INST_PROP(0, dma_generators), IS_ACTIVE_FLAG_RGOX, (,)) }; void (*func_ll_clear_rgo[])(dmamux_channel_typedef * DMAMUXx) = { LISTIFY(DT_INST_PROP(0, dma_generators), CLEAR_FLAG_RGOX, (,)) }; typedef int (*dma_configure_fn)(const struct device *dev, uint32_t id, struct dma_config *config); typedef int (*dma_start_fn)(const struct device *dev, uint32_t id); typedef int (*dma_stop_fn)(const struct device *dev, uint32_t id); typedef int (*dma_reload_fn)(const struct device *dev, uint32_t id, uint32_t src, uint32_t dst, size_t size); typedef int (*dma_status_fn)(const struct device *dev, uint32_t id, struct dma_status *stat); struct dmamux_stm32_dma_fops { dma_configure_fn configure; dma_start_fn start; dma_stop_fn stop; dma_reload_fn reload; dma_status_fn get_status; }; #if (defined(CONFIG_DMA_STM32_V1) || defined(CONFIG_DMA_STM32_V2)) && \ DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux1), okay) static const struct dmamux_stm32_dma_fops dmamux1 = { dma_stm32_configure, dma_stm32_start, dma_stm32_stop, dma_stm32_reload, dma_stm32_get_status, }; #endif #if defined(CONFIG_DMA_STM32_BDMA) && DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux2), 
okay) static const struct dmamux_stm32_dma_fops dmamux2 = { bdma_stm32_configure, bdma_stm32_start, bdma_stm32_stop, bdma_stm32_reload, bdma_stm32_get_status }; #endif /* CONFIG_DMA_STM32_BDMA */ const struct dmamux_stm32_dma_fops *get_dma_fops(const struct dmamux_stm32_config *dev_config) { #if DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux1), okay) if (dev_config->base == DT_REG_ADDR(DT_NODELABEL(dmamux1))) { return &dmamux1; } #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux1), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux2), okay) if (dev_config->base == DT_REG_ADDR(DT_NODELABEL(dmamux2))) { return &dmamux2; } #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux2), okay) */ __ASSERT(false, "Unknown dma base address %x", dev_config->base); return (void *)0; } int dmamux_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config) { /* device is the dmamux, id is the dmamux channel from 0 */ const struct dmamux_stm32_config *dev_config = dev->config; const struct dmamux_stm32_dma_fops *dma_device = get_dma_fops(dev_config); /* * request line ID for this mux channel is stored * in the dma_slot parameter */ int request_id = config->dma_slot; if (request_id > dev_config->req_nb + dev_config->gen_nb) { LOG_ERR("request ID %d is not valid.", request_id); return -EINVAL; } /* check if this channel is valid */ if (id >= dev_config->channel_nb) { LOG_ERR("channel ID %d is too big.", id); return -EINVAL; } /* * Also configures the corresponding dma channel * instance is given by the dev_dma * stream is given by the index i * config is directly this dma_config */ /* * This dmamux channel 'id' is now used for this peripheral request * It gives this mux request ID to the dma through the config.dma_slot */ if (dma_device->configure(dev_config->mux_channels[id].dev_dma, dev_config->mux_channels[id].dma_id, config) != 0) { LOG_ERR("cannot configure the dmamux."); return -EINVAL; } /* set the Request Line ID to this dmamux channel i */ DMAMUX_Channel_TypeDef *dmamux = (DMAMUX_Channel_TypeDef *)dev_config->base; LL_DMAMUX_SetRequestID(dmamux, id, request_id); return 0; } int dmamux_stm32_start(const struct device *dev, uint32_t id) { const struct dmamux_stm32_config *dev_config = dev->config; const struct dmamux_stm32_dma_fops *dma_device = get_dma_fops(dev_config); /* check if this channel is valid */ if (id >= dev_config->channel_nb) { LOG_ERR("channel ID %d is too big.", id); return -EINVAL; } if (dma_device->start(dev_config->mux_channels[id].dev_dma, dev_config->mux_channels[id].dma_id) != 0) { LOG_ERR("cannot start the dmamux channel %d.", id); return -EINVAL; } return 0; } int dmamux_stm32_stop(const struct device *dev, uint32_t id) { const struct dmamux_stm32_config *dev_config = dev->config; const struct dmamux_stm32_dma_fops *dma_device = get_dma_fops(dev_config); /* check if this channel is valid */ if (id >= dev_config->channel_nb) { LOG_ERR("channel ID %d is too big.", id); return -EINVAL; } if (dma_device->stop(dev_config->mux_channels[id].dev_dma, dev_config->mux_channels[id].dma_id) != 0) { LOG_ERR("cannot stop the dmamux channel %d.", id); return -EINVAL; } return 0; } int dmamux_stm32_reload(const struct device *dev, uint32_t id, uint32_t src, uint32_t dst, size_t size) { const struct dmamux_stm32_config *dev_config = dev->config; const struct dmamux_stm32_dma_fops *dma_device = get_dma_fops(dev_config); /* check if this channel is valid */ if (id >= dev_config->channel_nb) { LOG_ERR("channel ID %d is too big.", id); return -EINVAL; } if 
(dma_device->reload(dev_config->mux_channels[id].dev_dma, dev_config->mux_channels[id].dma_id, src, dst, size) != 0) { LOG_ERR("cannot reload the dmamux channel %d.", id); return -EINVAL; } return 0; } int dmamux_stm32_get_status(const struct device *dev, uint32_t id, struct dma_status *stat) { const struct dmamux_stm32_config *dev_config = dev->config; const struct dmamux_stm32_dma_fops *dma_device = get_dma_fops(dev_config); /* check if this channel is valid */ if (id >= dev_config->channel_nb) { LOG_ERR("channel ID %d is too big.", id); return -EINVAL; } if (dma_device->get_status(dev_config->mux_channels[id].dev_dma, dev_config->mux_channels[id].dma_id, stat) != 0) { LOG_ERR("cannot get the status of dmamux channel %d.", id); return -EINVAL; } return 0; } static int dmamux_stm32_init(const struct device *dev) { const struct dmamux_stm32_config *config = dev->config; #if DT_INST_NODE_HAS_PROP(0, clocks) const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } if (clock_control_on(clk, (clock_control_subsys_t) &config->pclken) != 0) { LOG_ERR("clock op failed\n"); return -EIO; } #endif /* DT_INST_NODE_HAS_PROP(0, clocks) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux1), okay) /* DMA 1 and DMA2 for DMAMUX1, BDMA for DMAMUX2 */ if (config->base == DT_REG_ADDR(DT_NODELABEL(dmamux1))) { /* DMAs assigned to DMAMUX channels at build time might not be ready. */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(dma1), okay) if (device_is_ready(DEVICE_DT_GET(DT_NODELABEL(dma1))) == false) { return -ENODEV; } #endif #if DT_NODE_HAS_STATUS(DT_NODELABEL(dma2), okay) if (device_is_ready(DEVICE_DT_GET(DT_NODELABEL(dma2))) == false) { return -ENODEV; } #endif } #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux1), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(dmamux2), okay) && DT_NODE_HAS_STATUS(DT_NODELABEL(bdma1), okay) if (config->base == DT_REG_ADDR(DT_NODELABEL(dmamux2))) { if (device_is_ready(DEVICE_DT_GET(DT_NODELABEL(bdma1))) == false) { return -ENODEV; } } #endif return 0; } static const struct dma_driver_api dma_funcs = { .reload = dmamux_stm32_reload, .config = dmamux_stm32_configure, .start = dmamux_stm32_start, .stop = dmamux_stm32_stop, .get_status = dmamux_stm32_get_status, }; /* * Each dmamux channel is hardwired to one dma controllers dma channel. * DMAMUX_CHANNEL_INIT_X macros resolve this mapping at build time for each * dmamux channel using the dma dt properties dma_offset and dma_requests, * such that it can be stored in dmamux_stm32_channels_X configuration. * The Macros to get the corresponding dma device binding and dma channel * for a given dmamux channel, are currently valid for series having * up to 2 dmamuxes and up to 3 dmas. 
*/ #define DMA_1_BEGIN_DMAMUX_CHANNEL DT_PROP_OR(DT_NODELABEL(dma1), dma_offset, 0) #define DMA_1_END_DMAMUX_CHANNEL (DMA_1_BEGIN_DMAMUX_CHANNEL + \ DT_PROP_OR(DT_NODELABEL(dma1), dma_requests, 0)) #define DEV_DMA1 COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(dma1), okay), \ DEVICE_DT_GET(DT_NODELABEL(dma1)), NULL) #define DMA_2_BEGIN_DMAMUX_CHANNEL DT_PROP_OR(DT_NODELABEL(dma2), dma_offset, 0) #define DMA_2_END_DMAMUX_CHANNEL (DMA_2_BEGIN_DMAMUX_CHANNEL + \ DT_PROP_OR(DT_NODELABEL(dma2), dma_requests, 0)) #define DEV_DMA2 COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(dma2), okay), \ DEVICE_DT_GET(DT_NODELABEL(dma2)), NULL) #define BDMA_1_BEGIN_DMAMUX_CHANNEL DT_PROP_OR(DT_NODELABEL(bdma1), dma_offset, 0) #define BDMA_1_END_DMAMUX_CHANNEL (BDMA_1_BEGIN_DMAMUX_CHANNEL + \ DT_PROP_OR(DT_NODELABEL(bdma1), dma_requests, 0)) #define DEV_BDMA COND_CODE_1(DT_NODE_HAS_STATUS(DT_NODELABEL(bdma1), okay), \ DEVICE_DT_GET(DT_NODELABEL(bdma1)), NULL) #define DEV_DMA_BINDING(mux_channel) \ ((mux_channel < DMA_1_END_DMAMUX_CHANNEL) ? DEV_DMA1 : DEV_DMA2) #define DEV_BDMA_BINDING(mux_channel) \ (DEV_BDMA) #define DMA_CHANNEL(mux_channel) \ ((mux_channel < DMA_1_END_DMAMUX_CHANNEL) ? \ (mux_channel + 1) : (mux_channel - DMA_2_BEGIN_DMAMUX_CHANNEL + 1)) #define BDMA_CHANNEL(mux_channel) \ ((mux_channel < BDMA_1_END_DMAMUX_CHANNEL) ? \ (mux_channel) : 0 /* not supported */) /* * H7 series implements DMAMUX1 and DMAMUX2 * DMAMUX1 is used by DMA1 and DMA2 * DMAMUX2 is used by BDMA * * Note: Instance Number (or index) has no guarantee to which dmamux it refers */ #define INIT_DMAMUX1_CHANNEL(x, ...) \ { .dev_dma = DEV_DMA_BINDING(x), .dma_id = DMA_CHANNEL(x), } #define INIT_DMAMUX2_CHANNEL(x, ...) \ { .dev_dma = DEV_BDMA_BINDING(x), .dma_id = BDMA_CHANNEL(x), } #if DT_SAME_NODE(DT_DRV_INST(0), DT_NODELABEL(dmamux1)) #define INIT_INST0_CHANNEL(x, ...) INIT_DMAMUX1_CHANNEL(x, ...) #define INIT_INST1_CHANNEL(x, ...) INIT_DMAMUX2_CHANNEL(x, ...) #else #define INIT_INST0_CHANNEL(x, ...) INIT_DMAMUX2_CHANNEL(x, ...) #define INIT_INST1_CHANNEL(x, ...) INIT_DMAMUX1_CHANNEL(x, ...) #endif #define DMAMUX_CHANNELS_INIT(index, count) \ LISTIFY(count, INIT_INST##index##_CHANNEL, (,)) #define DMAMUX_CLOCK_INIT(index) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(index, clocks), \ (.pclken = { .bus = DT_INST_CLOCKS_CELL(index, bus), \ .enr = DT_INST_CLOCKS_CELL(index, bits)},), \ ()) #define DMAMUX_INIT(index) \ static const struct dmamux_stm32_channel \ dmamux_stm32_channels_##index[DT_INST_PROP(index, dma_channels)] = { \ DMAMUX_CHANNELS_INIT(index, DT_INST_PROP(index, dma_channels))\ }; \ \ const struct dmamux_stm32_config dmamux_stm32_config_##index = { \ DMAMUX_CLOCK_INIT(index) \ .base = DT_INST_REG_ADDR(index), \ .channel_nb = DT_INST_PROP(index, dma_channels), \ .gen_nb = DT_INST_PROP(index, dma_generators), \ .req_nb = DT_INST_PROP(index, dma_requests), \ .mux_channels = dmamux_stm32_channels_##index, \ }; \ \ static struct dmamux_stm32_data dmamux_stm32_data_##index; \ \ DEVICE_DT_INST_DEFINE(index, \ &dmamux_stm32_init, \ NULL, \ &dmamux_stm32_data_##index, &dmamux_stm32_config_##index,\ PRE_KERNEL_1, CONFIG_DMAMUX_STM32_INIT_PRIORITY, \ &dma_funcs); DT_INST_FOREACH_STATUS_OKAY(DMAMUX_INIT) /* * Make sure that this driver is initialized after the DMA driver (higher priority) */ BUILD_ASSERT(CONFIG_DMAMUX_STM32_INIT_PRIORITY >= CONFIG_DMA_INIT_PRIORITY, "CONFIG_DMAMUX_STM32_INIT_PRIORITY must be higher than CONFIG_DMA_INIT_PRIORITY"); ```
/content/code_sandbox/drivers/dma/dmamux_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,112
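From the application side the mux is transparent: callers configure the dmamux device itself, put the request-line ID in dma_slot, and the driver forwards everything to the hardwired DMA stream. A sketch with placeholder IDs (the request line is validated against req_nb + gen_nb, and mux channels are counted from 0):

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static int route_and_start(struct dma_config *cfg)
{
	const struct device *const mux = DEVICE_DT_GET(DT_NODELABEL(dmamux1));
	const uint32_t mux_channel = 3;	/* placeholder mux channel */
	int ret;

	cfg->dma_slot = 42;		/* placeholder request-line ID */
	ret = dma_config(mux, mux_channel, cfg);
	return ret ? ret : dma_start(mux, mux_channel);
}
```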
```unknown config DMA_MCUX_SMARTDMA bool "MCUX SmartDMA Driver" default y depends on DT_HAS_NXP_SMARTDMA_ENABLED help MCUX SmartDMA driver. ```
/content/code_sandbox/drivers/dma/Kconfig.mcux_smartdma
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
42
```unknown config DMA_ESP32 bool "ESP32 General Purpose DMA driver" depends on DT_HAS_ESPRESSIF_ESP32_GDMA_ENABLED default y help General Purpose DMA for ESP32 series. config DMA_ESP32_MAX_DESCRIPTOR_NUM int "Maximal number of available DMA descriptors" default 16 help Reserves memory for a maximal number of descriptors ```
/content/code_sandbox/drivers/dma/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
82
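DMA_ESP32_MAX_DESCRIPTOR_NUM effectively caps the largest single transfer, because the GDMA driver that follows packs one descriptor per DMA_DESCRIPTOR_BUFFER_MAX_SIZE-byte chunk (4095 bytes in current ESP HALs, per hal/dma_types.h). A small compile-time guard a user might add, assuming that macro:

```c
#include <zephyr/toolchain.h>
#include <hal/dma_types.h>

/* Largest buffer one channel can cover with the configured descriptor pool;
 * e.g. 16 descriptors x 4095 bytes = 65520 bytes. */
#define DMA_ESP32_MAX_XFER \
	(CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM * DMA_DESCRIPTOR_BUFFER_MAX_SIZE)

BUILD_ASSERT(DMA_ESP32_MAX_XFER >= 4096,
	     "descriptor pool too small for expected transfers");
```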
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_gdma #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_esp32_gdma, CONFIG_DMA_LOG_LEVEL); #include <hal/gdma_hal.h> #include <hal/gdma_ll.h> #include <soc/gdma_channel.h> #include <hal/dma_types.h> #include <soc.h> #include <esp_memory_utils.h> #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/dma/dma_esp32.h> #include <zephyr/drivers/clock_control.h> #if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6) #include <zephyr/drivers/interrupt_controller/intc_esp32c3.h> #else #include <zephyr/drivers/interrupt_controller/intc_esp32.h> #endif #if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6) #define ISR_HANDLER isr_handler_t #else #define ISR_HANDLER intr_handler_t #endif #if defined(CONFIG_SOC_SERIES_ESP32C6) #define DMA_MAX_CHANNEL SOC_GDMA_PAIRS_PER_GROUP_MAX #else #define DMA_MAX_CHANNEL SOC_GDMA_PAIRS_PER_GROUP #endif #define ESP_DMA_M2M_ON 0 #define ESP_DMA_M2M_OFF 1 struct dma_esp32_data { gdma_hal_context_t hal; }; enum dma_channel_dir { DMA_RX, DMA_TX, DMA_UNCONFIGURED }; struct dma_esp32_channel { uint8_t dir; uint8_t channel_id; int host_id; int periph_id; dma_callback_t cb; void *user_data; dma_descriptor_t desc_list[CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM]; }; struct dma_esp32_config { int *irq_src; uint8_t irq_size; void **irq_handlers; uint8_t dma_channel_max; uint8_t sram_alignment; struct dma_esp32_channel dma_channel[DMA_MAX_CHANNEL * 2]; void (*config_irq)(const struct device *dev); struct device *src_dev; const struct device *clock_dev; clock_control_subsys_t clock_subsys; }; static void IRAM_ATTR dma_esp32_isr_handle_rx(const struct device *dev, struct dma_esp32_channel *rx, uint32_t intr_status) { struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; uint32_t status; gdma_ll_rx_clear_interrupt_status(data->hal.dev, rx->channel_id, intr_status); if (intr_status == (GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE)) { status = DMA_STATUS_COMPLETE; } else if (intr_status == GDMA_LL_EVENT_RX_DONE) { status = DMA_STATUS_BLOCK; } else { status = -intr_status; } if (rx->cb) { rx->cb(dev, rx->user_data, rx->channel_id * 2, status); } } static void IRAM_ATTR dma_esp32_isr_handle_tx(const struct device *dev, struct dma_esp32_channel *tx, uint32_t intr_status) { struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; gdma_ll_tx_clear_interrupt_status(data->hal.dev, tx->channel_id, intr_status); intr_status &= ~(GDMA_LL_EVENT_TX_TOTAL_EOF | GDMA_LL_EVENT_TX_DONE | GDMA_LL_EVENT_TX_EOF); if (tx->cb) { tx->cb(dev, tx->user_data, tx->channel_id * 2 + 1, -intr_status); } } #if !defined(CONFIG_SOC_SERIES_ESP32C6) && !defined(CONFIG_SOC_SERIES_ESP32S3) static void IRAM_ATTR dma_esp32_isr_handle(const struct device *dev, uint8_t rx_id, uint8_t tx_id) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; struct dma_esp32_channel *dma_channel_rx = &config->dma_channel[rx_id]; struct dma_esp32_channel *dma_channel_tx = &config->dma_channel[tx_id]; uint32_t intr_status = 0; intr_status = gdma_ll_rx_get_interrupt_status(data->hal.dev, dma_channel_rx->channel_id); if (intr_status) { dma_esp32_isr_handle_rx(dev, dma_channel_rx, intr_status); } intr_status = gdma_ll_tx_get_interrupt_status(data->hal.dev, dma_channel_tx->channel_id); if (intr_status) { dma_esp32_isr_handle_tx(dev, dma_channel_tx, intr_status); } } 
#endif static int dma_esp32_config_rx_descriptor(struct dma_esp32_channel *dma_channel, struct dma_block_config *block) { if (!block) { LOG_ERR("At least one dma block is required"); return -EINVAL; } if (!esp_ptr_dma_capable((uint32_t *)block->dest_address)) { LOG_ERR("Rx buffer not in DMA capable memory: %p", (uint32_t *)block->dest_address); return -EINVAL; } dma_descriptor_t *desc_iter = dma_channel->desc_list; for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) { if (block->block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) { LOG_ERR("Size of block %d is too large", i); return -EINVAL; } memset(desc_iter, 0, sizeof(dma_descriptor_t)); desc_iter->buffer = (void *)block->dest_address; desc_iter->dw0.size = block->block_size; desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; if (!block->next_block) { desc_iter->next = NULL; break; } desc_iter->next = desc_iter + 1; desc_iter += 1; block = block->next_block; } if (desc_iter->next) { memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list)); LOG_ERR("Too many dma blocks. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM"); return -EINVAL; } return 0; } static int dma_esp32_config_rx(const struct device *dev, struct dma_esp32_channel *dma_channel, struct dma_config *config_dma) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; struct dma_block_config *block = config_dma->head_block; dma_channel->dir = DMA_RX; gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id); gdma_ll_rx_connect_to_periph( data->hal.dev, dma_channel->channel_id, dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON : ESP_DMA_M2M_OFF, dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON : dma_channel->periph_id); if (config_dma->dest_burst_length) { /* * RX channel burst mode depends on specific data alignment */ gdma_ll_rx_enable_data_burst(data->hal.dev, dma_channel->channel_id, config->sram_alignment >= 4); gdma_ll_rx_enable_descriptor_burst(data->hal.dev, dma_channel->channel_id, config->sram_alignment >= 4); } dma_channel->cb = config_dma->dma_callback; dma_channel->user_data = config_dma->user_data; gdma_ll_rx_clear_interrupt_status(data->hal.dev, dma_channel->channel_id, UINT32_MAX); gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id, UINT32_MAX, config_dma->dma_callback != NULL); return dma_esp32_config_rx_descriptor(dma_channel, config_dma->head_block); } static int dma_esp32_config_tx_descriptor(struct dma_esp32_channel *dma_channel, struct dma_block_config *block) { if (!block) { LOG_ERR("At least one dma block is required"); return -EINVAL; } if (!esp_ptr_dma_capable((uint32_t *)block->source_address)) { LOG_ERR("Tx buffer not in DMA capable memory: %p", (uint32_t *)block->source_address); return -EINVAL; } dma_descriptor_t *desc_iter = dma_channel->desc_list; for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) { if (block->block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) { LOG_ERR("Size of block %d is too large", i); return -EINVAL; } memset(desc_iter, 0, sizeof(dma_descriptor_t)); desc_iter->buffer = (void *)block->source_address; desc_iter->dw0.size = block->block_size; desc_iter->dw0.length = block->block_size; desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; if (!block->next_block) { desc_iter->next = NULL; desc_iter->dw0.suc_eof = 1; break; } desc_iter->next = desc_iter + 1; desc_iter += 1; block = block->next_block; } if (desc_iter->next) { memset(dma_channel->desc_list, 0, 
sizeof(dma_channel->desc_list)); LOG_ERR("Too many dma blocks. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM"); return -EINVAL; } return 0; } static int dma_esp32_config_tx(const struct device *dev, struct dma_esp32_channel *dma_channel, struct dma_config *config_dma) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; struct dma_block_config *block = config_dma->head_block; dma_channel->dir = DMA_TX; gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id); gdma_ll_tx_connect_to_periph( data->hal.dev, dma_channel->channel_id, dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON : ESP_DMA_M2M_OFF, dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON : dma_channel->periph_id); /* * TX channel can always enable burst mode, no matter data alignment */ if (config_dma->source_burst_length) { gdma_ll_tx_enable_data_burst(data->hal.dev, dma_channel->channel_id, true); gdma_ll_tx_enable_descriptor_burst(data->hal.dev, dma_channel->channel_id, true); } dma_channel->cb = config_dma->dma_callback; dma_channel->user_data = config_dma->user_data; gdma_ll_tx_clear_interrupt_status(data->hal.dev, dma_channel->channel_id, UINT32_MAX); gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_EVENT_TX_EOF, config_dma->dma_callback != NULL); return dma_esp32_config_tx_descriptor(dma_channel, config_dma->head_block); } static int dma_esp32_config(const struct device *dev, uint32_t channel, struct dma_config *config_dma) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; struct dma_esp32_channel *dma_channel = &config->dma_channel[channel]; int ret = 0; if (channel >= config->dma_channel_max) { LOG_ERR("Unsupported channel"); return -EINVAL; } if (!config_dma) { return -EINVAL; } if (config_dma->source_burst_length != config_dma->dest_burst_length) { LOG_ERR("Source and destination burst lengths must be equal"); return -EINVAL; } dma_channel->periph_id = config_dma->channel_direction == MEMORY_TO_MEMORY ? 
SOC_GDMA_TRIG_PERIPH_M2M0 : config_dma->dma_slot; dma_channel->channel_id = channel / 2; switch (config_dma->channel_direction) { case MEMORY_TO_MEMORY: /* * Create both Tx and Rx stream on the same channel_id */ struct dma_esp32_channel *dma_channel_rx = &config->dma_channel[dma_channel->channel_id * 2]; struct dma_esp32_channel *dma_channel_tx = &config->dma_channel[(dma_channel->channel_id * 2) + 1]; dma_channel_rx->channel_id = dma_channel->channel_id; dma_channel_tx->channel_id = dma_channel->channel_id; dma_channel_rx->periph_id = dma_channel->periph_id; dma_channel_tx->periph_id = dma_channel->periph_id; ret = dma_esp32_config_rx(dev, dma_channel_rx, config_dma); ret = dma_esp32_config_tx(dev, dma_channel_tx, config_dma); break; case PERIPHERAL_TO_MEMORY: ret = dma_esp32_config_rx(dev, dma_channel, config_dma); break; case MEMORY_TO_PERIPHERAL: ret = dma_esp32_config_tx(dev, dma_channel, config_dma); break; default: LOG_ERR("Invalid Channel direction"); return -EINVAL; } return ret; } static int dma_esp32_start(const struct device *dev, uint32_t channel) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; struct dma_esp32_channel *dma_channel = &config->dma_channel[channel]; if (channel >= config->dma_channel_max) { LOG_ERR("Unsupported channel"); return -EINVAL; } if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) { struct dma_esp32_channel *dma_channel_rx = &config->dma_channel[dma_channel->channel_id * 2]; struct dma_esp32_channel *dma_channel_tx = &config->dma_channel[(dma_channel->channel_id * 2) + 1]; gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id, UINT32_MAX, true); gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_EVENT_TX_EOF, true); gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id, (int32_t)dma_channel_rx->desc_list); gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id); gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id, (int32_t)dma_channel_tx->desc_list); gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id); } else { if (dma_channel->dir == DMA_RX) { gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id, UINT32_MAX, true); gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id, (int32_t)dma_channel->desc_list); gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id); } else if (dma_channel->dir == DMA_TX) { gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_EVENT_TX_EOF, true); gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id, (int32_t)dma_channel->desc_list); gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id); } else { LOG_ERR("Channel %d is not configured", channel); return -EINVAL; } } return 0; } static int dma_esp32_stop(const struct device *dev, uint32_t channel) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; struct dma_esp32_channel *dma_channel = &config->dma_channel[channel]; if (channel >= config->dma_channel_max) { LOG_ERR("Unsupported channel"); return -EINVAL; } if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) { gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id, UINT32_MAX, false); gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_EVENT_TX_EOF, false); gdma_ll_rx_stop(data->hal.dev, dma_channel->channel_id); gdma_ll_tx_stop(data->hal.dev, 
dma_channel->channel_id); } if (dma_channel->dir == DMA_RX) { gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id, UINT32_MAX, false); gdma_ll_rx_stop(data->hal.dev, dma_channel->channel_id); } else if (dma_channel->dir == DMA_TX) { gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_EVENT_TX_EOF, false); gdma_ll_tx_stop(data->hal.dev, dma_channel->channel_id); } return 0; } static int dma_esp32_get_status(const struct device *dev, uint32_t channel, struct dma_status *status) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; struct dma_esp32_channel *dma_channel = &config->dma_channel[channel]; dma_descriptor_t *desc; if (channel >= config->dma_channel_max) { LOG_ERR("Unsupported channel"); return -EINVAL; } if (!status) { return -EINVAL; } memset(status, 0, sizeof(struct dma_status)); if (dma_channel->dir == DMA_RX) { status->busy = !gdma_ll_rx_is_fsm_idle(data->hal.dev, dma_channel->channel_id); status->dir = PERIPHERAL_TO_MEMORY; desc = (dma_descriptor_t *)gdma_ll_rx_get_current_desc_addr( data->hal.dev, dma_channel->channel_id); if (desc >= dma_channel->desc_list) { status->read_position = desc - dma_channel->desc_list; status->total_copied = desc->dw0.length + dma_channel->desc_list[0].dw0.size * status->read_position; } } else if (dma_channel->dir == DMA_TX) { status->busy = !gdma_ll_tx_is_fsm_idle(data->hal.dev, dma_channel->channel_id); status->dir = MEMORY_TO_PERIPHERAL; desc = (dma_descriptor_t *)gdma_ll_tx_get_current_desc_addr( data->hal.dev, dma_channel->channel_id); if (desc >= dma_channel->desc_list) { status->write_position = desc - dma_channel->desc_list; } } return 0; } static int dma_esp32_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; struct dma_esp32_channel *dma_channel = &config->dma_channel[channel]; dma_descriptor_t *desc_iter = dma_channel->desc_list; uint32_t buf; if (channel >= config->dma_channel_max) { LOG_ERR("Unsupported channel"); return -EINVAL; } if (dma_channel->dir == DMA_RX) { gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id); buf = dst; } else if (dma_channel->dir == DMA_TX) { gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id); buf = src; } else { return -EINVAL; } for (int i = 0; i < ARRAY_SIZE(dma_channel->desc_list); ++i) { memset(desc_iter, 0, sizeof(dma_descriptor_t)); desc_iter->buffer = (void *)(buf + DMA_DESCRIPTOR_BUFFER_MAX_SIZE * i); desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; if (size < DMA_DESCRIPTOR_BUFFER_MAX_SIZE) { desc_iter->dw0.size = size; if (dma_channel->dir == DMA_TX) { desc_iter->dw0.length = size; desc_iter->dw0.suc_eof = 1; } desc_iter->next = NULL; break; } desc_iter->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE; if (dma_channel->dir == DMA_TX) { desc_iter->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE; } size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE; desc_iter->next = desc_iter + 1; desc_iter += 1; } if (desc_iter->next) { memset(desc_iter, 0, sizeof(dma_descriptor_t)); LOG_ERR("Not enough DMA descriptors. 
Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM"); return -EINVAL; } return 0; } static int dma_esp32_configure_irq(const struct device *dev) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; for (uint8_t i = 0; i < config->irq_size; i++) { int ret = esp_intr_alloc(config->irq_src[i], 0, (ISR_HANDLER)config->irq_handlers[i], (void *)dev, NULL); if (ret != 0) { LOG_ERR("Could not allocate interrupt handler"); return ret; } } return 0; } static int dma_esp32_init(const struct device *dev) { struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; struct dma_esp32_data *data = (struct dma_esp32_data *)dev->data; struct dma_esp32_channel *dma_channel; int ret = 0; if (!device_is_ready(config->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } ret = clock_control_on(config->clock_dev, config->clock_subsys); if (ret < 0) { LOG_ERR("Could not initialize clock (%d)", ret); return ret; } ret = dma_esp32_configure_irq(dev); if (ret < 0) { LOG_ERR("Could not configure IRQ (%d)", ret); return ret; } for (uint8_t i = 0; i < DMA_MAX_CHANNEL * 2; i++) { dma_channel = &config->dma_channel[i]; dma_channel->cb = NULL; dma_channel->dir = DMA_UNCONFIGURED; dma_channel->periph_id = ESP_GDMA_TRIG_PERIPH_INVALID; memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list)); } gdma_hal_init(&data->hal, 0); gdma_ll_enable_clock(data->hal.dev, true); return 0; } static const struct dma_driver_api dma_esp32_api = { .config = dma_esp32_config, .start = dma_esp32_start, .stop = dma_esp32_stop, .get_status = dma_esp32_get_status, .reload = dma_esp32_reload, }; #if defined(CONFIG_SOC_SERIES_ESP32C6) || defined(CONFIG_SOC_SERIES_ESP32S3) #define DMA_ESP32_DEFINE_IRQ_HANDLER(channel) \ __attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel##_rx( \ const struct device *dev) \ { \ struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; \ struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; \ uint32_t intr_status = gdma_ll_rx_get_interrupt_status(data->hal.dev, channel); \ if (intr_status) { \ dma_esp32_isr_handle_rx(dev, &config->dma_channel[channel * 2], \ intr_status); \ } \ } \ \ __attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel##_tx( \ const struct device *dev) \ { \ struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config; \ struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data; \ uint32_t intr_status = gdma_ll_tx_get_interrupt_status(data->hal.dev, channel); \ if (intr_status) { \ dma_esp32_isr_handle_tx(dev, &config->dma_channel[channel * 2 + 1], \ intr_status); \ } \ } #else #define DMA_ESP32_DEFINE_IRQ_HANDLER(channel) \ __attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel( \ const struct device *dev) \ { \ dma_esp32_isr_handle(dev, channel * 2, channel * 2 + 1); \ } #endif #if defined(CONFIG_SOC_SERIES_ESP32C6) || defined(CONFIG_SOC_SERIES_ESP32S3) #define ESP32_DMA_HANDLER(channel) dma_esp32_isr_##channel##_rx, dma_esp32_isr_##channel##_tx #else #define ESP32_DMA_HANDLER(channel) dma_esp32_isr_##channel #endif DMA_ESP32_DEFINE_IRQ_HANDLER(0) DMA_ESP32_DEFINE_IRQ_HANDLER(1) DMA_ESP32_DEFINE_IRQ_HANDLER(2) #if DMA_MAX_CHANNEL >= 5 DMA_ESP32_DEFINE_IRQ_HANDLER(3) DMA_ESP32_DEFINE_IRQ_HANDLER(4) #endif static void *irq_handlers[] = { ESP32_DMA_HANDLER(0), ESP32_DMA_HANDLER(1), ESP32_DMA_HANDLER(2), #if DMA_MAX_CHANNEL >= 5 ESP32_DMA_HANDLER(3), ESP32_DMA_HANDLER(4), #endif }; #define DMA_ESP32_INIT(idx) \ static int 
irq_numbers[] = DT_INST_PROP(idx, interrupts); \ static struct dma_esp32_config dma_config_##idx = { \ .irq_src = irq_numbers, \ .irq_size = ARRAY_SIZE(irq_numbers), \ .irq_handlers = irq_handlers, \ .dma_channel_max = DT_INST_PROP(idx, dma_channels), \ .sram_alignment = DT_INST_PROP(idx, dma_buf_addr_alignment), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)), \ .clock_subsys = (void *)DT_INST_CLOCKS_CELL(idx, offset), \ }; \ static struct dma_esp32_data dma_data_##idx = { \ .hal = \ { \ .dev = (gdma_dev_t *)DT_INST_REG_ADDR(idx), \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(idx, &dma_esp32_init, NULL, &dma_data_##idx, &dma_config_##idx, \ PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &dma_esp32_api); DT_INST_FOREACH_STATUS_OKAY(DMA_ESP32_INIT) ```
/content/code_sandbox/drivers/dma/dma_esp32_gdma.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,214
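A caller-visible convention in the driver above: the Zephyr channel index encodes direction, even indexes being the RX half and odd indexes the TX half of a hardware pair (channel_id = channel / 2), and a MEMORY_TO_MEMORY config programs both halves of the pair at once. A hedged sketch:

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Pair 0: Zephyr channel 0 is its RX half, channel 1 its TX half. `cfg` is
 * assumed to be a fully populated M2M config (head_block carrying both source
 * and destination addresses); dma_start() on the pair kicks both halves. */
static int start_gdma_m2m(const struct device *gdma, struct dma_config *cfg)
{
	const uint32_t pair = 0;
	const uint32_t rx_ch = pair * 2;
	int ret;

	cfg->channel_direction = MEMORY_TO_MEMORY;
	ret = dma_config(gdma, rx_ch, cfg);
	return ret ? ret : dma_start(gdma, rx_ch);
}
```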
```unknown config DMA_NXP_SOF_HOST_DMA bool "NXP DMA driver used by SOF's host component" default y depends on DT_HAS_NXP_SOF_HOST_DMA_ENABLED help Enable NXP's DMA driver used by SOF (Sound Open Firmware) host component. Specifically, this driver is used by the SOF host component to perform transfers between the host memory and firmware (local) memory, which can be accessed without an actual DMA engine. if DMA_NXP_SOF_HOST_DMA config DMA_NXP_SOF_HOST_DMA_ALIGN int "Alignment (in bytes) required for memory regions passed to this driver" default 8 help Use this to set the alignment (in bytes) which shall be used by entities employing this driver to adjust a memory region's size and base address. Since this driver doesn't actually have any hardware to back it up this configuration doesn't make much sense as there's no alignment restrictions imposed by memcpy. Nevertheless, this is needed because this driver needs to act as if it controls a DMA engine. endif # DMA_NXP_SOF_HOST_DMA ```
/content/code_sandbox/drivers/dma/Kconfig.nxp_sof_host_dma
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
267
```c /* * */ #define DT_DRV_COMPAT brcm_iproc_pax_dma_v2 #include <zephyr/arch/cpu.h> #include <zephyr/cache.h> #include <errno.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/linker/sections.h> #include <soc.h> #include <string.h> #include <zephyr/toolchain.h> #include <zephyr/types.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/pcie/endpoint/pcie_ep.h> #include "dma_iproc_pax_v2.h" #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_iproc_pax_v2); /* Driver runtime data for PAX DMA and RM */ static struct dma_iproc_pax_data pax_dma_data; /** * @brief Opaque/packet id allocator, range 0 to 31 */ static inline uint32_t reset_pkt_id(struct dma_iproc_pax_ring_data *ring) { return ring->pkt_id = 0x0; } static inline uint32_t alloc_pkt_id(struct dma_iproc_pax_ring_data *ring) { ring->pkt_id = (ring->pkt_id + 1) % 32; return ring->pkt_id; } static inline uint32_t curr_pkt_id(struct dma_iproc_pax_ring_data *ring) { return ring->pkt_id; } static inline uint32_t curr_toggle_val(struct dma_iproc_pax_ring_data *ring) { return ring->curr.toggle; } /** * @brief Populate header descriptor */ static inline void rm_write_header_desc(void *desc, uint32_t toggle, uint32_t opq, uint32_t bdcount, uint64_t pci_addr) { struct rm_header *r = (struct rm_header *)desc; r->opq = opq; r->bdf = 0x0; r->res1 = 0x0; /* DMA descriptor count init value */ r->bdcount = bdcount; r->prot = 0x0; r->res2 = 0x0; /* No packet extension, start and end set to '1' */ r->start = 1; r->end = 1; /* RM header type */ r->type = PAX_DMA_TYPE_RM_HEADER; r->pcie_addr_msb = PAX_DMA_PCI_ADDR_HI_MSB8(pci_addr); r->res3 = 0x0; r->res4 = 0x0; #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE r->toggle = toggle; #elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE r->toggle = 0; #endif } /** * @brief Populate pcie descriptor */ static inline void rm_write_pcie_desc(void *desc, uint32_t toggle, uint64_t pci_addr) { struct pcie_desc *pcie = (struct pcie_desc *)desc; pcie->pcie_addr_lsb = pci_addr; pcie->res1 = 0x0; /* PCIE header type */ pcie->type = PAX_DMA_TYPE_PCIE_DESC; #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE pcie->toggle = toggle; #elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE pcie->toggle = 0; #endif } /** * @brief Populate src/destination descriptor */ static inline void rm_write_src_dst_desc(void *desc_ptr, bool is_mega, uint32_t toggle, uint64_t axi_addr, uint32_t size, enum pax_dma_dir direction) { struct src_dst_desc *desc; desc = (struct src_dst_desc *)desc_ptr; desc->axi_addr = axi_addr; desc->length = size; #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE desc->toggle = toggle; #elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE desc->toggle = 0; #endif if (direction == CARD_TO_HOST) { desc->type = is_mega ? PAX_DMA_TYPE_MEGA_SRC_DESC : PAX_DMA_TYPE_SRC_DESC; } else { desc->type = is_mega ? PAX_DMA_TYPE_MEGA_DST_DESC : PAX_DMA_TYPE_DST_DESC; } } #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE static void init_toggle(void *desc, uint32_t toggle) { struct rm_header *r = (struct rm_header *)desc; r->toggle = toggle; } #endif /** * @brief Return current descriptor memory address and * increment to point to next descriptor memory address. 
*/ static inline void *get_curr_desc_addr(struct dma_iproc_pax_ring_data *ring) { struct next_ptr_desc *nxt; uintptr_t curr; curr = (uintptr_t)ring->curr.write_ptr; /* if hit next table ptr, skip to next location, flip toggle */ nxt = (struct next_ptr_desc *)curr; if (nxt->type == PAX_DMA_TYPE_NEXT_PTR) { LOG_DBG("hit next_ptr@0x%lx %d, next_table@0x%lx\n", curr, nxt->toggle, (uintptr_t)nxt->addr); uintptr_t last = (uintptr_t)ring->bd + PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS; nxt->toggle = ring->curr.toggle; ring->curr.toggle = (ring->curr.toggle == 0) ? 1 : 0; /* move to next addr, wrap around if hits end */ curr += PAX_DMA_RM_DESC_BDWIDTH; if (curr == last) { curr = (uintptr_t)ring->bd; LOG_DBG("hit end of desc:0x%lx, wrap to 0x%lx\n", last, curr); } ring->descs_inflight++; } ring->curr.write_ptr = (void *)(curr + PAX_DMA_RM_DESC_BDWIDTH); ring->descs_inflight++; return (void *)curr; } /** * @brief Populate next ptr descriptor */ static void rm_write_next_table_desc(void *desc, void *next_ptr, uint32_t toggle) { struct next_ptr_desc *nxt = (struct next_ptr_desc *)desc; nxt->addr = (uintptr_t)next_ptr; nxt->type = PAX_DMA_TYPE_NEXT_PTR; nxt->toggle = toggle; } static void prepare_ring(struct dma_iproc_pax_ring_data *ring) { uintptr_t curr, next, last; int buff_count = PAX_DMA_NUM_BD_BUFFS; #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE uint32_t toggle; #endif /* zero out descriptor area */ memset(ring->bd, 0x0, PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS); memset(ring->cmpl, 0x0, PAX_DMA_RM_CMPL_RING_SIZE); /* start with first buffer, valid toggle is 0x1 */ #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE toggle = 0x1; #endif curr = (uintptr_t)ring->bd; next = curr + PAX_DMA_RM_DESC_RING_SIZE; last = curr + PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS; do { #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE init_toggle((void *)curr, toggle); /* Place next_table desc as last BD entry on each buffer */ rm_write_next_table_desc(PAX_DMA_NEXT_TBL_ADDR((void *)curr), (void *)next, toggle); #elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE /* Place next_table desc as last BD entry on each buffer */ rm_write_next_table_desc(PAX_DMA_NEXT_TBL_ADDR((void *)curr), (void *)next, 0); #endif #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE /* valid toggle flips for each buffer */ toggle = toggle ? 
0x0 : 0x1; #endif curr += PAX_DMA_RM_DESC_RING_SIZE; next += PAX_DMA_RM_DESC_RING_SIZE; /* last entry, chain back to first buffer */ if (next == last) { next = (uintptr_t)ring->bd; } } while (--buff_count); dma_mb(); /* start programming from first RM header */ ring->curr.write_ptr = ring->bd; /* valid toggle starts with 1 after reset */ ring->curr.toggle = 1; /* completion read offset */ ring->curr.cmpl_rd_offs = 0; /* inflight descs */ ring->descs_inflight = 0; /* init sync data for the ring */ ring->curr.sync_data.signature = PAX_DMA_WRITE_SYNC_SIGNATURE; ring->curr.sync_data.ring = ring->idx; /* pkt id for active dma xfer */ ring->curr.sync_data.opaque = 0x0; /* pkt count for active dma xfer */ ring->curr.sync_data.total_pkts = 0x0; } static int init_rm(struct dma_iproc_pax_data *pd) { int ret = -ETIMEDOUT, timeout = 1000; k_mutex_lock(&pd->dma_lock, K_FOREVER); /* Wait for Ring Manager ready */ do { LOG_DBG("Waiting for RM HW init\n"); if ((sys_read32(RM_COMM_REG(pd, RM_COMM_MAIN_HW_INIT_DONE)) & RM_COMM_MAIN_HW_INIT_DONE_MASK)) { ret = 0; break; } k_sleep(K_MSEC(1)); } while (--timeout); k_mutex_unlock(&pd->dma_lock); if (!timeout) { LOG_WRN("RM HW Init timedout!\n"); } else { LOG_INF("PAX DMA RM HW Init Done\n"); } return ret; } static void rm_cfg_start(struct dma_iproc_pax_data *pd) { uint32_t val; k_mutex_lock(&pd->dma_lock, K_FOREVER); /* set config done 0, enable toggle mode */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val &= ~RM_COMM_CONTROL_CONFIG_DONE; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); val &= ~(RM_COMM_CONTROL_MODE_MASK << RM_COMM_CONTROL_MODE_SHIFT); #ifdef CONFIG_DMA_IPROC_PAX_DOORBELL_MODE val |= (RM_COMM_CONTROL_MODE_DOORBELL << RM_COMM_CONTROL_MODE_SHIFT); #elif CONFIG_DMA_IPROC_PAX_TOGGLE_MODE val |= (RM_COMM_CONTROL_MODE_ALL_BD_TOGGLE << RM_COMM_CONTROL_MODE_SHIFT); #endif sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); sys_write32(RM_COMM_MSI_DISABLE_MASK, RM_COMM_REG(pd, RM_COMM_MSI_DISABLE)); val = sys_read32(RM_COMM_REG(pd, RM_COMM_AXI_READ_BURST_THRESHOLD)); val &= ~(RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_MASK << RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT); val |= RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT_VAL << RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT; sys_write32(val, RM_COMM_REG(pd, RM_COMM_AXI_READ_BURST_THRESHOLD)); val = sys_read32(RM_COMM_REG(pd, RM_COMM_FIFO_FULL_THRESHOLD)); val &= ~(RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_MASK << RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_SHIFT); val |= RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_VAL << RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_SHIFT; val &= ~(RM_COMM_BD_FIFO_FULL_THRESHOLD_MASK << RM_COMM_BD_FIFO_FULL_THRESHOLD_SHIFT); val |= RM_COMM_BD_FIFO_FULL_THRESHOLD_VAL << RM_COMM_BD_FIFO_FULL_THRESHOLD_SHIFT; sys_write32(val, RM_COMM_REG(pd, RM_COMM_FIFO_FULL_THRESHOLD)); /* Enable Line interrupt */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val |= RM_COMM_CONTROL_LINE_INTR_EN; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); /* Enable AE_TIMEOUT */ sys_write32(RM_COMM_AE_TIMEOUT_VAL, RM_COMM_REG(pd, RM_COMM_AE_TIMEOUT)); val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val |= RM_COMM_CONTROL_AE_TIMEOUT_EN; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); /* AE (Acceleration Engine) grouping to group '0' */ val = sys_read32(RM_COMM_REG(pd, RM_AE0_AE_CONTROL)); val &= ~RM_AE_CTRL_AE_GROUP_MASK; sys_write32(val, RM_COMM_REG(pd, RM_AE0_AE_CONTROL)); val |= RM_AE_CONTROL_ACTIVE; sys_write32(val, RM_COMM_REG(pd, 
RM_AE0_AE_CONTROL)); /* AXI read/write channel enable */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_AXI_CONTROL)); val |= (RM_COMM_AXI_CONTROL_RD_CH_EN | RM_COMM_AXI_CONTROL_WR_CH_EN); sys_write32(val, RM_COMM_REG(pd, RM_COMM_AXI_CONTROL)); /* Tune RM control programming for 4 rings */ sys_write32(RM_COMM_TIMER_CONTROL0_VAL, RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_0)); sys_write32(RM_COMM_TIMER_CONTROL1_VAL, RM_COMM_REG(pd, RM_COMM_TIMER_CONTROL_1)); val = sys_read32(RM_COMM_REG(pd, RM_COMM_BURST_LENGTH)); val |= RM_COMM_BD_FETCH_CACHE_ALIGNED_DISABLED; val |= RM_COMM_VALUE_FOR_DDR_ADDR_GEN_VAL << RM_COMM_VALUE_FOR_DDR_ADDR_GEN_SHIFT; val |= RM_COMM_VALUE_FOR_TOGGLE_VAL << RM_COMM_VALUE_FOR_TOGGLE_SHIFT; sys_write32(val, RM_COMM_REG(pd, RM_COMM_BURST_LENGTH)); val = sys_read32(RM_COMM_REG(pd, RM_COMM_BD_FETCH_MODE_CONTROL)); val |= RM_COMM_DISABLE_GRP_BD_FIFO_FLOW_CONTROL_FOR_PKT_ALIGNMENT; val |= RM_COMM_DISABLE_PKT_ALIGNMENT_BD_FIFO_FLOW_CONTROL; sys_write32(val, RM_COMM_REG(pd, RM_COMM_BD_FETCH_MODE_CONTROL)); /* Set Sequence max count to the max supported value */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT)); val = (val | RING_MASK_SEQ_MAX_COUNT_MASK); sys_write32(val, RM_COMM_REG(pd, RM_COMM_MASK_SEQUENCE_MAX_COUNT)); k_mutex_unlock(&pd->dma_lock); } static void rm_ring_clear_stats(struct dma_iproc_pax_data *pd, enum ring_idx idx) { /* Read ring Tx, Rx, and Outstanding counts to clear */ sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_LS)); sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_RECV_MS)); sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_LS)); sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_TRANS_MS)); sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_OUTSTAND)); } static void rm_cfg_finish(struct dma_iproc_pax_data *pd) { uint32_t val; k_mutex_lock(&pd->dma_lock, K_FOREVER); /* set Ring config done */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val |= RM_COMM_CONTROL_CONFIG_DONE; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); k_mutex_unlock(&pd->dma_lock); } static inline void write_doorbell(struct dma_iproc_pax_data *pd, enum ring_idx idx) { struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); sys_write32(ring->descs_inflight, RM_RING_REG(pd, idx, RING_DOORBELL_BD_WRITE_COUNT)); ring->descs_inflight = 0; } static inline void set_ring_active(struct dma_iproc_pax_data *pd, enum ring_idx idx, bool active) { uint32_t val; val = sys_read32(RM_RING_REG(pd, idx, RING_CONTROL)); if (active) { val |= RING_CONTROL_ACTIVE; } else { val &= ~RING_CONTROL_ACTIVE; } sys_write32(val, RM_RING_REG(pd, idx, RING_CONTROL)); } static int init_ring(struct dma_iproc_pax_data *pd, enum ring_idx idx) { uint32_t val; uintptr_t desc = (uintptr_t)pd->ring[idx].bd; uintptr_t cmpl = (uintptr_t)pd->ring[idx].cmpl; int timeout = 5000, ret = 0; k_mutex_lock(&pd->dma_lock, K_FOREVER); /* Read cmpl write ptr incase previous dma stopped */ sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR)); /* Inactivate ring */ sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL)); /* set Ring config done */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val |= RM_COMM_CONTROL_CONFIG_DONE; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); /* Flush ring before loading new descriptor */ sys_write32(RING_CONTROL_FLUSH, RM_RING_REG(pd, idx, RING_CONTROL)); do { if (sys_read32(RM_RING_REG(pd, idx, RING_FLUSH_DONE)) & RING_FLUSH_DONE_MASK) { break; } k_busy_wait(1); } while (--timeout); if (!timeout) { LOG_WRN("Ring %d flush timedout!\n", idx); ret = -ETIMEDOUT; goto err; } /* clear ring after 
flush */ sys_write32(0x0, RM_RING_REG(pd, idx, RING_CONTROL)); /* Clear Ring config done */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CONTROL)); val &= ~(RM_COMM_CONTROL_CONFIG_DONE); sys_write32(val, RM_COMM_REG(pd, RM_COMM_CONTROL)); /* ring group id set to '0' */ val = sys_read32(RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx))); val &= ~RING_COMM_CTRL_AE_GROUP_MASK; sys_write32(val, RM_COMM_REG(pd, RM_COMM_CTRL_REG(idx))); /* DDR update control, set timeout value */ val = RING_DDR_CONTROL_COUNT(RING_DDR_CONTROL_COUNT_VAL) | RING_DDR_CONTROL_TIMER(RING_DDR_CONTROL_TIMER_VAL) | RING_DDR_CONTROL_ENABLE; sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL)); /* Disable Ring MSI Timeout */ sys_write32(RING_DISABLE_MSI_TIMEOUT_VALUE, RM_RING_REG(pd, idx, RING_DISABLE_MSI_TIMEOUT)); /* BD and CMPL desc queue start address */ sys_write32((uint32_t)desc, RM_RING_REG(pd, idx, RING_BD_START_ADDR)); sys_write32((uint32_t)cmpl, RM_RING_REG(pd, idx, RING_CMPL_START_ADDR)); val = sys_read32(RM_RING_REG(pd, idx, RING_BD_READ_PTR)); /* keep ring inactive after init to avoid BD poll */ #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE set_ring_active(pd, idx, false); #elif CONFIG_DMA_IPROC_PAX_DOORBELL_MODE set_ring_active(pd, idx, true); #endif #if !defined(CONFIG_DMA_IPROC_PAX_POLL_MODE) /* Enable ring completion interrupt */ sys_write32(0x0, RM_RING_REG(pd, idx, RING_COMPLETION_INTERRUPT_STAT_MASK)); #endif rm_ring_clear_stats(pd, idx); err: k_mutex_unlock(&pd->dma_lock); return ret; } static int poll_on_write_sync(const struct device *dev, struct dma_iproc_pax_ring_data *ring) { const struct dma_iproc_pax_cfg *cfg = dev->config; struct dma_iproc_pax_write_sync_data sync_rd, *recv, *sent; uint64_t pci_addr; uint32_t *pci32, *axi32; uint32_t zero_init = 0, timeout = PAX_DMA_MAX_SYNC_WAIT; int ret; recv = &sync_rd; sent = &(ring->curr.sync_data); /* form host pci sync address */ pci32 = (uint32_t *)&pci_addr; pci32[0] = ring->sync_pci.addr_lo; pci32[1] = ring->sync_pci.addr_hi; axi32 = (uint32_t *)&sync_rd; do { ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, (uintptr_t *)axi32, 4, PCIE_OB_LOWMEM, HOST_TO_DEVICE); if (memcmp((void *)recv, (void *)sent, 4) == 0) { /* clear the sync word */ ret = pcie_ep_xfer_data_memcpy(cfg->pcie_dev, pci_addr, (uintptr_t *)&zero_init, 4, PCIE_OB_LOWMEM, DEVICE_TO_HOST); dma_mb(); ret = 0; break; } k_busy_wait(1); } while (--timeout); if (!timeout) { LOG_ERR("[ring %d]: not recvd write sync!\n", ring->idx); ret = -ETIMEDOUT; } return ret; } static int process_cmpl_event(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; uint32_t wr_offs, rd_offs, ret = 0; struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); struct cmpl_pkt *c; uint32_t is_outstanding; /* cmpl read offset, unprocessed cmpl location */ rd_offs = ring->curr.cmpl_rd_offs; wr_offs = sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR)); /* Update read ptr to "processed" */ ring->curr.cmpl_rd_offs = wr_offs; /* * Ensure consistency of completion descriptor * The completion desc is updated by RM via AXI stream * CPU need to ensure the memory operations are completed * before reading cmpl area, by a "dsb" * If Dcache enabled, need to invalidate the cachelines to * read updated cmpl desc. The cache API also issues dsb. 
*/ dma_mb(); /* Decode cmpl pkt id to verify */ c = (struct cmpl_pkt *)((uintptr_t)ring->cmpl + PAX_DMA_CMPL_DESC_SIZE * PAX_DMA_CURR_CMPL_IDX(wr_offs)); LOG_DBG("RING%d WR_PTR:%d opq:%d, rm_status:%x dma_status:%x\n", idx, wr_offs, c->opq, c->rm_status, c->dma_status); is_outstanding = sys_read32(RM_RING_REG(pd, idx, RING_NUM_REQ_OUTSTAND)); if ((ring->curr.opq != c->opq) && (is_outstanding != 0)) { LOG_ERR("RING%d: pkt id should be %d, rcvd %d outst=%d\n", idx, ring->curr.opq, c->opq, is_outstanding); ret = -EIO; } /* check for completion AE timeout */ if (c->rm_status == RM_COMPLETION_AE_TIMEOUT) { LOG_ERR("RING%d WR_PTR:%d rm_status:%x AE Timeout!\n", idx, wr_offs, c->rm_status); /* TBD: Issue full card reset to restore operations */ LOG_ERR("Needs Card Reset to recover!\n"); ret = -ETIMEDOUT; } if (ring->dma_callback) { ring->dma_callback(dev, ring->callback_arg, idx, ret); } /* clear total packet count and non header bd count */ ring->total_pkt_count = 0; return ret; } #ifdef CONFIG_DMA_IPROC_PAX_POLL_MODE static int peek_ring_cmpl(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; uint32_t wr_offs, rd_offs, timeout = PAX_DMA_MAX_POLL_WAIT; struct dma_iproc_pax_ring_data *ring = &(pd->ring[idx]); /* cmpl read offset, unprocessed cmpl location */ rd_offs = ring->curr.cmpl_rd_offs; /* poll write_ptr until cmpl received for all buffers */ do { wr_offs = sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WRITE_PTR)); if (PAX_DMA_GET_CMPL_COUNT(wr_offs, rd_offs) >= pl_len) break; k_busy_wait(1); } while (--timeout); if (timeout == 0) { LOG_ERR("RING%d timeout, rcvd %d, expected %d!\n", idx, PAX_DMA_GET_CMPL_COUNT(wr_offs, rd_offs), pl_len); /* More debug info on current dma instance */ LOG_ERR("WR_PTR:%x RD_PTR%x\n", wr_offs, rd_offs); return -ETIMEDOUT; } return process_cmpl_event(dev, idx, pl_len); } #else static void rm_isr(const struct device *dev) { uint32_t status, err_stat, idx; struct dma_iproc_pax_data *pd = dev->data; err_stat = sys_read32(RM_COMM_REG(pd, RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_MASK)); sys_write32(err_stat, RM_COMM_REG(pd, RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_CLEAR)); /* alert waiting thread to process, for each completed ring */ for (idx = PAX_DMA_RING0; idx < PAX_DMA_RINGS_MAX; idx++) { status = sys_read32(RM_RING_REG(pd, idx, RING_COMPLETION_INTERRUPT_STAT)); sys_write32(status, RM_RING_REG(pd, idx, RING_COMPLETION_INTERRUPT_STAT_CLEAR)); if (status & 0x1) { k_sem_give(&pd->ring[idx].alert); } } } #endif static int dma_iproc_pax_init(const struct device *dev) { const struct dma_iproc_pax_cfg *cfg = dev->config; struct dma_iproc_pax_data *pd = dev->data; int r; uintptr_t mem_aligned; if (!device_is_ready(cfg->pcie_dev)) { LOG_ERR("PCIe device not ready"); return -ENODEV; } pd->dma_base = cfg->dma_base; pd->rm_comm_base = cfg->rm_comm_base; pd->used_rings = (cfg->use_rings < PAX_DMA_RINGS_MAX) ? 
cfg->use_rings : PAX_DMA_RINGS_MAX; /* dma/rm access lock */ k_mutex_init(&pd->dma_lock); /* Ring Manager H/W init */ if (init_rm(pd)) { return -ETIMEDOUT; } /* common rm config */ rm_cfg_start(pd); /* individual ring config */ for (r = 0; r < pd->used_rings; r++) { /* per-ring mutex lock */ k_mutex_init(&pd->ring[r].lock); /* Init alerts */ k_sem_init(&pd->ring[r].alert, 0, 1); pd->ring[r].idx = r; pd->ring[r].ring_base = cfg->rm_base + PAX_DMA_RING_ADDR_OFFSET(r); LOG_DBG("RING%d,VERSION:0x%x\n", pd->ring[r].idx, sys_read32(RM_RING_REG(pd, r, RING_VER))); /* Allocate for 2 BD buffers + cmpl buffer + sync location */ pd->ring[r].ring_mem = (void *)((uintptr_t)cfg->bd_memory_base + r * PAX_DMA_PER_RING_ALLOC_SIZE); if (!pd->ring[r].ring_mem) { LOG_ERR("RING%d failed to alloc desc memory!\n", r); return -ENOMEM; } /* Find 8K aligned address within allocated region */ mem_aligned = ((uintptr_t)pd->ring[r].ring_mem + PAX_DMA_RING_ALIGN - 1) & ~(PAX_DMA_RING_ALIGN - 1); pd->ring[r].cmpl = (void *)mem_aligned; pd->ring[r].bd = (void *)(mem_aligned + PAX_DMA_RM_CMPL_RING_SIZE); pd->ring[r].sync_loc = (void *)((uintptr_t)pd->ring[r].bd + PAX_DMA_RM_DESC_RING_SIZE * PAX_DMA_NUM_BD_BUFFS); LOG_DBG("Ring%d,allocated Mem:0x%p Size %d\n", pd->ring[r].idx, pd->ring[r].ring_mem, PAX_DMA_PER_RING_ALLOC_SIZE); LOG_DBG("Ring%d,BD:0x%p, CMPL:0x%p, SYNC_LOC:0x%p\n", pd->ring[r].idx, pd->ring[r].bd, pd->ring[r].cmpl, pd->ring[r].sync_loc); /* Prepare ring desc table */ prepare_ring(&(pd->ring[r])); /* initialize ring */ init_ring(pd, r); } /* set ring config done */ rm_cfg_finish(pd); #ifndef CONFIG_DMA_IPROC_PAX_POLL_MODE /* Register and enable RM interrupt */ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), rm_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); #else LOG_INF("%s PAX DMA rings in poll mode!\n", dev->name); #endif LOG_INF("%s RM setup %d rings\n", dev->name, pd->used_rings); return 0; } static int dma_iproc_pax_gen_desc(struct dma_iproc_pax_ring_data *ring, bool is_mega, uint64_t pci_addr, uint64_t axi_addr, uint32_t length, enum pax_dma_dir dir, uint32_t *non_hdr_bd_count) { struct rm_header *hdr; if (*non_hdr_bd_count == 0) { /* Generate Header BD */ ring->current_hdr = (uintptr_t)get_curr_desc_addr(ring); rm_write_header_desc((void *)ring->current_hdr, curr_toggle_val(ring), curr_pkt_id(ring), PAX_DMA_RM_DESC_BDCOUNT, pci_addr); ring->total_pkt_count++; } rm_write_pcie_desc(get_curr_desc_addr(ring), curr_toggle_val(ring), pci_addr); *non_hdr_bd_count = *non_hdr_bd_count + 1; rm_write_src_dst_desc(get_curr_desc_addr(ring), is_mega, curr_toggle_val(ring), axi_addr, length, dir); *non_hdr_bd_count = *non_hdr_bd_count + 1; /* Update Header BD with bd count */ hdr = (struct rm_header *)ring->current_hdr; hdr->bdcount = *non_hdr_bd_count; if (*non_hdr_bd_count == MAX_BD_COUNT_PER_HEADER) { *non_hdr_bd_count = 0; } return 0; } static int dma_iproc_pax_gen_packets(const struct device *dev, struct dma_iproc_pax_ring_data *ring, uint32_t direction, struct dma_block_config *config, uint32_t *non_hdr_bd_count) { uint32_t outstanding, remaining_len; uint32_t offset, curr, mega_len; uint64_t axi_addr; uint64_t pci_addr; enum pax_dma_dir dir; switch (direction) { case MEMORY_TO_PERIPHERAL: pci_addr = config->dest_address; axi_addr = config->source_address; dir = CARD_TO_HOST; break; case PERIPHERAL_TO_MEMORY: axi_addr = config->dest_address; pci_addr = config->source_address; dir = HOST_TO_CARD; break; default: LOG_ERR("not supported transfer direction"); return -EINVAL; } outstanding = 
config->block_size; offset = 0; while (outstanding) { curr = MIN(outstanding, PAX_DMA_MAX_SZ_PER_BD); mega_len = curr / PAX_DMA_MEGA_LENGTH_MULTIPLE; remaining_len = curr % PAX_DMA_MEGA_LENGTH_MULTIPLE; pci_addr = pci_addr + offset; axi_addr = axi_addr + offset; if (mega_len) { dma_iproc_pax_gen_desc(ring, true, pci_addr, axi_addr, mega_len, dir, non_hdr_bd_count); offset = offset + mega_len * PAX_DMA_MEGA_LENGTH_MULTIPLE; } if (remaining_len) { pci_addr = pci_addr + offset; axi_addr = axi_addr + offset; dma_iproc_pax_gen_desc(ring, false, pci_addr, axi_addr, remaining_len, dir, non_hdr_bd_count); offset = offset + remaining_len; } outstanding = outstanding - curr; } return 0; } #ifdef CONFIG_DMA_IPROC_PAX_POLL_MODE static void set_pkt_count(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { /* Nothing needs to be programmed here in poll mode */ } static int wait_for_pkt_completion(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { /* poll for completion */ return peek_ring_cmpl(dev, idx, pl_len); } #else static void set_pkt_count(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; uint32_t val; /* program packet count for interrupt assertion */ val = sys_read32(RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL)); val &= ~RING_DDR_CONTROL_COUNT_MASK; val |= RING_DDR_CONTROL_COUNT(pl_len); sys_write32(val, RM_RING_REG(pd, idx, RING_CMPL_WR_PTR_DDR_CONTROL)); } static int wait_for_pkt_completion(const struct device *dev, enum ring_idx idx, uint32_t pl_len) { struct dma_iproc_pax_data *pd = dev->data; struct dma_iproc_pax_ring_data *ring; ring = &(pd->ring[idx]); /* wait for sg dma completion alert */ if (k_sem_take(&ring->alert, K_MSEC(PAX_DMA_TIMEOUT)) != 0) { LOG_ERR("PAX DMA [ring %d] Timeout!\n", idx); return -ETIMEDOUT; } return process_cmpl_event(dev, idx, pl_len); } #endif static int dma_iproc_pax_process_dma_blocks(const struct device *dev, enum ring_idx idx, struct dma_config *config) { struct dma_iproc_pax_data *pd = dev->data; const struct dma_iproc_pax_cfg *cfg = dev->config; int ret = 0; struct dma_iproc_pax_ring_data *ring; uint32_t toggle_bit, non_hdr_bd_count = 0; struct dma_block_config sync_pl; struct dma_iproc_pax_addr64 sync; struct dma_block_config *block_config = config->head_block; if (block_config == NULL) { LOG_ERR("head_block is NULL\n"); return -EINVAL; } ring = &(pd->ring[idx]); /* * Host sync buffer isn't ready at zephyr/driver init-time * Read the host address location once at first DMA write * on that ring. 
*/ if ((ring->sync_pci.addr_lo == 0x0) && (ring->sync_pci.addr_hi == 0x0)) { /* populate sync data location */ LOG_DBG("sync addr loc 0x%x\n", cfg->scr_addr_loc); sync.addr_lo = sys_read32(cfg->scr_addr_loc + 4); sync.addr_hi = sys_read32(cfg->scr_addr_loc); ring->sync_pci.addr_lo = sync.addr_lo + idx * 4; ring->sync_pci.addr_hi = sync.addr_hi; LOG_DBG("ring:%d,sync addr:0x%x.0x%x\n", idx, ring->sync_pci.addr_hi, ring->sync_pci.addr_lo); } /* account extra sync packet */ ring->curr.sync_data.opaque = ring->curr.opq; ring->curr.sync_data.total_pkts = config->block_count; memcpy((void *)ring->sync_loc, (void *)&(ring->curr.sync_data), 4); sync_pl.dest_address = ring->sync_pci.addr_lo | (uint64_t)ring->sync_pci.addr_hi << 32; sync_pl.source_address = (uintptr_t)ring->sync_loc; sync_pl.block_size = 4; /* 4-bytes */ /* current toggle bit */ toggle_bit = ring->curr.toggle; /* current opq value for cmpl check */ ring->curr.opq = curr_pkt_id(ring); /* Form descriptors for total block counts */ while (block_config != NULL) { ret = dma_iproc_pax_gen_packets(dev, ring, config->channel_direction, block_config, &non_hdr_bd_count); if (ret) { goto err; } block_config = block_config->next_block; } /* * Write sync payload descriptors should go with separate RM header * as RM implementation allows all the BD's in a header packet should * have same data transfer direction. Setting non_hdr_bd_count to 0, * helps generate separate packet. */ ring->non_hdr_bd_count = 0; dma_iproc_pax_gen_packets(dev, ring, MEMORY_TO_PERIPHERAL, &sync_pl, &non_hdr_bd_count); alloc_pkt_id(ring); err: return ret; } static int dma_iproc_pax_configure(const struct device *dev, uint32_t channel, struct dma_config *cfg) { struct dma_iproc_pax_data *pd = dev->data; struct dma_iproc_pax_ring_data *ring; int ret = 0; if (channel >= PAX_DMA_RINGS_MAX) { LOG_ERR("Invalid ring/channel %d\n", channel); return -EINVAL; } ring = &(pd->ring[channel]); k_mutex_lock(&ring->lock, K_FOREVER); if (ring->ring_active) { ret = -EBUSY; goto err; } if (cfg->block_count >= RM_V2_MAX_BLOCK_COUNT) { LOG_ERR("Dma block count[%d] supported exceeds limit[%d]\n", cfg->block_count, RM_V2_MAX_BLOCK_COUNT); ret = -ENOTSUP; goto err; } ring->ring_active = 1; ret = dma_iproc_pax_process_dma_blocks(dev, channel, cfg); if (ret) { ring->ring_active = 0; goto err; } ring->dma_callback = cfg->dma_callback; ring->callback_arg = cfg->user_data; err: k_mutex_unlock(&ring->lock); return ret; } static int dma_iproc_pax_transfer_start(const struct device *dev, uint32_t channel) { int ret = 0; struct dma_iproc_pax_data *pd = dev->data; struct dma_iproc_pax_ring_data *ring; if (channel >= PAX_DMA_RINGS_MAX) { LOG_ERR("Invalid ring %d\n", channel); return -EINVAL; } ring = &(pd->ring[channel]); set_pkt_count(dev, channel, ring->total_pkt_count); #ifdef CONFIG_DMA_IPROC_PAX_DOORBELL_MODE write_doorbell(pd, channel); #elif CONFIG_DMA_IPROC_PAX_TOGGLE_MODE /* activate the ring */ set_ring_active(pd, channel, true); #endif ret = wait_for_pkt_completion(dev, channel, ring->total_pkt_count); if (ret) { goto err_ret; } ret = poll_on_write_sync(dev, ring); err_ret: k_mutex_lock(&ring->lock, K_FOREVER); ring->ring_active = 0; k_mutex_unlock(&ring->lock); #ifdef CONFIG_DMA_IPROC_PAX_TOGGLE_MODE /* deactivate the ring until next active transfer */ set_ring_active(pd, channel, false); #endif return ret; } static int dma_iproc_pax_transfer_stop(const struct device *dev, uint32_t channel) { return 0; } static const struct dma_driver_api pax_dma_driver_api = { .config = 
dma_iproc_pax_configure, .start = dma_iproc_pax_transfer_start, .stop = dma_iproc_pax_transfer_stop, }; static const struct dma_iproc_pax_cfg pax_dma_cfg = { .dma_base = DT_INST_REG_ADDR_BY_NAME(0, dme_regs), .rm_base = DT_INST_REG_ADDR_BY_NAME(0, rm_ring_regs), .rm_comm_base = DT_INST_REG_ADDR_BY_NAME(0, rm_comm_regs), .use_rings = DT_INST_PROP(0, dma_channels), .bd_memory_base = (void *)DT_INST_PROP_BY_IDX(0, bd_memory, 0), .scr_addr_loc = DT_INST_PROP(0, scr_addr_loc), .pcie_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, pcie_ep)), }; DEVICE_DT_INST_DEFINE(0, &dma_iproc_pax_init, NULL, &pax_dma_data, &pax_dma_cfg, POST_KERNEL, CONFIG_DMA_IPROC_PAX_V2_INIT_PRIORITY, &pax_dma_driver_api); ```
/content/code_sandbox/drivers/dma/dma_iproc_pax_v2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,071
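Seen through the generic Zephyr DMA API, the driver above reduces to the usual two-call sequence: dma_config() builds the RM header/PCIe/src-dst descriptor packets via dma_iproc_pax_process_dma_blocks(), and dma_start() rings the doorbell (or activates the ring in toggle mode) and blocks until the completion event and host write-sync are observed. Below is a minimal, hypothetical consumer sketch; the device handle, addresses, and length are placeholders, and CONFIG_DMA_64BIT is assumed so that block addresses can carry 64-bit host PCI addresses.

```c
/* Hypothetical client of the PAX DMA driver above. */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static void pax_xfer_done(const struct device *dev, void *user_data,
			  uint32_t channel, int status)
{
	/* status carries 0, -EIO, or -ETIMEDOUT from process_cmpl_event() */
}

int pax_copy_to_host(const struct device *dma, uint64_t host_pci_addr,
		     uint64_t local_axi_addr, uint32_t len)
{
	struct dma_block_config blk = {
		.source_address = local_axi_addr,
		.dest_address = host_pci_addr,
		.block_size = len,
	};
	struct dma_config cfg = {
		/* MEMORY_TO_PERIPHERAL maps to CARD_TO_HOST in the driver */
		.channel_direction = MEMORY_TO_PERIPHERAL,
		.block_count = 1,
		.head_block = &blk,
		.dma_callback = pax_xfer_done,
	};
	int ret = dma_config(dma, 0, &cfg); /* ring/channel 0 */

	return ret ? ret : dma_start(dma, 0); /* returns after write-sync */
}
```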
```c /* * */ #include <zephyr/drivers/dma.h> #include <zephyr/internal/syscall_handler.h> /* Both of these APIs assume that the driver implementations are checking * the validity of the channel ID and returning -errno if it's bogus */ static inline int z_vrfy_dma_start(const struct device *dev, uint32_t channel) { K_OOPS(K_SYSCALL_DRIVER_DMA(dev, start)); return z_impl_dma_start((const struct device *)dev, channel); } #include <zephyr/syscalls/dma_start_mrsh.c> static inline int z_vrfy_dma_stop(const struct device *dev, uint32_t channel) { K_OOPS(K_SYSCALL_DRIVER_DMA(dev, stop)); return z_impl_dma_stop((const struct device *)dev, channel); } #include <zephyr/syscalls/dma_stop_mrsh.c> ```
/content/code_sandbox/drivers/dma/dma_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
180
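Both handlers share one verify-and-forward shape: K_SYSCALL_DRIVER_DMA() checks that the device's dma_driver_api vtable actually provides the requested slot, then the call is forwarded to the in-kernel z_impl_* function. A hypothetical handler for another channel-oriented syscall such as dma_suspend() would differ only in the slot name; the sketch below is illustrative and not taken from the file above.

```c
/* Sketch only: same verify-and-forward pattern for a hypothetical
 * dma_suspend() handler.
 */
static inline int z_vrfy_dma_suspend(const struct device *dev, uint32_t channel)
{
	/* K_OOPS terminates the calling user thread if dev is not a DMA
	 * device or its vtable lacks a .suspend entry.
	 */
	K_OOPS(K_SYSCALL_DRIVER_DMA(dev, suspend));
	return z_impl_dma_suspend((const struct device *)dev, channel);
}
#include <zephyr/syscalls/dma_suspend_mrsh.c>
```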
```unknown # Intel sedi DMA configuration options config DMA_SEDI bool "SEDI DMA driver" select DMA_64BIT default y depends on DT_HAS_INTEL_SEDI_DMA_ENABLED help This option enables the Intel SEDI DMA driver. This driver is simply a shim driver built upon the SEDI bare metal DMA driver in the hal-intel module ```
/content/code_sandbox/drivers/dma/Kconfig.sedi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
81
```unknown # Nios-II mSGDMA driver configuration options config DMA_NIOS2_MSGDMA bool "Nios-II Modular Scatter-Gather DMA(MSGDMA) driver" default y depends on DT_HAS_ALTR_MSGDMA_ENABLED help Enable Nios-II Modular Scatter-Gather DMA(MSGDMA) driver. ```
/content/code_sandbox/drivers/dma/Kconfig.nios2_msgdma
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```c /* * */ #define DT_DRV_COMPAT infineon_xmc4xxx_dma #include <soc.h> #include <stdint.h> #include <xmc_dma.h> #include <zephyr/device.h> #include <zephyr/drivers/dma.h> #include <zephyr/dt-bindings/dma/infineon-xmc4xxx-dma.h> #include <zephyr/irq.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_xmc4xxx, CONFIG_DMA_LOG_LEVEL); #define MAX_PRIORITY 7 #define DMA_MAX_BLOCK_LEN 4095 #define DLR_LINE_UNSET 0xff #define DLR_SRSEL_RS_BITSIZE 4 #define DLR_SRSEL_RS_MSK 0xf #define ALL_EVENTS \ (XMC_DMA_CH_EVENT_TRANSFER_COMPLETE | XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE | \ XMC_DMA_CH_EVENT_SRC_TRANSACTION_COMPLETE | XMC_DMA_CH_EVENT_DST_TRANSACTION_COMPLETE | \ XMC_DMA_CH_EVENT_ERROR) struct dma_xmc4xxx_channel { dma_callback_t cb; void *user_data; uint16_t block_ts; uint8_t source_data_size; uint8_t dlr_line; }; struct dma_xmc4xxx_config { XMC_DMA_t *dma; void (*irq_configure)(void); }; struct dma_xmc4xxx_data { struct dma_context ctx; struct dma_xmc4xxx_channel *channels; }; #define HANDLE_EVENT(event_test, get_channels_event, ret) \ do { \ if (event & (XMC_DMA_CH_##event_test)) { \ uint32_t channels_event = get_channels_event(dma); \ int channel = find_lsb_set(channels_event) - 1; \ struct dma_xmc4xxx_channel *dma_channel; \ \ __ASSERT_NO_MSG(channel >= 0); \ dma_channel = &dev_data->channels[channel]; \ /* Event has to be cleared before callback. The callback may call */ \ /* dma_start() and re-enable the event */ \ XMC_DMA_CH_ClearEventStatus(dma, channel, XMC_DMA_CH_##event_test); \ if (dma_channel->cb) { \ dma_channel->cb(dev, dma_channel->user_data, channel, (ret)); \ } \ } \ } while (0) /* Isr is level triggered, so we don't have to loop over all the channels */ /* in a single call */ static void dma_xmc4xxx_isr(const struct device *dev) { struct dma_xmc4xxx_data *dev_data = dev->data; const struct dma_xmc4xxx_config *dev_cfg = dev->config; int num_dma_channels = dev_data->ctx.dma_channels; XMC_DMA_t *dma = dev_cfg->dma; uint32_t event; uint32_t sr_overruns; /* There are two types of possible DMA error events: */ /* 1. Error response from AHB slave on the HRESP bus during DMA transfer. */ /* Treat this as EPERM error. */ /* 2. Service request overruns on the DLR line. */ /* Treat this EIO error. */ event = XMC_DMA_GetEventStatus(dma); HANDLE_EVENT(EVENT_ERROR, XMC_DMA_GetChannelsErrorStatus, -EPERM); HANDLE_EVENT(EVENT_BLOCK_TRANSFER_COMPLETE, XMC_DMA_GetChannelsBlockCompleteStatus, 0); HANDLE_EVENT(EVENT_TRANSFER_COMPLETE, XMC_DMA_GetChannelsTransferCompleteStatus, 0); sr_overruns = DLR->OVRSTAT; if (sr_overruns == 0) { return; } /* clear the overruns */ DLR->OVRCLR = sr_overruns; /* notify about overruns */ for (int i = 0; i < num_dma_channels; i++) { struct dma_xmc4xxx_channel *dma_channel; dma_channel = &dev_data->channels[i]; if (dma_channel->cb && dma_channel->dlr_line != DLR_LINE_UNSET && sr_overruns & BIT(dma_channel->dlr_line)) { LOG_ERR("Overruns detected on channel %d", i); dma_channel->cb(dev, dma_channel->user_data, i, -EIO); /* From XMC4700/4800 reference documentation - Section 4.4.1 */ /* Once the overrun condition is entered the user can clear the */ /* overrun status bits by writing to the DLR_OVRCLR register. */ /* Additionally the pending request must be reset by successively */ /* disabling and enabling the respective line. 
*/ DLR->LNEN &= ~BIT(dma_channel->dlr_line); DLR->LNEN |= BIT(dma_channel->dlr_line); } } } static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config) { struct dma_xmc4xxx_data *dev_data = dev->data; const struct dma_xmc4xxx_config *dev_cfg = dev->config; struct dma_block_config *block = config->head_block; XMC_DMA_t *dma = dev_cfg->dma; uint8_t dlr_line = DLR_LINE_UNSET; if (channel >= dev_data->ctx.dma_channels) { LOG_ERR("Invalid channel number"); return -EINVAL; } if (config->channel_priority > MAX_PRIORITY) { LOG_ERR("Invalid priority"); return -EINVAL; } if (config->source_chaining_en || config->dest_chaining_en) { LOG_ERR("Channel chaining is not supported"); return -EINVAL; } if (config->channel_direction != MEMORY_TO_MEMORY && config->channel_direction != MEMORY_TO_PERIPHERAL && config->channel_direction != PERIPHERAL_TO_MEMORY) { LOG_ERR("Unsupported channel direction"); return -EINVAL; } if (config->block_count != 1) { LOG_ERR("Invalid block count"); return -EINVAL; } if (block->source_gather_en || block->dest_scatter_en) { if (dma != XMC_DMA0 || channel >= 2) { LOG_ERR("Gather/scatter only supported on DMA0 on ch0 and ch1"); return -EINVAL; } } if (config->dest_data_size != 1 && config->dest_data_size != 2 && config->dest_data_size != 4) { LOG_ERR("Invalid dest size, Only 1,2,4 bytes supported"); return -EINVAL; } if (config->source_data_size != 1 && config->source_data_size != 2 && config->source_data_size != 4) { LOG_ERR("Invalid source size, Only 1,2,4 bytes supported"); return -EINVAL; } if (config->source_burst_length != 1 && config->source_burst_length != 4 && config->source_burst_length != 8) { LOG_ERR("Invalid src burst length (data size units). Only 1,4,8 units supported"); return -EINVAL; } if (config->dest_burst_length != 1 && config->dest_burst_length != 4 && config->dest_burst_length != 8) { LOG_ERR("Invalid dest burst length (data size units). Only 1,4,8 units supported"); return -EINVAL; } if (block->block_size / config->source_data_size > DMA_MAX_BLOCK_LEN) { LOG_ERR("Block transactions must be <= 4095"); return -EINVAL; } if (XMC_DMA_CH_IsEnabled(dma, channel)) { LOG_ERR("Channel is still active"); return -EINVAL; } XMC_DMA_CH_ClearEventStatus(dma, channel, ALL_EVENTS); /* check dma slot number */ dma->CH[channel].SAR = block->source_address; dma->CH[channel].DAR = block->dest_address; dma->CH[channel].LLP = 0; /* set number of transactions */ dma->CH[channel].CTLH = block->block_size / config->source_data_size; /* set priority and software handshaking for src/dst. 
if hardware hankshaking is used */ /* it will be enabled later in the code */ dma->CH[channel].CFGL = (config->channel_priority << GPDMA0_CH_CFGL_CH_PRIOR_Pos) | GPDMA0_CH_CFGL_HS_SEL_SRC_Msk | GPDMA0_CH_CFGL_HS_SEL_DST_Msk; dma->CH[channel].CTLL = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos | config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos | block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos | block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos | config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos | config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos | BIT(GPDMA0_CH_CTLL_INT_EN_Pos); if (config->channel_direction == MEMORY_TO_PERIPHERAL || config->channel_direction == PERIPHERAL_TO_MEMORY) { uint8_t request_source = XMC4XXX_DMA_GET_REQUEST_SOURCE(config->dma_slot); uint8_t dlr_line_reg = XMC4XXX_DMA_GET_LINE(config->dma_slot); dlr_line = dlr_line_reg; if (dma == XMC_DMA0 && dlr_line > 7) { LOG_ERR("Unsupported request line %d for DMA0." "Should be in range [0,7]", dlr_line); return -EINVAL; } if (dma == XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) { LOG_ERR("Unsupported request line %d for DMA1." "Should be in range [8,11]", dlr_line); return -EINVAL; } /* clear any overruns */ DLR->OVRCLR = BIT(dlr_line); /* enable the dma line */ DLR->LNEN &= ~BIT(dlr_line); DLR->LNEN |= BIT(dlr_line); /* connect DMA Line to SR */ if (dma == XMC_DMA0) { DLR->SRSEL0 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE)); DLR->SRSEL0 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE); } if (dma == XMC_DMA1) { dlr_line_reg -= 8; DLR->SRSEL1 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE)); DLR->SRSEL1 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE); } /* connect DMA channel to DMA line */ if (config->channel_direction == MEMORY_TO_PERIPHERAL) { dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_DEST_PER_Pos) | 4; dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_DST_Pos); dma->CH[channel].CTLL |= 1 << GPDMA0_CH_CTLL_TT_FC_Pos; } if (config->channel_direction == PERIPHERAL_TO_MEMORY) { dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_SRC_PER_Pos) | 4; dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_SRC_Pos); dma->CH[channel].CTLL |= 2 << GPDMA0_CH_CTLL_TT_FC_Pos; } } if (block->source_gather_en) { dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos); /* truncate if we are out of range */ dma->CH[channel].SGR = (block->source_gather_interval & GPDMA0_CH_SGR_SGI_Msk) | block->source_gather_count << GPDMA0_CH_SGR_SGC_Pos; } if (block->dest_scatter_en) { dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos); /* truncate if we are out of range */ dma->CH[channel].DSR = (block->dest_scatter_interval & GPDMA0_CH_DSR_DSI_Msk) | block->dest_scatter_count << GPDMA0_CH_DSR_DSC_Pos; } dev_data->channels[channel].cb = config->dma_callback; dev_data->channels[channel].user_data = config->user_data; dev_data->channels[channel].block_ts = block->block_size / config->source_data_size; dev_data->channels[channel].source_data_size = config->source_data_size; dev_data->channels[channel].dlr_line = dlr_line; XMC_DMA_CH_DisableEvent(dma, channel, ALL_EVENTS); XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_TRANSFER_COMPLETE); /* trigger enable on block transfer complete */ if (config->complete_callback_en) { XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE); } if (!config->error_callback_dis) { XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_ERROR); } 
LOG_DBG("Configured channel %d for %08X to %08X (%u)", channel, block->source_address, block->dest_address, block->block_size); return 0; } static int dma_xmc4xxx_start(const struct device *dev, uint32_t channel) { const struct dma_xmc4xxx_config *dev_cfg = dev->config; LOG_DBG("Starting channel %d", channel); XMC_DMA_CH_Enable(dev_cfg->dma, channel); return 0; } static int dma_xmc4xxx_stop(const struct device *dev, uint32_t channel) { const struct dma_xmc4xxx_config *dev_cfg = dev->config; struct dma_xmc4xxx_data *dev_data = dev->data; struct dma_xmc4xxx_channel *dma_channel; XMC_DMA_t *dma = dev_cfg->dma; dma_channel = &dev_data->channels[channel]; XMC_DMA_CH_Suspend(dma, channel); /* wait until ongoing transfer finishes */ while (XMC_DMA_CH_IsEnabled(dma, channel) && (dma->CH[channel].CFGL & GPDMA0_CH_CFGL_FIFO_EMPTY_Msk) == 0) { } /* disconnect DLR line to stop overuns */ if (dma_channel->dlr_line != DLR_LINE_UNSET) { DLR->LNEN &= ~BIT(dma_channel->dlr_line); } dma_channel->dlr_line = DLR_LINE_UNSET; dma_channel->cb = NULL; XMC_DMA_CH_Disable(dma, channel); return 0; } static int dma_xmc4xxx_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { struct dma_xmc4xxx_data *dev_data = dev->data; size_t block_ts; const struct dma_xmc4xxx_config *dev_cfg = dev->config; XMC_DMA_t *dma = dev_cfg->dma; struct dma_xmc4xxx_channel *dma_channel; if (channel >= dev_data->ctx.dma_channels) { LOG_ERR("Invalid channel number"); return -EINVAL; } if (XMC_DMA_CH_IsEnabled(dma, channel)) { LOG_ERR("Channel is still active"); return -EINVAL; } dma_channel = &dev_data->channels[channel]; block_ts = size / dma_channel->source_data_size; if (block_ts > DMA_MAX_BLOCK_LEN) { LOG_ERR("Block transactions must be <= 4095"); return -EINVAL; } dma_channel->block_ts = block_ts; /* do we need to clear any errors */ dma->CH[channel].SAR = src; dma->CH[channel].DAR = dst; dma->CH[channel].CTLH = block_ts; return 0; } static int dma_xmc4xxx_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { struct dma_xmc4xxx_data *dev_data = dev->data; const struct dma_xmc4xxx_config *dev_cfg = dev->config; XMC_DMA_t *dma = dev_cfg->dma; struct dma_xmc4xxx_channel *dma_channel; if (channel >= dev_data->ctx.dma_channels) { LOG_ERR("Invalid channel number"); return -EINVAL; } dma_channel = &dev_data->channels[channel]; stat->busy = XMC_DMA_CH_IsEnabled(dma, channel); stat->pending_length = dma_channel->block_ts - XMC_DMA_CH_GetTransferredData(dma, channel); stat->pending_length *= dma_channel->source_data_size; /* stat->dir and other remaining fields are not set. They are not */ /* useful for xmc4xxx peripheral drivers. 
*/ return 0; } static bool dma_xmc4xxx_chan_filter(const struct device *dev, int channel, void *filter_param) { uint32_t requested_channel; if (!filter_param) { return true; } requested_channel = *(uint32_t *)filter_param; if (channel == requested_channel) { return true; } return false; } static int dma_xmc4xxx_suspend(const struct device *dev, uint32_t channel) { struct dma_xmc4xxx_data *dev_data = dev->data; const struct dma_xmc4xxx_config *dev_cfg = dev->config; XMC_DMA_t *dma = dev_cfg->dma; if (channel >= dev_data->ctx.dma_channels) { LOG_ERR("Invalid channel number"); return -EINVAL; } XMC_DMA_CH_Suspend(dma, channel); return 0; } static int dma_xmc4xxx_resume(const struct device *dev, uint32_t channel) { struct dma_xmc4xxx_data *dev_data = dev->data; const struct dma_xmc4xxx_config *dev_cfg = dev->config; XMC_DMA_t *dma = dev_cfg->dma; if (channel >= dev_data->ctx.dma_channels) { LOG_ERR("Invalid channel number"); return -EINVAL; } XMC_DMA_CH_Resume(dma, channel); return 0; } static int dma_xmc4xxx_init(const struct device *dev) { const struct dma_xmc4xxx_config *dev_cfg = dev->config; XMC_DMA_Enable(dev_cfg->dma); dev_cfg->irq_configure(); return 0; } static const struct dma_driver_api dma_xmc4xxx_driver_api = { .config = dma_xmc4xxx_config, .reload = dma_xmc4xxx_reload, .start = dma_xmc4xxx_start, .stop = dma_xmc4xxx_stop, .get_status = dma_xmc4xxx_get_status, .chan_filter = dma_xmc4xxx_chan_filter, .suspend = dma_xmc4xxx_suspend, .resume = dma_xmc4xxx_resume, }; #define XMC4XXX_DMA_INIT(inst) \ static void dma_xmc4xxx##inst##_irq_configure(void) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, 0, irq), \ DT_INST_IRQ_BY_IDX(inst, 0, priority), \ dma_xmc4xxx_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(inst, 0, irq)); \ } \ static const struct dma_xmc4xxx_config dma_xmc4xxx##inst##_config = { \ .dma = (XMC_DMA_t *)DT_INST_REG_ADDR(inst), \ .irq_configure = dma_xmc4xxx##inst##_irq_configure, \ }; \ \ static struct dma_xmc4xxx_channel \ dma_xmc4xxx##inst##_channels[DT_INST_PROP(inst, dma_channels)]; \ ATOMIC_DEFINE(dma_xmc4xxx_atomic##inst, \ DT_INST_PROP(inst, dma_channels)); \ static struct dma_xmc4xxx_data dma_xmc4xxx##inst##_data = { \ .ctx = { \ .magic = DMA_MAGIC, \ .atomic = dma_xmc4xxx_atomic##inst, \ .dma_channels = DT_INST_PROP(inst, dma_channels), \ }, \ .channels = dma_xmc4xxx##inst##_channels, \ }; \ \ DEVICE_DT_INST_DEFINE(inst, &dma_xmc4xxx_init, NULL, \ &dma_xmc4xxx##inst##_data, \ &dma_xmc4xxx##inst##_config, PRE_KERNEL_1, \ CONFIG_DMA_INIT_PRIORITY, &dma_xmc4xxx_driver_api); DT_INST_FOREACH_STATUS_OKAY(XMC4XXX_DMA_INIT) ```
/content/code_sandbox/drivers/dma/dma_xmc4xxx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,636
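For reference, here is a memory-to-memory configuration that satisfies every check in dma_xmc4xxx_config() above: a single block, matching 4-byte source/destination data sizes, burst length 1, and a block of 256 transactions (well under the 4095 limit). The channel number and buffer size are placeholders.

```c
/* Hypothetical XMC4xxx mem-to-mem transfer via the generic DMA API. */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

#define XFER_WORDS 256

static uint32_t src_buf[XFER_WORDS];
static uint32_t dst_buf[XFER_WORDS];

int xmc_m2m_copy(const struct device *dma)
{
	struct dma_block_config blk = {
		.source_address = (uint32_t)(uintptr_t)src_buf,
		.dest_address = (uint32_t)(uintptr_t)dst_buf,
		.block_size = sizeof(src_buf), /* 1024 B / 4 B = 256 transactions */
		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.block_count = 1,
		.head_block = &blk,
		.source_data_size = 4,
		.dest_data_size = 4,
		.source_burst_length = 1,
		.dest_burst_length = 1,
	};
	int ret = dma_config(dma, 0, &cfg);

	return ret ? ret : dma_start(dma, 0);
}
```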
```unknown config DMA_MCUX_LPC bool "MCUX LPC DMAC driver" default y depends on DT_HAS_NXP_LPC_DMA_ENABLED help DMA driver for MCUX LPC MCUs. if DMA_MCUX_LPC config DMA_MCUX_LPC_NUMBER_OF_DESCRIPTORS int "Number of DMA descriptors to use" default 16 help Each DMA descriptor can be used to transfer (1024*width) bytes of data. Increase or decrease this value depending on the maximum amount of data transferred by the application. config DMA_MCUX_LPC_NUMBER_OF_CHANNELS_ALLOCATED int "Number of DMA channels to allocate memory for in driver" default 0 help The MCUX LPC DMA driver can save memory by not allocating static data depending on this value. So, the application can save some data memory space by setting this value to suit its needs. The meaning of the value is "total number of unique DMA channels ever expected to be used, maximum out of all DMA controllers". A value of 0 (default) means to allocate as many channel data structures as the maximum number of DMA channels in any DMA controller hardware. About 1 KB can be saved for every 3-4 unused channels by fine-tuning this Kconfig option. endif # DMA_MCUX_LPC ```
/content/code_sandbox/drivers/dma/Kconfig.mcux_lpc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
285
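As a worked example of the descriptor math above: with a 4-byte transfer width, each descriptor covers up to 1024 × 4 = 4096 bytes, so a hypothetical driver streaming 48 KiB buffers needs ceil(49152 / 4096) = 12 descriptors, and the default of 16 leaves some headroom.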
```unknown # DesignWare DMA common configuration options config DMA_DW_FIFO_PARTITION bool "FIFO Partitioning" help Enable the FIFO partitioning feature. config DMA_DW_LLI_POOL_SIZE int "number of LLI structs in an allocation pool" default 2 help The number of LLI structs in a statically allocated pool. Each channel has its own LLI struct pool. If during dma_config() a log notes there are not enough LLI structs then this should be increased to match the need. config DMA_DW_HW_LLI bool "hardware supports scatter gather" default y help The hardware is by default expected to support hardware LLI (scatter gather). When not enabled the driver will still perform scatter gather but using software to run through the scatter gather list. config DMA_DW_SUSPEND_DRAIN bool "channels should be suspended and drained on stop" depends on DMA_INTEL_ADSP_GPDMA help Rather than immediately stopping a DMA channel the channel is suspended with the DRAIN bit flag set to allow for the hardware FIFO to be drained before stopping the channel. config DMA_DW_HOST_MASK int "memory space mask" default 0 help Some instances of the DesignWare DMAC require a mask applied to source/destination addresses to signify the memory space the address is in. config DMA_DW_CHANNEL_COUNT int "dw max channel count" default 8 help Channel count for DesignWare DMA instances. ```
/content/code_sandbox/drivers/dma/Kconfig.dw_common
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
320
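To make DMA_DW_HOST_MASK concrete: the shim simply ORs the mask into the programmed source/destination addresses (see dw_dma_mask_address() in the dma_dw_common.c listing further down). A toy illustration with a made-up mask value follows.

```c
#include <stdint.h>

/* Made-up value for illustration; real platforms set CONFIG_DMA_DW_HOST_MASK */
#define DW_HOST_MASK 0x20000000u

/* A buffer at 0x00100000 is presented to the DMAC as 0x20100000, tagging
 * the access as belonging to the host memory space.
 */
static inline uint32_t dw_host_address(uint32_t addr)
{
	return addr | DW_HOST_MASK;
}
```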
```c /* * */ #define DT_DRV_COMPAT intel_adsp_hda_host_in #include <zephyr/drivers/dma.h> #include <adsp_interrupt.h> #include "dma_intel_adsp_hda.h" static const struct dma_driver_api intel_adsp_hda_dma_host_in_api = { .config = intel_adsp_hda_dma_host_in_config, .reload = intel_adsp_hda_dma_host_reload, .start = intel_adsp_hda_dma_start, .stop = intel_adsp_hda_dma_stop, .get_status = intel_adsp_hda_dma_status, .get_attribute = intel_adsp_hda_dma_get_attribute, .chan_filter = intel_adsp_hda_dma_chan_filter, }; #define INTEL_ADSP_HDA_DMA_HOST_IN_INIT(inst) \ static void intel_adsp_hda_dma##inst##_irq_config(void); \ \ static const struct intel_adsp_hda_dma_cfg intel_adsp_hda_dma##inst##_config = { \ .base = DT_INST_REG_ADDR(inst), \ .regblock_size = DT_INST_REG_SIZE(inst), \ .dma_channels = DT_INST_PROP(inst, dma_channels), \ .direction = MEMORY_TO_HOST, \ .irq_config = intel_adsp_hda_dma##inst##_irq_config \ }; \ \ static struct intel_adsp_hda_dma_data intel_adsp_hda_dma##inst##_data = {}; \ \ PM_DEVICE_DT_INST_DEFINE(inst, intel_adsp_hda_dma_pm_action); \ \ DEVICE_DT_INST_DEFINE(inst, &intel_adsp_hda_dma_init, \ PM_DEVICE_DT_INST_GET(inst), \ &intel_adsp_hda_dma##inst##_data, \ &intel_adsp_hda_dma##inst##_config, POST_KERNEL, \ CONFIG_DMA_INIT_PRIORITY, \ &intel_adsp_hda_dma_host_in_api); \ \ static void intel_adsp_hda_dma##inst##_irq_config(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), \ DT_INST_IRQ(inst, priority), intel_adsp_hda_dma_isr, \ DEVICE_DT_INST_GET(inst), \ DT_INST_IRQ(inst, sense)); \ irq_enable(DT_INST_IRQN(inst)); \ IF_ENABLED(CONFIG_SOC_SERIES_INTEL_ADSP_ACE, \ (ACE_DINT[0].ie[ACE_INTL_HDAHIDMA] = 1;)) \ } DT_INST_FOREACH_STATUS_OKAY(INTEL_ADSP_HDA_DMA_HOST_IN_INIT) ```
/content/code_sandbox/drivers/dma/dma_intel_adsp_hda_host_in.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
574
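Since this file only instantiates the shared HDA engine in the MEMORY_TO_HOST direction, clients interact with it purely through the generic API; claiming a channel, for instance, routes through intel_adsp_hda_dma_chan_filter() via dma_request_channel(). A hypothetical sketch, assuming the filter accepts a NULL parameter to mean "any free channel":

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Hypothetical: claim any free host-in channel. */
int hda_host_in_claim(const struct device *hda_host_in)
{
	int channel = dma_request_channel(hda_host_in, NULL);

	return channel; /* >= 0 on success, negative errno otherwise */
}
```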
```c /* * All rights reserved. * */ #include <zephyr/drivers/dma.h> #include <zephyr/drivers/dma/dma_mcux_pxp.h> #include <zephyr/devicetree.h> #include <fsl_pxp.h> #ifdef CONFIG_HAS_MCUX_CACHE #include <fsl_cache.h> #endif #define DT_DRV_COMPAT nxp_pxp #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_mcux_pxp, CONFIG_DMA_LOG_LEVEL); struct dma_mcux_pxp_config { PXP_Type *base; void (*irq_config_func)(const struct device *dev); }; struct dma_mcux_pxp_data { void *user_data; dma_callback_t dma_callback; uint32_t ps_buf_addr; uint32_t ps_buf_size; uint32_t out_buf_addr; uint32_t out_buf_size; }; static void dma_mcux_pxp_irq_handler(const struct device *dev) { const struct dma_mcux_pxp_config *config = dev->config; struct dma_mcux_pxp_data *data = dev->data; PXP_ClearStatusFlags(config->base, kPXP_CompleteFlag); #ifdef CONFIG_HAS_MCUX_CACHE DCACHE_InvalidateByRange((uint32_t)data->out_buf_addr, data->out_buf_size); #endif if (data->dma_callback) { data->dma_callback(dev, data->user_data, 0, 0); } } /* Configure a channel */ static int dma_mcux_pxp_configure(const struct device *dev, uint32_t channel, struct dma_config *config) { const struct dma_mcux_pxp_config *dev_config = dev->config; struct dma_mcux_pxp_data *dev_data = dev->data; pxp_ps_buffer_config_t ps_buffer_cfg; pxp_output_buffer_config_t output_buffer_cfg; uint8_t bytes_per_pixel; pxp_rotate_degree_t rotate; pxp_flip_mode_t flip; ARG_UNUSED(channel); if (config->channel_direction != MEMORY_TO_MEMORY) { return -ENOTSUP; } /* * Use the DMA slot value to get the pixel format and rotation * settings */ switch ((config->dma_slot & DMA_MCUX_PXP_CMD_MASK) >> DMA_MCUX_PXP_CMD_SHIFT) { case DMA_MCUX_PXP_CMD_ROTATE_0: rotate = kPXP_Rotate0; break; case DMA_MCUX_PXP_CMD_ROTATE_90: rotate = kPXP_Rotate90; break; case DMA_MCUX_PXP_CMD_ROTATE_180: rotate = kPXP_Rotate180; break; case DMA_MCUX_PXP_CMD_ROTATE_270: rotate = kPXP_Rotate270; break; default: return -ENOTSUP; } switch ((config->dma_slot & DMA_MCUX_PXP_FMT_MASK) >> DMA_MCUX_PXP_FMT_SHIFT) { case DMA_MCUX_PXP_FMT_RGB565: ps_buffer_cfg.pixelFormat = kPXP_PsPixelFormatRGB565; output_buffer_cfg.pixelFormat = kPXP_OutputPixelFormatRGB565; bytes_per_pixel = 2; break; case DMA_MCUX_PXP_FMT_RGB888: #if (!(defined(FSL_FEATURE_PXP_HAS_NO_EXTEND_PIXEL_FORMAT) && \ FSL_FEATURE_PXP_HAS_NO_EXTEND_PIXEL_FORMAT)) && \ (!(defined(FSL_FEATURE_PXP_V3) && FSL_FEATURE_PXP_V3)) ps_buffer_cfg.pixelFormat = kPXP_PsPixelFormatARGB8888; #else ps_buffer_cfg.pixelFormat = kPXP_PsPixelFormatRGB888; #endif output_buffer_cfg.pixelFormat = kPXP_OutputPixelFormatRGB888; bytes_per_pixel = 3; break; case DMA_MCUX_PXP_FMT_ARGB8888: ps_buffer_cfg.pixelFormat = kPXP_PsPixelFormatARGB8888; output_buffer_cfg.pixelFormat = kPXP_OutputPixelFormatARGB8888; bytes_per_pixel = 4; break; default: return -ENOTSUP; } /* * Use the DMA linked_channel value to get the flip settings. 
*/ switch ((config->linked_channel & DMA_MCUX_PXP_FLIP_MASK) >> DMA_MCUX_PXP_FLIP_SHIFT) { case DMA_MCUX_PXP_FLIP_DISABLE: flip = kPXP_FlipDisable; break; case DMA_MCUX_PXP_FLIP_HORIZONTAL: flip = kPXP_FlipHorizontal; break; case DMA_MCUX_PXP_FLIP_VERTICAL: flip = kPXP_FlipVertical; break; case DMA_MCUX_PXP_FLIP_BOTH: flip = kPXP_FlipBoth; break; default: return -ENOTSUP; } DCACHE_CleanByRange((uint32_t)config->head_block->source_address, config->head_block->block_size); /* * Some notes on how specific fields of the DMA config are used by * the PXP: * head block source address: PS buffer source address * head block destination address: Output buffer address * head block block size: size of destination and source buffer * source data size: width of source buffer in bytes (pitch) * source burst length: height of source buffer in pixels * dest data size: width of destination buffer in bytes (pitch) * dest burst length: height of destination buffer in pixels */ ps_buffer_cfg.swapByte = false; ps_buffer_cfg.bufferAddr = config->head_block->source_address; ps_buffer_cfg.bufferAddrU = 0U; ps_buffer_cfg.bufferAddrV = 0U; ps_buffer_cfg.pitchBytes = config->source_data_size; PXP_SetProcessSurfaceBufferConfig(dev_config->base, &ps_buffer_cfg); output_buffer_cfg.interlacedMode = kPXP_OutputProgressive; output_buffer_cfg.buffer0Addr = config->head_block->dest_address; output_buffer_cfg.buffer1Addr = 0U; output_buffer_cfg.pitchBytes = config->dest_data_size; output_buffer_cfg.width = (config->dest_data_size / bytes_per_pixel); output_buffer_cfg.height = config->dest_burst_length; PXP_SetOutputBufferConfig(dev_config->base, &output_buffer_cfg); /* We only support a process surface that covers the full buffer */ PXP_SetProcessSurfacePosition(dev_config->base, 0U, 0U, output_buffer_cfg.width, output_buffer_cfg.height); /* Setup rotation */ PXP_SetRotateConfig(dev_config->base, kPXP_RotateProcessSurface, rotate, flip); dev_data->ps_buf_addr = config->head_block->source_address; dev_data->ps_buf_size = config->head_block->block_size; dev_data->out_buf_addr = config->head_block->dest_address; dev_data->out_buf_size = config->head_block->block_size; dev_data->dma_callback = config->dma_callback; dev_data->user_data = config->user_data; return 0; } static int dma_mcux_pxp_start(const struct device *dev, uint32_t channel) { const struct dma_mcux_pxp_config *config = dev->config; struct dma_mcux_pxp_data *data = dev->data; #ifdef CONFIG_HAS_MCUX_CACHE DCACHE_CleanByRange((uint32_t)data->ps_buf_addr, data->ps_buf_size); #endif ARG_UNUSED(channel); PXP_Start(config->base); return 0; } static const struct dma_driver_api dma_mcux_pxp_api = { .config = dma_mcux_pxp_configure, .start = dma_mcux_pxp_start, }; static int dma_mcux_pxp_init(const struct device *dev) { const struct dma_mcux_pxp_config *config = dev->config; PXP_Init(config->base); PXP_SetProcessSurfaceBackGroundColor(config->base, 0U); /* Disable alpha surface and CSC1 */ PXP_SetAlphaSurfacePosition(config->base, 0xFFFFU, 0xFFFFU, 0U, 0U); PXP_EnableCsc1(config->base, false); PXP_EnableInterrupts(config->base, kPXP_CompleteInterruptEnable); config->irq_config_func(dev); return 0; } #define DMA_INIT(n) \ static void dma_pxp_config_func##n(const struct device *dev) \ { \ IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0), \ (IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ dma_mcux_pxp_irq_handler, DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ(n, irq));)) \ } \ \ static const struct dma_mcux_pxp_config dma_config_##n = { \ .base = (PXP_Type 
*)DT_INST_REG_ADDR(n), \ .irq_config_func = dma_pxp_config_func##n, \ }; \ \ static struct dma_mcux_pxp_data dma_data_##n; \ \ DEVICE_DT_INST_DEFINE(n, &dma_mcux_pxp_init, NULL, &dma_data_##n, &dma_config_##n, \ PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &dma_mcux_pxp_api); DT_INST_FOREACH_STATUS_OKAY(DMA_INIT) ```
/content/code_sandbox/drivers/dma/dma_mcux_pxp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,007
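The unusual part of this driver is how it overloads generic DMA config fields, as documented in its own comments: rotation and pixel format ride in dma_slot, flip mode rides in linked_channel, and data size / burst length carry the pitch and height. A hypothetical caller packing those fields, with placeholder buffer geometry:

```c
/* Sketch: rotate an RGB565 frame 90 degrees through the PXP "pseudo DMA". */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_mcux_pxp.h>

int pxp_rotate_rgb565(const struct device *pxp, uint32_t src, uint32_t dst,
		      uint16_t width, uint16_t height)
{
	struct dma_block_config blk = {
		.source_address = src,
		.dest_address = dst,
		.block_size = (uint32_t)width * height * 2U, /* RGB565: 2 B/px */
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.block_count = 1,
		.head_block = &blk,
		/* pitch in bytes and height in pixels, per the driver notes */
		.source_data_size = width * 2U,
		.source_burst_length = height,
		.dest_data_size = width * 2U,
		.dest_burst_length = height,
		.dma_slot = (DMA_MCUX_PXP_CMD_ROTATE_90 << DMA_MCUX_PXP_CMD_SHIFT) |
			    (DMA_MCUX_PXP_FMT_RGB565 << DMA_MCUX_PXP_FMT_SHIFT),
		.linked_channel = DMA_MCUX_PXP_FLIP_DISABLE << DMA_MCUX_PXP_FLIP_SHIFT,
	};
	int ret = dma_config(pxp, 0, &cfg);

	return ret ? ret : dma_start(pxp, 0);
}
```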
```objective-c /* * */ #ifndef DMA_MCUX_EDMA_H_ #define DMA_MCUX_EDMA_H_ #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/dma.h> #include <soc.h> #include <fsl_common.h> #include "fsl_edma.h" #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT #include "fsl_dmamux.h" #endif #endif /* DMA_MCUX_EDMA_H_*/ ```
/content/code_sandbox/drivers/dma/dma_mcux_edma.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
105
```c /* * */ #include <errno.h> #include <stdio.h> #include <string.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/dma.h> #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> #include <soc.h> #include "dma_dw_common.h" #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_dw_common); /* number of tries to wait for reset */ #define DW_DMA_CFG_TRIES 10000 void dw_dma_isr(const struct device *dev) { const struct dw_dma_dev_cfg *const dev_cfg = dev->config; struct dw_dma_dev_data *const dev_data = dev->data; struct dw_dma_chan_data *chan_data; uint32_t status_tfr = 0U; uint32_t status_block = 0U; uint32_t status_err = 0U; uint32_t status_intr; uint32_t channel; status_intr = dw_read(dev_cfg->base, DW_INTR_STATUS); if (!status_intr) { LOG_ERR("%s: status_intr = %d", dev->name, status_intr); } /* get the source of our IRQ. */ status_block = dw_read(dev_cfg->base, DW_STATUS_BLOCK); status_tfr = dw_read(dev_cfg->base, DW_STATUS_TFR); /* TODO: handle errors, just clear them atm */ status_err = dw_read(dev_cfg->base, DW_STATUS_ERR); if (status_err) { LOG_ERR("%s: status_err = %d\n", dev->name, status_err); dw_write(dev_cfg->base, DW_CLEAR_ERR, status_err); } /* clear interrupts */ dw_write(dev_cfg->base, DW_CLEAR_BLOCK, status_block); dw_write(dev_cfg->base, DW_CLEAR_TFR, status_tfr); /* Dispatch callbacks for channels depending upon the bit set */ while (status_block) { channel = find_lsb_set(status_block) - 1; status_block &= ~(1 << channel); chan_data = &dev_data->chan[channel]; if (chan_data->dma_blkcallback) { LOG_DBG("%s: Dispatching block complete callback fro channel %d", dev->name, channel); /* Ensure the linked list (chan_data->lli) is * freed in the user callback function once * all the blocks are transferred. */ chan_data->dma_blkcallback(dev, chan_data->blkuser_data, channel, DMA_STATUS_BLOCK); } } while (status_tfr) { channel = find_lsb_set(status_tfr) - 1; status_tfr &= ~(1 << channel); chan_data = &dev_data->chan[channel]; /* Transfer complete, channel now idle, a reload * could safely occur in the callback via dma_config * and dma_start */ chan_data->state = DW_DMA_IDLE; if (chan_data->dma_tfrcallback) { LOG_DBG("%s: Dispatching transfer callback for channel %d", dev->name, channel); chan_data->dma_tfrcallback(dev, chan_data->tfruser_data, channel, DMA_STATUS_COMPLETE); } } } /* mask address for dma to identify memory space. 
*/ static void dw_dma_mask_address(struct dma_block_config *block_cfg, struct dw_lli *lli_desc, uint32_t direction) { lli_desc->sar = block_cfg->source_address; lli_desc->dar = block_cfg->dest_address; switch (direction) { case MEMORY_TO_PERIPHERAL: lli_desc->sar |= CONFIG_DMA_DW_HOST_MASK; break; case PERIPHERAL_TO_MEMORY: lli_desc->dar |= CONFIG_DMA_DW_HOST_MASK; break; case MEMORY_TO_MEMORY: lli_desc->sar |= CONFIG_DMA_DW_HOST_MASK; lli_desc->dar |= CONFIG_DMA_DW_HOST_MASK; break; default: break; } } int dw_dma_config(const struct device *dev, uint32_t channel, struct dma_config *cfg) { const struct dw_dma_dev_cfg *const dev_cfg = dev->config; struct dw_dma_dev_data *const dev_data = dev->data; struct dma_block_config *block_cfg; struct dw_lli *lli_desc; struct dw_lli *lli_desc_head; struct dw_lli *lli_desc_tail; uint32_t msize = 3;/* default msize, 8 bytes */ int ret = 0; if (channel >= DW_CHAN_COUNT) { LOG_ERR("%s: invalid dma channel %d", dev->name, channel); ret = -EINVAL; goto out; } struct dw_dma_chan_data *chan_data = &dev_data->chan[channel]; if (chan_data->state != DW_DMA_IDLE && chan_data->state != DW_DMA_PREPARED) { LOG_ERR("%s: channel %d must be inactive to reconfigure, currently %d", dev->name, channel, chan_data->state); ret = -EBUSY; goto out; } LOG_DBG("%s: channel %d config", dev->name, channel); __ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size); __ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length); __ASSERT_NO_MSG(cfg->block_count > 0); __ASSERT_NO_MSG(cfg->head_block != NULL); if (cfg->source_data_size != 1 && cfg->source_data_size != 2 && cfg->source_data_size != 4 && cfg->source_data_size != 8 && cfg->source_data_size != 16) { LOG_ERR("%s: channel %d 'invalid source_data_size' value %d", dev->name, channel, cfg->source_data_size); ret = -EINVAL; goto out; } if (cfg->block_count > CONFIG_DMA_DW_LLI_POOL_SIZE) { LOG_ERR("%s: channel %d scatter gather list larger than" " descriptors in pool, consider increasing CONFIG_DMA_DW_LLI_POOL_SIZE", dev->name, channel); ret = -EINVAL; goto out; } /* burst_size = (2 ^ msize) */ msize = find_msb_set(cfg->source_burst_length) - 1; LOG_DBG("%s: channel %d m_size=%d", dev->name, channel, msize); __ASSERT_NO_MSG(msize < 5); /* default channel config */ chan_data->direction = cfg->channel_direction; chan_data->cfg_lo = 0; chan_data->cfg_hi = 0; /* setup a list of lli structs. 
we don't need to allocate */ chan_data->lli = &dev_data->lli_pool[channel][0]; /* TODO allocate here */ chan_data->lli_count = cfg->block_count; /* zero the scatter gather list */ memset(chan_data->lli, 0, sizeof(struct dw_lli) * chan_data->lli_count); lli_desc = chan_data->lli; lli_desc_head = &chan_data->lli[0]; lli_desc_tail = &chan_data->lli[chan_data->lli_count - 1]; chan_data->ptr_data.buffer_bytes = 0; /* copy the scatter gather list from dma_cfg to dw_lli */ block_cfg = cfg->head_block; for (int i = 0; i < cfg->block_count; i++) { __ASSERT_NO_MSG(block_cfg != NULL); LOG_DBG("%s: copying block_cfg %p to lli_desc %p", dev->name, block_cfg, lli_desc); /* write CTL_LO for each lli */ switch (cfg->source_data_size) { case 1: /* byte at a time transfer */ lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(0); break; case 2: /* non peripheral copies are optimal using words */ switch (cfg->channel_direction) { case MEMORY_TO_MEMORY: /* config the src tr width for 32 bit words */ lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2); break; default: /* config the src width for 16 bit samples */ lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(1); break; } break; case 4: /* config the src tr width for 24, 32 bit samples */ lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2); break; default: LOG_ERR("%s: channel %d invalid src width %d", dev->name, channel, cfg->source_data_size); ret = -EINVAL; goto out; } LOG_DBG("%s: source data size: lli_desc %p, ctrl_lo %x", dev->name, lli_desc, lli_desc->ctrl_lo); switch (cfg->dest_data_size) { case 1: /* byte at a time transfer */ lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(0); break; case 2: /* non peripheral copies are optimal using words */ switch (cfg->channel_direction) { case MEMORY_TO_MEMORY: /* config the dest tr width for 32 bit words */ lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2); break; default: /* config the dest width for 16 bit samples */ lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(1); break; } break; case 4: /* config the dest tr width for 24, 32 bit samples */ lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2); break; default: LOG_ERR("%s: channel %d invalid dest width %d", dev->name, channel, cfg->dest_data_size); ret = -EINVAL; goto out; } LOG_DBG("%s: dest data size: lli_desc %p, ctrl_lo %x", dev->name, lli_desc, lli_desc->ctrl_lo); lli_desc->ctrl_lo |= DW_CTLL_SRC_MSIZE(msize) | DW_CTLL_DST_MSIZE(msize); if (cfg->dma_callback) { lli_desc->ctrl_lo |= DW_CTLL_INT_EN; /* enable interrupt */ } LOG_DBG("%s: msize, int_en: lli_desc %p, ctrl_lo %x", dev->name, lli_desc, lli_desc->ctrl_lo); /* config the SINC and DINC fields of CTL_LO, * SRC/DST_PER fields of CFG_HI */ switch (cfg->channel_direction) { case MEMORY_TO_MEMORY: lli_desc->ctrl_lo |= DW_CTLL_FC_M2M | DW_CTLL_SRC_INC | DW_CTLL_DST_INC; #if CONFIG_DMA_DW_HW_LLI LOG_DBG("%s: setting LLP_D_EN, LLP_S_EN in lli_desc->ctrl_lo %x", dev->name, lli_desc->ctrl_lo); lli_desc->ctrl_lo |= DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN; LOG_DBG("%s: lli_desc->ctrl_lo %x", dev->name, lli_desc->ctrl_lo); #endif #if CONFIG_DMA_DW chan_data->cfg_lo |= DW_CFGL_SRC_SW_HS; chan_data->cfg_lo |= DW_CFGL_DST_SW_HS; #endif break; case MEMORY_TO_PERIPHERAL: lli_desc->ctrl_lo |= DW_CTLL_FC_M2P | DW_CTLL_SRC_INC | DW_CTLL_DST_FIX; #if CONFIG_DMA_DW_HW_LLI lli_desc->ctrl_lo |= DW_CTLL_LLP_S_EN; chan_data->cfg_lo |= DW_CFGL_RELOAD_DST; #endif /* Assign a hardware handshake interface (0-15) to the * destination of the channel */ chan_data->cfg_hi |= DW_CFGH_DST(cfg->dma_slot); #if CONFIG_DMA_DW chan_data->cfg_lo |= DW_CFGL_SRC_SW_HS; #endif break; case PERIPHERAL_TO_MEMORY: 
lli_desc->ctrl_lo |= DW_CTLL_FC_P2M | DW_CTLL_SRC_FIX | DW_CTLL_DST_INC; #if CONFIG_DMA_DW_HW_LLI if (!block_cfg->dest_scatter_en) { lli_desc->ctrl_lo |= DW_CTLL_LLP_D_EN; } else { /* Use contiguous auto-reload. Line 3 in * table 3-3 */ lli_desc->ctrl_lo |= DW_CTLL_D_SCAT_EN; } chan_data->cfg_lo |= DW_CFGL_RELOAD_SRC; #endif /* Assign a hardware handshake interface (0-15) to the * source of the channel */ chan_data->cfg_hi |= DW_CFGH_SRC(cfg->dma_slot); #if CONFIG_DMA_DW chan_data->cfg_lo |= DW_CFGL_DST_SW_HS; #endif break; default: LOG_ERR("%s: channel %d invalid direction %d", dev->name, channel, cfg->channel_direction); ret = -EINVAL; goto out; } LOG_DBG("%s: direction: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x", dev->name, lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi, chan_data->cfg_lo); dw_dma_mask_address(block_cfg, lli_desc, cfg->channel_direction); LOG_DBG("%s: mask address: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x", dev->name, lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi, chan_data->cfg_lo); if (block_cfg->block_size > DW_CTLH_BLOCK_TS_MASK) { LOG_ERR("%s: channel %d block size too big %d", dev->name, channel, block_cfg->block_size); ret = -EINVAL; goto out; } /* Set class and transfer size */ lli_desc->ctrl_hi |= DW_CTLH_CLASS(dev_data->channel_data->chan[channel].class) | (block_cfg->block_size & DW_CTLH_BLOCK_TS_MASK); LOG_DBG("%s: block_size, class: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x", dev->name, lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi, chan_data->cfg_lo); chan_data->ptr_data.buffer_bytes += block_cfg->block_size; /* set next descriptor in list */ lli_desc->llp = (uintptr_t)(lli_desc + 1); LOG_DBG("%s: lli_desc llp %x", dev->name, lli_desc->llp); /* next descriptor */ lli_desc++; block_cfg = block_cfg->next_block; } #if CONFIG_DMA_DW_HW_LLI chan_data->cfg_lo |= DW_CFGL_CTL_HI_UPD_EN; #endif /* end of list or cyclic buffer */ if (cfg->cyclic) { lli_desc_tail->llp = (uintptr_t)lli_desc_head; } else { lli_desc_tail->llp = 0; #if CONFIG_DMA_DW_HW_LLI LOG_DBG("%s: Clearing LLP_S_EN, LLP_D_EN from tail LLI %x", dev->name, lli_desc_tail->ctrl_lo); lli_desc_tail->ctrl_lo &= ~(DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN); LOG_DBG("%s: ctrl_lo %x", dev->name, lli_desc_tail->ctrl_lo); #endif } /* set the initial lli, mark the channel as prepared (ready to be started) */ chan_data->state = DW_DMA_PREPARED; chan_data->lli_current = chan_data->lli; /* initialize pointers */ chan_data->ptr_data.start_ptr = DW_DMA_LLI_ADDRESS(chan_data->lli, chan_data->direction); chan_data->ptr_data.end_ptr = chan_data->ptr_data.start_ptr + chan_data->ptr_data.buffer_bytes; chan_data->ptr_data.current_ptr = chan_data->ptr_data.start_ptr; chan_data->ptr_data.hw_ptr = chan_data->ptr_data.start_ptr; /* Configure a callback appropriately depending on whether the * interrupt is requested at the end of transaction completion or * at the end of each block. 
*/ if (cfg->complete_callback_en) { chan_data->dma_blkcallback = cfg->dma_callback; chan_data->blkuser_data = cfg->user_data; dw_write(dev_cfg->base, DW_MASK_BLOCK, DW_CHAN_UNMASK(channel)); } else { chan_data->dma_tfrcallback = cfg->dma_callback; chan_data->tfruser_data = cfg->user_data; dw_write(dev_cfg->base, DW_MASK_TFR, DW_CHAN_UNMASK(channel)); } dw_write(dev_cfg->base, DW_MASK_ERR, DW_CHAN_UNMASK(channel)); /* write interrupt clear registers for the channel * ClearTfr, ClearBlock, ClearSrcTran, ClearDstTran, ClearErr */ dw_write(dev_cfg->base, DW_CLEAR_TFR, 0x1 << channel); dw_write(dev_cfg->base, DW_CLEAR_BLOCK, 0x1 << channel); dw_write(dev_cfg->base, DW_CLEAR_SRC_TRAN, 0x1 << channel); dw_write(dev_cfg->base, DW_CLEAR_DST_TRAN, 0x1 << channel); dw_write(dev_cfg->base, DW_CLEAR_ERR, 0x1 << channel); out: return ret; } bool dw_dma_is_enabled(const struct device *dev, uint32_t channel) { const struct dw_dma_dev_cfg *const dev_cfg = dev->config; return dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel); } int dw_dma_start(const struct device *dev, uint32_t channel) { const struct dw_dma_dev_cfg *const dev_cfg = dev->config; struct dw_dma_dev_data *dev_data = dev->data; int ret = 0; /* validate channel */ if (channel >= DW_CHAN_COUNT) { ret = -EINVAL; goto out; } if (dw_dma_is_enabled(dev, channel)) { goto out; } struct dw_dma_chan_data *chan_data = &dev_data->chan[channel]; /* validate channel state */ if (chan_data->state != DW_DMA_PREPARED) { LOG_ERR("%s: channel %d not ready ena 0x%x status 0x%x", dev->name, channel, dw_read(dev_cfg->base, DW_DMA_CHAN_EN), chan_data->state); ret = -EBUSY; goto out; } /* is valid stream */ if (!chan_data->lli) { LOG_ERR("%s: channel %d invalid stream", dev->name, channel); ret = -EINVAL; goto out; } LOG_INF("%s: channel %d start", dev->name, channel); struct dw_lli *lli = chan_data->lli_current; #ifdef CONFIG_DMA_DW_HW_LLI /* LLP mode - write LLP pointer */ uint32_t masked_ctrl_lo = lli->ctrl_lo & (DW_CTLL_LLP_D_EN | DW_CTLL_LLP_S_EN); uint32_t llp = 0; if (masked_ctrl_lo) { llp = (uint32_t)lli; LOG_DBG("%s: Setting llp", dev->name); } dw_write(dev_cfg->base, DW_LLP(channel), llp); LOG_DBG("%s: ctrl_lo %x, masked ctrl_lo %x, LLP %x", dev->name, lli->ctrl_lo, masked_ctrl_lo, dw_read(dev_cfg->base, DW_LLP(channel))); #endif /* CONFIG_DMA_DW_HW_LLI */ /* channel needs to start from scratch, so write SAR and DAR */ #ifdef CONFIG_DMA_64BIT dw_write(dev_cfg->base, DW_SAR(channel), (uint32_t)(lli->sar & DW_ADDR_MASK_32)); dw_write(dev_cfg->base, DW_SAR_HI(channel), (uint32_t)(lli->sar >> DW_ADDR_RIGHT_SHIFT)); dw_write(dev_cfg->base, DW_DAR(channel), (uint32_t)(lli->dar & DW_ADDR_MASK_32)); dw_write(dev_cfg->base, DW_DAR_HI(channel), (uint32_t)(lli->dar >> DW_ADDR_RIGHT_SHIFT)); #else dw_write(dev_cfg->base, DW_SAR(channel), lli->sar); dw_write(dev_cfg->base, DW_DAR(channel), lli->dar); #endif /* CONFIG_DMA_64BIT */ /* program CTL_LO and CTL_HI */ dw_write(dev_cfg->base, DW_CTRL_LOW(channel), lli->ctrl_lo); dw_write(dev_cfg->base, DW_CTRL_HIGH(channel), lli->ctrl_hi); /* program CFG_LO and CFG_HI */ dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo); dw_write(dev_cfg->base, DW_CFG_HIGH(channel), chan_data->cfg_hi); #ifdef CONFIG_DMA_64BIT LOG_DBG("%s: sar %llx, dar %llx, ctrl_lo %x, ctrl_hi %x, cfg_lo %x, cfg_hi %x, llp %x", dev->name, lli->sar, lli->dar, lli->ctrl_lo, lli->ctrl_hi, chan_data->cfg_lo, chan_data->cfg_hi, dw_read(dev_cfg->base, DW_LLP(channel)) ); #else LOG_DBG("%s: sar %x, dar %x, ctrl_lo %x, ctrl_hi %x, cfg_lo %x, 
cfg_hi %x, llp %x", dev->name, lli->sar, lli->dar, lli->ctrl_lo, lli->ctrl_hi, chan_data->cfg_lo, chan_data->cfg_hi, dw_read(dev_cfg->base, DW_LLP(channel)) ); #endif /* CONFIG_DMA_64BIT */ #ifdef CONFIG_DMA_DW_HW_LLI if (lli->ctrl_lo & DW_CTLL_D_SCAT_EN) { LOG_DBG("%s: configuring DW_DSR", dev->name); uint32_t words_per_tfr = (lli->ctrl_hi & DW_CTLH_BLOCK_TS_MASK) >> ((lli->ctrl_lo & DW_CTLL_DST_WIDTH_MASK) >> DW_CTLL_DST_WIDTH_SHIFT); dw_write(dev_cfg->base, DW_DSR(channel), DW_DSR_DSC(words_per_tfr) | DW_DSR_DSI(words_per_tfr)); } #endif /* CONFIG_DMA_DW_HW_LLI */ chan_data->state = DW_DMA_ACTIVE; /* enable the channel */ dw_write(dev_cfg->base, DW_DMA_CHAN_EN, DW_CHAN_UNMASK(channel)); ret = pm_device_runtime_get(dev); out: return ret; } int dw_dma_stop(const struct device *dev, uint32_t channel) { const struct dw_dma_dev_cfg *const dev_cfg = dev->config; struct dw_dma_dev_data *dev_data = dev->data; struct dw_dma_chan_data *chan_data = &dev_data->chan[channel]; enum pm_device_state pm_state; int ret = 0; if (channel >= DW_CHAN_COUNT) { ret = -EINVAL; goto out; } /* * skip if device is not active. if we get an error for state_get, * do not skip but check actual hardware state and stop if * needed */ ret = pm_device_state_get(dev, &pm_state); if (!ret && pm_state != PM_DEVICE_STATE_ACTIVE) { goto out; } if (!dw_dma_is_enabled(dev, channel) && chan_data->state != DW_DMA_SUSPENDED) { ret = 0; goto out; } #ifdef CONFIG_DMA_DW_HW_LLI struct dw_lli *lli = chan_data->lli; int i; #endif LOG_INF("%s: channel %d stop", dev->name, channel); /* Validate the channel state */ if (chan_data->state != DW_DMA_ACTIVE && chan_data->state != DW_DMA_SUSPENDED) { ret = -EINVAL; goto out; } #ifdef CONFIG_DMA_DW_SUSPEND_DRAIN /* channel cannot be disabled right away, so first we need to) * suspend it and drain the FIFO */ dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo | DW_CFGL_SUSPEND | DW_CFGL_DRAIN); /* now we wait for FIFO to be empty */ bool fifo_empty = WAIT_FOR(dw_read(dev_cfg->base, DW_CFG_LOW(channel)) & DW_CFGL_FIFO_EMPTY, DW_DMA_TIMEOUT, k_busy_wait(DW_DMA_TIMEOUT/10)); if (!fifo_empty) { LOG_WRN("%s: channel %d drain time out", dev->name, channel); /* Continue even if draining timed out to make sure that the channel is going to be * disabled. * The same channel might be requested for other purpose (or for same) next time * which will fail if the channel has been left enabled. 
*/ } #endif dw_write(dev_cfg->base, DW_DMA_CHAN_EN, DW_CHAN_MASK(channel)); /* now we wait for channel to be disabled */ bool is_disabled = WAIT_FOR(!(dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel)), DW_DMA_TIMEOUT, k_busy_wait(DW_DMA_TIMEOUT/10)); if (!is_disabled) { LOG_ERR("%s: channel %d disable timeout", dev->name, channel); return -ETIMEDOUT; } #if CONFIG_DMA_DW_HW_LLI for (i = 0; i < chan_data->lli_count; i++) { lli->ctrl_hi &= ~DW_CTLH_DONE(1); lli++; } #endif chan_data->state = DW_DMA_IDLE; ret = pm_device_runtime_put(dev); out: return ret; } int dw_dma_resume(const struct device *dev, uint32_t channel) { const struct dw_dma_dev_cfg *const dev_cfg = dev->config; struct dw_dma_dev_data *dev_data = dev->data; int ret = 0; /* Validate channel index */ if (channel >= DW_CHAN_COUNT) { ret = -EINVAL; goto out; } struct dw_dma_chan_data *chan_data = &dev_data->chan[channel]; /* Validate channel state */ if (chan_data->state != DW_DMA_SUSPENDED) { ret = -EINVAL; goto out; } LOG_DBG("%s: channel %d resume", dev->name, channel); dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo); /* Channel is now active */ chan_data->state = DW_DMA_ACTIVE; out: return ret; } int dw_dma_suspend(const struct device *dev, uint32_t channel) { const struct dw_dma_dev_cfg *const dev_cfg = dev->config; struct dw_dma_dev_data *dev_data = dev->data; int ret = 0; /* Validate channel index */ if (channel >= DW_CHAN_COUNT) { ret = -EINVAL; goto out; } struct dw_dma_chan_data *chan_data = &dev_data->chan[channel]; /* Validate channel state */ if (chan_data->state != DW_DMA_ACTIVE) { ret = -EINVAL; goto out; } LOG_DBG("%s: channel %d suspend", dev->name, channel); dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo | DW_CFGL_SUSPEND); /* Channel is now suspended */ chan_data->state = DW_DMA_SUSPENDED; out: return ret; } int dw_dma_setup(const struct device *dev) { const struct dw_dma_dev_cfg *const dev_cfg = dev->config; int i, ret = 0; /* we cannot config DMAC if DMAC has been already enabled by host */ if (dw_read(dev_cfg->base, DW_DMA_CFG) != 0) { dw_write(dev_cfg->base, DW_DMA_CFG, 0x0); } for (i = DW_DMA_CFG_TRIES; i > 0; i--) { if (!dw_read(dev_cfg->base, DW_DMA_CFG)) { break; } } if (!i) { LOG_ERR("%s: setup failed", dev->name); ret = -EIO; goto out; } LOG_DBG("%s: ENTER", dev->name); for (i = 0; i < DW_CHAN_COUNT; i++) { dw_read(dev_cfg->base, DW_DMA_CHAN_EN); } /* enable the DMA controller */ dw_write(dev_cfg->base, DW_DMA_CFG, 1); /* mask all interrupts for all 8 channels */ dw_write(dev_cfg->base, DW_MASK_TFR, DW_CHAN_MASK_ALL); dw_write(dev_cfg->base, DW_MASK_BLOCK, DW_CHAN_MASK_ALL); dw_write(dev_cfg->base, DW_MASK_SRC_TRAN, DW_CHAN_MASK_ALL); dw_write(dev_cfg->base, DW_MASK_DST_TRAN, DW_CHAN_MASK_ALL); dw_write(dev_cfg->base, DW_MASK_ERR, DW_CHAN_MASK_ALL); #ifdef CONFIG_DMA_DW_FIFO_PARTITION /* allocate FIFO partitions for each channel */ dw_write(dev_cfg->base, DW_FIFO_PART1_HI, DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE)); dw_write(dev_cfg->base, DW_FIFO_PART1_LO, DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE)); dw_write(dev_cfg->base, DW_FIFO_PART0_HI, DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE)); dw_write(dev_cfg->base, DW_FIFO_PART0_LO, DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE) | DW_FIFO_UPD); #endif /* CONFIG_DMA_DW_FIFO_PARTITION */ /* TODO add baytrail/cherrytrail workaround */ out: return ret; } static int dw_dma_avail_data_size(const struct device *dev, uint32_t base, struct dw_dma_chan_data *chan_data, uint32_t channel) { 
int32_t read_ptr = chan_data->ptr_data.current_ptr; int32_t write_ptr = dw_read(base, DW_DAR(channel)); int32_t delta = write_ptr - chan_data->ptr_data.hw_ptr; int size; chan_data->ptr_data.hw_ptr = write_ptr; size = write_ptr - read_ptr; if (size < 0) { size += chan_data->ptr_data.buffer_bytes; } else if (!size) { /* * Buffer is either full or empty. If the DMA pointer has * changed, then the DMA has filled the buffer. */ if (delta) { size = chan_data->ptr_data.buffer_bytes; } else { LOG_DBG("%s: channel %d: size is 0!", dev->name, channel); } } LOG_DBG("%s: channel %d: DAR %x reader 0x%x free 0x%x avail 0x%x", dev->name, channel, write_ptr, read_ptr, chan_data->ptr_data.buffer_bytes - size, size); return size; } static int dw_dma_free_data_size(const struct device *dev, uint32_t base, struct dw_dma_chan_data *chan_data, uint32_t channel) { int32_t read_ptr = dw_read(base, DW_SAR(channel)); int32_t write_ptr = chan_data->ptr_data.current_ptr; int32_t delta = read_ptr - chan_data->ptr_data.hw_ptr; int size; chan_data->ptr_data.hw_ptr = read_ptr; size = read_ptr - write_ptr; if (size < 0) { size += chan_data->ptr_data.buffer_bytes; } else if (!size) { /* * Buffer is either full or empty. If the DMA pointer has * changed, then the DMA has emptied the buffer. */ if (delta) { size = chan_data->ptr_data.buffer_bytes; } else { LOG_DBG("%s: channel %d: size is 0!", dev->name, channel); } } LOG_DBG("%s: channel %d: SAR %x writer 0x%x free 0x%x avail 0x%x", dev->name, channel, read_ptr, write_ptr, size, chan_data->ptr_data.buffer_bytes - size); return size; } int dw_dma_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { struct dw_dma_dev_data *const dev_data = dev->data; const struct dw_dma_dev_cfg *const dev_cfg = dev->config; struct dw_dma_chan_data *chan_data; if (channel >= DW_CHAN_COUNT) { return -EINVAL; } chan_data = &dev_data->chan[channel]; if (chan_data->direction == MEMORY_TO_MEMORY || chan_data->direction == PERIPHERAL_TO_MEMORY) { stat->pending_length = dw_dma_avail_data_size(dev, dev_cfg->base, chan_data, channel); stat->free = chan_data->ptr_data.buffer_bytes - stat->pending_length; } else { stat->free = dw_dma_free_data_size(dev, dev_cfg->base, chan_data, channel); stat->pending_length = chan_data->ptr_data.buffer_bytes - stat->free; } #if CONFIG_DMA_DW_HW_LLI if (!(dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel))) { LOG_ERR("%s: xrun detected", dev->name); return -EPIPE; } #endif return 0; } ```
/content/code_sandbox/drivers/dma/dma_dw_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,420
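
The pointer arithmetic in dw_dma_avail_data_size() and dw_dma_free_data_size() above is classic single-producer ring-buffer accounting with one subtlety: when the read and write positions coincide, the buffer may be either empty or completely full, and the driver disambiguates by checking whether the hardware pointer has moved since the last query. Below is a minimal standalone sketch of that logic; the names ring_state and avail_bytes and the demo addresses are invented for illustration.

```c
/*
 * Standalone sketch of the full-vs-empty disambiguation used by
 * dw_dma_avail_data_size(). The demo pointers model a 256-byte
 * buffer at 0x1000.
 */
#include <stdint.h>
#include <stdio.h>

struct ring_state {
	int32_t buffer_bytes; /* total size of the circular buffer */
	int32_t hw_ptr;       /* hardware (DMA) position seen on the last query */
};

/*
 * Bytes produced by the DMA and not yet consumed by the reader.
 * A zero difference is ambiguous: the buffer is full if the hardware
 * pointer moved since the last call, empty otherwise.
 */
static int avail_bytes(struct ring_state *rs, int32_t read_ptr, int32_t write_ptr)
{
	int32_t delta = write_ptr - rs->hw_ptr;
	int size = write_ptr - read_ptr;

	rs->hw_ptr = write_ptr;

	if (size < 0) {
		size += rs->buffer_bytes; /* the write pointer wrapped around */
	} else if (size == 0 && delta != 0) {
		size = rs->buffer_bytes;  /* pointers equal but the DMA moved: full */
	}

	return size;
}

int main(void)
{
	struct ring_state rs = { .buffer_bytes = 256, .hw_ptr = 0x1000 };

	/* writer at 0x10C0, reader at 0x1040: 0x80 bytes pending */
	printf("avail: %d\n", avail_bytes(&rs, 0x1040, 0x10C0));

	/* pointers meet again after the DMA advanced a full lap: buffer full */
	printf("avail: %d\n", avail_bytes(&rs, 0x1040, 0x1040));

	return 0;
}
```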
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/dma.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/drivers/clock_control/adi_max32_clock_control.h> #include <wrap_max32_dma.h> #define DT_DRV_COMPAT adi_max32_dma LOG_MODULE_REGISTER(max32_dma, CONFIG_DMA_LOG_LEVEL); struct max32_dma_config { mxc_dma_regs_t *regs; const struct device *clock; struct max32_perclk perclk; uint8_t channels; void (*irq_configure)(void); }; struct max32_dma_data { dma_callback_t callback; void *cb_data; uint32_t err_cb_dis; }; static inline bool max32_dma_ch_prio_valid(uint32_t ch_prio) { /* mxc_dma_priority_t is limited to values 0-3 */ if (!(ch_prio >= 0 && ch_prio <= 3)) { LOG_ERR("Invalid DMA priority - must be type mxc_dma_priority_t (0-3)"); return false; } return true; } static inline int max32_dma_width(uint32_t width) { switch (width) { case 1: return MXC_DMA_WIDTH_BYTE; case 2: return MXC_DMA_WIDTH_HALFWORD; case 4: return MXC_DMA_WIDTH_WORD; default: LOG_ERR("Invalid DMA width - must be byte (1), halfword (2) or word (4)"); return -EINVAL; } } static inline int max32_dma_addr_adj(uint16_t addr_adj) { switch (addr_adj) { case DMA_ADDR_ADJ_NO_CHANGE: return 0; case DMA_ADDR_ADJ_INCREMENT: return 1; default: LOG_ERR("Invalid DMA address adjust - must be NO_CHANGE (0) or INCREMENT (1)"); return 0; } } static inline int max32_dma_ch_index(mxc_dma_regs_t *dma, uint8_t ch) { return (ch + MXC_DMA_GET_IDX(dma) * (MXC_DMA_CHANNELS / MXC_DMA_INSTANCES)); } static int max32_dma_config(const struct device *dev, uint32_t channel, struct dma_config *config) { int ret = 0; const struct max32_dma_config *cfg = dev->config; struct max32_dma_data *data = dev->data; uint32_t ch; if (channel >= cfg->channels) { LOG_ERR("Invalid DMA channel - must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, channel); return -EINVAL; } ch = max32_dma_ch_index(cfg->regs, channel); /* DMA Channel Config */ mxc_dma_config_t mxc_dma_cfg; mxc_dma_cfg.ch = ch; mxc_dma_cfg.reqsel = config->dma_slot << ADI_MAX32_DMA_CFG_REQ_POS; if (((max32_dma_width(config->source_data_size)) < 0) || ((max32_dma_width(config->dest_data_size)) < 0)) { return -EINVAL; } mxc_dma_cfg.srcwd = max32_dma_width(config->source_data_size); mxc_dma_cfg.dstwd = max32_dma_width(config->dest_data_size); mxc_dma_cfg.srcinc_en = max32_dma_addr_adj(config->head_block->source_addr_adj); mxc_dma_cfg.dstinc_en = max32_dma_addr_adj(config->head_block->dest_addr_adj); /* DMA Channel Advanced Config */ mxc_dma_adv_config_t mxc_dma_cfg_adv; mxc_dma_cfg_adv.ch = ch; if (!max32_dma_ch_prio_valid(config->channel_priority)) { return -EINVAL; } mxc_dma_cfg_adv.prio = config->channel_priority; mxc_dma_cfg_adv.reqwait_en = 0; mxc_dma_cfg_adv.tosel = MXC_DMA_TIMEOUT_4_CLK; mxc_dma_cfg_adv.pssel = MXC_DMA_PRESCALE_DISABLE; mxc_dma_cfg_adv.burst_size = config->source_burst_length; /* DMA Transfer Config */ mxc_dma_srcdst_t txfer; txfer.ch = ch; txfer.source = (void *)config->head_block->source_address; txfer.dest = (void *)config->head_block->dest_address; txfer.len = config->head_block->block_size; ret = MXC_DMA_ConfigChannel(mxc_dma_cfg, txfer); if (ret != E_NO_ERROR) { return ret; } ret = MXC_DMA_AdvConfigChannel(mxc_dma_cfg_adv); if (ret) { return ret; } /* Enable interrupts for the DMA peripheral */ ret = MXC_DMA_EnableInt(ch); if (ret != E_NO_ERROR) { return ret; } /* Enable complete and count-to-zero interrupts for the channel */ ret = MXC_DMA_ChannelEnableInt(ch, ADI_MAX32_DMA_CTRL_DIS_IE | 
ADI_MAX32_DMA_CTRL_CTZIEN); if (ret != E_NO_ERROR) { return ret; } data[channel].callback = config->dma_callback; data[channel].cb_data = config->user_data; data[channel].err_cb_dis = config->error_callback_dis; return ret; } static int max32_dma_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { const struct max32_dma_config *cfg = dev->config; mxc_dma_srcdst_t reload; int flags; if (channel >= cfg->channels) { LOG_ERR("Invalid DMA channel - must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, channel); return -EINVAL; } channel = max32_dma_ch_index(cfg->regs, channel); flags = MXC_DMA_ChannelGetFlags(channel); if (flags & ADI_MAX32_DMA_STATUS_ST) { return -EBUSY; } reload.ch = channel; reload.source = (void *)src; reload.dest = (void *)dst; reload.len = size; return MXC_DMA_SetSrcDst(reload); } static int max32_dma_start(const struct device *dev, uint32_t channel) { const struct max32_dma_config *cfg = dev->config; int flags; if (channel >= cfg->channels) { LOG_ERR("Invalid DMA channel - must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, channel); return -EINVAL; } channel = max32_dma_ch_index(cfg->regs, channel); flags = MXC_DMA_ChannelGetFlags(channel); if (flags & ADI_MAX32_DMA_STATUS_ST) { return -EBUSY; } return MXC_DMA_Start(channel); } static int max32_dma_stop(const struct device *dev, uint32_t channel) { const struct max32_dma_config *cfg = dev->config; if (channel >= cfg->channels) { LOG_ERR("Invalid DMA channel - must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, channel); return -EINVAL; } channel = max32_dma_ch_index(cfg->regs, channel); return MXC_DMA_Stop(channel); } static int max32_dma_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { const struct max32_dma_config *cfg = dev->config; int ret = 0; int flags = 0; mxc_dma_srcdst_t txfer; if (channel >= cfg->channels) { LOG_ERR("Invalid DMA channel - must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, channel); return -EINVAL; } channel = max32_dma_ch_index(cfg->regs, channel); txfer.ch = channel; flags = MXC_DMA_ChannelGetFlags(channel); ret = MXC_DMA_GetSrcDst(&txfer); if (ret != E_NO_ERROR) { return ret; } /* Channel is busy if channel status is enabled */ stat->busy = (flags & ADI_MAX32_DMA_STATUS_ST) != 0; stat->pending_length = txfer.len; return ret; } static void max32_dma_isr(const struct device *dev) { const struct max32_dma_config *cfg = dev->config; struct max32_dma_data *data = dev->data; mxc_dma_regs_t *regs = cfg->regs; int ch, c; int flags; int status = 0; uint8_t channel_base = max32_dma_ch_index(cfg->regs, 0); for (ch = channel_base, c = 0; c < cfg->channels; ch++, c++) { flags = MXC_DMA_ChannelGetFlags(ch); /* Check if channel is in use, if not, move to next channel */ if (flags <= 0) { continue; } /* Check for error interrupts */ if (flags & (ADI_MAX32_DMA_STATUS_BUS_ERR | ADI_MAX32_DMA_STATUS_TO_IF)) { status = -EIO; } MXC_DMA_ChannelClearFlags(ch, flags); if (data[c].callback) { /* Only call error callback if enabled during DMA config */ if (status < 0 && (data[c].err_cb_dis)) { break; } data[c].callback(dev, data[c].cb_data, c, status); } /* No need to check rest of the channels if no interrupt flags set */ if (MXC_DMA_GetIntFlags(regs) == 0) { break; } } } static int max32_dma_init(const struct device *dev) { int ret = 0; const struct max32_dma_config *cfg = dev->config; if (!device_is_ready(cfg->clock)) { return -ENODEV; } /* Enable peripheral clock */ ret = clock_control_on(cfg->clock, (clock_control_subsys_t) 
&(cfg->perclk)); if (ret) { return ret; } ret = Wrap_MXC_DMA_Init(cfg->regs); if (ret) { return ret; } /* Acquire all channels so they are available to Zephyr application */ for (int i = 0; i < cfg->channels; i++) { ret = Wrap_MXC_DMA_AcquireChannel(cfg->regs); if (ret < 0) { break; } /* Channels already acquired */ } cfg->irq_configure(); return 0; } static const struct dma_driver_api max32_dma_driver_api = { .config = max32_dma_config, .reload = max32_dma_reload, .start = max32_dma_start, .stop = max32_dma_stop, .get_status = max32_dma_get_status, }; #define MAX32_DMA_IRQ_CONNECT(n, inst) \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, n, irq), DT_INST_IRQ_BY_IDX(inst, n, priority), \ max32_dma_isr, DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(inst, n, irq)); #define CONFIGURE_ALL_IRQS(inst, n) LISTIFY(n, MAX32_DMA_IRQ_CONNECT, (), inst) #define MAX32_DMA_INIT(inst) \ static struct max32_dma_data dma##inst##_data[DT_INST_PROP(inst, dma_channels)]; \ static void max32_dma##inst##_irq_configure(void) \ { \ CONFIGURE_ALL_IRQS(inst, DT_NUM_IRQS(DT_DRV_INST(inst))); \ } \ static const struct max32_dma_config dma##inst##_cfg = { \ .regs = (mxc_dma_regs_t *)DT_INST_REG_ADDR(inst), \ .clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(inst)), \ .perclk.bus = DT_INST_CLOCKS_CELL(inst, offset), \ .perclk.bit = DT_INST_CLOCKS_CELL(inst, bit), \ .channels = DT_INST_PROP(inst, dma_channels), \ .irq_configure = max32_dma##inst##_irq_configure, \ }; \ DEVICE_DT_INST_DEFINE(inst, &max32_dma_init, NULL, &dma##inst##_data, &dma##inst##_cfg, \ PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &max32_dma_driver_api); DT_INST_FOREACH_STATUS_OKAY(MAX32_DMA_INIT) ```
/content/code_sandbox/drivers/dma/dma_max32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,647
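
For context on how a driver like this is exercised, here is a hedged sketch of a caller using Zephyr's generic DMA API for a memory-to-memory copy. The devicetree label dma0, the channel number 0, and the buffer sizes are assumptions; for peripheral transfers, dma_slot would carry the hardware request index that max32_dma_config() shifts into the channel's request-select field.

```c
/*
 * Hedged usage sketch against Zephyr's generic DMA API. The node label
 * dma0, channel 0, and the buffer sizes are illustrative assumptions.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/sys/util.h>

static uint8_t src_buf[64];
static uint8_t dst_buf[64];

/* called from the DMA ISR; status is 0 on completion, negative on error */
static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);
	ARG_UNUSED(channel);
	ARG_UNUSED(status);
}

int start_m2m_copy(void)
{
	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma0));
	struct dma_block_config block = {
		.source_address = (uint32_t)(uintptr_t)src_buf,
		.dest_address = (uint32_t)(uintptr_t)dst_buf,
		.block_size = sizeof(src_buf),
		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4, /* word accesses on both sides */
		.dest_data_size = 4,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.block_count = 1,
		.head_block = &block,
		.dma_callback = xfer_done,
	};
	int ret = dma_config(dma, 0, &cfg);

	if (ret == 0) {
		ret = dma_start(dma, 0);
	}

	return ret;
}
```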
```c /* * */ /** * @brief DMA low level driver implementation for F2/F4/F7 series SoCs. */ #include "dma_stm32.h" #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_stm32_v1); /* DMA burst length */ #define BURST_TRANS_LENGTH_1 0 uint32_t dma_stm32_id_to_stream(uint32_t id) { static const uint32_t stream_nr[] = { LL_DMA_STREAM_0, LL_DMA_STREAM_1, LL_DMA_STREAM_2, LL_DMA_STREAM_3, LL_DMA_STREAM_4, LL_DMA_STREAM_5, LL_DMA_STREAM_6, LL_DMA_STREAM_7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(stream_nr)); return stream_nr[id]; } #if !defined(CONFIG_DMAMUX_STM32) uint32_t dma_stm32_slot_to_channel(uint32_t slot) { static const uint32_t channel_nr[] = { LL_DMA_CHANNEL_0, LL_DMA_CHANNEL_1, LL_DMA_CHANNEL_2, LL_DMA_CHANNEL_3, LL_DMA_CHANNEL_4, LL_DMA_CHANNEL_5, LL_DMA_CHANNEL_6, LL_DMA_CHANNEL_7, }; __ASSERT_NO_MSG(slot < ARRAY_SIZE(channel_nr)); return channel_nr[slot]; } #endif void dma_stm32_clear_ht(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_HT0, LL_DMA_ClearFlag_HT1, LL_DMA_ClearFlag_HT2, LL_DMA_ClearFlag_HT3, LL_DMA_ClearFlag_HT4, LL_DMA_ClearFlag_HT5, LL_DMA_ClearFlag_HT6, LL_DMA_ClearFlag_HT7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } void dma_stm32_clear_tc(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_TC0, LL_DMA_ClearFlag_TC1, LL_DMA_ClearFlag_TC2, LL_DMA_ClearFlag_TC3, LL_DMA_ClearFlag_TC4, LL_DMA_ClearFlag_TC5, LL_DMA_ClearFlag_TC6, LL_DMA_ClearFlag_TC7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } bool dma_stm32_is_ht_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_HT0, LL_DMA_IsActiveFlag_HT1, LL_DMA_IsActiveFlag_HT2, LL_DMA_IsActiveFlag_HT3, LL_DMA_IsActiveFlag_HT4, LL_DMA_IsActiveFlag_HT5, LL_DMA_IsActiveFlag_HT6, LL_DMA_IsActiveFlag_HT7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } bool dma_stm32_is_tc_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_TC0, LL_DMA_IsActiveFlag_TC1, LL_DMA_IsActiveFlag_TC2, LL_DMA_IsActiveFlag_TC3, LL_DMA_IsActiveFlag_TC4, LL_DMA_IsActiveFlag_TC5, LL_DMA_IsActiveFlag_TC6, LL_DMA_IsActiveFlag_TC7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } void dma_stm32_clear_te(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_TE0, LL_DMA_ClearFlag_TE1, LL_DMA_ClearFlag_TE2, LL_DMA_ClearFlag_TE3, LL_DMA_ClearFlag_TE4, LL_DMA_ClearFlag_TE5, LL_DMA_ClearFlag_TE6, LL_DMA_ClearFlag_TE7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } void dma_stm32_clear_dme(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_DME0, LL_DMA_ClearFlag_DME1, LL_DMA_ClearFlag_DME2, LL_DMA_ClearFlag_DME3, LL_DMA_ClearFlag_DME4, LL_DMA_ClearFlag_DME5, LL_DMA_ClearFlag_DME6, LL_DMA_ClearFlag_DME7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } void dma_stm32_clear_fe(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_FE0, LL_DMA_ClearFlag_FE1, LL_DMA_ClearFlag_FE2, LL_DMA_ClearFlag_FE3, LL_DMA_ClearFlag_FE4, LL_DMA_ClearFlag_FE5, LL_DMA_ClearFlag_FE6, LL_DMA_ClearFlag_FE7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } bool dma_stm32_is_te_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_TE0, 
LL_DMA_IsActiveFlag_TE1, LL_DMA_IsActiveFlag_TE2, LL_DMA_IsActiveFlag_TE3, LL_DMA_IsActiveFlag_TE4, LL_DMA_IsActiveFlag_TE5, LL_DMA_IsActiveFlag_TE6, LL_DMA_IsActiveFlag_TE7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } bool dma_stm32_is_dme_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_DME0, LL_DMA_IsActiveFlag_DME1, LL_DMA_IsActiveFlag_DME2, LL_DMA_IsActiveFlag_DME3, LL_DMA_IsActiveFlag_DME4, LL_DMA_IsActiveFlag_DME5, LL_DMA_IsActiveFlag_DME6, LL_DMA_IsActiveFlag_DME7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } bool dma_stm32_is_fe_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_FE0, LL_DMA_IsActiveFlag_FE1, LL_DMA_IsActiveFlag_FE2, LL_DMA_IsActiveFlag_FE3, LL_DMA_IsActiveFlag_FE4, LL_DMA_IsActiveFlag_FE5, LL_DMA_IsActiveFlag_FE6, LL_DMA_IsActiveFlag_FE7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id) { LOG_INF("tc: %d, ht: %d, te: %d, dme: %d, fe: %d", dma_stm32_is_tc_active(dma, id), dma_stm32_is_ht_active(dma, id), dma_stm32_is_te_active(dma, id), dma_stm32_is_dme_active(dma, id), dma_stm32_is_fe_active(dma, id)); } inline bool stm32_dma_is_tc_irq_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsEnabledIT_TC(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_tc_active(dma, id); } inline bool stm32_dma_is_ht_irq_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsEnabledIT_HT(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_ht_active(dma, id); } static inline bool stm32_dma_is_te_irq_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsEnabledIT_TE(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_te_active(dma, id); } static inline bool stm32_dma_is_dme_irq_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsEnabledIT_DME(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_dme_active(dma, id); } static inline bool stm32_dma_is_fe_irq_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsEnabledIT_FE(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_fe_active(dma, id); } bool stm32_dma_is_irq_active(DMA_TypeDef *dma, uint32_t id) { return stm32_dma_is_tc_irq_active(dma, id) || stm32_dma_is_ht_irq_active(dma, id) || stm32_dma_is_te_irq_active(dma, id) || stm32_dma_is_dme_irq_active(dma, id) || stm32_dma_is_fe_irq_active(dma, id); } void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id) { dma_stm32_clear_te(dma, id); dma_stm32_clear_dme(dma, id); dma_stm32_clear_fe(dma, id); } bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, uint32_t id) { if (LL_DMA_IsEnabledIT_FE(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_fe_active(dma, id)) { return true; } return false; } bool stm32_dma_is_unexpected_irq_happened(DMA_TypeDef *dma, uint32_t id) { if (LL_DMA_IsEnabledIT_FE(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_fe_active(dma, id)) { LOG_ERR("FiFo error."); stm32_dma_dump_stream_irq(dma, id); stm32_dma_clear_stream_irq(dma, id); return true; } return false; } void stm32_dma_enable_stream(DMA_TypeDef *dma, uint32_t id) { LL_DMA_EnableStream(dma, dma_stm32_id_to_stream(id)); } bool stm32_dma_is_enabled_stream(DMA_TypeDef *dma, uint32_t id) { if (LL_DMA_IsEnabledStream(dma, dma_stm32_id_to_stream(id)) == 1) { return true; } return false; } int stm32_dma_disable_stream(DMA_TypeDef *dma, uint32_t id) { LL_DMA_DisableStream(dma, dma_stm32_id_to_stream(id)); while (stm32_dma_is_enabled_stream(dma, id)) { } 
dma_stm32_clear_tc(dma, id); return 0; } void stm32_dma_disable_fifo_irq(DMA_TypeDef *dma, uint32_t id) { LL_DMA_DisableIT_FE(dma, dma_stm32_id_to_stream(id)); } #if !defined(CONFIG_DMAMUX_STM32) void stm32_dma_config_channel_function(DMA_TypeDef *dma, uint32_t id, uint32_t slot) { LL_DMA_SetChannelSelection(dma, dma_stm32_id_to_stream(id), dma_stm32_slot_to_channel(slot)); } #endif uint32_t stm32_dma_get_mburst(struct dma_config *config, bool source_periph) { uint32_t memory_burst; if (source_periph) { memory_burst = config->dest_burst_length; } else { memory_burst = config->source_burst_length; } switch (memory_burst) { case 1: return LL_DMA_MBURST_SINGLE; case 4: return LL_DMA_MBURST_INC4; case 8: return LL_DMA_MBURST_INC8; case 16: return LL_DMA_MBURST_INC16; default: LOG_ERR("Memory burst size error," "using single burst as default"); return LL_DMA_MBURST_SINGLE; } } uint32_t stm32_dma_get_pburst(struct dma_config *config, bool source_periph) { uint32_t periph_burst; if (source_periph) { periph_burst = config->source_burst_length; } else { periph_burst = config->dest_burst_length; } switch (periph_burst) { case 1: return LL_DMA_PBURST_SINGLE; case 4: return LL_DMA_PBURST_INC4; case 8: return LL_DMA_PBURST_INC8; case 16: return LL_DMA_PBURST_INC16; default: LOG_ERR("Peripheral burst size error," "using single burst as default"); return LL_DMA_PBURST_SINGLE; } } /* * This function checks if the msize, mburst and fifo level is * compatible. If they are not compatible, refer to the 'FIFO' * section in the 'DMA' chapter in the Reference Manual for more * information. * This function does not have the obligation of checking the parameters. */ bool stm32_dma_check_fifo_mburst(LL_DMA_InitTypeDef *DMAx) { uint32_t msize = DMAx->MemoryOrM2MDstDataSize; uint32_t fifo_level = DMAx->FIFOThreshold; uint32_t mburst = DMAx->MemBurst; switch (msize) { case LL_DMA_MDATAALIGN_BYTE: switch (mburst) { case LL_DMA_MBURST_INC4: return true; case LL_DMA_MBURST_INC8: if (fifo_level == LL_DMA_FIFOTHRESHOLD_1_2 || fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) { return true; } break; case LL_DMA_MBURST_INC16: if (fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) { return true; } break; } break; case LL_DMA_MDATAALIGN_HALFWORD: switch (mburst) { case LL_DMA_MBURST_INC4: if (fifo_level == LL_DMA_FIFOTHRESHOLD_1_2 || fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) { return true; } break; case LL_DMA_MBURST_INC8: if (fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) { return true; } break; } break; case LL_DMA_MDATAALIGN_WORD: if (mburst == LL_DMA_MBURST_INC4 && fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) { return true; } } /* Other combinations are forbidden. */ return false; } uint32_t stm32_dma_get_fifo_threshold(uint16_t fifo_mode_control) { switch (fifo_mode_control) { case 0: return LL_DMA_FIFOTHRESHOLD_1_4; case 1: return LL_DMA_FIFOTHRESHOLD_1_2; case 2: return LL_DMA_FIFOTHRESHOLD_3_4; case 3: return LL_DMA_FIFOTHRESHOLD_FULL; default: LOG_WRN("FIFO threshold parameter error, reset to 1/4"); return LL_DMA_FIFOTHRESHOLD_1_4; } } ```
/content/code_sandbox/drivers/dma/dma_stm32_v1.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,388
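
The switch ladder in stm32_dma_check_fifo_mburst() is equivalent to a small numeric rule: with the 16-byte stream FIFO on these parts, a memory burst is legal when it fits in the FIFO and its total byte count evenly divides the configured threshold level (single bursts never reach the check and are always legal). A standalone restatement, with an illustrative helper name:

```c
/*
 * Numeric restatement of stm32_dma_check_fifo_mburst(), assuming the
 * 16-byte stream FIFO of the F2/F4/F7 DMA.
 */
#include <stdbool.h>
#include <stdint.h>

#define FIFO_BYTES 16U

/*
 * msize_bytes: 1, 2 or 4; mburst_beats: 4, 8 or 16;
 * threshold_quarters: 1 (1/4), 2 (1/2), 3 (3/4) or 4 (full).
 */
static bool fifo_mburst_ok(uint32_t msize_bytes, uint32_t mburst_beats,
			   uint32_t threshold_quarters)
{
	uint32_t burst_bytes = msize_bytes * mburst_beats;
	uint32_t threshold_bytes = FIFO_BYTES * threshold_quarters / 4U;

	return burst_bytes <= FIFO_BYTES && threshold_bytes % burst_bytes == 0U;
}
```

For instance, half-word msize with an INC8 burst moves 16 bytes, which divides only the FULL threshold (16 bytes) evenly, matching the LL_DMA_FIFOTHRESHOLD_FULL case in the switch above.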
```unknown
# DesignWare DMA configuration options

config DMA_DW_AXI
	bool "DesignWare AXI DMA driver"
	default y
	depends on DT_HAS_SNPS_DESIGNWARE_DMA_AXI_ENABLED
	imply DMA_64BIT
	help
	  DesignWare AXI DMA driver.

if DMA_DW_AXI

config DMA_DW_AXI_MAX_DESC
	int "Number of LLI descriptors to allocate"
	default 10
	help
	  Number of descriptors per channel in a statically allocated
	  pool. Each channel has its own dedicated pool.

config DMA_DW_AXI_LLI_SUPPORT
	bool "Hardware supports linked-list multi-block transfer"
	default y
	help
	  Enable this option if the hardware supports linked-list
	  multi-block transfers.

config DMA_CHANNEL_STATUS_TIMEOUT
	int "Channel status timeout"
	default 1000
	help
	  Maximum timeout used when aborting or disabling a channel.

config DMA_DW_AXI_MAX_BURST_TXN_LEN
	int "Maximum burst transaction length"
	default 8
	help
	  Maximum number of source and destination data units
	  transferred per burst.

config DMA_DW_AXI_DATA_WIDTH
	int "Data bus width"
	default 64
	help
	  Width of the AXI master interface data bus.

config DMA_DW_AXI_MAX_BLOCK_TS
	int "Maximum block size"
	default 32767
	help
	  Maximum value of the block transfer size.

endif # DMA_DW_AXI
```
/content/code_sandbox/drivers/dma/Kconfig.dw_axi_dmac
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
298
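
DMA_DW_AXI_MAX_DESC above sizes a static, per-channel pool of linked-list items (LLIs). The sketch below shows the layout this implies; struct axi_lli, its fields, the channel count, and the alignment are placeholders rather than the driver's actual definitions.

```c
/*
 * Sketch of the static per-channel descriptor pool that
 * DMA_DW_AXI_MAX_DESC sizes. All names and fields here are
 * placeholders, not the driver's real definitions.
 */
#include <stdint.h>

#define NUM_CHANNELS 8  /* placeholder; the real count comes from devicetree */
#define MAX_DESC     10 /* stands in for CONFIG_DMA_DW_AXI_MAX_DESC */

struct axi_lli {           /* hardware-defined linked-list item */
	uint64_t sar;      /* source address */
	uint64_t dar;      /* destination address */
	uint32_t block_ts; /* block transfer size */
	uint32_t ctl;      /* control word */
	uint64_t llp;      /* next item; 0 terminates the list */
};

/* one dedicated pool per channel, as the MAX_DESC help text describes */
static struct axi_lli lli_pool[NUM_CHANNELS][MAX_DESC] __attribute__((aligned(64)));

/* chaining one channel's items into a hardware-walkable list */
static void chain_channel(uint32_t channel)
{
	for (int i = 0; i < MAX_DESC - 1; i++) {
		lli_pool[channel][i].llp =
			(uint64_t)(uintptr_t)&lli_pool[channel][i + 1];
	}
	lli_pool[channel][MAX_DESC - 1].llp = 0; /* end of the list */
}
```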
```c /* * */ /** * @brief Common part of BDMA drivers for stm32. */ #include "dma_stm32_bdma.h" #include <zephyr/init.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/dma/dma_stm32.h> #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_stm32_bdma, CONFIG_DMA_LOG_LEVEL); #define DT_DRV_COMPAT st_stm32_bdma #define BDMA_STM32_0_CHANNEL_COUNT 8 static const uint32_t table_m_size[] = { LL_BDMA_MDATAALIGN_BYTE, LL_BDMA_MDATAALIGN_HALFWORD, LL_BDMA_MDATAALIGN_WORD, }; static const uint32_t table_p_size[] = { LL_BDMA_PDATAALIGN_BYTE, LL_BDMA_PDATAALIGN_HALFWORD, LL_BDMA_PDATAALIGN_WORD, }; uint32_t bdma_stm32_id_to_channel(uint32_t id) { static const uint32_t channel_nr[] = { LL_BDMA_CHANNEL_0, LL_BDMA_CHANNEL_1, LL_BDMA_CHANNEL_2, LL_BDMA_CHANNEL_3, LL_BDMA_CHANNEL_4, LL_BDMA_CHANNEL_5, LL_BDMA_CHANNEL_6, LL_BDMA_CHANNEL_7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(channel_nr)); return channel_nr[id]; } #if !defined(CONFIG_DMAMUX_STM32) uint32_t bdma_stm32_slot_to_channel(uint32_t slot) { static const uint32_t channel_nr[] = { LL_BDMA_CHANNEL_0, LL_BDMA_CHANNEL_1, LL_BDMA_CHANNEL_2, LL_BDMA_CHANNEL_3, LL_BDMA_CHANNEL_4, LL_BDMA_CHANNEL_5, LL_BDMA_CHANNEL_6, LL_BDMA_CHANNEL_7, }; __ASSERT_NO_MSG(slot < ARRAY_SIZE(channel_nr)); return channel_nr[slot]; } #endif void bdma_stm32_clear_ht(BDMA_TypeDef *DMAx, uint32_t id) { static const bdma_stm32_clear_flag_func func[] = { LL_BDMA_ClearFlag_HT0, LL_BDMA_ClearFlag_HT1, LL_BDMA_ClearFlag_HT2, LL_BDMA_ClearFlag_HT3, LL_BDMA_ClearFlag_HT4, LL_BDMA_ClearFlag_HT5, LL_BDMA_ClearFlag_HT6, LL_BDMA_ClearFlag_HT7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } void bdma_stm32_clear_tc(BDMA_TypeDef *DMAx, uint32_t id) { static const bdma_stm32_clear_flag_func func[] = { LL_BDMA_ClearFlag_TC0, LL_BDMA_ClearFlag_TC1, LL_BDMA_ClearFlag_TC2, LL_BDMA_ClearFlag_TC3, LL_BDMA_ClearFlag_TC4, LL_BDMA_ClearFlag_TC5, LL_BDMA_ClearFlag_TC6, LL_BDMA_ClearFlag_TC7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } bool bdma_stm32_is_ht_active(BDMA_TypeDef *DMAx, uint32_t id) { static const bdma_stm32_check_flag_func func[] = { LL_BDMA_IsActiveFlag_HT0, LL_BDMA_IsActiveFlag_HT1, LL_BDMA_IsActiveFlag_HT2, LL_BDMA_IsActiveFlag_HT3, LL_BDMA_IsActiveFlag_HT4, LL_BDMA_IsActiveFlag_HT5, LL_BDMA_IsActiveFlag_HT6, LL_BDMA_IsActiveFlag_HT7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } bool bdma_stm32_is_tc_active(BDMA_TypeDef *DMAx, uint32_t id) { static const bdma_stm32_check_flag_func func[] = { LL_BDMA_IsActiveFlag_TC0, LL_BDMA_IsActiveFlag_TC1, LL_BDMA_IsActiveFlag_TC2, LL_BDMA_IsActiveFlag_TC3, LL_BDMA_IsActiveFlag_TC4, LL_BDMA_IsActiveFlag_TC5, LL_BDMA_IsActiveFlag_TC6, LL_BDMA_IsActiveFlag_TC7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } void bdma_stm32_clear_te(BDMA_TypeDef *DMAx, uint32_t id) { static const bdma_stm32_clear_flag_func func[] = { LL_BDMA_ClearFlag_TE0, LL_BDMA_ClearFlag_TE1, LL_BDMA_ClearFlag_TE2, LL_BDMA_ClearFlag_TE3, LL_BDMA_ClearFlag_TE4, LL_BDMA_ClearFlag_TE5, LL_BDMA_ClearFlag_TE6, LL_BDMA_ClearFlag_TE7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } void bdma_stm32_clear_gi(BDMA_TypeDef *DMAx, uint32_t id) { static const bdma_stm32_clear_flag_func func[] = { LL_BDMA_ClearFlag_GI0, LL_BDMA_ClearFlag_GI1, LL_BDMA_ClearFlag_GI2, LL_BDMA_ClearFlag_GI3, LL_BDMA_ClearFlag_GI4, LL_BDMA_ClearFlag_GI5, LL_BDMA_ClearFlag_GI6, LL_BDMA_ClearFlag_GI7, }; __ASSERT_NO_MSG(id < 
ARRAY_SIZE(func)); func[id](DMAx); } bool bdma_stm32_is_te_active(BDMA_TypeDef *DMAx, uint32_t id) { static const bdma_stm32_check_flag_func func[] = { LL_BDMA_IsActiveFlag_TE0, LL_BDMA_IsActiveFlag_TE1, LL_BDMA_IsActiveFlag_TE2, LL_BDMA_IsActiveFlag_TE3, LL_BDMA_IsActiveFlag_TE4, LL_BDMA_IsActiveFlag_TE5, LL_BDMA_IsActiveFlag_TE6, LL_BDMA_IsActiveFlag_TE7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } bool bdma_stm32_is_gi_active(BDMA_TypeDef *DMAx, uint32_t id) { static const bdma_stm32_check_flag_func func[] = { LL_BDMA_IsActiveFlag_GI0, LL_BDMA_IsActiveFlag_GI1, LL_BDMA_IsActiveFlag_GI2, LL_BDMA_IsActiveFlag_GI3, LL_BDMA_IsActiveFlag_GI4, LL_BDMA_IsActiveFlag_GI5, LL_BDMA_IsActiveFlag_GI6, LL_BDMA_IsActiveFlag_GI7, }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } void stm32_bdma_dump_channel_irq(BDMA_TypeDef *dma, uint32_t id) { LOG_INF("te: %d, ht: %d, tc: %d, gi: %d", bdma_stm32_is_te_active(dma, id), bdma_stm32_is_ht_active(dma, id), bdma_stm32_is_tc_active(dma, id), bdma_stm32_is_gi_active(dma, id)); } inline bool stm32_bdma_is_tc_irq_active(BDMA_TypeDef *dma, uint32_t id) { return LL_BDMA_IsEnabledIT_TC(dma, bdma_stm32_id_to_channel(id)) && bdma_stm32_is_tc_active(dma, id); } inline bool stm32_bdma_is_ht_irq_active(BDMA_TypeDef *dma, uint32_t id) { return LL_BDMA_IsEnabledIT_HT(dma, bdma_stm32_id_to_channel(id)) && bdma_stm32_is_ht_active(dma, id); } static inline bool stm32_bdma_is_te_irq_active(BDMA_TypeDef *dma, uint32_t id) { return LL_BDMA_IsEnabledIT_TE(dma, bdma_stm32_id_to_channel(id)) && bdma_stm32_is_te_active(dma, id); } bool stm32_bdma_is_irq_active(BDMA_TypeDef *dma, uint32_t id) { return stm32_bdma_is_tc_irq_active(dma, id) || stm32_bdma_is_ht_irq_active(dma, id) || stm32_bdma_is_te_irq_active(dma, id); } void stm32_bdma_clear_channel_irq(BDMA_TypeDef *dma, uint32_t id) { bdma_stm32_clear_gi(dma, id); bdma_stm32_clear_tc(dma, id); bdma_stm32_clear_ht(dma, id); bdma_stm32_clear_te(dma, id); } bool stm32_bdma_is_enabled_channel(BDMA_TypeDef *dma, uint32_t id) { if (LL_BDMA_IsEnabledChannel(dma, bdma_stm32_id_to_channel(id)) == 1) { return true; } return false; } int stm32_bdma_disable_channel(BDMA_TypeDef *dma, uint32_t id) { LL_BDMA_DisableChannel(dma, bdma_stm32_id_to_channel(id)); if (!LL_BDMA_IsEnabledChannel(dma, bdma_stm32_id_to_channel(id))) { return 0; } return -EAGAIN; } void stm32_bdma_enable_channel(BDMA_TypeDef *dma, uint32_t id) { LL_BDMA_EnableChannel(dma, bdma_stm32_id_to_channel(id)); } static void bdma_stm32_dump_channel_irq(const struct device *dev, uint32_t id) { const struct bdma_stm32_config *config = dev->config; BDMA_TypeDef *dma = (BDMA_TypeDef *)(config->base); stm32_bdma_dump_channel_irq(dma, id); } static void bdma_stm32_clear_channel_irq(const struct device *dev, uint32_t id) { const struct bdma_stm32_config *config = dev->config; BDMA_TypeDef *dma = (BDMA_TypeDef *)(config->base); bdma_stm32_clear_tc(dma, id); bdma_stm32_clear_ht(dma, id); stm32_bdma_clear_channel_irq(dma, id); } static void bdma_stm32_irq_handler(const struct device *dev, uint32_t id) { const struct bdma_stm32_config *config = dev->config; BDMA_TypeDef *dma = (BDMA_TypeDef *)(config->base); struct bdma_stm32_channel *channel; uint32_t callback_arg; __ASSERT_NO_MSG(id < config->max_channels); channel = &config->channels[id]; /* The busy channel is pertinent if not overridden by the HAL */ if ((channel->hal_override != true) && (channel->busy == false)) { /* * When DMA channel is not overridden by HAL, * ignore irq if the channel is 
not busy anymore */ bdma_stm32_clear_channel_irq(dev, id); return; } #ifdef CONFIG_DMAMUX_STM32 callback_arg = channel->mux_channel; #else callback_arg = id; #endif /* CONFIG_DMAMUX_STM32 */ if (!IS_ENABLED(CONFIG_DMAMUX_STM32)) { channel->busy = false; } /* the dma channel id is in range from 0..<dma-requests> */ if (stm32_bdma_is_ht_irq_active(dma, id)) { /* Let HAL DMA handle flags on its own */ if (!channel->hal_override) { bdma_stm32_clear_ht(dma, id); } channel->bdma_callback(dev, channel->user_data, callback_arg, 0); } else if (stm32_bdma_is_tc_irq_active(dma, id)) { #ifdef CONFIG_DMAMUX_STM32 channel->busy = false; #endif /* Let HAL DMA handle flags on its own */ if (!channel->hal_override) { bdma_stm32_clear_tc(dma, id); } channel->bdma_callback(dev, channel->user_data, callback_arg, 0); } else { LOG_ERR("Transfer Error."); bdma_stm32_dump_channel_irq(dev, id); bdma_stm32_clear_channel_irq(dev, id); channel->bdma_callback(dev, channel->user_data, callback_arg, -EIO); } } static int bdma_stm32_get_priority(uint8_t priority, uint32_t *ll_priority) { switch (priority) { case 0x0: *ll_priority = LL_BDMA_PRIORITY_LOW; break; case 0x1: *ll_priority = LL_BDMA_PRIORITY_MEDIUM; break; case 0x2: *ll_priority = LL_BDMA_PRIORITY_HIGH; break; case 0x3: *ll_priority = LL_BDMA_PRIORITY_VERYHIGH; break; default: LOG_ERR("Priority error. %d", priority); return -EINVAL; } return 0; } static int bdma_stm32_get_direction(enum dma_channel_direction direction, uint32_t *ll_direction) { switch (direction) { case MEMORY_TO_MEMORY: *ll_direction = LL_BDMA_DIRECTION_MEMORY_TO_MEMORY; break; case MEMORY_TO_PERIPHERAL: *ll_direction = LL_BDMA_DIRECTION_MEMORY_TO_PERIPH; break; case PERIPHERAL_TO_MEMORY: *ll_direction = LL_BDMA_DIRECTION_PERIPH_TO_MEMORY; break; default: LOG_ERR("Direction error. %d", direction); return -EINVAL; } return 0; } static int bdma_stm32_get_memory_increment(enum dma_addr_adj increment, uint32_t *ll_increment) { switch (increment) { case DMA_ADDR_ADJ_INCREMENT: *ll_increment = LL_BDMA_MEMORY_INCREMENT; break; case DMA_ADDR_ADJ_NO_CHANGE: *ll_increment = LL_BDMA_MEMORY_NOINCREMENT; break; case DMA_ADDR_ADJ_DECREMENT: return -ENOTSUP; default: LOG_ERR("Memory increment error. %d", increment); return -EINVAL; } return 0; } static int bdma_stm32_get_periph_increment(enum dma_addr_adj increment, uint32_t *ll_increment) { switch (increment) { case DMA_ADDR_ADJ_INCREMENT: *ll_increment = LL_BDMA_PERIPH_INCREMENT; break; case DMA_ADDR_ADJ_NO_CHANGE: *ll_increment = LL_BDMA_PERIPH_NOINCREMENT; break; case DMA_ADDR_ADJ_DECREMENT: return -ENOTSUP; default: LOG_ERR("Periph increment error. 
%d", increment); return -EINVAL; } return 0; } static int bdma_stm32_disable_channel(BDMA_TypeDef *bdma, uint32_t id) { int count = 0; for (;;) { if (stm32_bdma_disable_channel(bdma, id) == 0) { return 0; } /* After trying for 5 seconds, give up */ if (count++ > (5 * 1000)) { return -EBUSY; } k_sleep(K_MSEC(1)); } return 0; } static bool bdma_stm32_is_valid_memory_address(const uint32_t address, const uint32_t size) { /* The BDMA can only access memory addresses in SRAM4 */ const uint32_t sram4_start = DT_REG_ADDR(DT_NODELABEL(sram4)); const uint32_t sram4_end = sram4_start + DT_REG_SIZE(DT_NODELABEL(sram4)); if (address < sram4_start) { return false; } if (address + size > sram4_end) { return false; } return true; } BDMA_STM32_EXPORT_API int bdma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config) { const struct bdma_stm32_config *dev_config = dev->config; struct bdma_stm32_channel *channel = &dev_config->channels[id]; BDMA_TypeDef *bdma = (BDMA_TypeDef *)dev_config->base; LL_BDMA_InitTypeDef BDMA_InitStruct; int index; int ret; LL_BDMA_StructInit(&BDMA_InitStruct); if (id >= dev_config->max_channels) { LOG_ERR("cannot configure the bdma channel %d.", id); return -EINVAL; } if (channel->busy) { LOG_ERR("bdma channel %d is busy.", id); return -EBUSY; } if (bdma_stm32_disable_channel(bdma, id) != 0) { LOG_ERR("could not disable bdma channel %d.", id); return -EBUSY; } bdma_stm32_clear_channel_irq(dev, id); if (config->head_block->block_size > BDMA_STM32_MAX_DATA_ITEMS) { LOG_ERR("Data size too big: %d\n", config->head_block->block_size); return -EINVAL; } if ((config->channel_direction == MEMORY_TO_MEMORY) && (!dev_config->support_m2m)) { LOG_ERR("Memcopy not supported for device %s", dev->name); return -ENOTSUP; } /* support only the same data width for source and dest */ if (config->dest_data_size != config->source_data_size) { LOG_ERR("source and dest data size differ."); return -EINVAL; } if (config->source_data_size != 4U && config->source_data_size != 2U && config->source_data_size != 1U) { LOG_ERR("source and dest unit size error, %d", config->source_data_size); return -EINVAL; } /* * STM32's circular mode will auto reset both source address * counter and destination address counter. 
*/ if (config->head_block->source_reload_en != config->head_block->dest_reload_en) { LOG_ERR("source_reload_en and dest_reload_en must " "be the same."); return -EINVAL; } channel->busy = true; channel->bdma_callback = config->dma_callback; channel->direction = config->channel_direction; channel->user_data = config->user_data; channel->src_size = config->source_data_size; channel->dst_size = config->dest_data_size; /* check dest or source memory address, warn if 0 */ if (config->head_block->source_address == 0) { LOG_WRN("source_buffer address is null."); } if (config->head_block->dest_address == 0) { LOG_WRN("dest_buffer address is null."); } /* ensure all memory addresses are in SRAM4 */ if (channel->direction == MEMORY_TO_PERIPHERAL || channel->direction == MEMORY_TO_MEMORY) { if (!bdma_stm32_is_valid_memory_address(config->head_block->source_address, config->head_block->block_size)) { LOG_ERR("invalid source address"); return -EINVAL; } } if (channel->direction == PERIPHERAL_TO_MEMORY || channel->direction == MEMORY_TO_MEMORY) { if (!bdma_stm32_is_valid_memory_address(config->head_block->dest_address, config->head_block->block_size)) { LOG_ERR("invalid destination address"); return -EINVAL; } } if (channel->direction == MEMORY_TO_PERIPHERAL) { BDMA_InitStruct.MemoryOrM2MDstAddress = config->head_block->source_address; BDMA_InitStruct.PeriphOrM2MSrcAddress = config->head_block->dest_address; } else { BDMA_InitStruct.PeriphOrM2MSrcAddress = config->head_block->source_address; BDMA_InitStruct.MemoryOrM2MDstAddress = config->head_block->dest_address; } uint16_t memory_addr_adj = 0, periph_addr_adj = 0; ret = bdma_stm32_get_priority(config->channel_priority, &BDMA_InitStruct.Priority); if (ret < 0) { return ret; } ret = bdma_stm32_get_direction(config->channel_direction, &BDMA_InitStruct.Direction); if (ret < 0) { return ret; } switch (config->channel_direction) { case MEMORY_TO_MEMORY: case PERIPHERAL_TO_MEMORY: memory_addr_adj = config->head_block->dest_addr_adj; periph_addr_adj = config->head_block->source_addr_adj; break; case MEMORY_TO_PERIPHERAL: memory_addr_adj = config->head_block->source_addr_adj; periph_addr_adj = config->head_block->dest_addr_adj; break; /* Direction has been asserted in bdma_stm32_get_direction. 
*/ default: LOG_ERR("Channel direction error (%d).", config->channel_direction); return -EINVAL; } ret = bdma_stm32_get_memory_increment(memory_addr_adj, &BDMA_InitStruct.MemoryOrM2MDstIncMode); if (ret < 0) { return ret; } ret = bdma_stm32_get_periph_increment(periph_addr_adj, &BDMA_InitStruct.PeriphOrM2MSrcIncMode); if (ret < 0) { return ret; } if (config->head_block->source_reload_en) { BDMA_InitStruct.Mode = LL_BDMA_MODE_CIRCULAR; } else { BDMA_InitStruct.Mode = LL_BDMA_MODE_NORMAL; } channel->source_periph = (channel->direction == PERIPHERAL_TO_MEMORY); /* set the data width, when source_data_size equals dest_data_size */ index = find_lsb_set(config->source_data_size) - 1; BDMA_InitStruct.PeriphOrM2MSrcDataSize = table_p_size[index]; index = find_lsb_set(config->dest_data_size) - 1; BDMA_InitStruct.MemoryOrM2MDstDataSize = table_m_size[index]; if (channel->source_periph) { BDMA_InitStruct.NbData = config->head_block->block_size / config->source_data_size; } else { BDMA_InitStruct.NbData = config->head_block->block_size / config->dest_data_size; } #if defined(CONFIG_DMAMUX_STM32) /* * with bdma mux, * the request ID is stored in the dma_slot */ BDMA_InitStruct.PeriphRequest = config->dma_slot; #endif LL_BDMA_Init(bdma, bdma_stm32_id_to_channel(id), &BDMA_InitStruct); LL_BDMA_EnableIT_TC(bdma, bdma_stm32_id_to_channel(id)); /* Enable Half-Transfer irq if circular mode is enabled */ if (config->head_block->source_reload_en) { LL_BDMA_EnableIT_HT(bdma, bdma_stm32_id_to_channel(id)); } return ret; } BDMA_STM32_EXPORT_API int bdma_stm32_reload(const struct device *dev, uint32_t id, uint32_t src, uint32_t dst, size_t size) { const struct bdma_stm32_config *config = dev->config; BDMA_TypeDef *bdma = (BDMA_TypeDef *)(config->base); struct bdma_stm32_channel *channel; if (id >= config->max_channels) { return -EINVAL; } channel = &config->channels[id]; if (bdma_stm32_disable_channel(bdma, id) != 0) { return -EBUSY; } switch (channel->direction) { case MEMORY_TO_PERIPHERAL: LL_BDMA_SetMemoryAddress(bdma, bdma_stm32_id_to_channel(id), src); LL_BDMA_SetPeriphAddress(bdma, bdma_stm32_id_to_channel(id), dst); break; case MEMORY_TO_MEMORY: case PERIPHERAL_TO_MEMORY: LL_BDMA_SetPeriphAddress(bdma, bdma_stm32_id_to_channel(id), src); LL_BDMA_SetMemoryAddress(bdma, bdma_stm32_id_to_channel(id), dst); break; default: return -EINVAL; } if (channel->source_periph) { LL_BDMA_SetDataLength(bdma, bdma_stm32_id_to_channel(id), size / channel->src_size); } else { LL_BDMA_SetDataLength(bdma, bdma_stm32_id_to_channel(id), size / channel->dst_size); } /* When reloading the dma, the channel is busy again before enabling */ channel->busy = true; stm32_bdma_enable_channel(bdma, id); return 0; } BDMA_STM32_EXPORT_API int bdma_stm32_start(const struct device *dev, uint32_t id) { const struct bdma_stm32_config *config = dev->config; BDMA_TypeDef *bdma = (BDMA_TypeDef *)(config->base); struct bdma_stm32_channel *channel; /* Only M2P or M2M mode can be started manually. 
*/ if (id >= config->max_channels) { return -EINVAL; } /* Repeated start : return now if channel is already started */ if (stm32_bdma_is_enabled_channel(bdma, id)) { return 0; } /* When starting the dma, the channel is busy before enabling */ channel = &config->channels[id]; channel->busy = true; bdma_stm32_clear_channel_irq(dev, id); stm32_bdma_enable_channel(bdma, id); return 0; } BDMA_STM32_EXPORT_API int bdma_stm32_stop(const struct device *dev, uint32_t id) { const struct bdma_stm32_config *config = dev->config; struct bdma_stm32_channel *channel = &config->channels[id]; BDMA_TypeDef *bdma = (BDMA_TypeDef *)(config->base); if (id >= config->max_channels) { return -EINVAL; } /* Repeated stop : return now if channel is already stopped */ if (!stm32_bdma_is_enabled_channel(bdma, id)) { return 0; } /* in bdma_stm32_configure, enabling is done regardless of defines */ LL_BDMA_DisableIT_TC(bdma, bdma_stm32_id_to_channel(id)); LL_BDMA_DisableIT_HT(bdma, bdma_stm32_id_to_channel(id)); bdma_stm32_disable_channel(bdma, id); bdma_stm32_clear_channel_irq(dev, id); /* Finally, flag channel as free */ channel->busy = false; return 0; } static int bdma_stm32_init(const struct device *dev) { const struct bdma_stm32_config *config = dev->config; const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } if (clock_control_on(clk, (clock_control_subsys_t) &config->pclken) != 0) { LOG_ERR("clock op failed\n"); return -EIO; } config->config_irq(dev); for (uint32_t i = 0; i < config->max_channels; i++) { config->channels[i].busy = false; #ifdef CONFIG_DMAMUX_STM32 /* each further channel->mux_channel is fixed here */ config->channels[i].mux_channel = i + config->offset; #endif /* CONFIG_DMAMUX_STM32 */ } ((struct bdma_stm32_data *)dev->data)->dma_ctx.magic = 0; ((struct bdma_stm32_data *)dev->data)->dma_ctx.dma_channels = 0; ((struct bdma_stm32_data *)dev->data)->dma_ctx.atomic = 0; /* The BDMA can only access SRAM4 and assumes it's nocachable * This check verifies that the non-cachable flag is set in the DTS. 
* For example: * &sram4 { * zephyr,memory-attr = "RAM_NOCACHE"; * }; */ #if DT_NODE_HAS_PROP(DT_NODELABEL(sram4), zephyr_memory_attr) if ((DT_PROP(DT_NODELABEL(sram4), zephyr_memory_attr) & DT_MEM_ARM_MPU_RAM_NOCACHE) == 0) { LOG_ERR("SRAM4 is not set as non-cachable."); return -EIO; } #else #error "BDMA driver expects SRAM4 to be set as RAM_NOCACHE in DTS" #endif return 0; } BDMA_STM32_EXPORT_API int bdma_stm32_get_status(const struct device *dev, uint32_t id, struct dma_status *stat) { const struct bdma_stm32_config *config = dev->config; BDMA_TypeDef *bdma = (BDMA_TypeDef *)(config->base); struct bdma_stm32_channel *channel; if (id >= config->max_channels) { return -EINVAL; } channel = &config->channels[id]; stat->pending_length = LL_BDMA_GetDataLength(bdma, bdma_stm32_id_to_channel(id)); stat->dir = channel->direction; stat->busy = channel->busy; return 0; } static const struct dma_driver_api dma_funcs = { .reload = bdma_stm32_reload, .config = bdma_stm32_configure, .start = bdma_stm32_start, .stop = bdma_stm32_stop, .get_status = bdma_stm32_get_status, }; #ifdef CONFIG_DMAMUX_STM32 #define BDMA_STM32_OFFSET_INIT(index) \ .offset = DT_INST_PROP(index, dma_offset), #else #define BDMA_STM32_OFFSET_INIT(index) #endif /* CONFIG_DMAMUX_STM32 */ #define BDMA_STM32_INIT_DEV(index) \ static struct bdma_stm32_channel \ bdma_stm32_channels_##index[BDMA_STM32_##index##_CHANNEL_COUNT];\ \ const struct bdma_stm32_config bdma_stm32_config_##index = { \ .pclken = { .bus = DT_INST_CLOCKS_CELL(index, bus), \ .enr = DT_INST_CLOCKS_CELL(index, bits) }, \ .config_irq = bdma_stm32_config_irq_##index, \ .base = DT_INST_REG_ADDR(index), \ .support_m2m = DT_INST_PROP(index, st_mem2mem), \ .max_channels = BDMA_STM32_##index##_CHANNEL_COUNT, \ .channels = bdma_stm32_channels_##index, \ BDMA_STM32_OFFSET_INIT(index) \ }; \ \ static struct bdma_stm32_data bdma_stm32_data_##index = { \ }; \ \ DEVICE_DT_INST_DEFINE(index, \ &bdma_stm32_init, \ NULL, \ &bdma_stm32_data_##index, &bdma_stm32_config_##index, \ PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \ &dma_funcs) #define BDMA_STM32_DEFINE_IRQ_HANDLER(bdma, chan) \ static void bdma_stm32_irq_##bdma##_##chan(const struct device *dev) \ { \ bdma_stm32_irq_handler(dev, chan); \ } #define BDMA_STM32_IRQ_CONNECT(bdma, chan) \ do { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(bdma, chan, irq), \ DT_INST_IRQ_BY_IDX(bdma, chan, priority), \ bdma_stm32_irq_##bdma##_##chan, \ DEVICE_DT_INST_GET(bdma), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(bdma, chan, irq)); \ } while (false) #if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) #define BDMA_STM32_DEFINE_IRQ_HANDLER_GEN(i, _) \ BDMA_STM32_DEFINE_IRQ_HANDLER(0, i) LISTIFY(DT_NUM_IRQS(DT_DRV_INST(0)), BDMA_STM32_DEFINE_IRQ_HANDLER_GEN, (;)); static void bdma_stm32_config_irq_0(const struct device *dev) { ARG_UNUSED(dev); #define BDMA_STM32_IRQ_CONNECT_GEN(i, _) \ BDMA_STM32_IRQ_CONNECT(0, i); LISTIFY(DT_NUM_IRQS(DT_DRV_INST(0)), BDMA_STM32_IRQ_CONNECT_GEN, (;)); } BDMA_STM32_INIT_DEV(0); #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */ ```
/content/code_sandbox/drivers/dma/dma_stm32_bdma.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,196
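For reference, a minimal sketch of how an application might drive this driver through Zephyr's generic DMA API. The `bdma1` node label, the `.sram4` section name, the burst lengths, and channel id 0 are illustrative assumptions, not values taken from the driver; the driver itself requires buffers the BDMA can reach (SRAM4) and, for memory-to-memory transfers, an instance with `st,mem2mem` set:

```c
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Hypothetical node label; the real one comes from the board's DTS. */
static const struct device *const bdma_dev = DEVICE_DT_GET(DT_NODELABEL(bdma1));

/* The BDMA only reaches SRAM4, so both buffers go there. The ".sram4"
 * section is assumed to be provided by the application's linker script.
 */
static uint8_t src_buf[64] __attribute__((section(".sram4"))) = { 1, 2, 3, 4 };
static uint8_t dst_buf[64] __attribute__((section(".sram4")));

static K_SEM_DEFINE(xfer_done, 0, 1);

static void xfer_cb(const struct device *dev, void *user_data,
		    uint32_t channel, int status)
{
	if (status >= 0) {	/* negative status values report errors */
		k_sem_give(&xfer_done);
	}
}

int bdma_mem_to_mem_example(void)
{
	struct dma_block_config block = {
		.source_address = (uint32_t)(uintptr_t)src_buf,
		.dest_address = (uint32_t)(uintptr_t)dst_buf,
		.block_size = sizeof(src_buf),
		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 1,
		.dest_data_size = 1,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.block_count = 1,
		.head_block = &block,
		.dma_callback = xfer_cb,
	};
	int ret;

	if (!device_is_ready(bdma_dev)) {
		return -ENODEV;
	}

	/* Channel ids are 0-based, per the bounds checks in the driver. */
	ret = dma_config(bdma_dev, 0, &cfg);
	if (ret == 0) {
		ret = dma_start(bdma_dev, 0);
	}
	return ret ? ret : k_sem_take(&xfer_done, K_MSEC(100));
}
```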
```unknown
# Atmel XDMAC SAM driver configuration options

config DMA_SAM_XDMAC
	bool "Atmel SAM DMA (XDMAC) driver"
	default y
	depends on DT_HAS_ATMEL_SAM_XDMAC_ENABLED
	help
	  Enable Atmel SAM MCU Family Direct Memory Access (XDMAC) driver.
```
/content/code_sandbox/drivers/dma/Kconfig.sam_xdmac
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
70
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_ #define ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_ #include <zephyr/device.h> #include <zephyr/irq.h> #include <zephyr/drivers/dma.h> #include <zephyr/logging/log.h> #include "fsl_edma_soc_rev2.h" LOG_MODULE_REGISTER(nxp_edma); /* used for driver binding */ #define DT_DRV_COMPAT nxp_edma /* workaround the fact that device_map() is not defined for SoCs with no MMU */ #ifndef DEVICE_MMIO_IS_IN_RAM #define device_map(virt, phys, size, flags) *(virt) = (phys) #endif /* DEVICE_MMIO_IS_IN_RAM */ /* macros used to parse DTS properties */ /* used in conjunction with LISTIFY which expects F to also take a variable * number of arguments. Since IDENTITY doesn't do that we need to use a version * of it which also takes a variable number of arguments. */ #define IDENTITY_VARGS(V, ...) IDENTITY(V) /* used to generate an array of indexes for the channels */ #define _EDMA_CHANNEL_INDEX_ARRAY(inst)\ LISTIFY(DT_INST_PROP_LEN_OR(inst, valid_channels, 0), IDENTITY_VARGS, (,)) /* used to generate an array of indexes for the channels - this is different * from _EDMA_CHANNEL_INDEX_ARRAY because the number of channels is passed * explicitly through dma-channels so no need to deduce it from the length * of the valid-channels property. */ #define _EDMA_CHANNEL_INDEX_ARRAY_EXPLICIT(inst)\ LISTIFY(DT_INST_PROP_OR(inst, dma_channels, 0), IDENTITY_VARGS, (,)) /* used to generate an array of indexes for the interrupt */ #define _EDMA_INT_INDEX_ARRAY(inst)\ LISTIFY(DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)), IDENTITY_VARGS, (,)) /* used to register an ISR/arg pair. TODO: should we also use the priority? */ #define _EDMA_INT_CONNECT(idx, inst) \ IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, idx), \ 0, edma_isr, \ &channels_##inst[idx], 0) /* used to declare a struct edma_channel by the non-explicit macro suite */ #define _EDMA_CHANNEL_DECLARE(idx, inst) \ { \ .id = DT_INST_PROP_BY_IDX(inst, valid_channels, idx), \ .dev = DEVICE_DT_INST_GET(inst), \ .irq = DT_INST_IRQN_BY_IDX(inst, idx), \ } /* used to declare a struct edma_channel by the explicit macro suite */ #define _EDMA_CHANNEL_DECLARE_EXPLICIT(idx, inst) \ { \ .id = idx, \ .dev = DEVICE_DT_INST_GET(inst), \ .irq = DT_INST_IRQN_BY_IDX(inst, idx), \ } /* used to create an array of channel IDs via the valid-channels property */ #define _EDMA_CHANNEL_ARRAY(inst) \ { FOR_EACH_FIXED_ARG(_EDMA_CHANNEL_DECLARE, (,), \ inst, _EDMA_CHANNEL_INDEX_ARRAY(inst)) } /* used to create an array of channel IDs via the dma-channels property */ #define _EDMA_CHANNEL_ARRAY_EXPLICIT(inst) \ { FOR_EACH_FIXED_ARG(_EDMA_CHANNEL_DECLARE_EXPLICIT, (,), inst, \ _EDMA_CHANNEL_INDEX_ARRAY_EXPLICIT(inst)) } /* used to construct the channel array based on the specified property: * dma-channels or valid-channels. 
*/ #define EDMA_CHANNEL_ARRAY_GET(inst) \ COND_CODE_1(DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels), \ (_EDMA_CHANNEL_ARRAY_EXPLICIT(inst)), \ (_EDMA_CHANNEL_ARRAY(inst))) #define EDMA_HAL_CFG_GET(inst) \ COND_CODE_1(DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), hal_cfg_index), \ (s_edmaConfigs[DT_INST_PROP(inst, hal_cfg_index)]), \ (s_edmaConfigs[0])) /* used to register edma_isr for all specified interrupts */ #define EDMA_CONNECT_INTERRUPTS(inst) \ FOR_EACH_FIXED_ARG(_EDMA_INT_CONNECT, (;), \ inst, _EDMA_INT_INDEX_ARRAY(inst)) #define EDMA_CHANS_ARE_CONTIGUOUS(inst)\ DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels) /* utility macros */ /* a few words about EDMA_CHAN_PRODUCE_CONSUME_{A/B}: * - in the context of cyclic buffers we introduce * the concepts of consumer and producer channels. * * - a consumer channel is a channel for which the * DMA copies data from a buffer, thus leading to * less data in said buffer (data is consumed with * each transfer). * * - a producer channel is a channel for which the * DMA copies data into a buffer, thus leading to * more data in said buffer (data is produced with * each transfer). * * - for consumer channels, each DMA interrupt will * signal that an amount of data has been consumed * from the buffer (half of the buffer size if * HALFMAJOR is enabled, the whole buffer otherwise). * * - for producer channels, each DMA interrupt will * signal that an amount of data has been added * to the buffer. * * - to signal this, the ISR uses EDMA_CHAN_PRODUCE_CONSUME_A * which will "consume" data from the buffer for * consumer channels and "produce" data for * producer channels. * * - since the upper layers using this driver need * to let the EDMA driver know whenever they've produced * (in the case of consumer channels) or consumed * data (in the case of producer channels) they can * do so through the reload() function. * * - reload() uses EDMA_CHAN_PRODUCE_CONSUME_B which * for consumer channels will "produce" data and * "consume" data for producer channels, thus letting * the driver know what action the upper layer has * performed (if the channel is a consumer it's only * natural that the upper layer will write/produce more * data to the buffer. The same rationale applies to * producer channels). * * - EDMA_CHAN_PRODUCE_CONSUME_B is just the opposite * of EDMA_CHAN_PRODUCE_CONSUME_A. If one produces * data, the other will consume and vice-versa. * * - all of this information is valid only in the * context of cyclic buffers. If this behaviour is * not enabled, querying the status will simply * resolve to querying CITER and BITER. */ #define EDMA_CHAN_PRODUCE_CONSUME_A(chan, size)\ ((chan)->type == CHAN_TYPE_CONSUMER ?\ edma_chan_cyclic_consume(chan, size) :\ edma_chan_cyclic_produce(chan, size)) #define EDMA_CHAN_PRODUCE_CONSUME_B(chan, size)\ ((chan)->type == CHAN_TYPE_CONSUMER ?\ edma_chan_cyclic_produce(chan, size) :\ edma_chan_cyclic_consume(chan, size)) enum channel_type { CHAN_TYPE_CONSUMER = 0, CHAN_TYPE_PRODUCER, }; enum channel_state { CHAN_STATE_INIT = 0, CHAN_STATE_CONFIGURED, CHAN_STATE_STARTED, CHAN_STATE_STOPPED, CHAN_STATE_SUSPENDED, }; struct edma_channel { /* channel ID, needs to be the same as the hardware channel ID */ uint32_t id; /* pointer to device representing the EDMA instance, used by edma_isr */ const struct device *dev; /* current state of the channel */ enum channel_state state; /* type of the channel (PRODUCER/CONSUMER) - only applicable to cyclic * buffer configurations. 
*/ enum channel_type type; /* argument passed to the user-defined DMA callback */ void *arg; /* user-defined callback, called at the end of a channel's interrupt * handling. */ dma_callback_t cb; /* INTID associated with the channel */ int irq; /* the channel's status */ struct dma_status stat; /* cyclic buffer size - currently, this is set to head_block's size */ uint32_t bsize; /* set to true if the channel uses a cyclic buffer configuration */ bool cyclic_buffer; }; struct edma_data { /* this needs to be the first member */ struct dma_context ctx; mm_reg_t regmap; struct edma_channel *channels; atomic_t channel_flags; edma_config_t *hal_cfg; }; struct edma_config { uint32_t regmap_phys; uint32_t regmap_size; void (*irq_config)(void); /* true if channels are contiguous. The channels may not be contiguous * if the valid-channels property is used instead of dma-channels. This * is used to improve the time complexity of the channel lookup * function. */ bool contiguous_channels; }; static inline int channel_change_state(struct edma_channel *chan, enum channel_state next) { enum channel_state prev = chan->state; LOG_DBG("attempting to change state from %d to %d for channel %d", prev, next, chan->id); /* validate transition */ switch (prev) { case CHAN_STATE_INIT: if (next != CHAN_STATE_CONFIGURED) { return -EPERM; } break; case CHAN_STATE_CONFIGURED: if (next != CHAN_STATE_STARTED && next != CHAN_STATE_CONFIGURED) { return -EPERM; } break; case CHAN_STATE_STARTED: if (next != CHAN_STATE_STOPPED && next != CHAN_STATE_SUSPENDED) { return -EPERM; } break; case CHAN_STATE_STOPPED: if (next != CHAN_STATE_CONFIGURED) { return -EPERM; } break; case CHAN_STATE_SUSPENDED: if (next != CHAN_STATE_STARTED && next != CHAN_STATE_STOPPED) { return -EPERM; } break; default: LOG_ERR("invalid channel previous state: %d", prev); return -EINVAL; } /* transition OK, proceed */ chan->state = next; return 0; } static inline int get_transfer_type(enum dma_channel_direction dir, uint32_t *type) { switch (dir) { case MEMORY_TO_MEMORY: *type = kEDMA_TransferTypeM2M; break; case MEMORY_TO_PERIPHERAL: *type = kEDMA_TransferTypeM2P; break; case PERIPHERAL_TO_MEMORY: *type = kEDMA_TransferTypeP2M; break; default: LOG_ERR("invalid channel direction: %d", dir); return -EINVAL; } return 0; } static inline bool data_size_is_valid(uint16_t size) { switch (size) { case 1: case 2: case 4: case 8: case 16: case 32: case 64: break; default: return false; } return true; } /* TODO: we may require setting the channel type through DTS * or through struct dma_config. For now, we'll only support * MEMORY_TO_PERIPHERAL and PERIPHERAL_TO_MEMORY directions * and assume that these are bound to a certain channel type. */ static inline int edma_set_channel_type(struct edma_channel *chan, enum dma_channel_direction dir) { switch (dir) { case MEMORY_TO_PERIPHERAL: chan->type = CHAN_TYPE_CONSUMER; break; case PERIPHERAL_TO_MEMORY: chan->type = CHAN_TYPE_PRODUCER; break; default: LOG_ERR("unsupported transfer direction: %d", dir); return -ENOTSUP; } return 0; } /* this function is used in cyclic buffer configurations. What it does * is it updates the channel's read position based on the number of * bytes requested. If the number of bytes that's being read is higher * than the number of bytes available in the buffer (pending_length) * this will lead to an error. The main point of this check is to * provide a way for the user to determine if data is consumed at a * higher rate than it is being produced. 
* * This function is used in edma_isr() for CONSUMER channels to mark * that data has been consumed (i.e: data has been transferred to the * destination) (this is done via EDMA_CHAN_PRODUCE_CONSUME_A that's * called in edma_isr()). For producer channels, this function is used * in edma_reload() to mark the fact that the user of the EDMA driver * has consumed data. */ static inline int edma_chan_cyclic_consume(struct edma_channel *chan, uint32_t bytes) { if (bytes > chan->stat.pending_length) { return -EINVAL; } chan->stat.read_position = (chan->stat.read_position + bytes) % chan->bsize; if (chan->stat.read_position > chan->stat.write_position) { chan->stat.free = chan->stat.read_position - chan->stat.write_position; } else if (chan->stat.read_position == chan->stat.write_position) { chan->stat.free = chan->bsize; } else { chan->stat.free = chan->bsize - (chan->stat.write_position - chan->stat.read_position); } chan->stat.pending_length = chan->bsize - chan->stat.free; return 0; } /* this function is used in cyclic buffer configurations. What it does * is it updates the channel's write position based on the number of * bytes requested. If the number of bytes that's being written is higher * than the number of free bytes in the buffer this will lead to an error. * The main point of this check is to provide a way for the user to determine * if data is produced at a higher rate than it is being consumed. * * This function is used in edma_isr() for PRODUCER channels to mark * that data has been produced (i.e: data has been transferred to the * destination) (this is done via EDMA_CHAN_PRODUCE_CONSUME_A that's * called in edma_isr()). For consumer channels, this function is used * in edma_reload() to mark the fact that the user of the EDMA driver * has produced data. 
*/ static inline int edma_chan_cyclic_produce(struct edma_channel *chan, uint32_t bytes) { if (bytes > chan->stat.free) { return -EINVAL; } chan->stat.write_position = (chan->stat.write_position + bytes) % chan->bsize; if (chan->stat.write_position > chan->stat.read_position) { chan->stat.pending_length = chan->stat.write_position - chan->stat.read_position; } else if (chan->stat.write_position == chan->stat.read_position) { chan->stat.pending_length = chan->bsize; } else { chan->stat.pending_length = chan->bsize - (chan->stat.read_position - chan->stat.write_position); } chan->stat.free = chan->bsize - chan->stat.pending_length; return 0; } static inline void edma_dump_channel_registers(struct edma_data *data, uint32_t chan_id) { LOG_DBG("dumping channel data for channel %d", chan_id); LOG_DBG("CH_CSR: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR)); LOG_DBG("CH_ES: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_ES)); LOG_DBG("CH_INT: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_INT)); LOG_DBG("CH_SBR: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_SBR)); LOG_DBG("CH_PRI: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_PRI)); if (EDMA_HAS_MUX(data->hal_cfg)) { LOG_DBG("CH_MUX: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_MUX)); } LOG_DBG("TCD_SADDR: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SADDR)); LOG_DBG("TCD_SOFF: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SOFF)); LOG_DBG("TCD_ATTR: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_ATTR)); LOG_DBG("TCD_NBYTES: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_NBYTES)); LOG_DBG("TCD_SLAST_SDA: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA)); LOG_DBG("TCD_DADDR: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DADDR)); LOG_DBG("TCD_DOFF: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DOFF)); LOG_DBG("TCD_CITER: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CITER)); LOG_DBG("TCD_DLAST_SGA: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DLAST_SGA)); LOG_DBG("TCD_CSR: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CSR)); LOG_DBG("TCD_BITER: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_BITER)); } static inline int set_slast_dlast(struct dma_config *dma_cfg, uint32_t transfer_type, struct edma_data *data, uint32_t chan_id) { int32_t slast, dlast; if (transfer_type == kEDMA_TransferTypeP2M) { slast = 0; } else { switch (dma_cfg->head_block->source_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: slast = (int32_t)dma_cfg->head_block->block_size; break; case DMA_ADDR_ADJ_DECREMENT: slast = (-1) * (int32_t)dma_cfg->head_block->block_size; break; default: LOG_ERR("unsupported SADDR adjustment: %d", dma_cfg->head_block->source_addr_adj); return -EINVAL; } } if (transfer_type == kEDMA_TransferTypeM2P) { dlast = 0; } else { switch (dma_cfg->head_block->dest_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: dlast = (int32_t)dma_cfg->head_block->block_size; break; case DMA_ADDR_ADJ_DECREMENT: dlast = (-1) * (int32_t)dma_cfg->head_block->block_size; break; default: LOG_ERR("unsupported DADDR adjustment: %d", dma_cfg->head_block->dest_addr_adj); return -EINVAL; } } LOG_DBG("attempting to commit SLAST %d", slast); LOG_DBG("attempting to commit DLAST %d", dlast); /* commit configuration */ EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA, slast); EDMA_ChannelRegWrite(data->hal_cfg, chan_id, 
EDMA_TCD_DLAST_SGA, dlast); return 0; } /* the NXP HAL EDMA driver uses some custom return values * that need to be converted to standard error codes. This function * performs exactly this translation. */ static inline int to_std_error(int edma_err) { switch (edma_err) { case kStatus_EDMA_InvalidConfiguration: case kStatus_InvalidArgument: return -EINVAL; case kStatus_Busy: return -EBUSY; default: LOG_ERR("unknown EDMA error code: %d", edma_err); return -EINVAL; } } #endif /* ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_ */ ```
/content/code_sandbox/drivers/dma/dma_nxp_edma.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,593
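The cyclic bookkeeping above is self-contained enough to exercise on a host. Below is a standalone sketch (plain C, no Zephyr headers) that mirrors the arithmetic of `edma_chan_cyclic_produce()` and `edma_chan_cyclic_consume()` on a mock channel; the struct and function names are local stand-ins, not driver API. It shows how `free` and `pending` stay complementary, and how an over-read is rejected:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mock of only the fields the cyclic helpers actually touch. */
struct mock_chan {
	uint32_t bsize;			/* cyclic buffer size */
	uint32_t read_pos, write_pos;
	uint32_t free, pending;
};

/* Same arithmetic as edma_chan_cyclic_produce() in the header above. */
static int produce(struct mock_chan *c, uint32_t bytes)
{
	if (bytes > c->free) {
		return -1;
	}
	c->write_pos = (c->write_pos + bytes) % c->bsize;
	if (c->write_pos > c->read_pos) {
		c->pending = c->write_pos - c->read_pos;
	} else if (c->write_pos == c->read_pos) {
		c->pending = c->bsize;	/* buffer completely full */
	} else {
		c->pending = c->bsize - (c->read_pos - c->write_pos);
	}
	c->free = c->bsize - c->pending;
	return 0;
}

/* Same arithmetic as edma_chan_cyclic_consume(). */
static int consume(struct mock_chan *c, uint32_t bytes)
{
	if (bytes > c->pending) {
		return -1;
	}
	c->read_pos = (c->read_pos + bytes) % c->bsize;
	if (c->read_pos > c->write_pos) {
		c->free = c->read_pos - c->write_pos;
	} else if (c->read_pos == c->write_pos) {
		c->free = c->bsize;	/* buffer completely empty */
	} else {
		c->free = c->bsize - (c->write_pos - c->read_pos);
	}
	c->pending = c->bsize - c->free;
	return 0;
}

int main(void)
{
	struct mock_chan c = { .bsize = 64, .free = 64 };

	assert(produce(&c, 48) == 0);		/* writer fills 48 bytes */
	assert(consume(&c, 16) == 0);		/* reader drains 16 */
	assert(c.pending == 32 && c.free == 32);
	assert(consume(&c, 40) < 0);		/* over-read is rejected */
	printf("pending=%u free=%u\n", (unsigned)c.pending, (unsigned)c.free);
	return 0;
}
```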
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/dma.h> #include <zephyr/irq.h> #include <DA1469xAB.h> #include <da1469x_pd.h> #include <da1469x_config.h> #include <system_DA1469x.h> #include <da1469x_otp.h> #include <zephyr/drivers/dma/dma_smartbond.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_smartbond, CONFIG_DMA_LOG_LEVEL); #define DT_DRV_COMPAT renesas_smartbond_dma #define SMARTBOND_IRQN DT_INST_IRQN(0) #define SMARTBOND_IRQ_PRIO DT_INST_IRQ(0, priority) #define DMA_CHANNELS_COUNT DT_PROP(DT_NODELABEL(dma), dma_channels) #define DMA_BLOCK_COUNT DT_PROP(DT_NODELABEL(dma), block_count) #define DMA_SECURE_CHANNEL 7 #define DMA_CTRL_REG_SET_FIELD(_field, _var, _val) \ (_var) = \ (((_var) & ~DMA_DMA0_CTRL_REG_ ## _field ## _Msk) | \ (((_val) << DMA_DMA0_CTRL_REG_ ## _field ## _Pos) & DMA_DMA0_CTRL_REG_ ## _field ## _Msk)) #define DMA_CTRL_REG_GET_FIELD(_field, _var) \ (((_var) & DMA_DMA0_CTRL_REG_ ## _field ## _Msk) >> DMA_DMA0_CTRL_REG_ ## _field ## _Pos) #define DMA_CHN2REG(_idx) (&((struct channel_regs *)DMA)[(_idx)]) #define DMA_MUX_SHIFT(_idx) (((_idx) >> 1) * 4) #define DMA_REQ_MUX_REG_SET(_idx, _val) \ DMA->DMA_REQ_MUX_REG = \ (DMA->DMA_REQ_MUX_REG & ~(0xf << DMA_MUX_SHIFT((_idx)))) | \ (((_val) & 0xf) << DMA_MUX_SHIFT((_idx))) #define DMA_REQ_MUX_REG_GET(_idx) \ ((DMA->DMA_REQ_MUX_REG >> DMA_MUX_SHIFT((_idx))) & 0xf) #define CRYPTO_KEYS_BUF_ADDR 0x30040100 #define CRYPTO_KEYS_BUF_SIZE 0x100 #define IS_AES_KEYS_BUF_RANGE(_a) ((uint32_t)(_a) >= (uint32_t)(CRYPTO_KEYS_BUF_ADDR)) && \ ((uint32_t)(_a) < (uint32_t)(CRYPTO_KEYS_BUF_ADDR + CRYPTO_KEYS_BUF_SIZE)) /* * DMA channel priority level. The smaller the value the lower the priority granted to a channel * when two or more channels request the bus at the same time. For channels of same priority an * inherent mechanism is applied in which the lower the channel number the higher the priority. */ enum dma_smartbond_channel_prio { DMA_SMARTBOND_CHANNEL_PRIO_0 = 0x0, /* Lowest channel priority */ DMA_SMARTBOND_CHANNEL_PRIO_1, DMA_SMARTBOND_CHANNEL_PRIO_2, DMA_SMARTBOND_CHANNEL_PRIO_3, DMA_SMARTBOND_CHANNEL_PRIO_4, DMA_SMARTBOND_CHANNEL_PRIO_5, DMA_SMARTBOND_CHANNEL_PRIO_6, DMA_SMARTBOND_CHANNEL_PRIO_7, /* Highest channel priority */ DMA_SMARTBOND_CHANNEL_PRIO_MAX }; enum dma_smartbond_channel { DMA_SMARTBOND_CHANNEL_0 = 0x0, DMA_SMARTBOND_CHANNEL_1, DMA_SMARTBOND_CHANNEL_2, DMA_SMARTBOND_CHANNEL_3, DMA_SMARTBOND_CHANNEL_4, DMA_SMARTBOND_CHANNEL_5, DMA_SMARTBOND_CHANNEL_6, DMA_SMARTBOND_CHANNEL_7, DMA_SMARTBOND_CHANNEL_MAX }; enum dma_smartbond_burst_len { DMA_SMARTBOND_BURST_LEN_1B = 0x1, /* Burst mode is disabled */ DMA_SMARTBOND_BURST_LEN_4B = 0x4, /* Perform bursts of 4 beats (INCR4) */ DMA_SMARTBOND_BURST_LEN_8B = 0x8 /* Perform bursts of 8 beats (INCR8) */ }; /* * DMA bus width indicating how many bytes are retrived/written per transfer. * Note that the bus width is the same for the source and destination. 
*/ enum dma_smartbond_bus_width { DMA_SMARTBOND_BUS_WIDTH_1B = 0x1, DMA_SMARTBOND_BUS_WIDTH_2B = 0x2, DMA_SMARTBOND_BUS_WIDTH_4B = 0x4 }; enum dreq_mode { DREQ_MODE_SW = 0x0, DREQ_MODE_HW }; enum burst_mode { BURST_MODE_0B = 0x0, BURST_MODE_4B = 0x1, BURST_MODE_8B = 0x2 }; enum bus_width { BUS_WIDTH_1B = 0x0, BUS_WIDTH_2B = 0x1, BUS_WIDTH_4B = 0x2 }; enum addr_adj { ADDR_ADJ_NO_CHANGE = 0x0, ADDR_ADJ_INCR }; enum copy_mode { COPY_MODE_BLOCK = 0x0, COPY_MODE_INIT }; enum req_sense { REQ_SENSE_LEVEL = 0x0, REQ_SENSE_EDGE }; struct channel_regs { __IO uint32_t DMA_A_START; __IO uint32_t DMA_B_START; __IO uint32_t DMA_INT_REG; __IO uint32_t DMA_LEN_REG; __IO uint32_t DMA_CTRL_REG; __I uint32_t DMA_IDX_REG; __I uint32_t RESERVED[2]; }; struct dma_channel_data { dma_callback_t cb; void *user_data; enum dma_smartbond_bus_width bus_width; enum dma_smartbond_burst_len burst_len; enum dma_channel_direction dir; bool is_dma_configured; }; struct dma_smartbond_data { /* Should be the first member of the driver data */ struct dma_context dma_ctx; ATOMIC_DEFINE(channels_atomic, DMA_CHANNELS_COUNT); /* User callbacks and data to be stored per channel */ struct dma_channel_data channel_data[DMA_CHANNELS_COUNT]; }; /* True if there is any DMA activity on any channel, false otheriwise. */ static bool dma_smartbond_is_dma_active(void) { int idx; struct channel_regs *regs; for (idx = 0; idx < DMA_CHANNELS_COUNT; idx++) { regs = DMA_CHN2REG(idx); if (DMA_CTRL_REG_GET_FIELD(DMA_ON, regs->DMA_CTRL_REG)) { return true; } } return false; } static inline void dma_smartbond_pm_policy_state_lock_get(void) { #if defined(CONFIG_PM_DEVICE) pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); #endif } static inline void dma_smartbond_pm_policy_state_lock_put(void) { #if defined(CONFIG_PM_DEVICE) pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); #endif } static void dma_smartbond_set_channel_status(const struct device *dev, uint32_t channel, bool status) { unsigned int key; struct channel_regs *regs = DMA_CHN2REG(channel); key = irq_lock(); if (status) { /* Make sure the status register for the requested channel is cleared. */ DMA->DMA_CLEAR_INT_REG |= BIT(channel); /* Enable interrupts for the requested channel. */ DMA->DMA_INT_MASK_REG |= BIT(channel); /* Check if this is the first attempt to enable DMA interrupts. */ if (!irq_is_enabled(SMARTBOND_IRQN)) { irq_enable(SMARTBOND_IRQN); /* Prevent sleep as long as DMA operations are ongoing */ dma_smartbond_pm_policy_state_lock_get(); } DMA_CTRL_REG_SET_FIELD(DMA_ON, regs->DMA_CTRL_REG, 0x1); } else { DMA_CTRL_REG_SET_FIELD(DMA_ON, regs->DMA_CTRL_REG, 0x0); /* * It might happen that DMA is already in progress. Make sure the current * on-going transfer is complete (cannot be interrupted). */ while (DMA_CTRL_REG_GET_FIELD(DMA_ON, regs->DMA_CTRL_REG)) { } /* Disable interrupts for the requested channel */ DMA->DMA_INT_MASK_REG &= ~(BIT(channel)); /* Clear the status register; the requested channel should be considered obsolete */ DMA->DMA_CLEAR_INT_REG |= BIT(channel); /* DMA interrupts should be disabled only if all channels are disabled. 
*/ if (!dma_smartbond_is_dma_active()) { irq_disable(SMARTBOND_IRQN); /* Allow entering sleep once all DMA channels are inactive */ dma_smartbond_pm_policy_state_lock_put(); } } irq_unlock(key); } static bool dma_channel_dst_addr_check_and_adjust(uint32_t channel, uint32_t *dst) { uint32_t phy_address; uint32_t secure_boot_reg; bool is_aes_keys_protected, is_qspic_keys_protected; phy_address = black_orca_phy_addr(*dst); secure_boot_reg = CRG_TOP->SECURE_BOOT_REG; is_aes_keys_protected = (secure_boot_reg & CRG_TOP_SECURE_BOOT_REG_PROT_AES_KEY_READ_Msk); is_qspic_keys_protected = (secure_boot_reg & CRG_TOP_SECURE_BOOT_REG_PROT_QSPI_KEY_READ_Msk); /* * If the destination address reflects the AES key buffer area and secure keys are protected * then only the secure channel #7 can be used to transfer data to AES key buffer. */ if ((IS_AES_KEYS_BUF_RANGE(phy_address) && (is_aes_keys_protected || is_qspic_keys_protected) && (channel != DMA_SECURE_CHANNEL))) { LOG_ERR("Keys are protected. Only secure channel #7 can be employed."); return false; } if (IS_QSPIF_ADDRESS(phy_address) || IS_QSPIF_CACHED_ADDRESS(phy_address) || IS_OTP_ADDRESS(phy_address) || IS_OTP_P_ADDRESS(phy_address)) { LOG_ERR("Invalid destination location."); return false; } *dst = phy_address; return true; } static bool dma_channel_src_addr_check_and_adjust(uint32_t channel, uint32_t *src) { uint32_t phy_address; uint32_t secure_boot_reg; bool is_aes_keys_protected, is_qspic_keys_protected; /* DMA can only access physical addresses, not remapped. */ phy_address = black_orca_phy_addr(*src); if (IS_QSPIF_CACHED_ADDRESS(phy_address)) { /* * To achiebe max. perfomance, peripherals should not access the Flash memory * through the instruction cache controller (avoid cache misses). */ phy_address += (MCU_QSPIF_M_BASE - MCU_QSPIF_M_CACHED_BASE); } else if (IS_OTP_ADDRESS(phy_address)) { /* Peripherals should access OTP through its peripheral address space. */ phy_address += (MCU_OTP_M_P_BASE - MCU_OTP_M_BASE); } secure_boot_reg = CRG_TOP->SECURE_BOOT_REG; is_aes_keys_protected = (secure_boot_reg & CRG_TOP_SECURE_BOOT_REG_PROT_AES_KEY_READ_Msk); is_qspic_keys_protected = (secure_boot_reg & CRG_TOP_SECURE_BOOT_REG_PROT_QSPI_KEY_READ_Msk); /* * If the source address reflects protected area in OTP then only the * secure channel #7 can be used to fetch secure keys data. */ if (((IS_ADDRESS_USER_DATA_KEYS_SEGMENT(phy_address) && is_aes_keys_protected) || (IS_ADDRESS_QSPI_FW_KEYS_SEGMENT(phy_address) && is_qspic_keys_protected)) && (channel != DMA_SECURE_CHANNEL)) { LOG_ERR("Keys are protected. 
Only secure channel #7 can be employed."); return false; } *src = phy_address; return true; } static bool dma_channel_update_dreq_mode(enum dma_channel_direction direction, uint32_t *dma_ctrl_reg) { switch (direction) { case MEMORY_TO_HOST: case HOST_TO_MEMORY: case MEMORY_TO_MEMORY: /* DMA channel starts immediately */ DMA_CTRL_REG_SET_FIELD(DREQ_MODE, *dma_ctrl_reg, DREQ_MODE_SW); break; case PERIPHERAL_TO_MEMORY: case MEMORY_TO_PERIPHERAL: case PERIPHERAL_TO_PERIPHERAL: /* DMA channels starts by peripheral DMA req */ DMA_CTRL_REG_SET_FIELD(DREQ_MODE, *dma_ctrl_reg, DREQ_MODE_HW); break; default: return false; }; return true; } static bool dma_channel_update_src_addr_adj(enum dma_addr_adj addr_adj, uint32_t *dma_ctrl_reg) { switch (addr_adj) { case DMA_ADDR_ADJ_NO_CHANGE: DMA_CTRL_REG_SET_FIELD(AINC, *dma_ctrl_reg, ADDR_ADJ_NO_CHANGE); break; case DMA_ADDR_ADJ_INCREMENT: DMA_CTRL_REG_SET_FIELD(AINC, *dma_ctrl_reg, ADDR_ADJ_INCR); break; default: return false; } return true; } static bool dma_channel_update_dst_addr_adj(enum dma_addr_adj addr_adj, uint32_t *dma_ctrl_reg) { switch (addr_adj) { case DMA_ADDR_ADJ_NO_CHANGE: DMA_CTRL_REG_SET_FIELD(BINC, *dma_ctrl_reg, ADDR_ADJ_NO_CHANGE); break; case DMA_ADDR_ADJ_INCREMENT: DMA_CTRL_REG_SET_FIELD(BINC, *dma_ctrl_reg, ADDR_ADJ_INCR); break; default: return false; } return true; } static bool dma_channel_update_bus_width(uint16_t bw, uint32_t *dma_ctrl_reg) { switch (bw) { case DMA_SMARTBOND_BUS_WIDTH_1B: DMA_CTRL_REG_SET_FIELD(BW, *dma_ctrl_reg, BUS_WIDTH_1B); break; case DMA_SMARTBOND_BUS_WIDTH_2B: DMA_CTRL_REG_SET_FIELD(BW, *dma_ctrl_reg, BUS_WIDTH_2B); break; case DMA_SMARTBOND_BUS_WIDTH_4B: DMA_CTRL_REG_SET_FIELD(BW, *dma_ctrl_reg, BUS_WIDTH_4B); break; default: return false; } return true; } static bool dma_channel_update_burst_mode(uint16_t burst, uint32_t *dma_ctrl_reg) { switch (burst) { case DMA_SMARTBOND_BURST_LEN_1B: DMA_CTRL_REG_SET_FIELD(BURST_MODE, *dma_ctrl_reg, BURST_MODE_0B); break; case DMA_SMARTBOND_BURST_LEN_4B: DMA_CTRL_REG_SET_FIELD(BURST_MODE, *dma_ctrl_reg, BURST_MODE_4B); break; case DMA_SMARTBOND_BURST_LEN_8B: DMA_CTRL_REG_SET_FIELD(BURST_MODE, *dma_ctrl_reg, BURST_MODE_8B); break; default: return false; } return true; } static void dma_channel_update_req_sense(enum dma_smartbond_trig_mux trig_mux, uint32_t channel, uint32_t *dma_ctrl_reg) { switch (trig_mux) { case DMA_SMARTBOND_TRIG_MUX_UART: case DMA_SMARTBOND_TRIG_MUX_UART2: case DMA_SMARTBOND_TRIG_MUX_UART3: case DMA_SMARTBOND_TRIG_MUX_I2C: case DMA_SMARTBOND_TRIG_MUX_I2C2: case DMA_SMARTBOND_TRIG_MUX_USB: /* Odd channel numbers should reflect TX path */ if (channel & BIT(0)) { DMA_CTRL_REG_SET_FIELD(REQ_SENSE, *dma_ctrl_reg, REQ_SENSE_EDGE); break; } default: DMA_CTRL_REG_SET_FIELD(REQ_SENSE, *dma_ctrl_reg, REQ_SENSE_LEVEL); } } static void dma_set_mux_request(enum dma_smartbond_trig_mux trig_mux, uint32_t channel) { unsigned int key; key = irq_lock(); DMA_REQ_MUX_REG_SET(channel, trig_mux); /* * Having same trigger for different channels can cause unpredictable results. * The audio triggers (src and pcm) are an exception, as they use 2 pairs each * for DMA access. * The lesser significant selector has higher priority and will control * the DMA acknowledge signal driven to the selected peripheral. Make sure * the current selector does not match with selectors of * higher priorities (dma channels of lower indexing). It's OK if a * channel of higher indexing defines the same peripheral request source * (should be ignored as it has lower priority). 
*/ if (trig_mux != DMA_SMARTBOND_TRIG_MUX_NONE) { switch (channel) { case DMA_SMARTBOND_CHANNEL_7: case DMA_SMARTBOND_CHANNEL_6: if (DMA_REQ_MUX_REG_GET(DMA_SMARTBOND_CHANNEL_5) == trig_mux) { DMA_REQ_MUX_REG_SET(DMA_SMARTBOND_CHANNEL_5, DMA_SMARTBOND_TRIG_MUX_NONE); } /* fall-through */ case DMA_SMARTBOND_CHANNEL_5: case DMA_SMARTBOND_CHANNEL_4: if (DMA_REQ_MUX_REG_GET(DMA_SMARTBOND_CHANNEL_3) == trig_mux) { DMA_REQ_MUX_REG_SET(DMA_SMARTBOND_CHANNEL_3, DMA_SMARTBOND_TRIG_MUX_NONE); } /* fall-through */ case DMA_SMARTBOND_CHANNEL_3: case DMA_SMARTBOND_CHANNEL_2: if (DMA_REQ_MUX_REG_GET(DMA_SMARTBOND_CHANNEL_1) == trig_mux) { DMA_REQ_MUX_REG_SET(DMA_SMARTBOND_CHANNEL_1, DMA_SMARTBOND_TRIG_MUX_NONE); } case DMA_SMARTBOND_CHANNEL_1: case DMA_SMARTBOND_CHANNEL_0: break; } } irq_unlock(key); } static int dma_smartbond_config(const struct device *dev, uint32_t channel, struct dma_config *cfg) { struct dma_smartbond_data *data = dev->data; struct channel_regs *regs; uint32_t dma_ctrl_reg; uint32_t src_dst_address; if (channel >= DMA_CHANNELS_COUNT) { LOG_ERR("Inavlid DMA channel index"); return -EINVAL; } regs = DMA_CHN2REG(channel); dma_ctrl_reg = regs->DMA_CTRL_REG; if (DMA_CTRL_REG_GET_FIELD(DMA_ON, dma_ctrl_reg)) { LOG_ERR("Requested channel is enabled. It should first be disabled"); return -EIO; } if (cfg == NULL || cfg->head_block == NULL) { LOG_ERR("Missing configuration structure"); return -EINVAL; } /* Error handling is not supported; just warn user. */ if (!cfg->error_callback_dis) { LOG_WRN("Error handling is not supported"); } if (!cfg->complete_callback_en) { data->channel_data[channel].cb = cfg->dma_callback; data->channel_data[channel].user_data = cfg->user_data; } else { LOG_WRN("User callback can only be called at completion only and not per block."); /* Nulify pointers to indicate notifications are disabled. */ data->channel_data[channel].cb = NULL; data->channel_data[channel].user_data = NULL; } data->channel_data[channel].dir = cfg->channel_direction; if (cfg->block_count > DMA_BLOCK_COUNT) { LOG_WRN("A single block is supported. The rest blocks will be discarded"); } if (cfg->channel_priority >= DMA_SMARTBOND_CHANNEL_PRIO_MAX) { cfg->channel_priority = DMA_SMARTBOND_CHANNEL_PRIO_7; LOG_WRN("Channel priority exceeded max. 
Setting to highest valid level"); } DMA_CTRL_REG_SET_FIELD(DMA_PRIO, dma_ctrl_reg, cfg->channel_priority); if (((cfg->source_burst_length != cfg->dest_burst_length) || !dma_channel_update_burst_mode(cfg->source_burst_length, &dma_ctrl_reg))) { LOG_ERR("Invalid burst mode or source and destination mode mismatch"); return -EINVAL; } data->channel_data[channel].burst_len = cfg->source_burst_length; if (cfg->source_data_size != cfg->dest_data_size || !dma_channel_update_bus_width(cfg->source_data_size, &dma_ctrl_reg)) { LOG_ERR("Invalid bus width or source and destination bus width mismatch"); return -EINVAL; } data->channel_data[channel].bus_width = cfg->source_data_size; if (cfg->source_chaining_en || cfg->dest_chaining_en || cfg->head_block->source_gather_en || cfg->head_block->dest_scatter_en || cfg->head_block->source_reload_en || cfg->head_block->dest_reload_en) { LOG_WRN("Chainning, scattering, gathering or reloading is not supported"); } if (!dma_channel_update_src_addr_adj(cfg->head_block->source_addr_adj, &dma_ctrl_reg)) { LOG_ERR("Invalid source address adjustment"); return -EINVAL; } if (!dma_channel_update_dst_addr_adj(cfg->head_block->dest_addr_adj, &dma_ctrl_reg)) { LOG_ERR("Invalid destination address adjustment"); return -EINVAL; } if (!dma_channel_update_dreq_mode(cfg->channel_direction, &dma_ctrl_reg)) { LOG_ERR("Inavlid channel direction"); return -EINVAL; } /* Cyclic is valid only when DREQ_MODE is set */ if (cfg->cyclic && DMA_CTRL_REG_GET_FIELD(DREQ_MODE, dma_ctrl_reg) != DREQ_MODE_HW) { LOG_ERR("Circular mode is only supported for non memory-memory transfers"); return -EINVAL; } DMA_CTRL_REG_SET_FIELD(CIRCULAR, dma_ctrl_reg, cfg->cyclic); if (DMA_CTRL_REG_GET_FIELD(DREQ_MODE, dma_ctrl_reg) == DREQ_MODE_SW && DMA_CTRL_REG_GET_FIELD(AINC, dma_ctrl_reg) == ADDR_ADJ_NO_CHANGE && DMA_CTRL_REG_GET_FIELD(BINC, dma_ctrl_reg) == ADDR_ADJ_INCR) { /* * Valid for memory initialization to a specific value. This process * cannot be interrupted by other DMA channels. */ DMA_CTRL_REG_SET_FIELD(DMA_INIT, dma_ctrl_reg, COPY_MODE_INIT); } else { DMA_CTRL_REG_SET_FIELD(DMA_INIT, dma_ctrl_reg, COPY_MODE_BLOCK); } dma_channel_update_req_sense(cfg->dma_slot, channel, &dma_ctrl_reg); regs->DMA_CTRL_REG = dma_ctrl_reg; /* Requested address might be changed */ src_dst_address = cfg->head_block->source_address; if (!dma_channel_src_addr_check_and_adjust(channel, &src_dst_address)) { return -EINVAL; } if (src_dst_address % cfg->source_data_size) { LOG_ERR("Source address is not bus width aligned"); return -EINVAL; } regs->DMA_A_START = src_dst_address; src_dst_address = cfg->head_block->dest_address; if (!dma_channel_dst_addr_check_and_adjust(channel, &src_dst_address)) { return -EINVAL; } if (src_dst_address % cfg->dest_data_size) { LOG_ERR("Destination address is not bus width aligned"); return -EINVAL; } regs->DMA_B_START = src_dst_address; if (cfg->head_block->block_size % (cfg->source_data_size * cfg->source_burst_length)) { LOG_ERR("Requested data size is not multiple of bus width"); return -EINVAL; } regs->DMA_LEN_REG = (cfg->head_block->block_size / cfg->source_data_size) - 1; /* Interrupt will be raised once all transfers are complete. 
*/ regs->DMA_INT_REG = (cfg->head_block->block_size / cfg->source_data_size) - 1; if ((cfg->source_handshake != cfg->dest_handshake) || (cfg->source_handshake != 0)/*HW*/) { LOG_ERR("Source/destination handshakes mismatch or invalid"); return -EINVAL; } dma_set_mux_request(cfg->dma_slot, channel); /* Designate that channel has been configured */ data->channel_data[channel].is_dma_configured = true; return 0; } static int dma_smartbond_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { struct dma_smartbond_data *data = dev->data; struct channel_regs *regs; if (channel >= DMA_CHANNELS_COUNT) { LOG_ERR("Inavlid DMA channel index"); return -EINVAL; } regs = DMA_CHN2REG(channel); if (!data->channel_data[channel].is_dma_configured) { LOG_ERR("Requested DMA channel should first be configured"); return -EINVAL; } if (size == 0) { LOG_ERR("Min. transfer size is one"); return -EINVAL; } if (DMA_CTRL_REG_GET_FIELD(DMA_ON, regs->DMA_CTRL_REG)) { LOG_ERR("Channel is busy, settings cannot be changed mid-transfer"); return -EBUSY; } if (src % data->channel_data[channel].bus_width) { LOG_ERR("Source address is not bus width aligned"); return -EINVAL; } if (!dma_channel_src_addr_check_and_adjust(channel, &src)) { return -EINVAL; } regs->DMA_A_START = src; if (dst % data->channel_data[channel].bus_width) { LOG_ERR("Destination address is not bus width aligned"); return -EINVAL; } if (!dma_channel_dst_addr_check_and_adjust(channel, &dst)) { return -EINVAL; } regs->DMA_B_START = dst; if (size % (data->channel_data[channel].burst_len * data->channel_data[channel].bus_width)) { LOG_ERR("Requested data size is not multiple of bus width"); return -EINVAL; } regs->DMA_LEN_REG = (size / data->channel_data[channel].bus_width) - 1; /* Interrupt will be raised once all transfers are complete. */ regs->DMA_INT_REG = (size / data->channel_data[channel].bus_width) - 1; return 0; } static int dma_smartbond_start(const struct device *dev, uint32_t channel) { struct channel_regs *regs; struct dma_smartbond_data *data = dev->data; if (channel >= DMA_CHANNELS_COUNT) { LOG_ERR("Inavlid DMA channel index"); return -EINVAL; } regs = DMA_CHN2REG(channel); if (!data->channel_data[channel].is_dma_configured) { LOG_ERR("Requested DMA channel should first be configured"); return -EINVAL; } /* Should return succss if the requested channel is already started. */ if (DMA_CTRL_REG_GET_FIELD(DMA_ON, regs->DMA_CTRL_REG)) { return 0; } dma_smartbond_set_channel_status(dev, channel, true); return 0; } static int dma_smartbond_stop(const struct device *dev, uint32_t channel) { struct channel_regs *regs; if (channel >= DMA_CHANNELS_COUNT) { LOG_ERR("Inavlid DMA channel index"); return -EINVAL; } regs = DMA_CHN2REG(channel); /* * In normal mode DMA_ON is cleared automatically. However we need to clear * the corresponding register mask and disable NVIC if there is no other * channel in use. */ dma_smartbond_set_channel_status(dev, channel, false); return 0; } static int dma_smartbond_suspend(const struct device *dev, uint32_t channel) { if (channel >= DMA_CHANNELS_COUNT) { LOG_ERR("Inavlid DMA channel index"); return -EINVAL; } /* * Freezing the DMA engine is valid for memory-to-memory operations. * Valid memory locations are SYSRAM and/or PSRAM. */ LOG_WRN("DMA is freezed globally"); /* * Freezing the DMA engine can be done universally and not per channel!. * An attempt to disable the channel would result in resetting the IDX * register next time the channel was re-enabled. 
*/ GPREG->SET_FREEZE_REG = GPREG_SET_FREEZE_REG_FRZ_DMA_Msk; return 0; } static int dma_smartbond_resume(const struct device *dev, uint32_t channel) { if (channel >= DMA_CHANNELS_COUNT) { LOG_ERR("Inavlid DMA channel index"); return -EINVAL; } LOG_WRN("DMA is unfreezed globally"); /* Unfreezing the DMA engine can be done unviversally and not per channel! */ GPREG->RESET_FREEZE_REG = GPREG_RESET_FREEZE_REG_FRZ_DMA_Msk; return 0; } static int dma_smartbond_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { struct channel_regs *regs; int key; struct dma_smartbond_data *data = dev->data; uint8_t bus_width; uint32_t dma_ctrl_reg, dma_idx_reg, dma_len_reg; if (channel >= DMA_CHANNELS_COUNT) { LOG_ERR("Inavlid DMA channel index"); return -EINVAL; } if (stat == NULL) { LOG_ERR("User should provide a valid pointer to store the status info requested"); } if (!data->channel_data[channel].is_dma_configured) { LOG_ERR("Requested DMA channel should first be configured"); return -EINVAL; } regs = DMA_CHN2REG(channel); /* * The DMA is running in parallel with CPU and so it might happen that an on-going transfer * might be completed the moment user parses the status results. Disable interrupts globally * so there is no chance for a new transfer to be initiated from within ISR and so changing * the channel registers values. */ key = irq_lock(); dma_ctrl_reg = regs->DMA_CTRL_REG; dma_idx_reg = regs->DMA_IDX_REG; dma_len_reg = regs->DMA_LEN_REG; /* Calculate how many byes each transfer consists of. */ bus_width = DMA_CTRL_REG_GET_FIELD(BW, dma_ctrl_reg); if (bus_width == BUS_WIDTH_1B) { bus_width = 1; } else { bus_width <<= 1; } /* Convert transfers to bytes. */ stat->total_copied = dma_idx_reg * bus_width; stat->pending_length = (dma_len_reg - dma_idx_reg) * bus_width; stat->busy = DMA_CTRL_REG_GET_FIELD(DMA_ON, dma_ctrl_reg); stat->dir = data->channel_data[channel].dir; /* DMA does not support circular buffer functionality */ stat->free = 0; stat->read_position = 0; stat->write_position = 0; irq_unlock(key); return 0; } static int dma_smartbond_get_attribute(const struct device *dev, uint32_t type, uint32_t *value) { if (value == NULL) { LOG_ERR("User should provide a valid pointer to attribute value"); return -EINVAL; } switch (type) { /* * Source and destination addresses should be multiple of a channel's bus width. * This info could be provided at runtime given that attributes of a specific * channel could be requested. */ case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT: case DMA_ATTR_COPY_ALIGNMENT: /* * Buffer size should be multiple of a channel's bus width multiplied by burst length. * This info could be provided at runtime given that attributes of a specific channel * could be requested. */ case DMA_ATTR_BUFFER_SIZE_ALIGNMENT: return -ENOSYS; case DMA_ATTR_MAX_BLOCK_COUNT: *value = DMA_BLOCK_COUNT; return 0; default: return -EINVAL; } } static bool dma_smartbond_chan_filter(const struct device *dev, int channel, void *filter_param) { uint32_t requested_channel; if (channel >= DMA_CHANNELS_COUNT) { LOG_ERR("Inavlid DMA channel index"); return -EINVAL; } /* If user does not provide any channel request explicitly, return true. 
*/ if (filter_param == NULL) { return true; } requested_channel = *(uint32_t *)filter_param; if (channel == requested_channel) { return true; } return false; } static struct dma_driver_api dma_smartbond_driver_api = { .config = dma_smartbond_config, .reload = dma_smartbond_reload, .start = dma_smartbond_start, .stop = dma_smartbond_stop, .suspend = dma_smartbond_suspend, .resume = dma_smartbond_resume, .get_status = dma_smartbond_get_status, .get_attribute = dma_smartbond_get_attribute, .chan_filter = dma_smartbond_chan_filter }; static void smartbond_dma_isr(const void *arg) { uint16_t dma_int_status_reg; int i; struct channel_regs *regs; struct dma_smartbond_data *data = ((const struct device *)arg)->data; /* * A single interrupt line is generated for all channels and so each channel * should be parsed separately. */ for (i = 0, dma_int_status_reg = DMA->DMA_INT_STATUS_REG; i < DMA_CHANNELS_COUNT && dma_int_status_reg != 0; ++i, dma_int_status_reg >>= 1) { /* Check if the selected channel has raised the interrupt line */ if (dma_int_status_reg & BIT(0)) { regs = DMA_CHN2REG(i); /* * Should be valid if callbacks are explicitly enabled by users. * Interrupt should be triggered only when the total size of * bytes has been transferred. Bus errors cannot raise interrupts. */ if (data->channel_data[i].cb) { data->channel_data[i].cb((const struct device *)arg, data->channel_data[i].user_data, i, DMA_STATUS_COMPLETE); } /* Channel line should be cleared otherwise ISR will keep firing! */ DMA->DMA_CLEAR_INT_REG = BIT(i); } } } #if defined(CONFIG_PM_DEVICE) static bool dma_smartbond_is_sleep_allowed(const struct device *dev) { struct dma_smartbond_data *data = dev->data; for (int i = 0; i < data->dma_ctx.dma_channels; i++) { if (atomic_test_bit(data->dma_ctx.atomic, i)) { /* Abort sleeping if at least one dma channel is acquired */ return false; } } return true; } static int dma_smartbond_pm_action(const struct device *dev, enum pm_device_action action) { int ret = 0; switch (action) { case PM_DEVICE_ACTION_SUSPEND: /* * When we reach this point there should be no ongoing DMA transfers. * However, a DMA channel can still be acquired and so the configured * channel(s) should be retained. To avoid reconfiguring DMA or * read/write DMA channels' registers we assume that sleep is not allowed * as long as all DMA channels are released. */ if (!dma_smartbond_is_sleep_allowed(dev)) { ret = -EBUSY; } /* * No need to perform any actions here as the DMA engine * should already be turned off. */ break; case PM_DEVICE_ACTION_RESUME: /* * No need to perform any actions here as the DMA engine * will be configured by application explicitly. */ break; default: return -ENOTSUP; } return ret; } #endif static int dma_smartbond_init(const struct device *dev) { #ifdef CONFIG_DMA_64BIT LOG_ERR("64-bit addressing mode is not supported\n"); return -ENOSYS; #endif int idx; struct dma_smartbond_data *data; data = dev->data; data->dma_ctx.magic = DMA_MAGIC; data->dma_ctx.dma_channels = DMA_CHANNELS_COUNT; data->dma_ctx.atomic = data->channels_atomic; /* Make sure that all channels are disabled. 
*/ for (idx = 0; idx < DMA_CHANNELS_COUNT; idx++) { dma_smartbond_set_channel_status(dev, idx, false); data->channel_data[idx].is_dma_configured = false; } IRQ_CONNECT(SMARTBOND_IRQN, SMARTBOND_IRQ_PRIO, smartbond_dma_isr, DEVICE_DT_INST_GET(0), 0); return 0; } #define SMARTBOND_DMA_INIT(inst) \ BUILD_ASSERT((inst) == 0, "multiple instances are not supported"); \ \ PM_DEVICE_DT_INST_DEFINE(inst, dma_smartbond_pm_action); \ \ static struct dma_smartbond_data dma_smartbond_data_ ## inst; \ \ DEVICE_DT_INST_DEFINE(0, dma_smartbond_init, \ PM_DEVICE_DT_INST_GET(inst), \ &dma_smartbond_data_ ## inst, NULL, \ POST_KERNEL, \ CONFIG_DMA_INIT_PRIORITY, \ &dma_smartbond_driver_api); DT_INST_FOREACH_STATUS_OKAY(SMARTBOND_DMA_INIT) ```
/content/code_sandbox/drivers/dma/dma_smartbond.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,143
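A sketch of a `dma_config()` call that satisfies the checks in `dma_smartbond_config()` above: equal source/destination data sizes and burst lengths, a block size that is a multiple of bus width times burst length, and word-aligned buffers. The channel index and buffer sizes are illustrative assumptions:

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_smartbond.h>

/* 4-byte bus width with bursts of 4 beats: block_size must be a multiple
 * of 16 and both addresses word-aligned, per the driver checks above.
 */
static uint32_t src[64] __aligned(4);
static uint32_t dst[64] __aligned(4);

int smartbond_m2m_example(const struct device *dma)
{
	struct dma_block_config block = {
		.source_address = (uint32_t)(uintptr_t)src,
		.dest_address = (uint32_t)(uintptr_t)dst,
		.block_size = sizeof(src),	/* 256 = multiple of 4 * 4 */
		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,	/* DREQ_MODE_SW: starts immediately */
		.dma_slot = DMA_SMARTBOND_TRIG_MUX_NONE,/* no peripheral trigger */
		.source_data_size = 4,		/* must equal dest_data_size */
		.dest_data_size = 4,
		.source_burst_length = 4,	/* must equal dest_burst_length */
		.dest_burst_length = 4,
		.block_count = 1,		/* only a single block is supported */
		.head_block = &block,
	};
	int ret;

	/* Any non-secure channel works here; channel 0 is an example. */
	ret = dma_config(dma, 0, &cfg);
	if (ret < 0) {
		return ret;
	}
	return dma_start(dma, 0);
}
```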
```unknown
# Smartbond DMA Accelerator Configuration Options

config DMA_SMARTBOND
	bool "Smartbond DMA Accelerator Driver"
	depends on DT_HAS_RENESAS_SMARTBOND_DMA_ENABLED
	default y
	help
	  Enable Smartbond DMA Accelerator Driver
```
/content/code_sandbox/drivers/dma/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
55
```unknown
config DMA_NXP_EDMA
	bool "NXP enhanced Direct Memory Access (eDMA) driver"
	default y
	depends on DT_HAS_NXP_EDMA_ENABLED
	help
	  Enable driver for NXP's eDMA IP.

if DMA_NXP_EDMA

config DMA_NXP_EDMA_ALIGN
	int "Alignment (in bytes) required for the transfers"
	default 8
	help
	  Use this to set the alignment (in bytes) used by entities
	  employing this driver to adjust the addresses and sizes of the
	  memory regions involved in the transfer process. This value
	  needs to match one of the possible values for SSIZE and DSIZE,
	  otherwise the driver will return an error upon configuration.

config DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ
	bool "Set if CPU should be interrupted when CITER = BITER / 2"
	default n
	help
	  Enable this configuration if the CPU should be interrupted when
	  CITER = BITER / 2. Using this, the CPU will be interrupted when
	  CITER = BITER and when CITER = BITER / 2.

endif # DMA_NXP_EDMA
```
/content/code_sandbox/drivers/dma/Kconfig.nxp_edma
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
252
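A small sketch of what honoring `DMA_NXP_EDMA_ALIGN` looks like on the client side; the buffer name and size are illustrative. Both the address and the size of a transfer region must honor the configured alignment, which in turn has to be a legal SSIZE/DSIZE value (8 is the default):

```c
#include <zephyr/kernel.h>

#define EDMA_ALIGN	CONFIG_DMA_NXP_EDMA_ALIGN

/* Round the requested payload size up to the alignment and align the
 * buffer's start address as well.
 */
static uint8_t edma_buf[ROUND_UP(1000, EDMA_ALIGN)] __aligned(EDMA_ALIGN);

BUILD_ASSERT((sizeof(edma_buf) % EDMA_ALIGN) == 0,
	     "eDMA buffer size must be a multiple of the configured alignment");
```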
```c
/*
 *
 */

#define DT_DRV_COMPAT intel_adsp_hda_link_out

#include <zephyr/drivers/dma.h>
#include "dma_intel_adsp_hda.h"

#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_intel_adsp_hda_dma_link_out);

static const struct dma_driver_api intel_adsp_hda_dma_link_out_api = {
	.config = intel_adsp_hda_dma_link_out_config,
	.reload = intel_adsp_hda_dma_link_reload,
	.start = intel_adsp_hda_dma_start,
	.stop = intel_adsp_hda_dma_stop,
	.suspend = intel_adsp_hda_dma_stop,
	.get_status = intel_adsp_hda_dma_status,
	.get_attribute = intel_adsp_hda_dma_get_attribute,
	.chan_filter = intel_adsp_hda_dma_chan_filter,
};

#define INTEL_ADSP_HDA_DMA_LINK_OUT_INIT(inst)						\
	static const struct intel_adsp_hda_dma_cfg intel_adsp_hda_dma##inst##_config = {\
		.base = DT_INST_REG_ADDR(inst),						\
		.regblock_size = DT_INST_REG_SIZE(inst),				\
		.dma_channels = DT_INST_PROP(inst, dma_channels),			\
		.direction = MEMORY_TO_PERIPHERAL,					\
		.irq_config = NULL							\
	};										\
											\
	static struct intel_adsp_hda_dma_data intel_adsp_hda_dma##inst##_data = {};	\
											\
	PM_DEVICE_DT_INST_DEFINE(inst, intel_adsp_hda_dma_pm_action);			\
											\
	DEVICE_DT_INST_DEFINE(inst, &intel_adsp_hda_dma_init,				\
			      PM_DEVICE_DT_INST_GET(inst),				\
			      &intel_adsp_hda_dma##inst##_data,				\
			      &intel_adsp_hda_dma##inst##_config, POST_KERNEL,		\
			      CONFIG_DMA_INIT_PRIORITY,					\
			      &intel_adsp_hda_dma_link_out_api);

DT_INST_FOREACH_STATUS_OKAY(INTEL_ADSP_HDA_DMA_LINK_OUT_INIT)
```
/content/code_sandbox/drivers/dma/dma_intel_adsp_hda_link_out.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
442
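Since this instance wires up `.chan_filter`, clients would normally obtain a channel through `dma_request_channel()` before configuring it. A sketch with a hypothetical node label and no filter parameter (the exact filter semantics are up to `intel_adsp_hda_dma_chan_filter()`, which is not shown here):

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Hypothetical node label; actual ADSP devicetrees may name it differently. */
static const struct device *const hda_link_out =
	DEVICE_DT_GET(DT_NODELABEL(hda_link_out));

int hda_link_out_acquire(void)
{
	/* Let the driver's chan_filter pick a suitable free channel. */
	int chan = dma_request_channel(hda_link_out, NULL);

	if (chan < 0) {
		return chan;
	}

	/* ...configure and start it through the API table above, e.g.
	 * dma_config(hda_link_out, chan, &cfg) followed by
	 * dma_start(hda_link_out, chan), then release when done.
	 */
	dma_release_channel(hda_link_out, chan);
	return 0;
}
```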
```unknown
config DMA_GD32
	bool "Gigadevice GD32 DMA driver"
	default y
	depends on DT_HAS_GD_GD32_DMA_ENABLED || DT_HAS_GD_GD32_DMA_V1_ENABLED
	select USE_GD32_DMA
	help
	  DMA driver for GigaDevice GD32 series MCUs.
```
/content/code_sandbox/drivers/dma/Kconfig.gd32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
69
```unknown
config DMA_EMUL
	bool "Emulated DMA driver [EXPERIMENTAL]"
	depends on DT_HAS_ZEPHYR_DMA_EMUL_ENABLED
	select EXPERIMENTAL
	help
	  Emulated DMA Driver
```
/content/code_sandbox/drivers/dma/Kconfig.emul
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
46
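A sketch of test-style usage for the emulated DMA driver this option enables (its source appears further down in this dump): one memory-to-memory block with matching burst lengths, completion delivered through the callback, which the current work handler invokes unconditionally and so must be provided. The node label is an assumption:

```c
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Hypothetical node label for the zephyr,dma-emul instance. */
static const struct device *const dma_emul_dev =
	DEVICE_DT_GET(DT_NODELABEL(dma_emul0));

static uint8_t src[16] = "hello, dma-emul";
static uint8_t dst[16];

static void done_cb(const struct device *dev, void *user_data,
		    uint32_t channel, int status)
{
	if (status == DMA_STATUS_COMPLETE) {
		k_sem_give((struct k_sem *)user_data);
	}
}

int dma_emul_copy_example(void)
{
	struct k_sem done;
	struct dma_block_config block = {
		.source_address = (uintptr_t)src,
		.dest_address = (uintptr_t)dst,
		.block_size = sizeof(src),
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 1,
		.dest_data_size = 1,
		.source_burst_length = 4,	/* burst is the copy chunk size */
		.dest_burst_length = 4,		/* must match source, per config check */
		.block_count = 1,
		.head_block = &block,
		.dma_callback = done_cb,
		.user_data = &done,
	};
	int ret;

	k_sem_init(&done, 0, 1);

	ret = dma_config(dma_emul_dev, 0, &cfg);
	if (ret == 0) {
		ret = dma_start(dma_emul_dev, 0);
	}
	return ret ? ret : k_sem_take(&done, K_MSEC(100));
}
```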
```objective-c /* * */ #ifndef DMA_PL330_H #define DMA_PL330_H #include <zephyr/drivers/dma.h> #define DT_DRV_COMPAT arm_dma_pl330 /* * Max burst length and max burst size for 32bit system with * 128bit bus width for memory to memory data transfer * * Burst length is encoded in following format for pl330 * b0000 = 1 data transfer * b0001 = 2 data transfers * b0010 = 3 data transfers * . * . * b1111 = 16 data transfers * * Burst size is encoded in following format for pl330 * b000 = 1 byte * b001 = 2 bytes * b010 = 4 bytes * b011 = 8 bytes * b100 = 16 bytes * b101 = 32 bytes * b110 = 64 bytes * b111 = 128 bytes. */ #define MAX_BURST_LEN 0xf /* 16byte data */ #define MAX_BURST_SIZE_LOG2 4 /* * PL330 works only on 4GB boundary. * PL330 has 32bit registers for source and destination addresses */ #define PL330_MAX_OFFSET 0x100000000 /* PL330 supports max 16MB dma based on AXI bus size */ #define PL330_MAX_DMA_SIZE 0x1000000 /* Maximum possible values for PL330 ucode loop counters */ #define PL330_LOOP_COUNTER0_MAX 0x100 #define PL330_LOOP_COUNTER1_MAX 0x100 #define MAX_DMA_CHANNELS DT_INST_PROP(0, dma_channels) #define DMAC_PL330_CS0 0x100 #define DMAC_PL330_DBGSTATUS 0xd00 #define DMAC_PL330_DBGCMD 0xd04 #define DMAC_PL330_DBGINST0 0xd08 #define DMAC_PL330_DBGINST1 0xd0c /* * TIMEOUT value of 100000us is kept to cover all possible data * transfer sizes, with lesser time out value(10us) DMA channel * appears to be busy on FPGA/Emul environment. Ideally 100000us * timeout value should never hit. */ #define DMA_TIMEOUT_US 100000 #define CH_STATUS_MASK 0xf #define DATA_MASK 0xf #define DMA_INTSR1_SHIFT 24 #define DMA_INTSR0_SHIFT 16 #define DMA_INTSR0 0xa0 #define DMA_SECURE_SHIFT 17 #define DMA_CH_SHIFT 8 #define CONTROL_OFFSET 0x4 #define HIGHER_32_ADDR_MASK 0x0f #define DST_ADDR_SHIFT 0x4 #define MICROCODE_SIZE_MAX 0x400 #define TOTAL_MICROCODE_SIZE (MAX_DMA_CHANNELS * MICROCODE_SIZE_MAX) #define GET_MAX_DMA_SIZE(byte_width, burst_len) \ (PL330_LOOP_COUNTER0_MAX * PL330_LOOP_COUNTER1_MAX * \ (byte_width) * ((burst_len) + 1)) #define CC_SRCINC_SHIFT 0 #define CC_DSTINC_SHIFT 14 #define CC_SRCPRI_SHIFT 8 #define CC_DSTPRI_SHIFT 22 #define CC_DSTNS_SHIFT 23 #define CC_SRCBRSTLEN_SHIFT 4 #define CC_DSTBRSTLEN_SHIFT 18 #define CC_SRCBRSTSIZE_SHIFT 1 #define CC_DSTBRSTSIZE_SHIFT 15 #define CC_SRCCCTRL_SHIFT 11 #define CC_SRCCCTRL_MASK 0x7 #define CC_DSTCCTRL_SHIFT 25 #define CC_DRCCCTRL_MASK 0x7 #define CC_SWAP_SHIFT 28 #define SRC_PRI_NONSEC_VALUE 0x2 #define SRC_PRI_SEC_VALUE 0x0 #define OP_DMA_MOV 0xbc #define OP_DMA_LOOP_COUNT1 0x22 #define OP_DMA_LOOP 0x20 #define OP_DMA_LD 0x4 #define OP_DMA_ST 0x8 #define OP_DMA_SEV 0x34 #define OP_DMA_END 0x00 #define OP_DMA_LP_BK_JMP1 0x38 #define OP_DMA_LP_BK_JMP2 0x3c #define SZ_CMD_DMAMOV 0x6 enum dmamov_type { /* Source Address Register */ SAR = 0, /* Channel Control Register */ CCR, /* Destination Address Register */ DAR, }; /* Channel specific private data */ struct dma_pl330_ch_internal { uint64_t src_addr; uint64_t dst_addr; int src_burst_sz; uint32_t src_burst_len; int dst_burst_sz; uint32_t dst_burst_len; uint32_t trans_size; uint32_t dst_id; uint32_t src_id; uint32_t perip_type; uint32_t breq_only; uint32_t src_cache_ctrl; uint32_t dst_cache_ctrl; uint32_t dst_inc; uint32_t src_inc; int nonsec_mode; }; struct dma_pl330_ch_config { /* Channel configuration details */ uint64_t src_addr; enum dma_addr_adj src_addr_adj; uint64_t dst_addr; enum dma_addr_adj dst_addr_adj; enum dma_channel_direction direction; uint32_t trans_size; void *user_data; 
dma_callback_t dma_callback; mem_addr_t dma_exec_addr; struct k_mutex ch_mutex; int channel_active; /* Channel specific private data */ struct dma_pl330_ch_internal internal; }; struct dma_pl330_config { mem_addr_t mcode_base; mem_addr_t reg_base; #ifdef CONFIG_DMA_64BIT mem_addr_t control_reg_base; #endif }; struct dma_pl330_dev_data { struct dma_pl330_ch_config channels[MAX_DMA_CHANNELS]; }; #endif ```
/content/code_sandbox/drivers/dma/dma_pl330.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,219
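The burst encodings documented above can be sanity-checked standalone. The sketch below copies the relevant constants from this header (redefined locally so it compiles on its own) and assembles a channel-control word for the maximum burst settings; `GET_MAX_DMA_SIZE()` then reproduces the 16 MB cap (`PL330_MAX_DMA_SIZE`):

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the header's encodings: burst length field is (beats - 1),
 * burst size field is log2(bytes per beat).
 */
#define CC_SRCINC_SHIFT		0
#define CC_SRCBRSTSIZE_SHIFT	1
#define CC_SRCBRSTLEN_SHIFT	4
#define CC_DSTINC_SHIFT		14
#define CC_DSTBRSTSIZE_SHIFT	15
#define CC_DSTBRSTLEN_SHIFT	18

#define PL330_LOOP_COUNTER0_MAX	0x100
#define PL330_LOOP_COUNTER1_MAX	0x100

/* Max bytes a single ucode setup can move, as in the header's macro. */
#define GET_MAX_DMA_SIZE(byte_width, burst_len) \
	(PL330_LOOP_COUNTER0_MAX * PL330_LOOP_COUNTER1_MAX * \
	 (byte_width) * ((burst_len) + 1))

int main(void)
{
	uint32_t burst_len = 0xf;	/* MAX_BURST_LEN: 16 data transfers */
	uint32_t burst_sz_log2 = 4;	/* MAX_BURST_SIZE_LOG2: 16-byte beats */
	uint32_t ccr = 0;

	ccr |= 1U << CC_SRCINC_SHIFT;			/* incrementing source */
	ccr |= burst_sz_log2 << CC_SRCBRSTSIZE_SHIFT;
	ccr |= burst_len << CC_SRCBRSTLEN_SHIFT;
	ccr |= 1U << CC_DSTINC_SHIFT;			/* incrementing dest */
	ccr |= burst_sz_log2 << CC_DSTBRSTSIZE_SHIFT;
	ccr |= burst_len << CC_DSTBRSTLEN_SHIFT;

	/* 256 * 256 * 16 * 16 = 0x1000000 = PL330_MAX_DMA_SIZE */
	printf("CCR = 0x%08x, max transfer = %u bytes\n", (unsigned)ccr,
	       (unsigned)GET_MAX_DMA_SIZE(1U << burst_sz_log2, burst_len));
	return 0;
}
```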
```c /* * */ #include <errno.h> #include <stdint.h> #include <stdio.h> #include <zephyr/device.h> #include <zephyr/drivers/dma.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/pm/device.h> #include <zephyr/sys/util.h> #define DT_DRV_COMPAT zephyr_dma_emul #ifdef CONFIG_DMA_64BIT #define dma_addr_t uint64_t #else #define dma_addr_t uint32_t #endif enum dma_emul_channel_state { DMA_EMUL_CHANNEL_UNUSED, DMA_EMUL_CHANNEL_LOADED, DMA_EMUL_CHANNEL_STARTED, DMA_EMUL_CHANNEL_STOPPED, }; struct dma_emul_xfer_desc { struct dma_config config; }; struct dma_emul_work { const struct device *dev; uint32_t channel; struct k_work work; }; struct dma_emul_config { uint32_t channel_mask; size_t num_channels; size_t num_requests; size_t addr_align; size_t size_align; size_t copy_align; k_thread_stack_t *work_q_stack; size_t work_q_stack_size; int work_q_priority; /* points to an array of size num_channels */ struct dma_emul_xfer_desc *xfer; /* points to an array of size num_channels * num_requests */ struct dma_block_config *block; }; struct dma_emul_data { struct dma_context dma_ctx; atomic_t *channels_atomic; struct k_spinlock lock; struct k_work_q work_q; struct dma_emul_work work; }; static void dma_emul_work_handler(struct k_work *work); LOG_MODULE_REGISTER(dma_emul, CONFIG_DMA_LOG_LEVEL); static inline bool dma_emul_xfer_is_error_status(int status) { return status < 0; } static inline const char *const dma_emul_channel_state_to_string(enum dma_emul_channel_state state) { switch (state) { case DMA_EMUL_CHANNEL_UNUSED: return "UNUSED"; case DMA_EMUL_CHANNEL_LOADED: return "LOADED"; case DMA_EMUL_CHANNEL_STARTED: return "STARTED"; case DMA_EMUL_CHANNEL_STOPPED: return "STOPPED"; default: return "(invalid)"; }; } /* * Repurpose the "_reserved" field for keeping track of internal * channel state. * * Note: these must be called with data->lock locked! 
*/ static enum dma_emul_channel_state dma_emul_get_channel_state(const struct device *dev, uint32_t channel) { const struct dma_emul_config *config = dev->config; __ASSERT_NO_MSG(channel < config->num_channels); return (enum dma_emul_channel_state)config->xfer[channel].config._reserved; } static void dma_emul_set_channel_state(const struct device *dev, uint32_t channel, enum dma_emul_channel_state state) { const struct dma_emul_config *config = dev->config; LOG_DBG("setting channel %u state to %s", channel, dma_emul_channel_state_to_string(state)); __ASSERT_NO_MSG(channel < config->num_channels); __ASSERT_NO_MSG(state >= DMA_EMUL_CHANNEL_UNUSED && state <= DMA_EMUL_CHANNEL_STOPPED); config->xfer[channel].config._reserved = state; } static const char *dma_emul_xfer_config_to_string(const struct dma_config *cfg) { static char buffer[1024]; snprintf(buffer, sizeof(buffer), "{" "\n\tslot: %u" "\n\tchannel_direction: %u" "\n\tcomplete_callback_en: %u" "\n\terror_callback_dis: %u" "\n\tsource_handshake: %u" "\n\tdest_handshake: %u" "\n\tchannel_priority: %u" "\n\tsource_chaining_en: %u" "\n\tdest_chaining_en: %u" "\n\tlinked_channel: %u" "\n\tcyclic: %u" "\n\t_reserved: %u" "\n\tsource_data_size: %u" "\n\tdest_data_size: %u" "\n\tsource_burst_length: %u" "\n\tdest_burst_length: %u" "\n\tblock_count: %u" "\n\thead_block: %p" "\n\tuser_data: %p" "\n\tdma_callback: %p" "\n}", cfg->dma_slot, cfg->channel_direction, cfg->complete_callback_en, cfg->error_callback_dis, cfg->source_handshake, cfg->dest_handshake, cfg->channel_priority, cfg->source_chaining_en, cfg->dest_chaining_en, cfg->linked_channel, cfg->cyclic, cfg->_reserved, cfg->source_data_size, cfg->dest_data_size, cfg->source_burst_length, cfg->dest_burst_length, cfg->block_count, cfg->head_block, cfg->user_data, cfg->dma_callback); return buffer; } static const char *dma_emul_block_config_to_string(const struct dma_block_config *cfg) { static char buffer[1024]; snprintf(buffer, sizeof(buffer), "{" "\n\tsource_address: %p" "\n\tdest_address: %p" "\n\tsource_gather_interval: %u" "\n\tdest_scatter_interval: %u" "\n\tdest_scatter_count: %u" "\n\tsource_gather_count: %u" "\n\tblock_size: %u" "\n\tnext_block: %p" "\n\tsource_gather_en: %u" "\n\tdest_scatter_en: %u" "\n\tsource_addr_adj: %u" "\n\tdest_addr_adj: %u" "\n\tsource_reload_en: %u" "\n\tdest_reload_en: %u" "\n\tfifo_mode_control: %u" "\n\tflow_control_mode: %u" "\n\t_reserved: %u" "\n}", (void *)cfg->source_address, (void *)cfg->dest_address, cfg->source_gather_interval, cfg->dest_scatter_interval, cfg->dest_scatter_count, cfg->source_gather_count, cfg->block_size, cfg->next_block, cfg->source_gather_en, cfg->dest_scatter_en, cfg->source_addr_adj, cfg->dest_addr_adj, cfg->source_reload_en, cfg->dest_reload_en, cfg->fifo_mode_control, cfg->flow_control_mode, cfg->_reserved ); return buffer; } static void dma_emul_work_handler(struct k_work *work) { size_t i; size_t bytes; uint32_t channel; k_spinlock_key_t key; struct dma_block_config block; struct dma_config xfer_config; enum dma_emul_channel_state state; struct dma_emul_xfer_desc *xfer; struct dma_emul_work *dma_work = CONTAINER_OF(work, struct dma_emul_work, work); const struct device *dev = dma_work->dev; struct dma_emul_data *data = dev->data; const struct dma_emul_config *config = dev->config; channel = dma_work->channel; do { key = k_spin_lock(&data->lock); xfer = &config->xfer[channel]; /* * copy the dma_config so we don't have to worry about * it being asynchronously updated. 
*/ memcpy(&xfer_config, &xfer->config, sizeof(xfer_config)); k_spin_unlock(&data->lock, key); LOG_DBG("processing xfer %p for channel %u", xfer, channel); for (i = 0; i < xfer_config.block_count; ++i) { LOG_DBG("processing block %zu", i); key = k_spin_lock(&data->lock); /* * copy the dma_block_config so we don't have to worry about * it being asynchronously updated. */ memcpy(&block, &config->block[channel * config->num_requests + xfer_config.dma_slot + i], sizeof(block)); k_spin_unlock(&data->lock, key); /* transfer data in bursts */ for (bytes = MIN(block.block_size, xfer_config.dest_burst_length); bytes > 0; block.block_size -= bytes, block.source_address += bytes, block.dest_address += bytes, bytes = MIN(block.block_size, xfer_config.dest_burst_length)) { key = k_spin_lock(&data->lock); state = dma_emul_get_channel_state(dev, channel); k_spin_unlock(&data->lock, key); if (state == DMA_EMUL_CHANNEL_STOPPED) { LOG_DBG("asynchronously canceled"); if (!xfer_config.error_callback_dis) { xfer_config.dma_callback(dev, xfer_config.user_data, channel, -ECANCELED); } else { LOG_DBG("error_callback_dis is not set (async " "cancel)"); } goto out; } __ASSERT_NO_MSG(state == DMA_EMUL_CHANNEL_STARTED); /* * FIXME: create a backend API (memcpy, TCP/UDP socket, etc) * Simple copy for now */ memcpy((void *)(uintptr_t)block.dest_address, (void *)(uintptr_t)block.source_address, bytes); } } key = k_spin_lock(&data->lock); dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STOPPED); k_spin_unlock(&data->lock, key); /* FIXME: tests/drivers/dma/chan_blen_transfer/ does not set complete_callback_en */ if (true) { xfer_config.dma_callback(dev, xfer_config.user_data, channel, DMA_STATUS_COMPLETE); } else { LOG_DBG("complete_callback_en is not set"); } if (xfer_config.source_chaining_en || xfer_config.dest_chaining_en) { LOG_DBG("%s(): Linked channel %u -> %u", __func__, channel, xfer_config.linked_channel); __ASSERT_NO_MSG(channel != xfer_config.linked_channel); channel = xfer_config.linked_channel; } else { LOG_DBG("%s(): done!", __func__); break; } } while (true); out: return; } static bool dma_emul_config_valid(const struct device *dev, uint32_t channel, const struct dma_config *xfer_config) { size_t i; struct dma_block_config *block; const struct dma_emul_config *config = dev->config; if (xfer_config->dma_slot >= config->num_requests) { LOG_ERR("invalid dma_slot %u", xfer_config->dma_slot); return false; } if (channel >= config->num_channels) { LOG_ERR("invalid DMA channel %u", channel); return false; } if (xfer_config->dest_burst_length != xfer_config->source_burst_length) { LOG_ERR("burst length does not agree. source: %u dest: %u ", xfer_config->source_burst_length, xfer_config->dest_burst_length); return false; } for (i = 0, block = xfer_config->head_block; i < xfer_config->block_count; ++i, block = block->next_block) { if (block == NULL) { LOG_ERR("block %zu / %u is NULL", i + 1, xfer_config->block_count); return false; } if (i >= config->num_requests) { LOG_ERR("not enough slots to store block %zu / %u", i + 1, xfer_config->block_count); return false; } } /* * FIXME: * * Need to verify all of the fields in struct dma_config with different DT * configurations so that the driver model is at least consistent and * verified by CI. 
*/ return true; } static int dma_emul_configure(const struct device *dev, uint32_t channel, struct dma_config *xfer_config) { size_t i; int ret = 0; size_t block_idx; k_spinlock_key_t key; struct dma_block_config *block; struct dma_block_config *block_it; enum dma_emul_channel_state state; struct dma_emul_xfer_desc *xfer; struct dma_emul_data *data = dev->data; const struct dma_emul_config *config = dev->config; if (!dma_emul_config_valid(dev, channel, xfer_config)) { return -EINVAL; } key = k_spin_lock(&data->lock); xfer = &config->xfer[channel]; LOG_DBG("%s():\nchannel: %u\nconfig: %s", __func__, channel, dma_emul_xfer_config_to_string(xfer_config)); block_idx = channel * config->num_requests + xfer_config->dma_slot; block = &config->block[channel * config->num_requests + xfer_config->dma_slot]; state = dma_emul_get_channel_state(dev, channel); switch (state) { case DMA_EMUL_CHANNEL_UNUSED: case DMA_EMUL_CHANNEL_STOPPED: /* copy the configuration into the driver */ memcpy(&xfer->config, xfer_config, sizeof(xfer->config)); /* copy all blocks into slots */ for (i = 0, block_it = xfer_config->head_block; i < xfer_config->block_count; ++i, block_it = block_it->next_block, ++block) { __ASSERT_NO_MSG(block_it != NULL); LOG_DBG("block_config %s", dma_emul_block_config_to_string(block_it)); memcpy(block, block_it, sizeof(*block)); } dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_LOADED); break; default: LOG_ERR("attempt to configure DMA in state %d", state); ret = -EBUSY; } k_spin_unlock(&data->lock, key); return ret; } static int dma_emul_reload(const struct device *dev, uint32_t channel, dma_addr_t src, dma_addr_t dst, size_t size) { LOG_DBG("%s()", __func__); return -ENOSYS; } static int dma_emul_start(const struct device *dev, uint32_t channel) { int ret = 0; k_spinlock_key_t key; enum dma_emul_channel_state state; struct dma_emul_xfer_desc *xfer; struct dma_config *xfer_config; struct dma_emul_data *data = dev->data; const struct dma_emul_config *config = dev->config; LOG_DBG("%s(channel: %u)", __func__, channel); if (channel >= config->num_channels) { return -EINVAL; } key = k_spin_lock(&data->lock); xfer = &config->xfer[channel]; state = dma_emul_get_channel_state(dev, channel); switch (state) { case DMA_EMUL_CHANNEL_STARTED: /* start after being started already is a no-op */ break; case DMA_EMUL_CHANNEL_LOADED: case DMA_EMUL_CHANNEL_STOPPED: data->work.channel = channel; while (true) { dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STARTED); xfer_config = &config->xfer[channel].config; if (xfer_config->source_chaining_en || xfer_config->dest_chaining_en) { LOG_DBG("%s(): Linked channel %u -> %u", __func__, channel, xfer_config->linked_channel); channel = xfer_config->linked_channel; } else { break; } } ret = k_work_submit_to_queue(&data->work_q, &data->work.work); ret = (ret < 0) ? 
ret : 0; break; default: LOG_ERR("attempt to start dma in invalid state %d", state); ret = -EIO; break; } k_spin_unlock(&data->lock, key); return ret; } static int dma_emul_stop(const struct device *dev, uint32_t channel) { k_spinlock_key_t key; struct dma_emul_data *data = dev->data; key = k_spin_lock(&data->lock); dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STOPPED); k_spin_unlock(&data->lock, key); return 0; } static int dma_emul_suspend(const struct device *dev, uint32_t channel) { LOG_DBG("%s()", __func__); return -ENOSYS; } static int dma_emul_resume(const struct device *dev, uint32_t channel) { LOG_DBG("%s()", __func__); return -ENOSYS; } static int dma_emul_get_status(const struct device *dev, uint32_t channel, struct dma_status *status) { LOG_DBG("%s()", __func__); return -ENOSYS; } static int dma_emul_get_attribute(const struct device *dev, uint32_t type, uint32_t *value) { LOG_DBG("%s()", __func__); return -ENOSYS; } static bool dma_emul_chan_filter(const struct device *dev, int channel, void *filter_param) { bool success; k_spinlock_key_t key; struct dma_emul_data *data = dev->data; key = k_spin_lock(&data->lock); /* lets assume the struct dma_context handles races properly */ success = dma_emul_get_channel_state(dev, channel) == DMA_EMUL_CHANNEL_UNUSED; k_spin_unlock(&data->lock, key); return success; } static const struct dma_driver_api dma_emul_driver_api = { .config = dma_emul_configure, .reload = dma_emul_reload, .start = dma_emul_start, .stop = dma_emul_stop, .suspend = dma_emul_suspend, .resume = dma_emul_resume, .get_status = dma_emul_get_status, .get_attribute = dma_emul_get_attribute, .chan_filter = dma_emul_chan_filter, }; #ifdef CONFIG_PM_DEVICE static int dma_emul_pm_device_pm_action(const struct device *dev, enum pm_device_action action) { ARG_UNUSED(dev); ARG_UNUSED(action); return 0; } #endif static int dma_emul_init(const struct device *dev) { struct dma_emul_data *data = dev->data; const struct dma_emul_config *config = dev->config; data->work.dev = dev; data->dma_ctx.magic = DMA_MAGIC; data->dma_ctx.dma_channels = config->num_channels; data->dma_ctx.atomic = data->channels_atomic; k_work_queue_init(&data->work_q); k_work_init(&data->work.work, dma_emul_work_handler); k_work_queue_start(&data->work_q, config->work_q_stack, config->work_q_stack_size, config->work_q_priority, NULL); return 0; } #define DMA_EMUL_INST_HAS_PROP(_inst, _prop) DT_NODE_HAS_PROP(DT_DRV_INST(_inst), _prop) #define DMA_EMUL_INST_CHANNEL_MASK(_inst) \ DT_INST_PROP_OR(_inst, dma_channel_mask, \ DMA_EMUL_INST_HAS_PROP(_inst, dma_channels) \ ? ((DT_INST_PROP(_inst, dma_channels) > 0) \ ? BIT_MASK(DT_INST_PROP_OR(_inst, dma_channels, 0)) \ : 0) \ : 0) #define DMA_EMUL_INST_NUM_CHANNELS(_inst) \ DT_INST_PROP_OR(_inst, dma_channels, \ DMA_EMUL_INST_HAS_PROP(_inst, dma_channel_mask) \ ? 
POPCOUNT(DT_INST_PROP_OR(_inst, dma_channel_mask, 0)) \ : 0) #define DMA_EMUL_INST_NUM_REQUESTS(_inst) DT_INST_PROP_OR(_inst, dma_requests, 1) #define DEFINE_DMA_EMUL(_inst) \ BUILD_ASSERT(DMA_EMUL_INST_HAS_PROP(_inst, dma_channel_mask) || \ DMA_EMUL_INST_HAS_PROP(_inst, dma_channels), \ "at least one of dma_channel_mask or dma_channels must be provided"); \ \ BUILD_ASSERT(DMA_EMUL_INST_NUM_CHANNELS(_inst) <= 32, "invalid dma-channels property"); \ \ static K_THREAD_STACK_DEFINE(work_q_stack_##_inst, DT_INST_PROP(_inst, stack_size)); \ \ static struct dma_emul_xfer_desc \ dma_emul_xfer_desc_##_inst[DMA_EMUL_INST_NUM_CHANNELS(_inst)]; \ \ static struct dma_block_config \ dma_emul_block_config_##_inst[DMA_EMUL_INST_NUM_CHANNELS(_inst) * \ DMA_EMUL_INST_NUM_REQUESTS(_inst)]; \ \ static const struct dma_emul_config dma_emul_config_##_inst = { \ .channel_mask = DMA_EMUL_INST_CHANNEL_MASK(_inst), \ .num_channels = DMA_EMUL_INST_NUM_CHANNELS(_inst), \ .num_requests = DMA_EMUL_INST_NUM_REQUESTS(_inst), \ .addr_align = DT_INST_PROP_OR(_inst, dma_buf_addr_alignment, 1), \ .size_align = DT_INST_PROP_OR(_inst, dma_buf_size_alignment, 1), \ .copy_align = DT_INST_PROP_OR(_inst, dma_copy_alignment, 1), \ .work_q_stack = (k_thread_stack_t *)&work_q_stack_##_inst, \ .work_q_stack_size = K_THREAD_STACK_SIZEOF(work_q_stack_##_inst), \ .work_q_priority = DT_INST_PROP_OR(_inst, priority, 0), \ .xfer = dma_emul_xfer_desc_##_inst, \ .block = dma_emul_block_config_##_inst, \ }; \ \ static ATOMIC_DEFINE(dma_emul_channels_atomic_##_inst, \ DT_INST_PROP_OR(_inst, dma_channels, 0)); \ \ static struct dma_emul_data dma_emul_data_##_inst = { \ .channels_atomic = dma_emul_channels_atomic_##_inst, \ }; \ \ PM_DEVICE_DT_INST_DEFINE(_inst, dma_emul_pm_device_pm_action); \ \ DEVICE_DT_INST_DEFINE(_inst, dma_emul_init, PM_DEVICE_DT_INST_GET(_inst), \ &dma_emul_data_##_inst, &dma_emul_config_##_inst, POST_KERNEL, \ CONFIG_DMA_INIT_PRIORITY, &dma_emul_driver_api); DT_INST_FOREACH_STATUS_OKAY(DEFINE_DMA_EMUL) ```
/content/code_sandbox/drivers/dma/dma_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,895
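A minimal caller-side sketch for the emulated controller above, using only the generic Zephyr DMA API (`dma_config()`, `dma_start()`). The `dma0` node label, channel 0, and the buffer sizes are assumptions for illustration, not part of the driver; note the emulator requires equal source and destination burst lengths and copies data in destination-burst-sized chunks.

```c
/* Hypothetical usage sketch, not part of dma_emul.c itself. */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/kernel.h>

static K_SEM_DEFINE(xfer_done, 0, 1);
static uint8_t src_buf[64];
static uint8_t dst_buf[64];

static void xfer_cb(const struct device *dev, void *user_data,
		    uint32_t channel, int status)
{
	/* status < 0 reports an error; DMA_STATUS_COMPLETE on success */
	if (status >= 0) {
		k_sem_give(&xfer_done);
	}
}

int emul_copy_example(void)
{
	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma0));
	struct dma_block_config block = {
		.source_address = (uintptr_t)src_buf,
		.dest_address = (uintptr_t)dst_buf,
		.block_size = sizeof(src_buf),
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 1,
		.dest_data_size = 1,
		.source_burst_length = 8,	/* must match dest */
		.dest_burst_length = 8,		/* emulator copy chunk size */
		.block_count = 1,
		.head_block = &block,
		.dma_callback = xfer_cb,
	};
	int ret = dma_config(dma, 0, &cfg);

	if (ret == 0) {
		ret = dma_start(dma, 0);
	}
	if (ret == 0) {
		k_sem_take(&xfer_done, K_FOREVER);
	}
	return ret;
}
```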
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_DMA_DMA_DW_COMMON_H_ #define ZEPHYR_DRIVERS_DMA_DMA_DW_COMMON_H_ #include <zephyr/sys/atomic.h> #include <zephyr/drivers/dma.h> #ifdef __cplusplus extern "C" { #endif #define MASK(b_hi, b_lo) \ (((1ULL << ((b_hi) - (b_lo) + 1ULL)) - 1ULL) << (b_lo)) #define SET_BIT(b, x) (((x) & 1) << (b)) #define SET_BITS(b_hi, b_lo, x) \ (((x) & ((1ULL << ((b_hi) - (b_lo) + 1ULL)) - 1ULL)) << (b_lo)) #define DW_MAX_CHAN 8 #define DW_CHAN_COUNT CONFIG_DMA_DW_CHANNEL_COUNT #define DW_CH_SIZE 0x58 #define DW_CHAN_OFFSET(chan) (DW_CH_SIZE * chan) #define DW_ADDR_MASK_32 BIT_MASK(32) #define DW_ADDR_RIGHT_SHIFT 32 #define DW_SAR(chan) \ (0x0000 + DW_CHAN_OFFSET(chan)) #define DW_DAR(chan) \ (0x0008 + DW_CHAN_OFFSET(chan)) #define DW_LLP(chan) \ (0x0010 + DW_CHAN_OFFSET(chan)) #define DW_CTRL_LOW(chan) \ (0x0018 + DW_CHAN_OFFSET(chan)) #define DW_CTRL_HIGH(chan) \ (0x001C + DW_CHAN_OFFSET(chan)) #define DW_CFG_LOW(chan) \ (0x0040 + DW_CHAN_OFFSET(chan)) #define DW_CFG_HIGH(chan) \ (0x0044 + DW_CHAN_OFFSET(chan)) #define DW_DSR(chan) \ (0x0050 + DW_CHAN_OFFSET(chan)) #ifdef CONFIG_DMA_64BIT #define DW_SAR_HI(chan) \ (0x0004 + DW_CHAN_OFFSET(chan)) #define DW_DAR_HI(chan) \ (0x000C + DW_CHAN_OFFSET(chan)) #endif /* registers */ #define DW_RAW_TFR 0x02C0 #define DW_RAW_BLOCK 0x02C8 #define DW_RAW_SRC_TRAN 0x02D0 #define DW_RAW_DST_TRAN 0x02D8 #define DW_RAW_ERR 0x02E0 #define DW_STATUS_TFR 0x02E8 #define DW_STATUS_BLOCK 0x02F0 #define DW_STATUS_SRC_TRAN 0x02F8 #define DW_STATUS_DST_TRAN 0x0300 #define DW_STATUS_ERR 0x0308 #define DW_MASK_TFR 0x0310 #define DW_MASK_BLOCK 0x0318 #define DW_MASK_SRC_TRAN 0x0320 #define DW_MASK_DST_TRAN 0x0328 #define DW_MASK_ERR 0x0330 #define DW_CLEAR_TFR 0x0338 #define DW_CLEAR_BLOCK 0x0340 #define DW_CLEAR_SRC_TRAN 0x0348 #define DW_CLEAR_DST_TRAN 0x0350 #define DW_CLEAR_ERR 0x0358 #define DW_INTR_STATUS 0x0360 #define DW_DMA_CFG 0x0398 #define DW_DMA_CHAN_EN 0x03A0 #define DW_FIFO_PART0_LO 0x400 #define DW_FIFO_PART0_HI 0x404 #define DW_FIFO_PART1_LO 0x408 #define DW_FIFO_PART1_HI 0x40C /* channel bits */ #define DW_CHAN_WRITE_EN_ALL MASK(2 * DW_MAX_CHAN - 1, DW_MAX_CHAN) #define DW_CHAN_WRITE_EN(chan) BIT((chan) + DW_MAX_CHAN) #define DW_CHAN_ALL MASK(DW_MAX_CHAN - 1, 0) #define DW_CHAN(chan) BIT(chan) #define DW_CHAN_MASK_ALL DW_CHAN_WRITE_EN_ALL #define DW_CHAN_MASK(chan) DW_CHAN_WRITE_EN(chan) #define DW_CHAN_UNMASK_ALL (DW_CHAN_WRITE_EN_ALL | DW_CHAN_ALL) #define DW_CHAN_UNMASK(chan) (DW_CHAN_WRITE_EN(chan) | DW_CHAN(chan)) /* CFG_LO */ #define DW_CFGL_RELOAD_DST BIT(31) #define DW_CFGL_RELOAD_SRC BIT(30) #define DW_CFGL_DRAIN BIT(10) /* For Intel GPDMA variant only */ #define DW_CFGL_SRC_SW_HS BIT(10) /* For Synopsys variant only */ #define DW_CFGL_DST_SW_HS BIT(11) /* For Synopsys variant only */ #define DW_CFGL_FIFO_EMPTY BIT(9) #define DW_CFGL_SUSPEND BIT(8) #define DW_CFGL_CTL_HI_UPD_EN BIT(5) /* CFG_HI */ #define DW_CFGH_DST_PER_EXT(x) SET_BITS(31, 30, x) #define DW_CFGH_SRC_PER_EXT(x) SET_BITS(29, 28, x) #define DW_CFGH_DST_PER(x) SET_BITS(7, 4, x) #define DW_CFGH_SRC_PER(x) SET_BITS(3, 0, x) #define DW_CFGH_DST(x) \ (DW_CFGH_DST_PER_EXT((x) >> 4) | DW_CFGH_DST_PER(x)) #define DW_CFGH_SRC(x) \ (DW_CFGH_SRC_PER_EXT((x) >> 4) | DW_CFGH_SRC_PER(x)) /* CTL_LO */ #define DW_CTLL_RELOAD_DST BIT(31) #define DW_CTLL_RELOAD_SRC BIT(30) #define DW_CTLL_LLP_S_EN BIT(28) #define DW_CTLL_LLP_D_EN BIT(27) #define DW_CTLL_SMS(x) SET_BIT(25, x) #define DW_CTLL_DMS(x) SET_BIT(23, x) #define DW_CTLL_FC_P2P SET_BITS(21, 20, 3) #define 
DW_CTLL_FC_P2M SET_BITS(21, 20, 2) #define DW_CTLL_FC_M2P SET_BITS(21, 20, 1) #define DW_CTLL_FC_M2M SET_BITS(21, 20, 0) #define DW_CTLL_D_SCAT_EN BIT(18) #define DW_CTLL_S_GATH_EN BIT(17) #define DW_CTLL_SRC_MSIZE(x) SET_BITS(16, 14, x) #define DW_CTLL_DST_MSIZE(x) SET_BITS(13, 11, x) #define DW_CTLL_SRC_FIX SET_BITS(10, 9, 2) #define DW_CTLL_SRC_DEC SET_BITS(10, 9, 1) #define DW_CTLL_SRC_INC SET_BITS(10, 9, 0) #define DW_CTLL_DST_FIX SET_BITS(8, 7, 2) #define DW_CTLL_DST_DEC SET_BITS(8, 7, 1) #define DW_CTLL_DST_INC SET_BITS(8, 7, 0) #define DW_CTLL_SRC_WIDTH(x) SET_BITS(6, 4, x) #define DW_CTLL_DST_WIDTH(x) SET_BITS(3, 1, x) #define DW_CTLL_INT_EN BIT(0) #define DW_CTLL_SRC_WIDTH_MASK MASK(6, 4) #define DW_CTLL_SRC_WIDTH_SHIFT 4 #define DW_CTLL_DST_WIDTH_MASK MASK(3, 1) #define DW_CTLL_DST_WIDTH_SHIFT 1 /* CTL_HI */ #define DW_CTLH_CLASS(x) SET_BITS(31, 29, x) #define DW_CTLH_WEIGHT(x) SET_BITS(28, 18, x) #define DW_CTLH_DONE(x) SET_BIT(17, x) #define DW_CTLH_BLOCK_TS_MASK MASK(16, 0) /* DSR */ #define DW_DSR_DSC(x) SET_BITS(31, 20, x) #define DW_DSR_DSI(x) SET_BITS(19, 0, x) /* FIFO_PART */ #define DW_FIFO_SIZE 0x80 #define DW_FIFO_UPD BIT(26) #define DW_FIFO_CHx(x) SET_BITS(25, 13, x) #define DW_FIFO_CHy(x) SET_BITS(12, 0, x) /* number of tries to wait for reset */ #define DW_DMA_CFG_TRIES 10000 /* channel drain timeout in microseconds */ #define DW_DMA_TIMEOUT 1333 /* min number of elems for config with irq disabled */ #define DW_DMA_CFG_NO_IRQ_MIN_ELEMS 3 #define DW_DMA_CHANNEL_REGISTER_OFFSET_END 0x50 #define DW_DMA_IP_REGISTER_OFFSET_END 0x418 #define DW_DMA_IP_REGISTER_OFFSET_START 0x2C0 /* linked list item address */ #define DW_DMA_LLI_ADDRESS(lli, dir) \ (((dir) == MEMORY_TO_PERIPHERAL) ? ((lli)->sar) : ((lli)->dar)) /* TODO: add FIFO sizes */ struct dw_chan_arbit_data { uint16_t class; uint16_t weight; }; struct dw_drv_plat_data { struct dw_chan_arbit_data chan[DW_CHAN_COUNT]; }; /* DMA descriptor used by HW */ struct dw_lli { #ifdef CONFIG_DMA_64BIT uint64_t sar; uint64_t dar; #else uint32_t sar; uint32_t dar; #endif uint32_t llp; uint32_t ctrl_lo; uint32_t ctrl_hi; uint32_t sstat; uint32_t dstat; /* align to 32 bytes to not cross cache line * in case of more than two items */ uint32_t reserved; } __packed; /* pointer data for DW DMA buffer */ struct dw_dma_ptr_data { uint32_t current_ptr; uint32_t start_ptr; uint32_t end_ptr; uint32_t hw_ptr; uint32_t buffer_bytes; }; /* State tracking for each channel */ enum dw_dma_state { DW_DMA_IDLE, DW_DMA_PREPARED, DW_DMA_SUSPENDED, DW_DMA_ACTIVE, }; /* data for each DMA channel */ struct dw_dma_chan_data { uint32_t direction; enum dw_dma_state state; struct dw_lli *lli; /* allocated array of LLI's */ uint32_t lli_count; /* number of lli's in the allocation */ struct dw_lli *lli_current; /* current LLI being used */ uint32_t cfg_lo; uint32_t cfg_hi; struct dw_dma_ptr_data ptr_data; /* pointer data */ dma_callback_t dma_blkcallback; void *blkuser_data; dma_callback_t dma_tfrcallback; void *tfruser_data; }; /* use array to get burst_elems for specific slot number setting. 
* the relation between msize and burst_elems should be * 2 ^ msize = burst_elems */ static const uint32_t burst_elems[] = {1, 2, 4, 8}; /* Device run time data */ struct dw_dma_dev_data { struct dma_context dma_ctx; struct dw_drv_plat_data *channel_data; struct dw_dma_chan_data chan[DW_CHAN_COUNT]; struct dw_lli lli_pool[DW_CHAN_COUNT][CONFIG_DMA_DW_LLI_POOL_SIZE] __aligned(64); ATOMIC_DEFINE(channels_atomic, DW_CHAN_COUNT); }; /* Device constant configuration parameters */ struct dw_dma_dev_cfg { uintptr_t base; void (*irq_config)(void); }; static ALWAYS_INLINE void dw_write(uintptr_t dma_base, uint32_t reg, uint32_t value) { *((volatile uint32_t *)(dma_base + reg)) = value; } static ALWAYS_INLINE uint32_t dw_read(uintptr_t dma_base, uint32_t reg) { return *((volatile uint32_t *)(dma_base + reg)); } int dw_dma_setup(const struct device *dev); int dw_dma_config(const struct device *dev, uint32_t channel, struct dma_config *cfg); int dw_dma_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size); int dw_dma_start(const struct device *dev, uint32_t channel); int dw_dma_stop(const struct device *dev, uint32_t channel); int dw_dma_suspend(const struct device *dev, uint32_t channel); int dw_dma_resume(const struct device *dev, uint32_t channel); void dw_dma_isr(const struct device *dev); int dw_dma_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat); #ifdef __cplusplus } #endif #endif /* ZEPHYR_DRIVERS_DMA_DMA_DW_COMMON_H_ */ ```
/content/code_sandbox/drivers/dma/dma_dw_common.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,652
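To make the register macros concrete, here is an illustrative composition of a CTL_LO word for a 32-bit-wide, memory-to-memory channel. This is only a sketch of how the bitfield helpers combine, assuming the usual DW width encoding (0 = 8-bit, 1 = 16-bit, 2 = 32-bit); the field mix a real channel needs depends on the platform integration and handshake wiring.

```c
#include <stdint.h>
#include "dma_dw_common.h"

static uint32_t dw_example_ctrl_lo(void)
{
	return DW_CTLL_FC_M2M |		/* flow control: memory to memory */
	       DW_CTLL_SRC_INC |	/* increment source address */
	       DW_CTLL_DST_INC |	/* increment destination address */
	       DW_CTLL_SRC_WIDTH(2) |	/* 32-bit source transfers */
	       DW_CTLL_DST_WIDTH(2) |	/* 32-bit destination transfers */
	       DW_CTLL_SRC_MSIZE(1) |	/* 2^1 = 2 items per burst */
	       DW_CTLL_DST_MSIZE(1) |	/* matches burst_elems[] table */
	       DW_CTLL_INT_EN;		/* interrupt on completion */
}
```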
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/gd32.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/reset.h> #include <zephyr/logging/log.h> #include <gd32_dma.h> #include <zephyr/irq.h> #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) #define DT_DRV_COMPAT gd_gd32_dma_v1 #elif DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma) #define DT_DRV_COMPAT gd_gd32_dma #endif #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) #define CHXCTL_PERIEN_OFFSET ((uint32_t)25U) #define GD32_DMA_CHXCTL_DIR BIT(6) #define GD32_DMA_CHXCTL_M2M BIT(7) #define GD32_DMA_INTERRUPT_ERRORS (DMA_CHXCTL_SDEIE | DMA_CHXCTL_TAEIE) #define GD32_DMA_FLAG_ERRORS (DMA_FLAG_SDE | DMA_FLAG_TAE) #else #define GD32_DMA_CHXCTL_DIR BIT(4) #define GD32_DMA_CHXCTL_M2M BIT(14) #define GD32_DMA_INTERRUPT_ERRORS DMA_CHXCTL_ERRIE #define GD32_DMA_FLAG_ERRORS DMA_FLAG_ERR #endif #ifdef CONFIG_SOC_SERIES_GD32F3X0 #undef DMA_INTF #undef DMA_INTC #undef DMA_CHCTL #undef DMA_CHCNT #undef DMA_CHPADDR #undef DMA_CHMADDR #define DMA_INTF(dma) REG32(dma + 0x00UL) #define DMA_INTC(dma) REG32(dma + 0x04UL) #define DMA_CHCTL(dma, ch) REG32((dma + 0x08UL) + 0x14UL * (uint32_t)(ch)) #define DMA_CHCNT(dma, ch) REG32((dma + 0x0CUL) + 0x14UL * (uint32_t)(ch)) #define DMA_CHPADDR(dma, ch) REG32((dma + 0x10UL) + 0x14UL * (uint32_t)(ch)) #define DMA_CHMADDR(dma, ch) REG32((dma + 0x14UL) + 0x14UL * (uint32_t)(ch)) #endif #define GD32_DMA_INTF(dma) DMA_INTF(dma) #define GD32_DMA_INTC(dma) DMA_INTC(dma) #define GD32_DMA_CHCTL(dma, ch) DMA_CHCTL((dma), (ch)) #define GD32_DMA_CHCNT(dma, ch) DMA_CHCNT((dma), (ch)) #define GD32_DMA_CHPADDR(dma, ch) DMA_CHPADDR((dma), (ch)) #define GD32_DMA_CHMADDR(dma, ch) DMA_CHMADDR((dma), (ch)) LOG_MODULE_REGISTER(dma_gd32, CONFIG_DMA_LOG_LEVEL); struct dma_gd32_config { uint32_t reg; uint32_t channels; uint16_t clkid; bool mem2mem; #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) struct reset_dt_spec reset; #endif void (*irq_configure)(void); }; struct dma_gd32_channel { dma_callback_t callback; void *user_data; uint32_t direction; bool busy; }; struct dma_gd32_data { struct dma_context ctx; struct dma_gd32_channel *channels; }; struct dma_gd32_srcdst_config { uint32_t addr; uint32_t adj; uint32_t width; }; /* * Register access functions */ static inline void gd32_dma_periph_increase_enable(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) |= DMA_CHXCTL_PNAGA; } static inline void gd32_dma_periph_increase_disable(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) &= ~DMA_CHXCTL_PNAGA; } static inline void gd32_dma_transfer_set_memory_to_memory(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) |= GD32_DMA_CHXCTL_M2M; GD32_DMA_CHCTL(reg, ch) &= ~GD32_DMA_CHXCTL_DIR; } static inline void gd32_dma_transfer_set_memory_to_periph(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) &= ~GD32_DMA_CHXCTL_M2M; GD32_DMA_CHCTL(reg, ch) |= GD32_DMA_CHXCTL_DIR; } static inline void gd32_dma_transfer_set_periph_to_memory(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) &= ~GD32_DMA_CHXCTL_M2M; GD32_DMA_CHCTL(reg, ch) &= ~GD32_DMA_CHXCTL_DIR; } static inline void gd32_dma_memory_increase_enable(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) |= DMA_CHXCTL_MNAGA; } static inline void gd32_dma_memory_increase_disable(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) &= ~DMA_CHXCTL_MNAGA; } static inline void gd32_dma_circulation_enable(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) |= 
DMA_CHXCTL_CMEN; } static inline void gd32_dma_circulation_disable(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) &= ~DMA_CHXCTL_CMEN; } static inline void gd32_dma_channel_enable(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) |= DMA_CHXCTL_CHEN; } static inline void gd32_dma_channel_disable(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) &= ~DMA_CHXCTL_CHEN; } static inline void gd32_dma_interrupt_enable(uint32_t reg, dma_channel_enum ch, uint32_t source) { GD32_DMA_CHCTL(reg, ch) |= source; } static inline void gd32_dma_interrupt_disable(uint32_t reg, dma_channel_enum ch, uint32_t source) { GD32_DMA_CHCTL(reg, ch) &= ~source; } static inline void gd32_dma_priority_config(uint32_t reg, dma_channel_enum ch, uint32_t priority) { uint32_t ctl = GD32_DMA_CHCTL(reg, ch); GD32_DMA_CHCTL(reg, ch) = (ctl & (~DMA_CHXCTL_PRIO)) | priority; } static inline void gd32_dma_memory_width_config(uint32_t reg, dma_channel_enum ch, uint32_t mwidth) { uint32_t ctl = GD32_DMA_CHCTL(reg, ch); GD32_DMA_CHCTL(reg, ch) = (ctl & (~DMA_CHXCTL_MWIDTH)) | mwidth; } static inline void gd32_dma_periph_width_config(uint32_t reg, dma_channel_enum ch, uint32_t pwidth) { uint32_t ctl = GD32_DMA_CHCTL(reg, ch); GD32_DMA_CHCTL(reg, ch) = (ctl & (~DMA_CHXCTL_PWIDTH)) | pwidth; } #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) static inline void gd32_dma_channel_subperipheral_select(uint32_t reg, dma_channel_enum ch, dma_subperipheral_enum sub_periph) { uint32_t ctl = GD32_DMA_CHCTL(reg, ch); GD32_DMA_CHCTL(reg, ch) = (ctl & (~DMA_CHXCTL_PERIEN)) | ((uint32_t)sub_periph << CHXCTL_PERIEN_OFFSET); } #endif static inline void gd32_dma_periph_address_config(uint32_t reg, dma_channel_enum ch, uint32_t addr) { GD32_DMA_CHPADDR(reg, ch) = addr; } static inline void gd32_dma_memory_address_config(uint32_t reg, dma_channel_enum ch, uint32_t addr) { #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) DMA_CHM0ADDR(reg, ch) = addr; #else GD32_DMA_CHMADDR(reg, ch) = addr; #endif } static inline void gd32_dma_transfer_number_config(uint32_t reg, dma_channel_enum ch, uint32_t num) { GD32_DMA_CHCNT(reg, ch) = (num & DMA_CHXCNT_CNT); } static inline uint32_t gd32_dma_transfer_number_get(uint32_t reg, dma_channel_enum ch) { return GD32_DMA_CHCNT(reg, ch); } static inline void gd32_dma_interrupt_flag_clear(uint32_t reg, dma_channel_enum ch, uint32_t flag) { #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) if (ch < DMA_CH4) { DMA_INTC0(reg) |= DMA_FLAG_ADD(flag, ch); } else { DMA_INTC1(reg) |= DMA_FLAG_ADD(flag, ch - DMA_CH4); } #else GD32_DMA_INTC(reg) |= DMA_FLAG_ADD(flag, ch); #endif } static inline void gd32_dma_flag_clear(uint32_t reg, dma_channel_enum ch, uint32_t flag) { #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) if (ch < DMA_CH4) { DMA_INTC0(reg) |= DMA_FLAG_ADD(flag, ch); } else { DMA_INTC1(reg) |= DMA_FLAG_ADD(flag, ch - DMA_CH4); } #else GD32_DMA_INTC(reg) |= DMA_FLAG_ADD(flag, ch); #endif } static inline uint32_t gd32_dma_interrupt_flag_get(uint32_t reg, dma_channel_enum ch, uint32_t flag) { #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) if (ch < DMA_CH4) { return (DMA_INTF0(reg) & DMA_FLAG_ADD(flag, ch)); } else { return (DMA_INTF1(reg) & DMA_FLAG_ADD(flag, ch - DMA_CH4)); } #else return (GD32_DMA_INTF(reg) & DMA_FLAG_ADD(flag, ch)); #endif } static inline void gd32_dma_deinit(uint32_t reg, dma_channel_enum ch) { GD32_DMA_CHCTL(reg, ch) &= ~DMA_CHXCTL_CHEN; GD32_DMA_CHCTL(reg, ch) = DMA_CHCTL_RESET_VALUE; GD32_DMA_CHCNT(reg, ch) = DMA_CHCNT_RESET_VALUE; GD32_DMA_CHPADDR(reg, ch) = 
DMA_CHPADDR_RESET_VALUE; #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) DMA_CHM0ADDR(reg, ch) = DMA_CHMADDR_RESET_VALUE; DMA_CHFCTL(reg, ch) = DMA_CHFCTL_RESET_VALUE; if (ch < DMA_CH4) { DMA_INTC0(reg) |= DMA_FLAG_ADD(DMA_CHINTF_RESET_VALUE, ch); } else { DMA_INTC1(reg) |= DMA_FLAG_ADD(DMA_CHINTF_RESET_VALUE, ch - DMA_CH4); } #else GD32_DMA_CHMADDR(reg, ch) = DMA_CHMADDR_RESET_VALUE; GD32_DMA_INTC(reg) |= DMA_FLAG_ADD(DMA_CHINTF_RESET_VALUE, ch); #endif } /* * Utility functions */ static inline uint32_t dma_gd32_priority(uint32_t prio) { return CHCTL_PRIO(prio); } static inline uint32_t dma_gd32_memory_width(uint32_t width) { switch (width) { case 4: return CHCTL_MWIDTH(2); case 2: return CHCTL_MWIDTH(1); default: return CHCTL_MWIDTH(0); } } static inline uint32_t dma_gd32_periph_width(uint32_t width) { switch (width) { case 4: return CHCTL_PWIDTH(2); case 2: return CHCTL_PWIDTH(1); default: return CHCTL_PWIDTH(0); } } /* * API functions */ static int dma_gd32_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg) { const struct dma_gd32_config *cfg = dev->config; struct dma_gd32_data *data = dev->data; struct dma_gd32_srcdst_config src_cfg; struct dma_gd32_srcdst_config dst_cfg; struct dma_gd32_srcdst_config *memory_cfg = NULL; struct dma_gd32_srcdst_config *periph_cfg = NULL; if (channel >= cfg->channels) { LOG_ERR("channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, channel); return -EINVAL; } if (dma_cfg->block_count != 1) { LOG_ERR("chained block transfer not supported."); return -ENOTSUP; } if (dma_cfg->channel_priority > 3) { LOG_ERR("channel_priority must be < 4 (%" PRIu32 ")", dma_cfg->channel_priority); return -EINVAL; } if (dma_cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_DECREMENT) { LOG_ERR("source_addr_adj not supported DMA_ADDR_ADJ_DECREMENT"); return -ENOTSUP; } if (dma_cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_DECREMENT) { LOG_ERR("dest_addr_adj not supported DMA_ADDR_ADJ_DECREMENT"); return -ENOTSUP; } if (dma_cfg->head_block->source_addr_adj != DMA_ADDR_ADJ_INCREMENT && dma_cfg->head_block->source_addr_adj != DMA_ADDR_ADJ_NO_CHANGE) { LOG_ERR("invalid source_addr_adj %" PRIu16, dma_cfg->head_block->source_addr_adj); return -ENOTSUP; } if (dma_cfg->head_block->dest_addr_adj != DMA_ADDR_ADJ_INCREMENT && dma_cfg->head_block->dest_addr_adj != DMA_ADDR_ADJ_NO_CHANGE) { LOG_ERR("invalid dest_addr_adj %" PRIu16, dma_cfg->head_block->dest_addr_adj); return -ENOTSUP; } if (dma_cfg->source_data_size != 1 && dma_cfg->source_data_size != 2 && dma_cfg->source_data_size != 4) { LOG_ERR("source_data_size must be 1, 2, or 4 (%" PRIu32 ")", dma_cfg->source_data_size); return -EINVAL; } if (dma_cfg->dest_data_size != 1 && dma_cfg->dest_data_size != 2 && dma_cfg->dest_data_size != 4) { LOG_ERR("dest_data_size must be 1, 2, or 4 (%" PRIu32 ")", dma_cfg->dest_data_size); return -EINVAL; } if (dma_cfg->channel_direction > PERIPHERAL_TO_MEMORY) { LOG_ERR("channel_direction must be MEMORY_TO_MEMORY, " "MEMORY_TO_PERIPHERAL or PERIPHERAL_TO_MEMORY (%" PRIu32 ")", dma_cfg->channel_direction); return -ENOTSUP; } if (dma_cfg->channel_direction == MEMORY_TO_MEMORY && !cfg->mem2mem) { LOG_ERR("not supporting MEMORY_TO_MEMORY"); return -ENOTSUP; } #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) if (dma_cfg->dma_slot > 0xF) { LOG_ERR("dma_slot must be <7 (%" PRIu32 ")", dma_cfg->dma_slot); return -EINVAL; } #endif gd32_dma_deinit(cfg->reg, channel); src_cfg.addr = dma_cfg->head_block->source_address; src_cfg.adj = dma_cfg->head_block->source_addr_adj; 
src_cfg.width = dma_cfg->source_data_size; dst_cfg.addr = dma_cfg->head_block->dest_address; dst_cfg.adj = dma_cfg->head_block->dest_addr_adj; dst_cfg.width = dma_cfg->dest_data_size; switch (dma_cfg->channel_direction) { case MEMORY_TO_MEMORY: gd32_dma_transfer_set_memory_to_memory(cfg->reg, channel); memory_cfg = &dst_cfg; periph_cfg = &src_cfg; break; case PERIPHERAL_TO_MEMORY: gd32_dma_transfer_set_periph_to_memory(cfg->reg, channel); memory_cfg = &dst_cfg; periph_cfg = &src_cfg; break; case MEMORY_TO_PERIPHERAL: gd32_dma_transfer_set_memory_to_periph(cfg->reg, channel); memory_cfg = &src_cfg; periph_cfg = &dst_cfg; break; } gd32_dma_memory_address_config(cfg->reg, channel, memory_cfg->addr); if (memory_cfg->adj == DMA_ADDR_ADJ_INCREMENT) { gd32_dma_memory_increase_enable(cfg->reg, channel); } else { gd32_dma_memory_increase_disable(cfg->reg, channel); } gd32_dma_periph_address_config(cfg->reg, channel, periph_cfg->addr); if (periph_cfg->adj == DMA_ADDR_ADJ_INCREMENT) { gd32_dma_periph_increase_enable(cfg->reg, channel); } else { gd32_dma_periph_increase_disable(cfg->reg, channel); } gd32_dma_transfer_number_config(cfg->reg, channel, dma_cfg->head_block->block_size); gd32_dma_priority_config(cfg->reg, channel, dma_gd32_priority(dma_cfg->channel_priority)); gd32_dma_memory_width_config(cfg->reg, channel, dma_gd32_memory_width(memory_cfg->width)); gd32_dma_periph_width_config(cfg->reg, channel, dma_gd32_periph_width(periph_cfg->width)); gd32_dma_circulation_disable(cfg->reg, channel); #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) if (dma_cfg->channel_direction != MEMORY_TO_MEMORY) { gd32_dma_channel_subperipheral_select(cfg->reg, channel, dma_cfg->dma_slot); } #endif data->channels[channel].callback = dma_cfg->dma_callback; data->channels[channel].user_data = dma_cfg->user_data; data->channels[channel].direction = dma_cfg->channel_direction; return 0; } static int dma_gd32_reload(const struct device *dev, uint32_t ch, uint32_t src, uint32_t dst, size_t size) { const struct dma_gd32_config *cfg = dev->config; struct dma_gd32_data *data = dev->data; if (ch >= cfg->channels) { LOG_ERR("reload channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, ch); return -EINVAL; } if (data->channels[ch].busy) { return -EBUSY; } gd32_dma_channel_disable(cfg->reg, ch); gd32_dma_transfer_number_config(cfg->reg, ch, size); switch (data->channels[ch].direction) { case MEMORY_TO_MEMORY: case PERIPHERAL_TO_MEMORY: gd32_dma_memory_address_config(cfg->reg, ch, dst); gd32_dma_periph_address_config(cfg->reg, ch, src); break; case MEMORY_TO_PERIPHERAL: gd32_dma_memory_address_config(cfg->reg, ch, src); gd32_dma_periph_address_config(cfg->reg, ch, dst); break; } gd32_dma_channel_enable(cfg->reg, ch); return 0; } static int dma_gd32_start(const struct device *dev, uint32_t ch) { const struct dma_gd32_config *cfg = dev->config; struct dma_gd32_data *data = dev->data; if (ch >= cfg->channels) { LOG_ERR("start channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, ch); return -EINVAL; } gd32_dma_interrupt_enable(cfg->reg, ch, DMA_CHXCTL_FTFIE | GD32_DMA_INTERRUPT_ERRORS); gd32_dma_channel_enable(cfg->reg, ch); data->channels[ch].busy = true; return 0; } static int dma_gd32_stop(const struct device *dev, uint32_t ch) { const struct dma_gd32_config *cfg = dev->config; struct dma_gd32_data *data = dev->data; if (ch >= cfg->channels) { LOG_ERR("stop channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, ch); return -EINVAL; } gd32_dma_interrupt_disable( cfg->reg, ch, DMA_CHXCTL_FTFIE | 
GD32_DMA_INTERRUPT_ERRORS); gd32_dma_interrupt_flag_clear(cfg->reg, ch, DMA_FLAG_FTF | GD32_DMA_FLAG_ERRORS); gd32_dma_channel_disable(cfg->reg, ch); data->channels[ch].busy = false; return 0; } static int dma_gd32_get_status(const struct device *dev, uint32_t ch, struct dma_status *stat) { const struct dma_gd32_config *cfg = dev->config; struct dma_gd32_data *data = dev->data; if (ch >= cfg->channels) { LOG_ERR("channel must be < %" PRIu32 " (%" PRIu32 ")", cfg->channels, ch); return -EINVAL; } stat->pending_length = gd32_dma_transfer_number_get(cfg->reg, ch); stat->dir = data->channels[ch].direction; stat->busy = data->channels[ch].busy; return 0; } static bool dma_gd32_api_chan_filter(const struct device *dev, int ch, void *filter_param) { uint32_t filter; if (!filter_param) { LOG_ERR("filter_param must not be NULL"); return false; } filter = *((uint32_t *)filter_param); return (filter & BIT(ch)); } static int dma_gd32_init(const struct device *dev) { const struct dma_gd32_config *cfg = dev->config; (void)clock_control_on(GD32_CLOCK_CONTROLLER, (clock_control_subsys_t)&cfg->clkid); #if DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1) (void)reset_line_toggle_dt(&cfg->reset); #endif for (uint32_t i = 0; i < cfg->channels; i++) { gd32_dma_interrupt_disable(cfg->reg, i, DMA_CHXCTL_FTFIE | GD32_DMA_INTERRUPT_ERRORS); gd32_dma_deinit(cfg->reg, i); } cfg->irq_configure(); return 0; } static void dma_gd32_isr(const struct device *dev) { const struct dma_gd32_config *cfg = dev->config; struct dma_gd32_data *data = dev->data; uint32_t errflag, ftfflag; int err = 0; for (uint32_t i = 0; i < cfg->channels; i++) { errflag = gd32_dma_interrupt_flag_get(cfg->reg, i, GD32_DMA_FLAG_ERRORS); ftfflag = gd32_dma_interrupt_flag_get(cfg->reg, i, DMA_FLAG_FTF); if (errflag == 0 && ftfflag == 0) { continue; } if (errflag) { err = -EIO; } gd32_dma_interrupt_flag_clear( cfg->reg, i, DMA_FLAG_FTF | GD32_DMA_FLAG_ERRORS); data->channels[i].busy = false; if (data->channels[i].callback) { data->channels[i].callback( dev, data->channels[i].user_data, i, err); } } } static const struct dma_driver_api dma_gd32_driver_api = { .config = dma_gd32_config, .reload = dma_gd32_reload, .start = dma_gd32_start, .stop = dma_gd32_stop, .get_status = dma_gd32_get_status, .chan_filter = dma_gd32_api_chan_filter, }; #define IRQ_CONFIGURE(n, inst) \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, n, irq), \ DT_INST_IRQ_BY_IDX(inst, n, priority), dma_gd32_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(inst, n, irq)); #define CONFIGURE_ALL_IRQS(inst, n) LISTIFY(n, IRQ_CONFIGURE, (), inst) #define GD32_DMA_INIT(inst) \ static void dma_gd32##inst##_irq_configure(void) \ { \ CONFIGURE_ALL_IRQS(inst, DT_NUM_IRQS(DT_DRV_INST(inst))); \ } \ static const struct dma_gd32_config dma_gd32##inst##_config = { \ .reg = DT_INST_REG_ADDR(inst), \ .channels = DT_INST_PROP(inst, dma_channels), \ .clkid = DT_INST_CLOCKS_CELL(inst, id), \ .mem2mem = DT_INST_PROP(inst, gd_mem2mem), \ IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1), \ (.reset = RESET_DT_SPEC_INST_GET(inst),)) \ .irq_configure = dma_gd32##inst##_irq_configure, \ }; \ \ static struct dma_gd32_channel \ dma_gd32##inst##_channels[DT_INST_PROP(inst, dma_channels)]; \ ATOMIC_DEFINE(dma_gd32_atomic##inst, \ DT_INST_PROP(inst, dma_channels)); \ static struct dma_gd32_data dma_gd32##inst##_data = { \ .ctx = { \ .magic = DMA_MAGIC, \ .atomic = dma_gd32_atomic##inst, \ .dma_channels = DT_INST_PROP(inst, dma_channels), \ }, \ .channels = dma_gd32##inst##_channels, \ }; \ \ 
DEVICE_DT_INST_DEFINE(inst, &dma_gd32_init, NULL, \ &dma_gd32##inst##_data, \ &dma_gd32##inst##_config, POST_KERNEL, \ CONFIG_DMA_INIT_PRIORITY, &dma_gd32_driver_api); DT_INST_FOREACH_STATUS_OKAY(GD32_DMA_INIT) ```
/content/code_sandbox/drivers/dma/dma_gd32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,811
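Since `dma_gd32_api_chan_filter()` interprets `filter_param` as a channel bitmask, a caller can steer `dma_request_channel()` toward specific channels. A hypothetical sketch, assuming a `dma0` devicetree label:

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/sys/util.h>

int gd32_request_low_channel(void)
{
	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma0));
	/* only channels 0..3 are acceptable to this caller */
	uint32_t mask = BIT(0) | BIT(1) | BIT(2) | BIT(3);
	int ch = dma_request_channel(dma, &mask);

	if (ch < 0) {
		return ch; /* no channel in the mask was free */
	}
	/* ... configure and start the channel here ... */
	dma_release_channel(dma, ch);
	return 0;
}
```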
```unknown # Microchip XEC DMAC configuration options config DMA_MCHP_XEC bool "Microchip XEC series DMAC driver" default y depends on DT_HAS_MICROCHIP_XEC_DMAC_ENABLED help DMA driver for Microchip XEC series MCUs. ```
/content/code_sandbox/drivers/dma/Kconfig.xec
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
61
```unknown config DMA_PL330 bool prompt "PL330 DMA driver" default y depends on DT_HAS_ARM_DMA_PL330_ENABLED help This option enables support of pl330 DMA Controller. ```
/content/code_sandbox/drivers/dma/Kconfig.dma_pl330
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
44
```unknown # LPSS DMA configuration options config DMA_INTEL_LPSS bool "INTEL LPSS DMA driver" default y depends on DT_HAS_INTEL_LPSS_ENABLED select DEVICE_DEPS help INTEL LPSS DMA driver. if DMA_INTEL_LPSS source "drivers/dma/Kconfig.dw_common" endif # DMA_INTEL_LPSS ```
/content/code_sandbox/drivers/dma/Kconfig.intel_lpss
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
78
```c /* * */ /** * @brief DMA low level driver implementation for F0/F1/F3/L0/L4 series SoCs. */ #include "dma_stm32.h" #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_stm32_v2); uint32_t dma_stm32_id_to_stream(uint32_t id) { static const uint32_t stream_nr[] = { LL_DMA_CHANNEL_1, LL_DMA_CHANNEL_2, LL_DMA_CHANNEL_3, #if defined(LL_DMA_CHANNEL_4) LL_DMA_CHANNEL_4, LL_DMA_CHANNEL_5, #if defined(LL_DMA_CHANNEL_6) LL_DMA_CHANNEL_6, #if defined(LL_DMA_CHANNEL_7) LL_DMA_CHANNEL_7, #if defined(LL_DMA_CHANNEL_8) LL_DMA_CHANNEL_8, #endif /* LL_DMA_CHANNEL_8 */ #endif /* LL_DMA_CHANNEL_7 */ #endif /* LL_DMA_CHANNEL_6 */ #endif /* LL_DMA_CHANNEL_4 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(stream_nr)); return stream_nr[id]; } void dma_stm32_clear_ht(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_HT1, LL_DMA_ClearFlag_HT2, LL_DMA_ClearFlag_HT3, #if defined(LL_DMA_IFCR_CHTIF4) LL_DMA_ClearFlag_HT4, LL_DMA_ClearFlag_HT5, #if defined(LL_DMA_IFCR_CHTIF6) LL_DMA_ClearFlag_HT6, #if defined(LL_DMA_IFCR_CHTIF7) LL_DMA_ClearFlag_HT7, #if defined(LL_DMA_IFCR_CHTIF8) LL_DMA_ClearFlag_HT8, #endif /* LL_DMA_IFCR_CHTIF8 */ #endif /* LL_DMA_IFCR_CHTIF7 */ #endif /* LL_DMA_IFCR_CHTIF6 */ #endif /* LL_DMA_IFCR_CHTIF4 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } void dma_stm32_clear_tc(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_TC1, LL_DMA_ClearFlag_TC2, LL_DMA_ClearFlag_TC3, #if defined(LL_DMA_IFCR_CTCIF4) LL_DMA_ClearFlag_TC4, LL_DMA_ClearFlag_TC5, #if defined(LL_DMA_IFCR_CTCIF6) LL_DMA_ClearFlag_TC6, #if defined(LL_DMA_IFCR_CTCIF7) LL_DMA_ClearFlag_TC7, #if defined(LL_DMA_IFCR_CTCIF8) LL_DMA_ClearFlag_TC8, #endif /* LL_DMA_IFCR_CTCIF8 */ #endif /* LL_DMA_IFCR_CTCIF7 */ #endif /* LL_DMA_IFCR_CTCIF6 */ #endif /* LL_DMA_IFCR_CTCIF4 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } bool dma_stm32_is_ht_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_HT1, LL_DMA_IsActiveFlag_HT2, LL_DMA_IsActiveFlag_HT3, #if defined(LL_DMA_IFCR_CHTIF4) LL_DMA_IsActiveFlag_HT4, LL_DMA_IsActiveFlag_HT5, #if defined(LL_DMA_IFCR_CHTIF6) LL_DMA_IsActiveFlag_HT6, #if defined(LL_DMA_IFCR_CHTIF7) LL_DMA_IsActiveFlag_HT7, #if defined(LL_DMA_IFCR_CHTIF8) LL_DMA_IsActiveFlag_HT8, #endif /* LL_DMA_IFCR_CHTIF8 */ #endif /* LL_DMA_IFCR_CHTIF7 */ #endif /* LL_DMA_IFCR_CHTIF6 */ #endif /* LL_DMA_IFCR_CHTIF4 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } bool dma_stm32_is_tc_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_TC1, LL_DMA_IsActiveFlag_TC2, LL_DMA_IsActiveFlag_TC3, #if defined(LL_DMA_IFCR_CTCIF4) LL_DMA_IsActiveFlag_TC4, LL_DMA_IsActiveFlag_TC5, #if defined(LL_DMA_IFCR_CTCIF6) LL_DMA_IsActiveFlag_TC6, #if defined(LL_DMA_IFCR_CTCIF7) LL_DMA_IsActiveFlag_TC7, #if defined(LL_DMA_IFCR_CTCIF8) LL_DMA_IsActiveFlag_TC8, #endif /* LL_DMA_IFCR_CTCIF8 */ #endif /* LL_DMA_IFCR_CTCIF7 */ #endif /* LL_DMA_IFCR_CTCIF6 */ #endif /* LL_DMA_IFCR_CTCIF4 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } void dma_stm32_clear_te(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_TE1, LL_DMA_ClearFlag_TE2, LL_DMA_ClearFlag_TE3, #if defined(LL_DMA_IFCR_CTEIF4) LL_DMA_ClearFlag_TE4, LL_DMA_ClearFlag_TE5, #if defined(LL_DMA_IFCR_CTEIF6) LL_DMA_ClearFlag_TE6, #if 
defined(LL_DMA_IFCR_CTEIF7) LL_DMA_ClearFlag_TE7, #if defined(LL_DMA_IFCR_CTEIF8) LL_DMA_ClearFlag_TE8, #endif /* LL_DMA_IFCR_CTEIF4 */ #endif /* LL_DMA_IFCR_CTEIF6 */ #endif /* LL_DMA_IFCR_CTEIF7 */ #endif /* LL_DMA_IFCR_CTEIF8 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } void dma_stm32_clear_gi(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_clear_flag_func func[] = { LL_DMA_ClearFlag_GI1, LL_DMA_ClearFlag_GI2, LL_DMA_ClearFlag_GI3, #if defined(LL_DMA_IFCR_CGIF4) LL_DMA_ClearFlag_GI4, LL_DMA_ClearFlag_GI5, #if defined(LL_DMA_IFCR_CGIF6) LL_DMA_ClearFlag_GI6, #if defined(LL_DMA_IFCR_CGIF7) LL_DMA_ClearFlag_GI7, #if defined(LL_DMA_IFCR_CGIF8) LL_DMA_ClearFlag_GI8, #endif /* LL_DMA_IFCR_CGIF4 */ #endif /* LL_DMA_IFCR_CGIF6 */ #endif /* LL_DMA_IFCR_CGIF7 */ #endif /* LL_DMA_IFCR_CGIF8 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); func[id](DMAx); } bool dma_stm32_is_te_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_TE1, LL_DMA_IsActiveFlag_TE2, LL_DMA_IsActiveFlag_TE3, #if defined(LL_DMA_IFCR_CTEIF4) LL_DMA_IsActiveFlag_TE4, LL_DMA_IsActiveFlag_TE5, #if defined(LL_DMA_IFCR_CTEIF6) LL_DMA_IsActiveFlag_TE6, #if defined(LL_DMA_IFCR_CTEIF7) LL_DMA_IsActiveFlag_TE7, #if defined(LL_DMA_IFCR_CTEIF8) LL_DMA_IsActiveFlag_TE8, #endif /* LL_DMA_IFCR_CTEIF4 */ #endif /* LL_DMA_IFCR_CTEIF6 */ #endif /* LL_DMA_IFCR_CTEIF7 */ #endif /* LL_DMA_IFCR_CTEIF8 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } bool dma_stm32_is_gi_active(DMA_TypeDef *DMAx, uint32_t id) { static const dma_stm32_check_flag_func func[] = { LL_DMA_IsActiveFlag_GI1, LL_DMA_IsActiveFlag_GI2, LL_DMA_IsActiveFlag_GI3, #if defined(LL_DMA_IFCR_CGIF4) LL_DMA_IsActiveFlag_GI4, LL_DMA_IsActiveFlag_GI5, #if defined(LL_DMA_IFCR_CGIF6) LL_DMA_IsActiveFlag_GI6, #if defined(LL_DMA_IFCR_CGIF7) LL_DMA_IsActiveFlag_GI7, #if defined(LL_DMA_IFCR_CGIF8) LL_DMA_IsActiveFlag_GI8, #endif /* LL_DMA_IFCR_CGIF4 */ #endif /* LL_DMA_IFCR_CGIF6 */ #endif /* LL_DMA_IFCR_CGIF7 */ #endif /* LL_DMA_IFCR_CGIF8 */ }; __ASSERT_NO_MSG(id < ARRAY_SIZE(func)); return func[id](DMAx); } void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id) { LOG_INF("tc: %d, ht: %d, te: %d, gi: %d", dma_stm32_is_tc_active(dma, id), dma_stm32_is_ht_active(dma, id), dma_stm32_is_te_active(dma, id), dma_stm32_is_gi_active(dma, id)); } bool stm32_dma_is_tc_irq_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsEnabledIT_TC(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_tc_active(dma, id); } bool stm32_dma_is_ht_irq_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsEnabledIT_HT(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_ht_active(dma, id); } static inline bool stm32_dma_is_te_irq_active(DMA_TypeDef *dma, uint32_t id) { return LL_DMA_IsEnabledIT_TE(dma, dma_stm32_id_to_stream(id)) && dma_stm32_is_te_active(dma, id); } bool stm32_dma_is_irq_active(DMA_TypeDef *dma, uint32_t id) { return stm32_dma_is_tc_irq_active(dma, id) || stm32_dma_is_ht_irq_active(dma, id) || stm32_dma_is_te_irq_active(dma, id); } void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id) { dma_stm32_clear_te(dma, id); } bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, uint32_t id) { if (dma_stm32_is_te_active(dma, id)) { return true; } return false; } bool stm32_dma_is_unexpected_irq_happened(DMA_TypeDef *dma, uint32_t id) { /* Preserve for future amending. 
*/ return false; } void stm32_dma_enable_stream(DMA_TypeDef *dma, uint32_t id) { LL_DMA_EnableChannel(dma, dma_stm32_id_to_stream(id)); } bool stm32_dma_is_enabled_stream(DMA_TypeDef *dma, uint32_t id) { if (LL_DMA_IsEnabledChannel(dma, dma_stm32_id_to_stream(id)) == 1) { return true; } return false; } int stm32_dma_disable_stream(DMA_TypeDef *dma, uint32_t id) { LL_DMA_DisableChannel(dma, dma_stm32_id_to_stream(id)); if (!LL_DMA_IsEnabledChannel(dma, dma_stm32_id_to_stream(id))) { return 0; } return -EAGAIN; } ```
/content/code_sandbox/drivers/dma/dma_stm32_v2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,524
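A sketch of how the per-stream helpers above are typically combined when servicing an interrupt; the function name and the notification comments are illustrative, not taken from this file:

```c
/* Assumed caller context: dma_stm32.h in scope, `id` is the stream index. */
static void example_stream_irq(DMA_TypeDef *dma, uint32_t id)
{
	if (stm32_dma_is_tc_irq_active(dma, id)) {
		dma_stm32_clear_tc(dma, id);
		/* transfer complete: notify the stream owner */
	} else if (stm32_dma_is_ht_irq_active(dma, id)) {
		dma_stm32_clear_ht(dma, id);
		/* half transfer: refill first half of a circular buffer */
	} else if (dma_stm32_is_te_active(dma, id)) {
		dma_stm32_clear_te(dma, id);
		/* transfer error: report -EIO to the stream owner */
	}
}
```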
```c /* * */ #define DT_DRV_COMPAT altr_msgdma #include <zephyr/device.h> #include <errno.h> #include <zephyr/init.h> #include <string.h> #include <soc.h> #include <zephyr/drivers/dma.h> #include <altera_common.h> #include "altera_msgdma_csr_regs.h" #include "altera_msgdma_descriptor_regs.h" #include "altera_msgdma.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_nios2, CONFIG_DMA_LOG_LEVEL); /* Device configuration parameters */ struct nios2_msgdma_dev_data { const struct device *dev; alt_msgdma_dev *msgdma_dev; alt_msgdma_standard_descriptor desc; uint32_t direction; struct k_sem sem_lock; void *user_data; dma_callback_t dma_callback; }; static void nios2_msgdma_isr(void *arg) { const struct device *dev = (const struct device *)arg; struct nios2_msgdma_dev_data *dev_data = (struct nios2_msgdma_dev_data *)dev->data; /* Call Altera HAL driver ISR */ alt_handle_irq(dev_data->msgdma_dev, DT_INST_IRQN(0)); } static void nios2_msgdma_callback(void *context) { struct nios2_msgdma_dev_data *dev_data = (struct nios2_msgdma_dev_data *)context; int dma_status; uint32_t status; status = IORD_ALTERA_MSGDMA_CSR_STATUS(dev_data->msgdma_dev->csr_base); if (status & ALTERA_MSGDMA_CSR_STOPPED_ON_ERROR_MASK) { dma_status = -EIO; } else if (status & ALTERA_MSGDMA_CSR_BUSY_MASK) { dma_status = -EBUSY; } else { dma_status = DMA_STATUS_COMPLETE; } LOG_DBG("msgdma csr status Reg: 0x%x", status); dev_data->dma_callback(dev_data->dev, dev_data->user_data, 0, dma_status); } static int nios2_msgdma_config(const struct device *dev, uint32_t channel, struct dma_config *cfg) { struct nios2_msgdma_dev_data *dev_data = (struct nios2_msgdma_dev_data *)dev->data; struct dma_block_config *dma_block; int status; uint32_t control; /* Nios-II MSGDMA supports only one channel per DMA core */ if (channel != 0U) { LOG_ERR("invalid channel number"); return -EINVAL; } #if MSGDMA_0_CSR_PREFETCHER_ENABLE if (cfg->block_count > 1) { LOG_ERR("driver yet add support multiple descriptors"); return -EINVAL; } #else if (cfg->block_count != 1U) { LOG_ERR("invalid block count!!"); return -EINVAL; } #endif if (cfg->head_block == NULL) { LOG_ERR("head_block ptr NULL!!"); return -EINVAL; } if (cfg->head_block->block_size > MSGDMA_0_DESCRIPTOR_SLAVE_MAX_BYTE) { LOG_ERR("DMA error: Data size too big: %d", cfg->head_block->block_size); return -EINVAL; } k_sem_take(&dev_data->sem_lock, K_FOREVER); dev_data->dma_callback = cfg->dma_callback; dev_data->user_data = cfg->user_data; dev_data->direction = cfg->channel_direction; dma_block = cfg->head_block; control = ALTERA_MSGDMA_DESCRIPTOR_CONTROL_TRANSFER_COMPLETE_IRQ_MASK | ALTERA_MSGDMA_DESCRIPTOR_CONTROL_EARLY_TERMINATION_IRQ_MASK; if (dev_data->direction == MEMORY_TO_MEMORY) { status = alt_msgdma_construct_standard_mm_to_mm_descriptor( dev_data->msgdma_dev, &dev_data->desc, (alt_u32 *)dma_block->source_address, (alt_u32 *)dma_block->dest_address, dma_block->block_size, control); } else if (dev_data->direction == MEMORY_TO_PERIPHERAL) { status = alt_msgdma_construct_standard_mm_to_st_descriptor( dev_data->msgdma_dev, &dev_data->desc, (alt_u32 *)dma_block->source_address, dma_block->block_size, control); } else if (dev_data->direction == PERIPHERAL_TO_MEMORY) { status = alt_msgdma_construct_standard_st_to_mm_descriptor( dev_data->msgdma_dev, &dev_data->desc, (alt_u32 *)dma_block->dest_address, dma_block->block_size, control); } else { LOG_ERR("invalid channel direction"); status = -EINVAL; } /* Register msgdma callback */ 
alt_msgdma_register_callback(dev_data->msgdma_dev, nios2_msgdma_callback, ALTERA_MSGDMA_CSR_GLOBAL_INTERRUPT_MASK | ALTERA_MSGDMA_CSR_STOP_ON_ERROR_MASK | ALTERA_MSGDMA_CSR_STOP_ON_EARLY_TERMINATION_MASK, dev_data); /* Clear the IRQ status */ IOWR_ALTERA_MSGDMA_CSR_STATUS(dev_data->msgdma_dev->csr_base, ALTERA_MSGDMA_CSR_IRQ_SET_MASK); k_sem_give(&dev_data->sem_lock); return status; } static int nios2_msgdma_transfer_start(const struct device *dev, uint32_t channel) { struct nios2_msgdma_dev_data *cfg = (struct nios2_msgdma_dev_data *)dev->data; int status; /* Nios-II MSGDMA supports only one channel per DMA core */ if (channel != 0U) { LOG_ERR("Invalid channel number"); return -EINVAL; } k_sem_take(&cfg->sem_lock, K_FOREVER); status = alt_msgdma_standard_descriptor_async_transfer(cfg->msgdma_dev, &cfg->desc); k_sem_give(&cfg->sem_lock); if (status < 0) { LOG_ERR("DMA transfer error (%d)", status); } return status; } static int nios2_msgdma_transfer_stop(const struct device *dev, uint32_t channel) { struct nios2_msgdma_dev_data *cfg = (struct nios2_msgdma_dev_data *)dev->data; int ret = -EIO; uint32_t status; k_sem_take(&cfg->sem_lock, K_FOREVER); /* Stop the DMA Dispatcher */ IOWR_ALTERA_MSGDMA_CSR_CONTROL(cfg->msgdma_dev->csr_base, ALTERA_MSGDMA_CSR_STOP_MASK); status = IORD_ALTERA_MSGDMA_CSR_STATUS(cfg->msgdma_dev->csr_base); k_sem_give(&cfg->sem_lock); if (status & ALTERA_MSGDMA_CSR_STOP_STATE_MASK) { LOG_DBG("DMA Dispatcher stopped"); ret = 0; } LOG_DBG("msgdma csr status Reg: 0x%x", status); return status; } static const struct dma_driver_api nios2_msgdma_driver_api = { .config = nios2_msgdma_config, .start = nios2_msgdma_transfer_start, .stop = nios2_msgdma_transfer_stop, }; static int nios2_msgdma0_initialize(const struct device *dev) { struct nios2_msgdma_dev_data *dev_data = (struct nios2_msgdma_dev_data *)dev->data; dev_data->dev = dev; /* Initialize semaphore */ k_sem_init(&dev_data->sem_lock, 1, 1); alt_msgdma_init(dev_data->msgdma_dev, 0, DT_INST_IRQN(0)); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), nios2_msgdma_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); return 0; } ALTERA_MSGDMA_CSR_DESCRIPTOR_SLAVE_INSTANCE(MSGDMA_0, MSGDMA_0_CSR, MSGDMA_0_DESCRIPTOR_SLAVE, msgdma_dev0) static struct nios2_msgdma_dev_data dma0_nios2_data = { .msgdma_dev = &msgdma_dev0, }; DEVICE_DT_INST_DEFINE(0, &nios2_msgdma0_initialize, NULL, &dma0_nios2_data, NULL, POST_KERNEL, CONFIG_DMA_INIT_PRIORITY, &nios2_msgdma_driver_api); ```
/content/code_sandbox/drivers/dma/dma_nios2_msgdma.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,782
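The driver's completion hook reports -EIO when the dispatcher stopped on an error, -EBUSY while the engine is still busy, and DMA_STATUS_COMPLETE on success, always on channel 0. An assumed application-side callback that consumes those statuses might look like this:

```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/kernel.h>

/* user_data is assumed to be a k_sem the waiting thread blocks on */
static void msgdma_cb(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	struct k_sem *done = user_data;

	switch (status) {
	case DMA_STATUS_COMPLETE:
		k_sem_give(done);	/* transfer finished cleanly */
		break;
	case -EBUSY:
		break;			/* engine still running, keep waiting */
	default:
		k_sem_give(done);	/* -EIO or other error: unblock and
					 * let the caller inspect the result
					 */
		break;
	}
}
```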
```unknown # DesignWare DMA configuration options config DMA_DW bool "DesignWare DMA driver" default y depends on DT_HAS_SNPS_DESIGNWARE_DMA_ENABLED help DesignWare DMA driver. if DMA_DW source "drivers/dma/Kconfig.dw_common" endif # DMA_DW ```
/content/code_sandbox/drivers/dma/Kconfig.dw
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
65
```c
/*
 *
 */

#define DT_DRV_COMPAT intel_sedi_dma

#include <errno.h>
#include <stdio.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/device.h>
#include <string.h>
#include <zephyr/init.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/devicetree.h>
#include <zephyr/cache.h>
#include <soc.h>
#include "sedi_driver_dma.h"
#include "sedi_driver_core.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(sedi_dma, CONFIG_DMA_LOG_LEVEL);

extern void dma_isr(sedi_dma_t dma_device);

struct dma_sedi_config_info {
	sedi_dma_t peripheral_id; /* Controller instance. */
	uint8_t chn_num;
	void (*irq_config)(void);
};

struct dma_sedi_driver_data {
	struct dma_config dma_configs[DMA_CHANNEL_NUM];
};

#define DEV_DATA(dev) ((struct dma_sedi_driver_data *const)(dev)->data)
#define DEV_CFG(dev) \
	((const struct dma_sedi_config_info *const)(dev)->config)

/*
 * This function is called when a DMA transfer completes or an error occurs.
 */
static void dma_handler(sedi_dma_t dma_device, int channel, int event_id,
			void *args)
{
	const struct device *dev = (const struct device *)args;
	struct dma_sedi_driver_data *const data = DEV_DATA(dev);
	struct dma_config *config = &(data->dma_configs[channel]);

	/* run user-defined callback */
	if (config->dma_callback) {
		if ((event_id == SEDI_DMA_EVENT_TRANSFER_DONE) &&
		    (config->complete_callback_en)) {
			config->dma_callback(dev, config->user_data,
					     channel, 0);
		} else if (!config->error_callback_dis) {
			config->dma_callback(dev, config->user_data,
					     channel, event_id);
		}
	}
}

/* map transfer width in bytes to the SEDI width macros */
static int width_index(uint32_t num_bytes, uint32_t *index)
{
	switch (num_bytes) {
	case 1:
		*index = DMA_TRANS_WIDTH_8;
		break;
	case 2:
		*index = DMA_TRANS_WIDTH_16;
		break;
	case 4:
		*index = DMA_TRANS_WIDTH_32;
		break;
	case 8:
		*index = DMA_TRANS_WIDTH_64;
		break;
	case 16:
		*index = DMA_TRANS_WIDTH_128;
		break;
	case 32:
		*index = DMA_TRANS_WIDTH_256;
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

/* map burst size to the SEDI burst-length macros */
static int burst_index(uint32_t num_units, uint32_t *index)
{
	switch (num_units) {
	case 1:
		*index = DMA_BURST_TRANS_LENGTH_1;
		break;
	case 4:
		*index = DMA_BURST_TRANS_LENGTH_4;
		break;
	case 8:
		*index = DMA_BURST_TRANS_LENGTH_8;
		break;
	case 16:
		*index = DMA_BURST_TRANS_LENGTH_16;
		break;
	case 32:
		*index = DMA_BURST_TRANS_LENGTH_32;
		break;
	case 64:
		*index = DMA_BURST_TRANS_LENGTH_64;
		break;
	case 128:
		*index = DMA_BURST_TRANS_LENGTH_128;
		break;
	case 256:
		*index = DMA_BURST_TRANS_LENGTH_256;
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

static void dma_config_convert(struct dma_config *config,
			       dma_memory_type_t *src_mem,
			       dma_memory_type_t *dst_mem,
			       uint8_t *sedi_dma_dir)
{
	*src_mem = DMA_SRAM_MEM;
	*dst_mem = DMA_SRAM_MEM;
	*sedi_dma_dir = MEMORY_TO_MEMORY;
	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
	case MEMORY_TO_PERIPHERAL:
	case PERIPHERAL_TO_MEMORY:
	case PERIPHERAL_TO_PERIPHERAL:
		*sedi_dma_dir = config->channel_direction;
		break;
	case MEMORY_TO_HOST:
		*dst_mem = DMA_DRAM_MEM;
		break;
	case HOST_TO_MEMORY:
		*src_mem = DMA_DRAM_MEM;
		break;
#ifdef MEMORY_TO_IMR
	case MEMORY_TO_IMR:
		*dst_mem = DMA_UMA_MEM;
		break;
#endif
#ifdef IMR_TO_MEMORY
	case IMR_TO_MEMORY:
		*src_mem = DMA_UMA_MEM;
		break;
#endif
	}
}

/* configure the basic DMA settings */
static int dma_sedi_apply_common_config(sedi_dma_t dev, uint32_t channel,
					struct dma_config *config,
					uint8_t *dir)
{
	uint8_t direction = MEMORY_TO_MEMORY;
	dma_memory_type_t src_mem = DMA_SRAM_MEM, dst_mem = DMA_SRAM_MEM;

	dma_config_convert(config, &src_mem, &dst_mem, &direction);
	if (dir) {
		*dir = direction;
	}

	/* configure the DMA transfer direction */
	sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_DIRECTION, direction);

	if (direction == MEMORY_TO_MEMORY) {
		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_SR_MEM_TYPE,
				 src_mem);
		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_DT_MEM_TYPE,
				 dst_mem);
	} else if (direction == MEMORY_TO_PERIPHERAL) {
		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_HS_DEVICE_ID,
				 config->dma_slot);
		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_HS_POLARITY,
				 DMA_HS_POLARITY_HIGH);
		sedi_dma_control(dev, channel,
				 SEDI_CONFIG_DMA_HS_DEVICE_ID_PER_DIR,
				 DMA_HS_PER_TX);
	} else if (direction == PERIPHERAL_TO_MEMORY) {
		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_HS_DEVICE_ID,
				 config->dma_slot);
		sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_HS_POLARITY,
				 DMA_HS_POLARITY_HIGH);
		sedi_dma_control(dev, channel,
				 SEDI_CONFIG_DMA_HS_DEVICE_ID_PER_DIR,
				 DMA_HS_PER_RX);
	} else {
		return -1;
	}

	return 0;
}

static int dma_sedi_apply_single_config(sedi_dma_t dev, uint32_t channel,
					struct dma_config *config)
{
	int ret = 0;
	uint32_t temp = 0;

	ret = dma_sedi_apply_common_config(dev, channel, config, NULL);
	if (ret != 0) {
		goto INVALID_ARGS;
	}
	/* configure the source data width */
	ret = width_index(config->source_data_size, &temp);
	if (ret != 0) {
		goto INVALID_ARGS;
	}
	sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_SR_TRANS_WIDTH, temp);

	/* configure the destination data width */
	ret = width_index(config->dest_data_size, &temp);
	if (ret != 0) {
		goto INVALID_ARGS;
	}
	sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_DT_TRANS_WIDTH, temp);

	/* configure the burst size */
	ret = burst_index(config->source_burst_length, &temp);
	if (ret != 0) {
		goto INVALID_ARGS;
	}
	sedi_dma_control(dev, channel, SEDI_CONFIG_DMA_BURST_LENGTH, temp);

	return 0;

INVALID_ARGS:
	return ret;
}

static int dma_sedi_chan_config(const struct device *dev, uint32_t channel,
				struct dma_config *config)
{
	if ((dev == NULL) || (channel >= DEV_CFG(dev)->chn_num) ||
	    (config == NULL) || (config->block_count != 1)) {
		goto INVALID_ARGS;
	}

	const struct dma_sedi_config_info *const info = DEV_CFG(dev);
	struct dma_sedi_driver_data *const data = DEV_DATA(dev);

	memcpy(&(data->dma_configs[channel]), config,
	       sizeof(struct dma_config));

	/* initialize the dma controller, following the sedi api */
	sedi_dma_event_cb_t cb = dma_handler;

	sedi_dma_init(info->peripheral_id, (int)channel, cb, (void *)dev);

	return 0;

INVALID_ARGS:
	return -1;
}

static int dma_sedi_reload(const struct device *dev, uint32_t channel,
			   uint64_t src, uint64_t dst, size_t size)
{
	if ((dev == NULL) || (channel >= DEV_CFG(dev)->chn_num)) {
		LOG_ERR("dma reload failed for invalid args");
		return -ENOTSUP;
	}

	int ret = 0;
	struct dma_sedi_driver_data *const data = DEV_DATA(dev);
	struct dma_config *config = &(data->dma_configs[channel]);
	struct dma_block_config *block_config;

	if ((config == NULL) || (config->head_block == NULL)) {
		LOG_ERR("dma reload failed, no config found");
		return -ENOTSUP;
	}
	block_config = config->head_block;

	if ((config->block_count == 1) || (block_config->next_block == NULL)) {
		block_config->source_address = src;
		block_config->dest_address = dst;
		block_config->block_size = size;
	} else {
		LOG_ERR("no reload support for multi-linkedlist mode");
		return -ENOTSUP;
	}

	return ret;
}

static int dma_sedi_start(const struct device *dev, uint32_t channel)
{
	if ((dev == NULL) || (channel >= DEV_CFG(dev)->chn_num)) {
		LOG_ERR("dma transfer failed for invalid args");
		return -ENOTSUP;
	}

	int ret = -1;
	const struct dma_sedi_config_info *const info = DEV_CFG(dev);
	struct dma_sedi_driver_data *const data = DEV_DATA(dev);
	struct dma_config *config = &(data->dma_configs[channel]);
	struct dma_block_config *block_config = config->head_block;
	uint64_t src_addr, dst_addr;

	if (config->block_count == 1) {
		/* call sedi start function */
		ret = dma_sedi_apply_single_config(info->peripheral_id,
						   channel, config);
		if (ret) {
			goto ERR;
		}
		src_addr = block_config->source_address;
		dst_addr = block_config->dest_address;

		ret = sedi_dma_start_transfer(info->peripheral_id, channel,
					      src_addr, dst_addr,
					      block_config->block_size);
	} else {
		LOG_ERR("MULTIPLE_BLOCK CONFIG is not set");
		goto ERR;
	}

	if (ret != SEDI_DRIVER_OK) {
		goto ERR;
	}

	return ret;

ERR:
	LOG_ERR("dma transfer failed");
	return ret;
}

static int dma_sedi_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_sedi_config_info *const info = DEV_CFG(dev);

	LOG_DBG("stopping dma: %p, %d", dev, channel);
	sedi_dma_abort_transfer(info->peripheral_id, channel);

	return 0;
}

static const struct dma_driver_api dma_funcs = {
	.config = dma_sedi_chan_config,
	.start = dma_sedi_start,
	.stop = dma_sedi_stop,
	.reload = dma_sedi_reload,
	.get_status = NULL
};

static int dma_sedi_init(const struct device *dev)
{
	const struct dma_sedi_config_info *const config = DEV_CFG(dev);

	config->irq_config();

	return 0;
}

#define DMA_DEVICE_INIT_SEDI(inst)                                          \
	static void dma_sedi_##inst##_irq_config(void);                     \
									    \
	static struct dma_sedi_driver_data dma_sedi_dev_data_##inst;       \
	static const struct dma_sedi_config_info                            \
		dma_sedi_config_data_##inst = {                             \
			.peripheral_id = DT_INST_PROP(inst, peripheral_id), \
			.chn_num = DT_INST_PROP(inst, dma_channels),        \
			.irq_config = dma_sedi_##inst##_irq_config          \
	};                                                                  \
	DEVICE_DT_DEFINE(DT_INST(inst, DT_DRV_COMPAT), &dma_sedi_init,      \
			 NULL, &dma_sedi_dev_data_##inst,                   \
			 &dma_sedi_config_data_##inst, PRE_KERNEL_2,        \
			 CONFIG_KERNEL_INIT_PRIORITY_DEVICE,                \
			 (void *)&dma_funcs);                               \
									    \
	static void dma_sedi_##inst##_irq_config(void)                      \
	{                                                                   \
		IRQ_CONNECT(DT_INST_IRQN(inst),                             \
			    DT_INST_IRQ(inst, priority), dma_isr,           \
			    (void *)DT_INST_PROP(inst, peripheral_id),      \
			    DT_INST_IRQ(inst, sense));                      \
		irq_enable(DT_INST_IRQN(inst));                             \
	}

DT_INST_FOREACH_STATUS_OKAY(DMA_DEVICE_INIT_SEDI)
```
/content/code_sandbox/drivers/dma/dma_sedi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,742
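The SEDI shim above only accepts single-block transfers (`block_count == 1`) and maps the generic widths and burst lengths through `width_index()`/`burst_index()`, so any value outside those switch statements is rejected with `-ENOTSUP`. A minimal sketch of driving it through Zephyr's portable DMA API follows; the channel number, buffer sizes, and the way the device handle is obtained are illustrative assumptions, not part of the driver.

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

static uint8_t src_buf[64];
static uint8_t dst_buf[64];

/* matches the dma_callback_t signature invoked by dma_handler() above:
 * status is 0 on SEDI_DMA_EVENT_TRANSFER_DONE, the event ID otherwise
 */
static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
}

int start_mem_to_mem(const struct device *dma_dev, uint32_t channel)
{
	struct dma_block_config block = {
		.source_address = (uintptr_t)src_buf,
		.dest_address = (uintptr_t)dst_buf,
		.block_size = sizeof(src_buf),
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,    /* maps to DMA_TRANS_WIDTH_32 */
		.dest_data_size = 4,
		.source_burst_length = 8, /* maps to DMA_BURST_TRANS_LENGTH_8 */
		.block_count = 1,         /* multi-block configs are rejected */
		.head_block = &block,
		.dma_callback = xfer_done,
		.complete_callback_en = 1,
	};
	int ret = dma_config(dma_dev, channel, &cfg);

	if (ret) {
		return ret;
	}
	return dma_start(dma_dev, channel);
}
```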
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/dma.h> #include <errno.h> #include <zephyr/init.h> #include <string.h> #include <soc.h> #include <zephyr/sys/__assert.h> #include "dma_pl330.h" #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_pl330); #define BYTE_WIDTH(burst_size) (1 << (burst_size)) static int dma_pl330_submit(const struct device *dev, uint64_t dst, uint64_t src, uint32_t channel, uint32_t size); static void dma_pl330_get_counter(struct dma_pl330_ch_internal *ch_handle, uint32_t *psrc_byte_width, uint32_t *pdst_byte_width, uint32_t *ploop_counter, uint32_t *presidue) { uint32_t srcbytewidth, dstbytewidth; uint32_t loop_counter, residue; srcbytewidth = BYTE_WIDTH(ch_handle->src_burst_sz); dstbytewidth = BYTE_WIDTH(ch_handle->dst_burst_sz); loop_counter = ch_handle->trans_size / (srcbytewidth * (ch_handle->src_burst_len + 1)); residue = ch_handle->trans_size - loop_counter * (srcbytewidth * (ch_handle->src_burst_len + 1)); *psrc_byte_width = srcbytewidth; *pdst_byte_width = dstbytewidth; *ploop_counter = loop_counter; *presidue = residue; } static uint32_t dma_pl330_ch_ccr(struct dma_pl330_ch_internal *ch_handle) { uint32_t ccr; int secure = ch_handle->nonsec_mode ? SRC_PRI_NONSEC_VALUE : SRC_PRI_SEC_VALUE; ccr = ((ch_handle->dst_cache_ctrl & CC_SRCCCTRL_MASK) << CC_DSTCCTRL_SHIFT) + ((ch_handle->nonsec_mode) << CC_DSTNS_SHIFT) + (ch_handle->dst_burst_len << CC_DSTBRSTLEN_SHIFT) + (ch_handle->dst_burst_sz << CC_DSTBRSTSIZE_SHIFT) + (ch_handle->dst_inc << CC_DSTINC_SHIFT) + ((ch_handle->src_cache_ctrl & CC_SRCCCTRL_MASK) << CC_SRCCCTRL_SHIFT) + (secure << CC_SRCPRI_SHIFT) + (ch_handle->src_burst_len << CC_SRCBRSTLEN_SHIFT) + (ch_handle->src_burst_sz << CC_SRCBRSTSIZE_SHIFT) + (ch_handle->src_inc << CC_SRCINC_SHIFT); return ccr; } static void dma_pl330_calc_burstsz_len(struct dma_pl330_ch_internal *ch_handle, uint64_t dst, uint64_t src, uint32_t size) { uint32_t byte_width, burst_sz, burst_len; burst_sz = MAX_BURST_SIZE_LOG2; /* src, dst and size should be aligned to burst size in bytes */ while ((src | dst | size) & ((BYTE_WIDTH(burst_sz)) - 1)) { burst_sz--; } byte_width = BYTE_WIDTH(burst_sz); burst_len = MAX_BURST_LEN; while (burst_len) { /* Choose burst length so that size is aligned */ if (!(size % ((burst_len + 1) << byte_width))) { break; } burst_len--; } ch_handle->src_burst_len = burst_len; ch_handle->src_burst_sz = burst_sz; ch_handle->dst_burst_len = burst_len; ch_handle->dst_burst_sz = burst_sz; } #ifdef CONFIG_DMA_64BIT static void dma_pl330_cfg_dmac_add_control(uint32_t control_reg_base, uint64_t dst, uint64_t src, int ch) { uint32_t src_h = src >> 32; uint32_t dst_h = dst >> 32; uint32_t dmac_higher_addr; dmac_higher_addr = ((dst_h & HIGHER_32_ADDR_MASK) << DST_ADDR_SHIFT) | (src_h & HIGHER_32_ADDR_MASK); sys_write32(dmac_higher_addr, control_reg_base + (ch * CONTROL_OFFSET) ); } #endif static void dma_pl330_config_channel(struct dma_pl330_ch_config *ch_cfg, uint64_t dst, uint64_t src, uint32_t size) { struct dma_pl330_ch_internal *ch_handle = &ch_cfg->internal; ch_handle->src_addr = src; ch_handle->dst_addr = dst; ch_handle->trans_size = size; if (ch_cfg->src_addr_adj == DMA_ADDR_ADJ_INCREMENT) { ch_handle->src_inc = 1; } if (ch_cfg->dst_addr_adj == DMA_ADDR_ADJ_INCREMENT) { ch_handle->dst_inc = 1; } } static inline uint32_t dma_pl330_gen_mov(mem_addr_t buf, enum dmamov_type type, uint32_t val) { sys_write8(OP_DMA_MOV, buf); sys_write8(type, buf + 1); sys_write8(val, buf + 2); sys_write8(val >> 8, 
buf + 3); sys_write8(val >> 16, buf + 4); sys_write8(val >> 24, buf + 5); return SZ_CMD_DMAMOV; } static inline void dma_pl330_gen_op(uint8_t opcode, uint32_t addr, uint32_t val) { sys_write8(opcode, addr); sys_write8(val, addr + 1); } static int dma_pl330_setup_ch(const struct device *dev, struct dma_pl330_ch_internal *ch_dat, int ch) { mem_addr_t dma_exec_addr; uint32_t offset = 0, ccr; uint32_t lp0_start, lp1_start; uint32_t loop_counter0 = 0, loop_counter1 = 0; uint32_t srcbytewidth, dstbytewidth; uint32_t loop_counter, residue; struct dma_pl330_dev_data *const dev_data = dev->data; struct dma_pl330_ch_config *channel_cfg; int secure = ch_dat->nonsec_mode ? SRC_PRI_NONSEC_VALUE : SRC_PRI_SEC_VALUE; channel_cfg = &dev_data->channels[ch]; dma_exec_addr = channel_cfg->dma_exec_addr; offset += dma_pl330_gen_mov(dma_exec_addr, SAR, ch_dat->src_addr); offset += dma_pl330_gen_mov(dma_exec_addr + offset, DAR, ch_dat->dst_addr); ccr = dma_pl330_ch_ccr(ch_dat); offset += dma_pl330_gen_mov(dma_exec_addr + offset, CCR, ccr); dma_pl330_get_counter(ch_dat, &srcbytewidth, &dstbytewidth, &loop_counter, &residue); if (loop_counter >= PL330_LOOP_COUNTER0_MAX) { loop_counter0 = PL330_LOOP_COUNTER0_MAX - 1; loop_counter1 = loop_counter / PL330_LOOP_COUNTER0_MAX - 1; dma_pl330_gen_op(OP_DMA_LOOP_COUNT1, dma_exec_addr + offset, loop_counter1 & 0xff); offset = offset + 2; dma_pl330_gen_op(OP_DMA_LOOP, dma_exec_addr + offset, loop_counter0 & 0xff); offset = offset + 2; lp1_start = offset; lp0_start = offset; sys_write8(OP_DMA_LD, dma_exec_addr + offset); sys_write8(OP_DMA_ST, dma_exec_addr + offset + 1); offset = offset + 2; dma_pl330_gen_op(OP_DMA_LP_BK_JMP1, dma_exec_addr + offset, ((offset - lp0_start) & 0xff)); offset = offset + 2; dma_pl330_gen_op(OP_DMA_LOOP, dma_exec_addr + offset, (loop_counter0 & 0xff)); offset = offset + 2; loop_counter1--; dma_pl330_gen_op(OP_DMA_LP_BK_JMP2, dma_exec_addr + offset, ((offset - lp1_start) & 0xff)); offset = offset + 2; } if ((loop_counter % PL330_LOOP_COUNTER0_MAX) != 0) { loop_counter0 = (loop_counter % PL330_LOOP_COUNTER0_MAX) - 1; dma_pl330_gen_op(OP_DMA_LOOP, dma_exec_addr + offset, (loop_counter0 & 0xff)); offset = offset + 2; loop_counter1--; lp0_start = offset; sys_write8(OP_DMA_LD, dma_exec_addr + offset); sys_write8(OP_DMA_ST, dma_exec_addr + offset + 1); offset = offset + 2; dma_pl330_gen_op(OP_DMA_LP_BK_JMP1, dma_exec_addr + offset, ((offset - lp0_start) & 0xff)); offset = offset + 2; } if (residue != 0) { ccr = ((ch_dat->nonsec_mode) << CC_DSTNS_SHIFT) + (0x0 << CC_DSTBRSTLEN_SHIFT) + (0x0 << CC_DSTBRSTSIZE_SHIFT) + (ch_dat->dst_inc << CC_DSTINC_SHIFT) + (secure << CC_SRCPRI_SHIFT) + (0x0 << CC_SRCBRSTLEN_SHIFT) + (0x0 << CC_SRCBRSTSIZE_SHIFT) + ch_dat->src_inc; offset += dma_pl330_gen_mov(dma_exec_addr + offset, CCR, ccr); dma_pl330_gen_op(OP_DMA_LOOP, dma_exec_addr + offset, ((residue - 1) & 0xff)); offset = offset + 2; lp0_start = offset; sys_write8(OP_DMA_LD, dma_exec_addr + offset); sys_write8(OP_DMA_ST, dma_exec_addr + offset + 1); offset = offset + 2; dma_pl330_gen_op(OP_DMA_LP_BK_JMP1, dma_exec_addr + offset, ((offset - lp0_start) & 0xff)); offset = offset + 2; } sys_write8(OP_DMA_END, dma_exec_addr + offset); sys_write8(OP_DMA_END, dma_exec_addr + offset + 1); sys_write8(OP_DMA_END, dma_exec_addr + offset + 2); sys_write8(OP_DMA_END, dma_exec_addr + offset + 3); return 0; } static int dma_pl330_start_dma_ch(const struct device *dev, uint32_t reg_base, int ch, int secure) { struct dma_pl330_dev_data *const dev_data = dev->data; struct 
dma_pl330_ch_config *channel_cfg; uint32_t count = 0U; uint32_t data; channel_cfg = &dev_data->channels[ch]; do { data = sys_read32(reg_base + DMAC_PL330_DBGSTATUS); if (++count > DMA_TIMEOUT_US) { return -ETIMEDOUT; } k_busy_wait(1); } while ((data & DATA_MASK) != 0); sys_write32(((ch << DMA_INTSR1_SHIFT) + (DMA_INTSR0 << DMA_INTSR0_SHIFT) + (secure << DMA_SECURE_SHIFT) + (ch << DMA_CH_SHIFT)), reg_base + DMAC_PL330_DBGINST0); sys_write32(channel_cfg->dma_exec_addr, reg_base + DMAC_PL330_DBGINST1); sys_write32(0x0, reg_base + DMAC_PL330_DBGCMD); count = 0U; do { data = sys_read32(reg_base + DMAC_PL330_DBGCMD); if (++count > DMA_TIMEOUT_US) { return -ETIMEDOUT; } k_busy_wait(1); } while ((data & DATA_MASK) != 0); return 0; } static int dma_pl330_wait(uint32_t reg_base, int ch) { int count = 0U; uint32_t cs0_reg = reg_base + DMAC_PL330_CS0; do { if (++count > DMA_TIMEOUT_US) { return -ETIMEDOUT; } k_busy_wait(1); } while (((sys_read32(cs0_reg + ch * 8)) & CH_STATUS_MASK) != 0); return 0; } static int dma_pl330_xfer(const struct device *dev, uint64_t dst, uint64_t src, uint32_t size, uint32_t channel, uint32_t *xfer_size) { struct dma_pl330_dev_data *const dev_data = dev->data; const struct dma_pl330_config *const dev_cfg = dev->config; struct dma_pl330_ch_config *channel_cfg; struct dma_pl330_ch_internal *ch_handle; int ret; uint32_t max_size; channel_cfg = &dev_data->channels[channel]; ch_handle = &channel_cfg->internal; dma_pl330_calc_burstsz_len(ch_handle, dst, src, size); max_size = GET_MAX_DMA_SIZE((1 << ch_handle->src_burst_sz), ch_handle->src_burst_len); if (size > max_size) { size = max_size; } dma_pl330_config_channel(channel_cfg, dst, src, size); #ifdef CONFIG_DMA_64BIT /* * Pl330 supports only 4GB boundary, but boundary region can be * configured. * Support added for 36bit address, lower 32bit address are configured * in pl330 registers and higher 4bit address are configured in * LS_ICFG_DMAC_AXI_ADD_CONTROL registers. * Each channel has 1 control register to configure higher 4bit address. */ dma_pl330_cfg_dmac_add_control(dev_cfg->control_reg_base, dst, src, channel); #endif ret = dma_pl330_setup_ch(dev, ch_handle, channel); if (ret) { LOG_ERR("Failed to setup channel for DMA PL330"); goto err; } ret = dma_pl330_start_dma_ch(dev, dev_cfg->reg_base, channel, ch_handle->nonsec_mode); if (ret) { LOG_ERR("Failed to start DMA PL330"); goto err; } ret = dma_pl330_wait(dev_cfg->reg_base, channel); if (ret) { LOG_ERR("Failed waiting to finish DMA PL330"); goto err; } *xfer_size = size; err: return ret; } #if CONFIG_DMA_64BIT static int dma_pl330_handle_boundary(const struct device *dev, uint64_t dst, uint64_t src, uint32_t channel, uint32_t size) { uint32_t dst_low = (uint32_t)dst; uint32_t src_low = (uint32_t)src; uint32_t transfer_size; int ret; /* * Pl330 has only 32bit registers and supports 4GB memory. * 4GB memory window can be configured using DMAC_AXI_ADD_CONTROL * registers. * Divide the DMA operation in 2 parts, 1st DMA from given address * to boundary (0xffffffff) and 2nd DMA on remaining size. 
*/ if (size > (PL330_MAX_OFFSET - dst_low)) { transfer_size = PL330_MAX_OFFSET - dst_low; ret = dma_pl330_submit(dev, dst, src, channel, transfer_size); if (ret < 0) { return ret; } dst += transfer_size; src += transfer_size; size -= transfer_size; return dma_pl330_submit(dev, dst, src, channel, size); } if (size > (PL330_MAX_OFFSET - src_low)) { transfer_size = PL330_MAX_OFFSET - src_low; ret = dma_pl330_submit(dev, dst, src, channel, transfer_size); if (ret < 0) { return ret; } src += transfer_size; dst += transfer_size; size -= transfer_size; return dma_pl330_submit(dev, dst, src, channel, size); } return 0; } #endif static int dma_pl330_submit(const struct device *dev, uint64_t dst, uint64_t src, uint32_t channel, uint32_t size) { int ret; uint32_t xfer_size; #if CONFIG_DMA_64BIT /* * Pl330 has only 32bit registers and supports 4GB memory. * 4GB memory window can be configured using DMAC_AXI_ADD_CONTROL * registers. 32bit boundary (0xffffffff) should be check. * DMA on boundary condition is taken care in below function. */ if ((size > (PL330_MAX_OFFSET - (uint32_t)dst)) || (size > (PL330_MAX_OFFSET - (uint32_t)src))) { return dma_pl330_handle_boundary(dev, dst, src, channel, size); } #endif while (size) { xfer_size = 0; ret = dma_pl330_xfer(dev, dst, src, size, channel, &xfer_size); if (ret) { return ret; } if (xfer_size > size) { return -EFAULT; } size -= xfer_size; dst += xfer_size; src += xfer_size; } return 0; } static int dma_pl330_configure(const struct device *dev, uint32_t channel, struct dma_config *cfg) { struct dma_pl330_dev_data *const dev_data = dev->data; struct dma_pl330_ch_config *channel_cfg; struct dma_pl330_ch_internal *ch_handle; if (channel >= MAX_DMA_CHANNELS) { return -EINVAL; } channel_cfg = &dev_data->channels[channel]; k_mutex_lock(&channel_cfg->ch_mutex, K_FOREVER); if (channel_cfg->channel_active) { k_mutex_unlock(&channel_cfg->ch_mutex); return -EBUSY; } channel_cfg->channel_active = 1; k_mutex_unlock(&channel_cfg->ch_mutex); if (cfg->channel_direction != MEMORY_TO_MEMORY) { return -ENOTSUP; } ch_handle = &channel_cfg->internal; memset(ch_handle, 0, sizeof(*ch_handle)); channel_cfg->direction = cfg->channel_direction; channel_cfg->dst_addr_adj = cfg->head_block->dest_addr_adj; channel_cfg->src_addr = cfg->head_block->source_address; channel_cfg->dst_addr = cfg->head_block->dest_address; channel_cfg->trans_size = cfg->head_block->block_size; channel_cfg->dma_callback = cfg->dma_callback; channel_cfg->user_data = cfg->user_data; if (cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT || cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) { channel_cfg->src_addr_adj = cfg->head_block->source_addr_adj; } else { return -ENOTSUP; } if (cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT || cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) { channel_cfg->dst_addr_adj = cfg->head_block->dest_addr_adj; } else { return -ENOTSUP; } return 0; } static int dma_pl330_transfer_start(const struct device *dev, uint32_t channel) { struct dma_pl330_dev_data *const dev_data = dev->data; struct dma_pl330_ch_config *channel_cfg; int ret; if (channel >= MAX_DMA_CHANNELS) { return -EINVAL; } channel_cfg = &dev_data->channels[channel]; ret = dma_pl330_submit(dev, channel_cfg->dst_addr, channel_cfg->src_addr, channel, channel_cfg->trans_size); k_mutex_lock(&channel_cfg->ch_mutex, K_FOREVER); channel_cfg->channel_active = 0; k_mutex_unlock(&channel_cfg->ch_mutex); return ret; } static int dma_pl330_transfer_stop(const struct device *dev, uint32_t channel) 
{ if (channel >= MAX_DMA_CHANNELS) { return -EINVAL; } /* Nothing as of now */ return 0; } static int dma_pl330_initialize(const struct device *dev) { const struct dma_pl330_config *const dev_cfg = dev->config; struct dma_pl330_dev_data *const dev_data = dev->data; struct dma_pl330_ch_config *channel_cfg; for (int channel = 0; channel < MAX_DMA_CHANNELS; channel++) { channel_cfg = &dev_data->channels[channel]; channel_cfg->dma_exec_addr = dev_cfg->mcode_base + (channel * MICROCODE_SIZE_MAX); k_mutex_init(&channel_cfg->ch_mutex); } LOG_INF("Device %s initialized", dev->name); return 0; } static const struct dma_driver_api pl330_driver_api = { .config = dma_pl330_configure, .start = dma_pl330_transfer_start, .stop = dma_pl330_transfer_stop, }; static const struct dma_pl330_config pl330_config = { .reg_base = DT_INST_REG_ADDR(0), #ifdef CONFIG_DMA_64BIT .control_reg_base = DT_INST_REG_ADDR_BY_NAME(0, control_regs), #endif .mcode_base = DT_INST_PROP_BY_IDX(0, microcode, 0), }; static struct dma_pl330_dev_data pl330_data; DEVICE_DT_INST_DEFINE(0, &dma_pl330_initialize, NULL, &pl330_data, &pl330_config, POST_KERNEL, CONFIG_DMA_INIT_PRIORITY, &pl330_driver_api); ```
/content/code_sandbox/drivers/dma/dma_pl330.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,655
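The heart of `dma_pl330_calc_burstsz_len()` above is alignment math: pick the largest power-of-two beat size that the source address, destination address, and size all share, then the largest beat count such that the transfer divides into whole bursts. The standalone restatement below uses an explicit multiply where the driver uses a shift, and the `MAX_BURST_SIZE_LOG2`/`MAX_BURST_LEN` values are illustrative assumptions standing in for the driver's private constants in dma_pl330.h.

```c
#include <stdint.h>
#include <stdio.h>

#define BYTE_WIDTH(burst_sz) (1u << (burst_sz))
#define MAX_BURST_SIZE_LOG2  3u  /* assume beats of up to 8 bytes */
#define MAX_BURST_LEN        15u /* assume up to 16 beats per burst */

static void calc_burst(uint64_t src, uint64_t dst, uint32_t size,
		       uint32_t *burst_sz, uint32_t *burst_len)
{
	uint32_t sz = MAX_BURST_SIZE_LOG2;
	uint32_t len = MAX_BURST_LEN;

	/* largest power-of-two beat size shared by src, dst and size */
	while ((src | dst | size) & (BYTE_WIDTH(sz) - 1)) {
		sz--;
	}
	/* largest beat count such that size splits into whole bursts */
	while (len && (size % ((len + 1) * BYTE_WIDTH(sz)))) {
		len--;
	}
	*burst_sz = sz;
	*burst_len = len;
}

int main(void)
{
	uint32_t sz, len;

	/* a 4 KiB copy between 8-byte-aligned buffers yields beat size
	 * 8 bytes (sz == 3) and 16 beats per burst (len == 15), so the
	 * driver's loop_counter becomes 4096 / (8 * 16) = 32.
	 */
	calc_burst(0x20000000, 0x20100000, 4096, &sz, &len);
	printf("burst_sz=%u burst_len=%u\n", sz, len);
	return 0;
}
```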
```c /* * */ #include "dma_nxp_edma.h" /* TODO list: * 1) Support for requesting a specific channel. * 2) Support for checking if DMA transfer is pending when attempting config. (?) * 3) Support for error interrupt. * 4) Support for error if buffer overflow/underrun. * 5) Ideally, HALFMAJOR should be set on a per-channel basis not through a * config. If not possible, this should be done through a DTS property. Also, * maybe do the same for INTMAJOR IRQ. */ static void edma_isr(const void *parameter) { const struct edma_config *cfg; struct edma_data *data; struct edma_channel *chan; int ret; uint32_t update_size; chan = (struct edma_channel *)parameter; cfg = chan->dev->config; data = chan->dev->data; if (!EDMA_ChannelRegRead(data->hal_cfg, chan->id, EDMA_TCD_CH_INT)) { /* skip, interrupt was probably triggered by another channel */ return; } /* clear interrupt */ EDMA_ChannelRegUpdate(data->hal_cfg, chan->id, EDMA_TCD_CH_INT, EDMA_TCD_CH_INT_MASK, 0); if (chan->cyclic_buffer) { update_size = chan->bsize; if (IS_ENABLED(CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ)) { update_size = chan->bsize / 2; } else { update_size = chan->bsize; } /* TODO: add support for error handling here */ ret = EDMA_CHAN_PRODUCE_CONSUME_A(chan, update_size); if (ret < 0) { LOG_ERR("chan %d buffer overflow/underrun", chan->id); } } /* TODO: are there any sanity checks we have to perform before invoking * the registered callback? */ if (chan->cb) { chan->cb(chan->dev, chan->arg, chan->id, DMA_STATUS_COMPLETE); } } static struct edma_channel *lookup_channel(const struct device *dev, uint32_t chan_id) { struct edma_data *data; const struct edma_config *cfg; int i; data = dev->data; cfg = dev->config; /* optimization: if dma-channels property is present then * the channel data associated with the passed channel ID * can be found at index chan_id in the array of channels. */ if (cfg->contiguous_channels) { /* check for index out of bounds */ if (chan_id >= data->ctx.dma_channels) { return NULL; } return &data->channels[chan_id]; } /* channels are passed through the valid-channels property. * As such, since some channels may be missing we need to * look through the entire channels array for an ID match. 
*/ for (i = 0; i < data->ctx.dma_channels; i++) { if (data->channels[i].id == chan_id) { return &data->channels[i]; } } return NULL; } static int edma_config(const struct device *dev, uint32_t chan_id, struct dma_config *dma_cfg) { struct edma_data *data; const struct edma_config *cfg; struct edma_channel *chan; uint32_t transfer_type; int ret; data = dev->data; cfg = dev->config; if (!dma_cfg->head_block) { LOG_ERR("head block shouldn't be NULL"); return -EINVAL; } /* validate source data size (SSIZE) */ if (!EDMA_TransferWidthIsValid(data->hal_cfg, dma_cfg->source_data_size)) { LOG_ERR("invalid source data size: %d", dma_cfg->source_data_size); return -EINVAL; } /* validate destination data size (DSIZE) */ if (!EDMA_TransferWidthIsValid(data->hal_cfg, dma_cfg->dest_data_size)) { LOG_ERR("invalid destination data size: %d", dma_cfg->dest_data_size); return -EINVAL; } /* validate configured alignment */ if (!EDMA_TransferWidthIsValid(data->hal_cfg, CONFIG_DMA_NXP_EDMA_ALIGN)) { LOG_ERR("configured alignment %d is invalid", CONFIG_DMA_NXP_EDMA_ALIGN); return -EINVAL; } /* Scatter-Gather configurations currently not supported */ if (dma_cfg->block_count != 1) { LOG_ERR("number of blocks %d not supported", dma_cfg->block_count); return -ENOTSUP; } /* source address shouldn't be NULL */ if (!dma_cfg->head_block->source_address) { LOG_ERR("source address cannot be NULL"); return -EINVAL; } /* destination address shouldn't be NULL */ if (!dma_cfg->head_block->dest_address) { LOG_ERR("destination address cannot be NULL"); return -EINVAL; } /* check source address's (SADDR) alignment with respect to the data size (SSIZE) * * Failing to meet this condition will lead to the assertion of the SAE * bit (see CHn_ES register). * * TODO: this will also restrict scenarios such as the following: * SADDR is 8B aligned and SSIZE is 16B. I've tested this * scenario and seems to raise no hardware errors (I'm assuming * because this doesn't break the 8B boundary of the 64-bit system * I tested it on). Is there a need to allow such a scenario? */ if (dma_cfg->head_block->source_address % dma_cfg->source_data_size) { LOG_ERR("source address 0x%x alignment doesn't match data size %d", dma_cfg->head_block->source_address, dma_cfg->source_data_size); return -EINVAL; } /* check destination address's (DADDR) alignment with respect to the data size (DSIZE) * Failing to meet this condition will lead to the assertion of the DAE * bit (see CHn_ES register). */ if (dma_cfg->head_block->dest_address % dma_cfg->dest_data_size) { LOG_ERR("destination address 0x%x alignment doesn't match data size %d", dma_cfg->head_block->dest_address, dma_cfg->dest_data_size); return -EINVAL; } /* source burst length should match destination burst length. * This is because the burst length is the equivalent of NBYTES which * is used for both the destination and the source. */ if (dma_cfg->source_burst_length != dma_cfg->dest_burst_length) { LOG_ERR("source burst length %d doesn't match destination burst length %d", dma_cfg->source_burst_length, dma_cfg->dest_burst_length); return -EINVAL; } /* total number of bytes should be a multiple of NBYTES. * * This is needed because the EDMA engine performs transfers based * on CITER (integer value) and NBYTES, thus it has no knowledge of * the total transfer size. If the total transfer size is not a * multiple of NBYTES then we'll end up with copying a wrong number * of bytes (CITER = TOTAL_SIZE / BITER). This, of course, raises * no error in the hardware but it's still wrong. 
*/ if (dma_cfg->head_block->block_size % dma_cfg->source_burst_length) { LOG_ERR("block size %d should be a multiple of NBYTES %d", dma_cfg->head_block->block_size, dma_cfg->source_burst_length); return -EINVAL; } /* check if NBYTES is a multiple of MAX(SSIZE, DSIZE). * * This stems from the fact that NBYTES needs to be a multiple * of SSIZE AND DSIZE. If NBYTES is a multiple of MAX(SSIZE, DSIZE) * then it will for sure satisfy the aforementioned condition (since * SSIZE and DSIZE are powers of 2). * * Failing to meet this condition will lead to the assertion of the * NCE bit (see CHn_ES register). */ if (dma_cfg->source_burst_length % MAX(dma_cfg->source_data_size, dma_cfg->dest_data_size)) { LOG_ERR("NBYTES %d should be a multiple of MAX(SSIZE(%d), DSIZE(%d))", dma_cfg->source_burst_length, dma_cfg->source_data_size, dma_cfg->dest_data_size); return -EINVAL; } /* fetch channel data */ chan = lookup_channel(dev, chan_id); if (!chan) { LOG_ERR("channel ID %u is not valid", chan_id); return -EINVAL; } /* save the block size for later usage in edma_reload */ chan->bsize = dma_cfg->head_block->block_size; if (dma_cfg->cyclic) { chan->cyclic_buffer = true; chan->stat.read_position = 0; chan->stat.write_position = 0; /* ASSUMPTION: for CONSUMER-type channels, the buffer from * which the engine consumes should be full, while in the * case of PRODUCER-type channels it should be empty. */ switch (dma_cfg->channel_direction) { case MEMORY_TO_PERIPHERAL: chan->type = CHAN_TYPE_CONSUMER; chan->stat.free = 0; chan->stat.pending_length = chan->bsize; break; case PERIPHERAL_TO_MEMORY: chan->type = CHAN_TYPE_PRODUCER; chan->stat.pending_length = 0; chan->stat.free = chan->bsize; break; default: LOG_ERR("unsupported transfer dir %d for cyclic mode", dma_cfg->channel_direction); return -ENOTSUP; } } else { chan->cyclic_buffer = false; } /* change channel's state to CONFIGURED */ ret = channel_change_state(chan, CHAN_STATE_CONFIGURED); if (ret < 0) { LOG_ERR("failed to change channel %d state to CONFIGURED", chan_id); return ret; } ret = get_transfer_type(dma_cfg->channel_direction, &transfer_type); if (ret < 0) { return ret; } chan->cb = dma_cfg->dma_callback; chan->arg = dma_cfg->user_data; /* warning: this sets SOFF and DOFF to SSIZE and DSIZE which are POSITIVE. */ ret = EDMA_ConfigureTransfer(data->hal_cfg, chan_id, dma_cfg->head_block->source_address, dma_cfg->head_block->dest_address, dma_cfg->source_data_size, dma_cfg->dest_data_size, dma_cfg->source_burst_length, dma_cfg->head_block->block_size, transfer_type); if (ret < 0) { LOG_ERR("failed to configure transfer"); return to_std_error(ret); } /* TODO: channel MUX should be forced to 0 based on the previous state */ if (EDMA_HAS_MUX(data->hal_cfg)) { ret = EDMA_SetChannelMux(data->hal_cfg, chan_id, dma_cfg->dma_slot); if (ret < 0) { LOG_ERR("failed to set channel MUX"); return to_std_error(ret); } } /* set SLAST and DLAST */ ret = set_slast_dlast(dma_cfg, transfer_type, data, chan_id); if (ret < 0) { return ret; } /* allow interrupting the CPU when a major cycle is completed. * * interesting note: only 1 major loop is performed per slave peripheral * DMA request. For instance, if block_size = 768 and burst_size = 192 * we're going to get 4 transfers of 192 bytes. Each of these transfers * translates to a DMA request made by the slave peripheral. 
*/ EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CSR, EDMA_TCD_CSR_INTMAJOR_MASK, 0); if (IS_ENABLED(CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ)) { /* if enabled through the above configuration, also * allow the CPU to be interrupted when CITER = BITER / 2. */ EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CSR, EDMA_TCD_CSR_INTHALF_MASK, 0); } /* enable channel interrupt */ irq_enable(chan->irq); /* dump register status - for debugging purposes */ edma_dump_channel_registers(data, chan_id); return 0; } static int edma_get_status(const struct device *dev, uint32_t chan_id, struct dma_status *stat) { struct edma_data *data; struct edma_channel *chan; uint32_t citer, biter, done; unsigned int key; data = dev->data; /* fetch channel data */ chan = lookup_channel(dev, chan_id); if (!chan) { LOG_ERR("channel ID %u is not valid", chan_id); return -EINVAL; } if (chan->cyclic_buffer) { key = irq_lock(); stat->free = chan->stat.free; stat->pending_length = chan->stat.pending_length; irq_unlock(key); } else { /* note: no locking required here. The DMA interrupts * have no effect over CITER and BITER. */ citer = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CITER); biter = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_BITER); done = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR) & EDMA_TCD_CH_CSR_DONE_MASK; if (done) { stat->free = chan->bsize; stat->pending_length = 0; } else { stat->free = (biter - citer) * (chan->bsize / biter); stat->pending_length = chan->bsize - stat->free; } } LOG_DBG("free: %d, pending: %d", stat->free, stat->pending_length); return 0; } static int edma_suspend(const struct device *dev, uint32_t chan_id) { struct edma_data *data; const struct edma_config *cfg; struct edma_channel *chan; int ret; data = dev->data; cfg = dev->config; /* fetch channel data */ chan = lookup_channel(dev, chan_id); if (!chan) { LOG_ERR("channel ID %u is not valid", chan_id); return -EINVAL; } edma_dump_channel_registers(data, chan_id); /* change channel's state to SUSPENDED */ ret = channel_change_state(chan, CHAN_STATE_SUSPENDED); if (ret < 0) { LOG_ERR("failed to change channel %d state to SUSPENDED", chan_id); return ret; } LOG_DBG("suspending channel %u", chan_id); /* disable HW requests */ EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR, 0, EDMA_TCD_CH_CSR_ERQ_MASK); return 0; } static int edma_stop(const struct device *dev, uint32_t chan_id) { struct edma_data *data; const struct edma_config *cfg; struct edma_channel *chan; enum channel_state prev_state; int ret; data = dev->data; cfg = dev->config; /* fetch channel data */ chan = lookup_channel(dev, chan_id); if (!chan) { LOG_ERR("channel ID %u is not valid", chan_id); return -EINVAL; } prev_state = chan->state; /* change channel's state to STOPPED */ ret = channel_change_state(chan, CHAN_STATE_STOPPED); if (ret < 0) { LOG_ERR("failed to change channel %d state to STOPPED", chan_id); return ret; } LOG_DBG("stopping channel %u", chan_id); if (prev_state == CHAN_STATE_SUSPENDED) { /* if the channel has been suspended then there's * no point in disabling the HW requests again. Just * jump to the channel release operation. */ goto out_release_channel; } /* disable HW requests */ EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR, 0, EDMA_TCD_CH_CSR_ERQ_MASK); out_release_channel: /* clear the channel MUX so that it can used by a different peripheral. * * note: because the channel is released during dma_stop() that means * dma_start() can no longer be immediately called. 
This is because * one needs to re-configure the channel MUX which can only be done * through dma_config(). As such, if one intends to reuse the current * configuration then please call dma_suspend() instead of dma_stop(). */ if (EDMA_HAS_MUX(data->hal_cfg)) { ret = EDMA_SetChannelMux(data->hal_cfg, chan_id, 0); if (ret < 0) { LOG_ERR("failed to set channel MUX"); return to_std_error(ret); } } edma_dump_channel_registers(data, chan_id); return 0; } static int edma_start(const struct device *dev, uint32_t chan_id) { struct edma_data *data; const struct edma_config *cfg; struct edma_channel *chan; int ret; data = dev->data; cfg = dev->config; /* fetch channel data */ chan = lookup_channel(dev, chan_id); if (!chan) { LOG_ERR("channel ID %u is not valid", chan_id); return -EINVAL; } /* change channel's state to STARTED */ ret = channel_change_state(chan, CHAN_STATE_STARTED); if (ret < 0) { LOG_ERR("failed to change channel %d state to STARTED", chan_id); return ret; } LOG_DBG("starting channel %u", chan_id); /* enable HW requests */ EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR, EDMA_TCD_CH_CSR_ERQ_MASK, 0); return 0; } static int edma_reload(const struct device *dev, uint32_t chan_id, uint32_t src, uint32_t dst, size_t size) { struct edma_data *data; struct edma_channel *chan; int ret; unsigned int key; data = dev->data; /* fetch channel data */ chan = lookup_channel(dev, chan_id); if (!chan) { LOG_ERR("channel ID %u is not valid", chan_id); return -EINVAL; } /* channel needs to be started to allow reloading */ if (chan->state != CHAN_STATE_STARTED) { LOG_ERR("reload is only supported on started channels"); return -EINVAL; } if (chan->cyclic_buffer) { key = irq_lock(); ret = EDMA_CHAN_PRODUCE_CONSUME_B(chan, size); irq_unlock(key); if (ret < 0) { LOG_ERR("chan %d buffer overflow/underrun", chan_id); return ret; } } return 0; } static int edma_get_attribute(const struct device *dev, uint32_t type, uint32_t *val) { switch (type) { case DMA_ATTR_BUFFER_SIZE_ALIGNMENT: case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT: *val = CONFIG_DMA_NXP_EDMA_ALIGN; break; case DMA_ATTR_MAX_BLOCK_COUNT: /* this is restricted to 1 because SG configurations are not supported */ *val = 1; break; default: LOG_ERR("invalid attribute type: %d", type); return -EINVAL; } return 0; } static bool edma_channel_filter(const struct device *dev, int chan_id, void *param) { int *requested_channel; if (!param) { return false; } requested_channel = param; if (*requested_channel == chan_id && lookup_channel(dev, chan_id)) { return true; } return false; } static const struct dma_driver_api edma_api = { .reload = edma_reload, .config = edma_config, .start = edma_start, .stop = edma_stop, .suspend = edma_suspend, .resume = edma_start, .get_status = edma_get_status, .get_attribute = edma_get_attribute, .chan_filter = edma_channel_filter, }; static int edma_init(const struct device *dev) { const struct edma_config *cfg; struct edma_data *data; mm_reg_t regmap; data = dev->data; cfg = dev->config; /* map instance MMIO */ device_map(&regmap, cfg->regmap_phys, cfg->regmap_size, K_MEM_CACHE_NONE); /* overwrite physical address set in the HAL configuration. * We can down-cast the virtual address to a 32-bit address because * we know we're working with 32-bit addresses only. */ data->hal_cfg->regmap = (uint32_t)POINTER_TO_UINT(regmap); cfg->irq_config(); /* dma_request_channel() uses this variable to keep track of the * available channels. 
As such, it needs to be initialized with NULL * which signifies that all channels are initially available. */ data->channel_flags = ATOMIC_INIT(0); data->ctx.atomic = &data->channel_flags; data->ctx.dma_channels = data->hal_cfg->channels; return 0; } /* a few comments about the BUILD_ASSERT statements: * 1) dma-channels and valid-channels should be mutually exclusive. * This means that you specify the one or the other. There's no real * need to have both of them. * 2) Number of channels should match the number of interrupts for * said channels (TODO: what about error interrupts?) * 3) The channel-mux property shouldn't be specified unless * the eDMA is MUX-capable (signaled via the EDMA_HAS_CHAN_MUX * configuration). */ #define EDMA_INIT(inst) \ \ BUILD_ASSERT(!DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels) || \ !DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), valid_channels), \ "dma_channels and valid_channels are mutually exclusive"); \ \ BUILD_ASSERT(DT_INST_PROP_OR(inst, dma_channels, 0) == \ DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)) || \ DT_INST_PROP_LEN_OR(inst, valid_channels, 0) == \ DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)), \ "number of interrupts needs to match number of channels"); \ \ BUILD_ASSERT(DT_PROP_OR(DT_INST(inst, DT_DRV_COMPAT), hal_cfg_index, 0) < \ ARRAY_SIZE(s_edmaConfigs), \ "HAL configuration index out of bounds"); \ \ static struct edma_channel channels_##inst[] = EDMA_CHANNEL_ARRAY_GET(inst); \ \ static void interrupt_config_function_##inst(void) \ { \ EDMA_CONNECT_INTERRUPTS(inst); \ } \ \ static struct edma_config edma_config_##inst = { \ .regmap_phys = DT_INST_REG_ADDR(inst), \ .regmap_size = DT_INST_REG_SIZE(inst), \ .irq_config = interrupt_config_function_##inst, \ .contiguous_channels = EDMA_CHANS_ARE_CONTIGUOUS(inst), \ }; \ \ static struct edma_data edma_data_##inst = { \ .channels = channels_##inst, \ .ctx.magic = DMA_MAGIC, \ .hal_cfg = &EDMA_HAL_CFG_GET(inst), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, &edma_init, NULL, \ &edma_data_##inst, &edma_config_##inst, \ PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, \ &edma_api); \ DT_INST_FOREACH_STATUS_OKAY(EDMA_INIT); ```
/content/code_sandbox/drivers/dma/dma_nxp_edma.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,412
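The parameter checks in `edma_config()` above boil down to two divisibility rules: NBYTES (the burst length) must be a multiple of the wider of SSIZE and DSIZE, and the total block size must be a multiple of NBYTES so that CITER comes out as a whole number of minor loops. A standalone restatement, with names local to this sketch:

```c
#include <stdbool.h>
#include <stdint.h>

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

static bool edma_params_ok(uint32_t block_size, uint32_t nbytes,
			   uint32_t ssize, uint32_t dsize)
{
	/* NBYTES must be a multiple of MAX(SSIZE, DSIZE), else the
	 * engine raises the NCE bit in CHn_ES
	 */
	if (nbytes % MAX(ssize, dsize)) {
		return false;
	}
	/* the block must split into whole minor loops:
	 * CITER = block_size / NBYTES
	 */
	if (block_size % nbytes) {
		return false;
	}
	return true;
}

/* e.g. block_size 768, NBYTES 192, SSIZE = DSIZE = 4 passes both checks,
 * and the engine performs 768 / 192 = 4 minor loops of 192 bytes each,
 * matching the "4 transfers of 192 bytes" note in the comment above.
 */
```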
```c
/*
 *
 */

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/logging/log.h>
#include <zephyr/pm/policy.h>
#include <zephyr/drivers/dma/dma_mcux_smartdma.h>
#include <soc.h>

#include <fsl_smartdma.h>
#include <fsl_inputmux.h>
#include <fsl_power.h>

#define DT_DRV_COMPAT nxp_smartdma

LOG_MODULE_REGISTER(dma_mcux_smartdma, CONFIG_DMA_LOG_LEVEL);

/* SMARTDMA peripheral registers, taken from the MCUX driver implementation */
struct smartdma_periph {
	volatile uint32_t BOOT;
	volatile uint32_t CTRL;
	volatile uint32_t PC;
	volatile uint32_t SP;
	volatile uint32_t BREAK_ADDR;
	volatile uint32_t BREAK_VECT;
	volatile uint32_t EMER_VECT;
	volatile uint32_t EMER_SEL;
	volatile uint32_t ARM2SMARTDMA;
	volatile uint32_t SMARTDMA2ARM;
	volatile uint32_t PENDTRAP;
};

struct dma_mcux_smartdma_config {
	struct smartdma_periph *base;
	void (*irq_config_func)(const struct device *dev);
	void (**smartdma_progs)(void);
};

struct dma_mcux_smartdma_data {
	uint32_t smartdma_stack[32]; /* Stack for SMARTDMA */
	/* Installed DMA callback and user data */
	dma_callback_t callback;
	void *user_data;
};

/* Magic value written to the SMARTDMA control register when it is configured */
#define SMARTDMA_MAGIC 0xC0DE0000U
/* These bits are set when the SMARTDMA boots, cleared to reset it */
#define SMARTDMA_BOOT 0x11

static inline bool dma_mcux_smartdma_prog_is_mipi(uint32_t prog)
{
	return ((prog == kSMARTDMA_MIPI_RGB565_DMA) ||
		(prog == kSMARTDMA_MIPI_RGB888_DMA) ||
		(prog == kSMARTDMA_MIPI_RGB565_R180_DMA) ||
		(prog == kSMARTDMA_MIPI_RGB888_R180_DMA));
}

/* Configure a channel */
static int dma_mcux_smartdma_configure(const struct device *dev,
				       uint32_t channel,
				       struct dma_config *config)
{
	const struct dma_mcux_smartdma_config *dev_config = dev->config;
	struct dma_mcux_smartdma_data *data = dev->data;
	uint32_t prog_idx;
	bool swap_pixels = false;

	/* SMARTDMA does not have channels */
	ARG_UNUSED(channel);

	data->callback = config->dma_callback;
	data->user_data = config->user_data;

	/* Reset SMARTDMA */
	SMARTDMA_Reset();

	/*
	 * The dma_slot parameter is used to determine which SMARTDMA program
	 * to run. First, convert the Zephyr define to a HAL enum.
	 */
	switch (config->dma_slot) {
	case DMA_SMARTDMA_MIPI_RGB565_DMA:
		prog_idx = kSMARTDMA_MIPI_RGB565_DMA;
		break;
	case DMA_SMARTDMA_MIPI_RGB888_DMA:
		prog_idx = kSMARTDMA_MIPI_RGB888_DMA;
		break;
	case DMA_SMARTDMA_MIPI_RGB565_180:
		prog_idx = kSMARTDMA_MIPI_RGB565_R180_DMA;
		break;
	case DMA_SMARTDMA_MIPI_RGB888_180:
		prog_idx = kSMARTDMA_MIPI_RGB888_R180_DMA;
		break;
	case DMA_SMARTDMA_MIPI_RGB565_DMA_SWAP:
		swap_pixels = true;
		prog_idx = kSMARTDMA_MIPI_RGB565_DMA;
		break;
	case DMA_SMARTDMA_MIPI_RGB888_DMA_SWAP:
		swap_pixels = true;
		prog_idx = kSMARTDMA_MIPI_RGB888_DMA;
		break;
	case DMA_SMARTDMA_MIPI_RGB565_180_SWAP:
		swap_pixels = true;
		prog_idx = kSMARTDMA_MIPI_RGB565_R180_DMA;
		break;
	case DMA_SMARTDMA_MIPI_RGB888_180_SWAP:
		swap_pixels = true;
		prog_idx = kSMARTDMA_MIPI_RGB888_R180_DMA;
		break;
	default:
		prog_idx = config->dma_slot;
		break;
	}

	if (dma_mcux_smartdma_prog_is_mipi(prog_idx)) {
		smartdma_dsi_param_t param = {
			.disablePixelByteSwap = (swap_pixels == false),
		};

		if (config->block_count != 1) {
			return -ENOTSUP;
		}
		/* Setup SMARTDMA */
		param.p_buffer = (uint8_t *)config->head_block->source_address;
		param.buffersize = config->head_block->block_size;
		param.smartdma_stack = data->smartdma_stack;
		/* Save configuration to SMARTDMA */
		dev_config->base->ARM2SMARTDMA = (uint32_t)(&param);
	} else {
		/* For other cases, we simply pass the entire DMA config
		 * struct to the SMARTDMA. The user's application could either
		 * populate this structure with data, or choose to write
		 * different configuration data to the SMARTDMA in their
		 * application
		 */
		dev_config->base->ARM2SMARTDMA = ((uint32_t)config);
	}

	/* Save program */
	dev_config->base->BOOT = (uint32_t)dev_config->smartdma_progs[prog_idx];
	LOG_DBG("Boot address set to 0x%X", dev_config->base->BOOT);
	return 0;
}

static int dma_mcux_smartdma_start(const struct device *dev, uint32_t channel)
{
	const struct dma_mcux_smartdma_config *config = dev->config;

	/* Block PM transition until DMA completes */
	pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	/* Kick off SMARTDMA */
	config->base->CTRL = SMARTDMA_MAGIC | SMARTDMA_BOOT;

	return 0;
}

static int dma_mcux_smartdma_stop(const struct device *dev, uint32_t channel)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(channel);

	/* Stop DMA */
	SMARTDMA_Reset();
	/* Release PM lock */
	pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);

	return 0;
}

static int dma_mcux_smartdma_init(const struct device *dev)
{
	const struct dma_mcux_smartdma_config *config = dev->config;

	/*
	 * Initialize the SMARTDMA with firmware. The default firmware
	 * from the MCUX SDK is a display firmware, which implements the
	 * functions handled above in the DMA configuration function. The
	 * user can install another firmware using `dma_smartdma_install_fw`
	 */
	SMARTDMA_Init((uint32_t)config->smartdma_progs,
		      s_smartdmaDisplayFirmware,
		      SMARTDMA_DISPLAY_FIRMWARE_SIZE);

	config->irq_config_func(dev);

	return 0;
}

static void dma_mcux_smartdma_irq(const struct device *dev)
{
	const struct dma_mcux_smartdma_data *data = dev->data;

	if (data->callback) {
		data->callback(dev, data->user_data, 0, 0);
	}
	/* Release PM lock */
	pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}

/**
 * @brief install SMARTDMA firmware
 *
 * Install a custom firmware for the SMARTDMA. This function allows the user
 * to install a custom firmware into the SMARTDMA, which implements
 * different API functions than the standard MCUX SDK firmware.
 * @param dev: SMARTDMA device
 * @param firmware: address of buffer containing SMARTDMA firmware
 * @param len: length of firmware buffer
 */
void dma_smartdma_install_fw(const struct device *dev, uint8_t *firmware,
			     uint32_t len)
{
	const struct dma_mcux_smartdma_config *config = dev->config;

	SMARTDMA_InstallFirmware((uint32_t)config->smartdma_progs, firmware,
				 len);
}

static const struct dma_driver_api dma_mcux_smartdma_api = {
	.config = dma_mcux_smartdma_configure,
	.start = dma_mcux_smartdma_start,
	.stop = dma_mcux_smartdma_stop,
};

#define SMARTDMA_INIT(n)                                                       \
	static void dma_mcux_smartdma_config_func_##n(const struct device *dev)\
	{                                                                      \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority),         \
			    dma_mcux_smartdma_irq,                             \
			    DEVICE_DT_INST_GET(n), 0);                         \
		irq_enable(DT_INST_IRQN(n));                                   \
	}                                                                      \
									       \
	static const struct dma_mcux_smartdma_config smartdma_##n##_config = { \
		.base = (struct smartdma_periph *)DT_INST_REG_ADDR(n),         \
		.smartdma_progs = (void (**)(void))DT_INST_PROP(n, program_mem),\
		.irq_config_func = dma_mcux_smartdma_config_func_##n,          \
	};                                                                     \
	static struct dma_mcux_smartdma_data smartdma_##n##_data;              \
									       \
	DEVICE_DT_INST_DEFINE(n,                                               \
			      &dma_mcux_smartdma_init,                         \
			      NULL,                                            \
			      &smartdma_##n##_data, &smartdma_##n##_config,    \
			      POST_KERNEL, CONFIG_DMA_INIT_PRIORITY,           \
			      &dma_mcux_smartdma_api);

DT_INST_FOREACH_STATUS_OKAY(SMARTDMA_INIT)
```
/content/code_sandbox/drivers/dma/dma_mcux_smartdma.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,011
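Because the SMARTDMA selects its program through `dma_slot` and ignores the channel argument, driving the stock display firmware reduces to one block config pointing at the frame buffer. A usage sketch follows; the device handle, buffer, and channel value 0 are illustrative assumptions, while the `DMA_SMARTDMA_MIPI_RGB565_DMA` slot comes from `<zephyr/drivers/dma/dma_mcux_smartdma.h>` as included above.

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_mcux_smartdma.h>

static void frame_done(const struct device *dev, void *user_data,
		       uint32_t channel, int status)
{
	/* SMARTDMA finished pushing the frame out over MIPI DSI */
}

int send_frame(const struct device *sdma, uint8_t *fb, size_t len)
{
	struct dma_block_config block = {
		.source_address = (uintptr_t)fb, /* becomes param.p_buffer */
		.block_size = len,               /* becomes param.buffersize */
	};
	struct dma_config cfg = {
		.dma_slot = DMA_SMARTDMA_MIPI_RGB565_DMA,
		.block_count = 1, /* MIPI programs require a single block */
		.head_block = &block,
		.dma_callback = frame_done,
	};
	int ret = dma_config(sdma, 0, &cfg); /* channel is ignored */

	if (ret) {
		return ret;
	}
	return dma_start(sdma, 0);
}
```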
```unknown
# DMA configuration options

#
# DMA options
#
menuconfig DMA
	bool "Direct Memory Access (DMA) drivers"

if DMA

config DMA_64BIT
	bool "DMA 64 bit address support"
	help
	  When this option is true, 64 bit source and dest DMA addresses
	  are supported.

config DMA_INIT_PRIORITY
	int "DMA init priority"
	default KERNEL_INIT_PRIORITY_DEFAULT
	help
	  DMA driver device initialization priority.

module = DMA
module-str = dma
source "subsys/logging/Kconfig.template.log_config"

source "drivers/dma/Kconfig.stm32"
source "drivers/dma/Kconfig.sam_xdmac"
source "drivers/dma/Kconfig.dw"
source "drivers/dma/Kconfig.nios2_msgdma"
source "drivers/dma/Kconfig.sam0"
source "drivers/dma/Kconfig.mcux_edma"
source "drivers/dma/Kconfig.mcux_lpc"
source "drivers/dma/Kconfig.dma_pl330"
source "drivers/dma/Kconfig.iproc_pax"
source "drivers/dma/Kconfig.intel_adsp_gpdma"
source "drivers/dma/Kconfig.intel_adsp_hda"
source "drivers/dma/Kconfig.gd32"
source "drivers/dma/Kconfig.esp32"
source "drivers/dma/Kconfig.xec"
source "drivers/dma/Kconfig.xmc4xxx"
source "drivers/dma/Kconfig.rpi_pico"
source "drivers/dma/Kconfig.intel_lpss"
source "drivers/dma/Kconfig.mcux_pxp"
source "drivers/dma/Kconfig.max32"
source "drivers/dma/Kconfig.mcux_smartdma"
source "drivers/dma/Kconfig.andes_atcdmac300"
source "drivers/dma/Kconfig.sedi"
source "drivers/dma/Kconfig.smartbond"
source "drivers/dma/Kconfig.nxp_sof_host_dma"
source "drivers/dma/Kconfig.emul"
source "drivers/dma/Kconfig.nxp_edma"
source "drivers/dma/Kconfig.dw_axi_dmac"

endif # DMA
```
/content/code_sandbox/drivers/dma/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
432
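In practice, enabling `DMA_64BIT` from the Kconfig above widens the address fields of `struct dma_block_config` from 32 to 64 bits, which is what lets drivers such as the PL330 entry earlier address beyond (and split transfers at) the 4 GiB boundary. A minimal sketch, assuming the standard field names from `<zephyr/drivers/dma.h>`; the addresses are illustrative:

```c
#include <zephyr/drivers/dma.h>

void fill_block(struct dma_block_config *blk)
{
#ifdef CONFIG_DMA_64BIT
	/* e.g. 36-bit physical addresses are representable directly */
	blk->source_address = 0x900000000ULL;
	blk->dest_address   = 0x910000000ULL;
#else
	/* without the option, addresses must fit in 32 bits */
	blk->source_address = 0x20000000U;
	blk->dest_address   = 0x20100000U;
#endif
	blk->block_size = 4096;
}
```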
```c /* * */ #define DT_DRV_COMPAT snps_designware_dma_axi #include <zephyr/device.h> #include <zephyr/drivers/dma.h> #include <zephyr/logging/log.h> #include <zephyr/drivers/reset.h> #include <zephyr/cache.h> LOG_MODULE_REGISTER(dma_designware_axi, CONFIG_DMA_LOG_LEVEL); #define DEV_CFG(_dev) ((const struct dma_dw_axi_dev_cfg *)(_dev)->config) #define DEV_DATA(_dev) ((struct dma_dw_axi_dev_data *const)(_dev)->data) /* mask for block transfer size */ #define BLOCK_TS_MASK GENMASK(21, 0) /* blen : number of data units * blen will always be in power of two * * when blen is 1 then set msize to zero otherwise find most significant bit set * and subtract two (as IP doesn't support number of data items 2) */ #define DMA_DW_AXI_GET_MSIZE(blen) ((blen == 1) ? (0U) : (find_msb_set(blen) - 2U)) /* Common_Registers_Address_Block */ #define DMA_DW_AXI_IDREG 0x0 #define DMA_DW_AXI_COMPVERREG 0x08 #define DMA_DW_AXI_CFGREG 0x10 #define DMA_DW_AXI_CHENREG 0x18 #define DMA_DW_AXI_INTSTATUSREG 0x30 #define DMA_DW_AXI_COMMONREG_INTCLEARREG 0x38 #define DMA_DW_AXI_COMMONREG_INTSTATUS_ENABLEREG 0x40 #define DMA_DW_AXI_COMMONREG_INTSIGNAL_ENABLEREG 0x48 #define DMA_DW_AXI_COMMONREG_INTSTATUSREG 0x50 #define DMA_DW_AXI_RESETREG 0x58 #define DMA_DW_AXI_LOWPOWER_CFGREG 0x60 /* Channel enable by setting ch_en and ch_en_we */ #define CH_EN(chan) (BIT64(8 + chan) | BIT64(chan)) /* Channel enable by setting ch_susp and ch_susp_we */ #define CH_SUSP(chan) (BIT64(24 + chan) | BIT64(16 + chan)) /* Channel enable by setting ch_abort and ch_abort_we */ #define CH_ABORT(chan) (BIT64(40 + chan) | BIT64(32 + chan)) /* channel susp/resume write enable pos */ #define CH_RESUME_WE(chan) (BIT64(24 + chan)) /* channel resume bit pos */ #define CH_RESUME(chan) (BIT64(16 + chan)) #define DMA_DW_AXI_CHAN_OFFSET(chan) (0x100 * chan) /* source address register for a channel */ #define DMA_DW_AXI_CH_SAR(chan) (0x100 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* destination address register for a channel */ #define DMA_DW_AXI_CH_DAR(chan) (0x108 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* block transfer size register for a channel */ #define DMA_DW_AXI_CH_BLOCK_TS(chan) (0x110 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel control register */ #define DMA_DW_AXI_CH_CTL(chan) (0x118 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel configuration register */ #define DMA_DW_AXI_CH_CFG(chan) (0x120 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* linked list pointer register */ #define DMA_DW_AXI_CH_LLP(chan) (0x128 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel status register */ #define DMA_DW_AXI_CH_STATUSREG(chan) (0x130 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel software handshake source register */ #define DMA_DW_AXI_CH_SWHSSRCREG(chan) (0x138 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel software handshake destination register */ #define DMA_DW_AXI_CH_SWHSDSTREG(chan) (0x140 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel block transfer resume request register */ #define DMA_DW_AXI_CH_BLK_TFR_RESUMEREQREG(chan) (0x148 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel AXI ID rester */ #define DMA_DW_AXI_CH_AXI_IDREG(chan) (0x150 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel AXI QOS register */ #define DMA_DW_AXI_CH_AXI_QOSREG(chan) (0x158 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel interrupt status enable register */ #define DMA_DW_AXI_CH_INTSTATUS_ENABLEREG(chan) (0x180 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel interrupt status register */ #define DMA_DW_AXI_CH_INTSTATUS(chan) (0x188 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel interrupt signal enable register */ #define 
DMA_DW_AXI_CH_INTSIGNAL_ENABLEREG(chan) (0x190 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* channel interrupt clear register */ #define DMA_DW_AXI_CH_INTCLEARREG(chan) (0x198 + DMA_DW_AXI_CHAN_OFFSET(chan)) /* bitfield configuration for multi-block transfer */ #define DMA_DW_AXI_CFG_SRC_MULTBLK_TYPE(x) FIELD_PREP(GENMASK64(1, 0), x) #define DMA_DW_AXI_CFG_DST_MULTBLK_TYPE(x) FIELD_PREP(GENMASK64(3, 2), x) /* bitfield configuration to assign handshaking interface to source and destination */ #define DMA_DW_AXI_CFG_SRC_PER(x) FIELD_PREP(GENMASK64(9, 4), x) #define DMA_DW_AXI_CFG_DST_PER(x) FIELD_PREP(GENMASK64(16, 11), x) /* bitfield configuration for transfer type and flow controller */ #define DMA_DW_AXI_CFG_TT_FC(x) FIELD_PREP(GENMASK64(34, 32), x) #define DMA_DW_AXI_CFG_HW_HS_SRC_BIT_POS 35 #define DMA_DW_AXI_CFG_HW_HS_DST_BIT_POS 36 #define DMA_DW_AXI_CFG_PRIORITY(x) FIELD_PREP(GENMASK64(51, 47), x) /* descriptor valid or not */ #define DMA_DW_AXI_CTL_LLI_VALID BIT64(63) /* descriptor is last or not in a link */ #define DMA_DW_AXI_CTL_LLI_LAST BIT64(62) /* interrupt on completion of block transfer */ #define DMA_DW_AXI_CTL_IOC_BLK_TFR BIT64(58) /* source status enable bit */ #define DMA_DW_AXI_CTL_SRC_STAT_EN BIT64(56) /* destination status enable bit */ #define DMA_DW_AXI_CTL_DST_STAT_EN BIT64(57) /* source burst length enable */ #define DMA_DW_AXI_CTL_ARLEN_EN BIT64(38) /* source burst length(considered when corresponding enable bit is set) */ #define DMA_DW_AXI_CTL_ARLEN(x) FIELD_PREP(GENMASK64(46, 39), x) /* destination burst length enable */ #define DMA_DW_AXI_CTL_AWLEN_EN BIT64(47) /* destination burst length(considered when corresponding enable bit is set) */ #define DMA_DW_AXI_CTL_AWLEN(x) FIELD_PREP(GENMASK64(55, 48), x) /* source burst transaction length */ #define DMA_DW_AXI_CTL_SRC_MSIZE(x) FIELD_PREP(GENMASK64(17, 14), x) /* destination burst transaction length */ #define DMA_DW_AXI_CTL_DST_MSIZE(x) FIELD_PREP(GENMASK64(21, 18), x) /* source transfer width */ #define DMA_DW_AXI_CTL_SRC_WIDTH(x) FIELD_PREP(GENMASK64(10, 8), x) /* destination transfer width */ #define DMA_DW_AXI_CTL_DST_WIDTH(x) FIELD_PREP(GENMASK64(13, 11), x) /* mask all the interrupts */ #define DMA_DW_AXI_IRQ_NONE 0 /* enable block completion transfer interrupt */ #define DMA_DW_AXI_IRQ_BLOCK_TFR BIT64(0) /* enable transfer completion interrupt */ #define DMA_DW_AXI_IRQ_DMA_TFR BIT64(1) /* enable interrupts on any dma transfer error */ #define DMA_DW_AXI_IRQ_ALL_ERR (GENMASK64(14, 5) | GENMASK64(21, 16)) /* global enable bit for dma controller */ #define DMA_DW_AXI_CFG_EN BIT64(0) /* global enable bit for interrupt */ #define DMA_DW_AXI_CFG_INT_EN BIT64(1) /* descriptor used by dw axi dma controller*/ struct dma_lli { uint64_t sar; uint64_t dar; uint32_t block_ts_lo; uint32_t reserved; uint64_t llp; uint64_t ctl; uint32_t sstat; uint32_t dstat; uint64_t llp_status; uint64_t reserved1; } __aligned(64); /* status of the channel */ enum dma_dw_axi_ch_state { DMA_DW_AXI_CH_IDLE, DMA_DW_AXI_CH_SUSPENDED, DMA_DW_AXI_CH_ACTIVE, DMA_DW_AXI_CH_PREPARED, }; /* source and destination transfer width */ enum dma_dw_axi_ch_width { BITS_8, BITS_16, BITS_32, BITS_64, BITS_128, BITS_256, BITS_512, }; /* transfer direction and flow controller */ enum dma_dw_axi_tt_fc { M2M_DMAC, M2P_DMAC, P2M_DMAC, P2P_DMAC, P2M_SRC, P2P_SRC, M2P_DST, P2P_DST, }; /* type of multi-block transfer */ enum dma_dw_axi_multi_blk_type { MULTI_BLK_CONTIGUOUS, MULTI_BLK_RELOAD, MULTI_BLK_SHADOW_REG, MULTI_BLK_LLI, }; /* dma driver channel specific 
information */ struct dma_dw_axi_ch_data { /* lli descriptor base */ struct dma_lli *lli_desc_base; /* lli current descriptor */ struct dma_lli *lli_desc_current; /* dma channel state */ enum dma_dw_axi_ch_state ch_state; /* direction of transfer */ uint32_t direction; /* number of descriptors */ uint32_t lli_desc_count; /* cfg register configuration for dma transfer */ uint64_t cfg; /* mask and unmask interrupts */ uint64_t irq_unmask; /* user call back for dma transfer completion */ dma_callback_t dma_xfer_callback; /* user data for dma callback for dma transfer completion */ void *priv_data_xfer; /* user call back for dma block transfer completion */ dma_callback_t dma_blk_xfer_callback; /* user data for dma callback for dma block transfer completion */ void *priv_data_blk_tfr; }; /* dma controller driver data structure */ struct dma_dw_axi_dev_data { /* dma context */ struct dma_context dma_ctx; /* mmio address mapping info for dma controller */ DEVICE_MMIO_NAMED_RAM(dma_mmio); /* pointer to store channel specific info */ struct dma_dw_axi_ch_data *chan; /* pointer to hold descriptor base address */ struct dma_lli *dma_desc_pool; }; /* Device constant configuration parameters */ struct dma_dw_axi_dev_cfg { /* dma address space to map */ DEVICE_MMIO_NAMED_ROM(dma_mmio); #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(resets) /* Reset controller device configurations */ const struct reset_dt_spec reset; #endif /* dma controller interrupt configuration function pointer */ void (*irq_config)(void); }; /** * @brief get current status of the channel * * @param dev Pointer to the device structure for the driver instance * @param channel channel number * * @retval status of the channel */ static enum dma_dw_axi_ch_state dma_dw_axi_get_ch_status(const struct device *dev, uint32_t ch) { uint32_t bit_status; uint64_t ch_status; uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio); ch_status = sys_read64(reg_base + DMA_DW_AXI_CHENREG); /* channel is active/busy in the dma transfer */ bit_status = ((ch_status >> ch) & 1); if (bit_status) { return DMA_DW_AXI_CH_ACTIVE; } /* channel is currently suspended */ bit_status = ((ch_status >> (16 + ch)) & 1); if (bit_status) { return DMA_DW_AXI_CH_SUSPENDED; } /* channel is idle */ return DMA_DW_AXI_CH_IDLE; } static void dma_dw_axi_isr(const struct device *dev) { unsigned int channel; uint64_t status, ch_status; int ret_status = 0; struct dma_dw_axi_ch_data *chan_data; uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio); struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev); /* read interrupt status register to find interrupt is for which channel */ status = sys_read64(reg_base + DMA_DW_AXI_INTSTATUSREG); channel = find_lsb_set(status) - 1; if (channel < 0) { LOG_ERR("Spurious interrupt received channel:%u\n", channel); return; } if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) { LOG_ERR("Interrupt received on invalid channel:%d\n", channel); return; } /* retrieve channel specific data pointer for a channel */ chan_data = &dw_dev_data->chan[channel]; /* get dma transfer status */ ch_status = sys_read64(reg_base + DMA_DW_AXI_CH_INTSTATUS(channel)); if (!ch_status) { LOG_ERR("Spurious interrupt received ch_status:0x%llx\n", ch_status); return; } /* handle dma transfer errors if any */ if (ch_status & DMA_DW_AXI_IRQ_ALL_ERR) { sys_write64(DMA_DW_AXI_IRQ_ALL_ERR, reg_base + DMA_DW_AXI_CH_INTCLEARREG(channel)); LOG_ERR("DMA Error: Channel:%d Channel interrupt status:0x%llx\n", channel, ch_status); ret_status = -(ch_status & 
DMA_DW_AXI_IRQ_ALL_ERR); } /* handle block transfer completion */ if (ch_status & DMA_DW_AXI_IRQ_BLOCK_TFR) { sys_write64(DMA_DW_AXI_IRQ_ALL_ERR | DMA_DW_AXI_IRQ_BLOCK_TFR, reg_base + DMA_DW_AXI_CH_INTCLEARREG(channel)); if (chan_data->dma_blk_xfer_callback) { chan_data->dma_blk_xfer_callback(dev, chan_data->priv_data_blk_tfr, channel, ret_status); } } /* handle dma transfer completion */ if (ch_status & DMA_DW_AXI_IRQ_DMA_TFR) { sys_write64(DMA_DW_AXI_IRQ_ALL_ERR | DMA_DW_AXI_IRQ_DMA_TFR, reg_base + DMA_DW_AXI_CH_INTCLEARREG(channel)); if (chan_data->dma_xfer_callback) { chan_data->dma_xfer_callback(dev, chan_data->priv_data_xfer, channel, ret_status); chan_data->ch_state = dma_dw_axi_get_ch_status(dev, channel); } } } /** * @brief set data source and destination data width * * @param lli_desc Pointer to the descriptor * @param src_data_width source data width * @param dest_data_width destination data width * * @retval 0 on success, -ENOTSUP if the data width is not supported */ static int dma_dw_axi_set_data_width(struct dma_lli *lli_desc, uint32_t src_data_width, uint32_t dest_data_width) { if (src_data_width > CONFIG_DMA_DW_AXI_DATA_WIDTH || dest_data_width > CONFIG_DMA_DW_AXI_DATA_WIDTH) { LOG_ERR("transfer width more than %u not supported", CONFIG_DMA_DW_AXI_DATA_WIDTH); return -ENOTSUP; } switch (src_data_width) { case 1: /* one byte transfer */ lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_8); break; case 2: /* 2-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_16); break; case 4: /* 4-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_32); break; case 8: /* 8-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_64); break; case 16: /* 16-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_128); break; case 32: /* 32-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_256); break; case 64: /* 64-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_WIDTH(BITS_512); break; default: LOG_ERR("Source transfer width not supported"); return -ENOTSUP; } switch (dest_data_width) { case 1: /* one byte transfer */ lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_8); break; case 2: /* 2-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_16); break; case 4: /* 4-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_32); break; case 8: /* 8-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_64); break; case 16: /* 16-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_128); break; case 32: /* 32-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_256); break; case 64: /* 64-bytes transfer width */ lli_desc->ctl |= DMA_DW_AXI_CTL_DST_WIDTH(BITS_512); break; default: LOG_ERR("Destination transfer width not supported"); return -ENOTSUP; } return 0; } static int dma_dw_axi_config(const struct device *dev, uint32_t channel, struct dma_config *cfg) { int ret; uint32_t msize_src, msize_dst, i, ch_state; struct dma_dw_axi_ch_data *chan_data; struct dma_block_config *blk_cfg; struct dma_lli *lli_desc; struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev); /* check for invalid parameters before dereferencing them. 
*/ if (cfg == NULL) { LOG_ERR("invalid dma config :%p", cfg); return -ENODATA; } /* check if the channel is valid */ if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) { LOG_ERR("invalid dma channel %d", channel); return -EINVAL; } /* return if the channel is not idle */ ch_state = dma_dw_axi_get_ch_status(dev, channel); if (ch_state != DMA_DW_AXI_CH_IDLE) { LOG_ERR("DMA channel:%d is not idle(status:%d)", channel, ch_state); return -EBUSY; } if (!cfg->block_count) { LOG_ERR("no blocks to transfer"); return -EINVAL; } /* descriptor should be less than max configured descriptor */ if (cfg->block_count > CONFIG_DMA_DW_AXI_MAX_DESC) { LOG_ERR("dma:%s channel %d descriptor block count: %d larger than" " max descriptors in pool: %d", dev->name, channel, cfg->block_count, CONFIG_DMA_DW_AXI_MAX_DESC); return -EINVAL; } if (cfg->source_burst_length > CONFIG_DMA_DW_AXI_MAX_BURST_TXN_LEN || cfg->dest_burst_length > CONFIG_DMA_DW_AXI_MAX_BURST_TXN_LEN || cfg->source_burst_length == 0 || cfg->dest_burst_length == 0) { LOG_ERR("dma:%s burst length not supported", dev->name); return -ENOTSUP; } /* get channel specific data pointer */ chan_data = &dw_dev_data->chan[channel]; /* check if the channel is currently idle */ if (chan_data->ch_state != DMA_DW_AXI_CH_IDLE) { LOG_ERR("DMA channel:%d is busy", channel); return -EBUSY; } /* burst transaction length for source and destination */ msize_src = DMA_DW_AXI_GET_MSIZE(cfg->source_burst_length); msize_dst = DMA_DW_AXI_GET_MSIZE(cfg->dest_burst_length); chan_data->cfg = 0; chan_data->irq_unmask = 0; chan_data->direction = cfg->channel_direction; chan_data->lli_desc_base = &dw_dev_data->dma_desc_pool[channel * CONFIG_DMA_DW_AXI_MAX_DESC]; chan_data->lli_desc_count = cfg->block_count; memset(chan_data->lli_desc_base, 0, sizeof(struct dma_lli) * chan_data->lli_desc_count); lli_desc = chan_data->lli_desc_base; blk_cfg = cfg->head_block; /* max channel priority can be MAX_CHANNEL - 1 */ if (cfg->channel_priority < dw_dev_data->dma_ctx.dma_channels) { chan_data->cfg |= DMA_DW_AXI_CFG_PRIORITY(cfg->channel_priority); } /* configure all the descriptors in a loop */ for (i = 0; i < cfg->block_count; i++) { ret = dma_dw_axi_set_data_width(lli_desc, cfg->source_data_size, cfg->dest_data_size); if (ret) { return ret; } lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_STAT_EN | DMA_DW_AXI_CTL_DST_STAT_EN | DMA_DW_AXI_CTL_IOC_BLK_TFR; lli_desc->sar = blk_cfg->source_address; lli_desc->dar = blk_cfg->dest_address; /* set block transfer size*/ lli_desc->block_ts_lo = (blk_cfg->block_size / cfg->source_data_size) - 1; if (lli_desc->block_ts_lo > CONFIG_DMA_DW_AXI_MAX_BLOCK_TS) { LOG_ERR("block transfer size more than %u not supported", CONFIG_DMA_DW_AXI_MAX_BLOCK_TS); return -ENOTSUP; } /* configuration based on channel direction */ if (cfg->channel_direction == MEMORY_TO_MEMORY) { chan_data->cfg |= DMA_DW_AXI_CFG_TT_FC(M2M_DMAC); lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_MSIZE(msize_src) | DMA_DW_AXI_CTL_DST_MSIZE(msize_dst); } else if (cfg->channel_direction == MEMORY_TO_PERIPHERAL) { chan_data->cfg |= DMA_DW_AXI_CFG_TT_FC(M2P_DMAC); lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_MSIZE(msize_src) | DMA_DW_AXI_CTL_DST_MSIZE(msize_dst); WRITE_BIT(chan_data->cfg, DMA_DW_AXI_CFG_HW_HS_DST_BIT_POS, 0); /* assign a hardware handshake interface */ chan_data->cfg |= DMA_DW_AXI_CFG_DST_PER(cfg->dma_slot); } else if (cfg->channel_direction == PERIPHERAL_TO_MEMORY) { lli_desc->ctl |= DMA_DW_AXI_CTL_SRC_MSIZE(msize_src) | DMA_DW_AXI_CTL_DST_MSIZE(msize_dst); chan_data->cfg |= DMA_DW_AXI_CFG_TT_FC(P2M_DMAC); 
WRITE_BIT(chan_data->cfg, DMA_DW_AXI_CFG_HW_HS_SRC_BIT_POS, 0); /* assign a hardware handshake interface */ chan_data->cfg |= DMA_DW_AXI_CFG_SRC_PER(cfg->dma_slot); } else { LOG_ERR("%s: dma %s channel %d invalid direction %d", __func__, dev->name, channel, cfg->channel_direction); return -EINVAL; } /* set pointer to the next descriptor */ lli_desc->llp = ((uint64_t)(lli_desc + 1)); #if defined(CONFIG_DMA_DW_AXI_LLI_SUPPORT) /* configure multi block transfer size as linked list */ chan_data->cfg |= DMA_DW_AXI_CFG_SRC_MULTBLK_TYPE(MULTI_BLK_LLI) | DMA_DW_AXI_CFG_DST_MULTBLK_TYPE(MULTI_BLK_LLI); lli_desc->ctl |= DMA_DW_AXI_CTL_LLI_VALID; /* last descriptor*/ if ((i + 1) == chan_data->lli_desc_count) { lli_desc->ctl |= DMA_DW_AXI_CTL_LLI_LAST | DMA_DW_AXI_CTL_LLI_VALID; lli_desc->llp = 0; } #else /* configure multi-block transfer as contiguous mode */ chan_data->cfg |= DMA_DW_AXI_CFG_SRC_MULTBLK_TYPE(MULTI_BLK_CONTIGUOUS) | DMA_DW_AXI_CFG_DST_MULTBLK_TYPE(MULTI_BLK_CONTIGUOUS); #endif /* next descriptor to configure*/ lli_desc++; blk_cfg = blk_cfg->next_block; } arch_dcache_flush_range((void *)chan_data->lli_desc_base, sizeof(struct dma_lli) * cfg->block_count); chan_data->lli_desc_current = chan_data->lli_desc_base; /* enable an interrupt depending on whether the callback is requested after dma transfer * completion or dma block transfer completion * * disable an interrupt if callback is not requested */ if (cfg->dma_callback && cfg->complete_callback_en) { chan_data->dma_blk_xfer_callback = cfg->dma_callback; chan_data->priv_data_blk_tfr = cfg->user_data; chan_data->irq_unmask = DMA_DW_AXI_IRQ_BLOCK_TFR | DMA_DW_AXI_IRQ_DMA_TFR; } else if (cfg->dma_callback && !cfg->complete_callback_en) { chan_data->dma_xfer_callback = cfg->dma_callback; chan_data->priv_data_xfer = cfg->user_data; chan_data->irq_unmask = DMA_DW_AXI_IRQ_DMA_TFR; } else { chan_data->irq_unmask = DMA_DW_AXI_IRQ_NONE; } /* unmask error interrupts when error_callback_dis is 0 */ if (!cfg->error_callback_dis) { chan_data->irq_unmask |= DMA_DW_AXI_IRQ_ALL_ERR; } /* dma descriptors are configured, ready to start dma transfer */ chan_data->ch_state = DMA_DW_AXI_CH_PREPARED; return 0; } static int dma_dw_axi_start(const struct device *dev, uint32_t channel) { uint32_t ch_state; struct dma_dw_axi_ch_data *chan_data; struct dma_lli *lli_desc; struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev); uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio); /* validate channel number */ if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) { LOG_ERR("invalid dma channel %d", channel); return -EINVAL; } /* check whether channel is idle before initiating DMA transfer */ ch_state = dma_dw_axi_get_ch_status(dev, channel); if (ch_state != DMA_DW_AXI_CH_IDLE) { LOG_ERR("DMA channel:%d is not idle", channel); return -EBUSY; } /* get channel specific data pointer */ chan_data = &dw_dev_data->chan[channel]; if (chan_data->ch_state != DMA_DW_AXI_CH_PREPARED) { LOG_ERR("DMA descriptors not configured"); return -EINVAL; } /* enable dma controller and global interrupt bit */ sys_write64(DMA_DW_AXI_CFG_INT_EN | DMA_DW_AXI_CFG_EN, reg_base + DMA_DW_AXI_CFGREG); sys_write64(chan_data->cfg, reg_base + DMA_DW_AXI_CH_CFG(channel)); sys_write64(chan_data->irq_unmask, reg_base + DMA_DW_AXI_CH_INTSTATUS_ENABLEREG(channel)); sys_write64(chan_data->irq_unmask, reg_base + DMA_DW_AXI_CH_INTSIGNAL_ENABLEREG(channel)); lli_desc = chan_data->lli_desc_current; #if defined(CONFIG_DMA_DW_AXI_LLI_SUPPORT) sys_write64(((uint64_t)lli_desc), reg_base + 
DMA_DW_AXI_CH_LLP(channel)); #else /* Program Source and Destination addresses */ sys_write64(lli_desc->sar, reg_base + DMA_DW_AXI_CH_SAR(channel)); sys_write64(lli_desc->dar, reg_base + DMA_DW_AXI_CH_DAR(channel)); sys_write64(lli_desc->block_ts_lo & BLOCK_TS_MASK, reg_base + DMA_DW_AXI_CH_BLOCK_TS(channel)); /* Program CH.CTL register */ sys_write64(lli_desc->ctl, reg_base + DMA_DW_AXI_CH_CTL(channel)); #endif /* Enable the channel which will initiate DMA transfer */ sys_write64(CH_EN(channel), reg_base + DMA_DW_AXI_CHENREG); chan_data->ch_state = dma_dw_axi_get_ch_status(dev, channel); return 0; } static int dma_dw_axi_stop(const struct device *dev, uint32_t channel) { bool is_channel_busy; uint32_t ch_state; struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev); uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio); /* channel should be valid */ if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) { LOG_ERR("invalid dma channel %d", channel); return -EINVAL; } /* return if the channel is idle as there is nothing to stop */ ch_state = dma_dw_axi_get_ch_status(dev, channel); if (ch_state == DMA_DW_AXI_CH_IDLE) { /* channel is already idle */ return 0; } /* To stop transfer or abort the channel in case of abnormal state: * 1. To disable channel, first suspend channel and drain the FIFO * 2. Disable the channel. Channel may get hung and can't be disabled * if there is no response from peripheral * 3. If channel is not disabled, Abort the channel. Aborting channel will * Flush out FIFO and data will be lost. Then corresponding interrupt will * be raised for abort and CH_EN bit will be cleared from CHENREG register */ sys_write64(CH_SUSP(channel), reg_base + DMA_DW_AXI_CHENREG); /* Try to disable the channel */ sys_clear_bit(reg_base + DMA_DW_AXI_CHENREG, channel); is_channel_busy = WAIT_FOR((sys_read64(reg_base + DMA_DW_AXI_CHENREG)) & (BIT(channel)), CONFIG_DMA_CHANNEL_STATUS_TIMEOUT, k_busy_wait(10)); if (is_channel_busy) { LOG_WRN("No response from handshaking interface... 
Aborting a channel..."); sys_write64(CH_ABORT(channel), reg_base + DMA_DW_AXI_CHENREG); is_channel_busy = WAIT_FOR((sys_read64(reg_base + DMA_DW_AXI_CHENREG)) & (BIT(channel)), CONFIG_DMA_CHANNEL_STATUS_TIMEOUT, k_busy_wait(10)); if (is_channel_busy) { LOG_ERR("Channel abort failed"); return -EBUSY; } } return 0; } static int dma_dw_axi_resume(const struct device *dev, uint32_t channel) { uint32_t reg; uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio); struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev); uint32_t ch_state; /* channel should be valid */ if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) { LOG_ERR("invalid dma channel %d", channel); return -EINVAL; } ch_state = dma_dw_axi_get_ch_status(dev, channel); if (ch_state != DMA_DW_AXI_CH_SUSPENDED) { LOG_INF("channel %u is not in suspended state so cannot resume channel", channel); return 0; } reg = sys_read64(reg_base + DMA_DW_AXI_CHENREG); /* channel susp write enable bit has to be asserted */ WRITE_BIT(reg, CH_RESUME_WE(channel), 1); /* channel susp bit must be cleared to resume a channel*/ WRITE_BIT(reg, CH_RESUME(channel), 0); /* resume a channel by writing 0: ch_susp and 1: ch_susp_we */ sys_write64(reg, reg_base + DMA_DW_AXI_CHENREG); return 0; } /* suspend a dma channel */ static int dma_dw_axi_suspend(const struct device *dev, uint32_t channel) { int ret; uintptr_t reg_base = DEVICE_MMIO_NAMED_GET(dev, dma_mmio); struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev); uint32_t ch_state; /* channel should be valid */ if (channel > (dw_dev_data->dma_ctx.dma_channels - 1)) { LOG_ERR("invalid dma channel %u", channel); return -EINVAL; } ch_state = dma_dw_axi_get_ch_status(dev, channel); if (ch_state != DMA_DW_AXI_CH_ACTIVE) { LOG_INF("nothing to suspend as dma channel %u is not busy", channel); return 0; } /* suspend dma transfer */ sys_write64(CH_SUSP(channel), reg_base + DMA_DW_AXI_CHENREG); ret = WAIT_FOR(dma_dw_axi_get_ch_status(dev, channel) & DMA_DW_AXI_CH_SUSPENDED, CONFIG_DMA_CHANNEL_STATUS_TIMEOUT, k_busy_wait(10)); if (ret == 0) { LOG_ERR("channel suspend failed"); return ret; } return 0; } static int dma_dw_axi_init(const struct device *dev) { DEVICE_MMIO_NAMED_MAP(dev, dma_mmio, K_MEM_CACHE_NONE); int i, ret; struct dma_dw_axi_ch_data *chan_data; const struct dma_dw_axi_dev_cfg *dw_dma_config = DEV_CFG(dev); struct dma_dw_axi_dev_data *const dw_dev_data = DEV_DATA(dev); #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(resets) if (dw_dma_config->reset.dev != NULL) { /* check if reset manager is in ready state */ if (!device_is_ready(dw_dma_config->reset.dev)) { LOG_ERR("reset controller device not found"); return -ENODEV; } /* assert and de-assert dma controller */ ret = reset_line_toggle(dw_dma_config->reset.dev, dw_dma_config->reset.id); if (ret != 0) { LOG_ERR("failed to reset dma controller"); return ret; } } #endif /* initialize channel state variable */ for (i = 0; i < dw_dev_data->dma_ctx.dma_channels; i++) { chan_data = &dw_dev_data->chan[i]; /* initialize channel state */ chan_data->ch_state = DMA_DW_AXI_CH_IDLE; } /* configure and enable interrupt lines */ dw_dma_config->irq_config(); return 0; } static const struct dma_driver_api dma_dw_axi_driver_api = { .config = dma_dw_axi_config, .start = dma_dw_axi_start, .stop = dma_dw_axi_stop, .suspend = dma_dw_axi_suspend, .resume = dma_dw_axi_resume, }; /* enable irq lines */ #define CONFIGURE_DMA_IRQ(idx, inst) \ IF_ENABLED(DT_INST_IRQ_HAS_IDX(inst, idx), ( \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, idx, irq), \ DT_INST_IRQ_BY_IDX(inst, idx, 
priority), \ dma_dw_axi_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_IDX(inst, idx, irq)); \ )) #define DW_AXI_DMA_RESET_SPEC_INIT(inst) \ .reset = RESET_DT_SPEC_INST_GET(inst), \ #define DW_AXI_DMAC_INIT(inst) \ static struct dma_dw_axi_ch_data chan_##inst[DT_INST_PROP(inst, dma_channels)]; \ static struct dma_lli \ dma_desc_pool_##inst[DT_INST_PROP(inst, dma_channels) * \ CONFIG_DMA_DW_AXI_MAX_DESC]; \ ATOMIC_DEFINE(dma_dw_axi_atomic##inst, \ DT_INST_PROP(inst, dma_channels)); \ static struct dma_dw_axi_dev_data dma_dw_axi_data_##inst = { \ .dma_ctx = { \ .magic = DMA_MAGIC, \ .atomic = dma_dw_axi_atomic##inst, \ .dma_channels = DT_INST_PROP(inst, dma_channels), \ }, \ .chan = chan_##inst, \ .dma_desc_pool = dma_desc_pool_##inst, \ }; \ static void dw_dma_irq_config_##inst(void); \ static const struct dma_dw_axi_dev_cfg dma_dw_axi_config_##inst = { \ DEVICE_MMIO_NAMED_ROM_INIT(dma_mmio, DT_DRV_INST(inst)), \ IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, resets), \ (DW_AXI_DMA_RESET_SPEC_INIT(inst))) \ .irq_config = dw_dma_irq_config_##inst, \ }; \ \ DEVICE_DT_INST_DEFINE(inst, \ &dma_dw_axi_init, \ NULL, \ &dma_dw_axi_data_##inst, \ &dma_dw_axi_config_##inst, POST_KERNEL, \ CONFIG_DMA_INIT_PRIORITY, \ &dma_dw_axi_driver_api); \ \ static void dw_dma_irq_config_##inst(void) \ { \ LISTIFY(DT_NUM_IRQS(DT_DRV_INST(inst)), CONFIGURE_DMA_IRQ, (), inst) \ } DT_INST_FOREACH_STATUS_OKAY(DW_AXI_DMAC_INIT) ```
/content/code_sandbox/drivers/dma/dma_dw_axi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,610
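The dma_dw_axi driver above sits behind Zephyr's generic DMA API: an application fills a `struct dma_config` / `struct dma_block_config` pair, then calls `dma_config()` followed by `dma_start()`. A minimal sketch of driving a memory-to-memory transfer through that API follows; the `dma0` node label, channel number, data sizes and burst lengths are illustrative assumptions, not values taken from the driver.

```c
/*
 * Sketch only: memory-to-memory transfer through the generic Zephyr DMA API.
 * The dma0 node label, channel number, data/burst sizes and buffers are
 * illustrative assumptions; dma_config()/dma_start() and the struct fields
 * are the stable API the driver above implements.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

#define XFER_SIZE 1024

static uint8_t src_buf[XFER_SIZE];
static uint8_t dst_buf[XFER_SIZE];

/* invoked from the driver ISR; a negative status reports the error IRQs */
static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);
	ARG_UNUSED(channel);
	ARG_UNUSED(status);
}

int start_m2m_transfer(void)
{
	const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma0));
	uint32_t channel = 0; /* assumed-free channel, for illustration */

	struct dma_block_config blk = {
		.source_address = (uintptr_t)src_buf,
		.dest_address = (uintptr_t)dst_buf,
		.block_size = XFER_SIZE,
	};

	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,    /* 32-bit beats */
		.dest_data_size = 4,
		.source_burst_length = 8, /* must be non-zero for this driver */
		.dest_burst_length = 8,
		.block_count = 1,
		.head_block = &blk,
		.dma_callback = xfer_done,
	};

	int ret = dma_config(dma_dev, channel, &cfg);

	return ret ? ret : dma_start(dma_dev, channel);
}
```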
```c /* * */ #define DT_DRV_COMPAT atmel_sam_xdmac /** @file * @brief Atmel SAM MCU family Direct Memory Access (XDMAC) driver. */ #include <errno.h> #include <zephyr/sys/__assert.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <string.h> #include <soc.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include "dma_sam_xdmac.h" #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_sam_xdmac); #define XDMAC_INT_ERR (XDMAC_CIE_RBIE | XDMAC_CIE_WBIE | XDMAC_CIE_ROIE) #define DMA_CHANNELS_NO XDMACCHID_NUMBER /* DMA channel configuration */ struct sam_xdmac_channel_cfg { void *user_data; dma_callback_t callback; uint32_t data_size; }; /* Device constant configuration parameters */ struct sam_xdmac_dev_cfg { Xdmac *regs; void (*irq_config)(void); const struct atmel_sam_pmc_config clock_cfg; uint8_t irq_id; }; /* Device run time data */ struct sam_xdmac_dev_data { struct sam_xdmac_channel_cfg dma_channels[DMA_CHANNELS_NO]; }; static void sam_xdmac_isr(const struct device *dev) { const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config; struct sam_xdmac_dev_data *const dev_data = dev->data; Xdmac * const xdmac = dev_cfg->regs; struct sam_xdmac_channel_cfg *channel_cfg; uint32_t isr_status; uint32_t err; /* Get global interrupt status */ isr_status = xdmac->XDMAC_GIS; for (int channel = 0; channel < DMA_CHANNELS_NO; channel++) { if (!(isr_status & (1 << channel))) { continue; } channel_cfg = &dev_data->dma_channels[channel]; /* Get channel errors */ err = xdmac->XDMAC_CHID[channel].XDMAC_CIS & XDMAC_INT_ERR; /* Execute callback */ if (channel_cfg->callback) { channel_cfg->callback(dev, channel_cfg->user_data, channel, err); } } } int sam_xdmac_channel_configure(const struct device *dev, uint32_t channel, struct sam_xdmac_channel_config *param) { const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config; Xdmac * const xdmac = dev_cfg->regs; if (channel >= DMA_CHANNELS_NO) { return -EINVAL; } /* Check if the channel is enabled */ if (xdmac->XDMAC_GS & (XDMAC_GS_ST0 << channel)) { return -EBUSY; } /* Disable all channel interrupts */ xdmac->XDMAC_CHID[channel].XDMAC_CID = 0xFF; /* Clear pending Interrupt Status bit(s) */ (void)xdmac->XDMAC_CHID[channel].XDMAC_CIS; /* NOTE: * Setting channel configuration is not required for linked list view 2 * to 3 modes. It is done anyway to keep the code simple. It has no * negative impact on the DMA functionality. */ /* Set channel configuration */ xdmac->XDMAC_CHID[channel].XDMAC_CC = param->cfg; /* Set data stride memory pattern */ xdmac->XDMAC_CHID[channel].XDMAC_CDS_MSP = param->ds_msp; /* Set source microblock stride */ xdmac->XDMAC_CHID[channel].XDMAC_CSUS = param->sus; /* Set destination microblock stride */ xdmac->XDMAC_CHID[channel].XDMAC_CDUS = param->dus; /* Enable selected channel interrupts */ xdmac->XDMAC_CHID[channel].XDMAC_CIE = param->cie; return 0; } int sam_xdmac_transfer_configure(const struct device *dev, uint32_t channel, struct sam_xdmac_transfer_config *param) { const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config; Xdmac * const xdmac = dev_cfg->regs; if (channel >= DMA_CHANNELS_NO) { return -EINVAL; } /* Check if the channel is enabled */ if (xdmac->XDMAC_GS & (XDMAC_GS_ST0 << channel)) { return -EBUSY; } /* NOTE: * Setting source, destination address is not required for linked list * view 1 to 3 modes. It is done anyway to keep the code simple. It has * no negative impact on the DMA functionality. 
*/ /* Set source address */ xdmac->XDMAC_CHID[channel].XDMAC_CSA = param->sa; /* Set destination address */ xdmac->XDMAC_CHID[channel].XDMAC_CDA = param->da; if ((param->ndc & XDMAC_CNDC_NDE) == XDMAC_CNDC_NDE_DSCR_FETCH_DIS) { /* * Linked List is disabled, configure additional transfer * parameters. */ /* Set length of data in the microblock */ xdmac->XDMAC_CHID[channel].XDMAC_CUBC = param->ublen; /* Set block length: block length is (blen+1) microblocks */ xdmac->XDMAC_CHID[channel].XDMAC_CBC = param->blen; } else { /* * Linked List is enabled, configure additional transfer * parameters. */ /* Set next descriptor address */ xdmac->XDMAC_CHID[channel].XDMAC_CNDA = param->nda; } /* Set next descriptor configuration */ xdmac->XDMAC_CHID[channel].XDMAC_CNDC = param->ndc; return 0; } static int sam_xdmac_config(const struct device *dev, uint32_t channel, struct dma_config *cfg) { struct sam_xdmac_dev_data *const dev_data = dev->data; struct sam_xdmac_channel_config channel_cfg; struct sam_xdmac_transfer_config transfer_cfg; uint32_t burst_size; uint32_t data_size; int ret; if (channel >= DMA_CHANNELS_NO) { return -EINVAL; } __ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size); __ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length); if (cfg->source_data_size != 1U && cfg->source_data_size != 2U && cfg->source_data_size != 4U) { LOG_ERR("Invalid 'source_data_size' value"); return -EINVAL; } if (cfg->block_count != 1U) { LOG_ERR("Only single block transfer is currently supported." " Please submit a patch."); return -EINVAL; } burst_size = find_msb_set(cfg->source_burst_length) - 1; LOG_DBG("burst_size=%d", burst_size); data_size = find_msb_set(cfg->source_data_size) - 1; dev_data->dma_channels[channel].data_size = data_size; LOG_DBG("data_size=%d", data_size); uint32_t xdmac_inc_cfg = 0; if (cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT && cfg->channel_direction == MEMORY_TO_PERIPHERAL) { xdmac_inc_cfg |= XDMAC_CC_SAM_INCREMENTED_AM; } else { xdmac_inc_cfg |= XDMAC_CC_SAM_FIXED_AM; } if (cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT && cfg->channel_direction == PERIPHERAL_TO_MEMORY) { xdmac_inc_cfg |= XDMAC_CC_DAM_INCREMENTED_AM; } else { xdmac_inc_cfg |= XDMAC_CC_DAM_FIXED_AM; } switch (cfg->channel_direction) { case MEMORY_TO_MEMORY: channel_cfg.cfg = XDMAC_CC_TYPE_MEM_TRAN | XDMAC_CC_MBSIZE(burst_size == 0U ? 0 : burst_size - 1) | XDMAC_CC_SAM_INCREMENTED_AM | XDMAC_CC_DAM_INCREMENTED_AM; break; case MEMORY_TO_PERIPHERAL: channel_cfg.cfg = XDMAC_CC_TYPE_PER_TRAN | XDMAC_CC_CSIZE(burst_size) | XDMAC_CC_DSYNC_MEM2PER | xdmac_inc_cfg; break; case PERIPHERAL_TO_MEMORY: channel_cfg.cfg = XDMAC_CC_TYPE_PER_TRAN | XDMAC_CC_CSIZE(burst_size) | XDMAC_CC_DSYNC_PER2MEM | xdmac_inc_cfg; break; default: LOG_ERR("'channel_direction' value %d is not supported", cfg->channel_direction); return -EINVAL; } channel_cfg.cfg |= XDMAC_CC_DWIDTH(data_size) | XDMAC_CC_SIF_AHB_IF1 | XDMAC_CC_DIF_AHB_IF1 | XDMAC_CC_PERID(cfg->dma_slot); channel_cfg.ds_msp = 0U; channel_cfg.sus = 0U; channel_cfg.dus = 0U; channel_cfg.cie = (cfg->complete_callback_en ? XDMAC_CIE_BIE : XDMAC_CIE_LIE) | (cfg->error_callback_dis ? 
0 : XDMAC_INT_ERR); ret = sam_xdmac_channel_configure(dev, channel, &channel_cfg); if (ret < 0) { return ret; } dev_data->dma_channels[channel].callback = cfg->dma_callback; dev_data->dma_channels[channel].user_data = cfg->user_data; (void)memset(&transfer_cfg, 0, sizeof(transfer_cfg)); transfer_cfg.sa = cfg->head_block->source_address; transfer_cfg.da = cfg->head_block->dest_address; transfer_cfg.ublen = cfg->head_block->block_size >> data_size; ret = sam_xdmac_transfer_configure(dev, channel, &transfer_cfg); return ret; } static int sam_xdmac_transfer_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { struct sam_xdmac_dev_data *const dev_data = dev->data; struct sam_xdmac_transfer_config transfer_cfg = { .sa = src, .da = dst, .ublen = size >> dev_data->dma_channels[channel].data_size, }; return sam_xdmac_transfer_configure(dev, channel, &transfer_cfg); } int sam_xdmac_transfer_start(const struct device *dev, uint32_t channel) { const struct sam_xdmac_dev_cfg *config = dev->config; Xdmac * const xdmac = config->regs; if (channel >= DMA_CHANNELS_NO) { LOG_DBG("Channel %d out of range", channel); return -EINVAL; } /* Check if the channel is enabled */ if (xdmac->XDMAC_GS & (XDMAC_GS_ST0 << channel)) { LOG_DBG("Channel %d already enabled", channel); return -EBUSY; } /* Enable channel interrupt */ xdmac->XDMAC_GIE = XDMAC_GIE_IE0 << channel; /* Enable channel */ xdmac->XDMAC_GE = XDMAC_GE_EN0 << channel; return 0; } int sam_xdmac_transfer_stop(const struct device *dev, uint32_t channel) { const struct sam_xdmac_dev_cfg *config = dev->config; Xdmac * const xdmac = config->regs; if (channel >= DMA_CHANNELS_NO) { return -EINVAL; } /* Check if the channel is enabled */ if (!(xdmac->XDMAC_GS & (XDMAC_GS_ST0 << channel))) { return 0; } /* Disable channel */ xdmac->XDMAC_GD = XDMAC_GD_DI0 << channel; /* Disable channel interrupt */ xdmac->XDMAC_GID = XDMAC_GID_ID0 << channel; /* Disable all channel interrupts */ xdmac->XDMAC_CHID[channel].XDMAC_CID = 0xFF; /* Clear the pending Interrupt Status bit(s) */ (void)xdmac->XDMAC_CHID[channel].XDMAC_CIS; return 0; } static int sam_xdmac_initialize(const struct device *dev) { const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config; Xdmac * const xdmac = dev_cfg->regs; /* Configure interrupts */ dev_cfg->irq_config(); /* Enable XDMAC clock in PMC */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&dev_cfg->clock_cfg); /* Disable all channels */ xdmac->XDMAC_GD = UINT32_MAX; /* Disable all channel interrupts */ xdmac->XDMAC_GID = UINT32_MAX; /* Enable module's IRQ */ irq_enable(dev_cfg->irq_id); LOG_INF("Device %s initialized", dev->name); return 0; } static int sam_xdmac_get_status(const struct device *dev, uint32_t channel, struct dma_status *status) { const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config; Xdmac * const xdmac = dev_cfg->regs; uint32_t chan_cfg = xdmac->XDMAC_CHID[channel].XDMAC_CC; uint32_t ublen = xdmac->XDMAC_CHID[channel].XDMAC_CUBC; /* we need to check some of the XDMAC_CC registers to determine the DMA direction */ if ((chan_cfg & XDMAC_CC_TYPE_Msk) == 0) { status->dir = MEMORY_TO_MEMORY; } else if ((chan_cfg & XDMAC_CC_DSYNC_Msk) == XDMAC_CC_DSYNC_MEM2PER) { status->dir = MEMORY_TO_PERIPHERAL; } else { status->dir = PERIPHERAL_TO_MEMORY; } status->busy = ((chan_cfg & XDMAC_CC_INITD_Msk) != 0) || (ublen > 0); status->pending_length = ublen; return 0; } static const struct dma_driver_api sam_xdmac_driver_api = { .config = sam_xdmac_config, .reload = 
sam_xdmac_transfer_reload, .start = sam_xdmac_transfer_start, .stop = sam_xdmac_transfer_stop, .get_status = sam_xdmac_get_status, }; /* DMA0 */ static void dma0_sam_irq_config(void) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), sam_xdmac_isr, DEVICE_DT_INST_GET(0), 0); } static const struct sam_xdmac_dev_cfg dma0_sam_config = { .regs = (Xdmac *)DT_INST_REG_ADDR(0), .irq_config = dma0_sam_irq_config, .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0), .irq_id = DT_INST_IRQN(0), }; static struct sam_xdmac_dev_data dma0_sam_data; DEVICE_DT_INST_DEFINE(0, &sam_xdmac_initialize, NULL, &dma0_sam_data, &dma0_sam_config, POST_KERNEL, CONFIG_DMA_INIT_PRIORITY, &sam_xdmac_driver_api); ```
/content/code_sandbox/drivers/dma/dma_sam_xdmac.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,447
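Two encodings in `sam_xdmac_config()` above are worth spelling out: `find_msb_set(x) - 1` computes log2(x) for power-of-two x, which is the form the XDMAC DWIDTH/CSIZE fields expect, and the microblock length written to CUBC counts data units rather than bytes, hence the `block_size >> data_size` shift. A small worked sketch, with illustrative values:

```c
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/*
 * Worked example (illustrative values) of the encodings in sam_xdmac_config():
 * for a power-of-two x, find_msb_set(x) - 1 == log2(x).
 */
void xdmac_size_encoding_demo(void)
{
	uint32_t source_data_size = 4;     /* bytes per beat: 32-bit word */
	uint32_t source_burst_length = 16; /* beats per burst             */
	uint32_t block_size = 512;         /* bytes to move in this block */

	uint32_t data_size = find_msb_set(source_data_size) - 1;     /* 2 */
	uint32_t burst_size = find_msb_set(source_burst_length) - 1; /* 4 */

	/* XDMAC_CUBC counts data units, not bytes, hence the shift */
	uint32_t ublen = block_size >> data_size; /* 512 >> 2 = 128 units */

	printk("dwidth=%u csize=%u ublen=%u\n", (unsigned int)data_size,
	       (unsigned int)burst_size, (unsigned int)ublen);
}
```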
```c /* * */ #include <adsp_interrupt.h> #include <zephyr/drivers/dma.h> #include <zephyr/cache.h> #define DT_DRV_COMPAT intel_adsp_gpdma #define GPDMA_CTL_OFFSET 0x0004 #define GPDMA_CTL_FDCGB BIT(0) #define GPDMA_CTL_DCGD BIT(30) /* TODO make device tree defined? */ #define GPDMA_CHLLPC_OFFSET(channel) (0x0010 + channel*0x10) #define GPDMA_CHLLPC_EN BIT(7) #define GPDMA_CHLLPC_DHRS(x) SET_BITS(6, 0, x) /* TODO make device tree defined? */ #define GPDMA_CHLLPL(channel) (0x0018 + channel*0x10) #define GPDMA_CHLLPU(channel) (0x001c + channel*0x10) #define GPDMA_OSEL(x) SET_BITS(25, 24, x) #define SHIM_CLKCTL_LPGPDMA_SPA BIT(0) #define SHIM_CLKCTL_LPGPDMA_CPA BIT(8) # define DSP_INIT_LPGPDMA(x) (0x71A60 + (2*x)) # define LPGPDMA_CTLOSEL_FLAG BIT(15) # define LPGPDMA_CHOSEL_FLAG 0xFF #include "dma_dw_common.h" #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_intel_adsp_gpdma); /* Device run time data */ struct intel_adsp_gpdma_data { struct dw_dma_dev_data dw_data; }; /* Device constant configuration parameters */ struct intel_adsp_gpdma_cfg { struct dw_dma_dev_cfg dw_cfg; uint32_t shim; }; #ifdef DMA_INTEL_ADSP_GPDMA_DEBUG static void intel_adsp_gpdma_dump_registers(const struct device *dev, uint32_t channel) { const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; const struct dw_dma_dev_cfg *const dw_cfg = &dev_cfg->dw_cfg; uint32_t cap, ctl, ipptr, llpc, llpl, llpu; int i; /* Shims */ cap = dw_read(dev_cfg->shim, 0x0); ctl = dw_read(dev_cfg->shim, 0x4); ipptr = dw_read(dev_cfg->shim, 0x8); llpc = dw_read(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel)); llpl = dw_read(dev_cfg->shim, GPDMA_CHLLPL(channel)); llpu = dw_read(dev_cfg->shim, GPDMA_CHLLPU(channel)); LOG_INF("%s: channel: %d cap %x, ctl %x, ipptr %x, llpc %x, llpl %x, llpu %x", dev->name, channel, cap, ctl, ipptr, llpc, llpl, llpu); /* Channel Register Dump */ for (i = 0; i <= DW_DMA_CHANNEL_REGISTER_OFFSET_END; i += 0x8) LOG_INF(" channel register offset: %#x value: %#x\n", chan_reg_offs[i], dw_read(dw_cfg->base, DW_CHAN_OFFSET(channel) + chan_reg_offs[i])); /* IP Register Dump */ for (i = DW_DMA_CHANNEL_REGISTER_OFFSET_START; i <= DW_DMA_CHANNEL_REGISTER_OFFSET_END; i += 0x8) LOG_INF(" ip register offset: %#x value: %#x\n", ip_reg_offs[i], dw_read(dw_cfg->base, ip_reg_offs[i])); } #endif static void intel_adsp_gpdma_llp_config(const struct device *dev, uint32_t channel, uint32_t dma_slot) { #ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel), GPDMA_CHLLPC_DHRS(dma_slot)); #endif } static inline void intel_adsp_gpdma_llp_enable(const struct device *dev, uint32_t channel) { #ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; uint32_t val; val = dw_read(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel)); if (!(val & GPDMA_CHLLPC_EN)) { dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel), val | GPDMA_CHLLPC_EN); } #endif } static inline void intel_adsp_gpdma_llp_disable(const struct device *dev, uint32_t channel) { #ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; uint32_t val; val = dw_read(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel)); dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel), val | GPDMA_CHLLPC_EN); #endif } static inline void intel_adsp_gpdma_llp_read(const struct 
device *dev, uint32_t channel, uint32_t *llp_l, uint32_t *llp_u) { #ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_HAS_LLP const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; *llp_l = dw_read(dev_cfg->shim, GPDMA_CHLLPL(channel)); *llp_u = dw_read(dev_cfg->shim, GPDMA_CHLLPU(channel)); #endif } static int intel_adsp_gpdma_config(const struct device *dev, uint32_t channel, struct dma_config *cfg) { int res = dw_dma_config(dev, channel, cfg); if (res != 0) { return res; } /* Assume all scatter/gathers are for the same device? */ switch (cfg->channel_direction) { case MEMORY_TO_PERIPHERAL: case PERIPHERAL_TO_MEMORY: LOG_DBG("%s: channel %d configuring llp for %x", dev->name, channel, cfg->dma_slot); intel_adsp_gpdma_llp_config(dev, channel, cfg->dma_slot); break; default: break; } return res; } static int intel_adsp_gpdma_start(const struct device *dev, uint32_t channel) { int ret = 0; #if CONFIG_PM_DEVICE && CONFIG_SOC_SERIES_INTEL_ADSP_ACE bool first_use = false; enum pm_device_state state; /* We need to power-up device before using it. So in case of a GPDMA, we need to check if * the current instance is already active, and if not, we let the power manager know that * we want to use it. */ if (pm_device_state_get(dev, &state) != -ENOSYS) { first_use = state != PM_DEVICE_STATE_ACTIVE; if (first_use) { ret = pm_device_runtime_get(dev); if (ret < 0) { return ret; } } } #endif intel_adsp_gpdma_llp_enable(dev, channel); ret = dw_dma_start(dev, channel); if (ret != 0) { intel_adsp_gpdma_llp_disable(dev, channel); } #if CONFIG_PM_DEVICE && CONFIG_SOC_SERIES_INTEL_ADSP_ACE /* Device usage is counted by the calls of dw_dma_start and dw_dma_stop. For the first use, * we need to make sure that the pm_device_runtime_get and pm_device_runtime_put functions * calls are balanced. */ if (first_use) { ret = pm_device_runtime_put(dev); } #endif return ret; } static int intel_adsp_gpdma_stop(const struct device *dev, uint32_t channel) { int ret = dw_dma_stop(dev, channel); if (ret == 0) { intel_adsp_gpdma_llp_disable(dev, channel); } return ret; } static int intel_adsp_gpdma_copy(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { struct dw_dma_dev_data *const dev_data = dev->data; struct dw_dma_chan_data *chan_data; if (channel >= DW_MAX_CHAN) { return -EINVAL; } chan_data = &dev_data->chan[channel]; /* default action is to clear the DONE bit for all LLI making * sure the cache is coherent between DSP and DMAC. 
*/ for (int i = 0; i < chan_data->lli_count; i++) { chan_data->lli[i].ctrl_hi &= ~DW_CTLH_DONE(1); } chan_data->ptr_data.current_ptr += size; if (chan_data->ptr_data.current_ptr >= chan_data->ptr_data.end_ptr) { chan_data->ptr_data.current_ptr = chan_data->ptr_data.start_ptr + (chan_data->ptr_data.current_ptr - chan_data->ptr_data.end_ptr); } return 0; } /* Disables automatic clock gating (force disable clock gate) */ static void intel_adsp_gpdma_clock_enable(const struct device *dev) { const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET; uint32_t val; if (IS_ENABLED(CONFIG_SOC_SERIES_INTEL_ADSP_ACE)) { val = sys_read32(reg) | GPDMA_CTL_DCGD; } else { val = GPDMA_CTL_FDCGB; } sys_write32(val, reg); } #ifdef CONFIG_PM_DEVICE static void intel_adsp_gpdma_clock_disable(const struct device *dev) { #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET; uint32_t val = sys_read32(reg) & ~GPDMA_CTL_DCGD; sys_write32(val, reg); #endif } #endif static void intel_adsp_gpdma_claim_ownership(const struct device *dev) { #ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET; uint32_t val = sys_read32(reg) | GPDMA_OSEL(0x3); sys_write32(val, reg); #else sys_write32(LPGPDMA_CHOSEL_FLAG | LPGPDMA_CTLOSEL_FLAG, DSP_INIT_LPGPDMA(0)); sys_write32(LPGPDMA_CHOSEL_FLAG | LPGPDMA_CTLOSEL_FLAG, DSP_INIT_LPGPDMA(1)); ARG_UNUSED(dev); #endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */ #endif /* CONFIG_DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP */ } #ifdef CONFIG_PM_DEVICE static void intel_adsp_gpdma_release_ownership(const struct device *dev) { #ifdef CONFIG_DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET; uint32_t val = sys_read32(reg) & ~GPDMA_OSEL(0x3); sys_write32(val, reg); /* CHECKME: Do CAVS platforms set ownership over DMA, * if yes, add support for it releasing. 
*/ #endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */ #endif /* CONFIG_DMA_INTEL_ADSP_GPDMA_NEED_CONTROLLER_OWNERSHIP */ } #endif #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE static int intel_adsp_gpdma_enable(const struct device *dev) { const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET; sys_write32(SHIM_CLKCTL_LPGPDMA_SPA, reg); if (!WAIT_FOR((sys_read32(reg) & SHIM_CLKCTL_LPGPDMA_CPA), 10000, k_busy_wait(1))) { return -1; } return 0; } #ifdef CONFIG_PM_DEVICE static int intel_adsp_gpdma_disable(const struct device *dev) { const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; uint32_t reg = dev_cfg->shim + GPDMA_CTL_OFFSET; sys_write32(sys_read32(reg) & ~SHIM_CLKCTL_LPGPDMA_SPA, reg); return 0; } #endif /* CONFIG_PM_DEVICE */ #endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */ static int intel_adsp_gpdma_power_on(const struct device *dev) { const struct intel_adsp_gpdma_cfg *const dev_cfg = dev->config; int ret; #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE /* Power up */ ret = intel_adsp_gpdma_enable(dev); if (ret != 0) { LOG_ERR("%s: failed to initialize", dev->name); goto out; } #endif /* DW DMA Owner Select to DSP */ intel_adsp_gpdma_claim_ownership(dev); /* Disable dynamic clock gating appropriately before initializing */ intel_adsp_gpdma_clock_enable(dev); /* Disable all channels and Channel interrupts */ ret = dw_dma_setup(dev); if (ret != 0) { LOG_ERR("%s: failed to initialize", dev->name); goto out; } /* Configure interrupts */ dev_cfg->dw_cfg.irq_config(); LOG_INF("%s: initialized", dev->name); out: return 0; } #ifdef CONFIG_PM_DEVICE static int intel_adsp_gpdma_power_off(const struct device *dev) { LOG_INF("%s: power off", dev->name); /* Enabling dynamic clock gating */ intel_adsp_gpdma_clock_disable(dev); /* Relesing DMA ownership*/ intel_adsp_gpdma_release_ownership(dev); #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE /* Power down */ return intel_adsp_gpdma_disable(dev); #else return 0; #endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */ } #endif /* CONFIG_PM_DEVICE */ int intel_adsp_gpdma_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { uint32_t llp_l = 0; uint32_t llp_u = 0; if (channel >= DW_MAX_CHAN) { return -EINVAL; } intel_adsp_gpdma_llp_read(dev, channel, &llp_l, &llp_u); stat->total_copied = ((uint64_t)llp_u << 32) | llp_l; return dw_dma_get_status(dev, channel, stat); } int intel_adsp_gpdma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value) { switch (type) { case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT: *value = sys_cache_data_line_size_get(); break; case DMA_ATTR_BUFFER_SIZE_ALIGNMENT: *value = DMA_BUF_SIZE_ALIGNMENT(DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_gpdma)); break; case DMA_ATTR_COPY_ALIGNMENT: *value = DMA_COPY_ALIGNMENT(DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_gpdma)); break; case DMA_ATTR_MAX_BLOCK_COUNT: *value = CONFIG_DMA_DW_LLI_POOL_SIZE; break; default: return -EINVAL; } return 0; } #ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE static inline void ace_gpdma_intc_unmask(void) { ACE_DINT[0].ie[ACE_INTL_GPDMA] = BIT(0); } #else static inline void ace_gpdma_intc_unmask(void) {} #endif int intel_adsp_gpdma_init(const struct device *dev) { struct dw_dma_dev_data *const dev_data = dev->data; /* Setup context and atomics for channels */ dev_data->dma_ctx.magic = DMA_MAGIC; dev_data->dma_ctx.dma_channels = DW_MAX_CHAN; dev_data->dma_ctx.atomic = dev_data->channels_atomic; ace_gpdma_intc_unmask(); #if CONFIG_PM_DEVICE && CONFIG_SOC_SERIES_INTEL_ADSP_ACE if 
(pm_device_on_power_domain(dev)) { pm_device_init_off(dev); } else { pm_device_init_suspended(dev); } return 0; #else return intel_adsp_gpdma_power_on(dev); #endif } #ifdef CONFIG_PM_DEVICE static int gpdma_pm_action(const struct device *dev, enum pm_device_action action) { switch (action) { case PM_DEVICE_ACTION_RESUME: return intel_adsp_gpdma_power_on(dev); case PM_DEVICE_ACTION_SUSPEND: return intel_adsp_gpdma_power_off(dev); /* ON and OFF actions are used only by the power domain to change internal power status of * the device. OFF state mean that device and its power domain are disabled, SUSPEND mean * that device is power off but domain is already power on. */ case PM_DEVICE_ACTION_TURN_ON: case PM_DEVICE_ACTION_TURN_OFF: break; default: return -ENOTSUP; } return 0; } #endif static const struct dma_driver_api intel_adsp_gpdma_driver_api = { .config = intel_adsp_gpdma_config, .reload = intel_adsp_gpdma_copy, .start = intel_adsp_gpdma_start, .stop = intel_adsp_gpdma_stop, .suspend = dw_dma_suspend, .resume = dw_dma_resume, .get_status = intel_adsp_gpdma_get_status, .get_attribute = intel_adsp_gpdma_get_attribute, }; #define INTEL_ADSP_GPDMA_CHAN_ARB_DATA(inst) \ static struct dw_drv_plat_data dmac##inst = { \ .chan[0] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[1] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[2] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[3] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[4] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[5] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[6] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[7] = { \ .class = 6, \ .weight = 0, \ }, \ } #define INTEL_ADSP_GPDMA_INIT(inst) \ INTEL_ADSP_GPDMA_CHAN_ARB_DATA(inst); \ static void intel_adsp_gpdma##inst##_irq_config(void); \ \ static const struct intel_adsp_gpdma_cfg intel_adsp_gpdma##inst##_config = {\ .dw_cfg = { \ .base = DT_INST_REG_ADDR(inst), \ .irq_config = intel_adsp_gpdma##inst##_irq_config,\ }, \ .shim = DT_INST_PROP_BY_IDX(inst, shim, 0), \ }; \ \ static struct intel_adsp_gpdma_data intel_adsp_gpdma##inst##_data = {\ .dw_data = { \ .channel_data = &dmac##inst, \ }, \ }; \ \ PM_DEVICE_DT_INST_DEFINE(inst, gpdma_pm_action); \ \ DEVICE_DT_INST_DEFINE(inst, \ &intel_adsp_gpdma_init, \ PM_DEVICE_DT_INST_GET(inst), \ &intel_adsp_gpdma##inst##_data, \ &intel_adsp_gpdma##inst##_config, POST_KERNEL,\ CONFIG_DMA_INIT_PRIORITY, \ &intel_adsp_gpdma_driver_api); \ \ static void intel_adsp_gpdma##inst##_irq_config(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), \ DT_INST_IRQ(inst, priority), dw_dma_isr, \ DEVICE_DT_INST_GET(inst), \ DT_INST_IRQ(inst, sense)); \ irq_enable(DT_INST_IRQN(inst)); \ } DT_INST_FOREACH_STATUS_OKAY(INTEL_ADSP_GPDMA_INIT) ```
/content/code_sandbox/drivers/dma/dma_intel_adsp_gpdma.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,748
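The gpdma driver above exposes controller constraints through `dma_get_attribute()`, so callers are expected to query buffer alignment before allocating DMA buffers. A hedged sketch of that pattern follows; the helper name and the round-up policy are assumptions, while the attribute IDs are the ones `intel_adsp_gpdma_get_attribute()` answers.

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/sys/util.h>

/*
 * Sketch: query controller constraints before sizing DMA buffers. The helper
 * name and the round-up policy are assumptions; the attribute IDs are the
 * ones intel_adsp_gpdma_get_attribute() implements above.
 */
int pick_dma_buffer_size(const struct device *dma_dev, size_t wanted,
			 size_t *out_size, uint32_t *out_addr_align)
{
	uint32_t addr_align, size_align;
	int ret;

	ret = dma_get_attribute(dma_dev, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
				&addr_align);
	if (ret) {
		return ret;
	}

	ret = dma_get_attribute(dma_dev, DMA_ATTR_BUFFER_SIZE_ALIGNMENT,
				&size_align);
	if (ret) {
		return ret;
	}

	/* round the requested size up to the controller's size alignment */
	*out_size = ROUND_UP(wanted, size_align);
	/* the buffer must then be allocated with at least this alignment */
	*out_addr_align = addr_align;

	return 0;
}
```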
```c /* * */ /** * @brief Common part of DMA drivers for stm32. * @note Functions named with stm32_dma_* are SoCs related functions * implemented in dma_stm32_v*.c */ #include "dma_stm32.h" #include <zephyr/init.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/dma/dma_stm32.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_stm32, CONFIG_DMA_LOG_LEVEL); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_dma_v1) #define DT_DRV_COMPAT st_stm32_dma_v1 #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_dma_v2) #define DT_DRV_COMPAT st_stm32_dma_v2 #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_dma_v2bis) #define DT_DRV_COMPAT st_stm32_dma_v2bis #endif #if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) #if DT_INST_IRQ_HAS_IDX(0, 7) #define DMA_STM32_0_STREAM_COUNT 8 #elif DT_INST_IRQ_HAS_IDX(0, 6) #define DMA_STM32_0_STREAM_COUNT 7 #elif DT_INST_IRQ_HAS_IDX(0, 5) #define DMA_STM32_0_STREAM_COUNT 6 #elif DT_INST_IRQ_HAS_IDX(0, 4) #define DMA_STM32_0_STREAM_COUNT 5 #else #define DMA_STM32_0_STREAM_COUNT 3 #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */ #if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) #if DT_INST_IRQ_HAS_IDX(1, 7) #define DMA_STM32_1_STREAM_COUNT 8 #elif DT_INST_IRQ_HAS_IDX(1, 6) #define DMA_STM32_1_STREAM_COUNT 7 #elif DT_INST_IRQ_HAS_IDX(1, 5) #define DMA_STM32_1_STREAM_COUNT 6 #else #define DMA_STM32_1_STREAM_COUNT 5 #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) */ static const uint32_t table_m_size[] = { LL_DMA_MDATAALIGN_BYTE, LL_DMA_MDATAALIGN_HALFWORD, LL_DMA_MDATAALIGN_WORD, }; static const uint32_t table_p_size[] = { LL_DMA_PDATAALIGN_BYTE, LL_DMA_PDATAALIGN_HALFWORD, LL_DMA_PDATAALIGN_WORD, }; static void dma_stm32_dump_stream_irq(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); stm32_dma_dump_stream_irq(dma, id); } static void dma_stm32_clear_stream_irq(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); dma_stm32_clear_tc(dma, id); dma_stm32_clear_ht(dma, id); stm32_dma_clear_stream_irq(dma, id); } static void dma_stm32_irq_handler(const struct device *dev, uint32_t id) { const struct dma_stm32_config *config = dev->config; DMA_TypeDef *dma = (DMA_TypeDef *)(config->base); struct dma_stm32_stream *stream; uint32_t callback_arg; __ASSERT_NO_MSG(id < config->max_streams); stream = &config->streams[id]; /* The busy channel is pertinent if not overridden by the HAL */ if ((stream->hal_override != true) && (stream->busy == false)) { /* * When DMA channel is not overridden by HAL, * ignore irq if the channel is not busy anymore */ dma_stm32_clear_stream_irq(dev, id); return; } #ifdef CONFIG_DMAMUX_STM32 callback_arg = stream->mux_channel; #else callback_arg = id + STM32_DMA_STREAM_OFFSET; #endif /* CONFIG_DMAMUX_STM32 */ if (!IS_ENABLED(CONFIG_DMAMUX_STM32)) { stream->busy = false; } /* The dma stream id is in range from STM32_DMA_STREAM_OFFSET..<dma-requests> */ if (stm32_dma_is_ht_irq_active(dma, id)) { /* Let HAL DMA handle flags on its own */ if (!stream->hal_override) { dma_stm32_clear_ht(dma, id); } stream->dma_callback(dev, stream->user_data, callback_arg, DMA_STATUS_BLOCK); } else if (stm32_dma_is_tc_irq_active(dma, id)) { #ifdef CONFIG_DMAMUX_STM32 /* Circular buffer never stops receiving as long as peripheral is enabled */ if (!stream->cyclic) { stream->busy = false; } #endif /* Let HAL DMA handle flags on its own */ if 
(!stream->hal_override) { dma_stm32_clear_tc(dma, id); } stream->dma_callback(dev, stream->user_data, callback_arg, DMA_STATUS_COMPLETE); } else if (stm32_dma_is_unexpected_irq_happened(dma, id)) { LOG_ERR("Unexpected irq happened."); stream->dma_callback(dev, stream->user_data, callback_arg, -EIO); } else { LOG_ERR("Transfer Error."); dma_stm32_dump_stream_irq(dev, id); dma_stm32_clear_stream_irq(dev, id); stream->dma_callback(dev, stream->user_data, callback_arg, -EIO); } } #ifdef CONFIG_DMA_STM32_SHARED_IRQS #define HANDLE_IRQS(index) \ static const struct device *const dev_##index = \ DEVICE_DT_INST_GET(index); \ const struct dma_stm32_config *cfg_##index = dev_##index->config; \ DMA_TypeDef *dma_##index = (DMA_TypeDef *)(cfg_##index->base); \ \ for (id = 0; id < cfg_##index->max_streams; ++id) { \ if (stm32_dma_is_irq_active(dma_##index, id)) { \ dma_stm32_irq_handler(dev_##index, id); \ } \ } static void dma_stm32_shared_irq_handler(const struct device *dev) { ARG_UNUSED(dev); uint32_t id = 0; DT_INST_FOREACH_STATUS_OKAY(HANDLE_IRQS) } #endif /* CONFIG_DMA_STM32_SHARED_IRQS */ static int dma_stm32_get_priority(uint8_t priority, uint32_t *ll_priority) { switch (priority) { case 0x0: *ll_priority = LL_DMA_PRIORITY_LOW; break; case 0x1: *ll_priority = LL_DMA_PRIORITY_MEDIUM; break; case 0x2: *ll_priority = LL_DMA_PRIORITY_HIGH; break; case 0x3: *ll_priority = LL_DMA_PRIORITY_VERYHIGH; break; default: LOG_ERR("Priority error. %d", priority); return -EINVAL; } return 0; } static int dma_stm32_get_direction(enum dma_channel_direction direction, uint32_t *ll_direction) { switch (direction) { case MEMORY_TO_MEMORY: *ll_direction = LL_DMA_DIRECTION_MEMORY_TO_MEMORY; break; case MEMORY_TO_PERIPHERAL: *ll_direction = LL_DMA_DIRECTION_MEMORY_TO_PERIPH; break; case PERIPHERAL_TO_MEMORY: *ll_direction = LL_DMA_DIRECTION_PERIPH_TO_MEMORY; break; default: LOG_ERR("Direction error. %d", direction); return -EINVAL; } return 0; } static int dma_stm32_get_memory_increment(enum dma_addr_adj increment, uint32_t *ll_increment) { switch (increment) { case DMA_ADDR_ADJ_INCREMENT: *ll_increment = LL_DMA_MEMORY_INCREMENT; break; case DMA_ADDR_ADJ_NO_CHANGE: *ll_increment = LL_DMA_MEMORY_NOINCREMENT; break; case DMA_ADDR_ADJ_DECREMENT: return -ENOTSUP; default: LOG_ERR("Memory increment error. %d", increment); return -EINVAL; } return 0; } static int dma_stm32_get_periph_increment(enum dma_addr_adj increment, uint32_t *ll_increment) { switch (increment) { case DMA_ADDR_ADJ_INCREMENT: *ll_increment = LL_DMA_PERIPH_INCREMENT; break; case DMA_ADDR_ADJ_NO_CHANGE: *ll_increment = LL_DMA_PERIPH_NOINCREMENT; break; case DMA_ADDR_ADJ_DECREMENT: return -ENOTSUP; default: LOG_ERR("Periph increment error. 
%d", increment); return -EINVAL; } return 0; } static int dma_stm32_disable_stream(DMA_TypeDef *dma, uint32_t id) { int count = 0; for (;;) { if (stm32_dma_disable_stream(dma, id) == 0) { return 0; } /* After trying for 5 seconds, give up */ if (count++ > (5 * 1000)) { return -EBUSY; } k_sleep(K_MSEC(1)); } return 0; } DMA_STM32_EXPORT_API int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config) { const struct dma_stm32_config *dev_config = dev->config; struct dma_stm32_stream *stream = &dev_config->streams[id - STM32_DMA_STREAM_OFFSET]; DMA_TypeDef *dma = (DMA_TypeDef *)dev_config->base; LL_DMA_InitTypeDef DMA_InitStruct; int ret; LL_DMA_StructInit(&DMA_InitStruct); /* Give channel from index 0 */ id = id - STM32_DMA_STREAM_OFFSET; if (id >= dev_config->max_streams) { LOG_ERR("cannot configure the dma stream %d.", id); return -EINVAL; } if (stream->busy) { LOG_ERR("dma stream %d is busy.", id); return -EBUSY; } if (dma_stm32_disable_stream(dma, id) != 0) { LOG_ERR("could not disable dma stream %d.", id); return -EBUSY; } dma_stm32_clear_stream_irq(dev, id); /* Check potential DMA override (if id parameters and stream are valid) */ if (config->linked_channel == STM32_DMA_HAL_OVERRIDE) { /* DMA channel is overridden by HAL DMA * Retain that the channel is busy and proceed to the minimal * configuration to properly route the IRQ */ stream->busy = true; stream->hal_override = true; stream->dma_callback = config->dma_callback; stream->user_data = config->user_data; stream->cyclic = false; return 0; } if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) { LOG_ERR("Data size too big: %d\n", config->head_block->block_size); return -EINVAL; } #ifdef CONFIG_DMA_STM32_V1 if ((config->channel_direction == MEMORY_TO_MEMORY) && (!dev_config->support_m2m)) { LOG_ERR("Memcopy not supported for device %s", dev->name); return -ENOTSUP; } #endif /* CONFIG_DMA_STM32_V1 */ /* Support only the same data width for source and dest */ if ((config->dest_data_size != config->source_data_size)) { LOG_ERR("source and dest data size differ."); return -EINVAL; } if (config->source_data_size != 4U && config->source_data_size != 2U && config->source_data_size != 1U) { LOG_ERR("source and dest unit size error, %d", config->source_data_size); return -EINVAL; } /* * STM32's circular mode will auto reset both source address * counter and destination address counter. 
*/ if (config->head_block->source_reload_en != config->head_block->dest_reload_en) { LOG_ERR("source_reload_en and dest_reload_en must " "be the same."); return -EINVAL; } stream->busy = true; stream->dma_callback = config->dma_callback; stream->direction = config->channel_direction; stream->user_data = config->user_data; stream->src_size = config->source_data_size; stream->dst_size = config->dest_data_size; stream->cyclic = config->head_block->source_reload_en; /* Check dest or source memory address, warn if 0 */ if (config->head_block->source_address == 0) { LOG_WRN("source_buffer address is null."); } if (config->head_block->dest_address == 0) { LOG_WRN("dest_buffer address is null."); } if (stream->direction == MEMORY_TO_PERIPHERAL) { DMA_InitStruct.MemoryOrM2MDstAddress = config->head_block->source_address; DMA_InitStruct.PeriphOrM2MSrcAddress = config->head_block->dest_address; } else { DMA_InitStruct.PeriphOrM2MSrcAddress = config->head_block->source_address; DMA_InitStruct.MemoryOrM2MDstAddress = config->head_block->dest_address; } uint16_t memory_addr_adj = 0, periph_addr_adj = 0; ret = dma_stm32_get_priority(config->channel_priority, &DMA_InitStruct.Priority); if (ret < 0) { return ret; } ret = dma_stm32_get_direction(config->channel_direction, &DMA_InitStruct.Direction); if (ret < 0) { return ret; } switch (config->channel_direction) { case MEMORY_TO_MEMORY: case PERIPHERAL_TO_MEMORY: memory_addr_adj = config->head_block->dest_addr_adj; periph_addr_adj = config->head_block->source_addr_adj; break; case MEMORY_TO_PERIPHERAL: memory_addr_adj = config->head_block->source_addr_adj; periph_addr_adj = config->head_block->dest_addr_adj; break; /* Direction has been asserted in dma_stm32_get_direction. */ default: LOG_ERR("Channel direction error (%d).", config->channel_direction); return -EINVAL; } ret = dma_stm32_get_memory_increment(memory_addr_adj, &DMA_InitStruct.MemoryOrM2MDstIncMode); if (ret < 0) { return ret; } LOG_DBG("Channel (%d) memory inc (%x).", id, DMA_InitStruct.MemoryOrM2MDstIncMode); ret = dma_stm32_get_periph_increment(periph_addr_adj, &DMA_InitStruct.PeriphOrM2MSrcIncMode); if (ret < 0) { return ret; } LOG_DBG("Channel (%d) peripheral inc (%x).", id, DMA_InitStruct.PeriphOrM2MSrcIncMode); if (stream->cyclic) { DMA_InitStruct.Mode = LL_DMA_MODE_CIRCULAR; } else { DMA_InitStruct.Mode = LL_DMA_MODE_NORMAL; } stream->source_periph = (stream->direction == PERIPHERAL_TO_MEMORY); /* set the data width, when source_data_size equals dest_data_size */ int index = find_lsb_set(config->source_data_size) - 1; DMA_InitStruct.PeriphOrM2MSrcDataSize = table_p_size[index]; index = find_lsb_set(config->dest_data_size) - 1; DMA_InitStruct.MemoryOrM2MDstDataSize = table_m_size[index]; #if defined(CONFIG_DMA_STM32_V1) DMA_InitStruct.MemBurst = stm32_dma_get_mburst(config, stream->source_periph); DMA_InitStruct.PeriphBurst = stm32_dma_get_pburst(config, stream->source_periph); #if !defined(CONFIG_SOC_SERIES_STM32H7X) && !defined(CONFIG_SOC_SERIES_STM32MP1X) if (config->channel_direction != MEMORY_TO_MEMORY) { if (config->dma_slot >= 8) { LOG_ERR("dma slot error."); return -EINVAL; } } else { if (config->dma_slot >= 8) { LOG_ERR("dma slot is too big, using 0 as default."); config->dma_slot = 0; } } DMA_InitStruct.Channel = dma_stm32_slot_to_channel(config->dma_slot); #endif DMA_InitStruct.FIFOThreshold = stm32_dma_get_fifo_threshold( config->head_block->fifo_mode_control); if (stm32_dma_check_fifo_mburst(&DMA_InitStruct)) { DMA_InitStruct.FIFOMode = LL_DMA_FIFOMODE_ENABLE; } else { 
		DMA_InitStruct.FIFOMode = LL_DMA_FIFOMODE_DISABLE;
	}
#endif
	if (stream->source_periph) {
		DMA_InitStruct.NbData = config->head_block->block_size /
					config->source_data_size;
	} else {
		DMA_InitStruct.NbData = config->head_block->block_size /
					config->dest_data_size;
	}
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_dma_v2) || DT_HAS_COMPAT_STATUS_OKAY(st_stm32_dmamux)
	/* With dma V2 and dmamux, the request ID is stored in the dma_slot */
	DMA_InitStruct.PeriphRequest = config->dma_slot;
#endif
	LL_DMA_Init(dma, dma_stm32_id_to_stream(id), &DMA_InitStruct);

	LL_DMA_EnableIT_TC(dma, dma_stm32_id_to_stream(id));

	/* Enable Half-Transfer irq if circular mode is enabled */
	if (stream->cyclic) {
		LL_DMA_EnableIT_HT(dma, dma_stm32_id_to_stream(id));
	}

#if defined(CONFIG_DMA_STM32_V1)
	if (DMA_InitStruct.FIFOMode == LL_DMA_FIFOMODE_ENABLE) {
		LL_DMA_EnableFifoMode(dma, dma_stm32_id_to_stream(id));
		LL_DMA_EnableIT_FE(dma, dma_stm32_id_to_stream(id));
	} else {
		LL_DMA_DisableFifoMode(dma, dma_stm32_id_to_stream(id));
		LL_DMA_DisableIT_FE(dma, dma_stm32_id_to_stream(id));
	}
#endif
	return ret;
}

DMA_STM32_EXPORT_API int dma_stm32_reload(const struct device *dev, uint32_t id,
					  uint32_t src, uint32_t dst, size_t size)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	stream = &config->streams[id];

	if (dma_stm32_disable_stream(dma, id) != 0) {
		return -EBUSY;
	}

	switch (stream->direction) {
	case MEMORY_TO_PERIPHERAL:
		LL_DMA_SetMemoryAddress(dma, dma_stm32_id_to_stream(id), src);
		LL_DMA_SetPeriphAddress(dma, dma_stm32_id_to_stream(id), dst);
		break;
	case MEMORY_TO_MEMORY:
	case PERIPHERAL_TO_MEMORY:
		LL_DMA_SetPeriphAddress(dma, dma_stm32_id_to_stream(id), src);
		LL_DMA_SetMemoryAddress(dma, dma_stm32_id_to_stream(id), dst);
		break;
	default:
		return -EINVAL;
	}

	if (stream->source_periph) {
		LL_DMA_SetDataLength(dma, dma_stm32_id_to_stream(id),
				     size / stream->src_size);
	} else {
		LL_DMA_SetDataLength(dma, dma_stm32_id_to_stream(id),
				     size / stream->dst_size);
	}

	/* When reloading the dma, the stream is busy again before enabling */
	stream->busy = true;

	stm32_dma_enable_stream(dma, id);

	return 0;
}

DMA_STM32_EXPORT_API int dma_stm32_start(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	/* Only M2P or M2M mode can be started manually. */
	if (id >= config->max_streams) {
		return -EINVAL;
	}

	/* Repeated start: return now if channel is already started */
	if (stm32_dma_is_enabled_stream(dma, id)) {
		return 0;
	}

	/* When starting the dma, the stream is busy before enabling */
	stream = &config->streams[id];
	stream->busy = true;

	dma_stm32_clear_stream_irq(dev, id);
	stm32_dma_enable_stream(dma, id);

	return 0;
}

DMA_STM32_EXPORT_API int dma_stm32_stop(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	struct dma_stm32_stream *stream = &config->streams[id - STM32_DMA_STREAM_OFFSET];
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	/* Repeated stop: return now if channel is already stopped */
	if (!stm32_dma_is_enabled_stream(dma, id)) {
		return 0;
	}

#if !defined(CONFIG_DMAMUX_STM32) \
	|| defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32MP1X)
	LL_DMA_DisableIT_TC(dma, dma_stm32_id_to_stream(id));
#endif /* CONFIG_DMAMUX_STM32 */

#if defined(CONFIG_DMA_STM32_V1)
	stm32_dma_disable_fifo_irq(dma, id);
#endif
	dma_stm32_clear_stream_irq(dev, id);
	dma_stm32_disable_stream(dma, id);

	/* Finally, flag stream as free */
	stream->busy = false;

	return 0;
}

static int dma_stm32_init(const struct device *dev)
{
	const struct dma_stm32_config *config = dev->config;
	const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);

	if (!device_is_ready(clk)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	if (clock_control_on(clk, (clock_control_subsys_t) &config->pclken) != 0) {
		LOG_ERR("clock op failed");
		return -EIO;
	}

	config->config_irq(dev);

	for (uint32_t i = 0; i < config->max_streams; i++) {
		config->streams[i].busy = false;
#ifdef CONFIG_DMAMUX_STM32
		/* Each further stream->mux_channel is fixed here */
		config->streams[i].mux_channel = i + config->offset;
#endif /* CONFIG_DMAMUX_STM32 */
	}

	((struct dma_stm32_data *)dev->data)->dma_ctx.magic = 0;
	((struct dma_stm32_data *)dev->data)->dma_ctx.dma_channels = 0;
	((struct dma_stm32_data *)dev->data)->dma_ctx.atomic = 0;

	return 0;
}

DMA_STM32_EXPORT_API int dma_stm32_get_status(const struct device *dev,
					      uint32_t id, struct dma_status *stat)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	stream = &config->streams[id];
	stat->pending_length = LL_DMA_GetDataLength(dma, dma_stm32_id_to_stream(id));
	stat->dir = stream->direction;
	stat->busy = stream->busy;

	return 0;
}

static const struct dma_driver_api dma_funcs = {
	.reload		 = dma_stm32_reload,
	.config		 = dma_stm32_configure,
	.start		 = dma_stm32_start,
	.stop		 = dma_stm32_stop,
	.get_status	 = dma_stm32_get_status,
};

#define DMA_STM32_INIT_DEV(index)					\
static struct dma_stm32_stream						\
	dma_stm32_streams_##index[DMA_STM32_##index##_STREAM_COUNT];	\
									\
const struct dma_stm32_config dma_stm32_config_##index = {		\
	.pclken = { .bus = DT_INST_CLOCKS_CELL(index, bus),		\
		    .enr = DT_INST_CLOCKS_CELL(index, bits) },		\
	.config_irq = dma_stm32_config_irq_##index,			\
	.base = DT_INST_REG_ADDR(index),				\
	IF_ENABLED(CONFIG_DMA_STM32_V1,					\
		(.support_m2m = DT_INST_PROP(index, st_mem2mem),))	\
	.max_streams = DMA_STM32_##index##_STREAM_COUNT,		\
	.streams = dma_stm32_streams_##index,				\
	IF_ENABLED(CONFIG_DMAMUX_STM32,					\
		(.offset = DT_INST_PROP(index, dma_offset),))		\
};									\
									\
static struct dma_stm32_data dma_stm32_data_##index = {		\
};									\
									\
DEVICE_DT_INST_DEFINE(index,						\
		    &dma_stm32_init,					\
		    NULL,						\
		    &dma_stm32_data_##index, &dma_stm32_config_##index,	\
		    PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,		\
		    &dma_funcs)

#ifdef CONFIG_DMA_STM32_SHARED_IRQS

#define DMA_STM32_DEFINE_IRQ_HANDLER(dma, chan) /* nothing */

#define DMA_STM32_IRQ_CONNECT(dma, chan)				\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(dma, chan, irq),		\
			    DT_INST_IRQ_BY_IDX(dma, chan, priority),	\
			    dma_stm32_shared_irq_handler,		\
			    DEVICE_DT_INST_GET(dma), 0);		\
		irq_enable(DT_INST_IRQ_BY_IDX(dma, chan, irq));		\
	} while (false)

#else /* CONFIG_DMA_STM32_SHARED_IRQS */

#define DMA_STM32_DEFINE_IRQ_HANDLER(dma, chan)				\
static void dma_stm32_irq_##dma##_##chan(const struct device *dev)	\
{									\
	dma_stm32_irq_handler(dev, chan);				\
}

#define DMA_STM32_IRQ_CONNECT(dma, chan)				\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(dma, chan, irq),		\
			    DT_INST_IRQ_BY_IDX(dma, chan, priority),	\
			    dma_stm32_irq_##dma##_##chan,		\
			    DEVICE_DT_INST_GET(dma), 0);		\
		irq_enable(DT_INST_IRQ_BY_IDX(dma, chan, irq));		\
	} while (false)

#endif /* CONFIG_DMA_STM32_SHARED_IRQS */

#if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay)

DMA_STM32_DEFINE_IRQ_HANDLER(0, 0);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 1);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 2);
#if DT_INST_IRQ_HAS_IDX(0, 3)
DMA_STM32_DEFINE_IRQ_HANDLER(0, 3);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 4);
#if DT_INST_IRQ_HAS_IDX(0, 5)
DMA_STM32_DEFINE_IRQ_HANDLER(0, 5);
#if DT_INST_IRQ_HAS_IDX(0, 6)
DMA_STM32_DEFINE_IRQ_HANDLER(0, 6);
#if DT_INST_IRQ_HAS_IDX(0, 7)
DMA_STM32_DEFINE_IRQ_HANDLER(0, 7);
#endif /* DT_INST_IRQ_HAS_IDX(0, 7) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 6) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 5) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 3) */

static void dma_stm32_config_irq_0(const struct device *dev)
{
	ARG_UNUSED(dev);

	DMA_STM32_IRQ_CONNECT(0, 0);
	DMA_STM32_IRQ_CONNECT(0, 1);
#ifndef CONFIG_DMA_STM32_SHARED_IRQS
	DMA_STM32_IRQ_CONNECT(0, 2);
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
#if DT_INST_IRQ_HAS_IDX(0, 3)
	DMA_STM32_IRQ_CONNECT(0, 3);
#ifndef CONFIG_DMA_STM32_SHARED_IRQS
	DMA_STM32_IRQ_CONNECT(0, 4);
#if DT_INST_IRQ_HAS_IDX(0, 5)
	DMA_STM32_IRQ_CONNECT(0, 5);
#if DT_INST_IRQ_HAS_IDX(0, 6)
	DMA_STM32_IRQ_CONNECT(0, 6);
#if DT_INST_IRQ_HAS_IDX(0, 7)
	DMA_STM32_IRQ_CONNECT(0, 7);
#endif /* DT_INST_IRQ_HAS_IDX(0, 7) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 6) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 5) */
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
#endif /* DT_INST_IRQ_HAS_IDX(0, 3) */
	/* Either 3 or 5 or 6 or 7 or 8 channels for DMA across all stm32 series. */
}

DMA_STM32_INIT_DEV(0);

#endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */

#if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay)

DMA_STM32_DEFINE_IRQ_HANDLER(1, 0);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 1);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 2);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 3);
#if DT_INST_IRQ_HAS_IDX(1, 4)
DMA_STM32_DEFINE_IRQ_HANDLER(1, 4);
#if DT_INST_IRQ_HAS_IDX(1, 5)
DMA_STM32_DEFINE_IRQ_HANDLER(1, 5);
#if DT_INST_IRQ_HAS_IDX(1, 6)
DMA_STM32_DEFINE_IRQ_HANDLER(1, 6);
#if DT_INST_IRQ_HAS_IDX(1, 7)
DMA_STM32_DEFINE_IRQ_HANDLER(1, 7);
#endif /* DT_INST_IRQ_HAS_IDX(1, 7) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 6) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 5) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 4) */

static void dma_stm32_config_irq_1(const struct device *dev)
{
	ARG_UNUSED(dev);

#ifndef CONFIG_DMA_STM32_SHARED_IRQS
	DMA_STM32_IRQ_CONNECT(1, 0);
	DMA_STM32_IRQ_CONNECT(1, 1);
	DMA_STM32_IRQ_CONNECT(1, 2);
	DMA_STM32_IRQ_CONNECT(1, 3);
#if DT_INST_IRQ_HAS_IDX(1, 4)
	DMA_STM32_IRQ_CONNECT(1, 4);
#if DT_INST_IRQ_HAS_IDX(1, 5)
	DMA_STM32_IRQ_CONNECT(1, 5);
#if DT_INST_IRQ_HAS_IDX(1, 6)
	DMA_STM32_IRQ_CONNECT(1, 6);
#if DT_INST_IRQ_HAS_IDX(1, 7)
	DMA_STM32_IRQ_CONNECT(1, 7);
#endif /* DT_INST_IRQ_HAS_IDX(1, 7) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 6) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 5) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 4) */
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
	/*
	 * Either 5 or 6 or 7 or 8 channels for DMA across all stm32 series.
	 * STM32F0 and STM32G0: if dma2 exists, the channel interrupts overlap with dma1
	 */
}

DMA_STM32_INIT_DEV(1);

#endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) */
```
/content/code_sandbox/drivers/dma/dma_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,109
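A quick usage sketch for the shim above, through the generic Zephyr DMA API. This is a minimal sketch, assuming a devicetree node labeled `dma1`, channel `1` (the driver subtracts STM32_DMA_STREAM_OFFSET, so ids are 1-based on series where that offset is 1), and a part where memory-to-memory is available (DMA V1 with `st,mem2mem`); it is not taken from the driver's own tests.

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Hypothetical one-shot memory-to-memory copy using dma_stm32 */
static const struct device *const dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma1));

int copy_with_dma(uint8_t *dst, const uint8_t *src, size_t len)
{
	struct dma_block_config blk = {
		.source_address = (uint32_t)src,
		.dest_address = (uint32_t)dst,
		.block_size = len,	/* bytes here, since data size is 1 */
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 1,	/* byte-wide accesses */
		.dest_data_size = 1,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.block_count = 1,
		.head_block = &blk,
	};
	int ret = dma_config(dma_dev, 1, &cfg);

	if (ret == 0) {
		ret = dma_start(dma_dev, 1);
	}
	return ret;
}
```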
```objective-c /* * */ #ifndef DMA_STM32_H_ #define DMA_STM32_H_ #include <soc.h> #include <stm32_ll_dma.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> /* Maximum data sent in single transfer (Bytes) */ #define DMA_STM32_MAX_DATA_ITEMS 0xffff struct dma_stm32_stream { uint32_t direction; #ifdef CONFIG_DMAMUX_STM32 int mux_channel; /* stores the dmamux channel */ #endif /* CONFIG_DMAMUX_STM32 */ bool source_periph; bool hal_override; volatile bool busy; uint32_t src_size; uint32_t dst_size; void *user_data; /* holds the client data */ dma_callback_t dma_callback; bool cyclic; }; struct dma_stm32_data { struct dma_context dma_ctx; }; struct dma_stm32_config { struct stm32_pclken pclken; void (*config_irq)(const struct device *dev); bool support_m2m; uint32_t base; uint32_t max_streams; #ifdef CONFIG_DMAMUX_STM32 uint8_t offset; /* position in the list of dmamux channel list */ #endif struct dma_stm32_stream *streams; }; uint32_t dma_stm32_id_to_stream(uint32_t id); #if !defined(CONFIG_DMAMUX_STM32) uint32_t dma_stm32_slot_to_channel(uint32_t id); #endif typedef void (*dma_stm32_clear_flag_func)(DMA_TypeDef *DMAx); #if !defined(CONFIG_SOC_SERIES_STM32G0X) typedef uint32_t (*dma_stm32_check_flag_func)(DMA_TypeDef *DMAx); #else typedef uint32_t (*dma_stm32_check_flag_func)(const DMA_TypeDef *DMAx); #endif bool dma_stm32_is_tc_active(DMA_TypeDef *DMAx, uint32_t id); void dma_stm32_clear_tc(DMA_TypeDef *DMAx, uint32_t id); bool dma_stm32_is_ht_active(DMA_TypeDef *DMAx, uint32_t id); void dma_stm32_clear_ht(DMA_TypeDef *DMAx, uint32_t id); bool dma_stm32_is_te_active(DMA_TypeDef *DMAx, uint32_t id); void dma_stm32_clear_te(DMA_TypeDef *DMAx, uint32_t id); #ifdef CONFIG_DMA_STM32_V1 bool dma_stm32_is_dme_active(DMA_TypeDef *DMAx, uint32_t id); void dma_stm32_clear_dme(DMA_TypeDef *DMAx, uint32_t id); bool dma_stm32_is_fe_active(DMA_TypeDef *DMAx, uint32_t id); void dma_stm32_clear_fe(DMA_TypeDef *DMAx, uint32_t id); #endif #ifdef CONFIG_DMA_STM32_V2 bool dma_stm32_is_gi_active(DMA_TypeDef *DMAx, uint32_t id); void dma_stm32_clear_gi(DMA_TypeDef *DMAx, uint32_t id); #endif bool stm32_dma_is_irq_active(DMA_TypeDef *dma, uint32_t id); bool stm32_dma_is_ht_irq_active(DMA_TypeDef *dma, uint32_t id); bool stm32_dma_is_tc_irq_active(DMA_TypeDef *dma, uint32_t id); void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id); void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id); bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, uint32_t id); bool stm32_dma_is_unexpected_irq_happened(DMA_TypeDef *dma, uint32_t id); void stm32_dma_enable_stream(DMA_TypeDef *dma, uint32_t id); bool stm32_dma_is_enabled_stream(DMA_TypeDef *dma, uint32_t id); int stm32_dma_disable_stream(DMA_TypeDef *dma, uint32_t id); #if !defined(CONFIG_DMAMUX_STM32) void stm32_dma_config_channel_function(DMA_TypeDef *dma, uint32_t id, uint32_t slot); #endif #ifdef CONFIG_DMA_STM32_V1 void stm32_dma_disable_fifo_irq(DMA_TypeDef *dma, uint32_t id); bool stm32_dma_check_fifo_mburst(LL_DMA_InitTypeDef *DMAx); uint32_t stm32_dma_get_fifo_threshold(uint16_t fifo_mode_control); uint32_t stm32_dma_get_mburst(struct dma_config *config, bool source_periph); uint32_t stm32_dma_get_pburst(struct dma_config *config, bool source_periph); #endif #ifdef CONFIG_DMAMUX_STM32 /* dma_stm32_ api functions are exported to the dmamux_stm32 */ #define DMA_STM32_EXPORT_API int dma_stm32_configure(const struct device *dev, uint32_t id, struct dma_config *config); int dma_stm32_reload(const struct device 
*dev, uint32_t id, uint32_t src, uint32_t dst, size_t size); int dma_stm32_start(const struct device *dev, uint32_t id); int dma_stm32_stop(const struct device *dev, uint32_t id); int dma_stm32_get_status(const struct device *dev, uint32_t id, struct dma_status *stat); #else #define DMA_STM32_EXPORT_API static #endif /* CONFIG_DMAMUX_STM32 */ #endif /* DMA_STM32_H_*/ ```
/content/code_sandbox/drivers/dma/dma_stm32.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,149
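One practical consequence of DMA_STM32_MAX_DATA_ITEMS above: the stream's item counter is 16 bits wide, so larger buffers must be fed in chunks. A sketch of that loop, assuming the channel was already configured once with dma_config() and relying on the behavior visible in dma_stm32_reload() earlier (reload re-enables the stream itself, so no dma_start() is needed between chunks); the completion wait is left abstract.

```c
#include <zephyr/drivers/dma.h>
#include <zephyr/sys/util.h>

/* Sketch: feed a large byte-wide transfer in <= 0xffff-item chunks */
static int reload_in_chunks(const struct device *dma_dev, uint32_t channel,
			    uint32_t src, uint32_t dst, size_t len)
{
	while (len > 0) {
		size_t chunk = MIN(len, (size_t)DMA_STM32_MAX_DATA_ITEMS);
		int ret = dma_reload(dma_dev, channel, src, dst, chunk);

		if (ret < 0) {
			return ret;
		}
		/* ... block here until the completion callback fires ... */
		src += chunk;
		dst += chunk;
		len -= chunk;
	}
	return 0;
}
```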
```c /* * */ /** * @brief Intel ADSP HDA DMA (Stream) driver * * HDA is effectively, from the DSP, a ringbuffer (fifo) where the read * and write positions are maintained by the hardware and the software may * commit read/writes by writing to another register (DGFPBI) the length of * the read or write. * * It's important that the software knows the position in the ringbuffer to read * or write from. It's also important that the buffer be placed in the correct * memory region and aligned to 128 bytes. Lastly it's important the host and * dsp coordinate the order in which operations takes place. Doing all that * HDA streams are a fantastic bit of hardware and do their job well. * * There are 4 types of streams, with a set of each available to be used to * communicate to or from the Host or Link. Each stream set is uni directional. */ #include <zephyr/drivers/dma.h> #include "dma_intel_adsp_hda.h" #include <intel_adsp_hda.h> int intel_adsp_hda_dma_host_in_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; struct dma_block_config *blk_cfg; uint8_t *buf; int res; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); __ASSERT(dma_cfg->block_count == 1, "HDA does not support scatter gather or chained " "block transfers."); __ASSERT(dma_cfg->channel_direction == cfg->direction, "Unexpected channel direction, HDA host in supports " "MEMORY_TO_HOST"); blk_cfg = dma_cfg->head_block; buf = (uint8_t *)(uintptr_t)(blk_cfg->source_address); res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf, blk_cfg->block_size); if (res == 0) { *DGMBS(cfg->base, cfg->regblock_size, channel) = blk_cfg->block_size & HDA_ALIGN_MASK; intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel, dma_cfg->source_data_size); } return res; } int intel_adsp_hda_dma_host_out_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; struct dma_block_config *blk_cfg; uint8_t *buf; int res; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); __ASSERT(dma_cfg->block_count == 1, "HDA does not support scatter gather or chained " "block transfers."); __ASSERT(dma_cfg->channel_direction == cfg->direction, "Unexpected channel direction, HDA host out supports " "HOST_TO_MEMORY"); blk_cfg = dma_cfg->head_block; buf = (uint8_t *)(uintptr_t)(blk_cfg->dest_address); res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf, blk_cfg->block_size); if (res == 0) { *DGMBS(cfg->base, cfg->regblock_size, channel) = blk_cfg->block_size & HDA_ALIGN_MASK; intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel, dma_cfg->dest_data_size); } return res; } int intel_adsp_hda_dma_link_in_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; struct dma_block_config *blk_cfg; uint8_t *buf; int res; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); __ASSERT(dma_cfg->block_count == 1, "HDA does not support scatter gather or chained " "block transfers."); __ASSERT(dma_cfg->channel_direction == cfg->direction, "Unexpected channel direction, HDA link in supports " "PERIPHERAL_TO_MEMORY"); blk_cfg = dma_cfg->head_block; buf = (uint8_t *)(uintptr_t)(blk_cfg->dest_address); res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf, blk_cfg->block_size); if (res == 
0) { intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel, dma_cfg->dest_data_size); } return res; } int intel_adsp_hda_dma_link_out_config(const struct device *dev, uint32_t channel, struct dma_config *dma_cfg) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; struct dma_block_config *blk_cfg; uint8_t *buf; int res; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); __ASSERT(dma_cfg->block_count == 1, "HDA does not support scatter gather or chained " "block transfers."); __ASSERT(dma_cfg->channel_direction == cfg->direction, "Unexpected channel direction, HDA link out supports " "MEMORY_TO_PERIPHERAL"); blk_cfg = dma_cfg->head_block; buf = (uint8_t *)(uintptr_t)(blk_cfg->source_address); res = intel_adsp_hda_set_buffer(cfg->base, cfg->regblock_size, channel, buf, blk_cfg->block_size); if (res == 0) { intel_adsp_hda_set_sample_container_size(cfg->base, cfg->regblock_size, channel, dma_cfg->source_data_size); } return res; } int intel_adsp_hda_dma_link_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, channel, size); return 0; } int intel_adsp_hda_dma_host_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); #if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT const size_t buf_size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size, channel); if (!buf_size) { return -EIO; } intel_adsp_force_dmi_l0_state(); switch (cfg->direction) { case HOST_TO_MEMORY: ; /* Only statements can be labeled in C, a declaration is not valid */ const uint32_t rp = *DGBRP(cfg->base, cfg->regblock_size, channel); const uint32_t next_rp = (rp + INTEL_HDA_MIN_FPI_INCREMENT_FOR_INTERRUPT) % buf_size; intel_adsp_hda_set_buffer_segment_ptr(cfg->base, cfg->regblock_size, channel, next_rp); intel_adsp_hda_enable_buffer_interrupt(cfg->base, cfg->regblock_size, channel); break; case MEMORY_TO_HOST: ; const uint32_t wp = *DGBWP(cfg->base, cfg->regblock_size, channel); const uint32_t next_wp = (wp + INTEL_HDA_MIN_FPI_INCREMENT_FOR_INTERRUPT) % buf_size; intel_adsp_hda_set_buffer_segment_ptr(cfg->base, cfg->regblock_size, channel, next_wp); intel_adsp_hda_enable_buffer_interrupt(cfg->base, cfg->regblock_size, channel); break; default: break; } #endif intel_adsp_hda_host_commit(cfg->base, cfg->regblock_size, channel, size); return 0; } int intel_adsp_hda_dma_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; bool xrun_det; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); uint32_t unused = intel_adsp_hda_unused(cfg->base, cfg->regblock_size, channel); uint32_t used = *DGBS(cfg->base, cfg->regblock_size, channel) - unused; stat->dir = cfg->direction; stat->busy = *DGCS(cfg->base, cfg->regblock_size, channel) & DGCS_GBUSY; stat->write_position = *DGBWP(cfg->base, cfg->regblock_size, channel); stat->read_position = *DGBRP(cfg->base, cfg->regblock_size, channel); stat->pending_length = used; stat->free = unused; switch (cfg->direction) { case MEMORY_TO_PERIPHERAL: xrun_det = intel_adsp_hda_is_buffer_underrun(cfg->base, cfg->regblock_size, channel); if (xrun_det) { 
intel_adsp_hda_underrun_clear(cfg->base, cfg->regblock_size, channel); return -EPIPE; } break; case PERIPHERAL_TO_MEMORY: xrun_det = intel_adsp_hda_is_buffer_overrun(cfg->base, cfg->regblock_size, channel); if (xrun_det) { intel_adsp_hda_overrun_clear(cfg->base, cfg->regblock_size, channel); return -EPIPE; } break; default: break; } return 0; } bool intel_adsp_hda_dma_chan_filter(const struct device *dev, int channel, void *filter_param) { uint32_t requested_channel; if (!filter_param) { return true; } requested_channel = *(uint32_t *)filter_param; if (channel == requested_channel) { return true; } return false; } int intel_adsp_hda_dma_start(const struct device *dev, uint32_t channel) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; uint32_t size; bool set_fifordy; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); #if CONFIG_PM_DEVICE_RUNTIME bool first_use = false; enum pm_device_state state; /* If the device is used for the first time, we need to let the power domain know that * we want to use it. */ if (pm_device_state_get(dev, &state) == 0) { first_use = state != PM_DEVICE_STATE_ACTIVE; if (first_use) { int ret = pm_device_runtime_get(dev); if (ret < 0) { return ret; } } } #endif if (intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel)) { return 0; } set_fifordy = (cfg->direction == HOST_TO_MEMORY || cfg->direction == MEMORY_TO_HOST); intel_adsp_hda_enable(cfg->base, cfg->regblock_size, channel, set_fifordy); if (cfg->direction == MEMORY_TO_PERIPHERAL) { size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size, channel); intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, channel, size); } #if CONFIG_PM_DEVICE_RUNTIME if (!first_use) { return pm_device_runtime_get(dev); } #endif return 0; } int intel_adsp_hda_dma_stop(const struct device *dev, uint32_t channel) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; __ASSERT(channel < cfg->dma_channels, "Channel does not exist"); if (!intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel)) { return 0; } intel_adsp_hda_disable(cfg->base, cfg->regblock_size, channel); if (!WAIT_FOR(!intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, channel), 1000, k_busy_wait(1))) { return -EBUSY; } return pm_device_runtime_put(dev); } static void intel_adsp_hda_channels_init(const struct device *dev) { const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; for (uint32_t i = 0; i < cfg->dma_channels; i++) { intel_adsp_hda_init(cfg->base, cfg->regblock_size, i); if (intel_adsp_hda_is_enabled(cfg->base, cfg->regblock_size, i)) { uint32_t size; size = intel_adsp_hda_get_buffer_size(cfg->base, cfg->regblock_size, i); intel_adsp_hda_disable(cfg->base, cfg->regblock_size, i); intel_adsp_hda_link_commit(cfg->base, cfg->regblock_size, i, size); } } #if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT /* Configure interrupts */ if (cfg->irq_config) { cfg->irq_config(); } #endif } int intel_adsp_hda_dma_init(const struct device *dev) { struct intel_adsp_hda_dma_data *data = dev->data; const struct intel_adsp_hda_dma_cfg *const cfg = dev->config; data->ctx.dma_channels = cfg->dma_channels; data->ctx.atomic = data->channels_atomic; data->ctx.magic = DMA_MAGIC; #ifdef CONFIG_PM_DEVICE_RUNTIME if (pm_device_on_power_domain(dev)) { pm_device_init_off(dev); } else { intel_adsp_hda_channels_init(dev); pm_device_init_suspended(dev); } return pm_device_runtime_enable(dev); #else intel_adsp_hda_channels_init(dev); return 0; #endif } int intel_adsp_hda_dma_get_attribute(const struct device 
*dev, uint32_t type, uint32_t *value) { switch (type) { case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT: *value = DMA_BUF_ADDR_ALIGNMENT( DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out)); break; case DMA_ATTR_BUFFER_SIZE_ALIGNMENT: *value = DMA_BUF_SIZE_ALIGNMENT( DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out)); break; case DMA_ATTR_COPY_ALIGNMENT: *value = DMA_COPY_ALIGNMENT(DT_COMPAT_GET_ANY_STATUS_OKAY(intel_adsp_hda_link_out)); break; case DMA_ATTR_MAX_BLOCK_COUNT: *value = 1; break; default: return -EINVAL; } return 0; } #ifdef CONFIG_PM_DEVICE int intel_adsp_hda_dma_pm_action(const struct device *dev, enum pm_device_action action) { switch (action) { case PM_DEVICE_ACTION_RESUME: intel_adsp_hda_channels_init(dev); break; case PM_DEVICE_ACTION_SUSPEND: case PM_DEVICE_ACTION_TURN_ON: case PM_DEVICE_ACTION_TURN_OFF: break; default: return -ENOTSUP; } return 0; } #endif #define DEVICE_DT_GET_AND_COMMA(node_id) DEVICE_DT_GET(node_id), void intel_adsp_hda_dma_isr(void) { #if CONFIG_DMA_INTEL_ADSP_HDA_TIMING_L1_EXIT struct dma_context *dma_ctx; const struct intel_adsp_hda_dma_cfg *cfg; bool triggered_interrupts = false; int i, j; int expected_interrupts = 0; const struct device *host_dev[] = { #if CONFIG_DMA_INTEL_ADSP_HDA_HOST_OUT DT_FOREACH_STATUS_OKAY(intel_adsp_hda_host_out, DEVICE_DT_GET_AND_COMMA) #endif #if CONFIG_DMA_INTEL_ADSP_HDA_HOST_IN DT_FOREACH_STATUS_OKAY(intel_adsp_hda_host_in, DEVICE_DT_GET_AND_COMMA) #endif }; /* * To initiate transfer, DSP must be in L0 state. Once the transfer is started, DSP can go * to the low power L1 state, and the transfer will be able to continue and finish in L1 * state. Interrupts are configured to trigger after the first 32 bytes of data arrive. * Once such an interrupt arrives, the transfer has already started. If all expected * transfers have started, it is safe to allow the low power L1 state. */ for (i = 0; i < ARRAY_SIZE(host_dev); i++) { dma_ctx = (struct dma_context *)host_dev[i]->data; cfg = host_dev[i]->config; for (j = 0; j < dma_ctx->dma_channels; j++) { if (!atomic_test_bit(dma_ctx->atomic, j)) continue; if (!intel_adsp_hda_is_buffer_interrupt_enabled(cfg->base, cfg->regblock_size, j)) continue; if (intel_adsp_hda_check_buffer_interrupt(cfg->base, cfg->regblock_size, j)) { triggered_interrupts = true; intel_adsp_hda_disable_buffer_interrupt(cfg->base, cfg->regblock_size, j); intel_adsp_hda_clear_buffer_interrupt(cfg->base, cfg->regblock_size, j); } else { expected_interrupts++; } } } /* * Allow entering low power L1 state only after all enabled interrupts arrived, i.e., * transfers started on all channels. */ if (triggered_interrupts && expected_interrupts == 0) { intel_adsp_allow_dmi_l1_state(); } #endif } ```
/content/code_sandbox/drivers/dma/dma_intel_adsp_hda.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,912
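The ring position bookkeeping the file's opening comment describes can be made concrete with a small standalone model. This is illustrative only, not driver code: in the real driver the positions come from the DGBRP/DGBWP registers (read in intel_adsp_hda_dma_status() above) and commits go through the length-commit register the comment calls DGFPBI.

```c
#include <stdint.h>

/* Standalone model of the HDA stream ringbuffer; positions are assumed
 * to stay below buf_size, as the hardware pointers do.
 */
struct hda_ring_model {
	uint32_t rp;		/* read position */
	uint32_t wp;		/* write position */
	uint32_t buf_size;	/* bytes, 128-byte aligned in hardware */
};

static uint32_t ring_used(const struct hda_ring_model *r)
{
	return (r->wp - r->rp + r->buf_size) % r->buf_size;
}

static uint32_t ring_free(const struct hda_ring_model *r)
{
	/* this model keeps one byte unused so full and empty differ;
	 * the real hardware tracks fullness itself
	 */
	return r->buf_size - ring_used(r) - 1;
}

static void ring_commit_write(struct hda_ring_model *r, uint32_t len)
{
	/* models committing a write of len bytes via the commit register */
	r->wp = (r->wp + len) % r->buf_size;
}
```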
```c /* * */ #define DT_DRV_COMPAT snps_designware_dma #include <errno.h> #include <stdio.h> #include <string.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/dma.h> #include <soc.h> #include "dma_dw_common.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(dma_dw, CONFIG_DMA_LOG_LEVEL); /* Device constant configuration parameters */ struct dw_dma_cfg { struct dw_dma_dev_cfg dw_cfg; void (*irq_config)(void); }; static int dw_dma_init(const struct device *dev) { const struct dw_dma_cfg *const dev_cfg = dev->config; /* Disable all channels and Channel interrupts */ int ret = dw_dma_setup(dev); if (ret != 0) { LOG_ERR("failed to initialize DW DMA %s", dev->name); goto out; } /* Configure interrupts */ dev_cfg->irq_config(); LOG_INF("Device %s initialized", dev->name); out: return ret; } static const struct dma_driver_api dw_dma_driver_api = { .config = dw_dma_config, .start = dw_dma_start, .stop = dw_dma_stop, }; #define DW_DMAC_INIT(inst) \ \ static struct dw_drv_plat_data dmac##inst = { \ .chan[0] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[1] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[2] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[3] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[4] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[5] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[6] = { \ .class = 6, \ .weight = 0, \ }, \ .chan[7] = { \ .class = 6, \ .weight = 0, \ }, \ }; \ \ static void dw_dma##inst##_irq_config(void); \ \ static const struct dw_dma_cfg dw_dma##inst##_config = { \ .dw_cfg = { \ .base = DT_INST_REG_ADDR(inst), \ }, \ .irq_config = dw_dma##inst##_irq_config \ }; \ \ static struct dw_dma_dev_data dw_dma##inst##_data = { \ .channel_data = &dmac##inst, \ }; \ \ DEVICE_DT_INST_DEFINE(inst, \ &dw_dma_init, \ NULL, \ &dw_dma##inst##_data, \ &dw_dma##inst##_config, POST_KERNEL, \ CONFIG_DMA_INIT_PRIORITY, \ &dw_dma_driver_api); \ \ static void dw_dma##inst##_irq_config(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), \ DT_INST_IRQ(inst, priority), dw_dma_isr, \ DEVICE_DT_INST_GET(inst), \ DT_INST_IRQ(inst, sense)); \ irq_enable(DT_INST_IRQN(inst)); \ } DT_INST_FOREACH_STATUS_OKAY(DW_DMAC_INIT) ```
/content/code_sandbox/drivers/dma/dma_dw.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
910
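The eight identical `.chan[n]` initializers in DW_DMAC_INIT invite a helper macro. A refactoring sketch only, assuming the `dw_drv_plat_data` layout from dma_dw_common.h and an eight-channel controller; `dmac0` stands in for the per-instance `dmac##inst`:

```c
#include <zephyr/sys/util_macro.h>
#include "dma_dw_common.h"

/* Generates [i] = { .class = 6, .weight = 0 } for each channel index */
#define DW_CHAN_INIT(i, _) \
	[i] = { .class = 6, .weight = 0 }

static struct dw_drv_plat_data dmac0 = {
	.chan = { LISTIFY(8, DW_CHAN_INIT, (,)) },
};
```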
```c /* * */ #include <errno.h> #include <stdio.h> #include <string.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/dma.h> #define DT_DRV_COMPAT andestech_atcdmac300 #define LOG_LEVEL CONFIG_DMA_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dma_andes_atcdmac300); #define ATCDMAC100_MAX_CHAN 8 #define DMA_ABORT(dev) (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x24) #define DMA_INT_STATUS(dev) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x30) #define DMA_CH_OFFSET(ch) (ch * 0x20) #define DMA_CH_CTRL(dev, ch) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x40 + DMA_CH_OFFSET(ch)) #define DMA_CH_TRANSIZE(dev, ch) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x44 + DMA_CH_OFFSET(ch)) #define DMA_CH_SRC_ADDR_L(dev, ch) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x48 + DMA_CH_OFFSET(ch)) #define DMA_CH_SRC_ADDR_H(dev, ch) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x4C + DMA_CH_OFFSET(ch)) #define DMA_CH_DST_ADDR_L(dev, ch) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x50 + DMA_CH_OFFSET(ch)) #define DMA_CH_DST_ADDR_H(dev, ch) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x54 + DMA_CH_OFFSET(ch)) #define DMA_CH_LL_PTR_L(dev, ch) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x58 + DMA_CH_OFFSET(ch)) #define DMA_CH_LL_PTR_H(dev, ch) \ (((struct dma_atcdmac300_cfg *)dev->config)->base + 0x5C + DMA_CH_OFFSET(ch)) /* Source burst size options */ #define DMA_BSIZE_1 (0) #define DMA_BSIZE_2 (1) #define DMA_BSIZE_4 (2) #define DMA_BSIZE_8 (3) #define DMA_BSIZE_16 (4) #define DMA_BSIZE_32 (5) #define DMA_BSIZE_64 (6) #define DMA_BSIZE_128 (7) #define DMA_BSIZE_256 (8) #define DMA_BSIZE_512 (9) #define DMA_BSIZE_1024 (10) /* Source/Destination transfer width options */ #define DMA_WIDTH_BYTE (0) #define DMA_WIDTH_HALFWORD (1) #define DMA_WIDTH_WORD (2) #define DMA_WIDTH_DWORD (3) #define DMA_WIDTH_QWORD (4) #define DMA_WIDTH_EWORD (5) /* Bus interface index */ #define DMA_INF_IDX0 (0) #define DMA_INF_IDX1 (1) /* DMA Channel Control Register Definition */ #define DMA_CH_CTRL_SBINF_MASK BIT(31) #define DMA_CH_CTRL_DBINF_MASK BIT(30) #define DMA_CH_CTRL_PRIORITY_HIGH BIT(29) #define DMA_CH_CTRL_SBSIZE_MASK GENMASK(27, 24) #define DMA_CH_CTRL_SBSIZE(n) FIELD_PREP(DMA_CH_CTRL_SBSIZE_MASK, (n)) #define DMA_CH_CTRL_SWIDTH_MASK GENMASK(23, 21) #define DMA_CH_CTRL_SWIDTH(n) FIELD_PREP(DMA_CH_CTRL_SWIDTH_MASK, (n)) #define DMA_CH_CTRL_DWIDTH_MASK GENMASK(20, 18) #define DMA_CH_CTRL_DWIDTH(n) FIELD_PREP(DMA_CH_CTRL_DWIDTH_MASK, (n)) #define DMA_CH_CTRL_SMODE_HANDSHAKE BIT(17) #define DMA_CH_CTRL_DMODE_HANDSHAKE BIT(16) #define DMA_CH_CTRL_SRCADDRCTRL_MASK GENMASK(15, 14) #define DMA_CH_CTRL_SRCADDR_INC FIELD_PREP(DMA_CH_CTRL_SRCADDRCTRL_MASK, (0)) #define DMA_CH_CTRL_SRCADDR_DEC FIELD_PREP(DMA_CH_CTRL_SRCADDRCTRL_MASK, (1)) #define DMA_CH_CTRL_SRCADDR_FIX FIELD_PREP(DMA_CH_CTRL_SRCADDRCTRL_MASK, (2)) #define DMA_CH_CTRL_DSTADDRCTRL_MASK GENMASK(13, 12) #define DMA_CH_CTRL_DSTADDR_INC FIELD_PREP(DMA_CH_CTRL_DSTADDRCTRL_MASK, (0)) #define DMA_CH_CTRL_DSTADDR_DEC FIELD_PREP(DMA_CH_CTRL_DSTADDRCTRL_MASK, (1)) #define DMA_CH_CTRL_DSTADDR_FIX FIELD_PREP(DMA_CH_CTRL_DSTADDRCTRL_MASK, (2)) #define DMA_CH_CTRL_SRCREQ_MASK GENMASK(11, 8) #define DMA_CH_CTRL_SRCREQ(n) FIELD_PREP(DMA_CH_CTRL_SRCREQ_MASK, (n)) #define DMA_CH_CTRL_DSTREQ_MASK GENMASK(7, 4) #define DMA_CH_CTRL_DSTREQ(n) FIELD_PREP(DMA_CH_CTRL_DSTREQ_MASK, (n)) #define DMA_CH_CTRL_INTABT BIT(3) #define 
DMA_CH_CTRL_INTERR BIT(2) #define DMA_CH_CTRL_INTTC BIT(1) #define DMA_CH_CTRL_ENABLE BIT(0) /* DMA Interrupt Status Register Definition */ #define DMA_INT_STATUS_TC_MASK GENMASK(23, 16) #define DMA_INT_STATUS_ABORT_MASK GENMASK(15, 8) #define DMA_INT_STATUS_ERROR_MASK GENMASK(7, 0) #define DMA_INT_STATUS_TC_VAL(x) FIELD_GET(DMA_INT_STATUS_TC_MASK, (x)) #define DMA_INT_STATUS_ABORT_VAL(x) FIELD_GET(DMA_INT_STATUS_ABORT_MASK, (x)) #define DMA_INT_STATUS_ERROR_VAL(x) FIELD_GET(DMA_INT_STATUS_ERROR_MASK, (x)) #define DMA_INT_STATUS_CH_MSK(ch) (0x111 << ch) typedef void (*atcdmac300_cfg_func_t)(void); struct chain_block { uint32_t ctrl; uint32_t transize; uint32_t srcaddrl; uint32_t srcaddrh; uint32_t dstaddrl; uint32_t dstaddrh; uint32_t llpointerl; uint32_t llpointerh; #if __riscv_xlen == 32 uint32_t reserved; #endif struct chain_block *next_block; }; /* data for each DMA channel */ struct dma_chan_data { void *blkuser_data; dma_callback_t blkcallback; struct chain_block *head_block; struct dma_status status; }; /* Device run time data */ struct dma_atcdmac300_data { struct dma_chan_data chan[ATCDMAC100_MAX_CHAN]; struct k_spinlock lock; }; /* Device constant configuration parameters */ struct dma_atcdmac300_cfg { atcdmac300_cfg_func_t irq_config; uint32_t base; uint32_t irq_num; }; static struct __aligned(64) chain_block dma_chain[ATCDMAC100_MAX_CHAN][sizeof(struct chain_block) * 16]; static void dma_atcdmac300_isr(const struct device *dev) { uint32_t int_status, int_ch_status, channel; struct dma_atcdmac300_data *const data = dev->data; struct dma_chan_data *ch_data; k_spinlock_key_t key; key = k_spin_lock(&data->lock); int_status = sys_read32(DMA_INT_STATUS(dev)); /* Clear interrupt*/ sys_write32(int_status, DMA_INT_STATUS(dev)); k_spin_unlock(&data->lock, key); /* Handle terminal count status */ int_ch_status = DMA_INT_STATUS_TC_VAL(int_status); while (int_ch_status) { channel = find_msb_set(int_ch_status) - 1; int_ch_status &= ~(BIT(channel)); ch_data = &data->chan[channel]; if (ch_data->blkcallback) { ch_data->blkcallback(dev, ch_data->blkuser_data, channel, 0); } data->chan[channel].status.busy = false; } /* Handle error status */ int_ch_status = DMA_INT_STATUS_ERROR_VAL(int_status); while (int_ch_status) { channel = find_msb_set(int_ch_status) - 1; int_ch_status &= ~(BIT(channel)); ch_data = &data->chan[channel]; if (ch_data->blkcallback) { ch_data->blkcallback(dev, ch_data->blkuser_data, channel, -EIO); } } } static int dma_atcdmac300_config(const struct device *dev, uint32_t channel, struct dma_config *cfg) { struct dma_atcdmac300_data *const data = dev->data; uint32_t src_width, dst_width, src_burst_size, ch_ctrl, tfr_size; int32_t ret = 0; struct dma_block_config *cfg_blocks; k_spinlock_key_t key; if (channel >= ATCDMAC100_MAX_CHAN) { return -EINVAL; } __ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size); __ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length); if (cfg->source_data_size != 1 && cfg->source_data_size != 2 && cfg->source_data_size != 4) { LOG_ERR("Invalid 'source_data_size' value"); ret = -EINVAL; goto end; } cfg_blocks = cfg->head_block; if (cfg_blocks == NULL) { ret = -EINVAL; goto end; } tfr_size = cfg_blocks->block_size/cfg->source_data_size; if (tfr_size == 0) { ret = -EINVAL; goto end; } ch_ctrl = 0; switch (cfg->channel_direction) { case MEMORY_TO_MEMORY: break; case MEMORY_TO_PERIPHERAL: ch_ctrl |= DMA_CH_CTRL_DSTREQ(cfg->dma_slot); ch_ctrl |= DMA_CH_CTRL_DMODE_HANDSHAKE; break; case PERIPHERAL_TO_MEMORY: ch_ctrl |= 
DMA_CH_CTRL_SRCREQ(cfg->dma_slot); ch_ctrl |= DMA_CH_CTRL_SMODE_HANDSHAKE; break; default: ret = -EINVAL; goto end; } switch (cfg_blocks->source_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: ch_ctrl |= DMA_CH_CTRL_SRCADDR_INC; break; case DMA_ADDR_ADJ_DECREMENT: ch_ctrl |= DMA_CH_CTRL_SRCADDR_DEC; break; case DMA_ADDR_ADJ_NO_CHANGE: ch_ctrl |= DMA_CH_CTRL_SRCADDR_FIX; break; default: ret = -EINVAL; goto end; } switch (cfg_blocks->dest_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: ch_ctrl |= DMA_CH_CTRL_DSTADDR_INC; break; case DMA_ADDR_ADJ_DECREMENT: ch_ctrl |= DMA_CH_CTRL_DSTADDR_DEC; break; case DMA_ADDR_ADJ_NO_CHANGE: ch_ctrl |= DMA_CH_CTRL_DSTADDR_FIX; break; default: ret = -EINVAL; goto end; } ch_ctrl |= DMA_CH_CTRL_INTABT; /* Disable the error callback */ if (!cfg->error_callback_dis) { ch_ctrl |= DMA_CH_CTRL_INTERR; } src_width = find_msb_set(cfg->source_data_size) - 1; dst_width = find_msb_set(cfg->dest_data_size) - 1; src_burst_size = find_msb_set(cfg->source_burst_length) - 1; ch_ctrl |= DMA_CH_CTRL_SWIDTH(src_width) | DMA_CH_CTRL_DWIDTH(dst_width) | DMA_CH_CTRL_SBSIZE(src_burst_size); /* Reset DMA channel configuration */ sys_write32(0, DMA_CH_CTRL(dev, channel)); key = k_spin_lock(&data->lock); /* Clear DMA interrupts status */ sys_write32(DMA_INT_STATUS_CH_MSK(channel), DMA_INT_STATUS(dev)); k_spin_unlock(&data->lock, key); /* Set transfer size */ sys_write32(tfr_size, DMA_CH_TRANSIZE(dev, channel)); /* Update the status of channel */ data->chan[channel].status.dir = cfg->channel_direction; data->chan[channel].status.pending_length = cfg->source_data_size; /* Configure a callback appropriately depending on whether the * interrupt is requested at the end of transaction completion or * at the end of each block. */ data->chan[channel].blkcallback = cfg->dma_callback; data->chan[channel].blkuser_data = cfg->user_data; sys_write32(ch_ctrl, DMA_CH_CTRL(dev, channel)); /* Set source and destination address */ sys_write32(cfg_blocks->source_address, DMA_CH_SRC_ADDR_L(dev, channel)); sys_write32(0, DMA_CH_SRC_ADDR_H(dev, channel)); sys_write32(cfg_blocks->dest_address, DMA_CH_DST_ADDR_L(dev, channel)); sys_write32(0, DMA_CH_DST_ADDR_H(dev, channel)); if (cfg->dest_chaining_en == 1 && cfg_blocks->next_block) { uint32_t current_block_idx = 0; sys_write32((uint32_t)((long)&dma_chain[channel][current_block_idx]), DMA_CH_LL_PTR_L(dev, channel)); sys_write32(0, DMA_CH_LL_PTR_H(dev, channel)); for (cfg_blocks = cfg_blocks->next_block; cfg_blocks != NULL; cfg_blocks = cfg_blocks->next_block) { ch_ctrl &= ~(DMA_CH_CTRL_SRCADDRCTRL_MASK | DMA_CH_CTRL_DSTADDRCTRL_MASK); switch (cfg_blocks->source_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: ch_ctrl |= DMA_CH_CTRL_SRCADDR_INC; break; case DMA_ADDR_ADJ_DECREMENT: ch_ctrl |= DMA_CH_CTRL_SRCADDR_DEC; break; case DMA_ADDR_ADJ_NO_CHANGE: ch_ctrl |= DMA_CH_CTRL_SRCADDR_FIX; break; default: ret = -EINVAL; goto end; } switch (cfg_blocks->dest_addr_adj) { case DMA_ADDR_ADJ_INCREMENT: ch_ctrl |= DMA_CH_CTRL_DSTADDR_INC; break; case DMA_ADDR_ADJ_DECREMENT: ch_ctrl |= DMA_CH_CTRL_DSTADDR_DEC; break; case DMA_ADDR_ADJ_NO_CHANGE: ch_ctrl |= DMA_CH_CTRL_DSTADDR_FIX; break; default: ret = -EINVAL; goto end; } dma_chain[channel][current_block_idx].ctrl = ch_ctrl; dma_chain[channel][current_block_idx].transize = cfg_blocks->block_size/cfg->source_data_size; dma_chain[channel][current_block_idx].srcaddrl = (uint32_t)cfg_blocks->source_address; dma_chain[channel][current_block_idx].srcaddrh = 0x0; dma_chain[channel][current_block_idx].dstaddrl = 
(uint32_t)((long)cfg_blocks->dest_address); dma_chain[channel][current_block_idx].dstaddrh = 0x0; if (cfg_blocks->next_block) { dma_chain[channel][current_block_idx].llpointerl = (uint32_t)&dma_chain[channel][current_block_idx + 1]; dma_chain[channel][current_block_idx].llpointerh = 0x0; current_block_idx = current_block_idx + 1; } else { dma_chain[channel][current_block_idx].llpointerl = 0x0; dma_chain[channel][current_block_idx].llpointerh = 0x0; dma_chain[channel][current_block_idx].next_block = NULL; } } } else { /* Single transfer is supported, but Chain transfer is still * not supported. Therefore, set LLPointer to zero */ sys_write32(0, DMA_CH_LL_PTR_L(dev, channel)); sys_write32(0, DMA_CH_LL_PTR_H(dev, channel)); } end: return ret; } static int dma_atcdmac300_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { uint32_t src_width; if (channel >= ATCDMAC100_MAX_CHAN) { return -EINVAL; } /* Set source and destination address */ sys_write32(src, DMA_CH_SRC_ADDR_L(dev, channel)); sys_write32(0, DMA_CH_SRC_ADDR_H(dev, channel)); sys_write32(dst, DMA_CH_DST_ADDR_L(dev, channel)); sys_write32(0, DMA_CH_DST_ADDR_H(dev, channel)); src_width = FIELD_GET(DMA_CH_CTRL_SWIDTH_MASK, sys_read32(DMA_CH_CTRL(dev, channel))); src_width = BIT(src_width); /* Set transfer size */ sys_write32(size/src_width, DMA_CH_TRANSIZE(dev, channel)); return 0; } static int dma_atcdmac300_transfer_start(const struct device *dev, uint32_t channel) { struct dma_atcdmac300_data *const data = dev->data; if (channel >= ATCDMAC100_MAX_CHAN) { return -EINVAL; } sys_write32(sys_read32(DMA_CH_CTRL(dev, channel)) | DMA_CH_CTRL_ENABLE, DMA_CH_CTRL(dev, channel)); data->chan[channel].status.busy = true; return 0; } static int dma_atcdmac300_transfer_stop(const struct device *dev, uint32_t channel) { struct dma_atcdmac300_data *const data = dev->data; k_spinlock_key_t key; if (channel >= ATCDMAC100_MAX_CHAN) { return -EINVAL; } key = k_spin_lock(&data->lock); sys_write32(BIT(channel), DMA_ABORT(dev)); sys_write32(0, DMA_CH_CTRL(dev, channel)); sys_write32(FIELD_GET(DMA_INT_STATUS_ABORT_MASK, (channel)), DMA_INT_STATUS(dev)); data->chan[channel].status.busy = false; k_spin_unlock(&data->lock, key); return 0; } static int dma_atcdmac300_init(const struct device *dev) { const struct dma_atcdmac300_cfg *const config = (struct dma_atcdmac300_cfg *)dev->config; uint32_t ch_num; /* Disable all channels and Channel interrupts */ for (ch_num = 0; ch_num < ATCDMAC100_MAX_CHAN; ch_num++) { sys_write32(0, DMA_CH_CTRL(dev, ch_num)); } sys_write32(0xFFFFFF, DMA_INT_STATUS(dev)); /* Configure interrupts */ config->irq_config(); irq_enable(config->irq_num); return 0; } static int dma_atcdmac300_get_status(const struct device *dev, uint32_t channel, struct dma_status *stat) { struct dma_atcdmac300_data *const data = dev->data; stat->busy = data->chan[channel].status.busy; stat->dir = data->chan[channel].status.dir; stat->pending_length = data->chan[channel].status.pending_length; return 0; } static const struct dma_driver_api dma_atcdmac300_api = { .config = dma_atcdmac300_config, .reload = dma_atcdmac300_reload, .start = dma_atcdmac300_transfer_start, .stop = dma_atcdmac300_transfer_stop, .get_status = dma_atcdmac300_get_status }; #define ATCDMAC300_INIT(n) \ \ static void dma_atcdmac300_irq_config_##n(void); \ \ static const struct dma_atcdmac300_cfg dma_config_##n = { \ .irq_config = dma_atcdmac300_irq_config_##n, \ .base = DT_INST_REG_ADDR(n), \ .irq_num = DT_INST_IRQN(n), \ }; \ \ static struct 
dma_atcdmac300_data dma_data_##n;				\
								\
	DEVICE_DT_INST_DEFINE(n,				\
			      dma_atcdmac300_init,		\
			      NULL,				\
			      &dma_data_##n,			\
			      &dma_config_##n,			\
			      POST_KERNEL,			\
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
			      &dma_atcdmac300_api);		\
								\
	static void dma_atcdmac300_irq_config_##n(void)		\
	{							\
		IRQ_CONNECT(DT_INST_IRQN(n),			\
			    1,					\
			    dma_atcdmac300_isr,			\
			    DEVICE_DT_INST_GET(n),		\
			    0);					\
	}

DT_INST_FOREACH_STATUS_OKAY(ATCDMAC300_INIT)
```
/content/code_sandbox/drivers/dma/dma_andes_atcdmac300.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,498
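Since this controller supports linked-list transfers keyed off `dest_chaining_en` in dma_atcdmac300_config() above, here is a hedged sketch of a two-block memory-to-memory request as that function would consume it; the device binding, channel number and addresses are placeholders.

```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Sketch: copy two non-contiguous source halves into one destination */
int chained_copy(const struct device *dma_dev, uint32_t channel,
		 uint32_t src0, uint32_t src1, uint32_t dst, size_t half)
{
	struct dma_block_config blk1 = {
		.source_address = src1,
		.dest_address = dst + half,
		.block_size = half,
		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
	};
	struct dma_block_config blk0 = {
		.source_address = src0,
		.dest_address = dst,
		.block_size = half,
		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
		.next_block = &blk1,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,	/* driver accepts 1, 2 or 4 */
		.dest_data_size = 4,
		.source_burst_length = 4,
		.dest_burst_length = 4,
		.block_count = 2,
		.head_block = &blk0,
		.dest_chaining_en = 1,	/* the flag this driver keys chaining off */
	};
	int ret = dma_config(dma_dev, channel, &cfg);

	return (ret == 0) ? dma_start(dma_dev, channel) : ret;
}
```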
```objective-c /* * */ #ifndef DMA_IPROC_PAX #define DMA_IPROC_PAX /* Broadcom PAX-DMA RM register defines */ #define PAX_DMA_REG_ADDR(_base, _offs) ((_base) + (_offs)) #define PAX_DMA_RING_ADDR_OFFSET(_ring) (0x10000 * (_ring)) /* Per-Ring register offsets */ #define RING_VER 0x000 #define RING_BD_START_ADDR 0x004 #define RING_BD_READ_PTR 0x008 #define RING_BD_WRITE_PTR 0x00c #define RING_BD_READ_PTR_DDR_LS 0x010 #define RING_BD_READ_PTR_DDR_MS 0x014 #define RING_CMPL_START_ADDR 0x018 #define RING_CMPL_WRITE_PTR 0x01c #define RING_NUM_REQ_RECV_LS 0x020 #define RING_NUM_REQ_RECV_MS 0x024 #define RING_NUM_REQ_TRANS_LS 0x028 #define RING_NUM_REQ_TRANS_MS 0x02c #define RING_NUM_REQ_OUTSTAND 0x030 #define RING_CONTROL 0x034 #define RING_FLUSH_DONE 0x038 #define RING_MSI_ADDR_LS 0x03c #define RING_MSI_ADDR_MS 0x040 #define RING_CMPL_WR_PTR_DDR_CONTROL 0x048 #define RING_BD_READ_PTR_DDR_CONTROL 0x04c #define RING_WRITE_SEQ_NUM 0x050 #define RING_READ_SEQ_NUM 0x054 #define RING_BD_MEM_WRITE_ADDRESS 0x058 #define RING_AXI_BEAT_CNT 0x05c #define RING_AXI_BURST_CNT 0x060 #define RING_MSI_DATA_VALUE 0x064 #define RING_PACKET_ALIGNMENT_STATUS0 0x068 #define RING_PACKET_ALIGNMENT_STATUS1 0x06c #define RING_PACKET_ALIGNMENT_STATUS2 0x070 #define RING_DOORBELL_BD_WRITE_COUNT 0x074 /* RING Manager Common Registers */ #define RM_COMM_CTRL_REG(_ring) (0x100 * (_ring)) #define RM_MSI_DEVID_REG(_ring) (0x100 * (_ring) + 0x4) #define RM_AE0_AE_CONTROL 0x2000 #define RM_AE0_NUMBER_OF_PACKETS_RECEIVED_LS_BITS 0x2004 #define RM_AE0_NUMBER_OF_PACKETS_RECEIVED_MS_BITS 0x2008 #define RM_AE0_NUMBER_OF_PACKETS_TRANSMITTED_LS_BITS 0x200c #define RM_AE0_NUMBER_OF_PACKETS_TRANSMITTED_MS_BITS 0x2010 #define RM_AE0_OUTSTANDING_PACKET 0x2014 #define RM_AE0_AE_FLUSH_STATUS 0x2018 #define RM_AE0_AE_FIFO_WRITE_POINTER 0x201c #define RM_AE0_AE_FIFO_READ_POINTER 0x2020 #define RM_AE1_AE_CONTROL 0x2100 #define RM_AE1_NUMBER_OF_PACKETS_RECEIVED_LS_BITS 0x2104 #define RM_AE1_NUMBER_OF_PACKETS_RECEIVED_MS_BITS 0x2108 #define RM_AE1_NUMBER_OF_PACKETS_TRANSMITTED_LS_BITS 0x210c #define RM_AE1_NUMBER_OF_PACKETS_TRANSMITTED_MS_BITS 0x2110 #define RM_AE1_OUTSTANDING_PACKET 0x2114 #define RM_AE1_AE_FLUSH_STATUS 0x2118 #define RM_AE1_AE_FIFO_WRITE_POINTER 0x211c #define RM_AE1_AE_FIFO_READ_POINTER 0x2120 #define RM_COMM_RING_SECURITY_SETTING 0x3000 #define RM_COMM_CONTROL 0x3008 #define RM_COMM_TIMER_CONTROL_0 0x300c #define RM_COMM_TIMER_CONTROL_1 0x3010 #define RM_COMM_BD_THRESHOLD 0x3014 #define RM_COMM_BURST_LENGTH 0x3018 #define RM_COMM_FIFO_FULL_THRESHOLD 0x301c #define RM_COMM_MASK_SEQUENCE_MAX_COUNT 0x3020 #define RM_COMM_AE_TIMEOUT 0x3024 #define RM_COMM_RING_OR_AE_STATUS_LOG_ENABLE 0x3028 #define RM_COMM_RING_FLUSH_TIMEOUT 0x302c #define RM_COMM_MEMORY_CONFIGURATION 0x3030 #define RM_COMM_AXI_CONTROL 0x3034 #define RM_COMM_GENERAL_MSI_DEVICE_ID 0x3038 #define RM_COMM_GENERAL_MSI_ADDRESS_LS 0x303c #define RM_COMM_GENERAL_MSI_ADDRESS_MS 0x3040 #define RM_COMM_CONFIG_INTERRUPT_STATUS_MASK 0x3044 #define RM_COMM_CONFIG_INTERRUPT_STATUS_CLEAR 0x3048 #define RM_COMM_TOGGLE_INTERRUPT_STATUS_MASK 0x304c #define RM_COMM_TOGGLE_INTERRUPT_STATUS_CLEAR 0x3050 #define RM_COMM_DDR_ADDR_GEN_INTERRUPT_STATUS_MASK 0x3054 #define RM_COMM_DDR_ADDR_GEN_INTERRUPT_STATUS_CLEAR 0x3058 #define RM_COMM_PACKET_ALIGNMENT_INTERRUPT_STATUS_MASK 0x305c #define RM_COMM_PACKET_ALIGNMENT_INTERRUPT_STATUS_CLEAR 0x3060 #define RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_MASK 0x3064 #define RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_CLEAR 0x3068 #define 
RM_COMM_AE_INTERFACE_GROUP_1_INTERRUPT_MASK 0x306c #define RM_COMM_AE_INTERFACE_GROUP_1_INTERRUPT_CLEAR 0x3070 #define RM_COMM_AE_INTERFACE_GROUP_2_INTERRUPT_MASK 0x3074 #define RM_COMM_AE_INTERFACE_GROUP_2_INTERRUPT_CLEAR 0x3078 #define RM_COMM_AE_INTERFACE_GROUP_3_INTERRUPT_MASK 0x307c #define RM_COMM_AE_INTERFACE_GROUP_3_INTERRUPT_CLEAR 0x3080 #define RM_COMM_AE_INTERFACE_GROUP_4_INTERRUPT_MASK 0x3084 #define RM_COMM_AE_INTERFACE_GROUP_4_INTERRUPT_CLEAR 0x3088 #define RM_COMM_AE_INTERFACE_GROUP_5_INTERRUPT_MASK 0x308c #define RM_COMM_AE_INTERFACE_GROUP_5_INTERRUPT_CLEAR 0x3090 #define RM_COMM_AE_INTERFACE_GROUP_6_INTERRUPT_MASK 0x3094 #define RM_COMM_AE_INTERFACE_GROUP_6_INTERRUPT_CLEAR 0x3098 #define RM_COMM_AE_INTERFACE_GROUP_7_INTERRUPT_MASK 0x309c #define RM_COMM_AE_INTERFACE_GROUP_7_INTERRUPT_CLEAR 0x30a0 #define RM_COMM_AE_INTERFACE_TOP_INTERRUPT_STATUS_MASK 0x30a4 #define RM_COMM_AE_INTERFACE_TOP_INTERRUPT_STATUS_CLEAR 0x30a8 #define RM_COMM_REORDER_INTERRUPT_STATUS_MASK 0x30ac #define RM_COMM_REORDER_INTERRUPT_STATUS_CLEAR 0x30b0 #define RM_COMM_DME_INTERRUPT_STATUS_MASK 0x30b4 #define RM_COMM_DME_INTERRUPT_STATUS_CLEAR 0x30b8 #define RM_COMM_REORDER_FIFO_PROG_THRESHOLD 0x30bc #define RM_COMM_GROUP_PKT_EXTENSION_SUPPORT 0x30c0 #define RM_COMM_GENERAL_MSI_DATA_VALUE 0x30c4 #define RM_COMM_AXI_READ_BURST_THRESHOLD 0x30c8 #define RM_COMM_GROUP_RING_COUNT 0x30cc #define RM_COMM_MSI_DISABLE 0x30d8 #define RM_COMM_RESERVE 0x30fc #define RM_COMM_RING_FLUSH_STATUS 0x3100 #define RM_COMM_RING_SEQUENCE_NUMBER_OVERFLOW 0x3104 #define RM_COMM_AE_SEQUENCE_NUMBER_OVERFLOW 0x3108 #define RM_COMM_MAX_SEQUENCE_NUMBER_FOR_ANY_RING 0x310c #define RM_COMM_MAX_SEQUENCE_NUMBER_ON_MONITOR_RING 0x3110 #define RM_COMM_MAX_SEQUENCE_NUMBER_ON_ANY_AE 0x3114 #define RM_COMM_MAX_SEQUENCE_NUMBER_ON_MONITOR_AE 0x3118 #define RM_COMM_MIN_MAX_LATENCY_MONITOR_RING_TOGGLE 0x311c #define RM_COMM_MIN_MAX_LATENCY_MONITOR_RING_ADDRESSGEN 0x3120 #define RM_COMM_RING_ACTIVITY 0x3124 #define RM_COMM_AE_ACTIVITY 0x3128 #define RM_COMM_MAIN_HW_INIT_DONE 0x312c #define RM_COMM_MEMORY_POWER_STATUS 0x3130 #define RM_COMM_CONFIG_STATUS_0 0x3134 #define RM_COMM_CONFIG_STATUS_1 0x3138 #define RM_COMM_TOGGLE_STATUS_0 0x313c #define RM_COMM_TOGGLE_STATUS_1 0x3140 #define RM_COMM_DDR_ADDR_GEN_STATUS_0 0x3144 #define RM_COMM_DDR_ADDR_GEN_STATUS_1 0x3148 #define RM_COMM_PACKET_ALIGNMENT_STATUS_0 0x314c #define RM_COMM_PACKET_ALIGNMENT_STATUS_1 0x3150 #define RM_COMM_PACKET_ALIGNMENT_STATUS_2 0x3154 #define RM_COMM_PACKET_ALIGNMENT_STATUS_3 0x3158 #define RM_COMM_AE_INTERFACE_GROUP_0_STATUS_0 0x315c #define RM_COMM_AE_INTERFACE_GROUP_0_STATUS_1 0x3160 #define RM_COMM_AE_INTERFACE_GROUP_1_STATUS_0 0x3164 #define RM_COMM_AE_INTERFACE_GROUP_1_STATUS_1 0x3168 #define RM_COMM_AE_INTERFACE_GROUP_2_STATUS_0 0x316c #define RM_COMM_AE_INTERFACE_GROUP_2_STATUS_1 0x3170 #define RM_COMM_AE_INTERFACE_GROUP_3_STATUS_0 0x3174 #define RM_COMM_AE_INTERFACE_GROUP_3_STATUS_1 0x3178 #define RM_COMM_AE_INTERFACE_GROUP_4_STATUS_0 0x317c #define RM_COMM_AE_INTERFACE_GROUP_4_STATUS_1 0x3180 #define RM_COMM_AE_INTERFACE_GROUP_5_STATUS_0 0x3184 #define RM_COMM_AE_INTERFACE_GROUP_5_STATUS_1 0x3188 #define RM_COMM_AE_INTERFACE_GROUP_6_STATUS_0 0x318c #define RM_COMM_AE_INTERFACE_GROUP_6_STATUS_1 0x3190 #define RM_COMM_AE_INTERFACE_GROUP_7_STATUS_0 0x3194 #define RM_COMM_AE_INTERFACE_GROUP_7_STATUS_1 0x3198 #define RM_COMM_AE_INTERFACE_TOP_STATUS_0 0x319c #define RM_COMM_AE_INTERFACE_TOP_STATUS_1 0x31a0 #define RM_COMM_REORDER_STATUS_0 0x31a4 #define 
RM_COMM_REORDER_STATUS_1 0x31a8 #define RM_COMM_REORDER_STATUS_2 0x31ac #define RM_COMM_REORDER_STATUS_3 0x31b0 #define RM_COMM_REORDER_STATUS_4 0x31b4 #define RM_COMM_REORDER_STATUS_5 0x31b8 #define RM_COMM_CONFIG_INTERRUPT_STATUS 0x31bc #define RM_COMM_TOGGLE_INTERRUPT_STATUS 0x31c0 #define RM_COMM_DDR_ADDR_GEN_INTERRUPT_STATUS 0x31c4 #define RM_COMM_PACKET_ALIGNMENT_INTERRUPT_STATUS 0x31c8 #define RM_COMM_AE_INTERFACE_GROUP_0_INTERRUPT_STATUS 0x31cc #define RM_COMM_AE_INTERFACE_GROUP_1_INTERRUPT_STATUS 0x31d0 #define RM_COMM_AE_INTERFACE_GROUP_2_INTERRUPT_STATUS 0x31d4 #define RM_COMM_AE_INTERFACE_GROUP_3_INTERRUPT_STATUS 0x31d8 #define RM_COMM_AE_INTERFACE_GROUP_4_INTERRUPT_STATUS 0x31dc #define RM_COMM_AE_INTERFACE_GROUP_5_INTERRUPT_STATUS 0x31e0 #define RM_COMM_AE_INTERFACE_GROUP_6_INTERRUPT_STATUS 0x31e4 #define RM_COMM_AE_INTERFACE_GROUP_7_INTERRUPT_STATUS 0x31e8 #define RM_COMM_AE_INTERFACE_TOP_INTERRUPT_STATUS 0x31ec #define RM_COMM_REORDER_INTERRUPT_STATUS 0x31f0 #define RM_COMM_DME_INTERRUPT_STATUS 0x31f4 #define RM_COMM_PACKET_ALIGNMENT_STATUS_4 0x31f8 #define RM_COMM_PACKET_ALIGNMENT_STATUS_5 0x31fc #define RM_COMM_PACKET_ALIGNMENT_STATUS_6 0x3200 #define RM_COMM_MSI_INTR_INTERRUPT_STATUS 0x3204 #define RM_COMM_BD_FETCH_MODE_CONTROL 0x3360 #define RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT 16 #define RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_SHIFT_VAL 32 #define RM_COMM_THRESHOLD_CFG_RD_FIFO_MAX_THRESHOLD_MASK 0x1FF #define RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_SHIFT 25 #define RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_VAL 40 #define RM_COMM_PKT_ALIGNMENT_BD_FIFO_FULL_THRESHOLD_MASK 0x7F #define RM_COMM_BD_FIFO_FULL_THRESHOLD_VAL 224 #define RM_COMM_BD_FIFO_FULL_THRESHOLD_SHIFT 16 #define RM_COMM_BD_FIFO_FULL_THRESHOLD_MASK 0x1FF /* PAX_DMA_RM_COMM_RM_BURST_LENGTH */ #define RM_COMM_BD_FETCH_CACHE_ALIGNED_DISABLED BIT(28) #define RM_COMM_VALUE_FOR_DDR_ADDR_GEN_SHIFT 16 #define RM_COMM_VALUE_FOR_TOGGLE_SHIFT 0 #define RM_COMM_VALUE_FOR_DDR_ADDR_GEN_VAL 32 #define RM_COMM_VALUE_FOR_TOGGLE_VAL 32 #define RM_COMM_DISABLE_GRP_BD_FIFO_FLOW_CONTROL_FOR_PKT_ALIGNMENT BIT(1) #define RM_COMM_DISABLE_PKT_ALIGNMENT_BD_FIFO_FLOW_CONTROL BIT(0) /* RM version */ #define RING_VER_MAGIC 0x76303031 /* Register RING_CONTROL fields */ #define RING_CONTROL_MASK_DISABLE_CONTROL 6 #define RING_CONTROL_FLUSH BIT(5) #define RING_CONTROL_ACTIVE BIT(4) /* Register RING_FLUSH_DONE fields */ #define RING_FLUSH_DONE_MASK 0x1 #define RING_MASK_SEQ_MAX_COUNT_MASK 0x3ff /* RM_COMM_MAIN_HW_INIT_DONE DONE fields */ #define RM_COMM_MAIN_HW_INIT_DONE_MASK 0x1 /* Register RING_CMPL_WR_PTR_DDR_CONTROL fields */ #define RING_BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16 #define RING_BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff #define RING_BD_READ_PTR_DDR_ENABLE_SHIFT 15 #define RING_BD_READ_PTR_DDR_ENABLE_MASK 0x1 /* Register RING_BD_READ_PTR_DDR_CONTROL fields */ #define RING_BD_CMPL_WR_PTR_DDR_TIMER_VAL_SHIFT 16 #define RING_BD_CMPL_WR_PTR_DDR_TIMER_VAL_MASK 0xffff #define RING_BD_CMPL_WR_PTR_DDR_ENABLE_SHIFT 15 #define RING_BD_CMPL_WR_PTR_DDR_ENABLE_MASK 0x1 /* * AE_TIMEOUT is (2^AE_TIMEOUT_BITS) - (2 * NumOfAEs * 2^FIFO_DEPTH_BITS) * AE_TIMEOUT_BITS=32, NumOfAEs=2, FIFO_DEPTH_BITS=5 * timeout val = 2^32 - 2*2*2^5 */ #define RM_COMM_AE_TIMEOUT_VAL 0xffffff80 /* RM timer control fields for 4 rings */ #define RM_COMM_TIMER_CONTROL_FAST 0xaf #define RM_COMM_TIMER_CONTROL_FAST_SHIFT 16 #define RM_COMM_TIMER_CONTROL_MEDIUM 0x15e #define RM_COMM_TIMER_CONTROL0_VAL \ ((RM_COMM_TIMER_CONTROL_FAST << 
RM_COMM_TIMER_CONTROL_FAST_SHIFT) | \ (RM_COMM_TIMER_CONTROL_MEDIUM)) #define RM_COMM_TIMER_CONTROL_SLOW 0x2bc #define RM_COMM_TIMER_CONTROL_SLOW_SHIFT 16 #define RM_COMM_TIMER_CONTROL_IDLE 0x578 #define RM_COMM_TIMER_CONTROL1_VAL \ ((RM_COMM_TIMER_CONTROL_SLOW << RM_COMM_TIMER_CONTROL_SLOW_SHIFT) | \ (RM_COMM_TIMER_CONTROL_IDLE)) #define RM_COMM_RM_BURST_LENGTH 0x80008 /* Register RM_COMM_AXI_CONTROL fields */ #define RM_COMM_AXI_CONTROL_RD_CH_EN_SHIFT 24 #define RM_COMM_AXI_CONTROL_RD_CH_EN \ BIT(RM_COMM_AXI_CONTROL_RD_CH_EN_SHIFT) #define RM_COMM_AXI_CONTROL_WR_CH_EN_SHIFT 28 #define RM_COMM_AXI_CONTROL_WR_CH_EN \ BIT(RM_COMM_AXI_CONTROL_WR_CH_EN_SHIFT) /* Register Per-ring RING_COMMON_CONTROL fields */ #define RING_COMM_CTRL_AE_GROUP_SHIFT 0 #define RING_COMM_CTRL_AE_GROUP_MASK (0x7 << RING_COMM_CTRL_AE_GROUP_SHIFT) /* Register AE0_AE_CONTROL/AE1_AE_CONTROL fields */ #define RM_AE_CONTROL_ACTIVE BIT(4) #define RM_AE_CTRL_AE_GROUP_SHIFT 0 #define RM_AE_CTRL_AE_GROUP_MASK (0x7 << RM_AE_CTRL_AE_GROUP_SHIFT) /* Register RING_CMPL_WR_PTR_DDR_CONTROL fields */ #define RING_DDR_CONTROL_COUNT_SHIFT 0 #define RING_DDR_CONTROL_COUNT_MASK 0x3ff #define RING_DDR_CONTROL_COUNT(x) (((x) & RING_DDR_CONTROL_COUNT_MASK) \ << RING_DDR_CONTROL_COUNT_SHIFT) #define RING_DDR_CONTROL_COUNT_VAL 0x1U #define RING_DDR_CONTROL_ENABLE_SHIFT 15 #define RING_DDR_CONTROL_ENABLE BIT(RING_DDR_CONTROL_ENABLE_SHIFT) #define RING_DDR_CONTROL_TIMER_SHIFT 16 #define RING_DDR_CONTROL_TIMER_MASK 0xffff #define RING_DDR_CONTROL_TIMER(x) (((x) & RING_DDR_CONTROL_TIMER_MASK) \ << RING_DDR_CONTROL_TIMER_SHIFT) /* * Set no timeout value for completion write path as it would generate * multiple interrupts during large transfers. And if timeout value is * set, completion write pointers has to be checked on each interrupt * to ensure that transfer is actually done. 
*/ #define RING_DDR_CONTROL_TIMER_VAL (0xFFFF) /* completion DME status code */ #define PAX_DMA_STATUS_AXI_RRESP_ERR BIT(0) #define PAX_DMA_STATUS_AXI_BRESP_ERR BIT(1) #define PAX_DMA_STATUS_PCIE_CA_ERR BIT(2) #define PAX_DMA_STATUS_PCIE_UR_ERR BIT(3) #define PAX_DMA_STATUS_PCIE_CMPL_TOUT_ERR BIT(4) #define PAX_DMA_STATUS_PCIE_RX_POISON BIT(5) #define PAX_DMA_STATUS_ERROR_MASK ( \ PAX_DMA_STATUS_AXI_RRESP_ERR | \ PAX_DMA_STATUS_AXI_BRESP_ERR | \ PAX_DMA_STATUS_PCIE_CA_ERR | \ PAX_DMA_STATUS_PCIE_UR_ERR | \ PAX_DMA_STATUS_PCIE_CMPL_TOUT_ERR | \ PAX_DMA_STATUS_PCIE_RX_POISON \ ) /* completion RM status code */ #define RM_COMPLETION_SUCCESS 0x0 #define RM_COMPLETION_AE_TIMEOUT 0x3FF #define RM_COMM_MSI_CONFIG_INTERRUPT_ACCESS_ERR_MASK BIT(9) #define RM_COMM_MSI_CONFIG_INTERRUPT_BRESP_ERR_MASK BIT(8) #define RM_COMM_MSI_DISABLE_MASK BIT(0) /* Buffer Descriptor definitions */ #define PAX_DMA_TYPE_RM_HEADER 0x1 #define PAX_DMA_TYPE_NEXT_PTR 0x5 /* one desc ring size( is 4K, 4K aligned */ #define PAX_DMA_RM_DESC_RING_SIZE 4096 #define PAX_DMA_RING_BD_ALIGN_ORDER 12 /* completion ring size(bytes) is 8K, 8K aligned */ #define PAX_DMA_RM_CMPL_RING_SIZE 8192 #define PAX_DMA_RING_CMPL_ALIGN_ORDER 13 #define PAX_DMA_RING_BD_ALIGN_CHECK(addr) \ (!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1))) #define RING_CMPL_ALIGN_CHECK(addr) \ (!((addr) & ((0x1 << RING_CMPL_ALIGN_ORDER) - 1))) /* RM descriptor width: 8 bytes */ #define PAX_DMA_RM_DESC_BDWIDTH 8 /* completion msg desc takes 1 BD */ #define PAX_DMA_CMPL_DESC_SIZE PAX_DMA_RM_DESC_BDWIDTH /* Next table desc takes 1 BD */ #define PAX_DMA_NEXT_TBL_DESC_SIZE PAX_DMA_RM_DESC_BDWIDTH /* Header desc takes 1 BD */ #define PAX_DMA_HEADER_DESC_SIZE PAX_DMA_RM_DESC_BDWIDTH /* Total BDs in ring: 4K/8bytes = 512 BDs */ #define PAX_DMA_RM_RING_BD_COUNT (PAX_DMA_RM_DESC_RING_SIZE / \ PAX_DMA_RM_DESC_BDWIDTH) /* Initial RM header is first BD in ring */ #define PAX_DMA_HEADER_INDEX 0 #define PAX_DMA_HEADER_ADDR(_ring) (void *)((uintptr_t)(_ring) + \ PAX_DMA_HEADER_INDEX * PAX_DMA_RM_DESC_BDWIDTH) /* NEXT TABLE desc offset is last BD in ring */ #define PAX_DMA_NEXT_TBL_INDEX (PAX_DMA_RM_RING_BD_COUNT - 1) #define PAX_DMA_NEXT_TBL_ADDR(_ring) (void *)((uintptr_t)(_ring) + \ PAX_DMA_NEXT_TBL_INDEX * PAX_DMA_RM_DESC_BDWIDTH) /* DMA transfers supported from 4 bytes thru 16M, size aligned to 4 bytes */ #define PAX_DMA_MIN_SIZE 4 #define PAX_DMA_MAX_SIZE (16 * 1024 * 1024) /* Host and Card address need 4-byte alignment */ #define PAX_DMA_ADDR_ALIGN 4 #define RM_RING_REG(_pd, _r, _write_ptr) \ ((_pd)->ring[_r].ring_base + (_write_ptr)) #define RM_COMM_REG(_pd, _write_ptr) ((_pd)->rm_comm_base + (_write_ptr)) #define PAX_DMA_REG(_pd, _write_ptr) ((_pd)->dma_base + (_write_ptr)) #define PAX_DMA_MAX_CMPL_COUNT 1024 #define PAX_DMA_LAST_CMPL_IDX (PAX_DMA_MAX_CMPL_COUNT - 1) #define PAX_DMA_RING_ALIGN BIT(PAX_DMA_RING_CMPL_ALIGN_ORDER) /* num of completions received, circular buffer */ #define PAX_DMA_GET_CMPL_COUNT(wptr, rptr) (((wptr) >= (rptr)) ? \ ((wptr) - (rptr)) : (PAX_DMA_MAX_CMPL_COUNT - (rptr) + (wptr))) /* location of current cmpl pkt, take care of pointer wrap-around */ #define PAX_DMA_CURR_CMPL_IDX(wptr) \ (((wptr) == 0) ? 
PAX_DMA_LAST_CMPL_IDX : (wptr) - 1) /* Timeout (milliseconds) for completion alert in interrupt mode */ #define PAX_DMA_TIMEOUT 10000 /* TODO: add macro to enable data memory barrier, to ensure writes to memory */ #define dma_mb() /* Max polling cycles for completion wait, >= 1 second */ #define PAX_DMA_MAX_POLL_WAIT 1000000 /* Max polling cycles for posted write sync >= 1 second */ #define PAX_DMA_MAX_SYNC_WAIT 1000000 enum ring_idx { PAX_DMA_RING0 = 0, PAX_DMA_RING1, PAX_DMA_RING2, PAX_DMA_RING3, PAX_DMA_RINGS_MAX }; /* * DMA direction */ enum pax_dma_dir { CARD_TO_HOST = 0x1, HOST_TO_CARD = 0x2 }; /* Completion packet */ struct cmpl_pkt { uint64_t opq : 16; /*pkt_id 15:0*/ uint64_t res : 16; /*reserved 16:31*/ uint64_t dma_status : 16; /*PAX DMA status 32:47*/ uint64_t ae_num : 6; /*RM status[47:53] processing AE number */ uint64_t rm_status : 10; /*RM status[54:63] completion/timeout status*/ } __attribute__ ((__packed__)); /* Driver internal structures */ struct dma_iproc_pax_addr64 { uint32_t addr_lo; uint32_t addr_hi; } __attribute__((__packed__)); /* DMA payload for RM internal API */ struct dma_iproc_pax_payload { uint64_t pci_addr; uint64_t axi_addr; uint32_t xfer_sz; enum pax_dma_dir direction; }; /* magic to sync completion of posted writes to host */ struct dma_iproc_pax_write_sync_data { /* sglist count, max 254 */ uint32_t total_pkts:9; /* ring-id 0-3 */ uint32_t ring:2; /* opaque-id 0-31 */ uint32_t opaque:5; /* magic pattern */ uint32_t signature:16; }; /* BD ring status */ struct dma_iproc_pax_ring_status { /* current desc write_ptret, write pointer */ void *write_ptr; /* current valid toggle */ uint32_t toggle; /* completion queue read offset */ uint32_t cmpl_rd_offs; /* opaque value for current payload */ uint32_t opq; /* posted write sync data */ struct dma_iproc_pax_write_sync_data sync_data; }; struct dma_iproc_pax_ring_data { /* ring index */ uint32_t idx; /* Per-Ring register base */ uint32_t ring_base; /* Allocated mem for BD and CMPL */ void *ring_mem; /* Buffer descriptors, 4K aligned */ void *bd; /* Completion descriptors, 8K aligned */ void *cmpl; /* payload struct for internal API */ struct dma_iproc_pax_payload *payload; /* ring current status */ struct dma_iproc_pax_ring_status curr; /* assigned packet id upto 32 values */ uint32_t pkt_id; /* per-ring lock */ struct k_mutex lock; /* alert for the ring */ struct k_sem alert; /* posted write sync src location */ struct dma_iproc_pax_write_sync_data *sync_loc; /* posted write sync pci dst address */ struct dma_iproc_pax_addr64 sync_pci; /* ring status */ int ring_active; /* dma callback and argument */ dma_callback_t dma_callback; void *callback_arg; uint32_t descs_inflight; uint32_t non_hdr_bd_count; uint32_t total_pkt_count; uintptr_t current_hdr; }; struct dma_iproc_pax_data { /* PAXB0 PAX DMA registers */ uint32_t dma_base; /* Ring manager common registers */ uint32_t rm_comm_base; /* Num of rings to use in s/w */ int used_rings; /* DMA lock */ struct k_mutex dma_lock; /* Per-Ring data */ struct dma_iproc_pax_ring_data ring[PAX_DMA_RINGS_MAX]; }; /* PAX DMA config */ struct dma_iproc_pax_cfg { /* PAXB0 PAX DMA registers */ uint32_t dma_base; /* Per-Ring register base addr */ uint32_t rm_base; /* Ring manager common registers */ uint32_t rm_comm_base; /* Num of rings to be used */ int use_rings; void *bd_memory_base; uint32_t scr_addr_loc; const struct device *pcie_dev; }; #endif ```
/content/code_sandbox/drivers/dma/dma_iproc_pax.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,856
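The completion-ring macros near the end of the header above encode wrap-around arithmetic that is easy to get wrong. A minimal standalone sketch, with names shortened and the 1024-entry depth copied from PAX_DMA_MAX_CMPL_COUNT, reproduces the same logic as plain functions so it can be sanity-checked off target:
```c
#include <assert.h>
#include <stdio.h>

/* Mirrors PAX_DMA_MAX_CMPL_COUNT and PAX_DMA_LAST_CMPL_IDX above. */
#define MAX_CMPL_COUNT 1024
#define LAST_CMPL_IDX  (MAX_CMPL_COUNT - 1)

/* Completions outstanding in a circular buffer of MAX_CMPL_COUNT slots,
 * same arithmetic as PAX_DMA_GET_CMPL_COUNT().
 */
static unsigned int cmpl_count(unsigned int wptr, unsigned int rptr)
{
	return (wptr >= rptr) ? (wptr - rptr)
			      : (MAX_CMPL_COUNT - rptr + wptr);
}

/* Index of the most recently written completion packet, same arithmetic
 * as PAX_DMA_CURR_CMPL_IDX().
 */
static unsigned int curr_cmpl_idx(unsigned int wptr)
{
	return (wptr == 0) ? LAST_CMPL_IDX : (wptr - 1);
}

int main(void)
{
	assert(cmpl_count(10, 4) == 6);    /* no wrap */
	assert(cmpl_count(2, 1020) == 6);  /* write pointer wrapped */
	assert(curr_cmpl_idx(0) == 1023);  /* last write landed in the final slot */
	printf("wrap-around arithmetic checks pass\n");
	return 0;
}
```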
```unknown
# Andestech ATCDMAC300 configuration options

config DMA_ANDES_ATCDMAC300
	bool "Andes ATCDMAC300 DMA driver"
	default y
	depends on DT_HAS_ANDESTECH_ATCDMAC300_ENABLED
	help
	  Andes ATCDMAC300 DMA driver.
```
/content/code_sandbox/drivers/dma/Kconfig.andes_atcdmac300
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
63
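The `default y` plus `depends on DT_HAS_..._ENABLED` pattern used here (and in the other Kconfig fragments below) means the driver builds automatically once a matching devicetree node has status "okay". A minimal consumer-side sketch of the runtime counterpart, assuming a hypothetical `dma0` node label:
```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>

/* Hypothetical node label: the board DTS would need a node labeled dma0
 * with compatible = "andestech,atcdmac300" and status = "okay", which is
 * also what makes the DT_HAS_ANDESTECH_ATCDMAC300_ENABLED gate fire.
 */
#define MY_DMA_NODE DT_NODELABEL(dma0)

int check_dma_controller(void)
{
	const struct device *dma_dev = DEVICE_DT_GET(MY_DMA_NODE);

	/* "default y" only compiles the driver in; whether init
	 * succeeded is still a runtime question.
	 */
	if (!device_is_ready(dma_dev)) {
		return -ENODEV;
	}

	return 0;
}
```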
```c
/*
 *
 */

#define DT_DRV_COMPAT intel_adsp_hda_host_out

#include <zephyr/drivers/dma.h>
#include <adsp_interrupt.h>
#include "dma_intel_adsp_hda.h"

#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_intel_adsp_hda_dma_host_out);

static const struct dma_driver_api intel_adsp_hda_dma_host_out_api = {
	.config = intel_adsp_hda_dma_host_out_config,
	.reload = intel_adsp_hda_dma_host_reload,
	.start = intel_adsp_hda_dma_start,
	.stop = intel_adsp_hda_dma_stop,
	.get_status = intel_adsp_hda_dma_status,
	.get_attribute = intel_adsp_hda_dma_get_attribute,
	.chan_filter = intel_adsp_hda_dma_chan_filter,
};

#define INTEL_ADSP_HDA_DMA_HOST_OUT_INIT(inst)                                 \
	static void intel_adsp_hda_dma##inst##_irq_config(void);               \
									       \
	static const struct intel_adsp_hda_dma_cfg                             \
		intel_adsp_hda_dma##inst##_config = {                          \
		.base = DT_INST_REG_ADDR(inst),                                \
		.regblock_size = DT_INST_REG_SIZE(inst),                       \
		.dma_channels = DT_INST_PROP(inst, dma_channels),              \
		.direction = HOST_TO_MEMORY,                                   \
		.irq_config = intel_adsp_hda_dma##inst##_irq_config,           \
	};                                                                     \
									       \
	static struct intel_adsp_hda_dma_data                                  \
		intel_adsp_hda_dma##inst##_data = {};                          \
									       \
	PM_DEVICE_DT_INST_DEFINE(inst, intel_adsp_hda_dma_pm_action);          \
									       \
	DEVICE_DT_INST_DEFINE(inst, &intel_adsp_hda_dma_init,                  \
			      PM_DEVICE_DT_INST_GET(inst),                     \
			      &intel_adsp_hda_dma##inst##_data,                \
			      &intel_adsp_hda_dma##inst##_config, POST_KERNEL, \
			      CONFIG_DMA_INIT_PRIORITY,                        \
			      &intel_adsp_hda_dma_host_out_api);               \
									       \
	static void intel_adsp_hda_dma##inst##_irq_config(void)                \
	{                                                                      \
		IRQ_CONNECT(DT_INST_IRQN(inst),                                \
			    DT_INST_IRQ(inst, priority),                       \
			    intel_adsp_hda_dma_isr,                            \
			    DEVICE_DT_INST_GET(inst),                          \
			    DT_INST_IRQ(inst, sense));                         \
		irq_enable(DT_INST_IRQN(inst));                                \
		IF_ENABLED(CONFIG_SOC_SERIES_INTEL_ADSP_ACE,                   \
			   (ACE_DINT[0].ie[ACE_INTL_HDAHODMA] = 1;))           \
	}

DT_INST_FOREACH_STATUS_OKAY(INTEL_ADSP_HDA_DMA_HOST_OUT_INIT)
```
/content/code_sandbox/drivers/dma/dma_intel_adsp_hda_host_out.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
609
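This instance wires `.chan_filter` into the driver API, which is what backs the generic channel-request call in Zephyr's DMA framework. A hedged sketch of that call path, assuming a NULL filter parameter is acceptable to the target driver (the parameter's meaning is driver-specific):
```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Ask the driver for any channel its chan_filter accepts. For the HDA
 * drivers the filter parameter is interpreted by
 * intel_adsp_hda_dma_chan_filter; passing NULL here is an assumption
 * that it means "no constraint" for the driver in use.
 */
int acquire_and_release_channel(const struct device *dma_dev)
{
	int channel = dma_request_channel(dma_dev, NULL);

	if (channel < 0) {
		return channel; /* no channel satisfied the filter */
	}

	/* ... dma_config() and dma_start() would go here ... */

	dma_release_channel(dma_dev, channel);
	return 0;
}
```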
```unknown
# Infineon XMC4xxx DMA configuration options

config DMA_XMC4XXX
	bool "Infineon xmc4xxx series DMA driver"
	default y
	depends on DT_HAS_INFINEON_XMC4XXX_DMA_ENABLED
	help
	  DMA driver for Infineon xmc4xxx series MCUs.
```
/content/code_sandbox/drivers/dma/Kconfig.xmc4xxx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
68
```unknown
# Atmel SAM0 DMAC configuration options

config DMA_SAM0
	bool "Atmel SAM0 series DMAC driver"
	default y
	depends on DT_HAS_ATMEL_SAM0_DMAC_ENABLED
	help
	  DMA driver for Atmel SAM0 series MCUs.
```
/content/code_sandbox/drivers/dma/Kconfig.sam0
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
59
```c /* * */ /** * @brief Common part of DMA drivers for some NXP SoC. */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <soc.h> #include <zephyr/drivers/dma.h> #include <fsl_dma.h> #include <fsl_inputmux.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/sys/barrier.h> #include <zephyr/sys/util.h> #include <zephyr/sys/util_macro.h> #include <zephyr/drivers/dma/dma_mcux_lpc.h> #define DT_DRV_COMPAT nxp_lpc_dma LOG_MODULE_REGISTER(dma_mcux_lpc, CONFIG_DMA_LOG_LEVEL); struct dma_mcux_lpc_config { DMA_Type *base; uint32_t otrig_base_address; uint32_t itrig_base_address; uint8_t num_of_channels; uint8_t num_of_otrigs; void (*irq_config_func)(const struct device *dev); }; struct channel_data { SDK_ALIGN(dma_descriptor_t dma_descriptor_table[CONFIG_DMA_MCUX_LPC_NUMBER_OF_DESCRIPTORS], FSL_FEATURE_DMA_LINK_DESCRIPTOR_ALIGN_SIZE); dma_handle_t dma_handle; const struct device *dev; void *user_data; dma_callback_t dma_callback; enum dma_channel_direction dir; uint8_t src_inc; uint8_t dst_inc; dma_descriptor_t *curr_descriptor; uint8_t num_of_descriptors; bool descriptors_queued; uint32_t width; bool busy; }; struct dma_otrig { int8_t source_channel; int8_t linked_channel; }; struct dma_mcux_lpc_dma_data { struct channel_data *channel_data; struct dma_otrig *otrig_array; int8_t *channel_index; uint8_t num_channels_used; }; struct k_spinlock configuring_otrigs; #define NXP_LPC_DMA_MAX_XFER ((DMA_CHANNEL_XFERCFG_XFERCOUNT_MASK >> \ DMA_CHANNEL_XFERCFG_XFERCOUNT_SHIFT) + 1) #define DEV_BASE(dev) \ ((DMA_Type *)((const struct dma_mcux_lpc_config *const)(dev)->config)->base) #define DEV_CHANNEL_DATA(dev, ch) \ ((struct channel_data *)(&(((struct dma_mcux_lpc_dma_data *)dev->data)->channel_data[ch]))) #define DEV_DMA_HANDLE(dev, ch) \ ((dma_handle_t *)(&(DEV_CHANNEL_DATA(dev, ch)->dma_handle))) #define EMPTY_OTRIG -1 static void nxp_lpc_dma_callback(dma_handle_t *handle, void *param, bool transferDone, uint32_t intmode) { int ret = -EIO; struct channel_data *data = (struct channel_data *)param; uint32_t channel = handle->channel; if (intmode == kDMA_IntError) { DMA_AbortTransfer(handle); } else if (intmode == kDMA_IntA) { ret = DMA_STATUS_BLOCK; } else { ret = DMA_STATUS_COMPLETE; } data->busy = DMA_ChannelIsBusy(data->dma_handle.base, channel); if (data->dma_callback) { data->dma_callback(data->dev, data->user_data, channel, ret); } } /* Handles DMA interrupts and dispatches to the individual channel */ static void dma_mcux_lpc_irq_handler(const struct device *dev) { DMA_IRQHandle(DEV_BASE(dev)); /* * Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F Store * immediate overlapping exception return operation might vector * to incorrect interrupt */ #if defined __CORTEX_M && (__CORTEX_M == 4U) barrier_dsync_fence_full(); #endif } #ifdef CONFIG_SOC_SERIES_RW6XX static inline void rw6xx_dma_addr_fixup(struct dma_block_config *block) { /* RW6xx AHB design does not route DMA engine through FlexSPI CACHE. * Therefore, to use DMA from the FlexSPI space we must adjust the * source address to use the non cached FlexSPI region. * FlexSPI cached region is at 0x800_0000 (nonsecure) or 0x1800_0000 * (secure). We move the address into non cached region, which is at * 0x4800_0000 or 0x5800_000. 
*/ if (((block->source_address & 0xF8000000) == 0x18000000) || ((block->source_address & 0xF8000000) == 0x8000000)) { block->source_address = block->source_address + 0x40000000; } if (((block->dest_address & 0xF8000000) == 0x18000000) || ((block->dest_address & 0xF8000000) == 0x8000000)) { block->dest_address = block->dest_address + 0x40000000; } } #endif static int dma_mcux_lpc_queue_descriptors(struct channel_data *data, struct dma_block_config *block, uint8_t src_inc, uint8_t dest_inc, bool callback_en) { uint32_t xfer_config = 0U; dma_descriptor_t *next_descriptor = NULL; uint32_t width = data->width; uint32_t max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width; bool setup_extra_descriptor = false; /* intA is used to indicate transfer of a block */ uint8_t enable_a_interrupt; /* intB is used to indicate complete transfer of the list of blocks */ uint8_t enable_b_interrupt; uint8_t reload; struct dma_block_config local_block; bool last_block = false; memcpy(&local_block, block, sizeof(struct dma_block_config)); do { /* Descriptors are queued during dma_configure, do not add more * during dma_reload. */ if (!data->descriptors_queued) { /* Increase the number of descriptors queued */ data->num_of_descriptors++; if (data->num_of_descriptors >= CONFIG_DMA_MCUX_LPC_NUMBER_OF_DESCRIPTORS) { return -ENOMEM; } /* Do we need to queue additional DMA descriptors for this block */ if ((local_block.block_size > max_xfer_bytes) || (local_block.next_block != NULL)) { /* Allocate DMA descriptors */ next_descriptor = &data->dma_descriptor_table[data->num_of_descriptors]; } else { /* Check if this is the last block to transfer */ if (local_block.next_block == NULL) { last_block = true; /* Last descriptor, check if we should setup a * circular chain */ if (!local_block.source_reload_en) { /* No more descriptors */ next_descriptor = NULL; } else if (data->num_of_descriptors == 1) { /* Allocate one more descriptors for * ping-pong transfer */ next_descriptor = &data->dma_descriptor_table[ data->num_of_descriptors]; setup_extra_descriptor = true; } else { /* Loop back to the head */ next_descriptor = data->dma_descriptor_table; } } } } else { /* Descriptors have already been allocated, reuse them as this * is called from a reload function */ next_descriptor = data->curr_descriptor->linkToNextDesc; } /* SPI TX transfers need to queue a DMA descriptor to * indicate an end of transfer. 
Source or destination * address does not need to be change for these * transactions and the transfer width is 4 bytes */ if ((local_block.source_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) && (local_block.dest_addr_adj == DMA_ADDR_ADJ_NO_CHANGE)) { src_inc = 0; dest_inc = 0; width = sizeof(uint32_t); } /* Fire an interrupt after the whole block has been transferred */ if (local_block.block_size > max_xfer_bytes) { enable_a_interrupt = 0; enable_b_interrupt = 0; } else { /* Use intB when this is the end of the block list and transfer */ if (last_block) { enable_a_interrupt = 0; enable_b_interrupt = 1; } else { /* Use intA when we need an interrupt per block * Enable or disable intA based on user configuration */ enable_a_interrupt = callback_en; enable_b_interrupt = 0; } } /* Reload if we have more descriptors */ if (next_descriptor) { reload = 1; } else { reload = 0; } /* Enable interrupt and reload for the descriptor */ xfer_config = DMA_CHANNEL_XFER(reload, 0UL, enable_a_interrupt, enable_b_interrupt, width, src_inc, dest_inc, MIN(local_block.block_size, max_xfer_bytes)); #ifdef CONFIG_SOC_SERIES_RW6XX rw6xx_dma_addr_fixup(&local_block); #endif DMA_SetupDescriptor(data->curr_descriptor, xfer_config, (void *)local_block.source_address, (void *)local_block.dest_address, (void *)next_descriptor); data->curr_descriptor = next_descriptor; if (local_block.block_size > max_xfer_bytes) { local_block.block_size -= max_xfer_bytes; if (src_inc) { local_block.source_address += max_xfer_bytes; } if (dest_inc) { local_block.dest_address += max_xfer_bytes; } } else { local_block.block_size = 0; } } while (local_block.block_size > 0); /* If an additional descriptor is queued for a certain case, set it up here. */ if (setup_extra_descriptor) { /* Increase the number of descriptors queued */ data->num_of_descriptors++; /* Loop back to the head */ next_descriptor = data->dma_descriptor_table; /* Leave curr pointer unchanged so we start queuing new data from * this descriptor */ /* Enable or disable interrupt based on user request. * Reload for the descriptor. 
*/ xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, callback_en, 0U, width, src_inc, dest_inc, MIN(local_block.block_size, max_xfer_bytes)); /* Mark this as invalid */ xfer_config &= ~DMA_CHANNEL_XFERCFG_CFGVALID_MASK; #ifdef CONFIG_SOC_SERIES_RW6XX rw6xx_dma_addr_fixup(&local_block); #endif DMA_SetupDescriptor(data->curr_descriptor, xfer_config, (void *)local_block.source_address, (void *)local_block.dest_address, (void *)next_descriptor); } return 0; } static void dma_mcux_lpc_clear_channel_data(struct channel_data *data) { data->dma_callback = NULL; data->dir = 0; data->src_inc = 0; data->dst_inc = 0; data->descriptors_queued = false; data->num_of_descriptors = 0; data->curr_descriptor = NULL; data->width = 0; } /* Configure a channel */ static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel, struct dma_config *config) { const struct dma_mcux_lpc_config *dev_config; dma_handle_t *p_handle; uint32_t xfer_config = 0U; struct channel_data *data; struct dma_mcux_lpc_dma_data *dma_data; struct dma_block_config *block_config; uint32_t virtual_channel; uint8_t otrig_index; uint8_t src_inc = 1, dst_inc = 1; bool is_periph = true; uint8_t width; uint32_t max_xfer_bytes; uint8_t reload = 0; bool complete_callback; if (NULL == dev || NULL == config) { return -EINVAL; } dev_config = dev->config; dma_data = dev->data; block_config = config->head_block; /* The DMA controller deals with just one transfer * size, though the API provides separate sizes * for source and dest. So assert that the source * and dest sizes are the same. */ assert(config->dest_data_size == config->source_data_size); width = config->dest_data_size; /* If skip is set on both source and destination * then skip by the same amount on both sides */ if (block_config->source_gather_en && block_config->dest_scatter_en) { assert(block_config->source_gather_interval == block_config->dest_scatter_interval); } max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width; /* * Check if circular mode is requested. 
*/ if (config->head_block->source_reload_en || config->head_block->dest_reload_en) { reload = 1; } /* Check if have a free slot to store DMA channel data */ if (dma_data->num_channels_used > dev_config->num_of_channels) { LOG_ERR("out of DMA channel %d", channel); return -EINVAL; } /* Check if the dma channel number is valid */ if (channel >= dev_config->num_of_channels) { LOG_ERR("invalid DMA channel number %d", channel); return -EINVAL; } if (config->source_data_size != 4U && config->source_data_size != 2U && config->source_data_size != 1U) { LOG_ERR("Source unit size error, %d", config->source_data_size); return -EINVAL; } if (config->dest_data_size != 4U && config->dest_data_size != 2U && config->dest_data_size != 1U) { LOG_ERR("Dest unit size error, %d", config->dest_data_size); return -EINVAL; } switch (config->channel_direction) { case MEMORY_TO_MEMORY: is_periph = false; if (block_config->source_gather_en) { src_inc = block_config->source_gather_interval / width; /* The current controller only supports incrementing the * source and destination up to 4 time transfer width */ if ((src_inc > 4) || (src_inc == 3)) { return -EINVAL; } } if (block_config->dest_scatter_en) { dst_inc = block_config->dest_scatter_interval / width; /* The current controller only supports incrementing the * source and destination up to 4 time transfer width */ if ((dst_inc > 4) || (dst_inc == 3)) { return -EINVAL; } } break; case MEMORY_TO_PERIPHERAL: /* Set the source increment value */ if (block_config->source_gather_en) { src_inc = block_config->source_gather_interval / width; /* The current controller only supports incrementing the * source and destination up to 4 time transfer width */ if ((src_inc > 4) || (src_inc == 3)) { return -EINVAL; } } dst_inc = 0; break; case PERIPHERAL_TO_MEMORY: src_inc = 0; /* Set the destination increment value */ if (block_config->dest_scatter_en) { dst_inc = block_config->dest_scatter_interval / width; /* The current controller only supports incrementing the * source and destination up to 4 time transfer width */ if ((dst_inc > 4) || (dst_inc == 3)) { return -EINVAL; } } break; default: LOG_ERR("not support transfer direction"); return -EINVAL; } /* Check if user does not want to increment address */ if (block_config->source_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) { src_inc = 0; } if (block_config->dest_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) { dst_inc = 0; } /* If needed, allocate a slot to store dma channel data */ if (dma_data->channel_index[channel] == -1) { dma_data->channel_index[channel] = dma_data->num_channels_used; dma_data->num_channels_used++; /* Get the slot number that has the dma channel data */ virtual_channel = dma_data->channel_index[channel]; /* dma channel data */ p_handle = DEV_DMA_HANDLE(dev, virtual_channel); data = DEV_CHANNEL_DATA(dev, virtual_channel); DMA_CreateHandle(p_handle, DEV_BASE(dev), channel); DMA_SetCallback(p_handle, nxp_lpc_dma_callback, (void *)data); } else { /* Get the slot number that has the dma channel data */ virtual_channel = dma_data->channel_index[channel]; /* dma channel data */ p_handle = DEV_DMA_HANDLE(dev, virtual_channel); data = DEV_CHANNEL_DATA(dev, virtual_channel); } dma_mcux_lpc_clear_channel_data(data); data->dir = config->channel_direction; /* Save the increment values for the reload function */ data->src_inc = src_inc; data->dst_inc = dst_inc; if (data->busy) { DMA_AbortTransfer(p_handle); } LOG_DBG("channel is %d", p_handle->channel); k_spinlock_key_t otrigs_key = k_spin_lock(&configuring_otrigs); data->width = width; 
if (config->source_chaining_en || config->dest_chaining_en) { /* Chaining is enabled */ if (!dev_config->otrig_base_address || !dev_config->itrig_base_address) { LOG_ERR("Calling function tried to setup up channel" " chaining but the current platform is missing" " the correct trigger base addresses."); k_spin_unlock(&configuring_otrigs, otrigs_key); return -ENXIO; } LOG_DBG("link dma 0 channel %d with channel %d", channel, config->linked_channel); uint8_t is_otrig_available = 0; for (otrig_index = 0; otrig_index < dev_config->num_of_otrigs; ++otrig_index) { if (dma_data->otrig_array[otrig_index].linked_channel == EMPTY_OTRIG || dma_data->otrig_array[otrig_index].source_channel == channel) { if (dma_data->otrig_array[otrig_index].source_channel == channel) { int ChannelToDisable = dma_data->otrig_array[otrig_index].linked_channel; DMA_DisableChannel(DEV_BASE(dev), ChannelToDisable); DEV_BASE(dev)->CHANNEL[ChannelToDisable].CFG &= ~DMA_CHANNEL_CFG_HWTRIGEN_MASK; } is_otrig_available = 1; break; } } if (!is_otrig_available) { LOG_ERR("Calling function tried to setup up multiple" " channels to be configured but the dma driver has" " run out of OTrig Muxes"); k_spin_unlock(&configuring_otrigs, otrigs_key); return -EINVAL; } /* Since INPUTMUX handles the dma signals and * must be hardware triggered via the INPUTMUX * hardware. */ DEV_BASE(dev)->CHANNEL[config->linked_channel].CFG |= DMA_CHANNEL_CFG_HWTRIGEN_MASK; DMA_EnableChannel(DEV_BASE(dev), config->linked_channel); /* Link OTrig Muxes with passed-in channels */ INPUTMUX_AttachSignal(INPUTMUX, otrig_index, dev_config->otrig_base_address + channel); INPUTMUX_AttachSignal(INPUTMUX, config->linked_channel, dev_config->itrig_base_address + otrig_index); /* Otrig is now connected with linked channel */ dma_data->otrig_array[otrig_index].source_channel = channel; dma_data->otrig_array[otrig_index].linked_channel = config->linked_channel; } else { /* Chaining is _NOT_ enabled, Freeing connected OTrig */ for (otrig_index = 0; otrig_index < dev_config->num_of_otrigs; otrig_index++) { if (dma_data->otrig_array[otrig_index].linked_channel != EMPTY_OTRIG && (channel == dma_data->otrig_array[otrig_index].source_channel)) { int ChannelToDisable = dma_data->otrig_array[otrig_index].linked_channel; DMA_DisableChannel(DEV_BASE(dev), ChannelToDisable); DEV_BASE(dev)->CHANNEL[ChannelToDisable].CFG &= ~DMA_CHANNEL_CFG_HWTRIGEN_MASK; dma_data->otrig_array[otrig_index].linked_channel = EMPTY_OTRIG; dma_data->otrig_array[otrig_index].source_channel = EMPTY_OTRIG; break; } } } k_spin_unlock(&configuring_otrigs, otrigs_key); complete_callback = config->complete_callback_en; /* Check if we need to queue DMA descriptors */ if ((block_config->block_size > max_xfer_bytes) || (block_config->next_block != NULL)) { /* Allocate a DMA descriptor */ data->curr_descriptor = data->dma_descriptor_table; if (block_config->block_size > max_xfer_bytes) { /* Disable interrupt as this is not the entire data. * Reload for the descriptor */ xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, 0UL, 0UL, width, src_inc, dst_inc, max_xfer_bytes); } else { /* Enable INTA interrupt if user requested DMA for each block. * Reload for the descriptor. 
*/ xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, complete_callback, 0UL, width, src_inc, dst_inc, block_config->block_size); } } else { /* Enable interrupt for the descriptor */ xfer_config = DMA_CHANNEL_XFER(0UL, 0UL, 1UL, 0UL, width, src_inc, dst_inc, block_config->block_size); } /* DMA controller requires that the address be aligned to transfer size */ assert(block_config->source_address == ROUND_UP(block_config->source_address, width)); assert(block_config->dest_address == ROUND_UP(block_config->dest_address, width)); #ifdef CONFIG_SOC_SERIES_RW6XX rw6xx_dma_addr_fixup(block_config); #endif DMA_SubmitChannelTransferParameter(p_handle, xfer_config, (void *)block_config->source_address, (void *)block_config->dest_address, (void *)data->curr_descriptor); /* Start queuing DMA descriptors */ if (data->curr_descriptor) { if (block_config->block_size > max_xfer_bytes) { /* Queue additional DMA descriptors because the amount of data to * be transferred is greater that the DMA descriptors max XFERCOUNT. */ struct dma_block_config local_block = { 0 }; if (src_inc) { local_block.source_address = block_config->source_address + max_xfer_bytes; } else { local_block.source_address = block_config->source_address; } if (dst_inc) { local_block.dest_address = block_config->dest_address + max_xfer_bytes; } else { local_block.dest_address = block_config->dest_address; } local_block.block_size = block_config->block_size - max_xfer_bytes; local_block.next_block = block_config->next_block; local_block.source_reload_en = reload; if (block_config->next_block == NULL) { /* This is the last block, enable callback. */ complete_callback = true; } if (dma_mcux_lpc_queue_descriptors(data, &local_block, src_inc, dst_inc, complete_callback)) { return -ENOMEM; } } /* Get the next block to transfer */ block_config = block_config->next_block; while (block_config != NULL) { block_config->source_reload_en = reload; /* DMA controller requires that the address be aligned to transfer size */ assert(block_config->source_address == ROUND_UP(block_config->source_address, width)); assert(block_config->dest_address == ROUND_UP(block_config->dest_address, width)); if (block_config->next_block == NULL) { /* This is the last block. Enable callback if not enabled. 
*/ complete_callback = true; } if (dma_mcux_lpc_queue_descriptors(data, block_config, src_inc, dst_inc, complete_callback)) { return -ENOMEM; } /* Get the next block and start queuing descriptors */ block_config = block_config->next_block; } /* We have finished queuing DMA descriptors */ data->descriptors_queued = true; } if (config->dma_slot) { uint32_t cfg_reg = 0; /* User supplied manual trigger configuration */ if (config->dma_slot & LPC_DMA_PERIPH_REQ_EN) { cfg_reg |= DMA_CHANNEL_CFG_PERIPHREQEN_MASK; } if (config->dma_slot & LPC_DMA_HWTRIG_EN) { /* Setup hardware trigger */ cfg_reg |= DMA_CHANNEL_CFG_HWTRIGEN_MASK; if (config->dma_slot & LPC_DMA_TRIGTYPE_LEVEL) { cfg_reg |= DMA_CHANNEL_CFG_TRIGTYPE_MASK; } if (config->dma_slot & LPC_DMA_TRIGPOL_HIGH_RISING) { cfg_reg |= DMA_CHANNEL_CFG_TRIGPOL_MASK; } if (config->dma_slot & LPC_DMA_TRIGBURST) { cfg_reg |= DMA_CHANNEL_CFG_TRIGBURST_MASK; cfg_reg |= DMA_CHANNEL_CFG_BURSTPOWER( LPC_DMA_GET_BURSTPOWER(config->dma_slot)); } } p_handle->base->CHANNEL[p_handle->channel].CFG = cfg_reg; } else if (is_periph) { DMA_EnableChannelPeriphRq(p_handle->base, p_handle->channel); } else { DMA_DisableChannelPeriphRq(p_handle->base, p_handle->channel); } DMA_SetChannelPriority(p_handle->base, p_handle->channel, config->channel_priority); data->busy = false; if (config->dma_callback) { LOG_DBG("INSTALL call back on channel %d", channel); data->user_data = config->user_data; data->dma_callback = config->dma_callback; data->dev = dev; } return 0; } static int dma_mcux_lpc_start(const struct device *dev, uint32_t channel) { struct dma_mcux_lpc_dma_data *dev_data = dev->data; int8_t virtual_channel = dev_data->channel_index[channel]; struct channel_data *data = DEV_CHANNEL_DATA(dev, virtual_channel); LOG_DBG("START TRANSFER"); LOG_DBG("DMA CTRL 0x%x", DEV_BASE(dev)->CTRL); data->busy = true; DMA_StartTransfer(DEV_DMA_HANDLE(dev, virtual_channel)); return 0; } static int dma_mcux_lpc_stop(const struct device *dev, uint32_t channel) { struct dma_mcux_lpc_dma_data *dev_data = dev->data; int8_t virtual_channel = dev_data->channel_index[channel]; struct channel_data *data = DEV_CHANNEL_DATA(dev, virtual_channel); if (!data->busy) { return 0; } DMA_AbortTransfer(DEV_DMA_HANDLE(dev, virtual_channel)); DMA_DisableChannel(DEV_BASE(dev), channel); data->busy = false; return 0; } static int dma_mcux_lpc_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst, size_t size) { struct dma_mcux_lpc_dma_data *dev_data = dev->data; int8_t virtual_channel = dev_data->channel_index[channel]; struct channel_data *data = DEV_CHANNEL_DATA(dev, virtual_channel); uint32_t xfer_config = 0U; /* DMA controller requires that the address be aligned to transfer size */ assert(src == ROUND_UP(src, data->width)); assert(dst == ROUND_UP(dst, data->width)); if (!data->descriptors_queued) { dma_handle_t *p_handle; p_handle = DEV_DMA_HANDLE(dev, virtual_channel); /* Only one buffer, enable interrupt */ xfer_config = DMA_CHANNEL_XFER(0UL, 0UL, 1UL, 0UL, data->width, data->src_inc, data->dst_inc, size); DMA_SubmitChannelTransferParameter(p_handle, xfer_config, (void *)src, (void *)dst, NULL); } else { struct dma_block_config local_block = { 0 }; local_block.source_address = src; local_block.dest_address = dst; local_block.block_size = size; local_block.source_reload_en = 1; dma_mcux_lpc_queue_descriptors(data, &local_block, data->src_inc, data->dst_inc, true); } return 0; } static int dma_mcux_lpc_get_status(const struct device *dev, uint32_t channel, struct dma_status 
*status) { const struct dma_mcux_lpc_config *config = dev->config; struct dma_mcux_lpc_dma_data *dev_data = dev->data; int8_t virtual_channel = dev_data->channel_index[channel]; struct channel_data *data = DEV_CHANNEL_DATA(dev, virtual_channel); if (channel > config->num_of_channels) { return -EINVAL; } /* If channel is actually busy or the virtual channel is just not set up */ if (data->busy && (virtual_channel != -1)) { status->busy = true; status->pending_length = DMA_GetRemainingBytes(DEV_BASE(dev), channel); } else { status->busy = false; status->pending_length = 0; } status->dir = data->dir; LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CTRL); LOG_DBG("DMA INT 0x%x", DEV_BASE(dev)->INTSTAT); return 0; } static int dma_mcux_lpc_init(const struct device *dev) { const struct dma_mcux_lpc_config *config = dev->config; struct dma_mcux_lpc_dma_data *data = dev->data; /* Indicate that the Otrig Muxes are not connected */ for (int i = 0; i < config->num_of_otrigs; i++) { data->otrig_array[i].source_channel = EMPTY_OTRIG; data->otrig_array[i].linked_channel = EMPTY_OTRIG; } /* * Initialize to -1 to indicate dma channel does not have a slot * assigned to store dma channel data */ for (int i = 0; i < config->num_of_channels; i++) { data->channel_index[i] = -1; } data->num_channels_used = 0; DMA_Init(DEV_BASE(dev)); INPUTMUX_Init(INPUTMUX); config->irq_config_func(dev); return 0; } static const struct dma_driver_api dma_mcux_lpc_api = { .config = dma_mcux_lpc_configure, .start = dma_mcux_lpc_start, .stop = dma_mcux_lpc_stop, .reload = dma_mcux_lpc_reload, .get_status = dma_mcux_lpc_get_status, }; #define DMA_MCUX_LPC_CONFIG_FUNC(n) \ static void dma_mcux_lpc_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ dma_mcux_lpc_irq_handler, DEVICE_DT_INST_GET(n), 0);\ \ irq_enable(DT_INST_IRQN(n)); \ } #define DMA_MCUX_LPC_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = dma_mcux_lpc_config_func_##n #define DMA_MCUX_LPC_INIT_CFG(n) \ DMA_MCUX_LPC_DECLARE_CFG(n, \ DMA_MCUX_LPC_IRQ_CFG_FUNC_INIT(n)) #define DMA_MCUX_LPC_NUM_USED_CHANNELS(n) \ COND_CODE_0(CONFIG_DMA_MCUX_LPC_NUMBER_OF_CHANNELS_ALLOCATED, \ (DT_INST_PROP(n, dma_channels)), \ (MIN(CONFIG_DMA_MCUX_LPC_NUMBER_OF_CHANNELS_ALLOCATED, \ DT_INST_PROP(n, dma_channels)))) #define DMA_MCUX_LPC_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct dma_mcux_lpc_config dma_##n##_config = { \ .base = (DMA_Type *)DT_INST_REG_ADDR(n), \ .num_of_channels = DT_INST_PROP(n, dma_channels), \ .num_of_otrigs = DT_INST_PROP_OR(n, nxp_dma_num_of_otrigs, 0), \ .otrig_base_address = DT_INST_PROP_OR(n, nxp_dma_otrig_base_address, 0x0), \ .itrig_base_address = DT_INST_PROP_OR(n, nxp_dma_itrig_base_address, 0x0), \ IRQ_FUNC_INIT \ } #define DMA_INIT(n) \ \ static const struct dma_mcux_lpc_config dma_##n##_config; \ \ static struct channel_data dma_##n##_channel_data_arr \ [DMA_MCUX_LPC_NUM_USED_CHANNELS(n)] = {0}; \ \ static struct dma_otrig dma_##n##_otrig_arr \ [DT_INST_PROP_OR(n, nxp_dma_num_of_otrigs, 0)]; \ \ static int8_t \ dma_##n##_channel_index_arr \ [DT_INST_PROP(n, dma_channels)] = {0}; \ \ static struct dma_mcux_lpc_dma_data dma_data_##n = { \ .channel_data = dma_##n##_channel_data_arr, \ .channel_index = dma_##n##_channel_index_arr, \ .otrig_array = dma_##n##_otrig_arr, \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ &dma_mcux_lpc_init, \ NULL, \ &dma_data_##n, &dma_##n##_config, \ PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, \ &dma_mcux_lpc_api); \ \ DMA_MCUX_LPC_CONFIG_FUNC(n) \ DMA_MCUX_LPC_INIT_CFG(n); 
DT_INST_FOREACH_STATUS_OKAY(DMA_INIT) ```
/content/code_sandbox/drivers/dma/dma_mcux_lpc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,711
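The configure hook above enforces equal source and destination data sizes (1, 2 or 4 bytes) and addresses aligned to that width. A minimal caller-side sketch of a single-block memory-to-memory transfer under those constraints; buffer and function names are illustrative only:
```c
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* 4-byte-wide transfer, so buffers are 4-byte aligned as the driver's
 * ROUND_UP asserts require.
 */
static uint32_t src_buf[16] __aligned(4);
static uint32_t dst_buf[16] __aligned(4);

static void xfer_done(const struct device *dev, void *user_data,
		      uint32_t channel, int status)
{
	/* status >= 0 on success (block or full completion), < 0 on error */
}

int start_m2m(const struct device *dma_dev, uint32_t channel)
{
	struct dma_block_config block = {
		.source_address = (uint32_t)src_buf,
		.dest_address = (uint32_t)dst_buf,
		.block_size = sizeof(src_buf),
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 4,	/* must equal dest_data_size here */
		.dest_data_size = 4,
		.block_count = 1,
		.head_block = &block,
		.dma_callback = xfer_done,
	};
	int ret = dma_config(dma_dev, channel, &cfg);

	return ret ? ret : dma_start(dma_dev, channel);
}
```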
```c /* * */ #include <zephyr/drivers/dma.h> #include <zephyr/logging/log.h> #include <zephyr/cache.h> /* used for driver binding */ #define DT_DRV_COMPAT nxp_sof_host_dma /* macros used to parse DTS properties */ #define IDENTITY_VARGS(V, ...) IDENTITY(V) #define _SOF_HOST_DMA_CHANNEL_INDEX_ARRAY(inst)\ LISTIFY(DT_INST_PROP_OR(inst, dma_channels, 0), IDENTITY_VARGS, (,)) #define _SOF_HOST_DMA_CHANNEL_DECLARE(idx) {} #define SOF_HOST_DMA_CHANNELS_DECLARE(inst)\ FOR_EACH(_SOF_HOST_DMA_CHANNEL_DECLARE,\ (,), _SOF_HOST_DMA_CHANNEL_INDEX_ARRAY(inst)) LOG_MODULE_REGISTER(nxp_sof_host_dma); /* note: This driver doesn't attempt to provide * a generic software-based DMA engine implementation. * As its name suggests, its only usage is in SOF * (Sound Open Firmware) for NXP plaforms which are * able to access the host memory directly from the * core on which the firmware is running. */ enum channel_state { CHAN_STATE_INIT = 0, CHAN_STATE_CONFIGURED, }; struct sof_host_dma_channel { uint32_t src; uint32_t dest; uint32_t size; uint32_t direction; enum channel_state state; }; struct sof_host_dma_data { /* this needs to be first */ struct dma_context ctx; atomic_t channel_flags; struct sof_host_dma_channel *channels; }; static int channel_change_state(struct sof_host_dma_channel *chan, enum channel_state next) { enum channel_state prev = chan->state; /* validate transition */ switch (prev) { case CHAN_STATE_INIT: case CHAN_STATE_CONFIGURED: if (next != CHAN_STATE_CONFIGURED) { return -EPERM; } break; default: LOG_ERR("invalid channel previous state: %d", prev); return -EINVAL; } chan->state = next; return 0; } static int sof_host_dma_reload(const struct device *dev, uint32_t chan_id, uint32_t src, uint32_t dst, size_t size) { ARG_UNUSED(src); ARG_UNUSED(dst); ARG_UNUSED(size); struct sof_host_dma_data *data; struct sof_host_dma_channel *chan; int ret; data = dev->data; if (chan_id >= data->ctx.dma_channels) { LOG_ERR("channel %d is not a valid channel ID", chan_id); return -EINVAL; } /* fetch channel data */ chan = &data->channels[chan_id]; /* validate state */ if (chan->state != CHAN_STATE_CONFIGURED) { LOG_ERR("attempting to reload unconfigured DMA channel %d", chan_id); return -EINVAL; } if (chan->direction == HOST_TO_MEMORY) { /* the host may have modified the region we're about to copy * to local memory. In this case, the data cache holds stale * data so invalidate it to force a read from the main memory. */ ret = sys_cache_data_invd_range(UINT_TO_POINTER(chan->src), chan->size); if (ret < 0) { LOG_ERR("failed to invalidate data cache range"); return ret; } } memcpy(UINT_TO_POINTER(chan->dest), UINT_TO_POINTER(chan->src), chan->size); if (chan->direction == MEMORY_TO_HOST) { /* force range to main memory so that host doesn't read any * stale data. 
*/ ret = sys_cache_data_flush_range(UINT_TO_POINTER(chan->dest), chan->size); if (ret < 0) { LOG_ERR("failed to flush data cache range"); return ret; } } return 0; } static int sof_host_dma_config(const struct device *dev, uint32_t chan_id, struct dma_config *config) { struct sof_host_dma_data *data; struct sof_host_dma_channel *chan; int ret; data = dev->data; if (chan_id >= data->ctx.dma_channels) { LOG_ERR("channel %d is not a valid channel ID", chan_id); return -EINVAL; } /* fetch channel data */ chan = &data->channels[chan_id]; /* attempt a state transition */ ret = channel_change_state(chan, CHAN_STATE_CONFIGURED); if (ret < 0) { LOG_ERR("failed to change channel %d's state to CONFIGURED", chan_id); return ret; } /* SG configurations are not currently supported */ if (config->block_count != 1) { LOG_ERR("invalid number of blocks: %d", config->block_count); return -EINVAL; } if (!config->head_block->source_address) { LOG_ERR("got NULL source address"); return -EINVAL; } if (!config->head_block->dest_address) { LOG_ERR("got NULL destination address"); return -EINVAL; } if (!config->head_block->block_size) { LOG_ERR("got 0 bytes to copy"); return -EINVAL; } /* for now, only H2M and M2H transfers are supported */ if (config->channel_direction != HOST_TO_MEMORY && config->channel_direction != MEMORY_TO_HOST) { LOG_ERR("invalid channel direction: %d", config->channel_direction); return -EINVAL; } /* latch onto the passed configuration */ chan->src = config->head_block->source_address; chan->dest = config->head_block->dest_address; chan->size = config->head_block->block_size; chan->direction = config->channel_direction; LOG_DBG("configured channel %d with SRC 0x%x DST 0x%x SIZE 0x%x", chan_id, chan->src, chan->dest, chan->size); return 0; } static int sof_host_dma_start(const struct device *dev, uint32_t chan_id) { /* nothing to be done here */ return 0; } static int sof_host_dma_stop(const struct device *dev, uint32_t chan_id) { /* nothing to be done here */ return 0; } static int sof_host_dma_suspend(const struct device *dev, uint32_t chan_id) { /* nothing to be done here */ return 0; } static int sof_host_dma_resume(const struct device *dev, uint32_t chan_id) { /* nothing to be done here */ return 0; } static int sof_host_dma_get_status(const struct device *dev, uint32_t chan_id, struct dma_status *stat) { /* nothing to be done here */ return 0; } static int sof_host_dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *val) { switch (type) { case DMA_ATTR_COPY_ALIGNMENT: case DMA_ATTR_BUFFER_SIZE_ALIGNMENT: case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT: *val = CONFIG_DMA_NXP_SOF_HOST_DMA_ALIGN; break; default: LOG_ERR("invalid attribute type: %d", type); return -EINVAL; } return 0; } static const struct dma_driver_api sof_host_dma_api = { .reload = sof_host_dma_reload, .config = sof_host_dma_config, .start = sof_host_dma_start, .stop = sof_host_dma_stop, .suspend = sof_host_dma_suspend, .resume = sof_host_dma_resume, .get_status = sof_host_dma_get_status, .get_attribute = sof_host_dma_get_attribute, }; static int sof_host_dma_init(const struct device *dev) { struct sof_host_dma_data *data = dev->data; data->channel_flags = ATOMIC_INIT(0); data->ctx.atomic = &data->channel_flags; return 0; } static struct sof_host_dma_channel channels[] = { SOF_HOST_DMA_CHANNELS_DECLARE(0), }; static struct sof_host_dma_data sof_host_dma_data = { .ctx.magic = DMA_MAGIC, .ctx.dma_channels = ARRAY_SIZE(channels), .channels = channels, }; /* assumption: only 1 SOF_HOST_DMA instance */ 
DEVICE_DT_INST_DEFINE(0, sof_host_dma_init, NULL, &sof_host_dma_data, NULL, PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &sof_host_dma_api); ```
/content/code_sandbox/drivers/dma/dma_nxp_sof_host_dma.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,770
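The reload hook's cache handling is the core of this pseudo-DMA: invalidate before reading host-written memory, flush after writing memory the host will read. The same pattern, reduced to one self-contained helper (the function name is illustrative):
```c
#include <string.h>
#include <zephyr/cache.h>

/* Copy between host-shared and local memory with the cache maintenance
 * the driver above performs. Assumes src was last written by the host
 * (our data cache may hold stale lines) and dst will next be read by
 * the host (dirty lines must reach main memory first).
 */
int host_coherent_copy(void *dst, const void *src, size_t size)
{
	int ret;

	/* Drop stale cached copies before reading host-owned memory. */
	ret = sys_cache_data_invd_range((void *)src, size);
	if (ret < 0) {
		return ret;
	}

	memcpy(dst, src, size);

	/* Push our writes to main memory so the host sees fresh data. */
	return sys_cache_data_flush_range(dst, size);
}
```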
```unknown
config DMA_RPI_PICO
	bool "Raspberry Pi Pico DMA driver"
	default y
	depends on DT_HAS_RASPBERRYPI_PICO_DMA_ENABLED
	select PICOSDK_USE_DMA
	select PICOSDK_USE_CLAIM
	depends on RESET
	help
	  DMA driver for Raspberry Pi Pico.
```
/content/code_sandbox/drivers/dma/Kconfig.rpi_pico
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
68
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/drivers/video.h> #define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(video_sw_generator); #define VIDEO_PATTERN_COLOR_BAR 0 #define VIDEO_PATTERN_FPS 30 struct video_sw_generator_data { const struct device *dev; struct video_format fmt; struct k_fifo fifo_in; struct k_fifo fifo_out; struct k_work_delayable buf_work; struct k_work_sync work_sync; int pattern; bool ctrl_hflip; bool ctrl_vflip; struct k_poll_signal *signal; }; static const struct video_format_cap fmts[] = {{ .pixelformat = VIDEO_PIX_FMT_RGB565, .width_min = 64, .width_max = 1920, .height_min = 64, .height_max = 1080, .width_step = 1, .height_step = 1, }, { .pixelformat = VIDEO_PIX_FMT_XRGB32, .width_min = 64, .width_max = 1920, .height_min = 64, .height_max = 1080, .width_step = 1, .height_step = 1, }, {0}}; static int video_sw_generator_set_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct video_sw_generator_data *data = dev->data; int i = 0; if (ep != VIDEO_EP_OUT) { return -EINVAL; } for (i = 0; i < ARRAY_SIZE(fmts); ++i) { if (fmt->pixelformat == fmts[i].pixelformat && fmt->width >= fmts[i].width_min && fmt->width <= fmts[i].width_max && fmt->height >= fmts[i].height_min && fmt->height <= fmts[i].height_max) { break; } } if (i == ARRAY_SIZE(fmts)) { LOG_ERR("Unsupported pixel format or resolution"); return -ENOTSUP; } data->fmt = *fmt; return 0; } static int video_sw_generator_get_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct video_sw_generator_data *data = dev->data; if (ep != VIDEO_EP_OUT) { return -EINVAL; } *fmt = data->fmt; return 0; } static int video_sw_generator_stream_start(const struct device *dev) { struct video_sw_generator_data *data = dev->data; k_work_schedule(&data->buf_work, K_MSEC(1000 / VIDEO_PATTERN_FPS)); return 0; } static int video_sw_generator_stream_stop(const struct device *dev) { struct video_sw_generator_data *data = dev->data; k_work_cancel_delayable_sync(&data->buf_work, &data->work_sync); return 0; } /* Black, Blue, Red, Purple, Green, Aqua, Yellow, White */ uint16_t rgb565_colorbar_value[] = {0x0000, 0x001F, 0xF800, 0xF81F, 0x07E0, 0x07FF, 0xFFE0, 0xFFFF}; uint32_t xrgb32_colorbar_value[] = {0xFF000000, 0xFF0000FF, 0xFFFF0000, 0xFFFF00FF, 0xFF00FF00, 0xFF00FFFF, 0xFFFFFF00, 0xFFFFFFFF}; static void __fill_buffer_colorbar(struct video_sw_generator_data *data, struct video_buffer *vbuf) { int bw = data->fmt.width / 8; int h, w, i = 0; for (h = 0; h < data->fmt.height; h++) { for (w = 0; w < data->fmt.width; w++) { int color_idx = data->ctrl_vflip ? 
7 - w / bw : w / bw; if (data->fmt.pixelformat == VIDEO_PIX_FMT_RGB565) { uint16_t *pixel = (uint16_t *)&vbuf->buffer[i]; *pixel = rgb565_colorbar_value[color_idx]; i += 2; } else if (data->fmt.pixelformat == VIDEO_PIX_FMT_XRGB32) { uint32_t *pixel = (uint32_t *)&vbuf->buffer[i]; *pixel = xrgb32_colorbar_value[color_idx]; i += 4; } } } vbuf->timestamp = k_uptime_get_32(); vbuf->bytesused = i; } static void __buffer_work(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct video_sw_generator_data *data; struct video_buffer *vbuf; data = CONTAINER_OF(dwork, struct video_sw_generator_data, buf_work); k_work_reschedule(&data->buf_work, K_MSEC(1000 / VIDEO_PATTERN_FPS)); vbuf = k_fifo_get(&data->fifo_in, K_NO_WAIT); if (vbuf == NULL) { return; } switch (data->pattern) { case VIDEO_PATTERN_COLOR_BAR: __fill_buffer_colorbar(data, vbuf); break; } k_fifo_put(&data->fifo_out, vbuf); if (IS_ENABLED(CONFIG_POLL) && data->signal) { k_poll_signal_raise(data->signal, VIDEO_BUF_DONE); } k_yield(); } static int video_sw_generator_enqueue(const struct device *dev, enum video_endpoint_id ep, struct video_buffer *vbuf) { struct video_sw_generator_data *data = dev->data; if (ep != VIDEO_EP_OUT) { return -EINVAL; } k_fifo_put(&data->fifo_in, vbuf); return 0; } static int video_sw_generator_dequeue(const struct device *dev, enum video_endpoint_id ep, struct video_buffer **vbuf, k_timeout_t timeout) { struct video_sw_generator_data *data = dev->data; if (ep != VIDEO_EP_OUT) { return -EINVAL; } *vbuf = k_fifo_get(&data->fifo_out, timeout); if (*vbuf == NULL) { return -EAGAIN; } return 0; } static int video_sw_generator_flush(const struct device *dev, enum video_endpoint_id ep, bool cancel) { struct video_sw_generator_data *data = dev->data; struct video_buffer *vbuf; if (!cancel) { /* wait for all buffer to be processed */ do { k_sleep(K_MSEC(1)); } while (!k_fifo_is_empty(&data->fifo_in)); } else { while ((vbuf = k_fifo_get(&data->fifo_in, K_NO_WAIT))) { k_fifo_put(&data->fifo_out, vbuf); if (IS_ENABLED(CONFIG_POLL) && data->signal) { k_poll_signal_raise(data->signal, VIDEO_BUF_ABORTED); } } } return 0; } static int video_sw_generator_get_caps(const struct device *dev, enum video_endpoint_id ep, struct video_caps *caps) { caps->format_caps = fmts; caps->min_vbuf_count = 0; return 0; } #ifdef CONFIG_POLL static int video_sw_generator_set_signal(const struct device *dev, enum video_endpoint_id ep, struct k_poll_signal *signal) { struct video_sw_generator_data *data = dev->data; if (data->signal && signal != NULL) { return -EALREADY; } data->signal = signal; return 0; } #endif static inline int video_sw_generator_set_ctrl(const struct device *dev, unsigned int cid, void *value) { struct video_sw_generator_data *data = dev->data; switch (cid) { case VIDEO_CID_VFLIP: data->ctrl_vflip = (bool)value; break; default: return -ENOTSUP; } return 0; } static const struct video_driver_api video_sw_generator_driver_api = { .set_format = video_sw_generator_set_fmt, .get_format = video_sw_generator_get_fmt, .stream_start = video_sw_generator_stream_start, .stream_stop = video_sw_generator_stream_stop, .flush = video_sw_generator_flush, .enqueue = video_sw_generator_enqueue, .dequeue = video_sw_generator_dequeue, .get_caps = video_sw_generator_get_caps, .set_ctrl = video_sw_generator_set_ctrl, #ifdef CONFIG_POLL .set_signal = video_sw_generator_set_signal, #endif }; static struct video_sw_generator_data video_sw_generator_data_0 = { .fmt.width = 320, .fmt.height = 160, .fmt.pitch = 320 
* 2, .fmt.pixelformat = VIDEO_PIX_FMT_RGB565, }; static int video_sw_generator_init(const struct device *dev) { struct video_sw_generator_data *data = dev->data; data->dev = dev; k_fifo_init(&data->fifo_in); k_fifo_init(&data->fifo_out); k_work_init_delayable(&data->buf_work, __buffer_work); return 0; } DEVICE_DEFINE(video_sw_generator, "VIDEO_SW_GENERATOR", &video_sw_generator_init, NULL, &video_sw_generator_data_0, NULL, POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, &video_sw_generator_driver_api); ```
/content/code_sandbox/drivers/video/video_sw_generator.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,041
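Against this generator, the standard Zephyr video capture sequence is set-format, enqueue, start stream, dequeue. A sketch of one pass, assuming the video buffer pool Kconfig options are sized for at least one 320x160 RGB565 frame; format values match the generator's defaults:
```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/video.h>

int grab_one_frame(void)
{
	/* Name matches the DEVICE_DEFINE() in the generator above. */
	const struct device *cam = device_get_binding("VIDEO_SW_GENERATOR");
	struct video_format fmt = {
		.pixelformat = VIDEO_PIX_FMT_RGB565,
		.width = 320,
		.height = 160,
		.pitch = 320 * 2,
	};
	struct video_buffer *buf;
	int ret;

	if (cam == NULL) {
		return -ENODEV;
	}

	ret = video_set_format(cam, VIDEO_EP_OUT, &fmt);
	if (ret) {
		return ret;
	}

	buf = video_buffer_alloc(fmt.pitch * fmt.height);
	if (buf == NULL) {
		return -ENOMEM;
	}

	video_enqueue(cam, VIDEO_EP_OUT, buf);
	video_stream_start(cam);

	/* __buffer_work fills the buffer at ~30 fps and moves it to the
	 * out fifo, where dequeue picks it up.
	 */
	ret = video_dequeue(cam, VIDEO_EP_OUT, &buf, K_MSEC(100));
	video_stream_stop(cam);
	if (ret == 0) {
		/* buf->buffer now holds one RGB565 color-bar frame. */
		video_buffer_release(buf);
	}
	return ret;
}
```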
```c /* * */ #define DT_DRV_COMPAT st_stm32_dcmi #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/video.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/dma/dma_stm32.h> #include <stm32_ll_dma.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(video_stm32_dcmi, CONFIG_STM32_DCMI_LOG_LEVEL); K_HEAP_DEFINE(video_stm32_buffer_pool, CONFIG_VIDEO_BUFFER_POOL_SZ_MAX); typedef void (*irq_config_func_t)(const struct device *dev); struct stream { DMA_TypeDef *reg; const struct device *dma_dev; uint32_t channel; struct dma_config cfg; }; struct video_stm32_dcmi_data { const struct device *dev; DCMI_HandleTypeDef hdcmi; struct video_format fmt; struct k_fifo fifo_in; struct k_fifo fifo_out; uint32_t pixel_format; uint32_t height; uint32_t width; uint32_t pitch; uint8_t *buffer; }; struct video_stm32_dcmi_config { struct stm32_pclken pclken; irq_config_func_t irq_config; const struct pinctrl_dev_config *pctrl; const struct device *sensor_dev; const struct stream dma; }; static inline unsigned int video_pix_fmt_bpp(uint32_t pixelformat) { switch (pixelformat) { case VIDEO_PIX_FMT_BGGR8: case VIDEO_PIX_FMT_GBRG8: case VIDEO_PIX_FMT_GRBG8: case VIDEO_PIX_FMT_RGGB8: return 1; case VIDEO_PIX_FMT_RGB565: case VIDEO_PIX_FMT_YUYV: return 2; default: return 0; } } void HAL_DCMI_ErrorCallback(DCMI_HandleTypeDef *hdcmi) { LOG_WRN("%s", __func__); } void HAL_DCMI_FrameEventCallback(DCMI_HandleTypeDef *hdcmi) { struct video_stm32_dcmi_data *dev_data = CONTAINER_OF(hdcmi, struct video_stm32_dcmi_data, hdcmi); struct video_buffer *vbuf; HAL_DCMI_Suspend(hdcmi); vbuf = k_fifo_get(&dev_data->fifo_in, K_NO_WAIT); if (vbuf == NULL) { LOG_DBG("Failed to get buffer from fifo"); goto resume; } vbuf->timestamp = k_uptime_get_32(); memcpy(vbuf->buffer, dev_data->buffer, vbuf->bytesused); k_fifo_put(&dev_data->fifo_out, vbuf); resume: HAL_DCMI_Resume(hdcmi); } static void stm32_dcmi_isr(const struct device *dev) { struct video_stm32_dcmi_data *data = dev->data; HAL_DCMI_IRQHandler(&data->hdcmi); } static void dmci_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status) { DMA_HandleTypeDef *hdma = arg; ARG_UNUSED(dev); if (status < 0) { LOG_ERR("DMA callback error with channel %d.", channel); } HAL_DMA_IRQHandler(hdma); } void HAL_DMA_ErrorCallback(DMA_HandleTypeDef *hdma) { LOG_WRN("%s", __func__); } static int stm32_dma_init(const struct device *dev) { struct video_stm32_dcmi_data *data = dev->data; const struct video_stm32_dcmi_config *config = dev->config; int ret; /* Check if the DMA device is ready */ if (!device_is_ready(config->dma.dma_dev)) { LOG_ERR("%s DMA device not ready", config->dma.dma_dev->name); return -ENODEV; } /* * DMA configuration * Due to use of QSPI HAL API in current driver, * both HAL and Zephyr DMA drivers should be configured. * The required configuration for Zephyr DMA driver should only provide * the minimum information to inform the DMA slot will be in used and * how to route callbacks. 
*/ struct dma_config dma_cfg = config->dma.cfg; static DMA_HandleTypeDef hdma; /* Proceed to the minimum Zephyr DMA driver init */ dma_cfg.user_data = &hdma; /* HACK: This field is used to inform driver that it is overridden */ dma_cfg.linked_channel = STM32_DMA_HAL_OVERRIDE; /* Because of the STREAM OFFSET, the DMA channel given here is from 1 - 8 */ ret = dma_config(config->dma.dma_dev, config->dma.channel + STM32_DMA_STREAM_OFFSET, &dma_cfg); if (ret != 0) { LOG_ERR("Failed to configure DMA channel %d", config->dma.channel + STM32_DMA_STREAM_OFFSET); return ret; } /*** Configure the DMA ***/ /* Set the parameters to be configured */ hdma.Init.Request = DMA_REQUEST_DCMI; hdma.Init.Direction = DMA_PERIPH_TO_MEMORY; hdma.Init.PeriphInc = DMA_PINC_DISABLE; hdma.Init.MemInc = DMA_MINC_ENABLE; hdma.Init.PeriphDataAlignment = DMA_PDATAALIGN_WORD; hdma.Init.MemDataAlignment = DMA_MDATAALIGN_WORD; hdma.Init.Mode = DMA_CIRCULAR; hdma.Init.Priority = DMA_PRIORITY_HIGH; hdma.Init.FIFOMode = DMA_FIFOMODE_DISABLE; hdma.Instance = __LL_DMA_GET_STREAM_INSTANCE(config->dma.reg, config->dma.channel); /* Initialize DMA HAL */ __HAL_LINKDMA(&data->hdcmi, DMA_Handle, hdma); if (HAL_DMA_Init(&hdma) != HAL_OK) { LOG_ERR("DCMI DMA Init failed"); return -EIO; } return 0; } static int stm32_dcmi_enable_clock(const struct device *dev) { const struct video_stm32_dcmi_config *config = dev->config; const struct device *dcmi_clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); int err; if (!device_is_ready(dcmi_clock)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* Turn on DCMI peripheral clock */ err = clock_control_on(dcmi_clock, (clock_control_subsys_t *) &config->pclken); if (err < 0) { LOG_ERR("Failed to enable DCMI clock. Error %d", err); return err; } return 0; } static int video_stm32_dcmi_set_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { const struct video_stm32_dcmi_config *config = dev->config; struct video_stm32_dcmi_data *data = dev->data; unsigned int bpp = video_pix_fmt_bpp(fmt->pixelformat); if (!bpp || ep != VIDEO_EP_OUT) { return -EINVAL; } data->pixel_format = fmt->pixelformat; data->pitch = fmt->pitch; data->height = fmt->height; data->width = fmt->width; if (video_set_format(config->sensor_dev, ep, fmt)) { return -EIO; } return 0; } static int video_stm32_dcmi_get_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct video_stm32_dcmi_data *data = dev->data; const struct video_stm32_dcmi_config *config = dev->config; if ((fmt == NULL) || (ep != VIDEO_EP_OUT)) { return -EINVAL; } if (!video_get_format(config->sensor_dev, ep, fmt)) { /* align DCMI with sensor fmt */ return video_stm32_dcmi_set_fmt(dev, ep, fmt); } fmt->pixelformat = data->pixel_format; fmt->height = data->height; fmt->width = data->width; fmt->pitch = data->pitch; return 0; } static int video_stm32_dcmi_stream_start(const struct device *dev) { struct video_stm32_dcmi_data *data = dev->data; const struct video_stm32_dcmi_config *config = dev->config; size_t buffer_size = data->pitch * data->height; data->buffer = k_heap_alloc(&video_stm32_buffer_pool, buffer_size, K_NO_WAIT); if (data->buffer == NULL) { LOG_ERR("Failed to allocate DCMI buffer for image. 
Size %d bytes", buffer_size); return -ENOMEM; } int err = HAL_DCMI_Start_DMA(&data->hdcmi, DCMI_MODE_CONTINUOUS, (uint32_t)data->buffer, buffer_size / 4); if (err != HAL_OK) { LOG_ERR("Failed to start DCMI DMA"); return -EIO; } if (video_stream_start(config->sensor_dev)) { return -EIO; } return 0; } static int video_stm32_dcmi_stream_stop(const struct device *dev) { struct video_stm32_dcmi_data *data = dev->data; const struct video_stm32_dcmi_config *config = dev->config; int err; if (video_stream_stop(config->sensor_dev)) { return -EIO; } /* Release the buffer allocated in stream_start */ k_heap_free(&video_stm32_buffer_pool, data->buffer); err = HAL_DCMI_Stop(&data->hdcmi); if (err != HAL_OK) { LOG_ERR("Failed to stop DCMI"); return -EIO; } return 0; } static int video_stm32_dcmi_enqueue(const struct device *dev, enum video_endpoint_id ep, struct video_buffer *vbuf) { struct video_stm32_dcmi_data *data = dev->data; const uint32_t buffer_size = data->pitch * data->height; if (ep != VIDEO_EP_OUT) { return -EINVAL; } if (buffer_size > vbuf->size) { return -EINVAL; } vbuf->bytesused = buffer_size; k_fifo_put(&data->fifo_in, vbuf); return 0; } static int video_stm32_dcmi_dequeue(const struct device *dev, enum video_endpoint_id ep, struct video_buffer **vbuf, k_timeout_t timeout) { struct video_stm32_dcmi_data *data = dev->data; if (ep != VIDEO_EP_OUT) { return -EINVAL; } *vbuf = k_fifo_get(&data->fifo_out, timeout); if (*vbuf == NULL) { return -EAGAIN; } return 0; } static int video_stm32_dcmi_get_caps(const struct device *dev, enum video_endpoint_id ep, struct video_caps *caps) { const struct video_stm32_dcmi_config *config = dev->config; int ret = -ENODEV; if (ep != VIDEO_EP_OUT) { return -EINVAL; } /* Forward the message to the sensor device */ ret = video_get_caps(config->sensor_dev, ep, caps); return ret; } static inline int video_stm32_dcmi_set_ctrl(const struct device *dev, unsigned int cid, void *value) { const struct video_stm32_dcmi_config *config = dev->config; int ret; /* Forward to source dev if any */ ret = video_set_ctrl(config->sensor_dev, cid, value); return ret; } static inline int video_stm32_dcmi_get_ctrl(const struct device *dev, unsigned int cid, void *value) { const struct video_stm32_dcmi_config *config = dev->config; int ret; /* Forward to source dev if any */ ret = video_get_ctrl(config->sensor_dev, cid, value); return ret; } static const struct video_driver_api video_stm32_dcmi_driver_api = { .set_format = video_stm32_dcmi_set_fmt, .get_format = video_stm32_dcmi_get_fmt, .stream_start = video_stm32_dcmi_stream_start, .stream_stop = video_stm32_dcmi_stream_stop, .enqueue = video_stm32_dcmi_enqueue, .dequeue = video_stm32_dcmi_dequeue, .get_caps = video_stm32_dcmi_get_caps, .set_ctrl = video_stm32_dcmi_set_ctrl, .get_ctrl = video_stm32_dcmi_get_ctrl, }; static void video_stm32_dcmi_irq_config_func(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), stm32_dcmi_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } #define DCMI_DMA_CHANNEL_INIT(index, src_dev, dest_dev) \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_IDX(index, 0)), \ .channel = DT_INST_DMAS_CELL_BY_IDX(index, 0, channel), \ .reg = (DMA_TypeDef *)DT_REG_ADDR( \ DT_PHANDLE_BY_IDX(DT_DRV_INST(0), dmas, 0)), \ .cfg = { \ .dma_slot = STM32_DMA_SLOT_BY_IDX(index, 0, slot), \ .channel_direction = STM32_DMA_CONFIG_DIRECTION( \ STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \ .source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE( \ STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 
0)), \ .dest_data_size = STM32_DMA_CONFIG_##dest_dev##_DATA_SIZE( \ STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \ .source_burst_length = 1, /* SINGLE transfer */ \ .dest_burst_length = 1, /* SINGLE transfer */ \ .channel_priority = STM32_DMA_CONFIG_PRIORITY( \ STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \ .dma_callback = dmci_dma_callback, \ }, \ PINCTRL_DT_INST_DEFINE(0); #define STM32_DCMI_GET_CAPTURE_RATE(capture_rate) \ ((capture_rate) == 1 ? DCMI_CR_ALL_FRAME : \ (capture_rate) == 2 ? DCMI_CR_ALTERNATE_2_FRAME : \ (capture_rate) == 4 ? DCMI_CR_ALTERNATE_4_FRAME : \ DCMI_CR_ALL_FRAME) #define STM32_DCMI_GET_BUS_WIDTH(bus_width) \ ((bus_width) == 8 ? DCMI_EXTEND_DATA_8B : \ (bus_width) == 10 ? DCMI_EXTEND_DATA_10B : \ (bus_width) == 12 ? DCMI_EXTEND_DATA_12B : \ (bus_width) == 14 ? DCMI_EXTEND_DATA_14B : \ DCMI_EXTEND_DATA_8B) #define DCMI_DMA_CHANNEL(id, src, dest) \ .dma = { \ COND_CODE_1(DT_INST_DMAS_HAS_IDX(id, 0), \ (DCMI_DMA_CHANNEL_INIT(id, src, dest)), \ (NULL)) \ }, static struct video_stm32_dcmi_data video_stm32_dcmi_data_0 = { .hdcmi = { .Instance = (DCMI_TypeDef *) DT_INST_REG_ADDR(0), .Init = { .SynchroMode = DCMI_SYNCHRO_HARDWARE, .PCKPolarity = (DT_INST_PROP(0, pixelclk_active) ? DCMI_PCKPOLARITY_RISING : DCMI_PCKPOLARITY_FALLING), .HSPolarity = (DT_INST_PROP(0, hsync_active) ? DCMI_HSPOLARITY_HIGH : DCMI_HSPOLARITY_LOW), .VSPolarity = (DT_INST_PROP(0, vsync_active) ? DCMI_VSPOLARITY_HIGH : DCMI_VSPOLARITY_LOW), .CaptureRate = STM32_DCMI_GET_CAPTURE_RATE( DT_INST_PROP(0, capture_rate)), .ExtendedDataMode = STM32_DCMI_GET_BUS_WIDTH( DT_INST_PROP(0, bus_width)), .JPEGMode = DCMI_JPEG_DISABLE, .ByteSelectMode = DCMI_BSM_ALL, .ByteSelectStart = DCMI_OEBS_ODD, .LineSelectMode = DCMI_LSM_ALL, .LineSelectStart = DCMI_OELS_ODD, }, }, }; static const struct video_stm32_dcmi_config video_stm32_dcmi_config_0 = { .pclken = { .enr = DT_INST_CLOCKS_CELL(0, bits), .bus = DT_INST_CLOCKS_CELL(0, bus) }, .irq_config = video_stm32_dcmi_irq_config_func, .pctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(0), .sensor_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, sensor)), DCMI_DMA_CHANNEL(0, PERIPHERAL, MEMORY) }; static int video_stm32_dcmi_init(const struct device *dev) { const struct video_stm32_dcmi_config *config = dev->config; struct video_stm32_dcmi_data *data = dev->data; int err; /* Configure DT provided pins */ err = pinctrl_apply_state(config->pctrl, PINCTRL_STATE_DEFAULT); if (err < 0) { LOG_ERR("pinctrl setup failed. Error %d.", err); return err; } /* Initialize DMA peripheral */ err = stm32_dma_init(dev); if (err < 0) { LOG_ERR("DMA initialization failed."); return err; } /* Enable DCMI clock */ err = stm32_dcmi_enable_clock(dev); if (err < 0) { LOG_ERR("Clock enabling failed."); return -EIO; } data->dev = dev; k_fifo_init(&data->fifo_in); k_fifo_init(&data->fifo_out); /* Run IRQ init */ config->irq_config(dev); /* Initialize DCMI peripheral */ err = HAL_DCMI_Init(&data->hdcmi); if (err != HAL_OK) { LOG_ERR("DCMI initialization failed."); return -EIO; } k_sleep(K_MSEC(100)); LOG_DBG("%s inited", dev->name); return 0; } DEVICE_DT_INST_DEFINE(0, &video_stm32_dcmi_init, NULL, &video_stm32_dcmi_data_0, &video_stm32_dcmi_config_0, POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, &video_stm32_dcmi_driver_api); ```
/content/code_sandbox/drivers/video/video_stm32_dcmi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,192
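One subtlety above is that HAL_DCMI_Start_DMA() takes its transfer length in 32-bit words, which is why the driver passes `buffer_size / 4`. Two small helpers (illustrative names, not part of the driver) make the unit conversion explicit:
```c
#include <stdint.h>

/* Bytes in one frame as the driver computes it; pitch already includes
 * bytes per pixel (e.g. width * 2 for RGB565 or YUYV).
 */
static inline uint32_t dcmi_frame_bytes(uint32_t pitch, uint32_t height)
{
	return pitch * height;
}

/* Length argument for HAL_DCMI_Start_DMA(), counted in 32-bit words.
 * frame_bytes must be a multiple of 4; 2-byte-per-pixel formats with an
 * even width satisfy this.
 */
static inline uint32_t dcmi_dma_word_count(uint32_t frame_bytes)
{
	return frame_bytes / 4U;
}
```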
```c /* * */ #define DT_DRV_COMPAT aptina_mt9m114 #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/video.h> #include <zephyr/drivers/i2c.h> #define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mt9m114); #define MT9M114_CHIP_ID_VAL 0x2481 /* Sysctl registers */ #define MT9M114_CHIP_ID 0x0000 #define MT9M114_COMMAND_REGISTER 0x0080 #define MT9M114_COMMAND_REGISTER_SET_STATE (1 << 1) #define MT9M114_COMMAND_REGISTER_OK (1 << 15) #define MT9M114_RST_AND_MISC_CONTROL 0x001A /* Camera Control registers */ #define MT9M114_CAM_SENSOR_CFG_Y_ADDR_START 0xC800 #define MT9M114_CAM_SENSOR_CFG_X_ADDR_START 0xC802 #define MT9M114_CAM_SENSOR_CFG_Y_ADDR_END 0xC804 #define MT9M114_CAM_SENSOR_CFG_X_ADDR_END 0xC806 #define MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW 0xC818 #define MT9M114_CAM_CROP_WINDOW_WIDTH 0xC858 #define MT9M114_CAM_CROP_WINDOW_HEIGHT 0xC85A #define MT9M114_CAM_OUTPUT_WIDTH 0xC868 #define MT9M114_CAM_OUTPUT_HEIGHT 0xC86A #define MT9M114_CAM_OUTPUT_FORMAT 0xC86C #define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND 0xC918 #define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND 0xC91A #define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND 0xC920 #define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND 0xC922 /* System Manager registers */ #define MT9M114_SYSMGR_NEXT_STATE 0xDC00 /* System States */ #define MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE 0x28 #define MT9M114_SYS_STATE_START_STREAMING 0x34 #define MT9M114_SYS_STATE_ENTER_SUSPEND 0x40 /* Camera output format */ #define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV (0 << 8) #define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB (1 << 8) struct mt9m114_config { struct i2c_dt_spec i2c; }; struct mt9m114_data { struct video_format fmt; }; struct mt9m114_reg { uint16_t addr; uint16_t value_size; uint32_t value; }; struct mt9m114_resolution_config { uint16_t width; uint16_t height; struct mt9m114_reg *params; }; static struct mt9m114_reg mt9m114_init_config[] = { {0x098E, 2, 0x1000}, /* LOGICAL_ADDRESS_ACCESS */ {0xC97E, 1, 0x01}, /* CAM_SYSCTL_PLL_ENABLE */ {0xC980, 2, 0x0120}, /* CAM_SYSCTL_PLL_DIVIDER_M_N = 288 */ {0xC982, 2, 0x0700}, /* CAM_SYSCTL_PLL_DIVIDER_P = 1792 */ {0xC808, 4, 0x2DC6C00}, /* CAM_SENSOR_CFG_PIXCLK = 48 Mhz */ {0x316A, 2, 0x8270}, /* Auto txlo_row for hot pixel and linear full well optimization */ {0x316C, 2, 0x8270}, /* Auto txlo for hot pixel and linear full well optimization */ {0x3ED0, 2, 0x2305}, /* Eclipse setting, ecl range=1, ecl value=2, ivln=3 */ {0x3ED2, 2, 0x77CF}, /* TX_hi = 12 */ {0x316E, 2, 0x8202}, /* Auto ecl , threshold 2x, ecl=0 at high gain, ecl=2 for low gain */ {0x3180, 2, 0x87FF}, /* Enable delta dark */ {0x30D4, 2, 0x6080}, /* Disable column correction due to AE oscillation problem */ {0xA802, 2, 0x0008}, /* RESERVED_AE_TRACK_02 */ {0x3E14, 2, 0xFF39}, /* Enabling pixout clamping to VAA to solve column band issue */ {0xC80C, 2, 0x0001}, /* CAM_SENSOR_CFG_ROW_SPEED */ {0xC80E, 2, 0x00DB}, /* CAM_SENSOR_CFG_FINE_INTEG_TIME_MIN = 219 */ {0xC810, 2, 0x07C2}, /* CAM_SENSOR_CFG_FINE_INTEG_TIME_MAX = 1986 */ {0xC812, 2, 0x02FE}, /* CAM_SENSOR_CFG_FRAME_LENGTH_LINES = 766 */ {0xC814, 2, 0x0845}, /* CAM_SENSOR_CFG_LINE_LENGTH_PCK = 2117 */ {0xC816, 2, 0x0060}, /* CAM_SENSOR_CFG_FINE_CORRECTION = 96 */ {0xC826, 2, 0x0020}, /* CAM_SENSOR_CFG_REG_0_DATA = 32 */ {0xC834, 2, 0x0000}, /* CAM_SENSOR_CONTROL_READ_MODE */ {0xC854, 2, 0x0000}, /* CAM_CROP_WINDOW_XOFFSET */ {0xC856, 2, 0x0000}, /* CAM_CROP_WINDOW_YOFFSET */ {0xC85C, 1, 0x03}, /* CAM_CROP_CROPMODE */ 
{0xC878, 1, 0x00}, /* CAM_AET_AEMODE */ {0xC88C, 2, 0x1D9A}, /* CAM_AET_MAX_FRAME_RATE = 7578 */ {0xC88E, 2, 0x1D9A}, /* CAM_AET_MIN_FRAME_RATE = 7578 */ {0xC914, 2, 0x0000}, /* CAM_STAT_AWB_CLIP_WINDOW_XSTART */ {0xC916, 2, 0x0000}, /* CAM_STAT_AWB_CLIP_WINDOW_YSTART */ {0xC91C, 2, 0x0000}, /* CAM_STAT_AE_INITIAL_WINDOW_XSTART */ {0xC91E, 2, 0x0000}, /* CAM_STAT_AE_INITIAL_WINDOW_YSTART */ {0x001E, 2, 0x0777}, /* REG_PAD_SLEW */ {0xC86E, 2, 0x0038}, /* CAM_OUTPUT_FORMAT_YUV_CLIP for CSI */ {0xC984, 2, 0x8000}, /* CAM_PORT_OUTPUT_CONTROL, for MIPI CSI-2 interface : 0x8000 */ {/* NULL terminated */}}; static struct mt9m114_reg mt9m114_480_272[] = { {MT9M114_CAM_SENSOR_CFG_Y_ADDR_START, 2, 0x00D4}, /* 212 */ {MT9M114_CAM_SENSOR_CFG_X_ADDR_START, 2, 0x00A4}, /* 164 */ {MT9M114_CAM_SENSOR_CFG_Y_ADDR_END, 2, 0x02FB}, /* 763 */ {MT9M114_CAM_SENSOR_CFG_X_ADDR_END, 2, 0x046B}, /* 1131 */ {MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW, 2, 0x0223}, /* 547 */ {MT9M114_CAM_CROP_WINDOW_WIDTH, 2, 0x03C0}, /* 960 */ {MT9M114_CAM_CROP_WINDOW_HEIGHT, 2, 0x0220}, /* 544 */ {MT9M114_CAM_OUTPUT_WIDTH, 2, 0x01E0}, /* 480 */ {MT9M114_CAM_OUTPUT_HEIGHT, 2, 0x0110}, /* 272 */ {MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND, 2, 0x01DF}, /* 479 */ {MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND, 2, 0x010F}, /* 271 */ {MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND, 2, 0x005F}, /* 95 */ {MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND, 2, 0x0035}, /* 53 */ {/* NULL terminated */}}; static struct mt9m114_reg mt9m114_640_480[] = { {MT9M114_CAM_SENSOR_CFG_Y_ADDR_START, 2, 0x0000}, /* 0 */ {MT9M114_CAM_SENSOR_CFG_X_ADDR_START, 2, 0x0000}, /* 0 */ {MT9M114_CAM_SENSOR_CFG_Y_ADDR_END, 2, 0x03CD}, /* 973 */ {MT9M114_CAM_SENSOR_CFG_X_ADDR_END, 2, 0x050D}, /* 1293 */ {MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW, 2, 0x01E3}, /* 483 */ {MT9M114_CAM_CROP_WINDOW_WIDTH, 2, 0x0280}, /* 640 */ {MT9M114_CAM_CROP_WINDOW_HEIGHT, 2, 0x01E0}, /* 480 */ {MT9M114_CAM_OUTPUT_WIDTH, 2, 0x0280}, /* 640 */ {MT9M114_CAM_OUTPUT_HEIGHT, 2, 0x01E0}, /* 480 */ {MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND, 2, 0x027F}, /* 639 */ {MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND, 2, 0x01DF}, /* 479 */ {MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND, 2, 0x007F}, /* 127 */ {MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND, 2, 0x005F}, /* 95 */ {/* NULL terminated */}}; static struct mt9m114_reg mt9m114_1280_720[] = { {MT9M114_CAM_SENSOR_CFG_Y_ADDR_START, 2, 0x007C}, /* 124 */ {MT9M114_CAM_SENSOR_CFG_X_ADDR_START, 2, 0x0004}, /* 4 */ {MT9M114_CAM_SENSOR_CFG_Y_ADDR_END, 2, 0x0353}, /* 851 */ {MT9M114_CAM_SENSOR_CFG_X_ADDR_END, 2, 0x050B}, /* 1291 */ {MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW, 2, 0x02D3}, /* 723 */ {MT9M114_CAM_CROP_WINDOW_WIDTH, 2, 0x0500}, /* 1280 */ {MT9M114_CAM_CROP_WINDOW_HEIGHT, 2, 0x02D0}, /* 720 */ {MT9M114_CAM_OUTPUT_WIDTH, 2, 0x0500}, /* 1280 */ {MT9M114_CAM_OUTPUT_HEIGHT, 2, 0x02D0}, /* 720 */ {MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND, 2, 0x04FF}, /* 1279 */ {MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND, 2, 0x02CF}, /* 719 */ {MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND, 2, 0x00FF}, /* 255 */ {MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND, 2, 0x008F}, /* 143 */ {/* NULL terminated */}}; static struct mt9m114_resolution_config resolutionConfigs[] = { {.width = 480, .height = 272, .params = mt9m114_480_272}, {.width = 640, .height = 480, .params = mt9m114_640_480}, {.width = 1280, .height = 720, .params = mt9m114_1280_720}, }; #define MT9M114_VIDEO_FORMAT_CAP(width, height, format) \ { \ .pixelformat = (format), .width_min = (width), .width_max = (width), \ .height_min = (height), .height_max = (height), .width_step = 0, 
.height_step = 0 \ } static const struct video_format_cap fmts[] = { MT9M114_VIDEO_FORMAT_CAP(480, 272, VIDEO_PIX_FMT_RGB565), MT9M114_VIDEO_FORMAT_CAP(480, 272, VIDEO_PIX_FMT_YUYV), MT9M114_VIDEO_FORMAT_CAP(640, 480, VIDEO_PIX_FMT_RGB565), MT9M114_VIDEO_FORMAT_CAP(640, 480, VIDEO_PIX_FMT_YUYV), MT9M114_VIDEO_FORMAT_CAP(1280, 720, VIDEO_PIX_FMT_RGB565), MT9M114_VIDEO_FORMAT_CAP(1280, 720, VIDEO_PIX_FMT_YUYV), {0}}; static inline int i2c_burst_read16_dt(const struct i2c_dt_spec *spec, uint16_t start_addr, uint8_t *buf, uint32_t num_bytes) { uint8_t addr_buffer[2]; addr_buffer[1] = start_addr & 0xFF; addr_buffer[0] = start_addr >> 8; return i2c_write_read_dt(spec, addr_buffer, sizeof(addr_buffer), buf, num_bytes); } static inline int i2c_burst_write16_dt(const struct i2c_dt_spec *spec, uint16_t start_addr, const uint8_t *buf, uint32_t num_bytes) { uint8_t addr_buffer[2]; struct i2c_msg msg[2]; addr_buffer[1] = start_addr & 0xFF; addr_buffer[0] = start_addr >> 8; msg[0].buf = addr_buffer; msg[0].len = 2U; msg[0].flags = I2C_MSG_WRITE; msg[1].buf = (uint8_t *)buf; msg[1].len = num_bytes; msg[1].flags = I2C_MSG_WRITE | I2C_MSG_STOP; return i2c_transfer_dt(spec, msg, 2); } static int mt9m114_write_reg(const struct device *dev, uint16_t reg_addr, uint8_t reg_size, void *value) { const struct mt9m114_config *cfg = dev->config; switch (reg_size) { case 2: *(uint16_t *)value = sys_cpu_to_be16(*(uint16_t *)value); break; case 4: *(uint32_t *)value = sys_cpu_to_be32(*(uint32_t *)value); break; case 1: break; default: return -ENOTSUP; } return i2c_burst_write16_dt(&cfg->i2c, reg_addr, value, reg_size); } static int mt9m114_read_reg(const struct device *dev, uint16_t reg_addr, uint8_t reg_size, void *value) { const struct mt9m114_config *cfg = dev->config; int err; if (reg_size > 4) { return -ENOTSUP; } err = i2c_burst_read16_dt(&cfg->i2c, reg_addr, value, reg_size); if (err) { return err; } switch (reg_size) { case 2: *(uint16_t *)value = sys_be16_to_cpu(*(uint16_t *)value); break; case 4: *(uint32_t *)value = sys_be32_to_cpu(*(uint32_t *)value); break; case 1: break; default: return -ENOTSUP; } return 0; } static int mt9m114_modify_reg(const struct device *dev, const uint16_t addr, const uint8_t mask, const uint8_t val) { uint8_t oldVal; uint8_t newVal; int ret = mt9m114_read_reg(dev, addr, sizeof(oldVal), &oldVal); if (ret) { return ret; } newVal = (oldVal & ~mask) | (val & mask); return mt9m114_write_reg(dev, addr, sizeof(newVal), &newVal); } static int mt9m114_write_all(const struct device *dev, struct mt9m114_reg *reg) { int i = 0; while (reg[i].value_size) { int err; err = mt9m114_write_reg(dev, reg[i].addr, reg[i].value_size, &reg[i].value); if (err) { return err; } i++; } return 0; } static int mt9m114_software_reset(const struct device *dev) { int ret = mt9m114_modify_reg(dev, MT9M114_RST_AND_MISC_CONTROL, 0x01, 0x01); if (ret) { return ret; } k_sleep(K_MSEC(1)); ret = mt9m114_modify_reg(dev, MT9M114_RST_AND_MISC_CONTROL, 0x01, 0x00); if (ret) { return ret; } k_sleep(K_MSEC(45)); return 0; } static int mt9m114_set_state(const struct device *dev, uint8_t state) { uint16_t val; int err; /* Set next state. */ mt9m114_write_reg(dev, MT9M114_SYSMGR_NEXT_STATE, 1, &state); /* Check that the FW is ready to accept a new command. */ while (1) { err = mt9m114_read_reg(dev, MT9M114_COMMAND_REGISTER, 2, &val); if (err) { return err; } if (!(val & MT9M114_COMMAND_REGISTER_SET_STATE)) { break; } k_sleep(K_MSEC(1)); } /* Issue the Set State command. 
*/ val = MT9M114_COMMAND_REGISTER_SET_STATE | MT9M114_COMMAND_REGISTER_OK; mt9m114_write_reg(dev, MT9M114_COMMAND_REGISTER, 2, &val); /* Wait for the FW to complete the command. */ while (1) { err = mt9m114_read_reg(dev, MT9M114_COMMAND_REGISTER, 2, &val); if (err) { return err; } if (!(val & MT9M114_COMMAND_REGISTER_SET_STATE)) { break; } k_sleep(K_MSEC(1)); } /* Check the 'OK' bit to see if the command was successful. */ err = mt9m114_read_reg(dev, MT9M114_COMMAND_REGISTER, 2, &val); if (err || !(val & MT9M114_COMMAND_REGISTER_OK)) { return -EIO; } return 0; } static int mt9m114_set_output_format(const struct device *dev, int pixel_format) { int ret = 0; uint16_t output_format; if (pixel_format == VIDEO_PIX_FMT_YUYV) { output_format = (MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV | (1U << 1U)); } else if (pixel_format == VIDEO_PIX_FMT_RGB565) { output_format = (MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB | (1U << 1U)); } ret = mt9m114_write_reg(dev, MT9M114_CAM_OUTPUT_FORMAT, sizeof(output_format), &output_format); return ret; } static int mt9m114_set_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct mt9m114_data *drv_data = dev->data; int ret; int i = 0; while (fmts[i].pixelformat) { if (fmt->pixelformat == fmts[i].pixelformat && fmt->width >= fmts[i].width_min && fmt->width <= fmts[i].width_max && fmt->height >= fmts[i].height_min && fmt->height <= fmts[i].height_max) { break; } i++; } if (i == (ARRAY_SIZE(fmts) - 1)) { LOG_ERR("Unsupported pixel format or resolution"); return -ENOTSUP; } if (!memcmp(&drv_data->fmt, fmt, sizeof(drv_data->fmt))) { /* nothing to do */ return 0; } drv_data->fmt = *fmt; /* Set output pixel format */ ret = mt9m114_set_output_format(dev, fmt->pixelformat); if (ret) { LOG_ERR("Unable to set pixel format"); return ret; } /* Set output resolution */ for (i = 0; i < ARRAY_SIZE(resolutionConfigs); i++) { if (fmt->width == resolutionConfigs[i].width && fmt->height == resolutionConfigs[i].height) { ret = mt9m114_write_all(dev, resolutionConfigs[i].params); if (ret) { LOG_ERR("Unable to set resolution"); return ret; } break; } } /* Apply Config */ return mt9m114_set_state(dev, MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE); } static int mt9m114_get_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct mt9m114_data *drv_data = dev->data; *fmt = drv_data->fmt; return 0; } static int mt9m114_stream_start(const struct device *dev) { return mt9m114_set_state(dev, MT9M114_SYS_STATE_START_STREAMING); } static int mt9m114_stream_stop(const struct device *dev) { return mt9m114_set_state(dev, MT9M114_SYS_STATE_ENTER_SUSPEND); } static int mt9m114_get_caps(const struct device *dev, enum video_endpoint_id ep, struct video_caps *caps) { caps->format_caps = fmts; return 0; } static const struct video_driver_api mt9m114_driver_api = { .set_format = mt9m114_set_fmt, .get_format = mt9m114_get_fmt, .get_caps = mt9m114_get_caps, .stream_start = mt9m114_stream_start, .stream_stop = mt9m114_stream_stop, }; static int mt9m114_init(const struct device *dev) { struct video_format fmt; uint16_t val; int ret; /* no power control, wait for camera ready */ k_sleep(K_MSEC(100)); ret = mt9m114_read_reg(dev, MT9M114_CHIP_ID, sizeof(val), &val); if (ret) { LOG_ERR("Unable to read chip ID"); return -ENODEV; } if (val != MT9M114_CHIP_ID_VAL) { LOG_ERR("Wrong ID: %04x (exp %04x)", val, MT9M114_CHIP_ID_VAL); return -ENODEV; } /* SW reset */ mt9m114_software_reset(dev); /* Init registers */ ret = mt9m114_write_all(dev, mt9m114_init_config); if 
(ret) { LOG_ERR("Unable to initialize mt9m114 config"); return ret; } /* Set default format to 480x272 RGB565 */ fmt.pixelformat = VIDEO_PIX_FMT_RGB565; fmt.width = 480; fmt.height = 272; fmt.pitch = fmt.width * 2; ret = mt9m114_set_fmt(dev, VIDEO_EP_OUT, &fmt); if (ret) { LOG_ERR("Unable to configure default format"); return -EIO; } /* Suspend any stream */ mt9m114_set_state(dev, MT9M114_SYS_STATE_ENTER_SUSPEND); return 0; } #if 1 /* Unique Instance */ static const struct mt9m114_config mt9m114_cfg_0 = { .i2c = I2C_DT_SPEC_INST_GET(0), }; static struct mt9m114_data mt9m114_data_0; static int mt9m114_init_0(const struct device *dev) { const struct mt9m114_config *cfg = dev->config; if (!device_is_ready(cfg->i2c.bus)) { LOG_ERR("Bus device is not ready"); return -ENODEV; } return mt9m114_init(dev); } DEVICE_DT_INST_DEFINE(0, &mt9m114_init_0, NULL, &mt9m114_data_0, &mt9m114_cfg_0, POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, &mt9m114_driver_api); #endif ```
/content/code_sandbox/drivers/video/mt9m114.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,587
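The mt9m114 driver above applies every configuration through the two-phase COMMAND_REGISTER handshake (write SYSMGR_NEXT_STATE, poll until the SET_STATE bit clears, issue the command, poll again, then check the OK bit). A minimal application-side sketch, assuming a board whose devicetree already wires up the sensor, would drive it through the generic Zephyr video API; the function name and values here are illustrative only:

```c
/*
 * Usage sketch (not part of the driver): configure 480x272 RGB565 and
 * start streaming via the generic video API. The caller is assumed to
 * have obtained `sensor` from the board devicetree.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/video.h>

int capture_start_example(const struct device *sensor)
{
	struct video_format fmt = {
		.pixelformat = VIDEO_PIX_FMT_RGB565,
		.width = 480,
		.height = 272,
		.pitch = 480 * 2, /* bytes per line for RGB565 */
	};
	int ret;

	/* set_format ends with a SET_STATE(ENTER_CONFIG_CHANGE) handshake */
	ret = video_set_format(sensor, VIDEO_EP_OUT, &fmt);
	if (ret) {
		return ret;
	}

	/* stream_start issues SET_STATE(START_STREAMING) */
	return video_stream_start(sensor);
}
```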
```unknown
# Video Software Generator

config VIDEO_SW_GENERATOR
	bool "Video Software Generator"
	help
	  Enable video pattern generator (for testing purposes).
```
/content/code_sandbox/drivers/video/Kconfig.sw_generator
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
31
```c
/*
 *
 */

#include <zephyr/kernel.h>
#include <zephyr/drivers/video.h>

K_HEAP_DEFINE(video_buffer_pool,
	      CONFIG_VIDEO_BUFFER_POOL_SZ_MAX * CONFIG_VIDEO_BUFFER_POOL_NUM_MAX);

static struct video_buffer video_buf[CONFIG_VIDEO_BUFFER_POOL_NUM_MAX];

struct mem_block {
	void *data;
};

static struct mem_block video_block[CONFIG_VIDEO_BUFFER_POOL_NUM_MAX];

struct video_buffer *video_buffer_aligned_alloc(size_t size, size_t align)
{
	struct video_buffer *vbuf = NULL;
	struct mem_block *block;
	int i;

	/* Find an available video buffer descriptor */
	for (i = 0; i < ARRAY_SIZE(video_buf); i++) {
		if (video_buf[i].buffer == NULL) {
			vbuf = &video_buf[i];
			block = &video_block[i];
			break;
		}
	}

	if (vbuf == NULL) {
		return NULL;
	}

	/* Allocate the buffer memory from the shared pool */
	block->data = k_heap_aligned_alloc(&video_buffer_pool, align, size, K_FOREVER);
	if (block->data == NULL) {
		return NULL;
	}

	vbuf->buffer = block->data;
	vbuf->size = size;
	vbuf->bytesused = 0;

	return vbuf;
}

struct video_buffer *video_buffer_alloc(size_t size)
{
	return video_buffer_aligned_alloc(size, sizeof(void *));
}

void video_buffer_release(struct video_buffer *vbuf)
{
	struct mem_block *block = NULL;
	int i;

	/* Map the video buffer back to its memory block */
	for (i = 0; i < ARRAY_SIZE(video_block); i++) {
		if (video_block[i].data == vbuf->buffer) {
			block = &video_block[i];
			break;
		}
	}

	vbuf->buffer = NULL;
	if (block) {
		k_heap_free(&video_buffer_pool, block->data);
	}
}
```
/content/code_sandbox/drivers/video/video_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
392
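Since video_buffer_alloc above passes K_FOREVER to the heap, allocation blocks until pool memory is available and must not be called from an ISR. A short round-trip sketch, with the frame size chosen as an example only:

```c
/*
 * Usage sketch for the buffer pool above: allocate a frame buffer sized
 * for 480x272 RGB565, then hand it back to the pool.
 */
#include <zephyr/drivers/video.h>

void buffer_roundtrip_example(void)
{
	struct video_buffer *vbuf;

	vbuf = video_buffer_alloc(480 * 272 * 2);
	if (vbuf == NULL) {
		return; /* all pool descriptors are in use */
	}

	/* ... enqueue to a capture device, wait for completion ... */

	video_buffer_release(vbuf);
}
```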
```unknown
# OV7725

config VIDEO_OV7725
	bool "OV7725 CMOS digital image sensor"
	select I2C
	depends on DT_HAS_OVTI_OV7725_ENABLED
	default y
	help
	  Enable driver for OV7725 CMOS digital image sensor device.
```
/content/code_sandbox/drivers/video/Kconfig.ov7725
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
62
```c /* * */ #define DT_DRV_COMPAT ovti_ov2640 #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/video.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/gpio.h> #define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ov2640); /* DSP register bank FF=0x00*/ #define QS 0x44 #define HSIZE 0x51 #define VSIZE 0x52 #define XOFFL 0x53 #define YOFFL 0x54 #define VHYX 0x55 #define TEST 0x57 #define ZMOW 0x5A #define ZMOH 0x5B #define ZMHH 0x5C #define BPADDR 0x7C #define BPDATA 0x7D #define SIZEL 0x8C #define HSIZE8 0xC0 #define VSIZE8 0xC1 #define CTRL1 0xC3 #define CTRLI 0x50 #define CTRLI_LP_DP 0x80 #define CTRL0 0xC2 #define CTRL0_YUV422 0x08 #define CTRL0_YUV_EN 0x04 #define CTRL0_RGB_EN 0x02 #define CTRL2 0x86 #define CTRL2_DCW_EN 0x20 #define CTRL2_SDE_EN 0x10 #define CTRL2_UV_ADJ_EN 0x08 #define CTRL2_UV_AVG_EN 0x04 #define CTRL2_CMX_EN 0x01 #define CTRL3 0x87 #define CTRL3_BPC_EN 0x80 #define CTRL3_WPC_EN 0x40 #define R_DVP_SP 0xD3 #define R_DVP_SP_AUTO_MODE 0x80 #define R_BYPASS 0x05 #define R_BYPASS_DSP_EN 0x00 #define R_BYPASS_DSP_BYPAS 0x01 #define IMAGE_MODE 0xDA #define IMAGE_MODE_JPEG_EN 0x10 #define IMAGE_MODE_RGB565 0x08 #define RESET 0xE0 #define RESET_JPEG 0x10 #define RESET_DVP 0x04 #define MC_BIST 0xF9 #define MC_BIST_RESET 0x80 #define MC_BIST_BOOT_ROM_SEL 0x40 #define BANK_SEL 0xFF #define BANK_SEL_DSP 0x00 #define BANK_SEL_SENSOR 0x01 /* Sensor register bank FF=0x01*/ #define COM1 0x03 #define REG_PID 0x0A #define REG_PID_VAL 0x26 #define REG_VER 0x0B #define REG_VER_VAL 0x42 #define AEC 0x10 #define CLKRC 0x11 #define COM10 0x15 #define HSTART 0x17 #define HSTOP 0x18 #define VSTART 0x19 #define VSTOP 0x1A #define AEW 0x24 #define AEB 0x25 #define ARCOM2 0x34 #define FLL 0x46 #define FLH 0x47 #define COM19 0x48 #define ZOOMS 0x49 #define BD50 0x4F #define BD60 0x50 #define REG5D 0x5D #define REG5E 0x5E #define REG5F 0x5F #define REG60 0x60 #define HISTO_LOW 0x61 #define HISTO_HIGH 0x62 #define REG04 0x04 #define REG04_DEFAULT 0x28 #define REG04_HFLIP_IMG 0x80 #define REG04_VFLIP_IMG 0x40 #define REG04_VREF_EN 0x10 #define REG04_HREF_EN 0x08 #define REG04_SET(x) (REG04_DEFAULT | x) #define COM2 0x09 #define COM2_OUT_DRIVE_3x 0x02 #define COM3 0x0C #define COM3_DEFAULT 0x38 #define COM3_BAND_AUTO 0x02 #define COM3_BAND_SET(x) (COM3_DEFAULT | x) #define COM7 0x12 #define COM7_SRST 0x80 #define COM7_RES_UXGA 0x00 /* UXGA */ #define COM7_ZOOM_EN 0x04 /* Enable Zoom */ #define COM7_COLOR_BAR 0x02 /* Enable Color Bar Test */ #define COM8 0x13 #define COM8_DEFAULT 0xC0 #define COM8_BNDF_EN 0x20 /* Enable Banding filter */ #define COM8_AGC_EN 0x04 /* AGC Auto/Manual control selection */ #define COM8_AEC_EN 0x01 /* Auto/Manual Exposure control */ #define COM8_SET(x) (COM8_DEFAULT | x) #define COM9 0x14 /* AGC gain ceiling */ #define COM9_DEFAULT 0x08 #define COM9_AGC_GAIN_8x 0x02 /* AGC: 8x */ #define COM9_AGC_SET(x) (COM9_DEFAULT | (x << 5)) #define COM10 0x15 #define CTRL1_AWB 0x08 /* Enable AWB */ #define VV 0x26 #define VV_AGC_TH_SET(h, l) ((h << 4) | (l & 0x0F)) #define REG32 0x32 #define REG32_UXGA 0x36 /* Configuration arrays */ #define SVGA_HSIZE (800) #define SVGA_VSIZE (600) #define UXGA_HSIZE (1600) #define UXGA_VSIZE (1200) struct ov2640_reg { uint8_t addr; uint8_t value; }; static const struct ov2640_reg default_regs[] = { { BANK_SEL, BANK_SEL_DSP }, { 0x2c, 0xff }, { 0x2e, 0xdf }, { BANK_SEL, BANK_SEL_SENSOR }, { 0x3c, 0x32 }, { CLKRC, 0x80 }, /* Set PCLK divider */ { COM2, COM2_OUT_DRIVE_3x 
}, /* Output drive x2 */ { REG04, REG04_SET(REG04_HREF_EN)}, { COM8, COM8_SET(COM8_BNDF_EN | COM8_AGC_EN | COM8_AEC_EN) }, { COM9, COM9_AGC_SET(COM9_AGC_GAIN_8x)}, { COM10, 0x00 }, /* Invert VSYNC */ { 0x2c, 0x0c }, { 0x33, 0x78 }, { 0x3a, 0x33 }, { 0x3b, 0xfb }, { 0x3e, 0x00 }, { 0x43, 0x11 }, { 0x16, 0x10 }, { 0x39, 0x02 }, { 0x35, 0x88 }, { 0x22, 0x0a }, { 0x37, 0x40 }, { 0x23, 0x00 }, { ARCOM2, 0xa0 }, { 0x06, 0x02 }, { 0x06, 0x88 }, { 0x07, 0xc0 }, { 0x0d, 0xb7 }, { 0x0e, 0x01 }, { 0x4c, 0x00 }, { 0x4a, 0x81 }, { 0x21, 0x99 }, { AEW, 0x40 }, { AEB, 0x38 }, /* AGC/AEC fast mode operating region */ { VV, VV_AGC_TH_SET(0x08, 0x02) }, { COM19, 0x00 }, /* Zoom control 2 LSBs */ { ZOOMS, 0x00 }, /* Zoom control 8 MSBs */ { 0x5c, 0x00 }, { 0x63, 0x00 }, { FLL, 0x00 }, { FLH, 0x00 }, /* Set banding filter */ { COM3, COM3_BAND_SET(COM3_BAND_AUTO) }, { REG5D, 0x55 }, { REG5E, 0x7d }, { REG5F, 0x7d }, { REG60, 0x55 }, { HISTO_LOW, 0x70 }, { HISTO_HIGH, 0x80 }, { 0x7c, 0x05 }, { 0x20, 0x80 }, { 0x28, 0x30 }, { 0x6c, 0x00 }, { 0x6d, 0x80 }, { 0x6e, 0x00 }, { 0x70, 0x02 }, { 0x71, 0x94 }, { 0x73, 0xc1 }, { 0x3d, 0x34 }, /* { COM7, COM7_RES_UXGA | COM7_ZOOM_EN }, */ { 0x5a, 0x57 }, { BD50, 0xbb }, { BD60, 0x9c }, { BANK_SEL, BANK_SEL_DSP }, { 0xe5, 0x7f }, { MC_BIST, MC_BIST_RESET | MC_BIST_BOOT_ROM_SEL }, { 0x41, 0x24 }, { RESET, RESET_JPEG | RESET_DVP }, { 0x76, 0xff }, { 0x33, 0xa0 }, { 0x42, 0x20 }, { 0x43, 0x18 }, { 0x4c, 0x00 }, { CTRL3, CTRL3_BPC_EN | CTRL3_WPC_EN | 0x10 }, { 0x88, 0x3f }, { 0xd7, 0x03 }, { 0xd9, 0x10 }, { R_DVP_SP, R_DVP_SP_AUTO_MODE | 0x2 }, { 0xc8, 0x08 }, { 0xc9, 0x80 }, { BPADDR, 0x00 }, { BPDATA, 0x00 }, { BPADDR, 0x03 }, { BPDATA, 0x48 }, { BPDATA, 0x48 }, { BPADDR, 0x08 }, { BPDATA, 0x20 }, { BPDATA, 0x10 }, { BPDATA, 0x0e }, { 0x90, 0x00 }, { 0x91, 0x0e }, { 0x91, 0x1a }, { 0x91, 0x31 }, { 0x91, 0x5a }, { 0x91, 0x69 }, { 0x91, 0x75 }, { 0x91, 0x7e }, { 0x91, 0x88 }, { 0x91, 0x8f }, { 0x91, 0x96 }, { 0x91, 0xa3 }, { 0x91, 0xaf }, { 0x91, 0xc4 }, { 0x91, 0xd7 }, { 0x91, 0xe8 }, { 0x91, 0x20 }, { 0x92, 0x00 }, { 0x93, 0x06 }, { 0x93, 0xe3 }, { 0x93, 0x03 }, { 0x93, 0x03 }, { 0x93, 0x00 }, { 0x93, 0x02 }, { 0x93, 0x00 }, { 0x93, 0x00 }, { 0x93, 0x00 }, { 0x93, 0x00 }, { 0x93, 0x00 }, { 0x93, 0x00 }, { 0x93, 0x00 }, { 0x96, 0x00 }, { 0x97, 0x08 }, { 0x97, 0x19 }, { 0x97, 0x02 }, { 0x97, 0x0c }, { 0x97, 0x24 }, { 0x97, 0x30 }, { 0x97, 0x28 }, { 0x97, 0x26 }, { 0x97, 0x02 }, { 0x97, 0x98 }, { 0x97, 0x80 }, { 0x97, 0x00 }, { 0x97, 0x00 }, { 0xa4, 0x00 }, { 0xa8, 0x00 }, { 0xc5, 0x11 }, { 0xc6, 0x51 }, { 0xbf, 0x80 }, { 0xc7, 0x10 }, { 0xb6, 0x66 }, { 0xb8, 0xA5 }, { 0xb7, 0x64 }, { 0xb9, 0x7C }, { 0xb3, 0xaf }, { 0xb4, 0x97 }, { 0xb5, 0xFF }, { 0xb0, 0xC5 }, { 0xb1, 0x94 }, { 0xb2, 0x0f }, { 0xc4, 0x5c }, { 0xa6, 0x00 }, { 0xa7, 0x20 }, { 0xa7, 0xd8 }, { 0xa7, 0x1b }, { 0xa7, 0x31 }, { 0xa7, 0x00 }, { 0xa7, 0x18 }, { 0xa7, 0x20 }, { 0xa7, 0xd8 }, { 0xa7, 0x19 }, { 0xa7, 0x31 }, { 0xa7, 0x00 }, { 0xa7, 0x18 }, { 0xa7, 0x20 }, { 0xa7, 0xd8 }, { 0xa7, 0x19 }, { 0xa7, 0x31 }, { 0xa7, 0x00 }, { 0xa7, 0x18 }, { 0x7f, 0x00 }, { 0xe5, 0x1f }, { 0xe1, 0x77 }, { 0xdd, 0x7f }, { CTRL0, CTRL0_YUV422 | CTRL0_YUV_EN | CTRL0_RGB_EN }, { 0x00, 0x00 } }; static const struct ov2640_reg uxga_regs[] = { { BANK_SEL, BANK_SEL_SENSOR }, /* DSP input image resolution and window size control */ { COM7, COM7_RES_UXGA}, { COM1, 0x0F }, /* UXGA=0x0F, SVGA=0x0A, CIF=0x06 */ { REG32, REG32_UXGA }, /* UXGA=0x36, SVGA/CIF=0x09 */ { HSTART, 0x11 }, /* UXGA=0x11, SVGA/CIF=0x11 */ { HSTOP, 0x75 }, /* 
UXGA=0x75, SVGA/CIF=0x43 */ { VSTART, 0x01 }, /* UXGA=0x01, SVGA/CIF=0x00 */ { VSTOP, 0x97 }, /* UXGA=0x97, SVGA/CIF=0x4b */ { 0x3d, 0x34 }, /* UXGA=0x34, SVGA/CIF=0x38 */ { 0x35, 0x88 }, { 0x22, 0x0a }, { 0x37, 0x40 }, { 0x34, 0xa0 }, { 0x06, 0x02 }, { 0x0d, 0xb7 }, { 0x0e, 0x01 }, { 0x42, 0x83 }, /* * Set DSP input image size and offset. * The sensor output image can be scaled with OUTW/OUTH */ { BANK_SEL, BANK_SEL_DSP }, { R_BYPASS, R_BYPASS_DSP_BYPAS }, { RESET, RESET_DVP }, { HSIZE8, (UXGA_HSIZE>>3)}, /* Image Horizontal Size HSIZE[10:3] */ { VSIZE8, (UXGA_VSIZE>>3)}, /* Image Vertical Size VSIZE[10:3] */ /* {HSIZE[11], HSIZE[2:0], VSIZE[2:0]} */ { SIZEL, ((UXGA_HSIZE>>6)&0x40) | ((UXGA_HSIZE&0x7)<<3) | (UXGA_VSIZE&0x7)}, { XOFFL, 0x00 }, /* OFFSET_X[7:0] */ { YOFFL, 0x00 }, /* OFFSET_Y[7:0] */ { HSIZE, ((UXGA_HSIZE>>2)&0xFF) }, /* H_SIZE[7:0] real/4 */ { VSIZE, ((UXGA_VSIZE>>2)&0xFF) }, /* V_SIZE[7:0] real/4 */ /* V_SIZE[8]/OFFSET_Y[10:8]/H_SIZE[8]/OFFSET_X[10:8] */ { VHYX, ((UXGA_VSIZE>>3)&0x80) | ((UXGA_HSIZE>>7)&0x08) }, { TEST, (UXGA_HSIZE>>4)&0x80}, /* H_SIZE[9] */ { CTRL2, CTRL2_DCW_EN | CTRL2_SDE_EN | CTRL2_UV_AVG_EN | CTRL2_CMX_EN | CTRL2_UV_ADJ_EN }, /* H_DIVIDER/V_DIVIDER */ { CTRLI, CTRLI_LP_DP | 0x00}, /* DVP prescaler */ { R_DVP_SP, R_DVP_SP_AUTO_MODE | 0x04}, { R_BYPASS, R_BYPASS_DSP_EN }, { RESET, 0x00 }, {0, 0}, }; #define NUM_BRIGHTNESS_LEVELS (5) static const uint8_t brightness_regs[NUM_BRIGHTNESS_LEVELS + 1][5] = { { BPADDR, BPDATA, BPADDR, BPDATA, BPDATA }, { 0x00, 0x04, 0x09, 0x00, 0x00 }, /* -2 */ { 0x00, 0x04, 0x09, 0x10, 0x00 }, /* -1 */ { 0x00, 0x04, 0x09, 0x20, 0x00 }, /* 0 */ { 0x00, 0x04, 0x09, 0x30, 0x00 }, /* +1 */ { 0x00, 0x04, 0x09, 0x40, 0x00 }, /* +2 */ }; #define NUM_CONTRAST_LEVELS (5) static const uint8_t contrast_regs[NUM_CONTRAST_LEVELS + 1][7] = { { BPADDR, BPDATA, BPADDR, BPDATA, BPDATA, BPDATA, BPDATA }, { 0x00, 0x04, 0x07, 0x20, 0x18, 0x34, 0x06 }, /* -2 */ { 0x00, 0x04, 0x07, 0x20, 0x1c, 0x2a, 0x06 }, /* -1 */ { 0x00, 0x04, 0x07, 0x20, 0x20, 0x20, 0x06 }, /* 0 */ { 0x00, 0x04, 0x07, 0x20, 0x24, 0x16, 0x06 }, /* +1 */ { 0x00, 0x04, 0x07, 0x20, 0x28, 0x0c, 0x06 }, /* +2 */ }; #define NUM_SATURATION_LEVELS (5) static const uint8_t saturation_regs[NUM_SATURATION_LEVELS + 1][5] = { { BPADDR, BPDATA, BPADDR, BPDATA, BPDATA }, { 0x00, 0x02, 0x03, 0x28, 0x28 }, /* -2 */ { 0x00, 0x02, 0x03, 0x38, 0x38 }, /* -1 */ { 0x00, 0x02, 0x03, 0x48, 0x48 }, /* 0 */ { 0x00, 0x02, 0x03, 0x58, 0x58 }, /* +1 */ { 0x00, 0x02, 0x03, 0x58, 0x58 }, /* +2 */ }; struct ov2640_config { struct i2c_dt_spec i2c; #if DT_INST_NODE_HAS_PROP(0, reset_gpios) struct gpio_dt_spec reset_gpio; #endif uint8_t clock_rate_control; }; struct ov2640_data { struct video_format fmt; }; #define OV2640_VIDEO_FORMAT_CAP(width, height, format) \ { \ .pixelformat = (format), \ .width_min = (width), \ .width_max = (width), \ .height_min = (height), \ .height_max = (height), \ .width_step = 0, \ .height_step = 0 \ } static const struct video_format_cap fmts[] = { OV2640_VIDEO_FORMAT_CAP(160, 120, VIDEO_PIX_FMT_RGB565), /* QQVGA */ OV2640_VIDEO_FORMAT_CAP(176, 144, VIDEO_PIX_FMT_RGB565), /* QCIF */ OV2640_VIDEO_FORMAT_CAP(240, 160, VIDEO_PIX_FMT_RGB565), /* HQVGA */ OV2640_VIDEO_FORMAT_CAP(320, 240, VIDEO_PIX_FMT_RGB565), /* QVGA */ OV2640_VIDEO_FORMAT_CAP(352, 288, VIDEO_PIX_FMT_RGB565), /* CIF */ OV2640_VIDEO_FORMAT_CAP(640, 480, VIDEO_PIX_FMT_RGB565), /* VGA */ OV2640_VIDEO_FORMAT_CAP(800, 600, VIDEO_PIX_FMT_RGB565), /* SVGA */ OV2640_VIDEO_FORMAT_CAP(1024, 768, VIDEO_PIX_FMT_RGB565), /* XVGA 
*/ OV2640_VIDEO_FORMAT_CAP(1280, 1024, VIDEO_PIX_FMT_RGB565), /* SXGA */ OV2640_VIDEO_FORMAT_CAP(1600, 1200, VIDEO_PIX_FMT_RGB565), /* UXGA */ OV2640_VIDEO_FORMAT_CAP(160, 120, VIDEO_PIX_FMT_JPEG), /* QQVGA */ OV2640_VIDEO_FORMAT_CAP(176, 144, VIDEO_PIX_FMT_JPEG), /* QCIF */ OV2640_VIDEO_FORMAT_CAP(240, 160, VIDEO_PIX_FMT_JPEG), /* HQVGA */ OV2640_VIDEO_FORMAT_CAP(320, 240, VIDEO_PIX_FMT_JPEG), /* QVGA */ OV2640_VIDEO_FORMAT_CAP(352, 288, VIDEO_PIX_FMT_JPEG), /* CIF */ OV2640_VIDEO_FORMAT_CAP(640, 480, VIDEO_PIX_FMT_JPEG), /* VGA */ OV2640_VIDEO_FORMAT_CAP(800, 600, VIDEO_PIX_FMT_JPEG), /* SVGA */ OV2640_VIDEO_FORMAT_CAP(1024, 768, VIDEO_PIX_FMT_JPEG), /* XVGA */ OV2640_VIDEO_FORMAT_CAP(1280, 1024, VIDEO_PIX_FMT_JPEG), /* SXGA */ OV2640_VIDEO_FORMAT_CAP(1600, 1200, VIDEO_PIX_FMT_JPEG), /* UXGA */ { 0 } }; static int ov2640_write_reg(const struct i2c_dt_spec *spec, uint8_t reg_addr, uint8_t value) { uint8_t tries = 3; /** * It rarely happens that the camera does not respond with ACK signal. * In that case it usually responds on 2nd try but there is a 3rd one * just to be sure that the connection error is not caused by driver * itself. */ while (tries--) { if (!i2c_reg_write_byte_dt(spec, reg_addr, value)) { return 0; } /* If writing failed wait 5ms before next attempt */ k_msleep(5); } LOG_ERR("failed to write 0x%x to 0x%x", value, reg_addr); return -1; } static int ov2640_read_reg(const struct i2c_dt_spec *spec, uint8_t reg_addr) { uint8_t tries = 3; uint8_t value; /** * It rarely happens that the camera does not respond with ACK signal. * In that case it usually responds on 2nd try but there is a 3rd one * just to be sure that the connection error is not caused by driver * itself. */ while (tries--) { if (!i2c_reg_read_byte_dt(spec, reg_addr, &value)) { return value; } /* If reading failed wait 5ms before next attempt */ k_msleep(5); } LOG_ERR("failed to read 0x%x register", reg_addr); return -1; } static int ov2640_write_all(const struct device *dev, const struct ov2640_reg *regs, uint16_t reg_num) { uint16_t i = 0; const struct ov2640_config *cfg = dev->config; for (i = 0; i < reg_num; i++) { int err; err = ov2640_write_reg(&cfg->i2c, regs[i].addr, regs[i].value); if (err) { return err; } } return 0; } static int ov2640_soft_reset(const struct device *dev) { int ret = 0; const struct ov2640_config *cfg = dev->config; /* Switch to DSP register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); /* Initiate system reset */ ret |= ov2640_write_reg(&cfg->i2c, COM7, COM7_SRST); return ret; } static int ov2640_set_level(const struct device *dev, int level, int max_level, int cols, const uint8_t regs[][cols]) { int ret = 0; const struct ov2640_config *cfg = dev->config; level += (max_level / 2 + 1); if (level < 0 || level > max_level) { return -ENOTSUP; } /* Switch to DSP register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_DSP); for (int i = 0; i < (ARRAY_SIZE(regs[0]) / sizeof(regs[0][0])); i++) { ret |= ov2640_write_reg(&cfg->i2c, regs[0][i], regs[level][i]); } return ret; } static int ov2640_set_brightness(const struct device *dev, int level) { int ret = 0; ret = ov2640_set_level(dev, level, NUM_BRIGHTNESS_LEVELS, ARRAY_SIZE(brightness_regs[0]), brightness_regs); if (ret == -ENOTSUP) { LOG_ERR("Brightness level %d not supported", level); } return ret; } static int ov2640_set_saturation(const struct device *dev, int level) { int ret = 0; ret = ov2640_set_level(dev, level, NUM_SATURATION_LEVELS, ARRAY_SIZE(saturation_regs[0]), saturation_regs); if (ret == 
-ENOTSUP) { LOG_ERR("Saturation level %d not supported", level); } return ret; } static int ov2640_set_contrast(const struct device *dev, int level) { int ret = 0; ret = ov2640_set_level(dev, level, NUM_CONTRAST_LEVELS, ARRAY_SIZE(contrast_regs[0]), contrast_regs); if (ret == -ENOTSUP) { LOG_ERR("Contrast level %d not supported", level); } return ret; } static int ov2640_set_output_format(const struct device *dev, int output_format) { int ret = 0; const struct ov2640_config *cfg = dev->config; /* Switch to DSP register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_DSP); if (output_format == VIDEO_PIX_FMT_JPEG) { /* Enable JPEG compression */ ret |= ov2640_write_reg(&cfg->i2c, IMAGE_MODE, IMAGE_MODE_JPEG_EN); } else if (output_format == VIDEO_PIX_FMT_RGB565) { /* Disable JPEG compression and set output to RGB565 */ ret |= ov2640_write_reg(&cfg->i2c, IMAGE_MODE, IMAGE_MODE_RGB565); } else { LOG_ERR("Image format not supported"); return -ENOTSUP; } k_msleep(30); return ret; } static int ov2640_set_quality(const struct device *dev, int qs) { int ret = 0; const struct ov2640_config *cfg = dev->config; /* Switch to DSP register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_DSP); /* Write QS register */ ret |= ov2640_write_reg(&cfg->i2c, QS, qs); return ret; } static int ov2640_set_colorbar(const struct device *dev, uint8_t enable) { int ret = 0; const struct ov2640_config *cfg = dev->config; uint8_t reg; /* Switch to SENSOR register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); /* Update COM7 to enable/disable color bar test pattern */ reg = ov2640_read_reg(&cfg->i2c, COM7); if (enable) { reg |= COM7_COLOR_BAR; } else { reg &= ~COM7_COLOR_BAR; } ret |= ov2640_write_reg(&cfg->i2c, COM7, reg); return ret; } static int ov2640_set_white_bal(const struct device *dev, int enable) { int ret = 0; const struct ov2640_config *cfg = dev->config; uint8_t reg; /* Switch to SENSOR register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); /* Update CTRL1 to enable/disable automatic white balance*/ reg = ov2640_read_reg(&cfg->i2c, CTRL1); if (enable) { reg |= CTRL1_AWB; } else { reg &= ~CTRL1_AWB; } ret |= ov2640_write_reg(&cfg->i2c, CTRL1, reg); return ret; } static int ov2640_set_gain_ctrl(const struct device *dev, int enable) { int ret = 0; const struct ov2640_config *cfg = dev->config; uint8_t reg; /* Switch to SENSOR register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); /* Update COM8 to enable/disable automatic gain control */ reg = ov2640_read_reg(&cfg->i2c, COM8); if (enable) { reg |= COM8_AGC_EN; } else { reg &= ~COM8_AGC_EN; } ret |= ov2640_write_reg(&cfg->i2c, COM8, reg); return ret; } static int ov2640_set_exposure_ctrl(const struct device *dev, int enable) { int ret = 0; const struct ov2640_config *cfg = dev->config; uint8_t reg; /* Switch to SENSOR register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); /* Update COM8 to enable/disable automatic exposure control */ reg = ov2640_read_reg(&cfg->i2c, COM8); if (enable) { reg |= COM8_AEC_EN; } else { reg &= ~COM8_AEC_EN; } ret |= ov2640_write_reg(&cfg->i2c, COM8, reg); return ret; } static int ov2640_set_horizontal_mirror(const struct device *dev, int enable) { int ret = 0; const struct ov2640_config *cfg = dev->config; uint8_t reg; /* Switch to SENSOR register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); /* Update REG04 to enable/disable horizontal mirror */ reg = ov2640_read_reg(&cfg->i2c, REG04); 
if (enable) { reg |= REG04_HFLIP_IMG; } else { reg &= ~REG04_HFLIP_IMG; } ret |= ov2640_write_reg(&cfg->i2c, REG04, reg); return ret; } static int ov2640_set_vertical_flip(const struct device *dev, int enable) { int ret = 0; const struct ov2640_config *cfg = dev->config; uint8_t reg; /* Switch to SENSOR register bank */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); /* Update REG04 to enable/disable vertical flip */ reg = ov2640_read_reg(&cfg->i2c, REG04); if (enable) { reg |= REG04_VFLIP_IMG | REG04_VREF_EN; } else { reg &= ~(REG04_VFLIP_IMG | REG04_VREF_EN); } ret |= ov2640_write_reg(&cfg->i2c, REG04, reg); return ret; } static int ov2640_set_resolution(const struct device *dev, uint16_t img_width, uint16_t img_height) { int ret = 0; const struct ov2640_config *cfg = dev->config; uint16_t w = img_width; uint16_t h = img_height; /* Disable DSP */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_DSP); ret |= ov2640_write_reg(&cfg->i2c, R_BYPASS, R_BYPASS_DSP_BYPAS); /* Write output width */ ret |= ov2640_write_reg(&cfg->i2c, ZMOW, (w >> 2) & 0xFF); /* OUTW[7:0] (real/4) */ ret |= ov2640_write_reg(&cfg->i2c, ZMOH, (h >> 2) & 0xFF); /* OUTH[7:0] (real/4) */ ret |= ov2640_write_reg(&cfg->i2c, ZMHH, ((h >> 8) & 0x04) | ((w>>10) & 0x03)); /* OUTH[8]/OUTW[9:8] */ /* Set CLKRC */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); ret |= ov2640_write_reg(&cfg->i2c, CLKRC, cfg->clock_rate_control); /* Write DSP input registers */ ov2640_write_all(dev, uxga_regs, ARRAY_SIZE(uxga_regs)); /* Enable DSP */ ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_DSP); ret |= ov2640_write_reg(&cfg->i2c, R_BYPASS, R_BYPASS_DSP_EN); k_msleep(30); return ret; } uint8_t ov2640_check_connection(const struct device *dev) { int ret = 0; const struct ov2640_config *cfg = dev->config; uint8_t reg_pid_val, reg_ver_val; ret |= ov2640_write_reg(&cfg->i2c, BANK_SEL, BANK_SEL_SENSOR); reg_pid_val = ov2640_read_reg(&cfg->i2c, REG_PID); reg_ver_val = ov2640_read_reg(&cfg->i2c, REG_VER); if (REG_PID_VAL != reg_pid_val || REG_VER_VAL != reg_ver_val) { LOG_ERR("OV2640 not detected\n"); return -ENODEV; } return ret; } static int ov2640_set_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct ov2640_data *drv_data = dev->data; uint16_t width, height; int ret = 0; int i = 0; /* We only support RGB565 and JPEG pixel formats */ if (fmt->pixelformat != VIDEO_PIX_FMT_RGB565 && fmt->pixelformat != VIDEO_PIX_FMT_JPEG) { LOG_ERR("ov2640 camera supports only RGB565 and JPG pixelformats!"); return -ENOTSUP; } width = fmt->width; height = fmt->height; if (!memcmp(&drv_data->fmt, fmt, sizeof(drv_data->fmt))) { /* nothing to do */ return 0; } drv_data->fmt = *fmt; /* Set output format */ ret |= ov2640_set_output_format(dev, fmt->pixelformat); /* Check if camera is capable of handling given format */ while (fmts[i].pixelformat) { if (fmts[i].width_min == width && fmts[i].height_min == height && fmts[i].pixelformat == fmt->pixelformat) { /* Set window size */ ret |= ov2640_set_resolution(dev, fmt->width, fmt->height); return ret; } i++; } /* Camera is not capable of handling given format */ LOG_ERR("Image format not supported\n"); return -ENOTSUP; } static int ov2640_get_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct ov2640_data *drv_data = dev->data; *fmt = drv_data->fmt; return 0; } static int ov2640_stream_start(const struct device *dev) { return 0; } static int ov2640_stream_stop(const struct device *dev) { return 0; } 
static int ov2640_get_caps(const struct device *dev, enum video_endpoint_id ep, struct video_caps *caps) { caps->format_caps = fmts; return 0; } static int ov2640_set_ctrl(const struct device *dev, unsigned int cid, void *value) { int ret = 0; switch (cid) { case VIDEO_CID_HFLIP: ret |= ov2640_set_horizontal_mirror(dev, (int)value); break; case VIDEO_CID_VFLIP: ret |= ov2640_set_vertical_flip(dev, (int)value); break; case VIDEO_CID_CAMERA_EXPOSURE: ret |= ov2640_set_exposure_ctrl(dev, (int)value); break; case VIDEO_CID_CAMERA_GAIN: ret |= ov2640_set_gain_ctrl(dev, (int)value); break; case VIDEO_CID_CAMERA_BRIGHTNESS: ret |= ov2640_set_brightness(dev, (int)value); break; case VIDEO_CID_CAMERA_SATURATION: ret |= ov2640_set_saturation(dev, (int)value); break; case VIDEO_CID_CAMERA_WHITE_BAL: ret |= ov2640_set_white_bal(dev, (int)value); break; case VIDEO_CID_CAMERA_CONTRAST: ret |= ov2640_set_contrast(dev, (int)value); break; case VIDEO_CID_CAMERA_COLORBAR: ret |= ov2640_set_colorbar(dev, (int)value); break; case VIDEO_CID_CAMERA_QUALITY: ret |= ov2640_set_quality(dev, (int)value); break; default: return -ENOTSUP; } return ret; } static const struct video_driver_api ov2640_driver_api = { .set_format = ov2640_set_fmt, .get_format = ov2640_get_fmt, .get_caps = ov2640_get_caps, .stream_start = ov2640_stream_start, .stream_stop = ov2640_stream_stop, .set_ctrl = ov2640_set_ctrl, }; static int ov2640_init(const struct device *dev) { struct video_format fmt; int ret = 0; #if DT_INST_NODE_HAS_PROP(0, reset_gpios) const struct ov2640_config *cfg = dev->config; ret = gpio_pin_configure_dt(&cfg->reset_gpio, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } gpio_pin_set_dt(&cfg->reset_gpio, 0); k_sleep(K_MSEC(1)); gpio_pin_set_dt(&cfg->reset_gpio, 1); k_sleep(K_MSEC(1)); #endif ret = ov2640_check_connection(dev); if (ret) { return ret; } ov2640_soft_reset(dev); k_msleep(300); ov2640_write_all(dev, default_regs, ARRAY_SIZE(default_regs)); /* set default/init format SVGA RGB565 */ fmt.pixelformat = VIDEO_PIX_FMT_RGB565; fmt.width = SVGA_HSIZE; fmt.height = SVGA_VSIZE; fmt.pitch = SVGA_HSIZE * 2; ret = ov2640_set_fmt(dev, VIDEO_EP_OUT, &fmt); if (ret) { LOG_ERR("Unable to configure default format"); return -EIO; } ret |= ov2640_set_exposure_ctrl(dev, 1); ret |= ov2640_set_white_bal(dev, 1); return ret; } /* Unique Instance */ static const struct ov2640_config ov2640_cfg_0 = { .i2c = I2C_DT_SPEC_INST_GET(0), #if DT_INST_NODE_HAS_PROP(0, reset_gpios) .reset_gpio = GPIO_DT_SPEC_INST_GET(0, reset_gpios), #endif .clock_rate_control = DT_INST_PROP(0, clock_rate_control), }; static struct ov2640_data ov2640_data_0; static int ov2640_init_0(const struct device *dev) { const struct ov2640_config *cfg = dev->config; if (!device_is_ready(cfg->i2c.bus)) { LOG_ERR("Bus device is not ready"); return -ENODEV; } #if DT_INST_NODE_HAS_PROP(0, reset_gpios) if (!gpio_is_ready_dt(&cfg->reset_gpio)) { LOG_ERR("%s: device %s is not ready", dev->name, cfg->reset_gpio.port->name); return -ENODEV; } #endif uint32_t i2c_cfg = I2C_MODE_CONTROLLER | I2C_SPEED_SET(I2C_SPEED_STANDARD); if (i2c_configure(cfg->i2c.bus, i2c_cfg)) { LOG_ERR("Failed to configure ov2640 i2c interface."); } return ov2640_init(dev); } DEVICE_DT_INST_DEFINE(0, &ov2640_init_0, NULL, &ov2640_data_0, &ov2640_cfg_0, POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, &ov2640_driver_api); ```
/content/code_sandbox/drivers/video/ov2640.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,213
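The ov2640 set_ctrl implementation above casts the opaque value pointer directly to an int, so controls are passed by value rather than by reference. A hedged sketch of the application side, assuming the standard VIDEO_CID_* control IDs from the video-controls header:

```c
/*
 * Control-path sketch for the ov2640 driver above. Control values are
 * smuggled through the void pointer as plain integers, matching the
 * (int)value casts inside ov2640_set_ctrl().
 */
#include <zephyr/drivers/video.h>
#include <zephyr/drivers/video-controls.h>

int tune_sensor_example(const struct device *sensor)
{
	int ret;

	/* Mirror the image horizontally (REG04_HFLIP_IMG) */
	ret = video_set_ctrl(sensor, VIDEO_CID_HFLIP, (void *)1);
	if (ret) {
		return ret;
	}

	/* Brightness in the -2..+2 range mapped by ov2640_set_level() */
	return video_set_ctrl(sensor, VIDEO_CID_CAMERA_BRIGHTNESS, (void *)1);
}
```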
```unknown
# OV2640

config VIDEO_OV2640
	bool "OV2640 CMOS digital image sensor"
	select I2C
	depends on DT_HAS_OVTI_OV2640_ENABLED
	default y
	help
	  Enable driver for OV2640 CMOS digital image sensor device.
```
/content/code_sandbox/drivers/video/Kconfig.ov2640
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
62
```unknown
# MT9M114

config VIDEO_MT9M114
	bool "MT9M114 Aptina CMOS digital image sensor"
	select I2C
	depends on DT_HAS_APTINA_MT9M114_ENABLED
	default y
	help
	  Enable driver for MT9M114 CMOS digital image sensor device.
```
/content/code_sandbox/drivers/video/Kconfig.mt9m114
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```unknown
config VIDEO_OV7670
	bool "OV7670 CMOS digital image sensor"
	select I2C
	depends on DT_HAS_OVTI_OV7670_ENABLED
	default y
	help
	  Enable driver for OV7670 CMOS digital image sensor device.
```
/content/code_sandbox/drivers/video/Kconfig.ov7670
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
58
```unknown
# STM32 DCMI driver configuration options

DT_STM32_DCMI_HAS_DMA := $(dt_nodelabel_has_prop,dcmi,dmas)

config VIDEO_STM32_DCMI
	bool "STM32 Digital camera interface (DCMI) driver"
	default y
	depends on DT_HAS_ST_STM32_DCMI_ENABLED
	select USE_STM32_HAL_DCMI
	select USE_STM32_HAL_MDMA if SOC_SERIES_STM32H7X
	select DMA if $(DT_STM32_DCMI_HAS_DMA)
	select USE_STM32_HAL_DMA if $(DT_STM32_DCMI_HAS_DMA)
	select USE_STM32_HAL_DMA_EX if $(DT_STM32_DCMI_HAS_DMA)
	help
	  Enable driver for STM32 Digital camera interface peripheral.

module = STM32_DCMI
module-str = stm32_dcmi
source "subsys/logging/Kconfig.template.log_config"
```
/content/code_sandbox/drivers/video/Kconfig.stm32_dcmi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
189
```c /* * */ #define DT_DRV_COMPAT nxp_mipi_csi2rx #include <fsl_mipi_csi2rx.h> #include <zephyr/drivers/video.h> #include <zephyr/kernel.h> #define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mipi_csi); /* * Two data lanes are set by default as 2-lanes camera sensors are * more common and more performant but single lane is also supported. */ #define DEFAULT_MIPI_CSI_NUM_LANES 2 #define DEFAULT_CAMERA_FRAME_RATE 30 struct mipi_csi2rx_config { const MIPI_CSI2RX_Type *base; const struct device *sensor_dev; }; struct mipi_csi2rx_data { csi2rx_config_t csi2rxConfig; }; static int mipi_csi2rx_set_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { const struct mipi_csi2rx_config *config = dev->config; struct mipi_csi2rx_data *drv_data = dev->data; csi2rx_config_t csi2rxConfig = {0}; uint8_t i = 0; /* * Initialize the MIPI CSI2 * * From D-PHY specification, the T-HSSETTLE should in the range of 85ns+6*UI to 145ns+10*UI * UI is Unit Interval, equal to the duration of any HS state on the Clock Lane * * T-HSSETTLE = csi2rxConfig.tHsSettle_EscClk * (Tperiod of RxClkInEsc) * * csi2rxConfig.tHsSettle_EscClk setting for camera: * * Resolution | frame rate | T_HS_SETTLE * ============================================= * 720P | 30 | 0x12 * --------------------------------------------- * 720P | 15 | 0x17 * --------------------------------------------- * VGA | 30 | 0x1F * --------------------------------------------- * VGA | 15 | 0x24 * --------------------------------------------- * QVGA | 30 | 0x1F * --------------------------------------------- * QVGA | 15 | 0x24 * --------------------------------------------- */ static const uint32_t csi2rxHsSettle[][4] = { { 1280, 720, 30, 0x12, }, { 1280, 720, 15, 0x17, }, { 640, 480, 30, 0x1F, }, { 640, 480, 15, 0x24, }, { 320, 240, 30, 0x1F, }, { 320, 240, 15, 0x24, }, }; csi2rxConfig.laneNum = DEFAULT_MIPI_CSI_NUM_LANES; for (i = 0; i < ARRAY_SIZE(csi2rxHsSettle); i++) { if ((fmt->width == csi2rxHsSettle[i][0]) && (fmt->height == csi2rxHsSettle[i][1]) && (DEFAULT_CAMERA_FRAME_RATE == csi2rxHsSettle[i][2])) { csi2rxConfig.tHsSettle_EscClk = csi2rxHsSettle[i][3]; break; } } if (i == ARRAY_SIZE(csi2rxHsSettle)) { LOG_ERR("Unsupported resolution"); return -ENOTSUP; } drv_data->csi2rxConfig = csi2rxConfig; if (video_set_format(config->sensor_dev, ep, fmt)) { return -EIO; } return 0; } static int mipi_csi2rx_get_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { const struct mipi_csi2rx_config *config = dev->config; if (fmt == NULL || ep != VIDEO_EP_OUT) { return -EINVAL; } if (video_get_format(config->sensor_dev, ep, fmt)) { return -EIO; } return 0; } static int mipi_csi2rx_stream_start(const struct device *dev) { const struct mipi_csi2rx_config *config = dev->config; struct mipi_csi2rx_data *drv_data = dev->data; CSI2RX_Init((MIPI_CSI2RX_Type *)config->base, &drv_data->csi2rxConfig); if (video_stream_start(config->sensor_dev)) { return -EIO; } return 0; } static int mipi_csi2rx_stream_stop(const struct device *dev) { const struct mipi_csi2rx_config *config = dev->config; if (video_stream_stop(config->sensor_dev)) { return -EIO; } CSI2RX_Deinit((MIPI_CSI2RX_Type *)config->base); return 0; } static int mipi_csi2rx_get_caps(const struct device *dev, enum video_endpoint_id ep, struct video_caps *caps) { const struct mipi_csi2rx_config *config = dev->config; if (ep != VIDEO_EP_OUT) { return -EINVAL; } /* Just forward to sensor dev for now */ return 
video_get_caps(config->sensor_dev, ep, caps); } static const struct video_driver_api mipi_csi2rx_driver_api = { .get_caps = mipi_csi2rx_get_caps, .get_format = mipi_csi2rx_get_fmt, .set_format = mipi_csi2rx_set_fmt, .stream_start = mipi_csi2rx_stream_start, .stream_stop = mipi_csi2rx_stream_stop, }; static int mipi_csi2rx_init(const struct device *dev) { const struct mipi_csi2rx_config *config = dev->config; /* Check if there is any sensor device */ if (!device_is_ready(config->sensor_dev)) { return -ENODEV; } return 0; } #define MIPI_CSI2RX_INIT(n) \ static struct mipi_csi2rx_data mipi_csi2rx_data_##n; \ \ static const struct mipi_csi2rx_config mipi_csi2rx_config_##n = { \ .base = (MIPI_CSI2RX_Type *)DT_INST_REG_ADDR(n), \ .sensor_dev = DEVICE_DT_GET(DT_INST_PHANDLE(n, sensor)), \ }; \ \ DEVICE_DT_INST_DEFINE(n, &mipi_csi2rx_init, NULL, &mipi_csi2rx_data_##n, \ &mipi_csi2rx_config_##n, POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, \ &mipi_csi2rx_driver_api); DT_INST_FOREACH_STATUS_OKAY(MIPI_CSI2RX_INIT) ```
/content/code_sandbox/drivers/video/video_mcux_mipi_csi2rx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,626
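The CSI-2 Rx bridge above only accepts formats whose width/height/frame-rate triple appears in its csi2rxHsSettle table, then forwards the format to the sensor referenced by its "sensor" phandle. A pipeline sketch from the application's point of view, with resolution values chosen to match the 720p/30 row of that table:

```c
/*
 * Pipeline sketch: the application talks to the CSI-2 Rx bridge device,
 * which validates T-HSSETTLE against its lookup table and then forwards
 * the format to the attached sensor.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/video.h>

int csi2rx_setup_example(const struct device *csi2rx)
{
	struct video_format fmt = {
		.pixelformat = VIDEO_PIX_FMT_RGB565,
		.width = 1280,
		.height = 720, /* matches the 30 fps row of csi2rxHsSettle */
		.pitch = 1280 * 2,
	};

	return video_set_format(csi2rx, VIDEO_EP_OUT, &fmt);
}
```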
```c /* * */ #define DT_DRV_COMPAT ovti_ov5640 #include <zephyr/device.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/video.h> #include <zephyr/kernel.h> #define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ov5640); #include <zephyr/sys/byteorder.h> #define CHIP_ID_REG 0x300a #define CHIP_ID_VAL 0x5640 #define SYS_CTRL0_REG 0x3008 #define SYS_CTRL0_SW_PWDN 0x42 #define SYS_CTRL0_SW_PWUP 0x02 #define SYS_CTRL0_SW_RST 0x82 #define SYS_RESET00_REG 0x3000 #define SYS_RESET02_REG 0x3002 #define SYS_CLK_ENABLE00_REG 0x3004 #define SYS_CLK_ENABLE02_REG 0x3006 #define IO_MIPI_CTRL00_REG 0x300e #define SYSTEM_CONTROL1_REG 0x302e #define SCCB_SYS_CTRL1_REG 0x3103 #define TIMING_TC_REG20_REG 0x3820 #define TIMING_TC_REG21_REG 0x3821 #define HZ5060_CTRL01_REG 0x3c01 #define ISP_CTRL01_REG 0x5001 #define SC_PLL_CTRL0_REG 0x3034 #define SC_PLL_CTRL1_REG 0x3035 #define SC_PLL_CTRL2_REG 0x3036 #define SC_PLL_CTRL3_REG 0x3037 #define SYS_ROOT_DIV_REG 0x3108 #define PCLK_PERIOD_REG 0x4837 #define AEC_CTRL00_REG 0x3a00 #define AEC_CTRL0F_REG 0x3a0f #define AEC_CTRL10_REG 0x3a10 #define AEC_CTRL11_REG 0x3a11 #define AEC_CTRL1B_REG 0x3a1b #define AEC_CTRL1E_REG 0x3a1e #define AEC_CTRL1F_REG 0x3a1f #define BLC_CTRL01_REG 0x4001 #define BLC_CTRL04_REG 0x4004 #define BLC_CTRL05_REG 0x4005 #define AWB_CTRL00_REG 0x5180 #define AWB_CTRL01_REG 0x5181 #define AWB_CTRL02_REG 0x5182 #define AWB_CTRL03_REG 0x5183 #define AWB_CTRL04_REG 0x5184 #define AWB_CTRL05_REG 0x5185 #define AWB_CTRL17_REG 0x5191 #define AWB_CTRL18_REG 0x5192 #define AWB_CTRL19_REG 0x5193 #define AWB_CTRL20_REG 0x5194 #define AWB_CTRL21_REG 0x5195 #define AWB_CTRL22_REG 0x5196 #define AWB_CTRL23_REG 0x5197 #define AWB_CTRL30_REG 0x519e #define SDE_CTRL0_REG 0x5580 #define SDE_CTRL3_REG 0x5583 #define SDE_CTRL4_REG 0x5584 #define SDE_CTRL9_REG 0x5589 #define SDE_CTRL10_REG 0x558a #define SDE_CTRL11_REG 0x558b #define DEFAULT_MIPI_CHANNEL 0 #define OV5640_RESOLUTION_PARAM_NUM 24 struct ov5640_config { struct i2c_dt_spec i2c; struct gpio_dt_spec reset_gpio; struct gpio_dt_spec powerdown_gpio; }; struct ov5640_data { struct video_format fmt; }; struct ov5640_reg { uint16_t addr; uint8_t val; }; struct ov5640_mipi_clock_config { uint8_t pllCtrl1; uint8_t pllCtrl2; }; struct ov5640_resolution_config { uint16_t width; uint16_t height; const struct ov5640_reg *res_params; const struct ov5640_mipi_clock_config mipi_pclk; }; static const struct ov5640_reg ov5640InitParams[] = { /* Power down */ {SYS_CTRL0_REG, SYS_CTRL0_SW_PWDN}, /* System setting. 
*/ {SCCB_SYS_CTRL1_REG, 0x13}, {SCCB_SYS_CTRL1_REG, 0x03}, {SYS_RESET00_REG, 0x00}, {SYS_CLK_ENABLE00_REG, 0xff}, {SYS_RESET02_REG, 0x1c}, {SYS_CLK_ENABLE02_REG, 0xc3}, {SYSTEM_CONTROL1_REG, 0x08}, {0x3618, 0x00}, {0x3612, 0x29}, {0x3708, 0x64}, {0x3709, 0x52}, {0x370c, 0x03}, {TIMING_TC_REG20_REG, 0x41}, {TIMING_TC_REG21_REG, 0x07}, {0x3630, 0x36}, {0x3631, 0x0e}, {0x3632, 0xe2}, {0x3633, 0x12}, {0x3621, 0xe0}, {0x3704, 0xa0}, {0x3703, 0x5a}, {0x3715, 0x78}, {0x3717, 0x01}, {0x370b, 0x60}, {0x3705, 0x1a}, {0x3905, 0x02}, {0x3906, 0x10}, {0x3901, 0x0a}, {0x3731, 0x12}, {0x3600, 0x08}, {0x3601, 0x33}, {0x302d, 0x60}, {0x3620, 0x52}, {0x371b, 0x20}, {0x471c, 0x50}, {0x3a13, 0x43}, {0x3a18, 0x00}, {0x3a19, 0x7c}, {0x3635, 0x13}, {0x3636, 0x03}, {0x3634, 0x40}, {0x3622, 0x01}, {HZ5060_CTRL01_REG, 0x00}, {AEC_CTRL00_REG, 0x58}, {BLC_CTRL01_REG, 0x02}, {BLC_CTRL04_REG, 0x02}, {BLC_CTRL05_REG, 0x1a}, {ISP_CTRL01_REG, 0xa3}, /* AEC */ {AEC_CTRL0F_REG, 0x30}, {AEC_CTRL10_REG, 0x28}, {AEC_CTRL1B_REG, 0x30}, {AEC_CTRL1E_REG, 0x26}, {AEC_CTRL11_REG, 0x60}, {AEC_CTRL1F_REG, 0x14}, /* AWB */ {AWB_CTRL00_REG, 0xff}, {AWB_CTRL01_REG, 0xf2}, {AWB_CTRL02_REG, 0x00}, {AWB_CTRL03_REG, 0x14}, {AWB_CTRL04_REG, 0x25}, {AWB_CTRL05_REG, 0x24}, {0x5186, 0x09}, {0x5187, 0x09}, {0x5188, 0x09}, {0x5189, 0x88}, {0x518a, 0x54}, {0x518b, 0xee}, {0x518c, 0xb2}, {0x518d, 0x50}, {0x518e, 0x34}, {0x518f, 0x6b}, {0x5190, 0x46}, {AWB_CTRL17_REG, 0xf8}, {AWB_CTRL18_REG, 0x04}, {AWB_CTRL19_REG, 0x70}, {AWB_CTRL20_REG, 0xf0}, {AWB_CTRL21_REG, 0xf0}, {AWB_CTRL22_REG, 0x03}, {AWB_CTRL23_REG, 0x01}, {0x5198, 0x04}, {0x5199, 0x6c}, {0x519a, 0x04}, {0x519b, 0x00}, {0x519c, 0x09}, {0x519d, 0x2b}, {AWB_CTRL30_REG, 0x38}, /* Color Matrix */ {0x5381, 0x1e}, {0x5382, 0x5b}, {0x5383, 0x08}, {0x5384, 0x0a}, {0x5385, 0x7e}, {0x5386, 0x88}, {0x5387, 0x7c}, {0x5388, 0x6c}, {0x5389, 0x10}, {0x538a, 0x01}, {0x538b, 0x98}, /* Sharp */ {0x5300, 0x08}, {0x5301, 0x30}, {0x5302, 0x10}, {0x5303, 0x00}, {0x5304, 0x08}, {0x5305, 0x30}, {0x5306, 0x08}, {0x5307, 0x16}, {0x5309, 0x08}, {0x530a, 0x30}, {0x530b, 0x04}, {0x530c, 0x06}, /* Gamma */ {0x5480, 0x01}, {0x5481, 0x08}, {0x5482, 0x14}, {0x5483, 0x28}, {0x5484, 0x51}, {0x5485, 0x65}, {0x5486, 0x71}, {0x5487, 0x7d}, {0x5488, 0x87}, {0x5489, 0x91}, {0x548a, 0x9a}, {0x548b, 0xaa}, {0x548c, 0xb8}, {0x548d, 0xcd}, {0x548e, 0xdd}, {0x548f, 0xea}, {0x5490, 0x1d}, /* UV adjust. */ {SDE_CTRL0_REG, 0x02}, {SDE_CTRL3_REG, 0x40}, {SDE_CTRL4_REG, 0x10}, {SDE_CTRL9_REG, 0x10}, {SDE_CTRL10_REG, 0x00}, {SDE_CTRL11_REG, 0xf8}, /* Lens correction. 
*/ {0x5800, 0x23}, {0x5801, 0x14}, {0x5802, 0x0f}, {0x5803, 0x0f}, {0x5804, 0x12}, {0x5805, 0x26}, {0x5806, 0x0c}, {0x5807, 0x08}, {0x5808, 0x05}, {0x5809, 0x05}, {0x580a, 0x08}, {0x580b, 0x0d}, {0x580c, 0x08}, {0x580d, 0x03}, {0x580e, 0x00}, {0x580f, 0x00}, {0x5810, 0x03}, {0x5811, 0x09}, {0x5812, 0x07}, {0x5813, 0x03}, {0x5814, 0x00}, {0x5815, 0x01}, {0x5816, 0x03}, {0x5817, 0x08}, {0x5818, 0x0d}, {0x5819, 0x08}, {0x581a, 0x05}, {0x581b, 0x06}, {0x581c, 0x08}, {0x581d, 0x0e}, {0x581e, 0x29}, {0x581f, 0x17}, {0x5820, 0x11}, {0x5821, 0x11}, {0x5822, 0x15}, {0x5823, 0x28}, {0x5824, 0x46}, {0x5825, 0x26}, {0x5826, 0x08}, {0x5827, 0x26}, {0x5828, 0x64}, {0x5829, 0x26}, {0x582a, 0x24}, {0x582b, 0x22}, {0x582c, 0x24}, {0x582d, 0x24}, {0x582e, 0x06}, {0x582f, 0x22}, {0x5830, 0x40}, {0x5831, 0x42}, {0x5832, 0x24}, {0x5833, 0x26}, {0x5834, 0x24}, {0x5835, 0x22}, {0x5836, 0x22}, {0x5837, 0x26}, {0x5838, 0x44}, {0x5839, 0x24}, {0x583a, 0x26}, {0x583b, 0x28}, {0x583c, 0x42}, {0x583d, 0xce}, {0x5000, 0xa7}, }; static const struct ov5640_reg ov5640_low_res_params[] = { {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x04}, {0x3804, 0x0a}, {0x3805, 0x3f}, {0x3806, 0x07}, {0x3807, 0x9b}, {0x3808, 0x02}, {0x3809, 0x80}, {0x380a, 0x01}, {0x380b, 0xe0}, {0x380c, 0x07}, {0x380d, 0x68}, {0x380e, 0x03}, {0x380f, 0xd8}, {0x3810, 0x00}, {0x3811, 0x10}, {0x3812, 0x00}, {0x3813, 0x06}, {0x3814, 0x31}, {0x3815, 0x31}, {0x3824, 0x02}, {0x460c, 0x22}}; static const struct ov5640_reg ov5640_720p_res_params[] = { {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0xfa}, {0x3804, 0x0a}, {0x3805, 0x3f}, {0x3806, 0x06}, {0x3807, 0xa9}, {0x3808, 0x05}, {0x3809, 0x00}, {0x380a, 0x02}, {0x380b, 0xd0}, {0x380c, 0x07}, {0x380d, 0x64}, {0x380e, 0x02}, {0x380f, 0xe4}, {0x3810, 0x00}, {0x3811, 0x10}, {0x3812, 0x00}, {0x3813, 0x04}, {0x3814, 0x31}, {0x3815, 0x31}, {0x3824, 0x04}, {0x460c, 0x20}}; static const struct ov5640_resolution_config resolutionParams[] = { {.width = 640, .height = 480, .res_params = ov5640_low_res_params, .mipi_pclk = { .pllCtrl1 = 0x14, .pllCtrl2 = 0x38, }}, {.width = 1280, .height = 720, .res_params = ov5640_720p_res_params, .mipi_pclk = { .pllCtrl1 = 0x21, .pllCtrl2 = 0x54, }}, }; #define OV5640_VIDEO_FORMAT_CAP(width, height, format) \ { \ .pixelformat = (format), .width_min = (width), .width_max = (width), \ .height_min = (height), .height_max = (height), .width_step = 0, .height_step = 0 \ } static const struct video_format_cap fmts[] = { OV5640_VIDEO_FORMAT_CAP(1280, 720, VIDEO_PIX_FMT_RGB565), OV5640_VIDEO_FORMAT_CAP(1280, 720, VIDEO_PIX_FMT_YUYV), OV5640_VIDEO_FORMAT_CAP(640, 480, VIDEO_PIX_FMT_RGB565), OV5640_VIDEO_FORMAT_CAP(640, 480, VIDEO_PIX_FMT_YUYV), {0}}; static int ov5640_read_reg(const struct i2c_dt_spec *spec, const uint16_t addr, void *val, const uint8_t val_size) { int ret; struct i2c_msg msg[2]; uint8_t addr_buf[2]; if (val_size > 4) { return -ENOTSUP; } addr_buf[1] = addr & 0xFF; addr_buf[0] = addr >> 8; msg[0].buf = addr_buf; msg[0].len = 2U; msg[0].flags = I2C_MSG_WRITE; msg[1].buf = (uint8_t *)val; msg[1].len = val_size; msg[1].flags = I2C_MSG_READ | I2C_MSG_STOP | I2C_MSG_RESTART; ret = i2c_transfer_dt(spec, msg, 2); if (ret) { return ret; } switch (val_size) { case 4: *(uint32_t *)val = sys_be32_to_cpu(*(uint32_t *)val); break; case 2: *(uint16_t *)val = sys_be16_to_cpu(*(uint16_t *)val); break; case 1: break; default: return -ENOTSUP; } return 0; } static int ov5640_write_reg(const struct i2c_dt_spec *spec, const uint16_t addr, const uint8_t val) { uint8_t 
addr_buf[2]; struct i2c_msg msg[2]; addr_buf[1] = addr & 0xFF; addr_buf[0] = addr >> 8; msg[0].buf = addr_buf; msg[0].len = 2U; msg[0].flags = I2C_MSG_WRITE; msg[1].buf = (uint8_t *)&val; msg[1].len = 1; msg[1].flags = I2C_MSG_WRITE | I2C_MSG_STOP; return i2c_transfer_dt(spec, msg, 2); } static int ov5640_modify_reg(const struct i2c_dt_spec *spec, const uint16_t addr, const uint8_t mask, const uint8_t val) { uint8_t regVal = 0; int ret = ov5640_read_reg(spec, addr, &regVal, sizeof(regVal)); if (ret) { return ret; } return ov5640_write_reg(spec, addr, (regVal & ~mask) | (val & mask)); } static int ov5640_write_multi_regs(const struct i2c_dt_spec *spec, const struct ov5640_reg *regs, const uint32_t num_regs) { int ret; for (int i = 0; i < num_regs; i++) { ret = ov5640_write_reg(spec, regs[i].addr, regs[i].val); if (ret) { return ret; } } return 0; } static int ov5640_set_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct ov5640_data *drv_data = dev->data; const struct ov5640_config *cfg = dev->config; int ret; int i; for (i = 0; i < ARRAY_SIZE(fmts); ++i) { if (fmt->pixelformat == fmts[i].pixelformat && fmt->width >= fmts[i].width_min && fmt->width <= fmts[i].width_max && fmt->height >= fmts[i].height_min && fmt->height <= fmts[i].height_max) { break; } } if (i == ARRAY_SIZE(fmts)) { LOG_ERR("Unsupported pixel format or resolution"); return -ENOTSUP; } if (!memcmp(&drv_data->fmt, fmt, sizeof(drv_data->fmt))) { return 0; } drv_data->fmt = *fmt; /* Set resolution parameters */ for (i = 0; i < ARRAY_SIZE(resolutionParams); i++) { if (fmt->width == resolutionParams[i].width && fmt->height == resolutionParams[i].height) { ret = ov5640_write_multi_regs(&cfg->i2c, resolutionParams[i].res_params, OV5640_RESOLUTION_PARAM_NUM); if (ret) { LOG_ERR("Unable to set resolution parameters"); return ret; } break; } } /* Set pixel format, default to VIDEO_PIX_FMT_RGB565 */ struct ov5640_reg fmt_params[2] = { {0x4300, 0x6f}, {0x501f, 0x01}, }; if (fmt->pixelformat == VIDEO_PIX_FMT_YUYV) { fmt_params[0].val = 0x3f; fmt_params[1].val = 0x00; } ret = ov5640_write_multi_regs(&cfg->i2c, fmt_params, ARRAY_SIZE(fmt_params)); if (ret) { LOG_ERR("Unable to set pixel format"); return ret; } /* Configure MIPI pixel clock */ ret |= ov5640_modify_reg(&cfg->i2c, SC_PLL_CTRL0_REG, 0x0f, 0x08); ret |= ov5640_modify_reg(&cfg->i2c, SC_PLL_CTRL1_REG, 0xff, resolutionParams[i].mipi_pclk.pllCtrl1); ret |= ov5640_modify_reg(&cfg->i2c, SC_PLL_CTRL2_REG, 0xff, resolutionParams[i].mipi_pclk.pllCtrl2); ret |= ov5640_modify_reg(&cfg->i2c, SC_PLL_CTRL3_REG, 0x1f, 0x13); ret |= ov5640_modify_reg(&cfg->i2c, SYS_ROOT_DIV_REG, 0x3f, 0x01); ret |= ov5640_write_reg(&cfg->i2c, PCLK_PERIOD_REG, 0x0a); if (ret) { LOG_ERR("Unable to configure MIPI pixel clock"); return ret; } return 0; } static int ov5640_get_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct ov5640_data *drv_data = dev->data; *fmt = drv_data->fmt; return 0; } static int ov5640_get_caps(const struct device *dev, enum video_endpoint_id ep, struct video_caps *caps) { caps->format_caps = fmts; return 0; } static int ov5640_stream_start(const struct device *dev) { const struct ov5640_config *cfg = dev->config; /* Power up MIPI PHY HS Tx & LP Rx in 2 data lanes mode */ int ret = ov5640_write_reg(&cfg->i2c, IO_MIPI_CTRL00_REG, 0x45); if (ret) { LOG_ERR("Unable to power up MIPI PHY"); return ret; } return ov5640_write_reg(&cfg->i2c, SYS_CTRL0_REG, SYS_CTRL0_SW_PWUP); } static int ov5640_stream_stop(const 
struct device *dev) { const struct ov5640_config *cfg = dev->config; /* Power down MIPI PHY HS Tx & LP Rx */ int ret = ov5640_write_reg(&cfg->i2c, IO_MIPI_CTRL00_REG, 0x40); if (ret) { LOG_ERR("Unable to power down MIPI PHY"); return ret; } return ov5640_write_reg(&cfg->i2c, SYS_CTRL0_REG, SYS_CTRL0_SW_PWDN); } static const struct video_driver_api ov5640_driver_api = { .set_format = ov5640_set_fmt, .get_format = ov5640_get_fmt, .get_caps = ov5640_get_caps, .stream_start = ov5640_stream_start, .stream_stop = ov5640_stream_stop, }; static int ov5640_init(const struct device *dev) { const struct ov5640_config *cfg = dev->config; struct video_format fmt; uint16_t chip_id; int ret; if (!device_is_ready(cfg->i2c.bus)) { LOG_ERR("Bus device is not ready"); return -ENODEV; } if (!gpio_is_ready_dt(&cfg->reset_gpio)) { LOG_ERR("%s: device %s is not ready", dev->name, cfg->reset_gpio.port->name); return -ENODEV; } if (!gpio_is_ready_dt(&cfg->powerdown_gpio)) { LOG_ERR("%s: device %s is not ready", dev->name, cfg->powerdown_gpio.port->name); return -ENODEV; } /* Power up sequence */ if (cfg->powerdown_gpio.port != NULL) { ret = gpio_pin_configure_dt(&cfg->powerdown_gpio, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } } if (cfg->reset_gpio.port != NULL) { ret = gpio_pin_configure_dt(&cfg->reset_gpio, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } } k_sleep(K_MSEC(5)); if (cfg->powerdown_gpio.port != NULL) { gpio_pin_set_dt(&cfg->powerdown_gpio, 0); } k_sleep(K_MSEC(1)); if (cfg->reset_gpio.port != NULL) { gpio_pin_set_dt(&cfg->reset_gpio, 0); } k_sleep(K_MSEC(20)); /* Software reset */ ret = ov5640_write_reg(&cfg->i2c, SYS_CTRL0_REG, SYS_CTRL0_SW_RST); if (ret) { LOG_ERR("Unable to perform software reset"); return -EIO; } k_sleep(K_MSEC(5)); /* Initialize register values */ ret = ov5640_write_multi_regs(&cfg->i2c, ov5640InitParams, ARRAY_SIZE(ov5640InitParams)); if (ret) { LOG_ERR("Unable to initialize the sensor"); return -EIO; } /* Set virtual channel */ ret = ov5640_modify_reg(&cfg->i2c, 0x4814, 3U << 6, (uint8_t)(DEFAULT_MIPI_CHANNEL) << 6); if (ret) { LOG_ERR("Unable to set virtual channel"); return -EIO; } /* Check sensor chip id */ ret = ov5640_read_reg(&cfg->i2c, CHIP_ID_REG, &chip_id, sizeof(chip_id)); if (ret) { LOG_ERR("Unable to read sensor chip ID, ret = %d", ret); return -ENODEV; } if (chip_id != CHIP_ID_VAL) { LOG_ERR("Wrong chip ID: %04x (expected %04x)", chip_id, CHIP_ID_VAL); return -ENODEV; } /* Set default format to 720p RGB565 */ fmt.pixelformat = VIDEO_PIX_FMT_RGB565; fmt.width = 1280; fmt.height = 720; fmt.pitch = fmt.width * 2; ret = ov5640_set_fmt(dev, VIDEO_EP_OUT, &fmt); if (ret) { LOG_ERR("Unable to configure default format"); return -EIO; } return 0; } #define OV5640_INIT(n) \ static struct ov5640_data ov5640_data_##n; \ \ static const struct ov5640_config ov5640_cfg_##n = { \ .i2c = I2C_DT_SPEC_INST_GET(n), \ .reset_gpio = GPIO_DT_SPEC_INST_GET_OR(n, reset_gpios, {0}), \ .powerdown_gpio = GPIO_DT_SPEC_INST_GET_OR(n, powerdown_gpios, {0}), \ }; \ \ DEVICE_DT_INST_DEFINE(n, &ov5640_init, NULL, &ov5640_data_##n, &ov5640_cfg_##n, \ POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, &ov5640_driver_api); DT_INST_FOREACH_STATUS_OKAY(OV5640_INIT) ```
/content/code_sandbox/drivers/video/ov5640.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,898
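The ov5640.c driver above only fills in the video driver API; applications drive it through the generic Zephyr video calls. A minimal consumer sketch, assuming a board overlay with a node labeled `ov5640` (the label is an assumption) and a format drawn from the driver's fmts[] table:

```c
/* Minimal sketch: select 720p RGB565 on the OV5640 above and start
 * streaming through the generic Zephyr video API. The devicetree label
 * "ov5640" is an assumption; match it to the actual board overlay.
 */
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/video.h>

int ov5640_start_720p(void)
{
	const struct device *sensor = DEVICE_DT_GET(DT_NODELABEL(ov5640));
	struct video_format fmt = {
		.pixelformat = VIDEO_PIX_FMT_RGB565,
		.width = 1280,
		.height = 720,
		.pitch = 1280 * 2, /* bytes per line for a 2-byte pixel format */
	};

	if (!device_is_ready(sensor)) {
		return -ENODEV;
	}

	/* Lands in ov5640_set_fmt(): picks the matching resolution/PLL entry */
	if (video_set_format(sensor, VIDEO_EP_OUT, &fmt)) {
		return -EIO;
	}

	/* Lands in ov5640_stream_start(): powers the MIPI PHY and the sensor */
	return video_stream_start(sensor);
}
```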
```unknown # NXP MIPI CSI-2 Rx driver configuration option config VIDEO_MCUX_MIPI_CSI2RX bool "NXP MIPI CSI-2 Rx driver" default y depends on DT_HAS_NXP_MIPI_CSI2RX_ENABLED select VIDEO_MCUX_CSI ```
/content/code_sandbox/drivers/video/Kconfig.mcux_mipi_csi2rx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
62
```unknown # NXP MCUX CSI driver configuration options config VIDEO_MCUX_CSI bool "NXP MCUX CMOS Sensor Interface (CSI) driver" default y depends on DT_HAS_NXP_IMX_CSI_ENABLED config VIDEO_MCUX_CSI_INIT_PRIORITY int "NXP MCUX CSI init priority" default 61 depends on VIDEO_MCUX_CSI help Initialization priority for the CSI interface on an NXP MCUX device. ```
/content/code_sandbox/drivers/video/Kconfig.mcux_csi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
98
```unknown # VIDEO driver configuration options # # VIDEO Drivers # menuconfig VIDEO bool "Video drivers" help Enable support for video drivers. if VIDEO config VIDEO_INIT_PRIORITY int "Video initialization priority" default 60 help System initialization priority for video drivers. config VIDEO_BUFFER_POOL_SZ_MAX int "Size of the largest buffer in the video pool" default 1048576 config VIDEO_BUFFER_POOL_NUM_MAX int "Number of maximum-sized buffers in the video pool" default 2 config VIDEO_BUFFER_POOL_ALIGN int "Alignment of the video pool buffers" default 64 source "drivers/video/Kconfig.mcux_csi" source "drivers/video/Kconfig.mcux_mipi_csi2rx" source "drivers/video/Kconfig.sw_generator" source "drivers/video/Kconfig.mt9m114" source "drivers/video/Kconfig.ov7725" source "drivers/video/Kconfig.ov2640" source "drivers/video/Kconfig.stm32_dcmi" source "drivers/video/Kconfig.ov5640" source "drivers/video/Kconfig.ov7670" endif # VIDEO ```
/content/code_sandbox/drivers/video/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
230
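The three VIDEO_BUFFER_POOL_* options above bound a shared pool that capture buffers are carved from. A sketch of the consuming side, assuming the video_buffer_aligned_alloc()/video_buffer_release() helpers from <zephyr/drivers/video.h> with (size, align) signatures:

```c
/* Sketch: allocating capture buffers against the pool limits above.
 * Assumes video_buffer_aligned_alloc(size, align); a frame larger than
 * CONFIG_VIDEO_BUFFER_POOL_SZ_MAX simply fails to allocate.
 */
#include <errno.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/video.h>

static struct video_buffer *buffers[CONFIG_VIDEO_BUFFER_POOL_NUM_MAX];

int alloc_capture_buffers(size_t frame_size)
{
	for (int i = 0; i < ARRAY_SIZE(buffers); i++) {
		buffers[i] = video_buffer_aligned_alloc(frame_size,
							CONFIG_VIDEO_BUFFER_POOL_ALIGN);
		if (buffers[i] == NULL) {
			while (i--) {
				video_buffer_release(buffers[i]); /* roll back */
			}
			return -ENOMEM;
		}
	}
	return 0;
}
```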
```unknown # OV5640 config VIDEO_OV5640 bool "OV5640 CMOS digital image sensor" select I2C depends on DT_HAS_OVTI_OV5640_ENABLED default y help Enable driver for OV5640 CMOS digital image sensor device ```
/content/code_sandbox/drivers/video/Kconfig.ov5640
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
62
```c /* * */ #define DT_DRV_COMPAT ovti_ov7725 #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/video.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/gpio.h> #define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ov7725); #define OV7725_REVISION 0x7721U #define OV7725_GAIN 0x00U #define OV7725_BLUE 0x01U #define OV7725_RED 0x02U #define OV7725_GREEN 0x03U #define OV7725_BAVG 0x05U #define OV7725_GAVG 0x06U #define OV7725_RAVG 0x07U #define OV7725_AECH 0x08U #define OV7725_COM2 0x09U #define OV7725_PID 0x0AU #define OV7725_VER 0x0BU #define OV7725_COM3 0x0CU #define OV7725_COM4 0x0DU #define OV7725_COM5 0x0EU #define OV7725_COM6 0x0FU #define OV7725_AEC 0x10U #define OV7725_CLKRC 0x11U #define OV7725_COM7 0x12U #define OV7725_COM8 0x13U #define OV7725_COM9 0x14U #define OV7725_COM10 0x15U #define OV7725_REG16 0x16U #define OV7725_HSTART 0x17U #define OV7725_HSIZE 0x18U #define OV7725_VSTART 0x19U #define OV7725_VSIZE 0x1AU #define OV7725_PSHFT 0x1BU #define OV7725_MIDH 0x1CU #define OV7725_MIDL 0x1DU #define OV7725_LAEC 0x1FU #define OV7725_COM11 0x20U #define OV7725_BDBASE 0x22U #define OV7725_BDMSTEP 0x23U #define OV7725_AEW 0x24U #define OV7725_AEB 0x25U #define OV7725_VPT 0x26U #define OV7725_REG28 0x28U #define OV7725_HOUTSIZE 0x29U #define OV7725_EXHCH 0x2AU #define OV7725_EXHCL 0x2BU #define OV7725_VOUTSIZE 0x2CU #define OV7725_ADVFL 0x2DU #define OV7725_ADVFH 0x2EU #define OV7725_YAVE 0x2FU #define OV7725_LUMHTH 0x30U #define OV7725_LUMLTH 0x31U #define OV7725_HREF 0x32U #define OV7725_DM_LNL 0x33U #define OV7725_DM_LNH 0x34U #define OV7725_ADOFF_B 0x35U #define OV7725_ADOFF_R 0x36U #define OV7725_ADOFF_GB 0x37U #define OV7725_ADOFF_GR 0x38U #define OV7725_OFF_B 0x39U #define OV7725_OFF_R 0x3AU #define OV7725_OFF_GB 0x3BU #define OV7725_OFF_GR 0x3CU #define OV7725_COM12 0x3DU #define OV7725_COM13 0x3EU #define OV7725_COM14 0x3FU #define OV7725_COM16 0x41U #define OV7725_TGT_B 0x42U #define OV7725_TGT_R 0x43U #define OV7725_TGT_GB 0x44U #define OV7725_TGT_GR 0x45U #define OV7725_LC_CTR 0x46U #define OV7725_LC_XC 0x47U #define OV7725_LC_YC 0x48U #define OV7725_LC_COEF 0x49U #define OV7725_LC_RADI 0x4AU #define OV7725_LC_COEFB 0x4BU #define OV7725_LC_COEFR 0x4CU #define OV7725_FIXGAIN 0x4DU #define OV7725_AREF1 0x4FU #define OV7725_AREF6 0x54U #define OV7725_UFIX 0x60U #define OV7725_VFIX 0x61U #define OV7725_AWBB_BLK 0x62U #define OV7725_AWB_CTRL0 0x63U #define OV7725_DSP_CTRL1 0x64U #define OV7725_DSP_CTRL2 0x65U #define OV7725_DSP_CTRL3 0x66U #define OV7725_DSP_CTRL4 0x67U #define OV7725_AWB_BIAS 0x68U #define OV7725_AWB_CTRL1 0x69U #define OV7725_AWB_CTRL2 0x6AU #define OV7725_AWB_CTRL3 0x6BU #define OV7725_AWB_CTRL4 0x6CU #define OV7725_AWB_CTRL5 0x6DU #define OV7725_AWB_CTRL6 0x6EU #define OV7725_AWB_CTRL7 0x6FU #define OV7725_AWB_CTRL8 0x70U #define OV7725_AWB_CTRL9 0x71U #define OV7725_AWB_CTRL10 0x72U #define OV7725_AWB_CTRL11 0x73U #define OV7725_AWB_CTRL12 0x74U #define OV7725_AWB_CTRL13 0x75U #define OV7725_AWB_CTRL14 0x76U #define OV7725_AWB_CTRL15 0x77U #define OV7725_AWB_CTRL16 0x78U #define OV7725_AWB_CTRL17 0x79U #define OV7725_AWB_CTRL18 0x7AU #define OV7725_AWB_CTRL19 0x7BU #define OV7725_AWB_CTRL20 0x7CU #define OV7725_AWB_CTRL21 0x7DU #define OV7725_GAM1 0x7EU #define OV7725_GAM2 0x7FU #define OV7725_GAM3 0x80U #define OV7725_GAM4 0x81U #define OV7725_GAM5 0x82U #define OV7725_GAM6 0x83U #define OV7725_GAM7 0x84U #define OV7725_GAM8 0x85U #define 
OV7725_GAM9 0x86U #define OV7725_GAM10 0x87U #define OV7725_GAM11 0x88U #define OV7725_GAM12 0x89U #define OV7725_GAM13 0x8AU #define OV7725_GAM14 0x8BU #define OV7725_GAM15 0x8CU #define OV7725_SLOP 0x8DU #define OV7725_DNSTH 0x8EU #define OV7725_EDGE0 0x8FU #define OV7725_EDGE1 0x90U #define OV7725_DNSOFF 0x91U #define OV7725_EDGE2 0x92U #define OV7725_EDGE3 0x93U #define OV7725_MTX1 0x94U #define OV7725_MTX2 0x95U #define OV7725_MTX3 0x96U #define OV7725_MTX4 0x97U #define OV7725_MTX5 0x98U #define OV7725_MTX6 0x99U #define OV7725_MTX_CTRL 0x9AU #define OV7725_BRIGHT 0x9BU #define OV7725_CNST 0x9CU #define OV7725_UVADJ0 0x9EU #define OV7725_UVADJ1 0x9FU #define OV7725_SCAL0 0xA0U #define OV7725_SCAL1 0xA1U #define OV7725_SCAL2 0xA2U #define OV7725_SDE 0xA6U #define OV7725_USAT 0xA7U #define OV7725_VSAT 0xA8U #define OV7725_HUECOS 0xA9U #define OV7725_HUESIN 0xAAU #define OV7725_SIGN 0xABU #define OV7725_DSPAUTO 0xACU #define OV7725_COM10_VSYNC_NEG_MASK BIT(1) #define OV7725_COM10_HREF_REVERSE_MASK BIT(3) #define OV7725_COM10_PCLK_REVERSE_MASK BIT(4) #define OV7725_COM10_PCLK_OUT_MASK BIT(5) #define OV7725_COM10_DATA_NEG_MASK BIT(7) struct ov7725_config { struct i2c_dt_spec i2c; #if DT_INST_NODE_HAS_PROP(0, reset_gpios) struct gpio_dt_spec reset_gpio; #endif }; struct ov7725_data { struct video_format fmt; }; struct ov7725_clock { uint32_t input_clk; uint32_t framerate; uint8_t clkrc; /*!< Register CLKRC. */ uint8_t com4; /*!< Register COM4. */ uint8_t dm_lnl; /*!< Register DM_LNL. */ }; struct ov7725_pixel_format { uint32_t pixel_format; uint8_t com7; }; struct ov7725_reg { uint8_t addr; uint8_t value; }; static const struct ov7725_clock ov7725_clock_configs[] = { { .input_clk = 24000000, .framerate = 30, .clkrc = 0x01, .com4 = 0x41, .dm_lnl = 0x00 }, { .input_clk = 24000000, .framerate = 15, .clkrc = 0x03, .com4 = 0x41, .dm_lnl = 0x00 }, { .input_clk = 24000000, .framerate = 25, .clkrc = 0x01, .com4 = 0x41, .dm_lnl = 0x66 }, { .input_clk = 24000000, .framerate = 14, .clkrc = 0x03, .com4 = 0x41, .dm_lnl = 0x1a }, { .input_clk = 26000000, .framerate = 30, .clkrc = 0x01, .com4 = 0x41, .dm_lnl = 0x2b }, { .input_clk = 26000000, .framerate = 15, .clkrc = 0x03, .com4 = 0x41, .dm_lnl = 0x2b }, { .input_clk = 26000000, .framerate = 25, .clkrc = 0x01, .com4 = 0x41, .dm_lnl = 0x99 }, { .input_clk = 26000000, .framerate = 14, .clkrc = 0x03, .com4 = 0x41, .dm_lnl = 0x46 }, { .input_clk = 13000000, .framerate = 30, .clkrc = 0x00, .com4 = 0x41, .dm_lnl = 0x2b }, { .input_clk = 13000000, .framerate = 15, .clkrc = 0x01, .com4 = 0x41, .dm_lnl = 0x2b }, { .input_clk = 13000000, .framerate = 25, .clkrc = 0x00, .com4 = 0x41, .dm_lnl = 0x99 }, { .input_clk = 13000000, .framerate = 14, .clkrc = 0x01, .com4 = 0x41, .dm_lnl = 0x46 }, }; static const struct ov7725_pixel_format ov7725_pf_configs[] = { { .pixel_format = VIDEO_PIX_FMT_RGB565, .com7 = (1 << 2) | (2) } }; static const struct ov7725_reg ov7725_init_reg_tb[] = { /*Output config*/ { OV7725_CLKRC, 0x00 }, { OV7725_COM7, 0x06 }, { OV7725_HSTART, 0x3f }, { OV7725_HSIZE, 0x50 }, { OV7725_VSTART, 0x03 }, { OV7725_VSIZE, 0x78 }, { OV7725_HREF, 0x00 }, { OV7725_HOUTSIZE, 0x50 }, { OV7725_VOUTSIZE, 0x78 }, /*DSP control*/ { OV7725_TGT_B, 0x7f }, { OV7725_FIXGAIN, 0x09 }, { OV7725_AWB_CTRL0, 0xe0 }, { OV7725_DSP_CTRL1, 0xff }, { OV7725_DSP_CTRL2, 0x00 }, { OV7725_DSP_CTRL3, 0x00 }, { OV7725_DSP_CTRL4, 0x00 }, /*AGC AEC AWB*/ { OV7725_COM8, 0xf0 }, { OV7725_COM4, 0x81 }, { OV7725_COM6, 0xc5 }, { OV7725_COM9, 0x11 }, { OV7725_BDBASE, 0x7F }, { OV7725_BDMSTEP, 
0x03 }, { OV7725_AEW, 0x40 }, { OV7725_AEB, 0x30 }, { OV7725_VPT, 0xa1 }, { OV7725_EXHCL, 0x9e }, { OV7725_AWB_CTRL3, 0xaa }, { OV7725_COM8, 0xff }, /*matrix sharpness brightness contrast*/ { OV7725_EDGE1, 0x08 }, { OV7725_DNSOFF, 0x01 }, { OV7725_EDGE2, 0x03 }, { OV7725_EDGE3, 0x00 }, { OV7725_MTX1, 0xb0 }, { OV7725_MTX2, 0x9d }, { OV7725_MTX3, 0x13 }, { OV7725_MTX4, 0x16 }, { OV7725_MTX5, 0x7b }, { OV7725_MTX6, 0x91 }, { OV7725_MTX_CTRL, 0x1e }, { OV7725_BRIGHT, 0x08 }, { OV7725_CNST, 0x20 }, { OV7725_UVADJ0, 0x81 }, { OV7725_SDE, 0X06 }, { OV7725_USAT, 0x65 }, { OV7725_VSAT, 0x65 }, { OV7725_HUECOS, 0X80 }, { OV7725_HUESIN, 0X80 }, /*GAMMA config*/ { OV7725_GAM1, 0x0c }, { OV7725_GAM2, 0x16 }, { OV7725_GAM3, 0x2a }, { OV7725_GAM4, 0x4e }, { OV7725_GAM5, 0x61 }, { OV7725_GAM6, 0x6f }, { OV7725_GAM7, 0x7b }, { OV7725_GAM8, 0x86 }, { OV7725_GAM9, 0x8e }, { OV7725_GAM10, 0x97 }, { OV7725_GAM11, 0xa4 }, { OV7725_GAM12, 0xaf }, { OV7725_GAM13, 0xc5 }, { OV7725_GAM14, 0xd7 }, { OV7725_GAM15, 0xe8 }, { OV7725_SLOP, 0x20 }, { OV7725_COM3, 0x40 }, { OV7725_COM5, 0xf5 }, { OV7725_COM10, 0x02 }, { OV7725_COM2, 0x01 } }; static int ov7725_write_reg(const struct i2c_dt_spec *spec, uint8_t reg_addr, uint8_t value) { struct i2c_msg msgs[2]; msgs[0].buf = (uint8_t *)&reg_addr; msgs[0].len = 1; msgs[0].flags = I2C_MSG_WRITE; msgs[1].buf = (uint8_t *)&value; msgs[1].len = 1; msgs[1].flags = I2C_MSG_WRITE | I2C_MSG_STOP; return i2c_transfer_dt(spec, msgs, 2); } static int ov7725_read_reg(const struct i2c_dt_spec *spec, uint8_t reg_addr, uint8_t *value) { struct i2c_msg msgs[2]; msgs[0].buf = (uint8_t *)&reg_addr; msgs[0].len = 1; /* * When using I2C to read the registers of the SCCB device, * a stop bit is required after writing the register address */ msgs[0].flags = I2C_MSG_WRITE | I2C_MSG_STOP; msgs[1].buf = (uint8_t *)value; msgs[1].len = 1; msgs[1].flags = I2C_MSG_READ | I2C_MSG_STOP | I2C_MSG_RESTART; return i2c_transfer_dt(spec, msgs, 2); } int ov7725_modify_reg(const struct i2c_dt_spec *spec, uint8_t reg_addr, uint8_t clear_mask, uint8_t value) { int ret; uint8_t set_value; ret = ov7725_read_reg(spec, reg_addr, &set_value); if (ret == 0) { /* Merge the new bits under the mask; all other bits are preserved */ set_value = (set_value & (~clear_mask)) | (value & clear_mask); ret = ov7725_write_reg(spec, reg_addr, set_value); } return ret; } static int ov7725_write_all(const struct device *dev, const struct ov7725_reg *regs, uint16_t reg_num) { uint16_t i = 0; const struct ov7725_config *cfg = dev->config; for (i = 0; i < reg_num; i++) { int err; err = ov7725_write_reg(&cfg->i2c, regs[i].addr, regs[i].value); if (err) { return err; } } return 0; } static int ov7725_set_clock(const struct device *dev, unsigned int framerate, unsigned int input_clk) { const struct ov7725_config *cfg = dev->config; for (unsigned int i = 0; i < ARRAY_SIZE(ov7725_clock_configs); i++) { if ((ov7725_clock_configs[i].framerate == framerate) && (ov7725_clock_configs[i].input_clk == input_clk)) { ov7725_write_reg(&cfg->i2c, OV7725_CLKRC, ov7725_clock_configs[i].clkrc); ov7725_modify_reg(&cfg->i2c, OV7725_COM4, 0xc0, ov7725_clock_configs[i].com4); ov7725_write_reg(&cfg->i2c, OV7725_EXHCL, 0x00); ov7725_write_reg(&cfg->i2c, OV7725_DM_LNL, ov7725_clock_configs[i].dm_lnl); ov7725_write_reg(&cfg->i2c, OV7725_DM_LNH, 0x00); ov7725_write_reg(&cfg->i2c, OV7725_ADVFL, 0x00); ov7725_write_reg(&cfg->i2c, OV7725_ADVFH, 0x00); return ov7725_write_reg(&cfg->i2c, OV7725_COM5, 0x65); } } return -ENOTSUP; } static int ov7725_set_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) {
struct ov7725_data *drv_data = dev->data; const struct ov7725_config *cfg = dev->config; uint8_t com10 = 0; uint16_t width, height; uint16_t hstart, vstart, hsize; int ret; /* we only support one format for now (VGA RGB565) */ if (fmt->pixelformat != VIDEO_PIX_FMT_RGB565 || fmt->height != 480 || fmt->width != 640) { return -ENOTSUP; } width = fmt->width; height = fmt->height; if (!memcmp(&drv_data->fmt, fmt, sizeof(drv_data->fmt))) { /* nothing to do */ return 0; } drv_data->fmt = *fmt; /* Configure Sensor */ ret = ov7725_write_all(dev, ov7725_init_reg_tb, ARRAY_SIZE(ov7725_init_reg_tb)); if (ret) { LOG_ERR("Unable to write ov7725 config"); return ret; } /* Set clock : framerate 30fps, input clock 24M*/ ov7725_set_clock(dev, 30, 24000000); /* Set output format */ for (uint8_t i = 0; i < ARRAY_SIZE(ov7725_pf_configs); i++) { if (ov7725_pf_configs[i].pixel_format == fmt->pixelformat) { ret = ov7725_modify_reg(&cfg->i2c, OV7725_COM7, 0x1FU, ov7725_pf_configs[i].com7); if (ret) { LOG_ERR("Unable to write ov7725 pixel format"); return ret; } } } ov7725_modify_reg(&cfg->i2c, OV7725_COM7, (1 << 5), (0 << 5)); com10 |= OV7725_COM10_VSYNC_NEG_MASK; ov7725_write_reg(&cfg->i2c, OV7725_COM10, com10); /* Don't swap output MSB/LSB. */ ov7725_write_reg(&cfg->i2c, OV7725_COM3, 0x00); /* * Output drive capability * 0: 1X * 1: 2X * 2: 3X * 3: 4X */ ov7725_modify_reg(&cfg->i2c, OV7725_COM2, 0x03, 0x03); /* Resolution and timing. */ hstart = 0x22U << 2U; vstart = 0x07U << 1U; hsize = width + 16U; /* Set the window size. */ ov7725_write_reg(&cfg->i2c, OV7725_HSTART, hstart >> 2U); ov7725_write_reg(&cfg->i2c, OV7725_HSIZE, hsize >> 2U); ov7725_write_reg(&cfg->i2c, OV7725_VSTART, vstart >> 1U); ov7725_write_reg(&cfg->i2c, OV7725_VSIZE, height >> 1U); ov7725_write_reg(&cfg->i2c, OV7725_HOUTSIZE, width >> 2U); ov7725_write_reg(&cfg->i2c, OV7725_VOUTSIZE, height >> 1U); ov7725_write_reg(&cfg->i2c, OV7725_HREF, ((vstart & 1U) << 6U) | ((hstart & 3U) << 4U) | ((height & 1U) << 2U) | ((hsize & 3U) << 0U)); return ov7725_write_reg(&cfg->i2c, OV7725_EXHCH, ((height & 1U) << 2U) | ((width & 3U) << 0U)); } static int ov7725_get_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { struct ov7725_data *drv_data = dev->data; *fmt = drv_data->fmt; return 0; } static int ov7725_stream_start(const struct device *dev) { return 0; } static int ov7725_stream_stop(const struct device *dev) { return 0; } static const struct video_format_cap fmts[] = { { .pixelformat = VIDEO_PIX_FMT_RGB565, .width_min = 640, .width_max = 640, .height_min = 480, .height_max = 480, .width_step = 0, .height_step = 0, }, { 0 } }; static int ov7725_get_caps(const struct device *dev, enum video_endpoint_id ep, struct video_caps *caps) { caps->format_caps = fmts; return 0; } static const struct video_driver_api ov7725_driver_api = { .set_format = ov7725_set_fmt, .get_format = ov7725_get_fmt, .get_caps = ov7725_get_caps, .stream_start = ov7725_stream_start, .stream_stop = ov7725_stream_stop, }; static int ov7725_init(const struct device *dev) { const struct ov7725_config *cfg = dev->config; struct video_format fmt; uint8_t pid, ver; int ret; #if DT_INST_NODE_HAS_PROP(0, reset_gpios) ret = gpio_pin_configure_dt(&cfg->reset_gpio, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } gpio_pin_set_dt(&cfg->reset_gpio, 0); k_sleep(K_MSEC(1)); gpio_pin_set_dt(&cfg->reset_gpio, 1); k_sleep(K_MSEC(1)); #endif /* Identify the device. 
*/ ret = ov7725_read_reg(&cfg->i2c, OV7725_PID, &pid); if (ret) { LOG_ERR("Unable to read PID"); return -ENODEV; } ret = ov7725_read_reg(&cfg->i2c, OV7725_VER, &ver); if (ret) { LOG_ERR("Unable to read VER"); return -ENODEV; } if (OV7725_REVISION != (((uint32_t)pid << 8U) | (uint32_t)ver)) { LOG_ERR("Wrong revision: 0x%02x%02x (expected 0x%04x)", pid, ver, OV7725_REVISION); return -ENODEV; } /* Device identified OK; perform software reset. */ ov7725_write_reg(&cfg->i2c, OV7725_COM7, 0x80); k_sleep(K_MSEC(2)); /* set default/init format VGA RGB565 */ fmt.pixelformat = VIDEO_PIX_FMT_RGB565; fmt.width = 640; fmt.height = 480; fmt.pitch = 640 * 2; ret = ov7725_set_fmt(dev, VIDEO_EP_OUT, &fmt); if (ret) { LOG_ERR("Unable to configure default format"); return -EIO; } return 0; } /* Unique Instance */ static const struct ov7725_config ov7725_cfg_0 = { .i2c = I2C_DT_SPEC_INST_GET(0), #if DT_INST_NODE_HAS_PROP(0, reset_gpios) .reset_gpio = GPIO_DT_SPEC_INST_GET(0, reset_gpios), #endif }; static struct ov7725_data ov7725_data_0; static int ov7725_init_0(const struct device *dev) { const struct ov7725_config *cfg = dev->config; if (!device_is_ready(cfg->i2c.bus)) { LOG_ERR("Bus device is not ready"); return -ENODEV; } #if DT_INST_NODE_HAS_PROP(0, reset_gpios) if (!gpio_is_ready_dt(&cfg->reset_gpio)) { LOG_ERR("%s: device %s is not ready", dev->name, cfg->reset_gpio.port->name); return -ENODEV; } #endif return ov7725_init(dev); } DEVICE_DT_INST_DEFINE(0, &ov7725_init_0, NULL, &ov7725_data_0, &ov7725_cfg_0, POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, &ov7725_driver_api); ```
/content/code_sandbox/drivers/video/ov7725.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,789
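ov7725_modify_reg() above is a plain read-modify-write: bits selected by clear_mask take the new value and every other bit is preserved. A worked example of the mask arithmetic, with illustrative values:

```c
/* Worked example of the read-modify-write mask arithmetic used by
 * ov7725_modify_reg(). The register value, mask, and new bits are
 * illustrative, not taken from the datasheet.
 */
#include <stdint.h>

uint8_t rmw_example(void)
{
	uint8_t current = 0xb2; /* 1011 0010, as read back over SCCB */
	uint8_t mask    = 0x03; /* touch only bits [1:0] */
	uint8_t value   = 0x01; /* desired state of the masked bits */

	/* (current & ~mask) = 1011 0000, (value & mask) = 0000 0001 */
	return (current & ~mask) | (value & mask); /* 0xb1 */
}
```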
```c /* * */ #define DT_DRV_COMPAT nxp_imx_csi #include <zephyr/kernel.h> #include <fsl_csi.h> #ifdef CONFIG_HAS_MCUX_CACHE #include <fsl_cache.h> #endif #include <zephyr/drivers/video.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> struct video_mcux_csi_config { CSI_Type *base; const struct device *source_dev; const struct pinctrl_dev_config *pincfg; }; struct video_mcux_csi_data { const struct device *dev; csi_config_t csi_config; csi_handle_t csi_handle; struct k_fifo fifo_in; struct k_fifo fifo_out; struct k_poll_signal *signal; }; static inline unsigned int video_pix_fmt_bpp(uint32_t pixelformat) { switch (pixelformat) { case VIDEO_PIX_FMT_BGGR8: case VIDEO_PIX_FMT_GBRG8: case VIDEO_PIX_FMT_GRBG8: case VIDEO_PIX_FMT_RGGB8: return 1; case VIDEO_PIX_FMT_RGB565: case VIDEO_PIX_FMT_YUYV: return 2; case VIDEO_PIX_FMT_XRGB32: case VIDEO_PIX_FMT_XYUV32: return 4; default: return 0; } } static void __frame_done_cb(CSI_Type *base, csi_handle_t *handle, status_t status, void *user_data) { struct video_mcux_csi_data *data = user_data; const struct device *dev = data->dev; const struct video_mcux_csi_config *config = dev->config; enum video_signal_result result = VIDEO_BUF_DONE; struct video_buffer *vbuf, *vbuf_first = NULL; uint32_t buffer_addr; /* IRQ context */ if (status != kStatus_CSI_FrameDone) { return; } status = CSI_TransferGetFullBuffer(config->base, &(data->csi_handle), &buffer_addr); if (status != kStatus_Success) { result = VIDEO_BUF_ERROR; goto done; } /* Get matching vbuf by addr */ while ((vbuf = k_fifo_get(&data->fifo_in, K_NO_WAIT))) { if ((uint32_t)vbuf->buffer == buffer_addr) { break; } /* should never happen on ordered stream, except on capture * start/restart, requeue the frame and continue looking for * the right buffer. */ k_fifo_put(&data->fifo_in, vbuf); /* prevent infinite loop */ if (vbuf_first == NULL) { vbuf_first = vbuf; } else if (vbuf_first == vbuf) { vbuf = NULL; break; } } if (vbuf == NULL) { result = VIDEO_BUF_ERROR; goto done; } vbuf->timestamp = k_uptime_get_32(); #ifdef CONFIG_HAS_MCUX_CACHE DCACHE_InvalidateByRange(buffer_addr, vbuf->bytesused); #endif k_fifo_put(&data->fifo_out, vbuf); done: /* Trigger Event */ if (IS_ENABLED(CONFIG_POLL) && data->signal) { k_poll_signal_raise(data->signal, result); } return; } #if defined(CONFIG_VIDEO_MCUX_MIPI_CSI2RX) K_HEAP_DEFINE(csi_heap, 1000); static struct video_format_cap *fmts; /* * On i.MX RT11xx SoCs which have MIPI CSI-2 Rx, image data from the camera sensor after passing * through the pipeline (MIPI CSI-2 Rx --> Video Mux --> CSI) will be implicitly converted to a * 32-bits pixel format. For example, an input in RGB565 or YUYV (2-bytes format) will become a * XRGB32 or XYUV32 (4-bytes format) respectively, at the output of the CSI. */ static inline void video_pix_fmt_convert(struct video_format *fmt, bool isGetFmt) { switch (fmt->pixelformat) { case VIDEO_PIX_FMT_XRGB32: fmt->pixelformat = isGetFmt ? VIDEO_PIX_FMT_XRGB32 : VIDEO_PIX_FMT_RGB565; break; case VIDEO_PIX_FMT_XYUV32: fmt->pixelformat = isGetFmt ? VIDEO_PIX_FMT_XYUV32 : VIDEO_PIX_FMT_YUYV; break; case VIDEO_PIX_FMT_RGB565: fmt->pixelformat = isGetFmt ? VIDEO_PIX_FMT_XRGB32 : VIDEO_PIX_FMT_RGB565; break; case VIDEO_PIX_FMT_YUYV: fmt->pixelformat = isGetFmt ? 
VIDEO_PIX_FMT_XYUV32 : VIDEO_PIX_FMT_YUYV; break; } fmt->pitch = fmt->width * video_pix_fmt_bpp(fmt->pixelformat); } #endif static int video_mcux_csi_set_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { const struct video_mcux_csi_config *config = dev->config; struct video_mcux_csi_data *data = dev->data; unsigned int bpp = video_pix_fmt_bpp(fmt->pixelformat); status_t ret; struct video_format format = *fmt; if (!bpp || ep != VIDEO_EP_OUT) { return -EINVAL; } data->csi_config.bytesPerPixel = bpp; data->csi_config.linePitch_Bytes = fmt->pitch; #if defined(CONFIG_VIDEO_MCUX_MIPI_CSI2RX) if (fmt->pixelformat != VIDEO_PIX_FMT_XRGB32 && fmt->pixelformat != VIDEO_PIX_FMT_XYUV32) { return -ENOTSUP; } video_pix_fmt_convert(&format, false); data->csi_config.dataBus = kCSI_DataBus24Bit; #else data->csi_config.dataBus = kCSI_DataBus8Bit; #endif data->csi_config.polarityFlags = kCSI_HsyncActiveHigh | kCSI_DataLatchOnRisingEdge; data->csi_config.workMode = kCSI_GatedClockMode; /* use VSYNC, HSYNC, and PIXCLK */ data->csi_config.useExtVsync = true; data->csi_config.height = fmt->height; data->csi_config.width = fmt->width; ret = CSI_Init(config->base, &data->csi_config); if (ret != kStatus_Success) { return -EIO; } ret = CSI_TransferCreateHandle(config->base, &data->csi_handle, __frame_done_cb, data); if (ret != kStatus_Success) { return -EIO; } if (config->source_dev && video_set_format(config->source_dev, ep, &format)) { return -EIO; } return 0; } static int video_mcux_csi_get_fmt(const struct device *dev, enum video_endpoint_id ep, struct video_format *fmt) { const struct video_mcux_csi_config *config = dev->config; if (fmt == NULL || ep != VIDEO_EP_OUT) { return -EINVAL; } if (config->source_dev && !video_get_format(config->source_dev, ep, fmt)) { #if defined(CONFIG_VIDEO_MCUX_MIPI_CSI2RX) video_pix_fmt_convert(fmt, true); #endif /* align CSI with source fmt */ return video_mcux_csi_set_fmt(dev, ep, fmt); } return -EIO; } static int video_mcux_csi_stream_start(const struct device *dev) { const struct video_mcux_csi_config *config = dev->config; struct video_mcux_csi_data *data = dev->data; status_t ret; ret = CSI_TransferStart(config->base, &data->csi_handle); if (ret != kStatus_Success) { return -EIO; } if (config->source_dev && video_stream_start(config->source_dev)) { return -EIO; } return 0; } static int video_mcux_csi_stream_stop(const struct device *dev) { const struct video_mcux_csi_config *config = dev->config; struct video_mcux_csi_data *data = dev->data; status_t ret; if (config->source_dev && video_stream_stop(config->source_dev)) { return -EIO; } ret = CSI_TransferStop(config->base, &data->csi_handle); if (ret != kStatus_Success) { return -EIO; } return 0; } static int video_mcux_csi_flush(const struct device *dev, enum video_endpoint_id ep, bool cancel) { const struct video_mcux_csi_config *config = dev->config; struct video_mcux_csi_data *data = dev->data; struct video_buffer *vbuf; uint32_t buffer_addr; status_t ret; if (!cancel) { /* wait for all buffers to be processed */ do { k_sleep(K_MSEC(1)); } while (!k_fifo_is_empty(&data->fifo_in)); } else { /* Flush driver output queue */ do { ret = CSI_TransferGetFullBuffer(config->base, &(data->csi_handle), &buffer_addr); } while (ret == kStatus_Success); while ((vbuf = k_fifo_get(&data->fifo_in, K_NO_WAIT))) { k_fifo_put(&data->fifo_out, vbuf); if (IS_ENABLED(CONFIG_POLL) && data->signal) { k_poll_signal_raise(data->signal, VIDEO_BUF_ABORTED); } } } return 0; } static int video_mcux_csi_enqueue(const struct
device *dev, enum video_endpoint_id ep, struct video_buffer *vbuf) { const struct video_mcux_csi_config *config = dev->config; struct video_mcux_csi_data *data = dev->data; unsigned int to_read; status_t ret; if (ep != VIDEO_EP_OUT) { return -EINVAL; } to_read = data->csi_config.linePitch_Bytes * data->csi_config.height; vbuf->bytesused = to_read; ret = CSI_TransferSubmitEmptyBuffer(config->base, &data->csi_handle, (uint32_t)vbuf->buffer); if (ret != kStatus_Success) { return -EIO; } k_fifo_put(&data->fifo_in, vbuf); return 0; } static int video_mcux_csi_dequeue(const struct device *dev, enum video_endpoint_id ep, struct video_buffer **vbuf, k_timeout_t timeout) { struct video_mcux_csi_data *data = dev->data; if (ep != VIDEO_EP_OUT) { return -EINVAL; } *vbuf = k_fifo_get(&data->fifo_out, timeout); if (*vbuf == NULL) { return -EAGAIN; } return 0; } static inline int video_mcux_csi_set_ctrl(const struct device *dev, unsigned int cid, void *value) { const struct video_mcux_csi_config *config = dev->config; int ret = -ENOTSUP; /* Forward to source dev if any */ if (config->source_dev) { ret = video_set_ctrl(config->source_dev, cid, value); } return ret; } static inline int video_mcux_csi_get_ctrl(const struct device *dev, unsigned int cid, void *value) { const struct video_mcux_csi_config *config = dev->config; int ret = -ENOTSUP; /* Forward to source dev if any */ if (config->source_dev) { ret = video_get_ctrl(config->source_dev, cid, value); } return ret; } static int video_mcux_csi_get_caps(const struct device *dev, enum video_endpoint_id ep, struct video_caps *caps) { const struct video_mcux_csi_config *config = dev->config; int err = -ENODEV; if (ep != VIDEO_EP_OUT) { return -EINVAL; } /* Just forward to source dev for now */ if (config->source_dev) { err = video_get_caps(config->source_dev, ep, caps); #if defined(CONFIG_VIDEO_MCUX_MIPI_CSI2RX) /* * On i.MX RT11xx SoCs which have MIPI CSI-2 Rx, image data from the camera sensor * after passing through the pipeline (MIPI CSI-2 Rx --> Video Mux --> CSI) will be * implicitly converted to a 32-bits pixel format. For example, an input in RGB565 * or YUYV (2-bytes format) will become an XRGB32 or XYUV32 (4-bytes format) * respectively, at the output of the CSI. So, we change the pixel formats of the * source caps to reflect this. 
*/ int ind = 0; while (caps->format_caps[ind].pixelformat) { ind++; } k_heap_free(&csi_heap, fmts); fmts = k_heap_alloc(&csi_heap, (ind + 1) * sizeof(struct video_format_cap), K_FOREVER); for (int i = 0; i <= ind; i++) { memcpy(&fmts[i], &caps->format_caps[i], sizeof(fmts[i])); if (fmts[i].pixelformat == VIDEO_PIX_FMT_RGB565) { fmts[i].pixelformat = VIDEO_PIX_FMT_XRGB32; } else if (fmts[i].pixelformat == VIDEO_PIX_FMT_YUYV) { fmts[i].pixelformat = VIDEO_PIX_FMT_XYUV32; } } caps->format_caps = fmts; #endif } /* The NXP MCUX CSI requires at least 2 buffers queued before starting */ caps->min_vbuf_count = 2; /* err stays -ENODEV when there is no source device */ return err; } extern void CSI_DriverIRQHandler(void); static void video_mcux_csi_isr(const void *p) { ARG_UNUSED(p); CSI_DriverIRQHandler(); } static int video_mcux_csi_init(const struct device *dev) { const struct video_mcux_csi_config *config = dev->config; struct video_mcux_csi_data *data = dev->data; int err; k_fifo_init(&data->fifo_in); k_fifo_init(&data->fifo_out); CSI_GetDefaultConfig(&data->csi_config); /* check if there is any source device (video ctrl device) * the device is not yet initialized so we only check if it exists */ if (config->source_dev == NULL) { return -ENODEV; } err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } return 0; } #ifdef CONFIG_POLL static int video_mcux_csi_set_signal(const struct device *dev, enum video_endpoint_id ep, struct k_poll_signal *signal) { struct video_mcux_csi_data *data = dev->data; if (data->signal && signal != NULL) { return -EALREADY; } data->signal = signal; return 0; } #endif static const struct video_driver_api video_mcux_csi_driver_api = { .set_format = video_mcux_csi_set_fmt, .get_format = video_mcux_csi_get_fmt, .stream_start = video_mcux_csi_stream_start, .stream_stop = video_mcux_csi_stream_stop, .flush = video_mcux_csi_flush, .enqueue = video_mcux_csi_enqueue, .dequeue = video_mcux_csi_dequeue, .set_ctrl = video_mcux_csi_set_ctrl, .get_ctrl = video_mcux_csi_get_ctrl, .get_caps = video_mcux_csi_get_caps, #ifdef CONFIG_POLL .set_signal = video_mcux_csi_set_signal, #endif }; #if 1 /* Unique Instance */ PINCTRL_DT_INST_DEFINE(0); static const struct video_mcux_csi_config video_mcux_csi_config_0 = { .base = (CSI_Type *)DT_INST_REG_ADDR(0), .source_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, source)), .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; static struct video_mcux_csi_data video_mcux_csi_data_0; static int video_mcux_csi_init_0(const struct device *dev) { struct video_mcux_csi_data *data = dev->data; IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), video_mcux_csi_isr, NULL, 0); irq_enable(DT_INST_IRQN(0)); data->dev = dev; return video_mcux_csi_init(dev); } /* CONFIG_KERNEL_INIT_PRIORITY_DEVICE is used to make sure the * CSI peripheral is initialized before the camera, which is * necessary since the clock to the camera is provided by the * CSI peripheral. */ DEVICE_DT_INST_DEFINE(0, &video_mcux_csi_init_0, NULL, &video_mcux_csi_data_0, &video_mcux_csi_config_0, POST_KERNEL, CONFIG_VIDEO_MCUX_CSI_INIT_PRIORITY, &video_mcux_csi_driver_api); #endif ```
/content/code_sandbox/drivers/video/video_mcux_csi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,700
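video_mcux_csi.c above wires frame completion into fifo_out via __frame_done_cb(); the application side is a queue/start/dequeue loop. A hedged sketch, assuming the buffers were allocated beforehand and a devicetree node labeled `csi` (the label is an assumption):

```c
/* Capture-loop sketch over the CSI driver above. The "csi" devicetree
 * label and the pre-allocated bufs[] array are assumptions.
 */
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/video.h>

int csi_capture_one(struct video_buffer **bufs, int n)
{
	const struct device *csi = DEVICE_DT_GET(DT_NODELABEL(csi));
	struct video_buffer *vbuf;
	int ret;

	/* The driver reports min_vbuf_count = 2: queue at least that many */
	for (int i = 0; i < n; i++) {
		ret = video_enqueue(csi, VIDEO_EP_OUT, bufs[i]);
		if (ret) {
			return ret;
		}
	}

	ret = video_stream_start(csi); /* also starts the source sensor */
	if (ret) {
		return ret;
	}

	/* Completed frames are moved to fifo_out by __frame_done_cb() */
	ret = video_dequeue(csi, VIDEO_EP_OUT, &vbuf, K_MSEC(100));
	if (ret) {
		return ret;
	}

	/* ... consume vbuf->buffer ..., then hand the buffer back */
	return video_enqueue(csi, VIDEO_EP_OUT, vbuf);
}
```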
```objective-c /* util.h - Common helpers for Bluetooth drivers */ /* * */ static inline void bt_uart_drain(const struct device *dev) { uint8_t c; while (uart_fifo_read(dev, &c, 1)) { continue; } } ```
/content/code_sandbox/drivers/bluetooth/util.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
53
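bt_uart_drain() above empties the RX FIFO byte by byte; H4-style HCI transports typically reach for it when the receive state machine loses packet sync and the tail of a corrupt packet has to be discarded. A sketch of that pattern inside a UART IRQ callback; packet_type_valid() is a hypothetical helper standing in for the transport's real header validation:

```c
/* Sketch: resynchronizing an H4-style receiver with bt_uart_drain().
 * packet_type_valid() is a hypothetical helper, not a real Zephyr API.
 */
#include <zephyr/drivers/uart.h>

static void rx_isr(const struct device *uart, void *user_data)
{
	ARG_UNUSED(user_data);

	while (uart_irq_update(uart) && uart_irq_rx_ready(uart)) {
		uint8_t type;

		if (uart_fifo_read(uart, &type, 1) != 1) {
			return;
		}
		if (!packet_type_valid(type)) {
			bt_uart_drain(uart); /* drop the rest and resync */
			return;
		}
		/* ... read the header and payload for this packet type ... */
	}
}
```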
```unknown # Bluetooth LE driver configuration options # # Bluetooth options # # Controller support is an HCI driver in itself, so these HCI driver # options are only applicable if controller support hasn't been enabled. menuconfig BT_DRIVERS bool "Bluetooth drivers" default y depends on BT if BT_DRIVERS if BT_HCI source "drivers/bluetooth/hci/Kconfig" endif if BT_CUSTOM # Insert here any custom (non-HCI) offload drivers endif endif # BT_DRIVERS ```
/content/code_sandbox/drivers/bluetooth/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
109
```c /* * an affiliate of Cypress Semiconductor Corporation * */ /** * @brief Zephyr CYW20829 driver. * * This driver uses the btstack-integration asset as the host's platform adaptation * (porting) layer for CYW20829. The btstack-integration layer implements/ * invokes the interfaces defined by BTSTACK to enable communication * with the BT controller by using IPC_BTSS (IPC Bluetooth sub-system interface). * The Zephyr CYW20829 driver implements the wiced_bt_**** functions required by * the btstack-integration asset and the Zephyr Bluetooth driver interface * (defined in struct bt_hci_driver). * * CM33 (application core) * |=========================================| * | |-------------------------| | * | | Zephyr application | | * | |-------------------------| | * | | * | |------------| | * | | Zephyr | | * | | Bluetooth | | * CM33 (BTSS core) | | Host | | * |=====================| | |------------| | * | | | | | * | |---------------| | | |--------------| |------------| | * | | Bluetooth | | IPC_BTSS | | btstack- | | Zephyr | | * | | Controller FW | | <--------|-> | integration | ---- | CYW20829 | | * | |---------------| | | | asset | | driver | | * | | | |--------------| |------------| | * |=====================| | | * | |=========================================| * |====================| * | CYW20829 | * | Bluetooth | * |====================| * * NOTE: * CYW20829 requires fetching the binary files of the Bluetooth controller firmware. * To fetch the Binary Blobs: west blobs fetch hal_infineon * */ #include <errno.h> #include <stddef.h> #include <string.h> #include <zephyr/arch/cpu.h> #include <zephyr/bluetooth/bluetooth.h> #include <zephyr/bluetooth/hci.h> #include <zephyr/drivers/bluetooth.h> #include <zephyr/drivers/uart.h> #include <zephyr/init.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> #include <wiced_bt_stack_platform.h> #include <cybt_platform_config.h> #include <cybt_platform_trace.h> #include <cybt_platform_hci.h> #include <cybt_platform_task.h> #include <cyabs_rtos.h> #include <cybt_result.h> #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(cyw208xx); #define DT_DRV_COMPAT infineon_cyw208xx_hci struct cyw208xx_data { bt_hci_recv_t recv; }; enum { BT_HCI_VND_OP_DOWNLOAD_MINIDRIVER = 0xFC2E, BT_HCI_VND_OP_WRITE_RAM = 0xFC4C, BT_HCI_VND_OP_LAUNCH_RAM = 0xFC4E, BT_HCI_VND_OP_UPDATE_BAUDRATE = 0xFC18, }; /* Externs for CY43xxx controller FW */ extern const uint8_t brcm_patchram_buf[]; extern const int brcm_patch_ram_length; #define CYBSP_BT_PLATFORM_CFG_SLEEP_MODE_LP_ENABLED (1) static K_SEM_DEFINE(hci_sem, 1, 1); static K_SEM_DEFINE(cybt_platform_task_init_sem, 0, 1); /****************************************************************************** * Function Declarations ******************************************************************************/ extern void host_stack_platform_interface_init(void); extern void cybt_platform_hci_wait_for_boot_fully_up(bool is_from_isr); extern uint8_t *host_stack_get_acl_to_lower_buffer(wiced_bt_transport_t transport, uint32_t size); extern wiced_result_t host_stack_send_acl_to_lower(wiced_bt_transport_t transport, uint8_t *data, uint16_t len); extern wiced_result_t host_stack_send_cmd_to_lower(uint8_t *cmd, uint16_t cmd_len); extern wiced_result_t host_stack_send_iso_to_lower(uint8_t *data, uint16_t len); extern cybt_result_t cybt_platform_msg_to_bt_task(const uint16_t msg, bool is_from_isr); extern void cybt_bttask_deinit(void); static int cyw208xx_bt_firmware_download(const
uint8_t *firmware_image, uint32_t size) { uint8_t *data = (uint8_t *)firmware_image; volatile uint32_t remaining_length = size; struct net_buf *buf; int err; LOG_DBG("Executing Fw downloading for CYW208xx device"); /* The firmware image (.hcd format) contains a collection of hci_write_ram * commands, each carrying a block of the image, followed by a hci_launch_ram * record at the end. Parse and send each individual command and wait for the * response. This is to ensure the integrity of the firmware image sent to the * bluetooth chip. */ while (remaining_length) { size_t data_length = data[2]; /* data length from firmware image block */ uint16_t op_code = *(uint16_t *)data; /* Allocate buffer for hci_write_ram/hci_launch_ram command. */ buf = bt_hci_cmd_create(op_code, data_length); if (buf == NULL) { LOG_ERR("Unable to allocate command buffer"); return -ENOMEM; } /* Add data part of packet */ net_buf_add_mem(buf, &data[3], data_length); /* Send hci_write_ram command. */ err = bt_hci_cmd_send_sync(op_code, buf, NULL); if (err) { return err; } switch (op_code) { case BT_HCI_VND_OP_WRITE_RAM: /* Advance past this record: payload length plus the 2-byte * opcode and 1-byte length header. */ data += data_length + 3; remaining_length -= data_length + 3; break; case BT_HCI_VND_OP_LAUNCH_RAM: remaining_length = 0; break; default: return -EINVAL; /* unexpected opcode in the .hcd image */ } } LOG_DBG("Fw downloading complete"); return 0; } static int cyw208xx_setup(const struct device *dev, const struct bt_hci_setup_params *params) { ARG_UNUSED(dev); ARG_UNUSED(params); int err; /* Send HCI_RESET */ err = bt_hci_cmd_send_sync(BT_HCI_OP_RESET, NULL, NULL); if (err) { return err; } /* BT firmware download */ err = cyw208xx_bt_firmware_download(brcm_patchram_buf, (uint32_t)brcm_patch_ram_length); if (err) { return err; } /* Wait until the BLE controller is fully up after the firmware launch */ cybt_platform_hci_wait_for_boot_fully_up(false); return 0; } static int cyw208xx_open(const struct device *dev, bt_hci_recv_t recv) { int err; struct cyw208xx_data *hci = dev->data; hci->recv = recv; /* Initialize Bluetooth platform related OS tasks. */ err = cybt_platform_task_init((void *)NULL); if (err) { return err; } /* Wait until the cybt platform task starts */ k_sem_take(&cybt_platform_task_init_sem, K_FOREVER); return 0; } static int cyw208xx_close(const struct device *dev) { struct cyw208xx_data *hci = dev->data; /* Send the SHUTDOWN event; the BT task will release resources and terminate itself */ cybt_platform_msg_to_bt_task(BT_EVT_TASK_SHUTDOWN, false); cybt_bttask_deinit(); k_sem_reset(&cybt_platform_task_init_sem); hci->recv = NULL; return 0; } static int cyw208xx_send(const struct device *dev, struct net_buf *buf) { ARG_UNUSED(dev); int ret = 0; k_sem_take(&hci_sem, K_FOREVER); LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len); switch (bt_buf_get_type(buf)) { case BT_BUF_ACL_OUT: { uint8_t *bt_msg = host_stack_get_acl_to_lower_buffer(BT_TRANSPORT_LE, buf->len); memcpy(bt_msg, buf->data, buf->len); ret = host_stack_send_acl_to_lower(BT_TRANSPORT_LE, bt_msg, buf->len); break; } case BT_BUF_CMD: ret = host_stack_send_cmd_to_lower(buf->data, buf->len); break; case BT_BUF_ISO_OUT: ret = host_stack_send_iso_to_lower(buf->data, buf->len); break; default: LOG_ERR("Unknown type %u", bt_buf_get_type(buf)); ret = EIO; goto done; } LOG_HEXDUMP_DBG(buf->data, buf->len, "Final HCI buffer:"); if (ret) { LOG_ERR("HCI write error %d", ret); } done: k_sem_give(&hci_sem); net_buf_unref(buf); return ret ?
-EIO : 0; } static const struct bt_hci_driver_api drv = { .open = cyw208xx_open, .close = cyw208xx_close, .send = cyw208xx_send, .setup = cyw208xx_setup }; static int cyw208xx_hci_init(const struct device *dev) { ARG_UNUSED(dev); const cybt_platform_config_t cybsp_bt_platform_cfg = { .hci_config = { .hci_transport = CYBT_HCI_IPC, }, .controller_config = { .sleep_mode = { .sleep_mode_enabled = CYBSP_BT_PLATFORM_CFG_SLEEP_MODE_LP_ENABLED, }, } }; /* Configure platform specific settings for the BT device */ cybt_platform_config_init(&cybsp_bt_platform_cfg); return 0; } /* Implements the wiced_bt_**** functions required by the btstack-integration asset */ wiced_result_t wiced_bt_dev_vendor_specific_command(uint16_t opcode, uint8_t param_len, uint8_t *param_buf, wiced_bt_dev_vendor_specific_command_complete_cback_t cback) { /* * This function is used only by the btstack-integration asset * to enable LPM. */ struct net_buf *buf = NULL; /* Allocate a HCI command buffer */ buf = bt_hci_cmd_create(opcode, param_len); if (!buf) { LOG_ERR("Unable to allocate buffer"); return WICED_NO_MEMORY; } /* Add data part of packet */ net_buf_add_mem(buf, param_buf, param_len); bt_hci_cmd_send(opcode, buf); return WICED_BT_SUCCESS; } void wiced_bt_process_hci(hci_packet_type_t pti, uint8_t *data, uint32_t length) { const struct device *dev = DEVICE_DT_GET(DT_DRV_INST(0)); struct cyw208xx_data *hci = dev->data; struct net_buf *buf = NULL; size_t buf_tailroom = 0; switch (pti) { case HCI_PACKET_TYPE_EVENT: buf = bt_buf_get_evt(data[0], 0, K_NO_WAIT); if (!buf) { LOG_ERR("Failed to allocate the buffer for RX: EVENT "); return; } break; case HCI_PACKET_TYPE_ACL: buf = bt_buf_get_rx(BT_BUF_ACL_IN, K_NO_WAIT); if (!buf) { LOG_ERR("Failed to allocate the buffer for RX: ACL "); return; } bt_buf_set_type(buf, BT_BUF_ACL_IN); break; case HCI_PACKET_TYPE_SCO: /* Not applicable: no buffer is allocated for SCO */ return; case HCI_PACKET_TYPE_ISO: buf = bt_buf_get_rx(BT_BUF_ISO_IN, K_NO_WAIT); if (!buf) { LOG_ERR("Failed to allocate the buffer for RX: ISO "); return; } break; default: return; } buf_tailroom = net_buf_tailroom(buf); if (buf_tailroom < length) { LOG_WRN("Not enough space for rx data"); net_buf_unref(buf); /* avoid leaking the allocated buffer */ return; } net_buf_add_mem(buf, data, length); /* Provide the buffer to the host */ hci->recv(dev, buf); } void wiced_bt_process_hci_events(uint8_t *data, uint32_t length) { wiced_bt_process_hci(HCI_PACKET_TYPE_EVENT, data, length); } void wiced_bt_process_acl_data(uint8_t *data, uint32_t length) { wiced_bt_process_hci(HCI_PACKET_TYPE_ACL, data, length); } void wiced_bt_process_isoc_data(uint8_t *data, uint32_t length) { wiced_bt_process_hci(HCI_PACKET_TYPE_ISO, data, length); } void wiced_bt_stack_init_internal(wiced_bt_management_cback_t mgmt_cback, wiced_bt_internal_post_stack_init_cb post_stack_cb, wiced_bt_internal_stack_evt_handler_cb evt_handler_cb) { k_sem_give(&cybt_platform_task_init_sem); } /* Keep the empty functions below; they are referenced by the btstack-integration asset for the WICED BT stack. */ void wiced_bt_stack_indicate_lower_tx_complete(void) { /* NA for Zephyr */ } void wiced_bt_stack_shutdown(void) { /* NA for Zephyr */ } void wiced_bt_process_timer(void) { /* NA for Zephyr */ } #define CYW208XX_DEVICE_INIT(inst) \ static struct cyw208xx_data cyw208xx_data_##inst = { \ }; \ DEVICE_DT_INST_DEFINE(inst, cyw208xx_hci_init, NULL, &cyw208xx_data_##inst, NULL, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &drv) /* Only one instance supported */ CYW208XX_DEVICE_INIT(0) ```
/content/code_sandbox/drivers/bluetooth/hci/hci_ifx_cyw208xx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,135
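The download loop in cyw208xx_bt_firmware_download() above walks the .hcd image as a run of records: a 2-byte little-endian HCI opcode, a 1-byte payload length, then the payload, terminated by a launch_ram record. A standalone sketch of that framing; sys_get_le16() avoids the unaligned cast the driver uses, and the struct and function names are illustrative:

```c
/* Standalone sketch of the .hcd record framing consumed by
 * cyw208xx_bt_firmware_download(): [opcode lo][opcode hi][len][payload].
 * The struct and function names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <zephyr/sys/byteorder.h>

struct hcd_record {
	uint16_t opcode;       /* e.g. 0xFC4C write_ram, 0xFC4E launch_ram */
	uint8_t len;           /* payload bytes that follow the header */
	const uint8_t *payload;
};

/* Returns bytes consumed, or 0 if the image is truncated. */
size_t hcd_next(const uint8_t *img, size_t remaining, struct hcd_record *rec)
{
	if (remaining < 3 || remaining - 3 < img[2]) {
		return 0;
	}
	rec->opcode = sys_get_le16(img); /* safe unaligned little-endian read */
	rec->len = img[2];
	rec->payload = &img[3];
	return (size_t)rec->len + 3;
}
```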
```unknown # # # config HCI_NXP_ENABLE_AUTO_SLEEP bool "BLE Controller auto sleep mode" help If enabled, the Controller auto sleep mode will be configured and enabled during HCI init. Auto sleep mode means the Controller will handle its low power state automatically. Enabling this feature saves power at the cost of some latency when sending an HCI message to the Controller, as the Host will need to wake it up. config HCI_NXP_SET_CAL_DATA bool "BLE Controller calibration data" help If enabled, the Host will send calibration data to the BLE Controller during HCI init. config HCI_NXP_SET_CAL_DATA_ANNEX100 bool "BLE Controller calibration data annex 100" help If enabled, the Host will send calibration data annex 100 to the BLE Controller during HCI init. if BT_H4_NXP_CTLR config BT_NXP_NW612 bool "NXP IW612 Chipset" help The NXP IW612 chipset supports Wi-Fi® 802.11a/b/g/n/ac/ax + Bluetooth® 5.3 BR/EDR/LE + IEEE 802.15.4, with up to 601 Mbps data rate on Wi-Fi® and 2 Mbps data rate on Bluetooth®. 4-wire UART@3M baud is supported. PCM for audio is also supported. Details of the module can be found at path_to_url wireless-connectivity/wi-fi-plus-bluetooth-plus-802-15-4/2-4-5-ghz- your_sha256_hashtri- radio-solution:IW612. endif # BT_H4_NXP_CTLR ```
/content/code_sandbox/drivers/bluetooth/hci/Kconfig.nxp
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
355
```c /* * */ #define DT_DRV_COMPAT ovti_ov7670 #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/video.h> #define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ov7670); /* Initialization register structure */ struct ov7670_reg { uint8_t reg; uint8_t cmd; }; struct ov7670_config { struct i2c_dt_spec bus; #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) struct gpio_dt_spec reset; #endif #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(pwdn_gpios) struct gpio_dt_spec pwdn; #endif }; struct ov7670_data { struct video_format fmt; }; /* OV7670 registers */ #define OV7670_PID 0x0A #define OV7670_COM7 0x12 #define OV7670_MVFP 0x1E #define OV7670_COM10 0x15 #define OV7670_COM12 0x3C #define OV7670_BRIGHT 0x55 #define OV7670_CLKRC 0x11 #define OV7670_SCALING_PCLK_DIV 0x73 #define OV7670_COM14 0x3E #define OV7670_DBLV 0x6B #define OV7670_SCALING_XSC 0x70 #define OV7670_SCALING_YSC 0x71 #define OV7670_COM2 0x09 #define OV7670_SCALING_PCLK_DELAY 0xA2 #define OV7670_BD50MAX 0xA5 #define OV7670_BD60MAX 0xAB #define OV7670_HAECC7 0xAA #define OV7670_COM3 0x0C #define OV7670_COM4 0x0D #define OV7670_COM6 0x0F #define OV7670_COM11 0x3B #define OV7670_EDGE 0x3F #define OV7670_DNSTH 0x4C #define OV7670_DM_LNL 0x92 #define OV7670_DM_LNH 0x93 #define OV7670_COM15 0x40 #define OV7670_TSLB 0x3A #define OV7670_COM13 0x3D #define OV7670_MANU 0x67 #define OV7670_MANV 0x68 #define OV7670_HSTART 0x17 #define OV7670_HSTOP 0x18 #define OV7670_VSTRT 0x19 #define OV7670_VSTOP 0x1A #define OV7670_HREF 0x32 #define OV7670_VREF 0x03 #define OV7670_SCALING_DCWCTR 0x72 #define OV7670_GAIN 0x00 #define OV7670_AECHH 0x07 #define OV7670_AECH 0x10 #define OV7670_COM8 0x13 #define OV7670_COM9 0x14 #define OV7670_AEW 0x24 #define OV7670_AEB 0x25 #define OV7670_VPT 0x26 #define OV7670_AWBC1 0x43 #define OV7670_AWBC2 0x44 #define OV7670_AWBC3 0x45 #define OV7670_AWBC4 0x46 #define OV7670_AWBC5 0x47 #define OV7670_AWBC6 0x48 #define OV7670_MTX1 0x4F #define OV7670_MTX2 0x50 #define OV7670_MTX3 0x51 #define OV7670_MTX4 0x52 #define OV7670_MTX5 0x53 #define OV7670_MTX6 0x54 #define OV7670_LCC1 0x62 #define OV7670_LCC2 0x63 #define OV7670_LCC3 0x64 #define OV7670_LCC4 0x65 #define OV7670_LCC5 0x66 #define OV7670_LCC6 0x94 #define OV7670_LCC7 0x95 #define OV7670_SLOP 0x7A #define OV7670_GAM1 0x7B #define OV7670_GAM2 0x7C #define OV7670_GAM3 0x7D #define OV7670_GAM4 0x7E #define OV7670_GAM5 0x7F #define OV7670_GAM6 0x80 #define OV7670_GAM7 0x81 #define OV7670_GAM8 0x82 #define OV7670_GAM9 0x83 #define OV7670_GAM10 0x84 #define OV7670_GAM11 0x85 #define OV7670_GAM12 0x86 #define OV7670_GAM13 0x87 #define OV7670_GAM14 0x88 #define OV7670_GAM15 0x89 #define OV7670_HAECC1 0x9F #define OV7670_HAECC2 0xA0 #define OV7670_HSYEN 0x31 #define OV7670_HAECC3 0xA6 #define OV7670_HAECC4 0xA7 #define OV7670_HAECC5 0xA8 #define OV7670_HAECC6 0xA9 /* OV7670 definitions */ #define OV7670_PROD_ID 0x76 #define OV7670_VIDEO_FORMAT_CAP(width, height, format) \ { \ .pixelformat = (format), .width_min = (width), .width_max = (width), \ .height_min = (height), .height_max = (height), .width_step = 0, .height_step = 0 \ } static const struct video_format_cap fmts[] = { OV7670_VIDEO_FORMAT_CAP(176, 144, VIDEO_PIX_FMT_RGB565), /* QCIF */ OV7670_VIDEO_FORMAT_CAP(320, 240, VIDEO_PIX_FMT_RGB565), /* QVGA */ OV7670_VIDEO_FORMAT_CAP(352, 288, VIDEO_PIX_FMT_RGB565), /* CIF */ OV7670_VIDEO_FORMAT_CAP(640, 480, VIDEO_PIX_FMT_RGB565), /* VGA */ OV7670_VIDEO_FORMAT_CAP(176, 144, VIDEO_PIX_FMT_YUYV), /* QCIF 
*/
	OV7670_VIDEO_FORMAT_CAP(320, 240, VIDEO_PIX_FMT_YUYV), /* QVGA */
	OV7670_VIDEO_FORMAT_CAP(352, 288, VIDEO_PIX_FMT_YUYV), /* CIF */
	OV7670_VIDEO_FORMAT_CAP(640, 480, VIDEO_PIX_FMT_YUYV), /* VGA */
	{0}};

/* This initialization table is based on the MCUX SDK driver for the OV7670 */
static const struct ov7670_reg ov7670_init_regtbl[] = {
	{OV7670_MVFP, 0x20}, /* MVFP: Mirror/VFlip, normal image */

	/* Configure the output timing */
	/* PCLK does not toggle during horizontal blank: one PCLK, one pixel */
	{OV7670_COM10, 0x20}, /* COM10 */
	{OV7670_COM12, 0x00}, /* COM12: no HREF when VSYNC is low */
	/* Brightness Control, with signal -128 to +128, 0x00 is middle value */
	{OV7670_BRIGHT, 0x2f},
	/* Internal clock pre-scaler: F(internal clock) = F(input clock)/(Bit[5:0]+1) */
	{OV7670_CLKRC, 0x81}, /* Clock div: input/(n+1), bit 6 set to 1 to disable divider */
	/* SCALING_PCLK_DIV */
	{OV7670_SCALING_PCLK_DIV, 0x00}, /* 0: enable clock divider, 010: divided by 4 */
	/* Common Control 14, Bit[4]: DCW and scaling PCLK enable, Bit[3]: manual scaling */
	{OV7670_COM14, 0x00},
	/* DBLV, Bit[7:6]: PLL control */
	/* 0: bypass PLL, 40: input clock x4, 80: input clock x6, C0: input clock x8 */
	{OV7670_DBLV, 0x40},

	/* Test pattern, useful in some cases */
	{OV7670_SCALING_XSC, 0x00},
	{OV7670_SCALING_YSC, 0x00},

	/* Output drive capability */
	{OV7670_COM2, 0x00}, /* Common Control 2, output drive capability: 1x */
	{OV7670_SCALING_PCLK_DELAY, 0x02},
	{OV7670_BD50MAX, 0x05},
	{OV7670_BD60MAX, 0x07},
	{OV7670_HAECC7, 0x94},
	{OV7670_COM3, 0x00},
	{OV7670_COM4, 0x00},
	{OV7670_COM6, 0x4b},
	{OV7670_COM11, 0x9F}, /* Night mode */
	{OV7670_EDGE, 0x04},  /* Edge Enhancement Adjustment */
	{OV7670_DNSTH, 0x00}, /* De-noise Strength */
	{OV7670_DM_LNL, 0x00},
	{OV7670_DM_LNH, 0x00},

	/* Reserved registers */
	{0x16, 0x02},
	{0x21, 0x02},
	{0x22, 0x91},
	{0x29, 0x07},
	{0x35, 0x0b},
	{0x33, 0x0b},
	{0x37, 0x1d},
	{0x38, 0x71},
	{0x39, 0x2a},
	{0x0e, 0x61},
	{0x56, 0x40},
	{0x57, 0x80},
	{0x69, 0x00},
	{0x74, 0x19},

	/* Display settings, must be retained */
	{OV7670_COM15, 0xD0}, /* Common Control 15 */
	{OV7670_TSLB, 0x0C},  /* Line Buffer Test Option */
	{OV7670_COM13, 0x80}, /* Common Control 13 */
	{OV7670_MANU, 0x11},  /* Manual U Value */
	{OV7670_MANV, 0xFF},  /* Manual V Value */

	/* Configure the output window; this can be reconfigured later */
	{OV7670_HSTART, 0x16}, /* HSTART */
	{OV7670_HSTOP, 0x04},  /* HSTOP */
	{OV7670_VSTRT, 0x02},  /* VSTRT */
	{OV7670_VSTOP, 0x7a},  /* VSTOP */
	{OV7670_HREF, 0x80},   /* HREF */
	{OV7670_VREF, 0x0a},   /* VREF */

	/* DCW Control */
	{OV7670_SCALING_DCWCTR, 0x11},

	/* AGC/AEC - Automatic Gain Control/Automatic Exposure Control */
	{OV7670_GAIN, 0x00},  /* AGC */
	{OV7670_AECHH, 0x3F}, /* Exposure Value */
	{OV7670_AECH, 0xFF},
	{OV7670_COM8, 0x66},
	{OV7670_COM9, 0x21}, /* Limit the max gain */
	{OV7670_AEW, 0x75},
	{OV7670_AEB, 0x63},
	{OV7670_VPT, 0xA5},

	/* Automatic white balance control */
	{OV7670_AWBC1, 0x14},
	{OV7670_AWBC2, 0xf0},
	{OV7670_AWBC3, 0x34},
	{OV7670_AWBC4, 0x58},
	{OV7670_AWBC5, 0x28},
	{OV7670_AWBC6, 0x3a},

	/* Matrix coefficients */
	{OV7670_MTX1, 0x80},
	{OV7670_MTX2, 0x80},
	{OV7670_MTX3, 0x00},
	{OV7670_MTX4, 0x22},
	{OV7670_MTX5, 0x5e},
	{OV7670_MTX6, 0x80},

	/* AWB Control */
	{0x59, 0x88},
	{0x5a, 0x88},
	{0x5b, 0x44},
	{0x5c, 0x67},
	{0x5d, 0x49},
	{0x5e, 0x0e},
	{0x6c, 0x0a},
	{0x6d, 0x55},
	{0x6e, 0x11},
	{0x6f, 0x9f},

	/* Lens Correction Option */
	{OV7670_LCC1, 0x00},
	{OV7670_LCC2, 0x00},
	{OV7670_LCC3, 0x04},
	{OV7670_LCC4, 0x20},
	{OV7670_LCC5, 0x05},
	{OV7670_LCC6, 0x04}, /* Effective only when LCC5[2] is high */
	{OV7670_LCC7, 0x08}, /* Effective only when LCC5[2] is high */

	/* Gamma curve, no need to reconfigure */
	{OV7670_SLOP, 0x20},
	{OV7670_GAM1, 0x1c},
	{OV7670_GAM2, 0x28},
	{OV7670_GAM3, 0x3c},
	{OV7670_GAM4, 0x55},
	{OV7670_GAM5, 0x68},
	{OV7670_GAM6, 0x76},
	{OV7670_GAM7, 0x80},
	{OV7670_GAM8, 0x88},
	{OV7670_GAM9, 0x8f},
	{OV7670_GAM10, 0x96},
	{OV7670_GAM11, 0xa3},
	{OV7670_GAM12, 0xaf},
	{OV7670_GAM13, 0xc4},
	{OV7670_GAM14, 0xd7},
	{OV7670_GAM15, 0xe8},

	/* Histogram-based AEC/AGC control */
	{OV7670_HAECC1, 0x78},
	{OV7670_HAECC2, 0x68},
	{OV7670_HSYEN, 0xff},
	{0xa1, 0x03},
	{OV7670_HAECC3, 0xdf},
	{OV7670_HAECC4, 0xdf},
	{OV7670_HAECC5, 0xf0},
	{OV7670_HAECC6, 0x90},

	/* Automatic black level compensation */
	{0xb0, 0x84},
	{0xb1, 0x0c},
	{0xb2, 0x0e},
	{0xb3, 0x82},
	{0xb8, 0x0a},
};

static int ov7670_get_caps(const struct device *dev, enum video_endpoint_id ep,
			   struct video_caps *caps)
{
	caps->format_caps = fmts;
	return 0;
}

static int ov7670_set_fmt(const struct device *dev, enum video_endpoint_id ep,
			  struct video_format *fmt)
{
	const struct ov7670_config *config = dev->config;
	struct ov7670_data *data = dev->data;
	uint8_t com7 = 0U;
	uint8_t i = 0U;

	if (fmt->pixelformat != VIDEO_PIX_FMT_RGB565 && fmt->pixelformat != VIDEO_PIX_FMT_YUYV) {
		LOG_ERR("Only RGB565 and YUYV supported!");
		return -ENOTSUP;
	}

	if (!memcmp(&data->fmt, fmt, sizeof(data->fmt))) {
		/* Nothing to do */
		return 0;
	}

	memcpy(&data->fmt, fmt, sizeof(data->fmt));

	if (fmt->pixelformat == VIDEO_PIX_FMT_RGB565) {
		com7 |= 0x4;
	}

	/* Set output resolution */
	while (fmts[i].pixelformat) {
		if (fmts[i].width_min == fmt->width && fmts[i].height_min == fmt->height &&
		    fmts[i].pixelformat == fmt->pixelformat) {
			/* Set output format */
			switch (fmts[i].width_min) {
			case 176: /* QCIF */
				com7 |= BIT(3);
				break;
			case 320: /* QVGA */
				com7 |= BIT(4);
				break;
			case 352: /* CIF */
				com7 |= BIT(5);
				break;
			default: /* VGA */
				break;
			}
			/* Program COM7 to set format */
			return i2c_reg_write_byte_dt(&config->bus, OV7670_COM7, com7);
		}
		i++;
	}

	LOG_ERR("Unsupported format");
	return -ENOTSUP;
}

static int ov7670_get_fmt(const struct device *dev, enum video_endpoint_id ep,
			  struct video_format *fmt)
{
	struct ov7670_data *data = dev->data;

	if (fmt == NULL) {
		return -EINVAL;
	}

	memcpy(fmt, &data->fmt, sizeof(data->fmt));
	return 0;
}

static int ov7670_init(const struct device *dev)
{
	const struct ov7670_config *config = dev->config;
	int ret, i;
	uint8_t pid;
	struct video_format fmt;
	const struct ov7670_reg *reg;

	if (!i2c_is_ready_dt(&config->bus)) {
		/* I2C device is not ready, return */
		return -ENODEV;
	}

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(pwdn_gpios)
	/* Power up camera module */
	if (config->pwdn.port != NULL) {
		if (!gpio_is_ready_dt(&config->pwdn)) {
			return -ENODEV;
		}
		ret = gpio_pin_configure_dt(&config->pwdn, GPIO_OUTPUT_INACTIVE);
		if (ret < 0) {
			LOG_ERR("Could not clear power down pin: %d", ret);
			return ret;
		}
	}
#endif

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios)
	/* Reset camera module */
	if (config->reset.port != NULL) {
		if (!gpio_is_ready_dt(&config->reset)) {
			return -ENODEV;
		}
		ret = gpio_pin_configure_dt(&config->reset, GPIO_OUTPUT);
		if (ret < 0) {
			LOG_ERR("Could not set reset pin: %d", ret);
			return ret;
		}
		/* Reset is active low, with a 1 ms settling time */
		gpio_pin_set_dt(&config->reset, 0);
		k_msleep(1);
		gpio_pin_set_dt(&config->reset, 1);
		k_msleep(1);
	}
#endif

	/*
	 * Read the product ID from the camera. This camera implements the
	 * SCCB spec, which *should* be I2C compatible, but in practice it
	 * does not seem to respond when I2C repeated-start commands are
	 * used. To work around this, use a separate write followed by a
	 * read to access registers.
	 */
	uint8_t cmd = OV7670_PID;

	ret = i2c_write_dt(&config->bus, &cmd, sizeof(cmd));
	if (ret < 0) {
		LOG_ERR("Could not request product ID: %d", ret);
		return ret;
	}
	ret = i2c_read_dt(&config->bus, &pid, sizeof(pid));
	if (ret < 0) {
		LOG_ERR("Could not read product ID: %d", ret);
		return ret;
	}

	if (pid != OV7670_PROD_ID) {
		LOG_ERR("Incorrect product ID: 0x%02X", pid);
		return -ENODEV;
	}

	/* Set default camera format (VGA, YUYV) */
	fmt.pixelformat = VIDEO_PIX_FMT_YUYV;
	fmt.width = 640;
	fmt.height = 480;
	fmt.pitch = fmt.width * 2;
	ret = ov7670_set_fmt(dev, VIDEO_EP_OUT, &fmt);
	if (ret < 0) {
		return ret;
	}

	/* Write initialization values to OV7670 */
	for (i = 0; i < ARRAY_SIZE(ov7670_init_regtbl); i++) {
		reg = &ov7670_init_regtbl[i];
		ret = i2c_reg_write_byte_dt(&config->bus, reg->reg, reg->cmd);
		if (ret < 0) {
			return ret;
		}
	}

	return 0;
}

static const struct video_driver_api ov7670_api = {
	.set_format = ov7670_set_fmt,
	.get_format = ov7670_get_fmt,
	.get_caps = ov7670_get_caps,
};

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios)
#define OV7670_RESET_GPIO(inst) .reset = GPIO_DT_SPEC_INST_GET_OR(inst, reset_gpios, {}),
#else
#define OV7670_RESET_GPIO(inst)
#endif

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(pwdn_gpios)
#define OV7670_PWDN_GPIO(inst) .pwdn = GPIO_DT_SPEC_INST_GET_OR(inst, pwdn_gpios, {}),
#else
#define OV7670_PWDN_GPIO(inst)
#endif

#define OV7670_INIT(inst) \
	const struct ov7670_config ov7670_config_##inst = {.bus = I2C_DT_SPEC_INST_GET(inst), \
							   OV7670_RESET_GPIO(inst) \
							   OV7670_PWDN_GPIO(inst)}; \
	struct ov7670_data ov7670_data_##inst; \
\
	DEVICE_DT_INST_DEFINE(inst, ov7670_init, NULL, &ov7670_data_##inst, &ov7670_config_##inst, \
			      POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, &ov7670_api);

DT_INST_FOREACH_STATUS_OKAY(OV7670_INIT)
```
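The product-ID read in `ov7670_init` above shows the SCCB write-then-read workaround inline. As a minimal sketch, assuming only the `i2c_write_dt()`/`i2c_read_dt()` calls the driver already uses, a generic register-read helper could look like this; `ov7670_read_reg` is a hypothetical name, not part of the driver:

```c
#include <zephyr/drivers/i2c.h>

/* Hypothetical helper (sketch): read one OV7670 register using the
 * SCCB-friendly write-then-read sequence instead of an I2C repeated start.
 */
static int ov7670_read_reg(const struct i2c_dt_spec *bus, uint8_t reg, uint8_t *val)
{
	int ret;

	/* First transfer: write the register address, ending with a STOP */
	ret = i2c_write_dt(bus, &reg, sizeof(reg));
	if (ret < 0) {
		return ret;
	}

	/* Second transfer: a plain read returns the addressed register */
	return i2c_read_dt(bus, val, sizeof(*val));
}
```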
/content/code_sandbox/drivers/video/ov7670.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,207
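The CLKRC and DBLV comments in the init table above encode the sensor's clock arithmetic: the PLL (DBLV[7:6]) multiplies the input clock by 1/4/6/8 and the pre-scaler divides it by Bit[5:0]+1 unless CLKRC bit 6 bypasses it. A worked sketch under those assumptions follows; the helper name is illustrative and not part of the driver:

```c
#include <stdint.h>
#include <zephyr/sys/util.h> /* BIT() */

/* Hypothetical helper (sketch): internal clock frequency from CLKRC/DBLV.
 * PLL multiply and pre-scaler divide commute, so ordering does not change
 * the resulting frequency.
 */
static uint32_t ov7670_internal_clk_hz(uint32_t xclk_hz, uint8_t clkrc, uint8_t dblv)
{
	/* DBLV[7:6]: 00 = bypass (x1), 01 = x4, 10 = x6, 11 = x8 */
	static const uint8_t pll_mult[] = {1, 4, 6, 8};
	uint32_t mult = pll_mult[(dblv >> 6) & 0x3];

	/* CLKRC bit 6 set disables the pre-scaler; otherwise divide by Bit[5:0]+1 */
	uint32_t div = (clkrc & BIT(6)) ? 1 : ((uint32_t)(clkrc & 0x3F) + 1);

	return xclk_hz * mult / div;
}

/* Example: XCLK = 24 MHz, DBLV = 0x40 (x4), CLKRC = 0x81 (divide by 2)
 * gives 24 MHz * 4 / 2 = 48 MHz.
 */
```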
```c /* * */ #include <zephyr/drivers/bluetooth.h> #include <sl_btctrl_linklayer.h> #include <sl_hci_common_transport.h> #include <pa_conversions_efr32.h> #include <sl_bt_ll_zephyr.h> #include <rail.h> #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(bt_hci_driver_slz); #define DT_DRV_COMPAT silabs_bt_hci struct hci_data { bt_hci_recv_t recv; }; #define SL_BT_CONFIG_ACCEPT_LIST_SIZE 1 #define SL_BT_CONFIG_MAX_CONNECTIONS 1 #define SL_BT_CONFIG_USER_ADVERTISERS 1 #define SL_BT_CONTROLLER_BUFFER_MEMORY CONFIG_BT_SILABS_HCI_BUFFER_MEMORY #define SL_BT_CONTROLLER_LE_BUFFER_SIZE_MAX CONFIG_BT_BUF_ACL_TX_COUNT #define SL_BT_CONTROLLER_COMPLETED_PACKETS_THRESHOLD 1 #define SL_BT_CONTROLLER_COMPLETED_PACKETS_EVENTS_TIMEOUT 3 #define SL_BT_SILABS_LL_STACK_SIZE 1024 static K_KERNEL_STACK_DEFINE(slz_ll_stack, SL_BT_SILABS_LL_STACK_SIZE); static struct k_thread slz_ll_thread; void rail_isr_installer(void) { #ifdef CONFIG_SOC_SERIES_EFR32MG24 IRQ_CONNECT(SYNTH_IRQn, 0, SYNTH_IRQHandler, NULL, 0); #else IRQ_CONNECT(RDMAILBOX_IRQn, 0, RDMAILBOX_IRQHandler, NULL, 0); #endif IRQ_CONNECT(RAC_SEQ_IRQn, 0, RAC_SEQ_IRQHandler, NULL, 0); IRQ_CONNECT(RAC_RSM_IRQn, 0, RAC_RSM_IRQHandler, NULL, 0); IRQ_CONNECT(PROTIMER_IRQn, 0, PROTIMER_IRQHandler, NULL, 0); IRQ_CONNECT(MODEM_IRQn, 0, MODEM_IRQHandler, NULL, 0); IRQ_CONNECT(FRC_IRQn, 0, FRC_IRQHandler, NULL, 0); IRQ_CONNECT(BUFC_IRQn, 0, BUFC_IRQHandler, NULL, 0); IRQ_CONNECT(AGC_IRQn, 0, AGC_IRQHandler, NULL, 0); } /** * @brief Transmit HCI message using the currently used transport layer. * The HCI calls this function to transmit a full HCI message. * @param[in] data Packet type followed by HCI packet data. * @param[in] len Length of the `data` parameter * @return 0 - on success, or non-zero on failure. 
*/
uint32_t hci_common_transport_transmit(uint8_t *data, int16_t len)
{
	const struct device *dev = DEVICE_DT_GET(DT_DRV_INST(0));
	struct hci_data *hci = dev->data;
	struct net_buf *buf;
	uint8_t packet_type = data[0];
	uint8_t event_code;

	LOG_HEXDUMP_DBG(data, len, "host packet data:");

	/* Drop the packet type from the frame buffer - it is no longer needed */
	data = &data[1];
	len -= 1;

	switch (packet_type) {
	case h4_event:
		event_code = data[0];
		buf = bt_buf_get_evt(event_code, false, K_FOREVER);
		break;
	case h4_acl:
		buf = bt_buf_get_rx(BT_BUF_ACL_IN, K_FOREVER);
		break;
	default:
		LOG_ERR("Unknown HCI type: %d", packet_type);
		return -EINVAL;
	}

	net_buf_add_mem(buf, data, len);
	hci->recv(dev, buf);

	sl_btctrl_hci_transmit_complete(0);

	return 0;
}

static int slz_bt_send(const struct device *dev, struct net_buf *buf)
{
	int rv = 0;

	ARG_UNUSED(dev);

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_ACL_OUT:
		net_buf_push_u8(buf, h4_acl);
		break;
	case BT_BUF_CMD:
		net_buf_push_u8(buf, h4_command);
		break;
	default:
		rv = -EINVAL;
		goto done;
	}

	rv = hci_common_transport_receive(buf->data, buf->len, true);

done:
	net_buf_unref(buf);
	return rv;
}

static void slz_thread_func(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	slz_ll_thread_func();
}

static int slz_bt_open(const struct device *dev, bt_hci_recv_t recv)
{
	struct hci_data *hci = dev->data;
	int ret;

	/* Start the link layer thread */
	k_thread_create(&slz_ll_thread, slz_ll_stack, K_KERNEL_STACK_SIZEOF(slz_ll_stack),
			slz_thread_func, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_DRIVER_RX_HIGH_PRIO), 0, K_NO_WAIT);

	rail_isr_installer();
	sl_rail_util_pa_init();

	/* sl_btctrl_init_mem returns the number of memory buffers allocated */
	ret = sl_btctrl_init_mem(SL_BT_CONTROLLER_BUFFER_MEMORY);
	if (!ret) {
		LOG_ERR("Failed to allocate controller memory");
		return -ENOMEM;
	}

	sl_btctrl_configure_le_buffer_size(SL_BT_CONTROLLER_LE_BUFFER_SIZE_MAX);

	ret = sl_btctrl_init_ll();
	if (ret) {
		LOG_ERR("Bluetooth link layer init failed %d", ret);
		goto deinit;
	}

	sl_btctrl_init_adv();
	sl_btctrl_init_scan();
	sl_btctrl_init_conn();
	sl_btctrl_init_adv_ext();
	sl_btctrl_init_scan_ext();

	ret = sl_btctrl_init_basic(SL_BT_CONFIG_MAX_CONNECTIONS, SL_BT_CONFIG_USER_ADVERTISERS,
				   SL_BT_CONFIG_ACCEPT_LIST_SIZE);
	if (ret) {
		LOG_ERR("Failed to initialize the controller %d", ret);
		goto deinit;
	}

	sl_btctrl_configure_completed_packets_reporting(
		SL_BT_CONTROLLER_COMPLETED_PACKETS_THRESHOLD,
		SL_BT_CONTROLLER_COMPLETED_PACKETS_EVENTS_TIMEOUT);

	sl_bthci_init_upper();
	sl_btctrl_hci_parser_init_default();
	sl_btctrl_hci_parser_init_conn();
	sl_btctrl_hci_parser_init_adv();
	sl_btctrl_hci_parser_init_phy();

#ifdef CONFIG_PM
	{
		RAIL_Status_t status = RAIL_InitPowerManager();

		if (status != RAIL_STATUS_NO_ERROR) {
			LOG_ERR("RAIL: failed to initialize power management, status=%d",
				status);
			ret = -EIO;
			goto deinit;
		}
	}
#endif

	hci->recv = recv;

	LOG_DBG("SiLabs BT HCI started");

	return 0;

deinit:
	sli_btctrl_deinit_mem();
	return ret;
}

static const struct bt_hci_driver_api drv = {
	.open = slz_bt_open,
	.send = slz_bt_send,
};

#define HCI_DEVICE_INIT(inst) \
	static struct hci_data hci_data_##inst = { \
	}; \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &hci_data_##inst, NULL, \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &drv)

/* Only one instance supported right now */
HCI_DEVICE_INIT(0)
```
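The doc comment above says the controller hands `hci_common_transport_transmit()` a complete H4-framed message. As an illustrative sketch (not part of the driver; byte values are made up), this is what such a frame looks like for an HCI_Command_Complete event acknowledging HCI_Reset:

```c
/* Hypothetical sketch: an H4-framed HCI_Command_Complete event as the
 * SiLabs controller would pass it to hci_common_transport_transmit().
 */
static void example_transmit_command_complete(void)
{
	uint8_t frame[] = {
		h4_event,   /* H4 packet type byte, stripped by the function */
		0x0e,       /* event code: HCI_Command_Complete */
		0x04,       /* parameter total length */
		0x01,       /* Num_HCI_Command_Packets */
		0x03, 0x0c, /* opcode: HCI_Reset (0x0C03), little-endian */
		0x00,       /* status: success */
	};

	(void)hci_common_transport_transmit(frame, sizeof(frame));
}
```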
/content/code_sandbox/drivers/bluetooth/hci/slz_hci.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,524
```c /* userchan.c - HCI User Channel based Bluetooth driver */

/*
 */

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/sys/util.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <poll.h>
#include <sys/socket.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <limits.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include <zephyr/sys/byteorder.h>

#include "soc.h"
#include "cmdline.h" /* native_posix command line options header */

#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/drivers/bluetooth.h>

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_driver);

#define DT_DRV_COMPAT zephyr_bt_hci_userchan

struct uc_data {
	int fd;
	bt_hci_recv_t recv;
};

#define BTPROTO_HCI 1
struct sockaddr_hci {
	sa_family_t hci_family;
	unsigned short hci_dev;
	unsigned short hci_channel;
};
#define HCI_CHANNEL_USER 1

#define SOL_HCI 0

static K_KERNEL_STACK_DEFINE(rx_thread_stack,
			     CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE);
static struct k_thread rx_thread_data;

static unsigned short bt_dev_index;

#define TCP_ADDR_BUFF_SIZE 16
static bool hci_socket;
static char ip_addr[TCP_ADDR_BUFF_SIZE];
static unsigned int port;
static bool arg_found;

static struct net_buf *get_rx(const uint8_t *buf)
{
	bool discardable = false;
	k_timeout_t timeout = K_FOREVER;

	switch (buf[0]) {
	case BT_HCI_H4_EVT:
		if (buf[1] == BT_HCI_EVT_LE_META_EVENT &&
		    (buf[3] == BT_HCI_EVT_LE_ADVERTISING_REPORT)) {
			discardable = true;
			timeout = K_NO_WAIT;
		}
		return bt_buf_get_evt(buf[1], discardable, timeout);
	case BT_HCI_H4_ACL:
		return bt_buf_get_rx(BT_BUF_ACL_IN, K_FOREVER);
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			return bt_buf_get_rx(BT_BUF_ISO_IN, K_FOREVER);
		}
		__fallthrough;
	default:
		LOG_ERR("Unknown packet type: %u", buf[0]);
	}

	return NULL;
}

/**
 * @brief Decode the length of an HCI H4 packet and check whether it is complete
 * @details Decodes the packet length according to Bluetooth spec v5.4 Vol 4 Part E
 * @param buf Pointer to an HCI packet buffer
 * @param buf_len Bytes available in the buffer
 * @return Length of the complete HCI packet in bytes, -1 if no HCI packet
 *         can be found, 0 if more data is required.
 */
static int32_t hci_packet_complete(const uint8_t *buf, uint16_t buf_len)
{
	uint16_t payload_len = 0;
	const uint8_t type = buf[0];
	uint8_t header_len = sizeof(type);
	const uint8_t *hdr = &buf[sizeof(type)];

	switch (type) {
	case BT_HCI_H4_CMD: {
		const struct bt_hci_cmd_hdr *cmd = (const struct bt_hci_cmd_hdr *)hdr;

		/* Parameter Total Length */
		payload_len = cmd->param_len;
		header_len += BT_HCI_CMD_HDR_SIZE;
		break;
	}
	case BT_HCI_H4_ACL: {
		const struct bt_hci_acl_hdr *acl = (const struct bt_hci_acl_hdr *)hdr;

		/* Data Total Length */
		payload_len = sys_le16_to_cpu(acl->len);
		header_len += BT_HCI_ACL_HDR_SIZE;
		break;
	}
	case BT_HCI_H4_SCO: {
		const struct bt_hci_sco_hdr *sco = (const struct bt_hci_sco_hdr *)hdr;

		/* Data_Total_Length */
		payload_len = sco->len;
		header_len += BT_HCI_SCO_HDR_SIZE;
		break;
	}
	case BT_HCI_H4_EVT: {
		const struct bt_hci_evt_hdr *evt = (const struct bt_hci_evt_hdr *)hdr;

		/* Parameter Total Length */
		payload_len = evt->len;
		header_len += BT_HCI_EVT_HDR_SIZE;
		break;
	}
	case BT_HCI_H4_ISO: {
		const struct bt_hci_iso_hdr *iso = (const struct bt_hci_iso_hdr *)hdr;

		/* ISO_Data_Load_Length parameter */
		payload_len = bt_iso_hdr_len(sys_le16_to_cpu(iso->len));
		header_len += BT_HCI_ISO_HDR_SIZE;
		break;
	}
	/* If no valid packet type found */
	default:
		LOG_WRN("Unknown packet type 0x%02x", type);
		return -1;
	}

	/* Request more data */
	if (buf_len < header_len || buf_len - header_len < payload_len) {
		return 0;
	}

	return (int32_t)header_len + payload_len;
}

static bool uc_ready(int fd)
{
	struct pollfd pollfd = { .fd = fd, .events = POLLIN };

	return (poll(&pollfd, 1, 0) == 1);
}

static void rx_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;
	struct uc_data *uc = dev->data;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("started");

	ssize_t frame_size = 0;

	while (1) {
		static uint8_t frame[512];
		struct net_buf *buf;
		size_t buf_tailroom;
		size_t buf_add_len;
		ssize_t len;
		const uint8_t *frame_start = frame;

		if (!uc_ready(uc->fd)) {
			k_sleep(K_MSEC(1));
			continue;
		}

		LOG_DBG("calling read()");

		len = read(uc->fd, frame + frame_size, sizeof(frame) - frame_size);
		if (len < 0) {
			if (errno == EINTR) {
				k_yield();
				continue;
			}

			LOG_ERR("Reading socket failed, errno %d", errno);
			close(uc->fd);
			uc->fd = -1;
			return;
		}

		frame_size += len;

		while (frame_size > 0) {
			const uint8_t *buf_add;
			const uint8_t packet_type = frame_start[0];
			const int32_t decoded_len = hci_packet_complete(frame_start, frame_size);

			if (decoded_len == -1) {
				LOG_ERR("HCI Packet type is invalid, length could not be decoded");
				frame_size = 0; /* Drop buffer */
				break;
			}

			if (decoded_len == 0) {
				if (frame_size == sizeof(frame)) {
					LOG_ERR("HCI Packet (%zd bytes so far) is too big for "
						"frame (%zu bytes)",
						frame_size, sizeof(frame));
					frame_size = 0; /* Drop buffer */
					break;
				}
				if (frame_start != frame) {
					memmove(frame, frame_start, frame_size);
				}
				/* Read more */
				break;
			}

			buf_add = frame_start + sizeof(packet_type);
			buf_add_len = decoded_len - sizeof(packet_type);

			buf = get_rx(frame_start);

			frame_size -= decoded_len;
			frame_start += decoded_len;

			if (!buf) {
				LOG_DBG("Discard adv report due to insufficient buf");
				continue;
			}

			buf_tailroom = net_buf_tailroom(buf);
			if (buf_tailroom < buf_add_len) {
				LOG_ERR("Not enough space in buffer %zu/%zu",
					buf_add_len, buf_tailroom);
				net_buf_unref(buf);
				continue;
			}

			net_buf_add_mem(buf, buf_add, buf_add_len);

			LOG_DBG("Calling bt_recv(%p)", buf);

			uc->recv(dev, buf);
		}

		k_yield();
	}
}

static int uc_send(const struct device *dev, struct net_buf *buf)
{
	struct uc_data *uc = dev->data;

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	if (uc->fd < 0) {
		LOG_ERR("User channel not open");
		return -EIO;
	}

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_ACL_OUT:
		net_buf_push_u8(buf, BT_HCI_H4_ACL);
		break;
	case BT_BUF_CMD:
		net_buf_push_u8(buf, BT_HCI_H4_CMD);
		break;
	case BT_BUF_ISO_OUT:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			net_buf_push_u8(buf, BT_HCI_H4_ISO);
			break;
		}
		__fallthrough;
	default:
		LOG_ERR("Unknown buffer type");
		return -EINVAL;
	}

	if (write(uc->fd, buf->data, buf->len) < 0) {
		return -errno;
	}

	net_buf_unref(buf);
	return 0;
}

static int user_chan_open(void)
{
	int fd;

	if (hci_socket) {
		struct sockaddr_hci addr;

		fd = socket(PF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK,
			    BTPROTO_HCI);
		if (fd < 0) {
			return -errno;
		}

		(void)memset(&addr, 0, sizeof(addr));
		addr.hci_family = AF_BLUETOOTH;
		addr.hci_dev = bt_dev_index;
		addr.hci_channel = HCI_CHANNEL_USER;

		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			int err = -errno;

			close(fd);
			return err;
		}
	} else {
		struct sockaddr_in addr;

		fd = socket(AF_INET, SOCK_STREAM, 0);
		if (fd < 0) {
			return -errno;
		}

		addr.sin_family = AF_INET;
		addr.sin_port = htons(port);

		if (inet_pton(AF_INET, ip_addr, &(addr.sin_addr)) <= 0) {
			int err = -errno;

			close(fd);
			return err;
		}

		if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			int err = -errno;

			close(fd);
			return err;
		}
	}

	return fd;
}

static int uc_open(const struct device *dev, bt_hci_recv_t recv)
{
	struct uc_data *uc = dev->data;

	if (hci_socket) {
		LOG_DBG("hci%d", bt_dev_index);
	} else {
		LOG_DBG("hci %s:%d", ip_addr, port);
	}

	uc->fd = user_chan_open();
	if (uc->fd < 0) {
		return uc->fd;
	}

	uc->recv = recv;

	LOG_DBG("User Channel opened as fd %d", uc->fd);

	k_thread_create(&rx_thread_data, rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(rx_thread_stack),
			rx_thread, (void *)dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_DRIVER_RX_HIGH_PRIO),
			0, K_NO_WAIT);

	LOG_DBG("returning");

	return 0;
}

static const struct bt_hci_driver_api uc_drv_api = {
	.open = uc_open,
	.send = uc_send,
};

static int uc_init(const struct device *dev)
{
	if (!arg_found) {
		posix_print_warning("Warning: Bluetooth device missing.\n"
				    "Specify either a local hci interface --bt-dev=hciN\n"
				    "or a valid hci tcp server --bt-dev=ip_address:port\n");
		return -ENODEV;
	}

	return 0;
}

#define UC_DEVICE_INIT(inst) \
	static struct uc_data uc_data_##inst = { \
		.fd = -1, \
	}; \
	DEVICE_DT_INST_DEFINE(inst, uc_init, NULL, &uc_data_##inst, NULL, \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &uc_drv_api)

DT_INST_FOREACH_STATUS_OKAY(UC_DEVICE_INIT)

static void cmd_bt_dev_found(char *argv, int offset)
{
	arg_found = true;

	if (strncmp(&argv[offset], "hci", 3) == 0 && strlen(&argv[offset]) >= 4) {
		long arg_hci_idx = strtol(&argv[offset + 3], NULL, 10);

		if (arg_hci_idx >= 0 && arg_hci_idx <= USHRT_MAX) {
			bt_dev_index = arg_hci_idx;
			hci_socket = true;
		} else {
			posix_print_error_and_exit("Invalid argument value for --bt-dev. "
						   "hci idx must be within range 0 to 65535.\n");
		}
	} else if (sscanf(&argv[offset], "%15[^:]:%d", ip_addr, &port) == 2) {
		if (port > USHRT_MAX) {
			posix_print_error_and_exit("Error: IP port for bluetooth "
						   "hci tcp server is out of range.\n");
		}

		struct in_addr addr;

		if (inet_pton(AF_INET, ip_addr, &addr) != 1) {
			posix_print_error_and_exit("Error: IP address for bluetooth "
						   "hci tcp server is incorrect.\n");
		}
	} else {
		posix_print_error_and_exit("Invalid option %s for --bt-dev. "
					   "An hci interface or hci tcp server is expected.\n",
					   &argv[offset]);
	}
}

static void add_btuserchan_arg(void)
{
	static struct args_struct_t btuserchan_args[] = {
		/*
		 * Fields:
		 * manual, mandatory, switch,
		 * option_name, var_name, type,
		 * destination, callback,
		 * description
		 */
		{ false, true, false,
		  "bt-dev", "hciX", 's',
		  NULL, cmd_bt_dev_found,
		  "A local HCI device to be used for Bluetooth (e.g. hci0) "
		  "or an HCI TCP Server (e.g. 127.0.0.1:9000)" },
		ARG_TABLE_ENDMARKER
	};

	native_add_command_line_opts(btuserchan_args);
}

NATIVE_TASK(add_btuserchan_arg, PRE_BOOT_1, 10);
```
/content/code_sandbox/drivers/bluetooth/hci/userchan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,168
```c /* * */ #include <soc.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <nrf53_cpunet_mgmt.h> #include <../subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/debug.h> #include <hal/nrf_spu.h> #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(bt_hci_nrf53_support); int bt_hci_transport_teardown(const struct device *dev) { ARG_UNUSED(dev); /* Put the Network MCU in Forced-OFF mode. */ nrf53_cpunet_enable(false); LOG_DBG("Network MCU placed in Forced-OFF mode"); return 0; } int bt_hci_transport_setup(const struct device *dev) { ARG_UNUSED(dev); /* Route Bluetooth Controller Debug Pins */ DEBUG_SETUP(); #if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE) /* Retain nRF5340 Network MCU in Secure domain (bus * accesses by Network MCU will have Secure attribute set). */ nrf_spu_extdomain_set((NRF_SPU_Type *)DT_REG_ADDR(DT_NODELABEL(spu)), 0, true, false); #endif /* !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE) */ /* Release the Network MCU, 'Release force off signal' */ nrf53_cpunet_enable(true); return 0; } ```
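As a usage sketch, an HCI transport that owns the nRF5340 network core would call these hooks in the following order; `example_net_core_cycle()` is a hypothetical wrapper, not part of this file:

```c
/* Hypothetical usage (sketch): cycle the Network MCU around an HCI session */
static void example_net_core_cycle(const struct device *hci_dev)
{
	/* Configures debug pins/SPU and releases the force-off signal */
	bt_hci_transport_setup(hci_dev);

	/* ... exchange HCI packets with the network core ... */

	/* Returns the Network MCU to Forced-OFF mode */
	bt_hci_transport_teardown(hci_dev);
}
```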
/content/code_sandbox/drivers/bluetooth/hci/nrf53_support.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
305