```c /* * */ #define DT_DRV_COMPAT qemu_ivshmem #define LOG_LEVEL CONFIG_IVSHMEM_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ivshmem); #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/pcie/cap.h> #include <zephyr/init.h> #include <zephyr/drivers/virtualization/ivshmem.h> #include "virt_ivshmem.h" #ifdef CONFIG_IVSHMEM_DOORBELL static void ivshmem_doorbell(const void *arg) { const struct ivshmem_param *param = arg; LOG_DBG("Interrupt received on vector %u", param->vector); if (param->signal != NULL) { k_poll_signal_raise(param->signal, param->vector); } } static bool ivshmem_configure_msi_x_interrupts(const struct device *dev) { #if defined(CONFIG_PCIE_MSI_X) && defined(CONFIG_PCIE_MSI_MULTI_VECTOR) struct ivshmem *data = dev->data; bool ret = false; uint8_t n_vectors; uint32_t key; int i; key = irq_lock(); n_vectors = pcie_msi_vectors_allocate(data->pcie->bdf, CONFIG_IVSHMEM_INT_PRIORITY, data->vectors, CONFIG_IVSHMEM_MSI_X_VECTORS); if (n_vectors == 0) { LOG_ERR("Could not allocate %u MSI-X vectors", CONFIG_IVSHMEM_MSI_X_VECTORS); goto out; } LOG_DBG("Allocated %u vectors", n_vectors); for (i = 0; i < n_vectors; i++) { data->params[i].dev = dev; data->params[i].vector = i; if (!pcie_msi_vector_connect(data->pcie->bdf, &data->vectors[i], ivshmem_doorbell, &data->params[i], 0)) { LOG_ERR("Failed to connect MSI-X vector %u", i); goto out; } } LOG_INF("%u MSI-X Vectors connected", n_vectors); if (!pcie_msi_enable(data->pcie->bdf, data->vectors, n_vectors, 0)) { LOG_ERR("Could not enable MSI-X"); goto out; } data->n_vectors = n_vectors; ret = true; LOG_DBG("MSI-X configured"); out: irq_unlock(key); return ret; #else return false; #endif } #ifdef CONFIG_IVSHMEM_V2 static bool ivshmem_configure_int_x_interrupts(const struct device *dev) { struct ivshmem *data = dev->data; const struct ivshmem_cfg *cfg = dev->config; uint32_t cfg_int = pcie_conf_read(data->pcie->bdf, PCIE_CONF_INTR); uint32_t cfg_intx_pin = PCIE_CONF_INTR_PIN(cfg_int); if (!IN_RANGE(cfg_intx_pin, PCIE_INTX_PIN_MIN, PCIE_INTX_PIN_MAX)) { LOG_ERR("Invalid INTx pin %u", cfg_intx_pin); return false; } /* Ensure INTx is enabled */ pcie_set_cmd(data->pcie->bdf, PCIE_CONF_CMDSTAT_INTX_DISABLE, false); const struct intx_info *intx = &cfg->intx_info[cfg_intx_pin - 1]; data->params[0].dev = dev; data->params[0].vector = 0; LOG_INF("Enabling INTx IRQ %u (pin %u)", intx->irq, cfg_intx_pin); if (intx->irq == INTX_IRQ_UNUSED || !pcie_connect_dynamic_irq( data->pcie->bdf, intx->irq, intx->priority, ivshmem_doorbell, &data->params[0], intx->flags)) { LOG_ERR("Failed to connect INTx ISR %u", cfg_intx_pin); return false; } data->n_vectors = 1; pcie_irq_enable(data->pcie->bdf, intx->irq); return true; } #endif /* CONFIG_IVSHMEM_V2 */ static void register_signal(const struct device *dev, struct k_poll_signal *signal, uint16_t vector) { struct ivshmem *data = dev->data; data->params[vector].signal = signal; } #else #define ivshmem_configure_msi_x_interrupts(...) true #define ivshmem_configure_int_x_interrupts(...) true #define register_signal(...) 
#endif /* CONFIG_IVSHMEM_DOORBELL */ static const struct ivshmem_reg no_reg; __maybe_unused static uint64_t pcie_conf_read_u64(pcie_bdf_t bdf, unsigned int reg) { uint64_t lo = pcie_conf_read(bdf, reg); uint64_t hi = pcie_conf_read(bdf, reg + 1); return hi << 32 | lo; } static bool ivshmem_configure(const struct device *dev) { struct ivshmem *data = dev->data; struct pcie_bar mbar_regs, mbar_msi_x, mbar_shmem; if (!pcie_get_mbar(data->pcie->bdf, IVSHMEM_PCIE_REG_BAR_IDX, &mbar_regs)) { if (IS_ENABLED(CONFIG_IVSHMEM_DOORBELL) IF_ENABLED(CONFIG_IVSHMEM_V2, (|| data->ivshmem_v2))) { LOG_ERR("ivshmem regs bar not found"); return false; } LOG_INF("ivshmem regs bar not found"); device_map(DEVICE_MMIO_RAM_PTR(dev), (uintptr_t)&no_reg, sizeof(struct ivshmem_reg), K_MEM_CACHE_NONE); } else { pcie_set_cmd(data->pcie->bdf, PCIE_CONF_CMDSTAT_MEM | PCIE_CONF_CMDSTAT_MASTER, true); device_map(DEVICE_MMIO_RAM_PTR(dev), mbar_regs.phys_addr, mbar_regs.size, K_MEM_CACHE_NONE); } bool msi_x_bar_present = pcie_get_mbar( data->pcie->bdf, IVSHMEM_PCIE_MSI_X_BAR_IDX, &mbar_msi_x); bool shmem_bar_present = pcie_get_mbar( data->pcie->bdf, IVSHMEM_PCIE_SHMEM_BAR_IDX, &mbar_shmem); LOG_INF("MSI-X bar present: %s", msi_x_bar_present ? "yes" : "no"); LOG_INF("SHMEM bar present: %s", shmem_bar_present ? "yes" : "no"); uintptr_t shmem_phys_addr = mbar_shmem.phys_addr; #ifdef CONFIG_IVSHMEM_V2 if (data->ivshmem_v2) { if (mbar_regs.size < sizeof(struct ivshmem_v2_reg)) { LOG_ERR("Invalid ivshmem regs size %zu", mbar_regs.size); return false; } volatile struct ivshmem_v2_reg *regs = (volatile struct ivshmem_v2_reg *)DEVICE_MMIO_GET(dev); data->max_peers = regs->max_peers; if (!IN_RANGE(data->max_peers, 2, CONFIG_IVSHMEM_V2_MAX_PEERS)) { LOG_ERR("Invalid max peers %u", data->max_peers); return false; } uint32_t vendor_cap = pcie_get_cap(data->pcie->bdf, PCI_CAP_ID_VNDR); uint32_t cap_pos; if (!shmem_bar_present) { cap_pos = vendor_cap + IVSHMEM_CFG_ADDRESS / 4; shmem_phys_addr = pcie_conf_read_u64(data->pcie->bdf, cap_pos); } /* State table R/O */ cap_pos = vendor_cap + IVSHMEM_CFG_STATE_TAB_SZ / 4; size_t state_table_size = pcie_conf_read(data->pcie->bdf, cap_pos); LOG_INF("State table size 0x%zX", state_table_size); if (state_table_size < sizeof(uint32_t) * data->max_peers) { LOG_ERR("Invalid state table size %zu", state_table_size); return false; } k_mem_map_phys_bare((uint8_t **)&data->state_table_shmem, shmem_phys_addr, state_table_size, K_MEM_CACHE_WB | K_MEM_PERM_USER); /* R/W section (optional) */ cap_pos = vendor_cap + IVSHMEM_CFG_RW_SECTION_SZ / 4; data->rw_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos); size_t rw_section_offset = state_table_size; LOG_INF("RW section size 0x%zX", data->rw_section_size); if (data->rw_section_size > 0) { k_mem_map_phys_bare((uint8_t **)&data->rw_section_shmem, shmem_phys_addr + rw_section_offset, data->rw_section_size, K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER); } /* Output sections */ cap_pos = vendor_cap + IVSHMEM_CFG_OUTPUT_SECTION_SZ / 4; data->output_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos); size_t output_section_offset = rw_section_offset + data->rw_section_size; LOG_INF("Output section size 0x%zX", data->output_section_size); for (uint32_t i = 0; i < data->max_peers; i++) { uintptr_t phys_addr = shmem_phys_addr + output_section_offset + (data->output_section_size * i); uint32_t flags = K_MEM_CACHE_WB | K_MEM_PERM_USER; /* Only your own output section is R/W */ if (i == regs->id) { flags |= K_MEM_PERM_RW; } k_mem_map_phys_bare((uint8_t 
**)&data->output_section_shmem[i], phys_addr, data->output_section_size, flags); } data->size = output_section_offset + data->output_section_size * data->max_peers; /* Ensure one-shot ISR mode is disabled */ cap_pos = vendor_cap + IVSHMEM_CFG_PRIV_CNTL / 4; uint32_t cfg_priv_cntl = pcie_conf_read(data->pcie->bdf, cap_pos); cfg_priv_cntl &= ~(IVSHMEM_PRIV_CNTL_ONESHOT_INT << ((IVSHMEM_CFG_PRIV_CNTL % 4) * 8)); pcie_conf_write(data->pcie->bdf, cap_pos, cfg_priv_cntl); } else #endif /* CONFIG_IVSHMEM_V2 */ { if (!shmem_bar_present) { LOG_ERR("ivshmem mem bar not found"); return false; } data->size = mbar_shmem.size; k_mem_map_phys_bare((uint8_t **)&data->shmem, shmem_phys_addr, data->size, K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER); } if (msi_x_bar_present) { if (!ivshmem_configure_msi_x_interrupts(dev)) { LOG_ERR("MSI-X init failed"); return false; } } #ifdef CONFIG_IVSHMEM_V2 else if (data->ivshmem_v2) { if (!ivshmem_configure_int_x_interrupts(dev)) { LOG_ERR("INTx init failed"); return false; } } #endif LOG_INF("ivshmem configured:"); LOG_INF("- Registers at 0x%lX (mapped to 0x%lX)", mbar_regs.phys_addr, DEVICE_MMIO_GET(dev)); LOG_INF("- Shared memory of 0x%zX bytes at 0x%lX (mapped to 0x%lX)", data->size, shmem_phys_addr, data->shmem); return true; } static size_t ivshmem_api_get_mem(const struct device *dev, uintptr_t *memmap) { struct ivshmem *data = dev->data; #ifdef CONFIG_IVSHMEM_V2 if (data->ivshmem_v2) { *memmap = 0; return 0; } #endif *memmap = data->shmem; return data->size; } static uint32_t ivshmem_api_get_id(const struct device *dev) { uint32_t id; #ifdef CONFIG_IVSHMEM_V2 struct ivshmem *data = dev->data; if (data->ivshmem_v2) { volatile struct ivshmem_v2_reg *regs = (volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev); id = regs->id; } else #endif { volatile struct ivshmem_reg *regs = (volatile struct ivshmem_reg *) DEVICE_MMIO_GET(dev); id = regs->iv_position; } return id; } static uint16_t ivshmem_api_get_vectors(const struct device *dev) { #if CONFIG_IVSHMEM_DOORBELL struct ivshmem *data = dev->data; return data->n_vectors; #else return 0; #endif } static int ivshmem_api_int_peer(const struct device *dev, uint32_t peer_id, uint16_t vector) { #if CONFIG_IVSHMEM_DOORBELL struct ivshmem *data = dev->data; volatile uint32_t *doorbell_reg; uint32_t doorbell = IVSHMEM_GEN_DOORBELL(peer_id, vector); if (vector >= data->n_vectors) { return -EINVAL; } #ifdef CONFIG_IVSHMEM_V2 if (data->ivshmem_v2 && peer_id >= data->max_peers) { return -EINVAL; } if (data->ivshmem_v2) { volatile struct ivshmem_v2_reg *regs = (volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev); doorbell_reg = &regs->doorbell; } else #endif { volatile struct ivshmem_reg *regs = (volatile struct ivshmem_reg *) DEVICE_MMIO_GET(dev); doorbell_reg = &regs->doorbell; } *doorbell_reg = doorbell; return 0; #else return -ENOSYS; #endif } static int ivshmem_api_register_handler(const struct device *dev, struct k_poll_signal *signal, uint16_t vector) { #if CONFIG_IVSHMEM_DOORBELL struct ivshmem *data = dev->data; if (vector >= data->n_vectors) { return -EINVAL; } register_signal(dev, signal, vector); return 0; #else return -ENOSYS; #endif } #ifdef CONFIG_IVSHMEM_V2 static size_t ivshmem_api_get_rw_mem_section(const struct device *dev, uintptr_t *memmap) { struct ivshmem *data = dev->data; if (!data->ivshmem_v2) { *memmap = 0; return 0; } *memmap = data->rw_section_shmem; return data->rw_section_size; } static size_t ivshmem_api_get_output_mem_section(const struct device *dev, uint32_t peer_id, uintptr_t *memmap) 
{ struct ivshmem *data = dev->data; if (!data->ivshmem_v2 || peer_id >= data->max_peers) { *memmap = 0; return 0; } *memmap = data->output_section_shmem[peer_id]; return data->output_section_size; } static uint32_t ivshmem_api_get_state(const struct device *dev, uint32_t peer_id) { struct ivshmem *data = dev->data; if (!data->ivshmem_v2 || peer_id >= data->max_peers) { return 0; } const volatile uint32_t *state_table = (const volatile uint32_t *)data->state_table_shmem; return state_table[peer_id]; } static int ivshmem_api_set_state(const struct device *dev, uint32_t state) { struct ivshmem *data = dev->data; if (!data->ivshmem_v2) { return -ENOSYS; } volatile struct ivshmem_v2_reg *regs = (volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev); regs->state = state; return 0; } static uint32_t ivshmem_api_get_max_peers(const struct device *dev) { struct ivshmem *data = dev->data; if (!data->ivshmem_v2) { return 0; } return data->max_peers; } static uint16_t ivshmem_api_get_protocol(const struct device *dev) { struct ivshmem *data = dev->data; if (!data->ivshmem_v2) { return 0; } uint16_t protocol = (data->pcie->class_rev >> 8) & 0xFFFF; return protocol; } static int ivshmem_api_enable_interrupts(const struct device *dev, bool enable) { struct ivshmem *data = dev->data; if (!data->ivshmem_v2) { return -ENOSYS; } volatile struct ivshmem_v2_reg *regs = (volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev); regs->int_control = enable ? IVSHMEM_INT_ENABLE : 0; return 0; } #endif /* CONFIG_IVSHMEM_V2 */ static const struct ivshmem_driver_api ivshmem_api = { .get_mem = ivshmem_api_get_mem, .get_id = ivshmem_api_get_id, .get_vectors = ivshmem_api_get_vectors, .int_peer = ivshmem_api_int_peer, .register_handler = ivshmem_api_register_handler, #ifdef CONFIG_IVSHMEM_V2 .get_rw_mem_section = ivshmem_api_get_rw_mem_section, .get_output_mem_section = ivshmem_api_get_output_mem_section, .get_state = ivshmem_api_get_state, .set_state = ivshmem_api_set_state, .get_max_peers = ivshmem_api_get_max_peers, .get_protocol = ivshmem_api_get_protocol, .enable_interrupts = ivshmem_api_enable_interrupts #endif }; static int ivshmem_init(const struct device *dev) { struct ivshmem *data = dev->data; if (data->pcie->bdf == PCIE_BDF_NONE) { LOG_WRN("ivshmem device not found"); return -ENOTSUP; } LOG_INF("PCIe: ID 0x%08X, BDF 0x%X, class-rev 0x%08X", data->pcie->id, data->pcie->bdf, data->pcie->class_rev); if (!ivshmem_configure(dev)) { return -EIO; } return 0; } #define IVSHMEM_INTX_INFO(intx_idx, drv_idx) { \ COND_CODE_1(DT_IRQ_HAS_IDX(DT_DRV_INST(drv_idx), intx_idx), \ ( \ .irq = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, irq), \ .priority = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, priority), \ .flags = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, flags), \ ), \ (.irq = INTX_IRQ_UNUSED)) \ } #define IVSHMEM_DEVICE_INIT(n) \ BUILD_ASSERT(!IS_ENABLED(CONFIG_IVSHMEM_DOORBELL) || \ ((IS_ENABLED(CONFIG_PCIE_MSI_X) && \ IS_ENABLED(CONFIG_PCIE_MSI_MULTI_VECTOR)) || \ (DT_INST_PROP(n, ivshmem_v2) && \ DT_INST_NODE_HAS_PROP(n, interrupts))), \ "IVSHMEM_DOORBELL requires either MSI-X or INTx support"); \ BUILD_ASSERT(IS_ENABLED(CONFIG_IVSHMEM_V2) || !DT_INST_PROP(n, ivshmem_v2), \ "CONFIG_IVSHMEM_V2 must be enabled for ivshmem-v2"); \ DEVICE_PCIE_INST_DECLARE(n); \ static struct ivshmem ivshmem_data_##n = { \ DEVICE_PCIE_INST_INIT(n, pcie), \ IF_ENABLED(CONFIG_IVSHMEM_V2, \ (.ivshmem_v2 = DT_INST_PROP(n, ivshmem_v2),)) \ }; \ IF_ENABLED(CONFIG_IVSHMEM_V2, ( \ static struct ivshmem_cfg ivshmem_cfg_##n = { \ .intx_info 
= \ { FOR_EACH_FIXED_ARG(IVSHMEM_INTX_INFO, (,), n, 0, 1, 2, 3) } \ }; \ )); \ DEVICE_DT_INST_DEFINE(n, &ivshmem_init, NULL, \ &ivshmem_data_##n, \ COND_CODE_1(CONFIG_IVSHMEM_V2, (&ivshmem_cfg_##n), (NULL)), \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &ivshmem_api); DT_INST_FOREACH_STATUS_OKAY(IVSHMEM_DEVICE_INIT) ```
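The driver above implements the generic Zephyr ivshmem API (`ivshmem_get_mem()`, `ivshmem_get_id()`, `ivshmem_get_vectors()`, `ivshmem_register_handler()`, `ivshmem_int_peer()`). A minimal usage sketch, not taken from the source, assuming an ivshmem-v1 node with the `qemu,ivshmem` compatible and `CONFIG_IVSHMEM_DOORBELL`/`CONFIG_POLL` enabled; peer ID 0 and vector 0 are arbitrary example values:

```c
/*
 * Minimal usage sketch (not from the source): exercises the ivshmem API
 * implemented by the driver above. Assumes CONFIG_IVSHMEM_DOORBELL and
 * CONFIG_POLL; peer ID 0 and vector 0 are arbitrary example values.
 */
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/virtualization/ivshmem.h>

static struct k_poll_signal doorbell_sig;

int ivshmem_example(void)
{
	const struct device *ivshmem = DEVICE_DT_GET_ONE(qemu_ivshmem);
	struct k_poll_event ev;
	uintptr_t shmem;
	size_t size;

	if (!device_is_ready(ivshmem)) {
		return -ENODEV;
	}

	/* Shared memory region (ivshmem v1 semantics) */
	size = ivshmem_get_mem(ivshmem, &shmem);
	printk("ivshmem: id %u, %zu bytes at %p\n",
	       ivshmem_get_id(ivshmem), size, (void *)shmem);

	if (ivshmem_get_vectors(ivshmem) == 0) {
		return 0;
	}

	/* Wait for a doorbell on vector 0, then ring peer 0 back */
	k_poll_signal_init(&doorbell_sig);
	ivshmem_register_handler(ivshmem, &doorbell_sig, 0);
	k_poll_event_init(&ev, K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY,
			  &doorbell_sig);
	k_poll(&ev, 1, K_FOREVER);

	return ivshmem_int_peer(ivshmem, 0, 0);
}
```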
/content/code_sandbox/drivers/virtualization/virt_ivshmem.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,670
```kconfig
# MCUX CAN configuration options

config CAN_MCUX_FLEXCAN
	bool "NXP FlexCAN driver"
	default y
	depends on DT_HAS_NXP_FLEXCAN_ENABLED
	depends on CLOCK_CONTROL
	select PINCTRL
	help
	  Enable support for the NXP FlexCAN driver.

if CAN_MCUX_FLEXCAN

config CAN_MCUX_FLEXCAN_FD
	bool # hidden
	default y
	depends on DT_HAS_NXP_FLEXCAN_FD_ENABLED && CAN_FD_MODE
	help
	  Enable support for CAN FD capable NXP FlexCAN devices.

config CAN_MCUX_FLEXCAN_WAIT_TIMEOUT
	int "Maximum number of wait loop iterations"
	default 10000
	help
	  Maximum number of wait loop iterations for the MCUX FlexCAN HAL when
	  entering/leaving freeze mode.

config CAN_MAX_MB
	int "Maximum number of message buffers for concurrent active instances"
	default 16
	depends on SOC_SERIES_S32K3 || SOC_SERIES_S32K1 || SOC_SERIES_S32ZE
	range 1 96 if SOC_SERIES_S32K3
	range 1 32 if SOC_SERIES_S32K1 && !SOC_S32K142W && !SOC_S32K144W
	range 1 64 if SOC_S32K142W || SOC_S32K144W
	range 1 128 if SOC_SERIES_S32ZE
	help
	  Defines the maximum number of message buffers for concurrent active
	  instances.

config CAN_MAX_FILTER
	int "Maximum number of concurrent active RX filters"
	default 5
	range 1 15 if SOC_SERIES_KINETIS_KE1XF || SOC_SERIES_KINETIS_K6X
	range 1 13 if (SOC_SERIES_IMXRT10XX || SOC_SERIES_IMXRT11XX) && CAN_MCUX_FLEXCAN_FD
	range 1 63 if SOC_SERIES_IMXRT10XX || SOC_SERIES_IMXRT11XX
	range 1 96 if SOC_SERIES_S32K3
	range 1 32 if SOC_SERIES_S32K1 && !SOC_S32K142W && !SOC_S32K144W
	range 1 64 if SOC_S32K142W || SOC_S32K144W
	range 1 128 if SOC_SERIES_S32ZE
	help
	  Defines the maximum number of concurrent active RX filters.

endif # CAN_MCUX_FLEXCAN

config CAN_MCUX_MCAN
	bool "MCUX MCAN driver"
	default y
	depends on DT_HAS_NXP_LPC_MCAN_ENABLED
	depends on CLOCK_CONTROL
	select CAN_MCAN
	select PINCTRL
	select RESET
	help
	  Enable support for the MCUX MCAN driver.
```
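`CAN_MAX_FILTER` above bounds how many RX filters an application can hold at once. A short, hypothetical sketch (not part of the tree) of what exhausting that limit looks like from the generic Zephyr CAN API; the ID 0x123 is an arbitrary example:

```c
/*
 * Hypothetical sketch (not from the source): every successful
 * can_add_rx_filter() call consumes one of the CONFIG_CAN_MAX_FILTER slots
 * configured above; -ENOSPC is returned once the limit is reached.
 */
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/can.h>

static void rx_cb(const struct device *dev, struct can_frame *frame, void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);

	printk("RX id 0x%03x dlc %u\n", frame->id, frame->dlc);
}

int add_example_filter(const struct device *can_dev)
{
	const struct can_filter filter = {
		.id = 0x123,             /* arbitrary standard ID */
		.mask = CAN_STD_ID_MASK, /* exact match */
	};
	int filter_id = can_add_rx_filter(can_dev, rx_cb, NULL, &filter);

	if (filter_id == -ENOSPC) {
		/* All CONFIG_CAN_MAX_FILTER slots are already in use */
		return filter_id;
	}

	return filter_id; /* >= 0: handle for can_remove_rx_filter() */
}
```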
/content/code_sandbox/drivers/can/Kconfig.mcux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
569
```objective-c /* * * */ #ifndef _MCP2515_H_ #define _MCP2515_H_ #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/can.h> #define MCP2515_RX_CNT 2 /* Reduce the number of Tx buffers to 1 in order to avoid priority inversion. */ #define MCP2515_TX_CNT 1 #define MCP2515_FRAME_LEN 13 struct mcp2515_tx_cb { can_tx_callback_t cb; void *cb_arg; }; struct mcp2515_data { struct can_driver_data common; /* interrupt data */ struct gpio_callback int_gpio_cb; struct k_thread int_thread; k_thread_stack_t *int_thread_stack; struct k_sem int_sem; /* tx data */ struct k_sem tx_sem; struct mcp2515_tx_cb tx_cb[MCP2515_TX_CNT]; uint8_t tx_busy_map; /* filter data */ uint32_t filter_usage; can_rx_callback_t rx_cb[CONFIG_CAN_MAX_FILTER]; void *cb_arg[CONFIG_CAN_MAX_FILTER]; struct can_filter filter[CONFIG_CAN_MAX_FILTER]; /* general data */ struct k_mutex mutex; enum can_state old_state; uint8_t mcp2515_mode; }; struct mcp2515_config { const struct can_driver_config common; /* spi configuration */ struct spi_dt_spec bus; /* interrupt configuration */ struct gpio_dt_spec int_gpio; size_t int_thread_stack_size; int int_thread_priority; /* CAN timing */ uint32_t osc_freq; }; /* * Startup time of 128 OSC1 clock cycles at 1MHz (minimum clock in frequency) * see MCP2515 datasheet section 8.1 Oscillator Start-up Timer */ #define MCP2515_OSC_STARTUP_US 128U /* MCP2515 Opcodes */ #define MCP2515_OPCODE_WRITE 0x02 #define MCP2515_OPCODE_READ 0x03 #define MCP2515_OPCODE_BIT_MODIFY 0x05 #define MCP2515_OPCODE_LOAD_TX_BUFFER 0x40 #define MCP2515_OPCODE_RTS 0x80 #define MCP2515_OPCODE_READ_RX_BUFFER 0x90 #define MCP2515_OPCODE_READ_STATUS 0xA0 #define MCP2515_OPCODE_RESET 0xC0 /* MCP2515 Registers */ #define MCP2515_ADDR_CANSTAT 0x0E #define MCP2515_ADDR_CANCTRL 0x0F #define MCP2515_ADDR_TEC 0x1C #define MCP2515_ADDR_REC 0x1D #define MCP2515_ADDR_CNF3 0x28 #define MCP2515_ADDR_CNF2 0x29 #define MCP2515_ADDR_CNF1 0x2A #define MCP2515_ADDR_CANINTE 0x2B #define MCP2515_ADDR_CANINTF 0x2C #define MCP2515_ADDR_EFLG 0x2D #define MCP2515_ADDR_TXB0CTRL 0x30 #define MCP2515_ADDR_TXB1CTRL 0x40 #define MCP2515_ADDR_TXB2CTRL 0x50 #define MCP2515_ADDR_RXB0CTRL 0x60 #define MCP2515_ADDR_RXB1CTRL 0x70 #define MCP2515_ADDR_OFFSET_FRAME2FRAME 0x10 #define MCP2515_ADDR_OFFSET_CTRL2FRAME 0x01 /* MCP2515 Operation Modes */ #define MCP2515_MODE_NORMAL 0x00 #define MCP2515_MODE_LOOPBACK 0x02 #define MCP2515_MODE_SILENT 0x03 #define MCP2515_MODE_CONFIGURATION 0x04 /* MCP2515_FRAME_OFFSET */ #define MCP2515_FRAME_OFFSET_SIDH 0 #define MCP2515_FRAME_OFFSET_SIDL 1 #define MCP2515_FRAME_OFFSET_EID8 2 #define MCP2515_FRAME_OFFSET_EID0 3 #define MCP2515_FRAME_OFFSET_DLC 4 #define MCP2515_FRAME_OFFSET_D0 5 /* MCP2515_CANINTF */ #define MCP2515_CANINTF_RX0IF BIT(0) #define MCP2515_CANINTF_RX1IF BIT(1) #define MCP2515_CANINTF_TX0IF BIT(2) #define MCP2515_CANINTF_TX1IF BIT(3) #define MCP2515_CANINTF_TX2IF BIT(4) #define MCP2515_CANINTF_ERRIF BIT(5) #define MCP2515_CANINTF_WAKIF BIT(6) #define MCP2515_CANINTF_MERRF BIT(7) #define MCP2515_INTE_RX0IE BIT(0) #define MCP2515_INTE_RX1IE BIT(1) #define MCP2515_INTE_TX0IE BIT(2) #define MCP2515_INTE_TX1IE BIT(3) #define MCP2515_INTE_TX2IE BIT(4) #define MCP2515_INTE_ERRIE BIT(5) #define MCP2515_INTE_WAKIE BIT(6) #define MCP2515_INTE_MERRE BIT(7) #define MCP2515_EFLG_EWARN BIT(0) #define MCP2515_EFLG_RXWAR BIT(1) #define MCP2515_EFLG_TXWAR BIT(2) #define MCP2515_EFLG_RXEP BIT(3) #define MCP2515_EFLG_TXEP BIT(4) #define MCP2515_EFLG_TXBO BIT(5) #define MCP2515_EFLG_RX0OVR BIT(6) #define MCP2515_EFLG_RX1OVR 
BIT(7) #define MCP2515_TXCTRL_TXREQ BIT(3) #define MCP2515_CANSTAT_MODE_POS 5 #define MCP2515_CANSTAT_MODE_MASK (0x07 << MCP2515_CANSTAT_MODE_POS) #define MCP2515_CANCTRL_MODE_POS 5 #define MCP2515_CANCTRL_MODE_MASK (0x07 << MCP2515_CANCTRL_MODE_POS) #define MCP2515_TXBNCTRL_TXREQ_POS 3 #define MCP2515_TXBNCTRL_TXREQ_MASK (0x01 << MCP2515_TXBNCTRL_TXREQ_POS) #endif /*_MCP2515_H_*/ ```
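The opcodes above map directly onto the MCP2515 SPI instruction set. As a hypothetical illustration (these helpers are not part of the driver), a "Bit Modify" command is a four-byte transfer of opcode, register address, mask, and data, which is one way a mode-change request on CANCTRL could be issued:

```c
/*
 * Hypothetical helpers (not part of the driver): the MCP2515 "Bit Modify"
 * instruction is a 4-byte SPI transfer of opcode, register address, mask and
 * data. The function names below are illustrative, not existing symbols.
 */
#include <zephyr/drivers/spi.h>
#include "can_mcp2515.h"

static int mcp2515_cmd_bit_modify(const struct spi_dt_spec *bus, uint8_t addr,
				  uint8_t mask, uint8_t data)
{
	uint8_t cmd[4] = { MCP2515_OPCODE_BIT_MODIFY, addr, mask, data };
	const struct spi_buf tx_buf = { .buf = cmd, .len = sizeof(cmd) };
	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };

	return spi_write_dt(bus, &tx);
}

/* Example: request configuration mode via the CANCTRL REQOP bits (7:5) */
static int mcp2515_enter_config_mode(const struct spi_dt_spec *bus)
{
	return mcp2515_cmd_bit_modify(bus, MCP2515_ADDR_CANCTRL,
				      MCP2515_CANCTRL_MODE_MASK,
				      MCP2515_MODE_CONFIGURATION << MCP2515_CANCTRL_MODE_POS);
}
```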
/content/code_sandbox/drivers/can/can_mcp2515.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,362
```kconfig
# CAN loopback device configuration options

config CAN_LOOPBACK
	bool "Emulated CAN loopback driver"
	default y
	depends on DT_HAS_ZEPHYR_CAN_LOOPBACK_ENABLED
	help
	  This is an emulated driver that can only loop back messages.

if CAN_LOOPBACK

config CAN_MAX_FILTER
	int "Maximum number of concurrent active filters"
	default 16
	range 1 1024
	help
	  Defines the array size of the filters. Must be at least the size of
	  concurrent reads.

config CAN_LOOPBACK_TX_THREAD_STACK_SIZE
	int "TX thread stack size"
	default 512
	help
	  Stack size of the TX thread. The TX thread calls the callbacks of the
	  receiver if the filter matches.

config CAN_LOOPBACK_TX_THREAD_PRIORITY
	int "TX thread priority"
	default 2
	help
	  Priority of the TX thread. The TX thread calls the callbacks of the
	  receiver if the filter matches.

config CAN_LOOPBACK_TX_MSGQ_SIZE
	int "TX message queue size"
	default 16
	help
	  Number of TX frames that can be buffered. The send function puts
	  frames into this queue, and the TX thread takes the messages from
	  this msgq and calls the respective receiver if the filter matches.

endif # CAN_LOOPBACK
```
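Because the loopback driver simply feeds transmitted frames back to any matching RX filter, it is convenient for exercising the generic CAN API without hardware. A minimal sketch, not from the source, assuming the `zephyr,canbus` chosen node points at the loopback instance; ID 0x123 and the 100 ms timeouts are arbitrary:

```c
/*
 * Minimal sketch (not from the source): a frame sent through the loopback
 * driver is delivered back to the matching RX filter by the TX thread
 * configured via the options above. Assumes the zephyr,canbus chosen node
 * points at the loopback instance.
 */
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/can.h>

static K_SEM_DEFINE(rx_sem, 0, 1);

static void loopback_rx_cb(const struct device *dev, struct can_frame *frame, void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);

	printk("looped back id 0x%03x\n", frame->id);
	k_sem_give(&rx_sem);
}

int loopback_example(void)
{
	const struct device *can_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
	const struct can_filter filter = { .id = 0x123, .mask = CAN_STD_ID_MASK };
	const struct can_frame frame = { .id = 0x123, .dlc = 1, .data = { 0xAA } };
	int ret;

	if (!device_is_ready(can_dev)) {
		return -ENODEV;
	}

	ret = can_start(can_dev);
	if (ret != 0 && ret != -EALREADY) {
		return ret;
	}

	if (can_add_rx_filter(can_dev, loopback_rx_cb, NULL, &filter) < 0) {
		return -ENOSPC;
	}

	ret = can_send(can_dev, &frame, K_MSEC(100), NULL, NULL);
	if (ret != 0) {
		return ret;
	}

	/* The loopback TX thread invokes the RX callback for matching filters */
	return k_sem_take(&rx_sem, K_MSEC(100));
}
```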
/content/code_sandbox/drivers/can/Kconfig.loopback
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
270
```c /* * */ /* Include soc.h prior to Zephyr CAN headers to pull in HAL fixups */ #include <soc.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(can_stm32, CONFIG_CAN_LOG_LEVEL); #define CAN_INIT_TIMEOUT (10 * (sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC)) #define DT_DRV_COMPAT st_stm32_bxcan #define CAN_STM32_NUM_FILTER_BANKS (14) #define CAN_STM32_MAX_FILTER_ID \ (CONFIG_CAN_MAX_EXT_ID_FILTER + CONFIG_CAN_MAX_STD_ID_FILTER * 2) #define CAN_STM32_FIRX_STD_IDE_POS (3U) #define CAN_STM32_FIRX_STD_RTR_POS (4U) #define CAN_STM32_FIRX_STD_ID_POS (5U) #define CAN_STM32_FIRX_EXT_IDE_POS (2U) #define CAN_STM32_FIRX_EXT_RTR_POS (1U) #define CAN_STM32_FIRX_EXT_STD_ID_POS (21U) #define CAN_STM32_FIRX_EXT_EXT_ID_POS (3U) #if (CONFIG_CAN_MAX_STD_ID_FILTER + CONFIG_CAN_MAX_EXT_ID_FILTER * 2) > \ (CAN_STM32_NUM_FILTER_BANKS * 2) #error Number of configured filters exceeds available filter bank slots. #endif struct can_stm32_mailbox { can_tx_callback_t tx_callback; void *callback_arg; }; struct can_stm32_data { struct can_driver_data common; struct k_mutex inst_mutex; struct k_sem tx_int_sem; struct can_stm32_mailbox mb0; struct can_stm32_mailbox mb1; struct can_stm32_mailbox mb2; can_rx_callback_t rx_cb_std[CONFIG_CAN_MAX_STD_ID_FILTER]; can_rx_callback_t rx_cb_ext[CONFIG_CAN_MAX_EXT_ID_FILTER]; void *cb_arg_std[CONFIG_CAN_MAX_STD_ID_FILTER]; void *cb_arg_ext[CONFIG_CAN_MAX_EXT_ID_FILTER]; enum can_state state; }; struct can_stm32_config { const struct can_driver_config common; CAN_TypeDef *can; /*!< CAN Registers*/ CAN_TypeDef *master_can; /*!< CAN Registers for shared filter */ struct stm32_pclken pclken; void (*config_irq)(CAN_TypeDef *can); const struct pinctrl_dev_config *pcfg; }; /* * Mutex to prevent simultaneous access to filter registers shared between CAN1 * and CAN2. 
*/ static struct k_mutex filter_mutex; static void can_stm32_signal_tx_complete(const struct device *dev, struct can_stm32_mailbox *mb, int status) { can_tx_callback_t callback = mb->tx_callback; if (callback != NULL) { callback(dev, status, mb->callback_arg); mb->tx_callback = NULL; } } static void can_stm32_rx_fifo_pop(CAN_FIFOMailBox_TypeDef *mbox, struct can_frame *frame) { memset(frame, 0, sizeof(*frame)); if (mbox->RIR & CAN_RI0R_IDE) { frame->id = mbox->RIR >> CAN_RI0R_EXID_Pos; frame->flags |= CAN_FRAME_IDE; } else { frame->id = mbox->RIR >> CAN_RI0R_STID_Pos; } if ((mbox->RIR & CAN_RI0R_RTR) != 0) { frame->flags |= CAN_FRAME_RTR; } else { frame->data_32[0] = mbox->RDLR; frame->data_32[1] = mbox->RDHR; } frame->dlc = mbox->RDTR & (CAN_RDT0R_DLC >> CAN_RDT0R_DLC_Pos); #ifdef CONFIG_CAN_RX_TIMESTAMP frame->timestamp = ((mbox->RDTR & CAN_RDT0R_TIME) >> CAN_RDT0R_TIME_Pos); #endif } static inline void can_stm32_rx_isr_handler(const struct device *dev) { struct can_stm32_data *data = dev->data; const struct can_stm32_config *cfg = dev->config; CAN_TypeDef *can = cfg->can; CAN_FIFOMailBox_TypeDef *mbox; int filter_id, index; struct can_frame frame; can_rx_callback_t callback = NULL; void *cb_arg; while (can->RF0R & CAN_RF0R_FMP0) { mbox = &can->sFIFOMailBox[0]; filter_id = ((mbox->RDTR & CAN_RDT0R_FMI) >> CAN_RDT0R_FMI_Pos); LOG_DBG("Message on filter_id %d", filter_id); can_stm32_rx_fifo_pop(mbox, &frame); if (filter_id < CONFIG_CAN_MAX_EXT_ID_FILTER) { callback = data->rx_cb_ext[filter_id]; cb_arg = data->cb_arg_ext[filter_id]; } else if (filter_id < CAN_STM32_MAX_FILTER_ID) { index = filter_id - CONFIG_CAN_MAX_EXT_ID_FILTER; callback = data->rx_cb_std[index]; cb_arg = data->cb_arg_std[index]; } if (callback) { callback(dev, &frame, cb_arg); } /* Release message */ can->RF0R |= CAN_RF0R_RFOM0; } if (can->RF0R & CAN_RF0R_FOVR0) { LOG_ERR("RX FIFO Overflow"); CAN_STATS_RX_OVERRUN_INC(dev); } } static int can_stm32_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { const struct can_stm32_config *cfg = dev->config; struct can_stm32_data *data = dev->data; CAN_TypeDef *can = cfg->can; if (state != NULL) { if (!data->common.started) { *state = CAN_STATE_STOPPED; } else if (can->ESR & CAN_ESR_BOFF) { *state = CAN_STATE_BUS_OFF; } else if (can->ESR & CAN_ESR_EPVF) { *state = CAN_STATE_ERROR_PASSIVE; } else if (can->ESR & CAN_ESR_EWGF) { *state = CAN_STATE_ERROR_WARNING; } else { *state = CAN_STATE_ERROR_ACTIVE; } } if (err_cnt != NULL) { err_cnt->tx_err_cnt = ((can->ESR & CAN_ESR_TEC) >> CAN_ESR_TEC_Pos); err_cnt->rx_err_cnt = ((can->ESR & CAN_ESR_REC) >> CAN_ESR_REC_Pos); } return 0; } static inline void can_stm32_bus_state_change_isr(const struct device *dev) { struct can_stm32_data *data = dev->data; struct can_bus_err_cnt err_cnt; enum can_state state; const can_state_change_callback_t cb = data->common.state_change_cb; void *state_change_cb_data = data->common.state_change_cb_user_data; #ifdef CONFIG_CAN_STATS const struct can_stm32_config *cfg = dev->config; CAN_TypeDef *can = cfg->can; switch (can->ESR & CAN_ESR_LEC) { case (CAN_ESR_LEC_0): CAN_STATS_STUFF_ERROR_INC(dev); break; case (CAN_ESR_LEC_1): CAN_STATS_FORM_ERROR_INC(dev); break; case (CAN_ESR_LEC_1 | CAN_ESR_LEC_0): CAN_STATS_ACK_ERROR_INC(dev); break; case (CAN_ESR_LEC_2): CAN_STATS_BIT1_ERROR_INC(dev); break; case (CAN_ESR_LEC_2 | CAN_ESR_LEC_0): CAN_STATS_BIT0_ERROR_INC(dev); break; case (CAN_ESR_LEC_2 | CAN_ESR_LEC_1): CAN_STATS_CRC_ERROR_INC(dev); break; default: break; } /* Clear 
last error code flag */ can->ESR |= CAN_ESR_LEC; #endif /* CONFIG_CAN_STATS */ (void)can_stm32_get_state(dev, &state, &err_cnt); if (state != data->state) { data->state = state; if (cb != NULL) { cb(dev, state, err_cnt, state_change_cb_data); } } } static inline void can_stm32_tx_isr_handler(const struct device *dev) { struct can_stm32_data *data = dev->data; const struct can_stm32_config *cfg = dev->config; CAN_TypeDef *can = cfg->can; uint32_t bus_off; int status; bus_off = can->ESR & CAN_ESR_BOFF; if ((can->TSR & CAN_TSR_RQCP0) | bus_off) { status = can->TSR & CAN_TSR_TXOK0 ? 0 : can->TSR & CAN_TSR_TERR0 ? -EIO : can->TSR & CAN_TSR_ALST0 ? -EBUSY : bus_off ? -ENETUNREACH : -EIO; /* clear the request. */ can->TSR |= CAN_TSR_RQCP0; can_stm32_signal_tx_complete(dev, &data->mb0, status); } if ((can->TSR & CAN_TSR_RQCP1) | bus_off) { status = can->TSR & CAN_TSR_TXOK1 ? 0 : can->TSR & CAN_TSR_TERR1 ? -EIO : can->TSR & CAN_TSR_ALST1 ? -EBUSY : bus_off ? -ENETUNREACH : -EIO; /* clear the request. */ can->TSR |= CAN_TSR_RQCP1; can_stm32_signal_tx_complete(dev, &data->mb1, status); } if ((can->TSR & CAN_TSR_RQCP2) | bus_off) { status = can->TSR & CAN_TSR_TXOK2 ? 0 : can->TSR & CAN_TSR_TERR2 ? -EIO : can->TSR & CAN_TSR_ALST2 ? -EBUSY : bus_off ? -ENETUNREACH : -EIO; /* clear the request. */ can->TSR |= CAN_TSR_RQCP2; can_stm32_signal_tx_complete(dev, &data->mb2, status); } if (can->TSR & CAN_TSR_TME) { k_sem_give(&data->tx_int_sem); } } #ifdef CONFIG_SOC_SERIES_STM32F0X static void can_stm32_isr(const struct device *dev) { const struct can_stm32_config *cfg = dev->config; CAN_TypeDef *can = cfg->can; can_stm32_tx_isr_handler(dev); can_stm32_rx_isr_handler(dev); if (can->MSR & CAN_MSR_ERRI) { can_stm32_bus_state_change_isr(dev); can->MSR |= CAN_MSR_ERRI; } } #else static void can_stm32_rx_isr(const struct device *dev) { can_stm32_rx_isr_handler(dev); } static void can_stm32_tx_isr(const struct device *dev) { can_stm32_tx_isr_handler(dev); } static void can_stm32_state_change_isr(const struct device *dev) { const struct can_stm32_config *cfg = dev->config; CAN_TypeDef *can = cfg->can; /* Signal bus-off to waiting tx */ if (can->MSR & CAN_MSR_ERRI) { can_stm32_tx_isr_handler(dev); can_stm32_bus_state_change_isr(dev); can->MSR |= CAN_MSR_ERRI; } } #endif static int can_stm32_enter_init_mode(CAN_TypeDef *can) { uint32_t start_time; can->MCR |= CAN_MCR_INRQ; start_time = k_cycle_get_32(); while ((can->MSR & CAN_MSR_INAK) == 0U) { if (k_cycle_get_32() - start_time > CAN_INIT_TIMEOUT) { can->MCR &= ~CAN_MCR_INRQ; return -EAGAIN; } } return 0; } static int can_stm32_leave_init_mode(CAN_TypeDef *can) { uint32_t start_time; can->MCR &= ~CAN_MCR_INRQ; start_time = k_cycle_get_32(); while ((can->MSR & CAN_MSR_INAK) != 0U) { if (k_cycle_get_32() - start_time > CAN_INIT_TIMEOUT) { return -EAGAIN; } } return 0; } static int can_stm32_leave_sleep_mode(CAN_TypeDef *can) { uint32_t start_time; can->MCR &= ~CAN_MCR_SLEEP; start_time = k_cycle_get_32(); while ((can->MSR & CAN_MSR_SLAK) != 0) { if (k_cycle_get_32() - start_time > CAN_INIT_TIMEOUT) { return -EAGAIN; } } return 0; } static int can_stm32_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_ONE_SHOT; if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { *cap |= CAN_MODE_MANUAL_RECOVERY; } return 0; } static int can_stm32_start(const struct device *dev) { const struct can_stm32_config *cfg = dev->config; struct can_stm32_data *data = dev->data; CAN_TypeDef 
*can = cfg->can; int ret = 0; k_mutex_lock(&data->inst_mutex, K_FOREVER); if (data->common.started) { ret = -EALREADY; goto unlock; } if (cfg->common.phy != NULL) { ret = can_transceiver_enable(cfg->common.phy, data->common.mode); if (ret != 0) { LOG_ERR("failed to enable CAN transceiver (err %d)", ret); goto unlock; } } CAN_STATS_RESET(dev); ret = can_stm32_leave_init_mode(can); if (ret < 0) { LOG_ERR("Failed to leave init mode"); if (cfg->common.phy != NULL) { /* Attempt to disable the CAN transceiver in case of error */ (void)can_transceiver_disable(cfg->common.phy); } ret = -EIO; goto unlock; } data->common.started = true; unlock: k_mutex_unlock(&data->inst_mutex); return ret; } static int can_stm32_stop(const struct device *dev) { const struct can_stm32_config *cfg = dev->config; struct can_stm32_data *data = dev->data; CAN_TypeDef *can = cfg->can; int ret = 0; k_mutex_lock(&data->inst_mutex, K_FOREVER); if (!data->common.started) { ret = -EALREADY; goto unlock; } ret = can_stm32_enter_init_mode(can); if (ret < 0) { LOG_ERR("Failed to enter init mode"); ret = -EIO; goto unlock; } /* Abort any pending transmissions */ can_stm32_signal_tx_complete(dev, &data->mb0, -ENETDOWN); can_stm32_signal_tx_complete(dev, &data->mb1, -ENETDOWN); can_stm32_signal_tx_complete(dev, &data->mb2, -ENETDOWN); can->TSR |= CAN_TSR_ABRQ2 | CAN_TSR_ABRQ1 | CAN_TSR_ABRQ0; if (cfg->common.phy != NULL) { ret = can_transceiver_disable(cfg->common.phy); if (ret != 0) { LOG_ERR("failed to enable CAN transceiver (err %d)", ret); goto unlock; } } data->common.started = false; unlock: k_mutex_unlock(&data->inst_mutex); return ret; } static int can_stm32_set_mode(const struct device *dev, can_mode_t mode) { can_mode_t supported = CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_ONE_SHOT; const struct can_stm32_config *cfg = dev->config; CAN_TypeDef *can = cfg->can; struct can_stm32_data *data = dev->data; LOG_DBG("Set mode %d", mode); if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { supported |= CAN_MODE_MANUAL_RECOVERY; } if ((mode & ~(supported)) != 0) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->inst_mutex, K_FOREVER); if ((mode & CAN_MODE_LOOPBACK) != 0) { /* Loopback mode */ can->BTR |= CAN_BTR_LBKM; } else { can->BTR &= ~CAN_BTR_LBKM; } if ((mode & CAN_MODE_LISTENONLY) != 0) { /* Silent mode */ can->BTR |= CAN_BTR_SILM; } else { can->BTR &= ~CAN_BTR_SILM; } if ((mode & CAN_MODE_ONE_SHOT) != 0) { /* No automatic retransmission */ can->MCR |= CAN_MCR_NART; } else { can->MCR &= ~CAN_MCR_NART; } if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { if ((mode & CAN_MODE_MANUAL_RECOVERY) != 0) { /* No automatic recovery from bus-off */ can->MCR &= ~CAN_MCR_ABOM; } else { can->MCR |= CAN_MCR_ABOM; } } data->common.mode = mode; k_mutex_unlock(&data->inst_mutex); return 0; } static int can_stm32_set_timing(const struct device *dev, const struct can_timing *timing) { const struct can_stm32_config *cfg = dev->config; CAN_TypeDef *can = cfg->can; struct can_stm32_data *data = dev->data; k_mutex_lock(&data->inst_mutex, K_FOREVER); if (data->common.started) { k_mutex_unlock(&data->inst_mutex); return -EBUSY; } can->BTR = (can->BTR & ~(CAN_BTR_SJW_Msk | CAN_BTR_BRP_Msk | CAN_BTR_TS1_Msk | CAN_BTR_TS2_Msk)) | (((timing->sjw - 1) << CAN_BTR_SJW_Pos) & CAN_BTR_SJW_Msk) | (((timing->phase_seg1 - 1) << CAN_BTR_TS1_Pos) & CAN_BTR_TS1_Msk) | (((timing->phase_seg2 - 1) << CAN_BTR_TS2_Pos) & CAN_BTR_TS2_Msk) | (((timing->prescaler - 1) << 
CAN_BTR_BRP_Pos) & CAN_BTR_BRP_Msk); k_mutex_unlock(&data->inst_mutex); return 0; } static int can_stm32_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_stm32_config *cfg = dev->config; const struct device *clock; int ret; clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); ret = clock_control_get_rate(clock, (clock_control_subsys_t) &cfg->pclken, rate); if (ret != 0) { LOG_ERR("Failed call clock_control_get_rate: return [%d]", ret); return -EIO; } return 0; } static int can_stm32_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(dev); if (ide) { return CONFIG_CAN_MAX_EXT_ID_FILTER; } else { return CONFIG_CAN_MAX_STD_ID_FILTER; } } static int can_stm32_init(const struct device *dev) { const struct can_stm32_config *cfg = dev->config; struct can_stm32_data *data = dev->data; CAN_TypeDef *can = cfg->can; struct can_timing timing = { 0 }; const struct device *clock; uint32_t bank_offset; int ret; k_mutex_init(&filter_mutex); k_mutex_init(&data->inst_mutex); k_sem_init(&data->tx_int_sem, 0, 1); if (cfg->common.phy != NULL) { if (!device_is_ready(cfg->common.phy)) { LOG_ERR("CAN transceiver not ready"); return -ENODEV; } } clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clock)) { LOG_ERR("clock control device not ready"); return -ENODEV; } ret = clock_control_on(clock, (clock_control_subsys_t) &cfg->pclken); if (ret != 0) { LOG_ERR("HAL_CAN_Init clock control on failed: %d", ret); return -EIO; } /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("CAN pinctrl setup failed (%d)", ret); return ret; } ret = can_stm32_enter_init_mode(can); if (ret) { LOG_ERR("Failed to enter init mode"); return ret; } ret = can_stm32_leave_sleep_mode(can); if (ret) { LOG_ERR("Failed to exit sleep mode"); return ret; } /* configure scale of filter banks < CONFIG_CAN_MAX_EXT_ID_FILTER for ext ids */ bank_offset = (cfg->can == cfg->master_can) ? 
0 : CAN_STM32_NUM_FILTER_BANKS; cfg->master_can->FMR |= CAN_FMR_FINIT; cfg->master_can->FS1R |= ((1U << CONFIG_CAN_MAX_EXT_ID_FILTER) - 1) << bank_offset; cfg->master_can->FMR &= ~CAN_FMR_FINIT; can->MCR &= ~CAN_MCR_TTCM & ~CAN_MCR_ABOM & ~CAN_MCR_AWUM & ~CAN_MCR_NART & ~CAN_MCR_RFLM & ~CAN_MCR_TXFP; #ifdef CONFIG_CAN_RX_TIMESTAMP can->MCR |= CAN_MCR_TTCM; #endif /* Enable automatic bus-off recovery */ can->MCR |= CAN_MCR_ABOM; ret = can_calc_timing(dev, &timing, cfg->common.bitrate, cfg->common.sample_point); if (ret == -EINVAL) { LOG_ERR("Can't find timing for given param"); return -EIO; } LOG_DBG("Presc: %d, TS1: %d, TS2: %d", timing.prescaler, timing.phase_seg1, timing.phase_seg2); LOG_DBG("Sample-point err : %d", ret); ret = can_set_timing(dev, &timing); if (ret) { return ret; } ret = can_stm32_set_mode(dev, CAN_MODE_NORMAL); if (ret) { return ret; } (void)can_stm32_get_state(dev, &data->state, NULL); cfg->config_irq(can); can->IER |= CAN_IER_TMEIE; return 0; } static void can_stm32_set_state_change_callback(const struct device *dev, can_state_change_callback_t cb, void *user_data) { struct can_stm32_data *data = dev->data; const struct can_stm32_config *cfg = dev->config; CAN_TypeDef *can = cfg->can; data->common.state_change_cb = cb; data->common.state_change_cb_user_data = user_data; if (cb == NULL) { can->IER &= ~(CAN_IER_BOFIE | CAN_IER_EPVIE | CAN_IER_EWGIE); } else { can->IER |= CAN_IER_BOFIE | CAN_IER_EPVIE | CAN_IER_EWGIE; } } #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE static int can_stm32_recover(const struct device *dev, k_timeout_t timeout) { const struct can_stm32_config *cfg = dev->config; struct can_stm32_data *data = dev->data; CAN_TypeDef *can = cfg->can; int ret = -EAGAIN; int64_t start_time; if (!data->common.started) { return -ENETDOWN; } if ((data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { return -ENOTSUP; } if (!(can->ESR & CAN_ESR_BOFF)) { return 0; } if (k_mutex_lock(&data->inst_mutex, K_FOREVER)) { return -EAGAIN; } ret = can_stm32_enter_init_mode(can); if (ret) { goto done; } can_stm32_leave_init_mode(can); start_time = k_uptime_ticks(); while (can->ESR & CAN_ESR_BOFF) { if (!K_TIMEOUT_EQ(timeout, K_FOREVER) && k_uptime_ticks() - start_time >= timeout.ticks) { goto done; } } ret = 0; done: k_mutex_unlock(&data->inst_mutex); return ret; } #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ static int can_stm32_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { const struct can_stm32_config *cfg = dev->config; struct can_stm32_data *data = dev->data; CAN_TypeDef *can = cfg->can; uint32_t transmit_status_register = 0; CAN_TxMailBox_TypeDef *mailbox = NULL; struct can_stm32_mailbox *mb = NULL; LOG_DBG("Sending %d bytes on %s. " "Id: 0x%x, " "ID type: %s, " "Remote Frame: %s" , frame->dlc, dev->name , frame->id , (frame->flags & CAN_FRAME_IDE) != 0 ? "extended" : "standard" , (frame->flags & CAN_FRAME_RTR) != 0 ? 
"yes" : "no"); if (frame->dlc > CAN_MAX_DLC) { LOG_ERR("DLC of %d exceeds maximum (%d)", frame->dlc, CAN_MAX_DLC); return -EINVAL; } if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if (!data->common.started) { return -ENETDOWN; } if (can->ESR & CAN_ESR_BOFF) { return -ENETUNREACH; } k_mutex_lock(&data->inst_mutex, K_FOREVER); transmit_status_register = can->TSR; while (!(transmit_status_register & CAN_TSR_TME)) { k_mutex_unlock(&data->inst_mutex); LOG_DBG("Transmit buffer full"); if (k_sem_take(&data->tx_int_sem, timeout)) { return -EAGAIN; } k_mutex_lock(&data->inst_mutex, K_FOREVER); transmit_status_register = can->TSR; } if (transmit_status_register & CAN_TSR_TME0) { LOG_DBG("Using TX mailbox 0"); mailbox = &can->sTxMailBox[0]; mb = &(data->mb0); } else if (transmit_status_register & CAN_TSR_TME1) { LOG_DBG("Using TX mailbox 1"); mailbox = &can->sTxMailBox[1]; mb = &data->mb1; } else if (transmit_status_register & CAN_TSR_TME2) { LOG_DBG("Using TX mailbox 2"); mailbox = &can->sTxMailBox[2]; mb = &data->mb2; } mb->tx_callback = callback; mb->callback_arg = user_data; /* mailbox identifier register setup */ mailbox->TIR &= CAN_TI0R_TXRQ; if ((frame->flags & CAN_FRAME_IDE) != 0) { mailbox->TIR |= (frame->id << CAN_TI0R_EXID_Pos) | CAN_TI0R_IDE; } else { mailbox->TIR |= (frame->id << CAN_TI0R_STID_Pos); } if ((frame->flags & CAN_FRAME_RTR) != 0) { mailbox->TIR |= CAN_TI1R_RTR; } else { mailbox->TDLR = frame->data_32[0]; mailbox->TDHR = frame->data_32[1]; } mailbox->TDTR = (mailbox->TDTR & ~CAN_TDT1R_DLC) | ((frame->dlc & 0xF) << CAN_TDT1R_DLC_Pos); mailbox->TIR |= CAN_TI0R_TXRQ; k_mutex_unlock(&data->inst_mutex); return 0; } static void can_stm32_set_filter_bank(int filter_id, CAN_FilterRegister_TypeDef *filter_reg, bool ide, uint32_t id, uint32_t mask) { if (ide) { filter_reg->FR1 = id; filter_reg->FR2 = mask; } else { if ((filter_id - CONFIG_CAN_MAX_EXT_ID_FILTER) % 2 == 0) { /* even std filter id: first 1/2 bank */ filter_reg->FR1 = id | (mask << 16); } else { /* uneven std filter id: first 1/2 bank */ filter_reg->FR2 = id | (mask << 16); } } } static inline uint32_t can_stm32_filter_to_std_mask(const struct can_filter *filter) { uint32_t rtr_mask = !IS_ENABLED(CONFIG_CAN_ACCEPT_RTR); return (filter->mask << CAN_STM32_FIRX_STD_ID_POS) | (rtr_mask << CAN_STM32_FIRX_STD_RTR_POS) | (1U << CAN_STM32_FIRX_STD_IDE_POS); } static inline uint32_t can_stm32_filter_to_ext_mask(const struct can_filter *filter) { uint32_t rtr_mask = !IS_ENABLED(CONFIG_CAN_ACCEPT_RTR); return (filter->mask << CAN_STM32_FIRX_EXT_EXT_ID_POS) | (rtr_mask << CAN_STM32_FIRX_EXT_RTR_POS) | (1U << CAN_STM32_FIRX_EXT_IDE_POS); } static inline uint32_t can_stm32_filter_to_std_id(const struct can_filter *filter) { return (filter->id << CAN_STM32_FIRX_STD_ID_POS); } static inline uint32_t can_stm32_filter_to_ext_id(const struct can_filter *filter) { return (filter->id << CAN_STM32_FIRX_EXT_EXT_ID_POS) | (1U << CAN_STM32_FIRX_EXT_IDE_POS); } static inline int can_stm32_set_filter(const struct device *dev, const struct can_filter *filter) { const struct can_stm32_config *cfg = dev->config; struct can_stm32_data *data = dev->data; CAN_TypeDef *can = cfg->master_can; uint32_t mask = 0U; uint32_t id = 0U; int filter_id = -ENOSPC; int bank_offset = 0; int bank_num; if (cfg->can != cfg->master_can) { /* CAN slave instance: start with offset */ bank_offset = CAN_STM32_NUM_FILTER_BANKS; } if ((filter->flags & CAN_FILTER_IDE) != 0) { for (int i 
= 0; i < CONFIG_CAN_MAX_EXT_ID_FILTER; i++) { if (data->rx_cb_ext[i] == NULL) { id = can_stm32_filter_to_ext_id(filter); mask = can_stm32_filter_to_ext_mask(filter); filter_id = i; bank_num = bank_offset + i; break; } } } else { for (int i = 0; i < CONFIG_CAN_MAX_STD_ID_FILTER; i++) { if (data->rx_cb_std[i] == NULL) { id = can_stm32_filter_to_std_id(filter); mask = can_stm32_filter_to_std_mask(filter); filter_id = CONFIG_CAN_MAX_EXT_ID_FILTER + i; bank_num = bank_offset + CONFIG_CAN_MAX_EXT_ID_FILTER + i / 2; break; } } } if (filter_id != -ENOSPC) { LOG_DBG("Adding filter_id %d, CAN ID: 0x%x, mask: 0x%x", filter_id, filter->id, filter->mask); /* set the filter init mode */ can->FMR |= CAN_FMR_FINIT; can_stm32_set_filter_bank(filter_id, &can->sFilterRegister[bank_num], (filter->flags & CAN_FILTER_IDE) != 0, id, mask); can->FA1R |= 1U << bank_num; can->FMR &= ~(CAN_FMR_FINIT); } else { LOG_WRN("No free filter left"); } return filter_id; } /* * This driver uses masked mode for all filters (CAN_FM1R left at reset value * 0x00) in order to simplify mapping between filter match index from the FIFOs * and array index for the callbacks. All ext ID filters are stored in the * banks below CONFIG_CAN_MAX_EXT_ID_FILTER, followed by the std ID filters, * which consume only 1/2 bank per filter. * * The more complicated list mode must be implemented if someone requires more * than 28 std ID or 14 ext ID filters. * * Currently, all filter banks are assigned to FIFO 0 and FIFO 1 is not used. */ static int can_stm32_add_rx_filter(const struct device *dev, can_rx_callback_t cb, void *cb_arg, const struct can_filter *filter) { struct can_stm32_data *data = dev->data; int filter_id; if ((filter->flags & ~(CAN_FILTER_IDE)) != 0) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } k_mutex_lock(&filter_mutex, K_FOREVER); k_mutex_lock(&data->inst_mutex, K_FOREVER); filter_id = can_stm32_set_filter(dev, filter); if (filter_id >= 0) { if ((filter->flags & CAN_FILTER_IDE) != 0) { data->rx_cb_ext[filter_id] = cb; data->cb_arg_ext[filter_id] = cb_arg; } else { data->rx_cb_std[filter_id - CONFIG_CAN_MAX_EXT_ID_FILTER] = cb; data->cb_arg_std[filter_id - CONFIG_CAN_MAX_EXT_ID_FILTER] = cb_arg; } } k_mutex_unlock(&data->inst_mutex); k_mutex_unlock(&filter_mutex); return filter_id; } static void can_stm32_remove_rx_filter(const struct device *dev, int filter_id) { const struct can_stm32_config *cfg = dev->config; struct can_stm32_data *data = dev->data; CAN_TypeDef *can = cfg->master_can; bool ide; int bank_offset = 0; int bank_num; bool bank_unused; if (filter_id < 0 || filter_id >= CAN_STM32_MAX_FILTER_ID) { LOG_ERR("filter ID %d out of bounds", filter_id); return; } k_mutex_lock(&filter_mutex, K_FOREVER); k_mutex_lock(&data->inst_mutex, K_FOREVER); if (cfg->can != cfg->master_can) { bank_offset = CAN_STM32_NUM_FILTER_BANKS; } if (filter_id < CONFIG_CAN_MAX_EXT_ID_FILTER) { ide = true; bank_num = bank_offset + filter_id; data->rx_cb_ext[filter_id] = NULL; data->cb_arg_ext[filter_id] = NULL; bank_unused = true; } else { int filter_index = filter_id - CONFIG_CAN_MAX_EXT_ID_FILTER; ide = false; bank_num = bank_offset + CONFIG_CAN_MAX_EXT_ID_FILTER + (filter_id - CONFIG_CAN_MAX_EXT_ID_FILTER) / 2; data->rx_cb_std[filter_index] = NULL; data->cb_arg_std[filter_index] = NULL; if (filter_index % 2 == 1) { bank_unused = data->rx_cb_std[filter_index - 1] == NULL; } else if (filter_index + 1 < CONFIG_CAN_MAX_STD_ID_FILTER) { bank_unused = data->rx_cb_std[filter_index + 1] == NULL; } else { bank_unused 
= true; } } LOG_DBG("Removing filter_id %d, ide %d", filter_id, ide); can->FMR |= CAN_FMR_FINIT; can_stm32_set_filter_bank(filter_id, &can->sFilterRegister[bank_num], ide, 0, 0xFFFFFFFF); if (bank_unused) { can->FA1R &= ~(1U << bank_num); LOG_DBG("Filter bank %d is unused -> deactivate", bank_num); } can->FMR &= ~(CAN_FMR_FINIT); k_mutex_unlock(&data->inst_mutex); k_mutex_unlock(&filter_mutex); } static const struct can_driver_api can_api_funcs = { .get_capabilities = can_stm32_get_capabilities, .start = can_stm32_start, .stop = can_stm32_stop, .set_mode = can_stm32_set_mode, .set_timing = can_stm32_set_timing, .send = can_stm32_send, .add_rx_filter = can_stm32_add_rx_filter, .remove_rx_filter = can_stm32_remove_rx_filter, .get_state = can_stm32_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_stm32_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .set_state_change_callback = can_stm32_set_state_change_callback, .get_core_clock = can_stm32_get_core_clock, .get_max_filters = can_stm32_get_max_filters, .timing_min = { .sjw = 0x1, .prop_seg = 0x00, .phase_seg1 = 0x01, .phase_seg2 = 0x01, .prescaler = 0x01 }, .timing_max = { .sjw = 0x04, .prop_seg = 0x00, .phase_seg1 = 0x10, .phase_seg2 = 0x08, .prescaler = 0x400 } }; #ifdef CONFIG_SOC_SERIES_STM32F0X #define CAN_STM32_IRQ_INST(inst) \ static void config_can_##inst##_irq(CAN_TypeDef *can) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), \ DT_INST_IRQ(inst, priority), \ can_stm32_isr, DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQN(inst)); \ can->IER |= CAN_IER_TMEIE | CAN_IER_ERRIE | CAN_IER_FMPIE0 | \ CAN_IER_FMPIE1 | CAN_IER_BOFIE; \ if (IS_ENABLED(CONFIG_CAN_STATS)) { \ can->IER |= CAN_IER_LECIE; \ } \ } #else #define CAN_STM32_IRQ_INST(inst) \ static void config_can_##inst##_irq(CAN_TypeDef *can) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, rx0, irq), \ DT_INST_IRQ_BY_NAME(inst, rx0, priority), \ can_stm32_rx_isr, DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, rx0, irq)); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, tx, irq), \ DT_INST_IRQ_BY_NAME(inst, tx, priority), \ can_stm32_tx_isr, DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, tx, irq)); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, sce, irq), \ DT_INST_IRQ_BY_NAME(inst, sce, priority), \ can_stm32_state_change_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, sce, irq)); \ can->IER |= CAN_IER_TMEIE | CAN_IER_ERRIE | CAN_IER_FMPIE0 | \ CAN_IER_FMPIE1 | CAN_IER_BOFIE; \ if (IS_ENABLED(CONFIG_CAN_STATS)) { \ can->IER |= CAN_IER_LECIE; \ } \ } #endif /* CONFIG_SOC_SERIES_STM32F0X */ #define CAN_STM32_CONFIG_INST(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ static const struct can_stm32_config can_stm32_cfg_##inst = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, 1000000), \ .can = (CAN_TypeDef *)DT_INST_REG_ADDR(inst), \ .master_can = (CAN_TypeDef *)DT_INST_PROP_OR(inst, \ master_can_reg, DT_INST_REG_ADDR(inst)), \ .pclken = { \ .enr = DT_INST_CLOCKS_CELL(inst, bits), \ .bus = DT_INST_CLOCKS_CELL(inst, bus), \ }, \ .config_irq = config_can_##inst##_irq, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ }; #define CAN_STM32_DATA_INST(inst) \ static struct can_stm32_data can_stm32_dev_data_##inst; #define CAN_STM32_DEFINE_INST(inst) \ CAN_DEVICE_DT_INST_DEFINE(inst, can_stm32_init, NULL, \ &can_stm32_dev_data_##inst, &can_stm32_cfg_##inst, \ POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_api_funcs); #define CAN_STM32_INST(inst) \ CAN_STM32_IRQ_INST(inst) \ CAN_STM32_CONFIG_INST(inst) \ CAN_STM32_DATA_INST(inst) \ 
CAN_STM32_DEFINE_INST(inst) DT_INST_FOREACH_STATUS_OKAY(CAN_STM32_INST) ```
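The block comment inside the driver explains the masked-mode mapping between filter IDs, filter banks, and half-banks. The following standalone illustration of that arithmetic mirrors the logic in `can_stm32_set_filter()`/`can_stm32_remove_rx_filter()`; the helper name and struct are hypothetical, and `CONFIG_CAN_MAX_EXT_ID_FILTER` is assumed to come from the Zephyr build:

```c
/*
 * Illustrative sketch (not part of the driver): filter_id to filter-bank
 * mapping used above. Extended-ID filters occupy one full bank each
 * (banks 0..CONFIG_CAN_MAX_EXT_ID_FILTER-1); standard-ID filters are packed
 * two per bank after them. bank_offset is CAN_STM32_NUM_FILTER_BANKS (14)
 * for the slave CAN instance sharing the master's filter registers.
 */
#include <stdbool.h>

struct bxcan_filter_slot {
	int bank;      /* filter bank index into sFilterRegister[] */
	int half;      /* 0 = FR1, 1 = FR2 (standard-ID filters only) */
	bool extended; /* true when the slot holds a 32-bit extended-ID filter */
};

static struct bxcan_filter_slot bxcan_slot_from_filter_id(int filter_id, int bank_offset)
{
	struct bxcan_filter_slot slot;

	if (filter_id < CONFIG_CAN_MAX_EXT_ID_FILTER) {
		slot.extended = true;
		slot.bank = bank_offset + filter_id;
		slot.half = 0;
	} else {
		int index = filter_id - CONFIG_CAN_MAX_EXT_ID_FILTER;

		slot.extended = false;
		slot.bank = bank_offset + CONFIG_CAN_MAX_EXT_ID_FILTER + index / 2;
		slot.half = index % 2;
	}

	return slot;
}
```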
/content/code_sandbox/drivers/can/can_stm32_bxcan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,598
```c /* * */ #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(can_mcan, CONFIG_CAN_LOG_LEVEL); #define CAN_INIT_TIMEOUT_MS 100 int can_mcan_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct can_mcan_config *config = dev->config; int err; err = config->ops->read_reg(dev, reg, val); if (err != 0) { LOG_ERR("failed to read reg 0x%03x (err %d)", reg, err); } return err; } int can_mcan_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct can_mcan_config *config = dev->config; int err; err = config->ops->write_reg(dev, reg, val); if (err != 0) { LOG_ERR("failed to write reg 0x%03x (err %d)", reg, err); } return err; } static int can_mcan_exit_sleep_mode(const struct device *dev) { struct can_mcan_data *data = dev->data; uint32_t start_time; uint32_t cccr; int err; k_mutex_lock(&data->lock, K_FOREVER); err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } cccr &= ~CAN_MCAN_CCCR_CSR; err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr); if (err != 0) { goto unlock; } start_time = k_cycle_get_32(); err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } while ((cccr & CAN_MCAN_CCCR_CSA) == CAN_MCAN_CCCR_CSA) { if (k_cycle_get_32() - start_time > k_ms_to_cyc_ceil32(CAN_INIT_TIMEOUT_MS)) { cccr |= CAN_MCAN_CCCR_CSR; err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr); if (err != 0) { goto unlock; } err = -EAGAIN; goto unlock; } err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } } unlock: k_mutex_unlock(&data->lock); return err; } static int can_mcan_enter_init_mode(const struct device *dev, k_timeout_t timeout) { struct can_mcan_data *data = dev->data; int64_t start_time; uint32_t cccr; int err; k_mutex_lock(&data->lock, K_FOREVER); err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } cccr |= CAN_MCAN_CCCR_INIT; err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr); if (err != 0) { goto unlock; } start_time = k_uptime_ticks(); err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } while ((cccr & CAN_MCAN_CCCR_INIT) == 0U) { if (k_uptime_ticks() - start_time > timeout.ticks) { cccr &= ~CAN_MCAN_CCCR_INIT; err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr); if (err != 0) { goto unlock; } err = -EAGAIN; goto unlock; } err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } } unlock: k_mutex_unlock(&data->lock); return err; } static int can_mcan_leave_init_mode(const struct device *dev, k_timeout_t timeout) { struct can_mcan_data *data = dev->data; int64_t start_time; uint32_t cccr; int err; k_mutex_lock(&data->lock, K_FOREVER); err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } cccr &= ~CAN_MCAN_CCCR_INIT; err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr); if (err != 0) { goto unlock; } start_time = k_uptime_ticks(); err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } while ((cccr & CAN_MCAN_CCCR_INIT) != 0U) { if (k_uptime_ticks() - start_time > timeout.ticks) { err = -EAGAIN; goto unlock; } err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } } unlock: k_mutex_unlock(&data->lock); return err; } int can_mcan_set_timing(const struct device *dev, const struct can_timing *timing) { struct 
can_mcan_data *data = dev->data; uint32_t nbtp = 0U; int err; if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->lock, K_FOREVER); nbtp |= FIELD_PREP(CAN_MCAN_NBTP_NSJW, timing->sjw - 1UL) | FIELD_PREP(CAN_MCAN_NBTP_NTSEG1, timing->phase_seg1 - 1UL) | FIELD_PREP(CAN_MCAN_NBTP_NTSEG2, timing->phase_seg2 - 1UL) | FIELD_PREP(CAN_MCAN_NBTP_NBRP, timing->prescaler - 1UL); err = can_mcan_write_reg(dev, CAN_MCAN_NBTP, nbtp); if (err != 0) { goto unlock; } unlock: k_mutex_unlock(&data->lock); return err; } #ifdef CONFIG_CAN_FD_MODE int can_mcan_set_timing_data(const struct device *dev, const struct can_timing *timing_data) { const uint8_t tdco_max = FIELD_GET(CAN_MCAN_TDCR_TDCO, CAN_MCAN_TDCR_TDCO); struct can_mcan_data *data = dev->data; uint32_t dbtp = 0U; uint8_t tdco; int err; if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->lock, K_FOREVER); dbtp |= FIELD_PREP(CAN_MCAN_DBTP_DSJW, timing_data->sjw - 1UL) | FIELD_PREP(CAN_MCAN_DBTP_DTSEG1, timing_data->phase_seg1 - 1UL) | FIELD_PREP(CAN_MCAN_DBTP_DTSEG2, timing_data->phase_seg2 - 1UL) | FIELD_PREP(CAN_MCAN_DBTP_DBRP, timing_data->prescaler - 1UL); if (timing_data->prescaler == 1U || timing_data->prescaler == 2U) { /* TDC can only be enabled if DBRP = { 0, 1 } */ dbtp |= CAN_MCAN_DBTP_TDC; /* Set TDC offset for correct location of the Secondary Sample Point (SSP) */ tdco = CAN_CALC_TDCO(timing_data, 0U, tdco_max); LOG_DBG("TDC enabled, using TDCO %u", tdco); err = can_mcan_write_reg(dev, CAN_MCAN_TDCR, FIELD_PREP(CAN_MCAN_TDCR_TDCO, tdco)); if (err != 0) { goto unlock; } } else { LOG_DBG("TDC cannot be enabled, prescaler value %u too high", timing_data->prescaler); } err = can_mcan_write_reg(dev, CAN_MCAN_DBTP, dbtp); if (err != 0) { goto unlock; } unlock: k_mutex_unlock(&data->lock); return err; } #endif /* CONFIG_CAN_FD_MODE */ int can_mcan_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY; if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { *cap |= CAN_MODE_MANUAL_RECOVERY; } if (IS_ENABLED(CONFIG_CAN_FD_MODE)) { *cap |= CAN_MODE_FD; } return 0; } int can_mcan_start(const struct device *dev) { const struct can_mcan_config *config = dev->config; struct can_mcan_data *data = dev->data; int err; if (data->common.started) { return -EALREADY; } if (config->common.phy != NULL) { err = can_transceiver_enable(config->common.phy, data->common.mode); if (err != 0) { LOG_ERR("failed to enable CAN transceiver (err %d)", err); return err; } } /* Reset statistics */ CAN_STATS_RESET(dev); err = can_mcan_leave_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS)); if (err != 0) { LOG_ERR("failed to leave init mode"); if (config->common.phy != NULL) { /* Attempt to disable the CAN transceiver in case of error */ (void)can_transceiver_disable(config->common.phy); } return -EIO; } data->common.started = true; return 0; } int can_mcan_stop(const struct device *dev) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; struct can_mcan_data *data = dev->data; can_tx_callback_t tx_cb; uint32_t tx_idx; int err; if (!data->common.started) { return -EALREADY; } /* CAN transmissions are automatically stopped when entering init mode */ err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS)); if (err != 0) { LOG_ERR("Failed to enter init mode"); return -EIO; } if (config->common.phy != NULL) { err = can_transceiver_disable(config->common.phy); if (err != 0) { LOG_ERR("failed to disable CAN 
transceiver (err %d)", err); return err; } } can_mcan_enable_configuration_change(dev); data->common.started = false; for (tx_idx = 0U; tx_idx < cbs->num_tx; tx_idx++) { tx_cb = cbs->tx[tx_idx].function; if (tx_cb != NULL) { cbs->tx[tx_idx].function = NULL; tx_cb(dev, -ENETDOWN, cbs->tx[tx_idx].user_data); k_sem_give(&data->tx_sem); } } return 0; } int can_mcan_set_mode(const struct device *dev, can_mode_t mode) { can_mode_t supported = CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY; struct can_mcan_data *data = dev->data; uint32_t cccr; uint32_t test; int err; if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { supported |= CAN_MODE_MANUAL_RECOVERY; } if (IS_ENABLED(CONFIG_CAN_FD_MODE)) { supported |= CAN_MODE_FD; } if ((mode & ~(supported)) != 0U) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->lock, K_FOREVER); err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } err = can_mcan_read_reg(dev, CAN_MCAN_TEST, &test); if (err != 0) { goto unlock; } if ((mode & CAN_MODE_LOOPBACK) != 0) { /* Loopback mode */ cccr |= CAN_MCAN_CCCR_TEST; test |= CAN_MCAN_TEST_LBCK; } else { cccr &= ~CAN_MCAN_CCCR_TEST; } if ((mode & CAN_MODE_LISTENONLY) != 0) { /* Bus monitoring mode */ cccr |= CAN_MCAN_CCCR_MON; } else { cccr &= ~CAN_MCAN_CCCR_MON; } #ifdef CONFIG_CAN_FD_MODE if ((mode & CAN_MODE_FD) != 0) { cccr |= CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE; } else { cccr &= ~(CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE); } #endif /* CONFIG_CAN_FD_MODE */ err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr); if (err != 0) { goto unlock; } err = can_mcan_write_reg(dev, CAN_MCAN_TEST, test); if (err != 0) { goto unlock; } data->common.mode = mode; unlock: k_mutex_unlock(&data->lock); return err; } static void can_mcan_state_change_handler(const struct device *dev) { const struct can_mcan_config *config = dev->config; struct can_mcan_data *data = dev->data; const can_state_change_callback_t state_cb = data->common.state_change_cb; void *state_cb_data = data->common.state_change_cb_user_data; const struct can_mcan_callbacks *cbs = config->callbacks; can_tx_callback_t tx_cb; uint32_t tx_idx; struct can_bus_err_cnt err_cnt; enum can_state state; uint32_t cccr; int err; err = can_mcan_get_state(dev, &state, &err_cnt); if (err != 0) { return; } if (state_cb != NULL) { state_cb(dev, state, err_cnt, state_cb_data); } if (state == CAN_STATE_BUS_OFF) { /* Request all TX buffers to be cancelled */ err = can_mcan_write_reg(dev, CAN_MCAN_TXBCR, CAN_MCAN_TXBCR_CR); if (err != 0) { return; } /* Call all TX queue callbacks with -ENETUNREACH */ for (tx_idx = 0U; tx_idx < cbs->num_tx; tx_idx++) { tx_cb = cbs->tx[tx_idx].function; if (tx_cb != NULL) { cbs->tx[tx_idx].function = NULL; tx_cb(dev, -ENETUNREACH, cbs->tx[tx_idx].user_data); k_sem_give(&data->tx_sem); } } if (!IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE) || (data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { /* * Request leaving init mode, but do not take the lock (as we are in ISR * context), nor wait for the result. 
*/ err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { return; } cccr &= ~CAN_MCAN_CCCR_INIT; err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr); if (err != 0) { return; } } } } static void can_mcan_tx_event_handler(const struct device *dev) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; struct can_mcan_data *data = dev->data; struct can_mcan_tx_event_fifo tx_event; can_tx_callback_t tx_cb; void *user_data; uint32_t event_idx; uint32_t tx_idx; uint32_t txefs; int err; err = can_mcan_read_reg(dev, CAN_MCAN_TXEFS, &txefs); if (err != 0) { return; } while ((txefs & CAN_MCAN_TXEFS_EFFL) != 0U) { event_idx = FIELD_GET(CAN_MCAN_TXEFS_EFGI, txefs); err = can_mcan_read_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO] + event_idx * sizeof(struct can_mcan_tx_event_fifo), &tx_event, sizeof(struct can_mcan_tx_event_fifo)); if (err != 0) { LOG_ERR("failed to read tx event fifo (err %d)", err); return; } tx_idx = tx_event.mm; /* Acknowledge TX event */ err = can_mcan_write_reg(dev, CAN_MCAN_TXEFA, event_idx); if (err != 0) { return; } __ASSERT_NO_MSG(tx_idx < cbs->num_tx); tx_cb = cbs->tx[tx_idx].function; user_data = cbs->tx[tx_idx].user_data; cbs->tx[tx_idx].function = NULL; k_sem_give(&data->tx_sem); tx_cb(dev, 0, user_data); err = can_mcan_read_reg(dev, CAN_MCAN_TXEFS, &txefs); if (err != 0) { return; } } } #ifdef CONFIG_CAN_STATS static void can_mcan_lec_update_stats(const struct device *dev, enum can_mcan_psr_lec lec) { switch (lec) { case CAN_MCAN_PSR_LEC_STUFF_ERROR: CAN_STATS_STUFF_ERROR_INC(dev); break; case CAN_MCAN_PSR_LEC_FORM_ERROR: CAN_STATS_FORM_ERROR_INC(dev); break; case CAN_MCAN_PSR_LEC_ACK_ERROR: CAN_STATS_ACK_ERROR_INC(dev); break; case CAN_MCAN_PSR_LEC_BIT1_ERROR: CAN_STATS_BIT1_ERROR_INC(dev); break; case CAN_MCAN_PSR_LEC_BIT0_ERROR: CAN_STATS_BIT0_ERROR_INC(dev); break; case CAN_MCAN_PSR_LEC_CRC_ERROR: CAN_STATS_CRC_ERROR_INC(dev); break; case CAN_MCAN_PSR_LEC_NO_ERROR: case CAN_MCAN_PSR_LEC_NO_CHANGE: default: break; } } #endif /* CONFIG_CAN_STATS */ static int can_mcan_read_psr(const struct device *dev, uint32_t *val) { /* Reading the lower byte of the PSR register clears the protocol last * error codes (LEC). To avoid missing errors, this function should be * used whenever the PSR register is read. 
*/ int err = can_mcan_read_reg(dev, CAN_MCAN_PSR, val); if (err != 0) { return err; } #ifdef CONFIG_CAN_STATS enum can_mcan_psr_lec lec; lec = FIELD_GET(CAN_MCAN_PSR_LEC, *val); can_mcan_lec_update_stats(dev, lec); #ifdef CONFIG_CAN_FD_MODE lec = FIELD_GET(CAN_MCAN_PSR_DLEC, *val); can_mcan_lec_update_stats(dev, lec); #endif #endif /* CONFIG_CAN_STATS */ return 0; } void can_mcan_line_0_isr(const struct device *dev) { const uint32_t events = CAN_MCAN_IR_BO | CAN_MCAN_IR_EP | CAN_MCAN_IR_EW | CAN_MCAN_IR_TEFN | CAN_MCAN_IR_TEFL | CAN_MCAN_IR_ARA | CAN_MCAN_IR_MRAF | CAN_MCAN_IR_PEA | CAN_MCAN_IR_PED; struct can_mcan_data *data = dev->data; uint32_t ir; int err; err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir); if (err != 0) { return; } while ((ir & events) != 0U) { err = can_mcan_write_reg(dev, CAN_MCAN_IR, ir & events); if (err != 0) { return; } if ((ir & (CAN_MCAN_IR_BO | CAN_MCAN_IR_EP | CAN_MCAN_IR_EW)) != 0U) { can_mcan_state_change_handler(dev); } /* TX event FIFO new entry */ if ((ir & CAN_MCAN_IR_TEFN) != 0U) { can_mcan_tx_event_handler(dev); } if ((ir & CAN_MCAN_IR_TEFL) != 0U) { LOG_ERR("TX FIFO element lost"); k_sem_give(&data->tx_sem); } if ((ir & CAN_MCAN_IR_ARA) != 0U) { LOG_ERR("Access to reserved address"); } if ((ir & CAN_MCAN_IR_MRAF) != 0U) { LOG_ERR("Message RAM access failure"); } #ifdef CONFIG_CAN_STATS if ((ir & (CAN_MCAN_IR_PEA | CAN_MCAN_IR_PED)) != 0U) { uint32_t reg; /* This function automatically updates protocol error stats */ can_mcan_read_psr(dev, &reg); } #endif err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir); if (err != 0) { return; } } } static void can_mcan_get_message(const struct device *dev, uint16_t fifo_offset, uint16_t fifo_status_reg, uint16_t fifo_ack_reg) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; struct can_mcan_rx_fifo_hdr hdr; struct can_frame frame = {0}; can_rx_callback_t cb; void *user_data; uint32_t get_idx; uint32_t filt_idx; int data_length; uint32_t fifo_status; int err; err = can_mcan_read_reg(dev, fifo_status_reg, &fifo_status); if (err != 0) { return; } while (FIELD_GET(CAN_MCAN_RXF0S_F0FL, fifo_status) != 0U) { get_idx = FIELD_GET(CAN_MCAN_RXF0S_F0GI, fifo_status); err = can_mcan_read_mram(dev, fifo_offset + get_idx * sizeof(struct can_mcan_rx_fifo) + offsetof(struct can_mcan_rx_fifo, hdr), &hdr, sizeof(struct can_mcan_rx_fifo_hdr)); if (err != 0) { LOG_ERR("failed to read Rx FIFO header (err %d)", err); return; } frame.dlc = hdr.dlc; if (hdr.rtr != 0) { frame.flags |= CAN_FRAME_RTR; } if (hdr.fdf != 0) { frame.flags |= CAN_FRAME_FDF; } if (hdr.brs != 0) { frame.flags |= CAN_FRAME_BRS; } if (hdr.esi != 0) { frame.flags |= CAN_FRAME_ESI; } #ifdef CONFIG_CAN_RX_TIMESTAMP frame.timestamp = hdr.rxts; #endif /* CONFIG_CAN_RX_TIMESTAMP */ filt_idx = hdr.fidx; if (hdr.xtd != 0) { frame.id = hdr.ext_id; frame.flags |= CAN_FRAME_IDE; } else { frame.id = hdr.std_id; } data_length = can_dlc_to_bytes(frame.dlc); if (data_length <= sizeof(frame.data)) { if ((frame.flags & CAN_FRAME_RTR) == 0U && data_length != 0U) { err = can_mcan_read_mram(dev, fifo_offset + get_idx * sizeof(struct can_mcan_rx_fifo) + offsetof(struct can_mcan_rx_fifo, data_32), &frame.data_32, ROUND_UP(data_length, sizeof(uint32_t))); if (err != 0) { LOG_ERR("failed to read Rx FIFO data (err %d)", err); return; } } if ((frame.flags & CAN_FRAME_IDE) != 0) { LOG_DBG("Frame on filter %d, ID: 0x%x", filt_idx + cbs->num_std, frame.id); __ASSERT_NO_MSG(filt_idx < cbs->num_ext); cb = cbs->ext[filt_idx].function; 
user_data = cbs->ext[filt_idx].user_data; } else { LOG_DBG("Frame on filter %d, ID: 0x%x", filt_idx, frame.id); __ASSERT_NO_MSG(filt_idx < cbs->num_std); cb = cbs->std[filt_idx].function; user_data = cbs->std[filt_idx].user_data; } if (cb) { cb(dev, &frame, user_data); } else { LOG_DBG("cb missing"); } } else { LOG_ERR("Frame is too big"); } err = can_mcan_write_reg(dev, fifo_ack_reg, get_idx); if (err != 0) { return; } err = can_mcan_read_reg(dev, fifo_status_reg, &fifo_status); if (err != 0) { return; } } } void can_mcan_line_1_isr(const struct device *dev) { const struct can_mcan_config *config = dev->config; const uint32_t events = CAN_MCAN_IR_RF0N | CAN_MCAN_IR_RF1N | CAN_MCAN_IR_RF0L | CAN_MCAN_IR_RF1L; uint32_t ir; int err; err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir); if (err != 0) { return; } if ((ir & CAN_MCAN_IR_PEA) != 0U) { LOG_DBG("Protocol error in arbitration phase: ir: 0x%x", ir); } if ((ir & CAN_MCAN_IR_PED) != 0U) { LOG_DBG("Protocol error in data phase: ir: 0x%x", ir); } while ((ir & events) != 0U) { err = can_mcan_write_reg(dev, CAN_MCAN_IR, events & ir); if (err != 0) { return; } if ((ir & CAN_MCAN_IR_RF0N) != 0U) { LOG_DBG("RX FIFO0 INT"); can_mcan_get_message(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO0], CAN_MCAN_RXF0S, CAN_MCAN_RXF0A); } if ((ir & CAN_MCAN_IR_RF1N) != 0U) { LOG_DBG("RX FIFO1 INT"); can_mcan_get_message(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO1], CAN_MCAN_RXF1S, CAN_MCAN_RXF1A); } if ((ir & CAN_MCAN_IR_RF0L) != 0U) { LOG_ERR("Message lost on FIFO0"); CAN_STATS_RX_OVERRUN_INC(dev); } if ((ir & CAN_MCAN_IR_RF1L) != 0U) { LOG_ERR("Message lost on FIFO1"); CAN_STATS_RX_OVERRUN_INC(dev); } err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir); if (err != 0) { return; } } } int can_mcan_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { struct can_mcan_data *data = dev->data; uint32_t reg; int err; if (state != NULL) { err = can_mcan_read_psr(dev, &reg); if (err != 0) { return err; } if (!data->common.started) { *state = CAN_STATE_STOPPED; } else if ((reg & CAN_MCAN_PSR_BO) != 0U) { *state = CAN_STATE_BUS_OFF; } else if ((reg & CAN_MCAN_PSR_EP) != 0U) { *state = CAN_STATE_ERROR_PASSIVE; } else if ((reg & CAN_MCAN_PSR_EW) != 0U) { *state = CAN_STATE_ERROR_WARNING; } else { *state = CAN_STATE_ERROR_ACTIVE; } } if (err_cnt != NULL) { err = can_mcan_read_reg(dev, CAN_MCAN_ECR, &reg); if (err != 0) { return err; } err_cnt->tx_err_cnt = FIELD_GET(CAN_MCAN_ECR_TEC, reg); err_cnt->rx_err_cnt = FIELD_GET(CAN_MCAN_ECR_REC, reg); } return 0; } #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE int can_mcan_recover(const struct device *dev, k_timeout_t timeout) { struct can_mcan_data *data = dev->data; if (!data->common.started) { return -ENETDOWN; } if ((data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { return -ENOTSUP; } return can_mcan_leave_init_mode(dev, timeout); } #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ int can_mcan_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; struct can_mcan_data *data = dev->data; size_t data_length = can_dlc_to_bytes(frame->dlc); struct can_mcan_tx_buffer_hdr tx_hdr = { .rtr = (frame->flags & CAN_FRAME_RTR) != 0U ? 1U : 0U, .xtd = (frame->flags & CAN_FRAME_IDE) != 0U ? 1U : 0U, .esi = 0U, .dlc = frame->dlc, #ifdef CONFIG_CAN_FD_MODE .fdf = (frame->flags & CAN_FRAME_FDF) != 0U ? 
1U : 0U, .brs = (frame->flags & CAN_FRAME_BRS) != 0U ? 1U : 0U, #else /* CONFIG_CAN_FD_MODE */ .fdf = 0U, .brs = 0U, #endif /* !CONFIG_CAN_FD_MODE */ .efc = 1U, }; uint32_t put_idx = -1; uint32_t reg; int err; LOG_DBG("Sending %zu bytes. Id: 0x%x, ID type: %s %s %s %s", data_length, frame->id, (frame->flags & CAN_FRAME_IDE) != 0U ? "extended" : "standard", (frame->flags & CAN_FRAME_RTR) != 0U ? "RTR" : "", (frame->flags & CAN_FRAME_FDF) != 0U ? "FD frame" : "", (frame->flags & CAN_FRAME_BRS) != 0U ? "BRS" : ""); #ifdef CONFIG_CAN_FD_MODE if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR | CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if ((data->common.mode & CAN_MODE_FD) == 0U && ((frame->flags & (CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0U)) { LOG_ERR("CAN FD format not supported in non-FD mode"); return -ENOTSUP; } #else /* CONFIG_CAN_FD_MODE */ if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0U) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } #endif /* !CONFIG_CAN_FD_MODE */ if (data_length > sizeof(frame->data)) { LOG_ERR("data length (%zu) > max frame data length (%zu)", data_length, sizeof(frame->data)); return -EINVAL; } if ((frame->flags & CAN_FRAME_FDF) != 0U) { if (frame->dlc > CANFD_MAX_DLC) { LOG_ERR("DLC of %d for CAN FD format frame", frame->dlc); return -EINVAL; } } else { if (frame->dlc > CAN_MAX_DLC) { LOG_ERR("DLC of %d for non-FD format frame", frame->dlc); return -EINVAL; } } if (!data->common.started) { return -ENETDOWN; } err = can_mcan_read_psr(dev, &reg); if (err != 0) { return err; } if ((reg & CAN_MCAN_PSR_BO) != 0U) { return -ENETUNREACH; } err = k_sem_take(&data->tx_sem, timeout); if (err != 0) { return -EAGAIN; } k_mutex_lock(&data->tx_mtx, K_FOREVER); /* Acquire a free TX buffer */ for (int i = 0; i < cbs->num_tx; i++) { if (cbs->tx[i].function == NULL) { put_idx = i; break; } } tx_hdr.mm = put_idx; if ((frame->flags & CAN_FRAME_IDE) != 0U) { tx_hdr.ext_id = frame->id; } else { tx_hdr.std_id = frame->id & CAN_STD_ID_MASK; } err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER] + put_idx * sizeof(struct can_mcan_tx_buffer) + offsetof(struct can_mcan_tx_buffer, hdr), &tx_hdr, sizeof(struct can_mcan_tx_buffer_hdr)); if (err != 0) { LOG_ERR("failed to write Tx Buffer header (err %d)", err); goto err_unlock; } if ((frame->flags & CAN_FRAME_RTR) == 0U && data_length != 0U) { err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER] + put_idx * sizeof(struct can_mcan_tx_buffer) + offsetof(struct can_mcan_tx_buffer, data_32), &frame->data_32, ROUND_UP(data_length, sizeof(uint32_t))); if (err != 0) { LOG_ERR("failed to write Tx Buffer data (err %d)", err); goto err_unlock; } } __ASSERT_NO_MSG(put_idx < cbs->num_tx); cbs->tx[put_idx].function = callback; cbs->tx[put_idx].user_data = user_data; err = can_mcan_write_reg(dev, CAN_MCAN_TXBAR, BIT(put_idx)); if (err != 0) { cbs->tx[put_idx].function = NULL; goto err_unlock; } k_mutex_unlock(&data->tx_mtx); return 0; err_unlock: k_mutex_unlock(&data->tx_mtx); k_sem_give(&data->tx_sem); return err; } int can_mcan_get_max_filters(const struct device *dev, bool ide) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; if (ide) { return cbs->num_ext; } else { return cbs->num_std; } } /* Use masked configuration only for simplicity. 
If someone needs more than * 28 standard filters, dual mode needs to be implemented. * Dual mode gets tricky, because we can only activate both filters. * If one of the IDs is not used anymore, we would need to mark it as unused. */ int can_mcan_add_rx_filter_std(const struct device *dev, can_rx_callback_t callback, void *user_data, const struct can_filter *filter) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; struct can_mcan_data *data = dev->data; struct can_mcan_std_filter filter_element = { .sfid1 = filter->id, .sfid2 = filter->mask, .sft = CAN_MCAN_SFT_CLASSIC }; int filter_id = -ENOSPC; int err; int i; k_mutex_lock(&data->lock, K_FOREVER); for (i = 0; i < cbs->num_std; i++) { if (cbs->std[i].function == NULL) { filter_id = i; break; } } if (filter_id == -ENOSPC) { LOG_WRN("No free standard id filter left"); k_mutex_unlock(&data->lock); return -ENOSPC; } /* TODO proper fifo balancing */ filter_element.sfec = filter_id & 0x01 ? CAN_MCAN_XFEC_FIFO1 : CAN_MCAN_XFEC_FIFO0; err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER] + filter_id * sizeof(struct can_mcan_std_filter), &filter_element, sizeof(filter_element)); if (err != 0) { LOG_ERR("failed to write std filter element (err %d)", err); return err; } k_mutex_unlock(&data->lock); LOG_DBG("Attached std filter at %d", filter_id); __ASSERT_NO_MSG(filter_id < cbs->num_std); cbs->std[filter_id].function = callback; cbs->std[filter_id].user_data = user_data; return filter_id; } static int can_mcan_add_rx_filter_ext(const struct device *dev, can_rx_callback_t callback, void *user_data, const struct can_filter *filter) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; struct can_mcan_data *data = dev->data; struct can_mcan_ext_filter filter_element = { .efid2 = filter->mask, .efid1 = filter->id, .eft = CAN_MCAN_EFT_CLASSIC }; int filter_id = -ENOSPC; int err; int i; k_mutex_lock(&data->lock, K_FOREVER); for (i = 0; i < cbs->num_ext; i++) { if (cbs->ext[i].function == NULL) { filter_id = i; break; } } if (filter_id == -ENOSPC) { LOG_WRN("No free extended id filter left"); k_mutex_unlock(&data->lock); return -ENOSPC; } /* TODO proper fifo balancing */ filter_element.efec = filter_id & 0x01 ? 
CAN_MCAN_XFEC_FIFO1 : CAN_MCAN_XFEC_FIFO0; err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER] + filter_id * sizeof(struct can_mcan_ext_filter), &filter_element, sizeof(filter_element)); if (err != 0) { LOG_ERR("failed to write std filter element (err %d)", err); return err; } k_mutex_unlock(&data->lock); LOG_DBG("Attached ext filter at %d", filter_id); __ASSERT_NO_MSG(filter_id < cbs->num_ext); cbs->ext[filter_id].function = callback; cbs->ext[filter_id].user_data = user_data; return filter_id; } int can_mcan_add_rx_filter(const struct device *dev, can_rx_callback_t callback, void *user_data, const struct can_filter *filter) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; int filter_id; if ((filter->flags & ~(CAN_FILTER_IDE)) != 0U) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } if ((filter->flags & CAN_FILTER_IDE) != 0U) { filter_id = can_mcan_add_rx_filter_ext(dev, callback, user_data, filter); if (filter_id >= 0) { filter_id += cbs->num_std; } } else { filter_id = can_mcan_add_rx_filter_std(dev, callback, user_data, filter); } return filter_id; } void can_mcan_remove_rx_filter(const struct device *dev, int filter_id) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; struct can_mcan_data *data = dev->data; int err; if (filter_id < 0) { LOG_ERR("filter ID %d out of bounds", filter_id); return; } k_mutex_lock(&data->lock, K_FOREVER); if (filter_id >= cbs->num_std) { filter_id -= cbs->num_std; if (filter_id >= cbs->num_ext) { LOG_ERR("filter ID %d out of bounds", filter_id); k_mutex_unlock(&data->lock); return; } cbs->ext[filter_id].function = NULL; cbs->ext[filter_id].user_data = NULL; err = can_mcan_clear_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER] + filter_id * sizeof(struct can_mcan_ext_filter), sizeof(struct can_mcan_ext_filter)); if (err != 0) { LOG_ERR("failed to clear ext filter element (err %d)", err); } } else { cbs->std[filter_id].function = NULL; cbs->std[filter_id].user_data = NULL; err = can_mcan_clear_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER] + filter_id * sizeof(struct can_mcan_std_filter), sizeof(struct can_mcan_std_filter)); if (err != 0) { LOG_ERR("failed to clear std filter element (err %d)", err); } } k_mutex_unlock(&data->lock); } void can_mcan_set_state_change_callback(const struct device *dev, can_state_change_callback_t callback, void *user_data) { struct can_mcan_data *data = dev->data; data->common.state_change_cb = callback; data->common.state_change_cb_user_data = user_data; } /* helper function allowing mcan drivers without access to private mcan * definitions to set CCCR_CCE, which might be needed to disable write * protection for some registers. 
*/ void can_mcan_enable_configuration_change(const struct device *dev) { struct can_mcan_data *data = dev->data; uint32_t cccr; int err; k_mutex_lock(&data->lock, K_FOREVER); err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr); if (err != 0) { goto unlock; } cccr |= CAN_MCAN_CCCR_CCE; err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr); if (err != 0) { goto unlock; } unlock: k_mutex_unlock(&data->lock); } int can_mcan_configure_mram(const struct device *dev, uintptr_t mrba, uintptr_t mram) { const struct can_mcan_config *config = dev->config; uint32_t addr; uint32_t reg; int err; err = can_mcan_exit_sleep_mode(dev); if (err != 0) { LOG_ERR("Failed to exit sleep mode"); return -EIO; } err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS)); if (err != 0) { LOG_ERR("Failed to enter init mode"); return -EIO; } can_mcan_enable_configuration_change(dev); addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER]; reg = (addr & CAN_MCAN_SIDFC_FLSSA) | FIELD_PREP(CAN_MCAN_SIDFC_LSS, config->mram_elements[CAN_MCAN_MRAM_CFG_STD_FILTER]); err = can_mcan_write_reg(dev, CAN_MCAN_SIDFC, reg); if (err != 0) { return err; } addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER]; reg = (addr & CAN_MCAN_XIDFC_FLESA) | FIELD_PREP(CAN_MCAN_XIDFC_LSS, config->mram_elements[CAN_MCAN_MRAM_CFG_EXT_FILTER]); err = can_mcan_write_reg(dev, CAN_MCAN_XIDFC, reg); if (err != 0) { return err; } addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO0]; reg = (addr & CAN_MCAN_RXF0C_F0SA) | FIELD_PREP(CAN_MCAN_RXF0C_F0S, config->mram_elements[CAN_MCAN_MRAM_CFG_RX_FIFO0]); err = can_mcan_write_reg(dev, CAN_MCAN_RXF0C, reg); if (err != 0) { return err; } addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO1]; reg = (addr & CAN_MCAN_RXF1C_F1SA) | FIELD_PREP(CAN_MCAN_RXF1C_F1S, config->mram_elements[CAN_MCAN_MRAM_CFG_RX_FIFO1]); err = can_mcan_write_reg(dev, CAN_MCAN_RXF1C, reg); if (err != 0) { return err; } addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_BUFFER]; reg = (addr & CAN_MCAN_RXBC_RBSA); err = can_mcan_write_reg(dev, CAN_MCAN_RXBC, reg); if (err != 0) { return err; } addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO]; reg = (addr & CAN_MCAN_TXEFC_EFSA) | FIELD_PREP(CAN_MCAN_TXEFC_EFS, config->mram_elements[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO]); err = can_mcan_write_reg(dev, CAN_MCAN_TXEFC, reg); if (err != 0) { return err; } addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER]; reg = (addr & CAN_MCAN_TXBC_TBSA) | FIELD_PREP(CAN_MCAN_TXBC_TFQS, config->mram_elements[CAN_MCAN_MRAM_CFG_TX_BUFFER]) | CAN_MCAN_TXBC_TFQM; err = can_mcan_write_reg(dev, CAN_MCAN_TXBC, reg); if (err != 0) { return err; } /* 64 byte Tx Buffer data fields size */ reg = CAN_MCAN_TXESC_TBDS; err = can_mcan_write_reg(dev, CAN_MCAN_TXESC, reg); if (err != 0) { return err; } /* 64 byte Rx Buffer/FIFO1/FIFO0 data fields size */ reg = CAN_MCAN_RXESC_RBDS | CAN_MCAN_RXESC_F1DS | CAN_MCAN_RXESC_F0DS; err = can_mcan_write_reg(dev, CAN_MCAN_RXESC, reg); if (err != 0) { return err; } return 0; } int can_mcan_init(const struct device *dev) { const struct can_mcan_config *config = dev->config; const struct can_mcan_callbacks *cbs = config->callbacks; struct can_mcan_data *data = dev->data; struct can_timing timing = { 0 }; #ifdef CONFIG_CAN_FD_MODE struct can_timing timing_data = { 0 }; #endif /* CONFIG_CAN_FD_MODE */ uint32_t reg; int err; __ASSERT_NO_MSG(config->ops->read_reg != NULL); __ASSERT_NO_MSG(config->ops->write_reg != NULL); 
__ASSERT_NO_MSG(config->ops->read_mram != NULL); __ASSERT_NO_MSG(config->ops->write_mram != NULL); __ASSERT_NO_MSG(config->ops->clear_mram != NULL); __ASSERT_NO_MSG(config->callbacks != NULL); __ASSERT_NO_MSG(cbs->num_tx <= config->mram_elements[CAN_MCAN_MRAM_CFG_TX_BUFFER]); __ASSERT_NO_MSG(cbs->num_std <= config->mram_elements[CAN_MCAN_MRAM_CFG_STD_FILTER]); __ASSERT_NO_MSG(cbs->num_ext <= config->mram_elements[CAN_MCAN_MRAM_CFG_EXT_FILTER]); k_mutex_init(&data->lock); k_mutex_init(&data->tx_mtx); k_sem_init(&data->tx_sem, cbs->num_tx, cbs->num_tx); if (config->common.phy != NULL) { if (!device_is_ready(config->common.phy)) { LOG_ERR("CAN transceiver not ready"); return -ENODEV; } } err = can_mcan_exit_sleep_mode(dev); if (err != 0) { LOG_ERR("Failed to exit sleep mode"); return -EIO; } err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS)); if (err != 0) { LOG_ERR("Failed to enter init mode"); return -EIO; } can_mcan_enable_configuration_change(dev); #if CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG err = can_mcan_read_reg(dev, CAN_MCAN_CREL, &reg); if (err != 0) { return -EIO; } LOG_DBG("IP rel: %lu.%lu.%lu %02lu.%lu.%lu", FIELD_GET(CAN_MCAN_CREL_REL, reg), FIELD_GET(CAN_MCAN_CREL_STEP, reg), FIELD_GET(CAN_MCAN_CREL_SUBSTEP, reg), FIELD_GET(CAN_MCAN_CREL_YEAR, reg), FIELD_GET(CAN_MCAN_CREL_MON, reg), FIELD_GET(CAN_MCAN_CREL_DAY, reg)); #endif /* CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG */ err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &reg); if (err != 0) { return err; } reg &= ~(CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE | CAN_MCAN_CCCR_TEST | CAN_MCAN_CCCR_MON | CAN_MCAN_CCCR_ASM); err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, reg); if (err != 0) { return err; } err = can_mcan_read_reg(dev, CAN_MCAN_TEST, &reg); if (err != 0) { return err; } reg &= ~(CAN_MCAN_TEST_LBCK); err = can_mcan_write_reg(dev, CAN_MCAN_TEST, reg); if (err != 0) { return err; } err = can_mcan_read_reg(dev, CAN_MCAN_GFC, &reg); if (err != 0) { return err; } reg |= FIELD_PREP(CAN_MCAN_GFC_ANFE, 0x2) | FIELD_PREP(CAN_MCAN_GFC_ANFS, 0x2); if (!IS_ENABLED(CONFIG_CAN_ACCEPT_RTR)) { reg |= CAN_MCAN_GFC_RRFS | CAN_MCAN_GFC_RRFE; } err = can_mcan_write_reg(dev, CAN_MCAN_GFC, reg); if (err != 0) { return err; } err = can_calc_timing(dev, &timing, config->common.bitrate, config->common.sample_point); if (err == -EINVAL) { LOG_ERR("Can't find timing for given param"); return -EIO; } LOG_DBG("Presc: %d, TS1: %d, TS2: %d", timing.prescaler, timing.phase_seg1, timing.phase_seg2); LOG_DBG("Sample-point err : %d", err); #ifdef CONFIG_CAN_FD_MODE err = can_calc_timing_data(dev, &timing_data, config->common.bitrate_data, config->common.sample_point_data); if (err == -EINVAL) { LOG_ERR("Can't find timing for given dataphase param"); return -EIO; } LOG_DBG("Sample-point err data phase: %d", err); #endif /* CONFIG_CAN_FD_MODE */ err = can_set_timing(dev, &timing); if (err != 0) { LOG_ERR("failed to set timing (err %d)", err); return -ENODEV; } #ifdef CONFIG_CAN_FD_MODE err = can_set_timing_data(dev, &timing_data); if (err != 0) { LOG_ERR("failed to set data phase timing (err %d)", err); return -ENODEV; } #endif /* CONFIG_CAN_FD_MODE */ reg = CAN_MCAN_IE_BOE | CAN_MCAN_IE_EWE | CAN_MCAN_IE_EPE | CAN_MCAN_IE_MRAFE | CAN_MCAN_IE_TEFLE | CAN_MCAN_IE_TEFNE | CAN_MCAN_IE_RF0NE | CAN_MCAN_IE_RF1NE | CAN_MCAN_IE_RF0LE | CAN_MCAN_IE_RF1LE; #ifdef CONFIG_CAN_STATS /* These ISRs are only enabled/used for statistics, they are otherwise * disabled as they may produce a significant amount of frequent ISRs. 
*/ reg |= CAN_MCAN_IE_PEAE | CAN_MCAN_IE_PEDE; #endif err = can_mcan_write_reg(dev, CAN_MCAN_IE, reg); if (err != 0) { return err; } reg = CAN_MCAN_ILS_RF0NL | CAN_MCAN_ILS_RF1NL | CAN_MCAN_ILS_RF0LL | CAN_MCAN_ILS_RF1LL; err = can_mcan_write_reg(dev, CAN_MCAN_ILS, reg); if (err != 0) { return err; } reg = CAN_MCAN_ILE_EINT0 | CAN_MCAN_ILE_EINT1; err = can_mcan_write_reg(dev, CAN_MCAN_ILE, reg); if (err != 0) { return err; } /* Interrupt on every TX buffer transmission event */ reg = CAN_MCAN_TXBTIE_TIE; err = can_mcan_write_reg(dev, CAN_MCAN_TXBTIE, reg); if (err != 0) { return err; } return can_mcan_clear_mram(dev, 0, config->mram_size); } ```
/content/code_sandbox/drivers/can/can_mcan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,305
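The file above provides the shared M_CAN backend; applications never call these functions directly but go through Zephyr's generic CAN API, which dispatches into can_mcan_add_rx_filter(), can_mcan_start() and can_mcan_send(). The sketch below is a minimal, hypothetical usage example — the device pointer, CAN IDs and callback names are invented for illustration and are not part of the driver:

```c
/* Hypothetical application-side sketch; IDs and callback names are illustrative. */
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static void tx_done_cb(const struct device *dev, int error, void *user_data)
{
	/* Invoked from the TX event FIFO handler once the frame went out */
	printk("TX complete (err %d)\n", error);
}

static void rx_cb(const struct device *dev, struct can_frame *frame, void *user_data)
{
	/* Invoked from can_mcan_get_message() for frames matching the filter */
	printk("RX id 0x%x dlc %u\n", frame->id, frame->dlc);
}

int demo(const struct device *can_dev)
{
	const struct can_filter filter = {
		.id = 0x123,
		.mask = CAN_STD_ID_MASK,	/* match a single standard ID */
	};
	const struct can_frame frame = {
		.id = 0x123,
		.dlc = 2,
		.data = { 0xde, 0xad },
	};
	int filter_id;
	int err;

	filter_id = can_add_rx_filter(can_dev, rx_cb, NULL, &filter);
	if (filter_id < 0) {
		return filter_id;	/* e.g. -ENOSPC when all filter elements are in use */
	}

	err = can_start(can_dev);	/* leaves init mode, enables the transceiver */
	if (err != 0) {
		return err;
	}

	return can_send(can_dev, &frame, K_MSEC(100), tx_done_cb, NULL);
}
```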
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_CAN_MICROCHIP_MCP251XFD_H_ #define ZEPHYR_DRIVERS_CAN_MICROCHIP_MCP251XFD_H_ #include <stdint.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #define MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(flag_u32) \ ((flag_u32) >> ROUND_DOWN(LOG2((flag_u32)), 8)) #define MCP251XFD_RAM_START_ADDR 0x400 #define MCP251XFD_RAM_SIZE 2048 #define MCP251XFD_RAM_ALIGNMENT 4 #define MCP251XFD_PAYLOAD_SIZE CAN_MAX_DLEN #define MCP251XFD_FIFO_TYPE_TEF 0 #define MCP251XFD_FIFO_TYPE_RX 1 #define MCP251XFD_TEF_FIFO_ITEM_SIZE 8 #define MCP251XFD_TX_QUEUE_ITEM_SIZE (8 + MCP251XFD_PAYLOAD_SIZE) #if defined(CONFIG_CAN_RX_TIMESTAMP) #define MCP251XFD_RX_FIFO_ITEM_SIZE (4 + 8 + MCP251XFD_PAYLOAD_SIZE) #else #define MCP251XFD_RX_FIFO_ITEM_SIZE (8 + MCP251XFD_PAYLOAD_SIZE) #endif #define MCP251XFD_TEF_FIFO_START_ADDR 0 #define MCP251XFD_TEF_FIFO_ITEMS CONFIG_CAN_MCP251XFD_MAX_TX_QUEUE #define MCP251XFD_TEF_FIFO_SIZE (MCP251XFD_TEF_FIFO_ITEMS * MCP251XFD_TEF_FIFO_ITEM_SIZE) #define MCP251XFD_TX_QUEUE_START_ADDR MCP251XFD_TEF_FIFO_SIZE #define MCP251XFD_TX_QUEUE_ITEMS CONFIG_CAN_MCP251XFD_MAX_TX_QUEUE #define MCP251XFD_TX_QUEUE_SIZE (MCP251XFD_TX_QUEUE_ITEMS * MCP251XFD_TX_QUEUE_ITEM_SIZE) #define MCP251XFD_RX_FIFO_START_ADDR (MCP251XFD_TX_QUEUE_START_ADDR + MCP251XFD_TX_QUEUE_SIZE) #define MCP251XFD_RX_FIFO_SIZE_MAX (MCP251XFD_RAM_SIZE - MCP251XFD_RX_FIFO_START_ADDR) #define MCP251XFD_RX_FIFO_ITEMS_MAX (MCP251XFD_RX_FIFO_SIZE_MAX / MCP251XFD_RX_FIFO_ITEM_SIZE) #define MCP251XFD_RX_FIFO_ITEMS CONFIG_CAN_MCP251XFD_RX_FIFO_ITEMS #define MCP251XFD_RX_FIFO_SIZE (MCP251XFD_RX_FIFO_ITEMS * MCP251XFD_RX_FIFO_ITEM_SIZE) #define MCP251XFD_RX_FIFO_IDX 1 #define MCP251XFD_REG_SIZE 4 #define MCP251XFD_CRC_POLY 0x8005 #define MCP251XFD_CRC_SEED 0xffff BUILD_ASSERT(MCP251XFD_TEF_FIFO_SIZE + MCP251XFD_TX_QUEUE_SIZE + MCP251XFD_RX_FIFO_SIZE <= MCP251XFD_RAM_SIZE, "Cannot fit FIFOs into RAM"); /* Timeout for changing mode */ #define MCP251XFD_MODE_CHANGE_TIMEOUT_USEC 200000U #define MCP251XFD_MODE_CHANGE_RETRIES 100 #define MCP251XFD_PLLRDY_TIMEOUT_USEC 100000 #define MCP251XFD_PLLRDY_RETRIES 100 #define MCP251XFD_MAX_INT_HANDLER_CALLS 10 #define MCP251XFD_INT_HANDLER_SLEEP_USEC 10000 struct mcp251xfd_mailbox { can_tx_callback_t cb; void *cb_arg; }; #define MCP251XFD_SPI_CMD_LEN 2 #define MCP251XFD_SPI_LEN_FIELD_LEN 1 #define MCP251XFD_SPI_CRC_LEN 2 /* MPC251x registers - mostly copied from Linux kernel implementation of driver */ /* CAN FD Controller Module SFR */ #define MCP251XFD_REG_CON 0x00 #define MCP251XFD_REG_CON_TXBWS_MASK GENMASK(31, 28) #define MCP251XFD_REG_CON_ABAT BIT(27) #define MCP251XFD_REG_CON_REQOP_MASK GENMASK(26, 24) #define MCP251XFD_REG_CON_MODE_MIXED 0 #define MCP251XFD_REG_CON_MODE_SLEEP 1 #define MCP251XFD_REG_CON_MODE_INT_LOOPBACK 2 #define MCP251XFD_REG_CON_MODE_LISTENONLY 3 #define MCP251XFD_REG_CON_MODE_CONFIG 4 #define MCP251XFD_REG_CON_MODE_EXT_LOOPBACK 5 #define MCP251XFD_REG_CON_MODE_CAN2_0 6 #define MCP251XFD_REG_CON_MODE_RESTRICTED 7 #define MCP251XFD_REG_CON_OPMOD_MASK GENMASK(23, 21) #define MCP251XFD_REG_CON_TXQEN BIT(20) #define MCP251XFD_REG_CON_STEF BIT(19) #define MCP251XFD_REG_CON_SERR2LOM BIT(18) #define MCP251XFD_REG_CON_ESIGM BIT(17) #define MCP251XFD_REG_CON_RTXAT BIT(16) #define MCP251XFD_REG_CON_BRSDIS BIT(12) #define MCP251XFD_REG_CON_BUSY BIT(11) #define MCP251XFD_REG_CON_WFT_MASK GENMASK(10, 9) #define MCP251XFD_REG_CON_WFT_T00FILTER 0x0 #define MCP251XFD_REG_CON_WFT_T01FILTER 0x1 #define 
MCP251XFD_REG_CON_WFT_T10FILTER 0x2 #define MCP251XFD_REG_CON_WFT_T11FILTER 0x3 #define MCP251XFD_REG_CON_WAKFIL BIT(8) #define MCP251XFD_REG_CON_PXEDIS BIT(6) #define MCP251XFD_REG_CON_ISOCRCEN BIT(5) #define MCP251XFD_REG_CON_DNCNT_MASK GENMASK(4, 0) #define MCP251XFD_REG_CON_B2 (MCP251XFD_REG_CON + 2) #define MCP251XFD_REG_CON_B3 (MCP251XFD_REG_CON + 3) #define MCP251XFD_REG_NBTCFG 0x04 #define MCP251XFD_REG_NBTCFG_BRP_MASK GENMASK(31, 24) #define MCP251XFD_REG_NBTCFG_TSEG1_MASK GENMASK(23, 16) #define MCP251XFD_REG_NBTCFG_TSEG2_MASK GENMASK(14, 8) #define MCP251XFD_REG_NBTCFG_SJW_MASK GENMASK(6, 0) #define MCP251XFD_REG_DBTCFG 0x08 #define MCP251XFD_REG_DBTCFG_BRP_MASK GENMASK(31, 24) #define MCP251XFD_REG_DBTCFG_TSEG1_MASK GENMASK(20, 16) #define MCP251XFD_REG_DBTCFG_TSEG2_MASK GENMASK(11, 8) #define MCP251XFD_REG_DBTCFG_SJW_MASK GENMASK(3, 0) #define MCP251XFD_REG_TDC 0x0c #define MCP251XFD_REG_TDC_EDGFLTEN BIT(25) #define MCP251XFD_REG_TDC_SID11EN BIT(24) #define MCP251XFD_REG_TDC_TDCMOD_MASK GENMASK(17, 16) #define MCP251XFD_REG_TDC_TDCMOD_AUTO 2 #define MCP251XFD_REG_TDC_TDCMOD_MANUAL 1 #define MCP251XFD_REG_TDC_TDCMOD_DISABLED 0 #define MCP251XFD_REG_TDC_TDCO_MASK GENMASK(14, 8) #define MCP251XFD_REG_TDC_TDCV_MASK GENMASK(5, 0) #define MCP251XFD_REG_TDC_TDCO_MIN -64 #define MCP251XFD_REG_TDC_TDCO_MAX 63 #define MCP251XFD_REG_TBC 0x10 #define MCP251XFD_REG_TSCON 0x14 #define MCP251XFD_REG_TSCON_TSRES BIT(18) #define MCP251XFD_REG_TSCON_TSEOF BIT(17) #define MCP251XFD_REG_TSCON_TBCEN BIT(16) #define MCP251XFD_REG_TSCON_TBCPRE_MASK GENMASK(9, 0) #define MCP251XFD_REG_VEC 0x18 #define MCP251XFD_REG_VEC_RXCODE_MASK GENMASK(30, 24) #define MCP251XFD_REG_VEC_TXCODE_MASK GENMASK(22, 16) #define MCP251XFD_REG_VEC_FILHIT_MASK GENMASK(12, 8) #define MCP251XFD_REG_VEC_ICODE_MASK GENMASK(6, 0) #define MCP251XFD_REG_INT 0x1c #define MCP251XFD_REG_INT_IF_MASK GENMASK(15, 0) #define MCP251XFD_REG_INT_IE_MASK GENMASK(31, 16) #define MCP251XFD_REG_INT_IVMIE BIT(31) #define MCP251XFD_REG_INT_WAKIE BIT(30) #define MCP251XFD_REG_INT_CERRIE BIT(29) #define MCP251XFD_REG_INT_SERRIE BIT(28) #define MCP251XFD_REG_INT_RXOVIE BIT(27) #define MCP251XFD_REG_INT_TXATIE BIT(26) #define MCP251XFD_REG_INT_SPICRCIE BIT(25) #define MCP251XFD_REG_INT_ECCIE BIT(24) #define MCP251XFD_REG_INT_TEFIE BIT(20) #define MCP251XFD_REG_INT_MODIE BIT(19) #define MCP251XFD_REG_INT_TBCIE BIT(18) #define MCP251XFD_REG_INT_RXIE BIT(17) #define MCP251XFD_REG_INT_TXIE BIT(16) #define MCP251XFD_REG_INT_IVMIF BIT(15) #define MCP251XFD_REG_INT_WAKIF BIT(14) #define MCP251XFD_REG_INT_CERRIF BIT(13) #define MCP251XFD_REG_INT_SERRIF BIT(12) #define MCP251XFD_REG_INT_RXOVIF BIT(11) #define MCP251XFD_REG_INT_TXATIF BIT(10) #define MCP251XFD_REG_INT_SPICRCIF BIT(9) #define MCP251XFD_REG_INT_ECCIF BIT(8) #define MCP251XFD_REG_INT_TEFIF BIT(4) #define MCP251XFD_REG_INT_MODIF BIT(3) #define MCP251XFD_REG_INT_TBCIF BIT(2) #define MCP251XFD_REG_INT_RXIF BIT(1) #define MCP251XFD_REG_INT_TXIF BIT(0) /* These IRQ flags must be cleared by SW in the CAN_INT register */ #define MCP251XFD_REG_INT_IF_CLEARABLE_MASK \ (MCP251XFD_REG_INT_IVMIF | MCP251XFD_REG_INT_WAKIF | MCP251XFD_REG_INT_CERRIF | \ MCP251XFD_REG_INT_SERRIF | MCP251XFD_REG_INT_MODIF) #define MCP251XFD_REG_RXIF 0x20 #define MCP251XFD_REG_TXIF 0x24 #define MCP251XFD_REG_RXOVIF 0x28 #define MCP251XFD_REG_TXATIF 0x2c #define MCP251XFD_REG_TXREQ 0x30 #define MCP251XFD_REG_TREC 0x34 #define MCP251XFD_REG_TREC_TXBO BIT(21) #define MCP251XFD_REG_TREC_TXBP BIT(20) #define MCP251XFD_REG_TREC_RXBP 
BIT(19) #define MCP251XFD_REG_TREC_TXWARN BIT(18) #define MCP251XFD_REG_TREC_RXWARN BIT(17) #define MCP251XFD_REG_TREC_EWARN BIT(16) #define MCP251XFD_REG_TREC_TEC_MASK GENMASK(15, 8) #define MCP251XFD_REG_TREC_REC_MASK GENMASK(7, 0) #define MCP251XFD_REG_BDIAG0 0x38 #define MCP251XFD_REG_BDIAG0_DTERRCNT_MASK GENMASK(31, 24) #define MCP251XFD_REG_BDIAG0_DRERRCNT_MASK GENMASK(23, 16) #define MCP251XFD_REG_BDIAG0_NTERRCNT_MASK GENMASK(15, 8) #define MCP251XFD_REG_BDIAG0_NRERRCNT_MASK GENMASK(7, 0) #define MCP251XFD_REG_BDIAG1 0x3c #define MCP251XFD_REG_BDIAG1_DLCMM BIT(31) #define MCP251XFD_REG_BDIAG1_ESI BIT(30) #define MCP251XFD_REG_BDIAG1_DCRCERR BIT(29) #define MCP251XFD_REG_BDIAG1_DSTUFERR BIT(28) #define MCP251XFD_REG_BDIAG1_DFORMERR BIT(27) #define MCP251XFD_REG_BDIAG1_DBIT1ERR BIT(25) #define MCP251XFD_REG_BDIAG1_DBIT0ERR BIT(24) #define MCP251XFD_REG_BDIAG1_TXBOERR BIT(23) #define MCP251XFD_REG_BDIAG1_NCRCERR BIT(21) #define MCP251XFD_REG_BDIAG1_NSTUFERR BIT(20) #define MCP251XFD_REG_BDIAG1_NFORMERR BIT(19) #define MCP251XFD_REG_BDIAG1_NACKERR BIT(18) #define MCP251XFD_REG_BDIAG1_NBIT1ERR BIT(17) #define MCP251XFD_REG_BDIAG1_NBIT0ERR BIT(16) #define MCP251XFD_REG_BDIAG1_BERR_MASK \ (MCP251XFD_REG_BDIAG1_DLCMM | MCP251XFD_REG_BDIAG1_ESI | MCP251XFD_REG_BDIAG1_DCRCERR | \ MCP251XFD_REG_BDIAG1_DSTUFERR | MCP251XFD_REG_BDIAG1_DFORMERR | \ MCP251XFD_REG_BDIAG1_DBIT1ERR | MCP251XFD_REG_BDIAG1_DBIT0ERR | \ MCP251XFD_REG_BDIAG1_TXBOERR | MCP251XFD_REG_BDIAG1_NCRCERR | \ MCP251XFD_REG_BDIAG1_NSTUFERR | MCP251XFD_REG_BDIAG1_NFORMERR | \ MCP251XFD_REG_BDIAG1_NACKERR | MCP251XFD_REG_BDIAG1_NBIT1ERR | \ MCP251XFD_REG_BDIAG1_NBIT0ERR) #define MCP251XFD_REG_BDIAG1_EFMSGCNT_MASK GENMASK(15, 0) #define MCP251XFD_REG_TEFCON 0x40 #define MCP251XFD_REG_TEFCON_FSIZE_MASK GENMASK(28, 24) #define MCP251XFD_REG_TEFCON_FRESET BIT(10) #define MCP251XFD_REG_TEFCON_UINC BIT(8) #define MCP251XFD_REG_TEFCON_TEFTSEN BIT(5) #define MCP251XFD_REG_TEFCON_TEFOVIE BIT(3) #define MCP251XFD_REG_TEFCON_TEFFIE BIT(2) #define MCP251XFD_REG_TEFCON_TEFHIE BIT(1) #define MCP251XFD_REG_TEFCON_TEFNEIE BIT(0) #define MCP251XFD_REG_TEFSTA 0x44 #define MCP251XFD_REG_TEFSTA_TEFOVIF BIT(3) #define MCP251XFD_REG_TEFSTA_TEFFIF BIT(2) #define MCP251XFD_REG_TEFSTA_TEFHIF BIT(1) #define MCP251XFD_REG_TEFSTA_TEFNEIF BIT(0) #define MCP251XFD_REG_TEFUA 0x48 #define MCP251XFD_REG_TXQCON 0x50 #define MCP251XFD_REG_TXQCON_PLSIZE_MASK GENMASK(31, 29) #define MCP251XFD_REG_TXQCON_PLSIZE_8 0 #define MCP251XFD_REG_TXQCON_PLSIZE_12 1 #define MCP251XFD_REG_TXQCON_PLSIZE_16 2 #define MCP251XFD_REG_TXQCON_PLSIZE_20 3 #define MCP251XFD_REG_TXQCON_PLSIZE_24 4 #define MCP251XFD_REG_TXQCON_PLSIZE_32 5 #define MCP251XFD_REG_TXQCON_PLSIZE_48 6 #define MCP251XFD_REG_TXQCON_PLSIZE_64 7 #define MCP251XFD_REG_TXQCON_FSIZE_MASK GENMASK(28, 24) #define MCP251XFD_REG_TXQCON_TXAT_UNLIMITED 3 #define MCP251XFD_REG_TXQCON_TXAT_THREE_SHOT 1 #define MCP251XFD_REG_TXQCON_TXAT_ONE_SHOT 0 #define MCP251XFD_REG_TXQCON_TXAT_MASK GENMASK(22, 21) #define MCP251XFD_REG_TXQCON_TXPRI_MASK GENMASK(20, 16) #define MCP251XFD_REG_TXQCON_FRESET BIT(10) #define MCP251XFD_REG_TXQCON_TXREQ BIT(9) #define MCP251XFD_REG_TXQCON_UINC BIT(8) #define MCP251XFD_REG_TXQCON_TXEN BIT(7) #define MCP251XFD_REG_TXQCON_TXATIE BIT(4) #define MCP251XFD_REG_TXQCON_TXQEIE BIT(2) #define MCP251XFD_REG_TXQCON_TXQNIE BIT(0) #define MCP251XFD_REG_TXQSTA 0x54 #define MCP251XFD_REG_TXQSTA_TXQCI_MASK GENMASK(12, 8) #define MCP251XFD_REG_TXQSTA_TXABT BIT(7) #define MCP251XFD_REG_TXQSTA_TXLARB BIT(6) #define 
MCP251XFD_REG_TXQSTA_TXERR BIT(5) #define MCP251XFD_REG_TXQSTA_TXATIF BIT(4) #define MCP251XFD_REG_TXQSTA_TXQEIF BIT(2) #define MCP251XFD_REG_TXQSTA_TXQNIF BIT(0) #define MCP251XFD_REG_TXQUA 0x58 #define MCP251XFD_REG_FIFOCON(x) (0x50 + 0xc * (x)) #define MCP251XFD_REG_FIFOCON_PLSIZE_MASK GENMASK(31, 29) #define MCP251XFD_REG_FIFOCON_PLSIZE_8 0 #define MCP251XFD_REG_FIFOCON_PLSIZE_12 1 #define MCP251XFD_REG_FIFOCON_PLSIZE_16 2 #define MCP251XFD_REG_FIFOCON_PLSIZE_20 3 #define MCP251XFD_REG_FIFOCON_PLSIZE_24 4 #define MCP251XFD_REG_FIFOCON_PLSIZE_32 5 #define MCP251XFD_REG_FIFOCON_PLSIZE_48 6 #define MCP251XFD_REG_FIFOCON_PLSIZE_64 7 #define MCP251XFD_REG_FIFOCON_FSIZE_MASK GENMASK(28, 24) #define MCP251XFD_REG_FIFOCON_TXAT_MASK GENMASK(22, 21) #define MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT 0 #define MCP251XFD_REG_FIFOCON_TXAT_THREE_SHOT 1 #define MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED 3 #define MCP251XFD_REG_FIFOCON_TXPRI_MASK GENMASK(20, 16) #define MCP251XFD_REG_FIFOCON_FRESET BIT(10) #define MCP251XFD_REG_FIFOCON_TXREQ BIT(9) #define MCP251XFD_REG_FIFOCON_UINC BIT(8) #define MCP251XFD_REG_FIFOCON_TXEN BIT(7) #define MCP251XFD_REG_FIFOCON_RTREN BIT(6) #define MCP251XFD_REG_FIFOCON_RXTSEN BIT(5) #define MCP251XFD_REG_FIFOCON_TXATIE BIT(4) #define MCP251XFD_REG_FIFOCON_RXOVIE BIT(3) #define MCP251XFD_REG_FIFOCON_TFERFFIE BIT(2) #define MCP251XFD_REG_FIFOCON_TFHRFHIE BIT(1) #define MCP251XFD_REG_FIFOCON_TFNRFNIE BIT(0) #define MCP251XFD_REG_FIFOSTA(x) (0x54 + 0xc * (x)) #define MCP251XFD_REG_FIFOSTA_FIFOCI_MASK GENMASK(12, 8) #define MCP251XFD_REG_FIFOSTA_TXABT BIT(7) #define MCP251XFD_REG_FIFOSTA_TXLARB BIT(6) #define MCP251XFD_REG_FIFOSTA_TXERR BIT(5) #define MCP251XFD_REG_FIFOSTA_TXATIF BIT(4) #define MCP251XFD_REG_FIFOSTA_RXOVIF BIT(3) #define MCP251XFD_REG_FIFOSTA_TFERFFIF BIT(2) #define MCP251XFD_REG_FIFOSTA_TFHRFHIF BIT(1) #define MCP251XFD_REG_FIFOSTA_TFNRFNIF BIT(0) #define MCP251XFD_REG_FIFOUA(x) (0x58 + 0xc * (x)) #define MCP251XFD_REG_BYTE_FLTCON(m) (0x1d0 + m) #define MCP251XFD_REG_BYTE_FLTCON_FBP_MASK GENMASK(4, 0) #define MCP251XFD_REG_BYTE_FLTCON_FLTEN BIT(7) #define MCP251XFD_REG_FLTOBJ(x) (0x1f0 + 0x8 * (x)) #define MCP251XFD_REG_FLTOBJ_EXIDE BIT(30) #define MCP251XFD_REG_FLTOBJ_SID11 BIT(29) #define MCP251XFD_REG_FLTOBJ_EID_MASK GENMASK(28, 11) #define MCP251XFD_REG_FLTOBJ_SID_MASK GENMASK(10, 0) #define MCP251XFD_REG_FLTMASK(x) (0x1f4 + 0x8 * (x)) #define MCP251XFD_REG_MASK_MIDE BIT(30) #define MCP251XFD_REG_MASK_MSID11 BIT(29) #define MCP251XFD_REG_MASK_MEID_MASK GENMASK(28, 11) #define MCP251XFD_REG_MASK_MSID_MASK GENMASK(10, 0) /* Message Object */ #define MCP251XFD_OBJ_ID_SID11 BIT(29) #define MCP251XFD_OBJ_ID_EID_MASK GENMASK(28, 11) #define MCP251XFD_OBJ_ID_SID_MASK GENMASK(10, 0) #define MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK GENMASK(31, 9) #define MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK GENMASK(15, 9) #define MCP251XFD_OBJ_FLAGS_SEQ_MASK MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK #define MCP251XFD_OBJ_FLAGS_ESI BIT(8) #define MCP251XFD_OBJ_FLAGS_FDF BIT(7) #define MCP251XFD_OBJ_FLAGS_BRS BIT(6) #define MCP251XFD_OBJ_FLAGS_RTR BIT(5) #define MCP251XFD_OBJ_FLAGS_IDE BIT(4) #define MCP251XFD_OBJ_FLAGS_DLC_MASK GENMASK(3, 0) #define MCP251XFD_OBJ_FILHIT_MASK GENMASK(15, 11) #define MCP251XFD_OBJ_DATA_OFFSET 2 /* offset to the data in sizeof(uint32_t) */ #define MCP251XFD_OBJ_HEADER_SIZE (MCP251XFD_OBJ_DATA_OFFSET * MCP251XFD_REG_SIZE) #define MCP251XFD_REG_FRAME_EFF_SID_MASK GENMASK(28, 18) #define MCP251XFD_REG_FRAME_EFF_EID_MASK GENMASK(17, 0) /* MCP2517/18FD SFR */ #define 
MCP251XFD_REG_OSC 0xe00 #define MCP251XFD_REG_OSC_SCLKRDY BIT(12) #define MCP251XFD_REG_OSC_OSCRDY BIT(10) #define MCP251XFD_REG_OSC_PLLRDY BIT(8) #define MCP251XFD_REG_OSC_CLKODIV_10 3 #define MCP251XFD_REG_OSC_CLKODIV_4 2 #define MCP251XFD_REG_OSC_CLKODIV_2 1 #define MCP251XFD_REG_OSC_CLKODIV_1 0 #define MCP251XFD_REG_OSC_CLKODIV_MASK GENMASK(6, 5) #define MCP251XFD_REG_OSC_SCLKDIV BIT(4) #define MCP251XFD_REG_OSC_LPMEN BIT(3) /* MCP2518FD only */ #define MCP251XFD_REG_OSC_OSCDIS BIT(2) #define MCP251XFD_REG_OSC_PLLEN BIT(0) #define MCP251XFD_REG_IOCON 0xe04 #define MCP251XFD_REG_IOCON_INTOD BIT(30) #define MCP251XFD_REG_IOCON_SOF BIT(29) #define MCP251XFD_REG_IOCON_TXCANOD BIT(28) #define MCP251XFD_REG_IOCON_PM1 BIT(25) #define MCP251XFD_REG_IOCON_PM0 BIT(24) #define MCP251XFD_REG_IOCON_GPIO1 BIT(17) #define MCP251XFD_REG_IOCON_GPIO0 BIT(16) #define MCP251XFD_REG_IOCON_LAT1 BIT(9) #define MCP251XFD_REG_IOCON_LAT0 BIT(8) #define MCP251XFD_REG_IOCON_XSTBYEN BIT(6) #define MCP251XFD_REG_IOCON_TRIS1 BIT(1) #define MCP251XFD_REG_IOCON_TRIS0 BIT(0) #define MCP251XFD_REG_CRC 0xe08 #define MCP251XFD_REG_CRC_FERRIE BIT(25) #define MCP251XFD_REG_CRC_CRCERRIE BIT(24) #define MCP251XFD_REG_CRC_FERRIF BIT(17) #define MCP251XFD_REG_CRC_CRCERRIF BIT(16) #define MCP251XFD_REG_CRC_IF_MASK GENMASK(17, 16) #define MCP251XFD_REG_CRC_MASK GENMASK(15, 0) #define MCP251XFD_REG_ECCCON 0xe0c #define MCP251XFD_REG_ECCCON_PARITY_MASK GENMASK(14, 8) #define MCP251XFD_REG_ECCCON_DEDIE BIT(2) #define MCP251XFD_REG_ECCCON_SECIE BIT(1) #define MCP251XFD_REG_ECCCON_ECCEN BIT(0) #define MCP251XFD_REG_ECCSTAT 0xe10 #define MCP251XFD_REG_ECCSTAT_ERRADDR_MASK GENMASK(27, 16) #define MCP251XFD_REG_ECCSTAT_IF_MASK GENMASK(2, 1) #define MCP251XFD_REG_ECCSTAT_DEDIF BIT(2) #define MCP251XFD_REG_ECCSTAT_SECIF BIT(1) #define MCP251XFD_REG_DEVID 0xe14 /* MCP2518FD only */ #define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4) #define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0) /* SPI commands */ #define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000 #define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000 #define MCP251XFD_SPI_INSTRUCTION_READ 0x3000 #define MCP251XFD_SPI_INSTRUCTION_WRITE_CRC 0xa000 #define MCP251XFD_SPI_INSTRUCTION_READ_CRC 0xb000 #define MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE 0xc000 #define MCP251XFD_SPI_ADDRESS_MASK GENMASK(11, 0) #define MCP251XFD_REG_FIFOCON_TO_STA(addr) (addr + 0x4) #define MCP251XFD_REG_FLTCON(m) (0x1d0 + m) struct mcp251xfd_txobj { uint32_t id; uint32_t flags; uint8_t data[CAN_MAX_DLEN]; } __packed; struct mcp251xfd_rxobj { uint32_t id; uint32_t flags; #if defined(CONFIG_CAN_RX_TIMESTAMP) uint32_t timestamp; #endif uint8_t data[CAN_MAX_DLEN]; } __packed; struct mcp251xfd_tefobj { uint32_t id; uint32_t flags; } __packed; #define MCP251XFD_MAX_READ_FIFO_BUF_SIZE \ MAX((MCP251XFD_RX_FIFO_ITEM_SIZE * MCP251XFD_RX_FIFO_ITEMS), \ (MCP251XFD_TEF_FIFO_ITEM_SIZE * MCP251XFD_TEF_FIFO_ITEMS)) #define MCP251XFD_MAX_READ_CRC_BUF_SIZE \ (MCP251XFD_SPI_CRC_LEN + 2 * MCP251XFD_REG_SIZE) #define MCP251XFD_SPI_BUF_SIZE \ MAX(MCP251XFD_MAX_READ_FIFO_BUF_SIZE, MCP251XFD_MAX_READ_CRC_BUF_SIZE) #define MCP251XFD_SPI_HEADER_LEN (MCP251XFD_SPI_CMD_LEN + MCP251XFD_SPI_LEN_FIELD_LEN) struct mcp251xfd_spi_data { uint8_t _unused[4 - (MCP251XFD_SPI_HEADER_LEN % 4)]; /* so that buf is 4-byte aligned */ uint8_t header[MCP251XFD_SPI_HEADER_LEN]; /* contains spi_cmd and length field (if used) */ uint8_t buf[MCP251XFD_SPI_BUF_SIZE]; } __packed __aligned(4); struct mcp251xfd_fifo { uint32_t ram_start_addr; uint16_t reg_fifocon_addr; uint8_t 
capacity; uint8_t item_size; void (*msg_handler)(const struct device *dev, void *data); }; struct mcp251xfd_data { struct can_driver_data common; /* Interrupt Data */ struct gpio_callback int_gpio_cb; struct k_thread int_thread; k_thread_stack_t *int_thread_stack; struct k_sem int_sem; /* General */ enum can_state state; struct k_mutex mutex; /* TX Callback */ struct k_sem tx_sem; uint32_t mailbox_usage; struct mcp251xfd_mailbox mailbox[CONFIG_CAN_MCP251XFD_MAX_TX_QUEUE]; /* Filter Data */ uint32_t filter_usage; struct can_filter filter[CONFIG_CAN_MAX_FILTER]; can_rx_callback_t rx_cb[CONFIG_CAN_MAX_FILTER]; void *cb_arg[CONFIG_CAN_MAX_FILTER]; const struct device *dev; uint8_t next_mcp251xfd_mode; uint8_t current_mcp251xfd_mode; int tdco; struct mcp251xfd_spi_data spi_data; }; struct mcp251xfd_config { const struct can_driver_config common; /* spi configuration */ struct spi_dt_spec bus; struct gpio_dt_spec int_gpio_dt; uint32_t osc_freq; /* IO Config */ bool sof_on_clko; bool pll_enable; uint8_t clko_div; uint16_t timestamp_prescaler; const struct device *clk_dev; uint8_t clk_id; struct mcp251xfd_fifo rx_fifo; struct mcp251xfd_fifo tef_fifo; }; #endif /* ZEPHYR_DRIVERS_CAN_MICROCHIP_MCP251XFD_H_ */ ```
/content/code_sandbox/drivers/can/can_mcp251xfd.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,956
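The header above carves the controller's 2 KiB message RAM into a TEF FIFO, a TX queue and an RX FIFO placed back to back, and guards the sum with a BUILD_ASSERT. The stand-alone sketch below redoes the same arithmetic with assumed Kconfig values (8 TX queue slots, 8 RX FIFO items, RX timestamps disabled); it is illustrative only and not part of the driver:

```c
/* Stand-alone sketch of the RAM budget computed by the macros above.
 * The configuration values below are assumptions for illustration only.
 */
#include <stdio.h>

#define RAM_SIZE       2048
#define PAYLOAD_SIZE   64                  /* MCP251XFD_PAYLOAD_SIZE with CAN FD */
#define MAX_TX_QUEUE   8                   /* assumed CONFIG_CAN_MCP251XFD_MAX_TX_QUEUE */
#define RX_FIFO_ITEMS  8                   /* assumed CONFIG_CAN_MCP251XFD_RX_FIFO_ITEMS */

#define TEF_ITEM_SIZE  8                   /* id + flags */
#define TX_ITEM_SIZE   (8 + PAYLOAD_SIZE)  /* header + payload */
#define RX_ITEM_SIZE   (8 + PAYLOAD_SIZE)  /* +4 if RX timestamps are enabled */

int main(void)
{
	unsigned int tef_size = MAX_TX_QUEUE * TEF_ITEM_SIZE;  /* TEF mirrors the TX queue depth */
	unsigned int txq_size = MAX_TX_QUEUE * TX_ITEM_SIZE;
	unsigned int rx_size  = RX_FIFO_ITEMS * RX_ITEM_SIZE;
	unsigned int total    = tef_size + txq_size + rx_size;

	printf("TEF %u + TXQ %u + RX %u = %u bytes (of %u)\n",
	       tef_size, txq_size, rx_size, total, RAM_SIZE);

	/* Mirrors the BUILD_ASSERT in the header: everything must fit in 2 KiB */
	return total <= RAM_SIZE ? 0 : 1;
}
```

With those values the three regions take 64 + 576 + 576 = 1216 bytes, well within the 2048-byte message RAM.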
```unknown
# NXP SJA1000 configuration options

config CAN_SJA1000
    bool
    help
      This enables support for the shared NXP SJA1000 CAN driver.

config CAN_MAX_FILTER
    int "Maximum number of concurrent active RX filters"
    depends on CAN_SJA1000
    default 5
    range 1 32
    help
      As the NXP SJA1000 only supports one full-width RX filter,
      filtering of received CAN frames is done in software.
```
/content/code_sandbox/drivers/can/Kconfig.sja1000
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
103
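The help text above notes that the SJA1000 exposes only one full-width hardware filter, so per-filter acceptance is decided in software. The fragment below is a generic, hypothetical sketch of such an id/mask acceptance test; the names are not taken from the SJA1000 driver sources:

```c
/* Illustrative sketch of a classic id/mask acceptance test of the kind a
 * software-filtering driver performs for each received frame.
 */
#include <stdbool.h>
#include <stdint.h>

struct sw_filter {
	uint32_t id;
	uint32_t mask;
	bool extended;	/* 29-bit vs. 11-bit identifier */
};

static bool sw_filter_match(const struct sw_filter *filter, uint32_t rx_id, bool rx_extended)
{
	if (filter->extended != rx_extended) {
		return false;
	}

	/* A frame matches when every ID bit selected by the mask is equal */
	return ((rx_id ^ filter->id) & filter->mask) == 0U;
}
```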
```unknown
# STM32 CAN configuration options

config CAN_STM32_BXCAN
    bool "STM32 bxCAN driver"
    default y
    depends on DT_HAS_ST_STM32_BXCAN_ENABLED
    help
      Enable STM32 bxCAN Driver.

if CAN_STM32_BXCAN

config CAN_MAX_STD_ID_FILTER
    int "Maximum number of standard (11-bit) ID filters"
    default 14
    range 0 28
    help
      Defines the maximum number of filters with standard ID (11-bit)
      that can be added by the application.

      One standard ID filter with mask occupies 1/2 of the 14 available
      filter banks. The following equation determines the maximum total
      number of filters:

        CAN_MAX_STD_ID_FILTER + CAN_MAX_EXT_ID_FILTER * 2 <= 28

config CAN_MAX_EXT_ID_FILTER
    int "Maximum number of extended (29-bit) ID filters"
    default 7
    range 0 14
    help
      Defines the maximum number of filters with extended ID (29-bit)
      that can be added by the application.

      One extended ID filter with mask occupies 1 of the 14 available
      filter banks. The following equation determines the maximum total
      number of filters:

        CAN_MAX_STD_ID_FILTER + CAN_MAX_EXT_ID_FILTER * 2 <= 28

endif # CAN_STM32_BXCAN

config CAN_STM32_FDCAN
    bool "STM32 FDCAN driver"
    default y
    depends on DT_HAS_ST_STM32_FDCAN_ENABLED
    select CAN_MCAN
    select USE_STM32_LL_RCC

if CAN_STM32_FDCAN

config CAN_MAX_STD_ID_FILTER
    int "Maximum number of standard (11-bit) ID filters"
    default 28
    range 0 28
    help
      Defines the maximum number of filters with standard ID (11-bit)
      that can be added by the application.

config CAN_MAX_EXT_ID_FILTER
    int "Maximum number of extended (29-bit) ID filters"
    default 8
    range 0 8
    help
      Defines the maximum number of filters with extended ID (29-bit)
      that can be added by the application.

endif # CAN_STM32_FDCAN

config CAN_STM32H7_FDCAN
    bool "STM32H7 FDCAN driver"
    default y
    depends on DT_HAS_ST_STM32H7_FDCAN_ENABLED
    select CAN_MCAN
    select USE_STM32_LL_RCC
```
/content/code_sandbox/drivers/can/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
522
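The bxCAN help texts above encode the filter-bank budget as CAN_MAX_STD_ID_FILTER + CAN_MAX_EXT_ID_FILTER * 2 <= 28. A quick check with the default values (14 standard, 7 extended) shows the constraint is met exactly; the compile-time sketch below merely restates that arithmetic and is illustrative only:

```c
/* Sketch of the bxCAN filter bank budget from the help text above. The two
 * values are the Kconfig defaults and are assumptions for illustration.
 */
#define CAN_MAX_STD_ID_FILTER 14	/* each uses half of a filter bank */
#define CAN_MAX_EXT_ID_FILTER 7		/* each uses a full filter bank */

/* 14 * 0.5 + 7 * 1 = 14 banks used out of the 14 available, i.e. the
 * constraint 14 + 7 * 2 <= 28 holds with equality.
 */
_Static_assert(CAN_MAX_STD_ID_FILTER + CAN_MAX_EXT_ID_FILTER * 2 <= 28,
	       "bxCAN filter configuration exceeds the 14 available filter banks");
```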
```c /* * */ /** * @file * * Routines setting up the host system. Those are placed in separate file * because there is naming conflicts between host and zephyr network stacks. */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <errno.h> #include <string.h> #include <stdbool.h> /* Linux host include files. */ #ifdef __linux #include <unistd.h> #include <fcntl.h> #include <sys/ioctl.h> #include <sys/socket.h> #include <sys/select.h> #include <net/if.h> #include <linux/if.h> #include <linux/can.h> #include <linux/can/raw.h> #else #error "This driver can only be built on Linux systems" #endif #include "can_native_linux_adapt.h" #ifndef CANFD_FDF /* Linux kernels before v5.14 do not define CANFD_FDF */ #define CANFD_FDF 0x04 #endif /* CANFD_FDF */ int linux_socketcan_iface_open(const char *if_name) { struct sockaddr_can addr; struct ifreq ifr; int fd, opt, ret = -EINVAL; fd = socket(PF_CAN, SOCK_RAW, CAN_RAW); if (fd < 0) { return -errno; } (void)memset(&ifr, 0, sizeof(ifr)); (void)memset(&addr, 0, sizeof(addr)); strncpy(ifr.ifr_name, if_name, IFNAMSIZ - 1); ret = ioctl(fd, SIOCGIFINDEX, (void *)&ifr); if (ret < 0) { close(fd); return -errno; } addr.can_ifindex = ifr.ifr_ifindex; addr.can_family = PF_CAN; ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr)); if (ret < 0) { close(fd); return -errno; } /* this option must always be enabled in order to receive TX confirmations */ opt = 1; ret = setsockopt(fd, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS, &opt, sizeof(opt)); if (ret < 0) { close(fd); return -errno; } return fd; } int linux_socketcan_iface_close(int fd) { return close(fd); } int linux_socketcan_poll_data(int fd) { struct timeval timeout; fd_set rset; int ret; FD_ZERO(&rset); FD_SET(fd, &rset); timeout.tv_sec = 0; timeout.tv_usec = 0; ret = select(fd + 1, &rset, NULL, NULL, &timeout); if (ret < 0 && errno != EINTR) { return -errno; } else if (ret > 0) { if (FD_ISSET(fd, &rset)) { return 0; } } return -EAGAIN; } int linux_socketcan_read_data(int fd, void *buf, size_t buf_len, bool *msg_confirm) { struct canfd_frame *frame = (struct canfd_frame *)buf; struct msghdr msg = {0}; struct iovec iov = { .iov_base = buf, .iov_len = buf_len, }; msg.msg_iov = &iov; msg.msg_iovlen = 1; int ret = (int)recvmsg(fd, &msg, MSG_WAITALL); if (msg_confirm != NULL) { *msg_confirm = (msg.msg_flags & MSG_CONFIRM) != 0; } /* Make sure to set the flags for all frames received via the Linux API. * * Zephyr relies on defined flags field of the SocketCAN data for both FD and classical CAN * frames. In Linux the flags field is undefined for legacy frames. */ if (ret == CANFD_MTU) { frame->flags |= CANFD_FDF; } else if (ret == CAN_MTU) { frame->flags = 0; } return ret; } int linux_socketcan_set_mode_fd(int fd, bool mode_fd) { int opt = mode_fd ? 1 : 0; return setsockopt(fd, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &opt, sizeof(opt)); } ```
/content/code_sandbox/drivers/can/can_native_linux_adapt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
898
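The helpers above wrap the Linux SocketCAN calls used by the native host CAN driver. The sketch below shows one plausible way to combine them on the host side; the interface name "vcan0" and the single-shot polling flow are assumptions for illustration, not the driver's actual usage:

```c
/* Host-side sketch, assuming the prototypes from can_native_linux_adapt.h. */
#include <stdbool.h>
#include <stdio.h>
#include <linux/can.h>

#include "can_native_linux_adapt.h"

int poll_vcan_once(void)
{
	struct canfd_frame frame;
	bool msg_confirm = false;
	int fd, ret;

	fd = linux_socketcan_iface_open("vcan0");
	if (fd < 0) {
		return fd;				/* negative errno from socket()/bind() */
	}

	(void)linux_socketcan_set_mode_fd(fd, true);	/* also accept CAN FD frames */

	if (linux_socketcan_poll_data(fd) == 0) {	/* 0 means data is ready */
		ret = linux_socketcan_read_data(fd, &frame, sizeof(frame), &msg_confirm);
		if (ret > 0 && !msg_confirm) {		/* skip our own TX confirmations */
			printf("id 0x%x len %u\n", frame.can_id, frame.len);
		}
	}

	return linux_socketcan_iface_close(fd);
}
```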
```unknown
# TCAN4x5x configuration options

config CAN_TCAN4X5X
    bool "TI TCAN4x5x"
    default y
    depends on DT_HAS_TI_TCAN4X5X_ENABLED
    select CAN_MCAN
    select SPI
    help
      Enable support for the Texas Instruments TCAN4x5x.

if CAN_TCAN4X5X

config CAN_TCAN4X5X_THREAD_STACK_SIZE
    int "Stack size for the TCAN4x5x interrupt thread"
    default 1024
    help
      Size of the stack used for the thread handling interrupts and
      dispatching callbacks.

config CAN_TCAN4X5X_THREAD_PRIO
    int "Priority for the TCAN4x5x interrupt thread"
    default 0
    help
      Priority level for the thread handling interrupts and dispatching
      callbacks.

endif # CAN_TCAN4X5X
```
/content/code_sandbox/drivers/can/Kconfig.tcan4x5x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
185
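The two options above size and prioritize a dedicated thread that services the TCAN4x5x interrupt line outside of ISR context (the SPI transfers needed to read the interrupt flags cannot run in an ISR). The sketch below illustrates the usual Zephyr pattern such options feed into; all names are hypothetical and not the driver's actual internals:

```c
/* Illustrative sketch of a Kconfig-sized interrupt service thread. */
#include <zephyr/kernel.h>

#define INT_THREAD_STACK_SIZE 1024	/* e.g. CONFIG_CAN_TCAN4X5X_THREAD_STACK_SIZE */
#define INT_THREAD_PRIO       0		/* e.g. CONFIG_CAN_TCAN4X5X_THREAD_PRIO */

K_THREAD_STACK_DEFINE(int_thread_stack, INT_THREAD_STACK_SIZE);
static struct k_thread int_thread;
K_SEM_DEFINE(int_sem, 0, 1);

static void int_thread_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (true) {
		k_sem_take(&int_sem, K_FOREVER);	/* woken from the GPIO ISR */
		/* ...read interrupt flags over SPI and dispatch callbacks... */
	}
}

static int start_int_thread(void)
{
	k_thread_create(&int_thread, int_thread_stack,
			K_THREAD_STACK_SIZEOF(int_thread_stack),
			int_thread_fn, NULL, NULL, NULL,
			INT_THREAD_PRIO, 0, K_NO_WAIT);
	return 0;
}
```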
```c /* * */ #define DT_DRV_COMPAT microchip_mcp2515 #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/gpio.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(can_mcp2515, CONFIG_CAN_LOG_LEVEL); #include "can_mcp2515.h" /* Timeout for changing mode */ #define MCP2515_MODE_CHANGE_TIMEOUT_USEC 1000 #define MCP2515_MODE_CHANGE_RETRIES 100 #define MCP2515_MODE_CHANGE_DELAY \ K_USEC(MCP2515_MODE_CHANGE_TIMEOUT_USEC / MCP2515_MODE_CHANGE_RETRIES) static int mcp2515_cmd_soft_reset(const struct device *dev) { const struct mcp2515_config *dev_cfg = dev->config; uint8_t cmd_buf[] = { MCP2515_OPCODE_RESET }; const struct spi_buf tx_buf = { .buf = cmd_buf, .len = sizeof(cmd_buf), }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1U }; return spi_write_dt(&dev_cfg->bus, &tx); } static int mcp2515_cmd_bit_modify(const struct device *dev, uint8_t reg_addr, uint8_t mask, uint8_t data) { const struct mcp2515_config *dev_cfg = dev->config; uint8_t cmd_buf[] = { MCP2515_OPCODE_BIT_MODIFY, reg_addr, mask, data }; const struct spi_buf tx_buf = { .buf = cmd_buf, .len = sizeof(cmd_buf), }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1U }; return spi_write_dt(&dev_cfg->bus, &tx); } static int mcp2515_cmd_write_reg(const struct device *dev, uint8_t reg_addr, uint8_t *buf_data, uint8_t buf_len) { const struct mcp2515_config *dev_cfg = dev->config; uint8_t cmd_buf[] = { MCP2515_OPCODE_WRITE, reg_addr }; struct spi_buf tx_buf[] = { { .buf = cmd_buf, .len = sizeof(cmd_buf) }, { .buf = buf_data, .len = buf_len } }; const struct spi_buf_set tx = { .buffers = tx_buf, .count = ARRAY_SIZE(tx_buf) }; return spi_write_dt(&dev_cfg->bus, &tx); } /* * Load TX buffer instruction * * When loading a transmit buffer, reduces the overhead of a normal WRITE * command by placing the Address Pointer at one of six locations, as * selected by parameter abc. * * 0: TX Buffer 0, Start at TXB0SIDH (0x31) * 1: TX Buffer 0, Start at TXB0D0 (0x36) * 2: TX Buffer 1, Start at TXB1SIDH (0x41) * 3: TX Buffer 1, Start at TXB1D0 (0x46) * 4: TX Buffer 2, Start at TXB2SIDH (0x51) * 5: TX Buffer 2, Start at TXB2D0 (0x56) */ static int mcp2515_cmd_load_tx_buffer(const struct device *dev, uint8_t abc, uint8_t *buf_data, uint8_t buf_len) { const struct mcp2515_config *dev_cfg = dev->config; __ASSERT(abc <= 5, "abc <= 5"); uint8_t cmd_buf[] = { MCP2515_OPCODE_LOAD_TX_BUFFER | abc }; struct spi_buf tx_buf[] = { { .buf = cmd_buf, .len = sizeof(cmd_buf) }, { .buf = buf_data, .len = buf_len } }; const struct spi_buf_set tx = { .buffers = tx_buf, .count = ARRAY_SIZE(tx_buf) }; return spi_write_dt(&dev_cfg->bus, &tx); } /* * Request-to-Send Instruction * * Parameter nnn is the combination of bits at positions 0, 1 and 2 in the RTS * opcode that respectively initiate transmission for buffers TXB0, TXB1 and * TXB2. 
*/ static int mcp2515_cmd_rts(const struct device *dev, uint8_t nnn) { const struct mcp2515_config *dev_cfg = dev->config; __ASSERT(nnn < BIT(MCP2515_TX_CNT), "nnn < BIT(MCP2515_TX_CNT)"); uint8_t cmd_buf[] = { MCP2515_OPCODE_RTS | nnn }; struct spi_buf tx_buf[] = { { .buf = cmd_buf, .len = sizeof(cmd_buf) } }; const struct spi_buf_set tx = { .buffers = tx_buf, .count = ARRAY_SIZE(tx_buf) }; return spi_write_dt(&dev_cfg->bus, &tx); } static int mcp2515_cmd_read_reg(const struct device *dev, uint8_t reg_addr, uint8_t *buf_data, uint8_t buf_len) { const struct mcp2515_config *dev_cfg = dev->config; uint8_t cmd_buf[] = { MCP2515_OPCODE_READ, reg_addr }; struct spi_buf tx_buf[] = { { .buf = cmd_buf, .len = sizeof(cmd_buf) }, { .buf = NULL, .len = buf_len } }; const struct spi_buf_set tx = { .buffers = tx_buf, .count = ARRAY_SIZE(tx_buf) }; struct spi_buf rx_buf[] = { { .buf = NULL, .len = sizeof(cmd_buf) }, { .buf = buf_data, .len = buf_len } }; const struct spi_buf_set rx = { .buffers = rx_buf, .count = ARRAY_SIZE(rx_buf) }; return spi_transceive_dt(&dev_cfg->bus, &tx, &rx); } /* * Read RX Buffer instruction * * When reading a receive buffer, reduces the overhead of a normal READ * command by placing the Address Pointer at one of four locations selected by * parameter nm: * 0: Receive Buffer 0, Start at RXB0SIDH (0x61) * 1: Receive Buffer 0, Start at RXB0D0 (0x66) * 2: Receive Buffer 1, Start at RXB1SIDH (0x71) * 3: Receive Buffer 1, Start at RXB1D0 (0x76) */ static int mcp2515_cmd_read_rx_buffer(const struct device *dev, uint8_t nm, uint8_t *buf_data, uint8_t buf_len) { const struct mcp2515_config *dev_cfg = dev->config; __ASSERT(nm <= 0x03, "nm <= 0x03"); uint8_t cmd_buf[] = { MCP2515_OPCODE_READ_RX_BUFFER | (nm << 1) }; struct spi_buf tx_buf[] = { { .buf = cmd_buf, .len = sizeof(cmd_buf) }, { .buf = NULL, .len = buf_len } }; const struct spi_buf_set tx = { .buffers = tx_buf, .count = ARRAY_SIZE(tx_buf) }; struct spi_buf rx_buf[] = { { .buf = NULL, .len = sizeof(cmd_buf) }, { .buf = buf_data, .len = buf_len } }; const struct spi_buf_set rx = { .buffers = rx_buf, .count = ARRAY_SIZE(rx_buf) }; return spi_transceive_dt(&dev_cfg->bus, &tx, &rx); } static void mcp2515_convert_canframe_to_mcp2515frame(const struct can_frame *source, uint8_t *target) { uint8_t rtr; uint8_t dlc; uint8_t data_idx; if ((source->flags & CAN_FRAME_IDE) != 0) { target[MCP2515_FRAME_OFFSET_SIDH] = source->id >> 21; target[MCP2515_FRAME_OFFSET_SIDL] = (((source->id >> 18) & 0x07) << 5) | (BIT(3)) | ((source->id >> 16) & 0x03); target[MCP2515_FRAME_OFFSET_EID8] = source->id >> 8; target[MCP2515_FRAME_OFFSET_EID0] = source->id; } else { target[MCP2515_FRAME_OFFSET_SIDH] = source->id >> 3; target[MCP2515_FRAME_OFFSET_SIDL] = (source->id & 0x07) << 5; } rtr = (source->flags & CAN_FRAME_RTR) != 0 ? 
BIT(6) : 0; dlc = (source->dlc) & 0x0F; target[MCP2515_FRAME_OFFSET_DLC] = rtr | dlc; if (rtr == 0U) { for (data_idx = 0U; data_idx < dlc; data_idx++) { target[MCP2515_FRAME_OFFSET_D0 + data_idx] = source->data[data_idx]; } } } static void mcp2515_convert_mcp2515frame_to_canframe(const uint8_t *source, struct can_frame *target) { uint8_t data_idx; memset(target, 0, sizeof(*target)); if (source[MCP2515_FRAME_OFFSET_SIDL] & BIT(3)) { target->flags |= CAN_FRAME_IDE; target->id = (source[MCP2515_FRAME_OFFSET_SIDH] << 21) | ((source[MCP2515_FRAME_OFFSET_SIDL] >> 5) << 18) | ((source[MCP2515_FRAME_OFFSET_SIDL] & 0x03) << 16) | (source[MCP2515_FRAME_OFFSET_EID8] << 8) | source[MCP2515_FRAME_OFFSET_EID0]; } else { target->id = (source[MCP2515_FRAME_OFFSET_SIDH] << 3) | (source[MCP2515_FRAME_OFFSET_SIDL] >> 5); } target->dlc = source[MCP2515_FRAME_OFFSET_DLC] & 0x0F; if ((source[MCP2515_FRAME_OFFSET_DLC] & BIT(6)) != 0) { target->flags |= CAN_FRAME_RTR; } else { for (data_idx = 0U; data_idx < target->dlc; data_idx++) { target->data[data_idx] = source[MCP2515_FRAME_OFFSET_D0 + data_idx]; } } } const int mcp2515_set_mode_int(const struct device *dev, uint8_t mcp2515_mode) { int retries = MCP2515_MODE_CHANGE_RETRIES; uint8_t canstat; mcp2515_cmd_bit_modify(dev, MCP2515_ADDR_CANCTRL, MCP2515_CANCTRL_MODE_MASK, mcp2515_mode << MCP2515_CANCTRL_MODE_POS); mcp2515_cmd_read_reg(dev, MCP2515_ADDR_CANSTAT, &canstat, 1); while (((canstat & MCP2515_CANSTAT_MODE_MASK) >> MCP2515_CANSTAT_MODE_POS) != mcp2515_mode) { if (--retries < 0) { LOG_ERR("Timeout trying to set MCP2515 operation mode"); return -EIO; } k_sleep(MCP2515_MODE_CHANGE_DELAY); mcp2515_cmd_read_reg(dev, MCP2515_ADDR_CANSTAT, &canstat, 1); } return 0; } static void mcp2515_tx_done(const struct device *dev, uint8_t tx_idx, int status) { struct mcp2515_data *dev_data = dev->data; can_tx_callback_t callback = dev_data->tx_cb[tx_idx].cb; if (callback != NULL) { callback(dev, status, dev_data->tx_cb[tx_idx].cb_arg); dev_data->tx_cb[tx_idx].cb = NULL; k_mutex_lock(&dev_data->mutex, K_FOREVER); dev_data->tx_busy_map &= ~BIT(tx_idx); k_mutex_unlock(&dev_data->mutex); k_sem_give(&dev_data->tx_sem); } } static int mcp2515_get_core_clock(const struct device *dev, uint32_t *rate) { const struct mcp2515_config *dev_cfg = dev->config; *rate = dev_cfg->osc_freq / 2; return 0; } static int mcp2515_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(ide); return CONFIG_CAN_MAX_FILTER; } static int mcp2515_set_timing(const struct device *dev, const struct can_timing *timing) { struct mcp2515_data *dev_data = dev->data; int ret; if (!timing) { return -EINVAL; } if (dev_data->common.started) { return -EBUSY; } /* CNF3, CNF2, CNF1, CANINTE */ uint8_t config_buf[4]; /* CNF1; SJW<7:6> | BRP<5:0> */ __ASSERT(timing->prescaler > 0, "Prescaler should be bigger than zero"); uint8_t brp = timing->prescaler - 1; uint8_t sjw = (timing->sjw - 1) << 6; uint8_t cnf1 = sjw | brp; /* CNF2; BTLMODE<7>|SAM<6>|PHSEG1<5:3>|PRSEG<2:0> */ const uint8_t btlmode = 1 << 7; const uint8_t sam = 0 << 6; const uint8_t phseg1 = (timing->phase_seg1 - 1) << 3; const uint8_t prseg = (timing->prop_seg - 1); const uint8_t cnf2 = btlmode | sam | phseg1 | prseg; /* CNF3; SOF<7>|WAKFIL<6>|UND<5:3>|PHSEG2<2:0> */ const uint8_t sof = 0 << 7; const uint8_t wakfil = 0 << 6; const uint8_t und = 0 << 3; const uint8_t phseg2 = (timing->phase_seg2 - 1); const uint8_t cnf3 = sof | wakfil | und | phseg2; const uint8_t caninte = MCP2515_INTE_RX0IE | MCP2515_INTE_RX1IE | MCP2515_INTE_TX0IE | 
MCP2515_INTE_TX1IE | MCP2515_INTE_TX2IE | MCP2515_INTE_ERRIE; /* Receive everything, filtering done in driver, RXB0 roll over into * RXB1 */ const uint8_t rx0_ctrl = BIT(6) | BIT(5) | BIT(2); const uint8_t rx1_ctrl = BIT(6) | BIT(5); config_buf[0] = cnf3; config_buf[1] = cnf2; config_buf[2] = cnf1; config_buf[3] = caninte; k_mutex_lock(&dev_data->mutex, K_FOREVER); ret = mcp2515_cmd_write_reg(dev, MCP2515_ADDR_CNF3, config_buf, sizeof(config_buf)); if (ret < 0) { LOG_ERR("Failed to write the configuration [%d]", ret); goto done; } ret = mcp2515_cmd_bit_modify(dev, MCP2515_ADDR_RXB0CTRL, rx0_ctrl, rx0_ctrl); if (ret < 0) { LOG_ERR("Failed to write RXB0CTRL [%d]", ret); goto done; } ret = mcp2515_cmd_bit_modify(dev, MCP2515_ADDR_RXB1CTRL, rx1_ctrl, rx1_ctrl); if (ret < 0) { LOG_ERR("Failed to write RXB1CTRL [%d]", ret); goto done; } done: k_mutex_unlock(&dev_data->mutex); return ret; } static int mcp2515_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LISTENONLY | CAN_MODE_LOOPBACK; return 0; } static int mcp2515_start(const struct device *dev) { const struct mcp2515_config *dev_cfg = dev->config; struct mcp2515_data *dev_data = dev->data; int ret; if (dev_data->common.started) { return -EALREADY; } if (dev_cfg->common.phy != NULL) { ret = can_transceiver_enable(dev_cfg->common.phy, dev_data->common.mode); if (ret != 0) { LOG_ERR("Failed to enable CAN transceiver [%d]", ret); return ret; } } CAN_STATS_RESET(dev); k_mutex_lock(&dev_data->mutex, K_FOREVER); ret = mcp2515_set_mode_int(dev, dev_data->mcp2515_mode); if (ret < 0) { LOG_ERR("Failed to set the mode [%d]", ret); if (dev_cfg->common.phy != NULL) { /* Attempt to disable the CAN transceiver in case of error */ (void)can_transceiver_disable(dev_cfg->common.phy); } } else { dev_data->common.started = true; } k_mutex_unlock(&dev_data->mutex); return ret; } static int mcp2515_stop(const struct device *dev) { const struct mcp2515_config *dev_cfg = dev->config; struct mcp2515_data *dev_data = dev->data; int ret; int i; if (!dev_data->common.started) { return -EALREADY; } k_mutex_lock(&dev_data->mutex, K_FOREVER); /* Abort any pending transmissions before entering configuration mode */ mcp2515_cmd_bit_modify(dev, MCP2515_ADDR_TXB0CTRL, MCP2515_TXBNCTRL_TXREQ_MASK, 0); #if MCP2515_TX_CNT == 2 mcp2515_cmd_bit_modify(dev, MCP2515_ADDR_TXB1CTRL, MCP2515_TXBNCTRL_TXREQ_MASK, 0); #endif /* MCP2515_TX_CNT == 2 */ #if MCP2515_TX_CNT == 3 mcp2515_cmd_bit_modify(dev, MCP2515_ADDR_TXB2CTRL, MCP2515_TXBNCTRL_TXREQ_MASK, 0); #endif /* MCP2515_TX_CNT == 3 */ ret = mcp2515_set_mode_int(dev, MCP2515_MODE_CONFIGURATION); if (ret < 0) { LOG_ERR("Failed to enter configuration mode [%d]", ret); k_mutex_unlock(&dev_data->mutex); return ret; } dev_data->common.started = false; k_mutex_unlock(&dev_data->mutex); for (i = 0; i < MCP2515_TX_CNT; i++) { mcp2515_tx_done(dev, i, -ENETDOWN); } if (dev_cfg->common.phy != NULL) { ret = can_transceiver_disable(dev_cfg->common.phy); if (ret != 0) { LOG_ERR("Failed to disable CAN transceiver [%d]", ret); return ret; } } return 0; } static int mcp2515_set_mode(const struct device *dev, can_mode_t mode) { struct mcp2515_data *dev_data = dev->data; if (dev_data->common.started) { return -EBUSY; } switch (mode) { case CAN_MODE_NORMAL: dev_data->mcp2515_mode = MCP2515_MODE_NORMAL; break; case CAN_MODE_LISTENONLY: dev_data->mcp2515_mode = MCP2515_MODE_SILENT; break; case CAN_MODE_LOOPBACK: dev_data->mcp2515_mode = MCP2515_MODE_LOOPBACK; break; default: 
LOG_ERR("Unsupported CAN Mode %u", mode); return -ENOTSUP; } dev_data->common.mode = mode; return 0; } static int mcp2515_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { struct mcp2515_data *dev_data = dev->data; uint8_t tx_idx = 0U; uint8_t abc; uint8_t nnn; uint8_t len; uint8_t tx_frame[MCP2515_FRAME_LEN]; if (frame->dlc > CAN_MAX_DLC) { LOG_ERR("DLC of %d exceeds maximum (%d)", frame->dlc, CAN_MAX_DLC); return -EINVAL; } if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if (!dev_data->common.started) { return -ENETDOWN; } if (k_sem_take(&dev_data->tx_sem, timeout) != 0) { return -EAGAIN; } k_mutex_lock(&dev_data->mutex, K_FOREVER); /* find a free tx slot */ for (; tx_idx < MCP2515_TX_CNT; tx_idx++) { if ((BIT(tx_idx) & dev_data->tx_busy_map) == 0) { dev_data->tx_busy_map |= BIT(tx_idx); break; } } k_mutex_unlock(&dev_data->mutex); if (tx_idx == MCP2515_TX_CNT) { LOG_WRN("no free tx slot available"); return -EIO; } dev_data->tx_cb[tx_idx].cb = callback; dev_data->tx_cb[tx_idx].cb_arg = user_data; mcp2515_convert_canframe_to_mcp2515frame(frame, tx_frame); /* Address Pointer selection */ abc = 2 * tx_idx; /* Calculate minimum length to transfer */ len = sizeof(tx_frame) - CAN_MAX_DLC + frame->dlc; mcp2515_cmd_load_tx_buffer(dev, abc, tx_frame, len); /* request tx slot transmission */ nnn = BIT(tx_idx); mcp2515_cmd_rts(dev, nnn); return 0; } static int mcp2515_add_rx_filter(const struct device *dev, can_rx_callback_t rx_cb, void *cb_arg, const struct can_filter *filter) { struct mcp2515_data *dev_data = dev->data; int filter_id = 0; __ASSERT(rx_cb != NULL, "response_ptr can not be null"); if ((filter->flags & ~(CAN_FILTER_IDE)) != 0) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } k_mutex_lock(&dev_data->mutex, K_FOREVER); /* find free filter */ while ((BIT(filter_id) & dev_data->filter_usage) && (filter_id < CONFIG_CAN_MAX_FILTER)) { filter_id++; } /* setup filter */ if (filter_id < CONFIG_CAN_MAX_FILTER) { dev_data->filter_usage |= BIT(filter_id); dev_data->filter[filter_id] = *filter; dev_data->rx_cb[filter_id] = rx_cb; dev_data->cb_arg[filter_id] = cb_arg; } else { filter_id = -ENOSPC; } k_mutex_unlock(&dev_data->mutex); return filter_id; } static void mcp2515_remove_rx_filter(const struct device *dev, int filter_id) { struct mcp2515_data *dev_data = dev->data; if (filter_id < 0 || filter_id >= CONFIG_CAN_MAX_FILTER) { LOG_ERR("filter ID %d out of bounds", filter_id); return; } k_mutex_lock(&dev_data->mutex, K_FOREVER); dev_data->filter_usage &= ~BIT(filter_id); k_mutex_unlock(&dev_data->mutex); } static void mcp2515_set_state_change_callback(const struct device *dev, can_state_change_callback_t cb, void *user_data) { struct mcp2515_data *dev_data = dev->data; dev_data->common.state_change_cb = cb; dev_data->common.state_change_cb_user_data = user_data; } static void mcp2515_rx_filter(const struct device *dev, struct can_frame *frame) { struct mcp2515_data *dev_data = dev->data; uint8_t filter_id = 0U; can_rx_callback_t callback; struct can_frame tmp_frame; #ifndef CONFIG_CAN_ACCEPT_RTR if ((frame->flags & CAN_FRAME_RTR) != 0U) { return; } #endif /* !CONFIG_CAN_ACCEPT_RTR */ k_mutex_lock(&dev_data->mutex, K_FOREVER); for (; filter_id < CONFIG_CAN_MAX_FILTER; filter_id++) { if (!(BIT(filter_id) & dev_data->filter_usage)) { continue; /* filter slot empty */ } if 
(!can_frame_matches_filter(frame, &dev_data->filter[filter_id])) { continue; /* filter did not match */ } callback = dev_data->rx_cb[filter_id]; /*Make a temporary copy in case the user modifies the message*/ tmp_frame = *frame; callback(dev, &tmp_frame, dev_data->cb_arg[filter_id]); } k_mutex_unlock(&dev_data->mutex); } static void mcp2515_rx(const struct device *dev, uint8_t rx_idx) { __ASSERT(rx_idx < MCP2515_RX_CNT, "rx_idx < MCP2515_RX_CNT"); struct can_frame frame; uint8_t rx_frame[MCP2515_FRAME_LEN]; uint8_t nm; /* Address Pointer selection */ nm = 2 * rx_idx; /* Fetch rx buffer */ mcp2515_cmd_read_rx_buffer(dev, nm, rx_frame, sizeof(rx_frame)); mcp2515_convert_mcp2515frame_to_canframe(rx_frame, &frame); mcp2515_rx_filter(dev, &frame); } static int mcp2515_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { struct mcp2515_data *dev_data = dev->data; uint8_t eflg; uint8_t err_cnt_buf[2]; int ret; ret = mcp2515_cmd_read_reg(dev, MCP2515_ADDR_EFLG, &eflg, sizeof(eflg)); if (ret < 0) { LOG_ERR("Failed to read error register [%d]", ret); return -EIO; } if (state != NULL) { if (!dev_data->common.started) { *state = CAN_STATE_STOPPED; } else if (eflg & MCP2515_EFLG_TXBO) { *state = CAN_STATE_BUS_OFF; } else if ((eflg & MCP2515_EFLG_RXEP) || (eflg & MCP2515_EFLG_TXEP)) { *state = CAN_STATE_ERROR_PASSIVE; } else if (eflg & MCP2515_EFLG_EWARN) { *state = CAN_STATE_ERROR_WARNING; } else { *state = CAN_STATE_ERROR_ACTIVE; } } if (err_cnt != NULL) { ret = mcp2515_cmd_read_reg(dev, MCP2515_ADDR_TEC, err_cnt_buf, sizeof(err_cnt_buf)); if (ret < 0) { LOG_ERR("Failed to read error counters [%d]", ret); return -EIO; } err_cnt->tx_err_cnt = err_cnt_buf[0]; err_cnt->rx_err_cnt = err_cnt_buf[1]; } #ifdef CONFIG_CAN_STATS if ((eflg & (MCP2515_EFLG_RX0OVR | MCP2515_EFLG_RX1OVR)) != 0U) { CAN_STATS_RX_OVERRUN_INC(dev); ret = mcp2515_cmd_bit_modify(dev, MCP2515_ADDR_EFLG, eflg & (MCP2515_EFLG_RX0OVR | MCP2515_EFLG_RX1OVR), 0U); if (ret < 0) { LOG_ERR("Failed to clear RX overrun flags [%d]", ret); return -EIO; } } #endif /* CONFIG_CAN_STATS */ return 0; } static void mcp2515_handle_errors(const struct device *dev) { struct mcp2515_data *dev_data = dev->data; can_state_change_callback_t state_change_cb = dev_data->common.state_change_cb; void *state_change_cb_data = dev_data->common.state_change_cb_user_data; enum can_state state; struct can_bus_err_cnt err_cnt; int err; err = mcp2515_get_state(dev, &state, state_change_cb ? 
&err_cnt : NULL); if (err != 0) { LOG_ERR("Failed to get CAN controller state [%d]", err); return; } if (state_change_cb && dev_data->old_state != state) { dev_data->old_state = state; state_change_cb(dev, state, err_cnt, state_change_cb_data); } } static void mcp2515_handle_interrupts(const struct device *dev) { const struct mcp2515_config *dev_cfg = dev->config; int ret; uint8_t canintf; /* Loop until INT pin is inactive (all interrupt flags handled) */ while (1) { ret = mcp2515_cmd_read_reg(dev, MCP2515_ADDR_CANINTF, &canintf, 1); if (ret != 0) { LOG_ERR("Couldn't read INTF register %d", ret); continue; } if (canintf == 0) { /* No interrupt flags set */ break; } if (canintf & MCP2515_CANINTF_RX0IF) { mcp2515_rx(dev, 0); /* RX0IF flag cleared automatically during read */ canintf &= ~MCP2515_CANINTF_RX0IF; } if (canintf & MCP2515_CANINTF_RX1IF) { mcp2515_rx(dev, 1); /* RX1IF flag cleared automatically during read */ canintf &= ~MCP2515_CANINTF_RX1IF; } if (canintf & MCP2515_CANINTF_TX0IF) { mcp2515_tx_done(dev, 0, 0); } if (canintf & MCP2515_CANINTF_TX1IF) { mcp2515_tx_done(dev, 1, 0); } if (canintf & MCP2515_CANINTF_TX2IF) { mcp2515_tx_done(dev, 2, 0); } if (canintf & MCP2515_CANINTF_ERRIF) { mcp2515_handle_errors(dev); } if (canintf != 0) { /* Clear remaining flags */ mcp2515_cmd_bit_modify(dev, MCP2515_ADDR_CANINTF, canintf, ~canintf); } /* Break from loop if INT pin is inactive */ ret = gpio_pin_get_dt(&dev_cfg->int_gpio); if (ret < 0) { LOG_ERR("Couldn't read INT pin"); } else if (ret == 0) { /* All interrupt flags handled */ break; } } } static void mcp2515_int_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); const struct device *dev = p1; struct mcp2515_data *dev_data = dev->data; while (1) { k_sem_take(&dev_data->int_sem, K_FOREVER); mcp2515_handle_interrupts(dev); } } static void mcp2515_int_gpio_callback(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { struct mcp2515_data *dev_data = CONTAINER_OF(cb, struct mcp2515_data, int_gpio_cb); k_sem_give(&dev_data->int_sem); } static const struct can_driver_api can_api_funcs = { .get_capabilities = mcp2515_get_capabilities, .set_timing = mcp2515_set_timing, .start = mcp2515_start, .stop = mcp2515_stop, .set_mode = mcp2515_set_mode, .send = mcp2515_send, .add_rx_filter = mcp2515_add_rx_filter, .remove_rx_filter = mcp2515_remove_rx_filter, .get_state = mcp2515_get_state, .set_state_change_callback = mcp2515_set_state_change_callback, .get_core_clock = mcp2515_get_core_clock, .get_max_filters = mcp2515_get_max_filters, .timing_min = { .sjw = 0x1, .prop_seg = 0x01, .phase_seg1 = 0x01, .phase_seg2 = 0x02, .prescaler = 0x01 }, .timing_max = { .sjw = 0x04, .prop_seg = 0x08, .phase_seg1 = 0x08, .phase_seg2 = 0x08, .prescaler = 0x40 } }; static int mcp2515_init(const struct device *dev) { const struct mcp2515_config *dev_cfg = dev->config; struct mcp2515_data *dev_data = dev->data; struct can_timing timing = { 0 }; k_tid_t tid; int ret; k_sem_init(&dev_data->int_sem, 0, 1); k_mutex_init(&dev_data->mutex); k_sem_init(&dev_data->tx_sem, MCP2515_TX_CNT, MCP2515_TX_CNT); if (dev_cfg->common.phy != NULL) { if (!device_is_ready(dev_cfg->common.phy)) { LOG_ERR("CAN transceiver not ready"); return -ENODEV; } } if (!spi_is_ready_dt(&dev_cfg->bus)) { LOG_ERR("SPI bus %s not ready", dev_cfg->bus.bus->name); return -ENODEV; } /* Reset MCP2515 */ if (mcp2515_cmd_soft_reset(dev)) { LOG_ERR("Soft-reset failed"); return -EIO; } /* Initialize interrupt handling */ if (!gpio_is_ready_dt(&dev_cfg->int_gpio)) { 
LOG_ERR("Interrupt GPIO port not ready"); return -ENODEV; } if (gpio_pin_configure_dt(&dev_cfg->int_gpio, GPIO_INPUT)) { LOG_ERR("Unable to configure interrupt GPIO"); return -EINVAL; } gpio_init_callback(&(dev_data->int_gpio_cb), mcp2515_int_gpio_callback, BIT(dev_cfg->int_gpio.pin)); if (gpio_add_callback(dev_cfg->int_gpio.port, &(dev_data->int_gpio_cb))) { return -EINVAL; } if (gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio, GPIO_INT_EDGE_TO_ACTIVE)) { return -EINVAL; } tid = k_thread_create(&dev_data->int_thread, dev_data->int_thread_stack, dev_cfg->int_thread_stack_size, mcp2515_int_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(dev_cfg->int_thread_priority), 0, K_NO_WAIT); (void)k_thread_name_set(tid, "mcp2515"); (void)memset(dev_data->rx_cb, 0, sizeof(dev_data->rx_cb)); (void)memset(dev_data->filter, 0, sizeof(dev_data->filter)); dev_data->old_state = CAN_STATE_ERROR_ACTIVE; ret = can_calc_timing(dev, &timing, dev_cfg->common.bitrate, dev_cfg->common.sample_point); if (ret == -EINVAL) { LOG_ERR("Can't find timing for given param"); return -EIO; } LOG_DBG("Presc: %d, BS1: %d, BS2: %d", timing.prescaler, timing.phase_seg1, timing.phase_seg2); LOG_DBG("Sample-point err : %d", ret); k_usleep(MCP2515_OSC_STARTUP_US); ret = can_set_timing(dev, &timing); if (ret) { return ret; } ret = can_set_mode(dev, CAN_MODE_NORMAL); return ret; } #define MCP2515_INIT(inst) \ static K_KERNEL_STACK_DEFINE(mcp2515_int_thread_stack_##inst, \ CONFIG_CAN_MCP2515_INT_THREAD_STACK_SIZE); \ \ static struct mcp2515_data mcp2515_data_##inst = { \ .int_thread_stack = mcp2515_int_thread_stack_##inst, \ .tx_busy_map = 0U, \ .filter_usage = 0U, \ }; \ \ static const struct mcp2515_config mcp2515_config_##inst = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, 1000000), \ .bus = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8), 0), \ .int_gpio = GPIO_DT_SPEC_INST_GET(inst, int_gpios), \ .int_thread_stack_size = CONFIG_CAN_MCP2515_INT_THREAD_STACK_SIZE, \ .int_thread_priority = CONFIG_CAN_MCP2515_INT_THREAD_PRIO, \ .osc_freq = DT_INST_PROP(inst, osc_freq), \ }; \ \ CAN_DEVICE_DT_INST_DEFINE(inst, mcp2515_init, NULL, &mcp2515_data_##inst, \ &mcp2515_config_##inst, POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_api_funcs); DT_INST_FOREACH_STATUS_OKAY(MCP2515_INIT) ```
/content/code_sandbox/drivers/can/can_mcp2515.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,410
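Applications never call the MCP2515 functions above directly; they use the generic Zephyr CAN API, which dispatches through `can_api_funcs`. A minimal usage sketch follows. It assumes the board devicetree selects the controller as the `zephyr,canbus` chosen node, which is an assumption of this example rather than something stated in the driver.

```c
/* Minimal usage sketch for the generic CAN API on top of a controller driver
 * such as the MCP2515. Assumes the devicetree chosen node "zephyr,canbus"
 * points at the controller; error handling is abbreviated.
 */
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>

int send_one_frame(void)
{
	const struct device *can_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
	struct can_frame frame = {
		.id = 0x123,
		.dlc = 2,
		.data = { 0xAB, 0xCD },
	};
	int err;

	if (!device_is_ready(can_dev)) {
		return -ENODEV;
	}

	/* Leave configuration mode and go on bus */
	err = can_start(can_dev);
	if (err != 0 && err != -EALREADY) {
		return err;
	}

	/* The timeout bounds the wait for a free TX mailbox; with a NULL
	 * callback the call then blocks until the frame has been sent.
	 */
	return can_send(can_dev, &frame, K_MSEC(100), NULL, NULL);
}
```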
```unknown
config CAN_NXP_S32_CANXL
	bool "NXP S32 CANXL driver"
	default y
	depends on DT_HAS_NXP_S32_CANXL_ENABLED
	select CLOCK_CONTROL
	help
	  Enable support for NXP S32 CANXL driver.

if CAN_NXP_S32_CANXL

config CAN_NXP_S32_RX_FIFO
	bool "NXP S32 CANXL uses RX FIFO"
	default y
	help
	  If this is enabled, NXP S32 CANXL uses RX FIFO. Otherwise NXP S32
	  CANXL uses RX Message Descriptor.

config CAN_NXP_S32_MAX_RX
	int "Maximum number of RX descriptors"
	default 16
	range 1 32 if CAN_NXP_S32_RX_FIFO
	range 1 128 if !CAN_NXP_S32_RX_FIFO
	help
	  Maximum number of RX descriptors.

config CAN_NXP_S32_MAX_TX
	int "Maximum number of TX descriptors"
	default 16
	range 1 128
	help
	  Maximum number of TX descriptors.

endif # CAN_NXP_S32_CANXL
```
/content/code_sandbox/drivers/can/Kconfig.nxp_s32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
224
```c /* * */ #define DT_DRV_COMPAT zephyr_can_loopback #include <stdbool.h> #include <string.h> #include <zephyr/drivers/can.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(can_loopback, CONFIG_CAN_LOG_LEVEL); struct can_loopback_frame { struct can_frame frame; can_tx_callback_t cb; void *cb_arg; }; struct can_loopback_filter { can_rx_callback_t rx_cb; void *cb_arg; struct can_filter filter; }; struct can_loopback_config { const struct can_driver_config common; }; struct can_loopback_data { struct can_driver_data common; struct can_loopback_filter filters[CONFIG_CAN_MAX_FILTER]; struct k_mutex mtx; struct k_msgq tx_msgq; char msgq_buffer[CONFIG_CAN_LOOPBACK_TX_MSGQ_SIZE * sizeof(struct can_loopback_frame)]; struct k_thread tx_thread_data; K_KERNEL_STACK_MEMBER(tx_thread_stack, CONFIG_CAN_LOOPBACK_TX_THREAD_STACK_SIZE); }; static void receive_frame(const struct device *dev, const struct can_frame *frame, struct can_loopback_filter *filter) { struct can_frame frame_tmp = *frame; LOG_DBG("Receiving %d bytes. Id: 0x%x, ID type: %s %s", frame->dlc, frame->id, (frame->flags & CAN_FRAME_IDE) != 0 ? "extended" : "standard", (frame->flags & CAN_FRAME_RTR) != 0 ? ", RTR frame" : ""); filter->rx_cb(dev, &frame_tmp, filter->cb_arg); } static void tx_thread(void *arg1, void *arg2, void *arg3) { const struct device *dev = arg1; struct can_loopback_data *data = dev->data; struct can_loopback_frame frame; struct can_loopback_filter *filter; int ret; ARG_UNUSED(arg2); ARG_UNUSED(arg3); while (1) { ret = k_msgq_get(&data->tx_msgq, &frame, K_FOREVER); if (ret < 0) { LOG_DBG("Pend on TX queue returned without valid frame (err %d)", ret); continue; } frame.cb(dev, 0, frame.cb_arg); if ((data->common.mode & CAN_MODE_LOOPBACK) == 0U) { continue; } #ifndef CONFIG_CAN_ACCEPT_RTR if ((frame.frame.flags & CAN_FRAME_RTR) != 0U) { continue; } #endif /* !CONFIG_CAN_ACCEPT_RTR */ k_mutex_lock(&data->mtx, K_FOREVER); for (int i = 0; i < CONFIG_CAN_MAX_FILTER; i++) { filter = &data->filters[i]; if (filter->rx_cb != NULL && can_frame_matches_filter(&frame.frame, &filter->filter)) { receive_frame(dev, &frame.frame, filter); } } k_mutex_unlock(&data->mtx); } } static int can_loopback_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { struct can_loopback_data *data = dev->data; struct can_loopback_frame loopback_frame; uint8_t max_dlc = CAN_MAX_DLC; int ret; LOG_DBG("Sending %d bytes on %s. Id: 0x%x, ID type: %s %s", frame->dlc, dev->name, frame->id, (frame->flags & CAN_FRAME_IDE) != 0 ? "extended" : "standard", (frame->flags & CAN_FRAME_RTR) != 0 ? 
", RTR frame" : ""); #ifdef CONFIG_CAN_FD_MODE if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR | CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if ((frame->flags & CAN_FRAME_FDF) != 0) { if ((data->common.mode & CAN_MODE_FD) == 0U) { return -ENOTSUP; } max_dlc = CANFD_MAX_DLC; } #else /* CONFIG_CAN_FD_MODE */ if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } #endif /* !CONFIG_CAN_FD_MODE */ if (frame->dlc > max_dlc) { LOG_ERR("DLC of %d exceeds maximum (%d)", frame->dlc, max_dlc); return -EINVAL; } if (!data->common.started) { return -ENETDOWN; } loopback_frame.frame = *frame; loopback_frame.cb = callback; loopback_frame.cb_arg = user_data; ret = k_msgq_put(&data->tx_msgq, &loopback_frame, timeout); if (ret < 0) { LOG_DBG("TX queue full (err %d)", ret); return -EAGAIN; } return 0; } static inline int get_free_filter(struct can_loopback_filter *filters) { for (int i = 0; i < CONFIG_CAN_MAX_FILTER; i++) { if (filters[i].rx_cb == NULL) { return i; } } return -ENOSPC; } static int can_loopback_add_rx_filter(const struct device *dev, can_rx_callback_t cb, void *cb_arg, const struct can_filter *filter) { struct can_loopback_data *data = dev->data; struct can_loopback_filter *loopback_filter; int filter_id; LOG_DBG("Setting filter ID: 0x%x, mask: 0x%x", filter->id, filter->mask); if ((filter->flags & ~(CAN_FILTER_IDE)) != 0) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } k_mutex_lock(&data->mtx, K_FOREVER); filter_id = get_free_filter(data->filters); if (filter_id < 0) { LOG_ERR("No free filter left"); k_mutex_unlock(&data->mtx); return filter_id; } loopback_filter = &data->filters[filter_id]; loopback_filter->rx_cb = cb; loopback_filter->cb_arg = cb_arg; loopback_filter->filter = *filter; k_mutex_unlock(&data->mtx); LOG_DBG("Filter added. 
ID: %d", filter_id); return filter_id; } static void can_loopback_remove_rx_filter(const struct device *dev, int filter_id) { struct can_loopback_data *data = dev->data; if (filter_id < 0 || filter_id >= ARRAY_SIZE(data->filters)) { LOG_ERR("filter ID %d out-of-bounds", filter_id); return; } LOG_DBG("Remove filter ID: %d", filter_id); k_mutex_lock(&data->mtx, K_FOREVER); data->filters[filter_id].rx_cb = NULL; k_mutex_unlock(&data->mtx); } static int can_loopback_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK; #if CONFIG_CAN_FD_MODE *cap |= CAN_MODE_FD; #endif /* CONFIG_CAN_FD_MODE */ return 0; } static int can_loopback_start(const struct device *dev) { struct can_loopback_data *data = dev->data; if (data->common.started) { return -EALREADY; } data->common.started = true; return 0; } static int can_loopback_stop(const struct device *dev) { struct can_loopback_data *data = dev->data; if (!data->common.started) { return -EALREADY; } data->common.started = false; k_msgq_purge(&data->tx_msgq); return 0; } static int can_loopback_set_mode(const struct device *dev, can_mode_t mode) { struct can_loopback_data *data = dev->data; if (data->common.started) { return -EBUSY; } #ifdef CONFIG_CAN_FD_MODE if ((mode & ~(CAN_MODE_LOOPBACK | CAN_MODE_FD)) != 0) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } #else if ((mode & ~(CAN_MODE_LOOPBACK)) != 0) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } #endif /* CONFIG_CAN_FD_MODE */ data->common.mode = mode; return 0; } static int can_loopback_set_timing(const struct device *dev, const struct can_timing *timing) { struct can_loopback_data *data = dev->data; ARG_UNUSED(timing); if (data->common.started) { return -EBUSY; } return 0; } #ifdef CONFIG_CAN_FD_MODE static int can_loopback_set_timing_data(const struct device *dev, const struct can_timing *timing) { struct can_loopback_data *data = dev->data; ARG_UNUSED(timing); if (data->common.started) { return -EBUSY; } return 0; } #endif /* CONFIG_CAN_FD_MODE */ static int can_loopback_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { struct can_loopback_data *data = dev->data; if (state != NULL) { if (data->common.started) { *state = CAN_STATE_ERROR_ACTIVE; } else { *state = CAN_STATE_STOPPED; } } if (err_cnt) { err_cnt->tx_err_cnt = 0; err_cnt->rx_err_cnt = 0; } return 0; } static void can_loopback_set_state_change_callback(const struct device *dev, can_state_change_callback_t cb, void *user_data) { ARG_UNUSED(dev); ARG_UNUSED(cb); ARG_UNUSED(user_data); } static int can_loopback_get_core_clock(const struct device *dev, uint32_t *rate) { ARG_UNUSED(dev); /* Recommended CAN clock from CiA 601-3 */ *rate = MHZ(80); return 0; } static int can_loopback_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(ide); return CONFIG_CAN_MAX_FILTER; } static const struct can_driver_api can_loopback_driver_api = { .get_capabilities = can_loopback_get_capabilities, .start = can_loopback_start, .stop = can_loopback_stop, .set_mode = can_loopback_set_mode, .set_timing = can_loopback_set_timing, .send = can_loopback_send, .add_rx_filter = can_loopback_add_rx_filter, .remove_rx_filter = can_loopback_remove_rx_filter, .get_state = can_loopback_get_state, .set_state_change_callback = can_loopback_set_state_change_callback, .get_core_clock = can_loopback_get_core_clock, .get_max_filters = can_loopback_get_max_filters, /* Recommended configuration ranges from CiA 601-2 */ 
.timing_min = { .sjw = 1, .prop_seg = 0, .phase_seg1 = 2, .phase_seg2 = 2, .prescaler = 1 }, .timing_max = { .sjw = 128, .prop_seg = 0, .phase_seg1 = 256, .phase_seg2 = 128, .prescaler = 32 }, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_loopback_set_timing_data, /* Recommended configuration ranges from CiA 601-2 */ .timing_data_min = { .sjw = 1, .prop_seg = 0, .phase_seg1 = 1, .phase_seg2 = 1, .prescaler = 1 }, .timing_data_max = { .sjw = 16, .prop_seg = 0, .phase_seg1 = 32, .phase_seg2 = 16, .prescaler = 32 }, #endif /* CONFIG_CAN_FD_MODE */ }; static int can_loopback_init(const struct device *dev) { struct can_loopback_data *data = dev->data; k_tid_t tx_tid; k_mutex_init(&data->mtx); for (int i = 0; i < CONFIG_CAN_MAX_FILTER; i++) { data->filters[i].rx_cb = NULL; } k_msgq_init(&data->tx_msgq, data->msgq_buffer, sizeof(struct can_loopback_frame), CONFIG_CAN_LOOPBACK_TX_MSGQ_SIZE); tx_tid = k_thread_create(&data->tx_thread_data, data->tx_thread_stack, K_KERNEL_STACK_SIZEOF(data->tx_thread_stack), tx_thread, (void *)dev, NULL, NULL, CONFIG_CAN_LOOPBACK_TX_THREAD_PRIORITY, 0, K_NO_WAIT); if (!tx_tid) { LOG_ERR("ERROR spawning tx thread"); return -1; } k_thread_name_set(tx_tid, dev->name); return 0; } #ifdef CONFIG_CAN_FD_MODE #define CAN_LOOPBACK_MAX_BITRATE 8000000 #else /* CONFIG_CAN_FD_MODE */ #define CAN_LOOPBACK_MAX_BITRATE 1000000 #endif /* !CONFIG_CAN_FD_MODE */ #define CAN_LOOPBACK_INIT(inst) \ static const struct can_loopback_config can_loopback_config_##inst = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, CAN_LOOPBACK_MAX_BITRATE), \ }; \ \ static struct can_loopback_data can_loopback_data_##inst; \ \ CAN_DEVICE_DT_INST_DEFINE(inst, can_loopback_init, NULL, \ &can_loopback_data_##inst, \ &can_loopback_config_##inst, \ POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_loopback_driver_api); DT_INST_FOREACH_STATUS_OKAY(CAN_LOOPBACK_INIT) ```
/content/code_sandbox/drivers/can/can_loopback.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,102
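Since the loopback driver only delivers frames back to filters registered on the same controller when `CAN_MODE_LOOPBACK` is active, its main use is self-testing the CAN stack without hardware. The sketch below shows one way such a test might look through the public CAN API; the semaphore plumbing and IDs are illustrative assumptions, not part of the driver.

```c
/* Loopback self-test sketch: register a filter, enable loopback mode, send a
 * frame, and wait for it to arrive back. IDs and plumbing are illustrative.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static K_SEM_DEFINE(rx_sem, 0, 1);

static void rx_cb(const struct device *dev, struct can_frame *frame, void *user_data)
{
	ARG_UNUSED(user_data);
	printk("%s: received id 0x%x dlc %u\n", dev->name, frame->id, frame->dlc);
	k_sem_give(&rx_sem);
}

int loopback_self_test(const struct device *can_dev)
{
	const struct can_filter filter = { .id = 0x010, .mask = CAN_STD_ID_MASK };
	struct can_frame frame = { .id = 0x010, .dlc = 1, .data = { 42 } };
	int filter_id, err;

	filter_id = can_add_rx_filter(can_dev, rx_cb, NULL, &filter);
	if (filter_id < 0) {
		return filter_id;
	}

	/* Mode must be set while the controller is stopped */
	err = can_set_mode(can_dev, CAN_MODE_LOOPBACK);
	if (err == 0) {
		err = can_start(can_dev);
	}
	if (err == 0) {
		err = can_send(can_dev, &frame, K_MSEC(100), NULL, NULL);
	}
	if (err == 0) {
		/* The frame should be looped back to rx_cb almost immediately */
		err = k_sem_take(&rx_sem, K_MSEC(100));
	}

	can_remove_rx_filter(can_dev, filter_id);
	return err;
}
```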
```objective-c
/*
 *
 */

/** @file
 * @brief Private functions for native posix canbus driver.
 */

#ifndef ZEPHYR_DRIVERS_CAN_NATIVE_LINUX_ADAPT_H_
#define ZEPHYR_DRIVERS_CAN_NATIVE_LINUX_ADAPT_H_

int linux_socketcan_iface_open(const char *if_name);
int linux_socketcan_iface_close(int fd);
int linux_socketcan_poll_data(int fd);
int linux_socketcan_read_data(int fd, void *buf, size_t buf_len, bool *msg_confirm);
int linux_socketcan_set_mode_fd(int fd, bool mode_fd);

#endif /* ZEPHYR_DRIVERS_CAN_NATIVE_LINUX_ADAPT_H_ */
```
/content/code_sandbox/drivers/can/can_native_linux_adapt.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
132
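The header above only declares the host-side shim; its implementation is compiled against the Linux headers rather than the Zephyr ones. For orientation, here is a hedged sketch of the standard SocketCAN open sequence that such a shim typically wraps. The function name is hypothetical and this is not the actual Zephyr implementation, which may differ in details such as non-blocking setup and error reporting.

```c
/* Hedged sketch of a typical SocketCAN "open interface" sequence on Linux,
 * shown for orientation only; names are illustrative.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int example_socketcan_open(const char *if_name)
{
	struct sockaddr_can addr = { 0 };
	struct ifreq ifr = { 0 };
	int fd;

	fd = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (fd < 0) {
		return -1;
	}

	/* Resolve the interface name (e.g. "can0" or "vcan0") to an index */
	strncpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name) - 1);
	if (ioctl(fd, SIOCGIFINDEX, &ifr) < 0) {
		close(fd);
		return -1;
	}

	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}
```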
```c /* * */ #define DT_DRV_COMPAT infineon_xmc4xxx_can_node #include <zephyr/device.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/kernel.h> #include <zephyr/sys/bitarray.h> #include <soc.h> #include <xmc_can.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(can_xmc4xxx, CONFIG_CAN_LOG_LEVEL); #define CAN_XMC4XXX_MULTICAN_NODE DT_INST(0, infineon_xmc4xxx_can) #define CAN_XMC4XXX_NUM_MESSAGE_OBJECTS DT_PROP(CAN_XMC4XXX_MULTICAN_NODE, message_objects) #define CAN_XMC4XXX_CLOCK_PRESCALER DT_PROP(CAN_XMC4XXX_MULTICAN_NODE, clock_prescaler) static CAN_GLOBAL_TypeDef *const can_xmc4xxx_global_reg = (CAN_GLOBAL_TypeDef *)DT_REG_ADDR(CAN_XMC4XXX_MULTICAN_NODE); static bool can_xmc4xxx_global_init; static uint32_t can_xmc4xxx_clock_frequency; SYS_BITARRAY_DEFINE_STATIC(mo_usage_bitarray, CAN_XMC4XXX_NUM_MESSAGE_OBJECTS); static int can_xmc4xxx_num_free_mo = CAN_XMC4XXX_NUM_MESSAGE_OBJECTS; #define CAN_XMC4XXX_IRQ_MIN 76 #define CAN_XMC4XXX_MAX_DLC 8 #define CAN_XMC4XXX_REG_TO_NODE_IND(reg) (((uint32_t)(reg) - (uint32_t)CAN_NODE0_BASE) / 0x100) struct can_xmc4xxx_tx_callback { can_tx_callback_t function; void *user_data; }; struct can_xmc4xxx_rx_callback { can_rx_callback_t function; void *user_data; }; struct can_xmc4xxx_rx_fifo { CAN_MO_TypeDef *base; CAN_MO_TypeDef *top; CAN_MO_TypeDef *tail; CAN_MO_TypeDef *head; }; struct can_xmc4xxx_data { struct can_driver_data common; enum can_state state; struct k_mutex mutex; struct k_sem tx_sem; struct can_xmc4xxx_tx_callback tx_callbacks[CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE]; uint32_t filter_usage; struct can_xmc4xxx_rx_callback rx_callbacks[CONFIG_CAN_MAX_FILTER]; struct can_xmc4xxx_rx_fifo rx_fifos[CONFIG_CAN_MAX_FILTER]; #if defined(CONFIG_CAN_ACCEPT_RTR) struct can_xmc4xxx_rx_fifo rtr_fifos[CONFIG_CAN_MAX_FILTER]; #endif CAN_MO_TypeDef *tx_mo[CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE]; }; struct can_xmc4xxx_config { struct can_driver_config common; CAN_NODE_TypeDef *can; bool clock_div8; uint8_t service_request; void (*irq_config_func)(void); uint8_t input_src; const struct pinctrl_dev_config *pcfg; }; static int can_xmc4xxx_set_mode(const struct device *dev, can_mode_t mode) { struct can_xmc4xxx_data *dev_data = dev->data; const struct can_xmc4xxx_config *dev_cfg = dev->config; if (dev_data->common.started) { return -EBUSY; } if ((mode & (CAN_MODE_3_SAMPLES | CAN_MODE_ONE_SHOT | CAN_MODE_LOOPBACK | CAN_MODE_FD)) != 0) { return -ENOTSUP; } if ((mode & CAN_MODE_LISTENONLY) != 0) { XMC_CAN_NODE_SetAnalyzerMode(dev_cfg->can); } else { XMC_CAN_NODE_ReSetAnalyzerMode(dev_cfg->can); } dev_data->common.mode = mode; return 0; } static int can_xmc4xxx_set_timing(const struct device *dev, const struct can_timing *timing) { struct can_xmc4xxx_data *dev_data = dev->data; const struct can_xmc4xxx_config *dev_cfg = dev->config; uint32_t reg; if (!timing) { return -EINVAL; } if (dev_data->common.started) { return -EBUSY; } k_mutex_lock(&dev_data->mutex, K_FOREVER); reg = FIELD_PREP(CAN_NODE_NBTR_DIV8_Msk, dev_cfg->clock_div8); reg |= FIELD_PREP(CAN_NODE_NBTR_BRP_Msk, timing->prescaler - 1); reg |= FIELD_PREP(CAN_NODE_NBTR_TSEG1_Msk, timing->prop_seg + timing->phase_seg1 - 1); reg |= FIELD_PREP(CAN_NODE_NBTR_TSEG2_Msk, timing->phase_seg2 - 1); reg |= FIELD_PREP(CAN_NODE_NBTR_SJW_Msk, timing->sjw - 1); dev_cfg->can->NBTR = reg; k_mutex_unlock(&dev_data->mutex); return 0; } static int can_xmc4xxx_send(const struct device *dev, const struct can_frame *msg, k_timeout_t timeout, can_tx_callback_t 
callback, void *callback_arg) { struct can_xmc4xxx_data *dev_data = dev->data; uint8_t mailbox_idx; struct can_xmc4xxx_tx_callback *callbacks = &dev_data->tx_callbacks[0]; CAN_MO_TypeDef *mo; unsigned int key; LOG_DBG("Sending %d bytes. Id: 0x%x, ID type: %s %s %s %s", can_dlc_to_bytes(msg->dlc), msg->id, msg->flags & CAN_FRAME_IDE ? "extended" : "standard", msg->flags & CAN_FRAME_RTR ? "RTR" : "", msg->flags & CAN_FRAME_FDF ? "FD frame" : "", msg->flags & CAN_FRAME_BRS ? "BRS" : ""); if (msg->dlc > CAN_XMC4XXX_MAX_DLC) { return -EINVAL; } if (!dev_data->common.started) { return -ENETDOWN; } if (dev_data->state == CAN_STATE_BUS_OFF) { return -ENETUNREACH; } if ((msg->flags & (CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0) { return -ENOTSUP; } if (k_sem_take(&dev_data->tx_sem, timeout) != 0) { return -EAGAIN; } k_mutex_lock(&dev_data->mutex, K_FOREVER); for (mailbox_idx = 0; mailbox_idx < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE; mailbox_idx++) { if (callbacks[mailbox_idx].function == NULL) { break; } } __ASSERT_NO_MSG(mailbox_idx < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE); key = irq_lock(); /* critical section in case can_xmc4xxx_reset_tx_fifos() called in isr */ /* so that callback function and callback_arg are consistent */ callbacks[mailbox_idx].function = callback; callbacks[mailbox_idx].user_data = callback_arg; irq_unlock(key); mo = dev_data->tx_mo[mailbox_idx]; mo->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk; if ((msg->flags & CAN_FRAME_IDE) != 0) { /* MOAR - message object arbitration register */ mo->MOAR = FIELD_PREP(CAN_MO_MOAR_PRI_Msk, 1) | FIELD_PREP(CAN_MO_MOAR_ID_Msk, msg->id) | CAN_MO_MOAR_IDE_Msk; } else { mo->MOAR = FIELD_PREP(CAN_MO_MOAR_PRI_Msk, 1) | FIELD_PREP(XMC_CAN_MO_MOAR_STDID_Msk, msg->id); } mo->MOFCR &= ~CAN_MO_MOFCR_DLC_Msk; mo->MOFCR |= FIELD_PREP(CAN_MO_MOFCR_DLC_Msk, msg->dlc); if ((msg->flags & CAN_FRAME_RTR) != 0) { mo->MOCTR = CAN_MO_MOCTR_RESDIR_Msk; } else { mo->MOCTR = CAN_MO_MOCTR_SETDIR_Msk; memcpy((void *)&mo->MODATAL, &msg->data[0], sizeof(uint32_t)); memcpy((void *)&mo->MODATAH, &msg->data[4], sizeof(uint32_t)); } mo->MOCTR = CAN_MO_MOCTR_SETTXEN0_Msk | CAN_MO_MOCTR_SETTXEN1_Msk | CAN_MO_MOCTR_SETMSGVAL_Msk | CAN_MO_MOCTR_RESRXEN_Msk | CAN_MO_MOCTR_RESRTSEL_Msk; mo->MOCTR = CAN_MO_MOCTR_SETTXRQ_Msk; k_mutex_unlock(&dev_data->mutex); return 0; } static CAN_MO_TypeDef *can_xmc4xxx_get_mo(uint8_t *mo_index) { int i; for (i = 0; i < CAN_XMC4XXX_NUM_MESSAGE_OBJECTS; i++) { int prev_val; sys_bitarray_test_and_set_bit(&mo_usage_bitarray, i, &prev_val); if (prev_val == 0) { *mo_index = i; can_xmc4xxx_num_free_mo--; return &CAN_MO->MO[i]; } } return NULL; } static void can_xmc4xxx_deinit_fifo(const struct device *dev, struct can_xmc4xxx_rx_fifo *fifo) { CAN_MO_TypeDef *mo = fifo->base; while (mo != NULL) { int next_index; int index; /* invalidate message */ mo->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk; next_index = FIELD_GET(CAN_MO_MOSTAT_PNEXT_Msk, mo->MOSTAT); index = ((uint32_t)mo - (uint32_t)&CAN_MO->MO[0]) / sizeof(*mo); if ((uint32_t)mo == (uint32_t)fifo->top) { mo = NULL; } else { mo = &CAN_MO->MO[next_index]; } /* we need to move the node back to the list of unallocated message objects, */ /* which is list index = 0. 
255 gets rolled over to 0 in the function below */ XMC_CAN_AllocateMOtoNodeList(can_xmc4xxx_global_reg, 255, index); sys_bitarray_clear_bit(&mo_usage_bitarray, index); can_xmc4xxx_num_free_mo++; } } static int can_xmc4xxx_init_fifo(const struct device *dev, const struct can_filter *filter, struct can_xmc4xxx_rx_fifo *fifo, bool is_rtr) { const struct can_xmc4xxx_config *dev_cfg = dev->config; CAN_MO_TypeDef *mo; uint32_t reg; uint8_t mo_index = 0, base_index; if (can_xmc4xxx_num_free_mo < CONFIG_CAN_XMC4XXX_RX_FIFO_ITEMS) { return -ENOMEM; } mo = can_xmc4xxx_get_mo(&mo_index); __ASSERT_NO_MSG(mo != NULL); base_index = mo_index; fifo->base = mo; fifo->tail = mo; XMC_CAN_AllocateMOtoNodeList(can_xmc4xxx_global_reg, CAN_XMC4XXX_REG_TO_NODE_IND(dev_cfg->can), mo_index); /* setup the base object - this controls the filtering for the fifo */ mo->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk; mo->MOAMR &= ~(CAN_MO_MOAMR_AM_Msk | CAN_MO_MOAMR_MIDE_Msk); mo->MOAR = 0; if ((filter->flags & CAN_FILTER_IDE) != 0) { mo->MOAMR |= FIELD_PREP(CAN_MO_MOAMR_AM_Msk, filter->mask) | CAN_MO_MOAMR_MIDE_Msk; mo->MOAR |= FIELD_PREP(CAN_MO_MOAR_ID_Msk, filter->id) | CAN_MO_MOAR_IDE_Msk; } else { mo->MOAMR |= FIELD_PREP(XMC_CAN_MO_MOAR_STDID_Msk, filter->mask); mo->MOAR |= FIELD_PREP(XMC_CAN_MO_MOAR_STDID_Msk, filter->id); } mo->MOFCR = FIELD_PREP(CAN_MO_MOFCR_MMC_Msk, 1) | CAN_MO_MOFCR_RXIE_Msk; if (is_rtr) { mo->MOFCR |= CAN_MO_MOFCR_RMM_Msk; mo->MOCTR = CAN_MO_MOCTR_SETDIR_Msk; } else { mo->MOCTR = CAN_MO_MOCTR_RESDIR_Msk; } /* Writing to MOCTR sets or resets message object properties */ mo->MOCTR = CAN_MO_MOCTR_RESTXEN0_Msk | CAN_MO_MOCTR_RESTXEN1_Msk | CAN_MO_MOCTR_SETMSGVAL_Msk | CAN_MO_MOCTR_SETRXEN_Msk | CAN_MO_MOCTR_RESRTSEL_Msk; mo->MOIPR = FIELD_PREP(CAN_MO_MOIPR_RXINP_Msk, dev_cfg->service_request); /* setup the remaining message objects in the fifo */ for (int i = 1; i < CONFIG_CAN_XMC4XXX_RX_FIFO_ITEMS; i++) { mo = can_xmc4xxx_get_mo(&mo_index); __ASSERT_NO_MSG(mo != NULL); XMC_CAN_AllocateMOtoNodeList(can_xmc4xxx_global_reg, CAN_XMC4XXX_REG_TO_NODE_IND(dev_cfg->can), mo_index); mo->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk; mo->MOCTR = CAN_MO_MOCTR_SETMSGVAL_Msk | CAN_MO_MOCTR_RESRXEN_Msk; /* all the other message objects in the fifo must point to the base object */ mo->MOFGPR = FIELD_PREP(CAN_MO_MOFGPR_CUR_Msk, base_index); } reg = 0; reg |= FIELD_PREP(CAN_MO_MOFGPR_CUR_Msk, base_index); reg |= FIELD_PREP(CAN_MO_MOFGPR_TOP_Msk, mo_index); reg |= FIELD_PREP(CAN_MO_MOFGPR_BOT_Msk, base_index); reg |= FIELD_PREP(CAN_MO_MOFGPR_SEL_Msk, base_index); fifo->base->MOFGPR = reg; fifo->top = mo; return 0; } static int can_xmc4xxx_add_rx_filter(const struct device *dev, can_rx_callback_t callback, void *user_data, const struct can_filter *filter) { struct can_xmc4xxx_data *dev_data = dev->data; int filter_idx; if ((filter->flags & ~CAN_FILTER_IDE) != 0) { LOG_ERR("Unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } k_mutex_lock(&dev_data->mutex, K_FOREVER); for (filter_idx = 0; filter_idx < CONFIG_CAN_MAX_FILTER; filter_idx++) { if ((BIT(filter_idx) & dev_data->filter_usage) == 0) { break; } } if (filter_idx >= CONFIG_CAN_MAX_FILTER) { filter_idx = -ENOSPC; } else { unsigned int key = irq_lock(); int ret; ret = can_xmc4xxx_init_fifo(dev, filter, &dev_data->rx_fifos[filter_idx], false); if (ret < 0) { irq_unlock(key); k_mutex_unlock(&dev_data->mutex); return ret; } #if defined(CONFIG_CAN_ACCEPT_RTR) ret = can_xmc4xxx_init_fifo(dev, filter, &dev_data->rtr_fifos[filter_idx], true); if (ret < 0) { 
can_xmc4xxx_deinit_fifo(dev, &dev_data->rx_fifos[filter_idx]); irq_unlock(key); k_mutex_unlock(&dev_data->mutex); return ret; } #endif dev_data->filter_usage |= BIT(filter_idx); dev_data->rx_callbacks[filter_idx].function = callback; dev_data->rx_callbacks[filter_idx].user_data = user_data; irq_unlock(key); } k_mutex_unlock(&dev_data->mutex); return filter_idx; } static void can_xmc4xxx_remove_rx_filter(const struct device *dev, int filter_idx) { struct can_xmc4xxx_data *dev_data = dev->data; unsigned int key; if (filter_idx < 0 || filter_idx >= CONFIG_CAN_MAX_FILTER) { LOG_ERR("Filter ID %d out of bounds", filter_idx); return; } k_mutex_lock(&dev_data->mutex, K_FOREVER); if ((dev_data->filter_usage & BIT(filter_idx)) == 0) { k_mutex_unlock(&dev_data->mutex); return; } key = irq_lock(); can_xmc4xxx_deinit_fifo(dev, &dev_data->rx_fifos[filter_idx]); #if defined(CONFIG_CAN_ACCEPT_RTR) can_xmc4xxx_deinit_fifo(dev, &dev_data->rtr_fifos[filter_idx]); #endif dev_data->filter_usage &= ~BIT(filter_idx); dev_data->rx_callbacks[filter_idx].function = NULL; dev_data->rx_callbacks[filter_idx].user_data = NULL; irq_unlock(key); k_mutex_unlock(&dev_data->mutex); } static void can_xmc4xxx_set_state_change_callback(const struct device *dev, can_state_change_callback_t cb, void *user_data) { struct can_xmc4xxx_data *dev_data = dev->data; unsigned int key; key = irq_lock(); /* critical section so that state_change_cb and state_change_cb_data are consistent */ dev_data->common.state_change_cb = cb; dev_data->common.state_change_cb_user_data = user_data; irq_unlock(key); } static void can_xmc4xxx_get_state_from_status(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt, uint32_t *status) { struct can_xmc4xxx_data *dev_data = dev->data; const struct can_xmc4xxx_config *dev_cfg = dev->config; uint8_t tec = XMC_CAN_NODE_GetTransmitErrorCounter(dev_cfg->can); uint8_t rec = XMC_CAN_NODE_GetTransmitErrorCounter(dev_cfg->can); if (err_cnt != NULL) { err_cnt->tx_err_cnt = tec; err_cnt->rx_err_cnt = rec; } if (state == NULL) { return; } if (!dev_data->common.started) { *state = CAN_STATE_STOPPED; return; } if ((*status & XMC_CAN_NODE_STATUS_BUS_OFF) != 0) { *state = CAN_STATE_BUS_OFF; } else if (tec >= 128 || rec >= 128) { *state = CAN_STATE_ERROR_PASSIVE; } else if ((*status & XMC_CAN_NODE_STATUS_ERROR_WARNING_STATUS) != 0) { *state = CAN_STATE_ERROR_WARNING; } else { *state = CAN_STATE_ERROR_ACTIVE; } } static int can_xmc4xxx_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { const struct can_xmc4xxx_config *dev_cfg = dev->config; uint32_t status; status = XMC_CAN_NODE_GetStatus(dev_cfg->can); can_xmc4xxx_get_state_from_status(dev, state, err_cnt, &status); return 0; } static int can_xmc4xxx_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_xmc4xxx_config *dev_cfg = dev->config; *rate = can_xmc4xxx_clock_frequency; if (dev_cfg->clock_div8) { *rate /= 8; } return 0; } static int can_xmc4xxx_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(ide); return CONFIG_CAN_MAX_FILTER; } static void can_xmc4xxx_reset_tx_fifos(const struct device *dev, int status) { struct can_xmc4xxx_data *dev_data = dev->data; struct can_xmc4xxx_tx_callback *tx_callbacks = &dev_data->tx_callbacks[0]; LOG_DBG("All Tx message objects reset"); for (int i = 0; i < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE; i++) { can_tx_callback_t callback; void *user_data; callback = tx_callbacks[i].function; user_data = tx_callbacks[i].user_data; 
tx_callbacks[i].function = NULL; if (callback) { dev_data->tx_mo[i]->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk; callback(dev, status, user_data); k_sem_give(&dev_data->tx_sem); } } } static void can_xmc4xxx_tx_handler(const struct device *dev) { struct can_xmc4xxx_data *dev_data = dev->data; struct can_xmc4xxx_tx_callback *tx_callbacks = &dev_data->tx_callbacks[0]; for (int i = 0; i < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE; i++) { CAN_MO_TypeDef *mo = dev_data->tx_mo[i]; if ((mo->MOSTAT & XMC_CAN_MO_STATUS_TX_PENDING) != 0) { can_tx_callback_t callback; void *user_data; mo->MOCTR = XMC_CAN_MO_RESET_STATUS_TX_PENDING; callback = tx_callbacks[i].function; user_data = tx_callbacks[i].user_data; tx_callbacks[i].function = NULL; if (callback) { callback(dev, 0, user_data); k_sem_give(&dev_data->tx_sem); } } } } static inline void can_xmc4xxx_increment_fifo_tail(struct can_xmc4xxx_rx_fifo *fifo) { uint8_t next_index; if ((uint32_t)fifo->tail == (uint32_t)fifo->top) { fifo->tail = fifo->base; return; } next_index = FIELD_GET(CAN_MO_MOSTAT_PNEXT_Msk, fifo->tail->MOSTAT); fifo->tail = &CAN_MO->MO[next_index]; } static inline bool can_xmc4xxx_is_fifo_empty(struct can_xmc4xxx_rx_fifo *fifo) { if (fifo->tail->MOSTAT & XMC_CAN_MO_STATUS_RX_PENDING) { return false; } return true; } static inline void can_xmc4xxx_update_fifo_head(struct can_xmc4xxx_rx_fifo *fifo) { uint32_t reg = fifo->base->MOFGPR; uint8_t top_index, bot_index, cur_index; uint8_t head_index = FIELD_GET(CAN_MO_MOFGPR_CUR_Msk, reg); fifo->head = &CAN_MO->MO[head_index]; top_index = FIELD_GET(CAN_MO_MOFGPR_TOP_Msk, reg); bot_index = FIELD_GET(CAN_MO_MOFGPR_BOT_Msk, reg); cur_index = FIELD_GET(CAN_MO_MOFGPR_CUR_Msk, reg); LOG_DBG("Fifo: top %d, bot %d, cur %d", top_index, bot_index, cur_index); } static void can_xmc4xxx_rx_fifo_handler(const struct device *dev, struct can_xmc4xxx_rx_fifo *fifo, struct can_xmc4xxx_rx_callback *rx_callback) { bool is_rtr = (fifo->base->MOSTAT & CAN_MO_MOSTAT_DIR_Msk) != 0; while (!can_xmc4xxx_is_fifo_empty(fifo)) { struct can_frame frame; CAN_MO_TypeDef *mo_tail = fifo->tail; memset(&frame, 0, sizeof(frame)); if ((mo_tail->MOAR & CAN_MO_MOAR_IDE_Msk) != 0) { frame.flags |= CAN_FRAME_IDE; frame.id = FIELD_GET(CAN_MO_MOAR_ID_Msk, mo_tail->MOAR); } else { frame.id = FIELD_GET(XMC_CAN_MO_MOAR_STDID_Msk, mo_tail->MOAR); } frame.dlc = FIELD_GET(CAN_MO_MOFCR_DLC_Msk, mo_tail->MOFCR); if (!is_rtr) { memcpy(&frame.data[0], (void *)&mo_tail->MODATAL, sizeof(uint32_t)); memcpy(&frame.data[4], (void *)&mo_tail->MODATAH, sizeof(uint32_t)); } else { frame.flags |= CAN_FRAME_RTR; memset(&frame.data[0], 0, CAN_MAX_DLEN); } if (rx_callback->function != NULL) { rx_callback->function(dev, &frame, rx_callback->user_data); } /* reset the rx pending bit on the tail */ mo_tail->MOCTR = XMC_CAN_MO_RESET_STATUS_RX_PENDING; can_xmc4xxx_increment_fifo_tail(fifo); } } static void can_xmc4xxx_rx_handler(const struct device *dev) { struct can_xmc4xxx_data *dev_data = dev->data; for (int i = 0; i < CONFIG_CAN_MAX_FILTER; i++) { if ((BIT(i) & dev_data->filter_usage) == 0) { continue; } can_xmc4xxx_update_fifo_head(&dev_data->rx_fifos[i]); can_xmc4xxx_rx_fifo_handler(dev, &dev_data->rx_fifos[i], &dev_data->rx_callbacks[i]); #if defined(CONFIG_CAN_ACCEPT_RTR) can_xmc4xxx_update_fifo_head(&dev_data->rtr_fifos[i]); can_xmc4xxx_rx_fifo_handler(dev, &dev_data->rtr_fifos[i], &dev_data->rx_callbacks[i]); #endif } } static void can_xmc4xxx_state_change_handler(const struct device *dev, uint32_t status) { const struct can_xmc4xxx_config *dev_cfg = dev->config; 
struct can_xmc4xxx_data *dev_data = dev->data; enum can_state new_state; struct can_bus_err_cnt err_cnt; can_xmc4xxx_get_state_from_status(dev, &new_state, &err_cnt, &status); if (dev_data->state != new_state) { if (dev_data->common.state_change_cb) { dev_data->common.state_change_cb( dev, new_state, err_cnt, dev_data->common.state_change_cb_user_data); } if (dev_data->state != CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF) { /* re-enable the node after auto bus-off recovery completes */ XMC_CAN_NODE_ResetInitBit(dev_cfg->can); } dev_data->state = new_state; if (dev_data->state == CAN_STATE_BUS_OFF) { can_xmc4xxx_reset_tx_fifos(dev, -ENETDOWN); } } } static void can_xmc4xxx_isr(const struct device *dev) { const struct can_xmc4xxx_config *dev_cfg = dev->config; uint32_t status; status = XMC_CAN_NODE_GetStatus(dev_cfg->can); XMC_CAN_NODE_ClearStatus(dev_cfg->can, status); if ((status & XMC_CAN_NODE_STATUS_TX_OK) != 0) { can_xmc4xxx_tx_handler(dev); } if ((status & XMC_CAN_NODE_STATUS_RX_OK) != 0) { can_xmc4xxx_rx_handler(dev); } if ((status & XMC_CAN_NODE_STATUS_ALERT_WARNING) != 0) { /* change of bit NSRx.BOFF */ /* change of bit NSRx.EWRN */ can_xmc4xxx_state_change_handler(dev, status); } } static int can_xmc4xxx_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LISTENONLY; return 0; } static int can_xmc4xxx_start(const struct device *dev) { struct can_xmc4xxx_data *dev_data = dev->data; const struct can_xmc4xxx_config *dev_cfg = dev->config; int ret = 0; unsigned int key; if (dev_data->common.started) { return -EALREADY; } key = irq_lock(); can_xmc4xxx_reset_tx_fifos(dev, -ENETDOWN); irq_unlock(key); if (dev_cfg->common.phy != NULL) { ret = can_transceiver_enable(dev_cfg->common.phy, dev_data->common.mode); if (ret < 0) { LOG_ERR("Failed to enable CAN transceiver [%d]", ret); return ret; } } k_mutex_lock(&dev_data->mutex, K_FOREVER); XMC_CAN_NODE_DisableConfigurationChange(dev_cfg->can); dev_data->common.started = true; XMC_CAN_NODE_ResetInitBit(dev_cfg->can); k_mutex_unlock(&dev_data->mutex); return ret; } static int can_xmc4xxx_stop(const struct device *dev) { struct can_xmc4xxx_data *dev_data = dev->data; const struct can_xmc4xxx_config *dev_cfg = dev->config; int ret = 0; unsigned int key; if (!dev_data->common.started) { return -EALREADY; } key = irq_lock(); XMC_CAN_NODE_SetInitBit(dev_cfg->can); XMC_CAN_NODE_EnableConfigurationChange(dev_cfg->can); can_xmc4xxx_reset_tx_fifos(dev, -ENETDOWN); dev_data->common.started = false; irq_unlock(key); if (dev_cfg->common.phy != NULL) { ret = can_transceiver_disable(dev_cfg->common.phy); if (ret < 0) { LOG_ERR("Failed to disable CAN transceiver [%d]", ret); return ret; } } return 0; } static int can_xmc4xxx_init(const struct device *dev) { struct can_xmc4xxx_data *dev_data = dev->data; const struct can_xmc4xxx_config *dev_cfg = dev->config; int ret; struct can_timing timing = {0}; CAN_MO_TypeDef *mo; uint8_t mo_index = 0; k_sem_init(&dev_data->tx_sem, CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE, CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE); k_mutex_init(&dev_data->mutex); if (!can_xmc4xxx_global_init) { uint32_t fdr_step; uint32_t clk_module; XMC_CAN_Enable(can_xmc4xxx_global_reg); XMC_CAN_SetBaudrateClockSource(can_xmc4xxx_global_reg, XMC_CAN_CANCLKSRC_FPERI); clk_module = XMC_CAN_GetBaudrateClockFrequency(can_xmc4xxx_global_reg); fdr_step = 1024 - CAN_XMC4XXX_CLOCK_PRESCALER; can_xmc4xxx_clock_frequency = clk_module / CAN_XMC4XXX_CLOCK_PRESCALER; LOG_DBG("Clock frequency %dHz\n", 
can_xmc4xxx_clock_frequency); can_xmc4xxx_global_reg->FDR &= ~(CAN_FDR_DM_Msk | CAN_FDR_STEP_Msk); can_xmc4xxx_global_reg->FDR |= FIELD_PREP(CAN_FDR_DM_Msk, XMC_CAN_DM_NORMAL) | FIELD_PREP(CAN_FDR_STEP_Msk, fdr_step); can_xmc4xxx_global_init = true; } XMC_CAN_NODE_EnableConfigurationChange(dev_cfg->can); XMC_CAN_NODE_SetReceiveInput(dev_cfg->can, dev_cfg->input_src); XMC_CAN_NODE_SetInitBit(dev_cfg->can); XMC_CAN_NODE_SetEventNodePointer(dev_cfg->can, XMC_CAN_NODE_POINTER_EVENT_ALERT, dev_cfg->service_request); XMC_CAN_NODE_SetEventNodePointer(dev_cfg->can, XMC_CAN_NODE_POINTER_EVENT_LEC, dev_cfg->service_request); XMC_CAN_NODE_SetEventNodePointer(dev_cfg->can, XMC_CAN_NODE_POINTER_EVENT_TRANSFER_OK, dev_cfg->service_request); XMC_CAN_NODE_SetEventNodePointer(dev_cfg->can, XMC_CAN_NODE_POINTER_EVENT_FRAME_COUNTER, dev_cfg->service_request); XMC_CAN_NODE_EnableEvent(dev_cfg->can, XMC_CAN_NODE_EVENT_TX_INT | XMC_CAN_NODE_EVENT_ALERT); /* set up tx messages */ for (int i = 0; i < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE; i++) { mo = can_xmc4xxx_get_mo(&mo_index); if (mo == NULL) { return -ENOMEM; } dev_data->tx_mo[i] = mo; XMC_CAN_AllocateMOtoNodeList(can_xmc4xxx_global_reg, CAN_XMC4XXX_REG_TO_NODE_IND(dev_cfg->can), mo_index); mo->MOIPR = FIELD_PREP(CAN_MO_MOIPR_TXINP_Msk, dev_cfg->service_request); mo->MOFCR = FIELD_PREP(CAN_MO_MOFCR_MMC_Msk, 0) | CAN_MO_MOFCR_TXIE_Msk; } #ifdef CONFIG_CAN_XMC4XXX_INTERNAL_BUS_MODE /* The name of this function is misleading. It doesn't actually enable */ /* loopback on a single node, but connects all CAN devices to an internal bus. */ XMC_CAN_NODE_EnableLoopBack(dev_cfg->can); #endif dev_cfg->irq_config_func(); dev_data->state = CAN_STATE_STOPPED; #ifndef CONFIG_CAN_XMC4XXX_INTERNAL_BUS_MODE ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } #endif ret = can_calc_timing(dev, &timing, dev_cfg->common.bitrate, dev_cfg->common.sample_point); if (ret < 0) { return ret; } LOG_DBG("Presc: %d, BS1: %d, BS2: %d", timing.prescaler, timing.phase_seg1, timing.phase_seg2); LOG_DBG("Sample-point err : %d", ret); return can_set_timing(dev, &timing); } static const struct can_driver_api can_xmc4xxx_api_funcs = { .get_capabilities = can_xmc4xxx_get_capabilities, .set_mode = can_xmc4xxx_set_mode, .set_timing = can_xmc4xxx_set_timing, .start = can_xmc4xxx_start, .stop = can_xmc4xxx_stop, .send = can_xmc4xxx_send, .add_rx_filter = can_xmc4xxx_add_rx_filter, .remove_rx_filter = can_xmc4xxx_remove_rx_filter, .get_state = can_xmc4xxx_get_state, .set_state_change_callback = can_xmc4xxx_set_state_change_callback, .get_core_clock = can_xmc4xxx_get_core_clock, .get_max_filters = can_xmc4xxx_get_max_filters, .timing_min = { .sjw = 1, .prop_seg = 0, .phase_seg1 = 3, .phase_seg2 = 2, .prescaler = 1, }, .timing_max = { .sjw = 4, .prop_seg = 0, .phase_seg1 = 16, .phase_seg2 = 8, .prescaler = 64, }, }; #define CAN_XMC4XXX_INIT(inst) \ static void can_xmc4xxx_irq_config_##inst(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), can_xmc4xxx_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQN(inst)); \ } \ \ PINCTRL_DT_INST_DEFINE(inst); \ \ static struct can_xmc4xxx_data can_xmc4xxx_data_##inst; \ static const struct can_xmc4xxx_config can_xmc4xxx_config_##inst = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, 1000000), \ .can = (CAN_NODE_TypeDef *)DT_INST_REG_ADDR(inst), \ .clock_div8 = DT_INST_PROP(inst, clock_div8), \ .irq_config_func = can_xmc4xxx_irq_config_##inst, \ .service_request = DT_INST_IRQN(inst) - 
CAN_XMC4XXX_IRQ_MIN, \ .input_src = DT_INST_ENUM_IDX(inst, input_src), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ }; \ \ CAN_DEVICE_DT_INST_DEFINE(inst, can_xmc4xxx_init, NULL, &can_xmc4xxx_data_##inst, \ &can_xmc4xxx_config_##inst, POST_KERNEL, \ CONFIG_CAN_INIT_PRIORITY, &can_xmc4xxx_api_funcs); DT_INST_FOREACH_STATUS_OKAY(CAN_XMC4XXX_INIT) ```
/content/code_sandbox/drivers/can/can_xmc4xxx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,025
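The XMC4XXX driver above plugs into Zephyr's generic CAN API through its `can_driver_api` table, so applications drive it the same way as any other controller in this directory. Below is a minimal transmit sketch against that generic API, assuming a board whose devicetree defines a `zephyr,canbus` chosen node and a bus that is already wired and terminated; the identifier and payload are placeholders.

```c
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>
#include <errno.h>

int main(void)
{
	/* Resolve the controller selected by the zephyr,canbus chosen node */
	const struct device *can_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
	struct can_frame frame = {
		.id = 0x123,             /* standard 11-bit identifier */
		.dlc = 2,
		.data = { 0xDE, 0xAD },
	};
	int err;

	if (!device_is_ready(can_dev)) {
		return -ENODEV;
	}

	/* Controllers come up in CAN_STATE_STOPPED until started */
	err = can_start(can_dev);
	if (err != 0 && err != -EALREADY) {
		return err;
	}

	/* Block up to 100 ms for a free TX mailbox/FIFO slot */
	return can_send(can_dev, &frame, K_MSEC(100), NULL, NULL);
}
```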
```unknown config CAN_NRF bool "nRF CAN driver" default y depends on DT_HAS_NORDIC_NRF_CAN_ENABLED select CAN_MCAN select CLOCK_CONTROL help Driver for nRF CAN. ```
/content/code_sandbox/drivers/can/Kconfig.nrf
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
48
```unknown # Espressif ESP32 TWAI configuration options config CAN_ESP32_TWAI bool "Espressif TWAI CAN driver" default y depends on DT_HAS_ESPRESSIF_ESP32_TWAI_ENABLED select CAN_SJA1000 help This enables support for the Espressif Two-Wire Automotive Interface (TWAI) CAN driver. ```
/content/code_sandbox/drivers/can/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
80
```c /* * */ #include <zephyr/internal/syscall_handler.h> #include <zephyr/drivers/can.h> static int z_vrfy_can_calc_timing(const struct device *dev, struct can_timing *res, uint32_t bitrate, uint16_t sample_pnt) { struct can_timing res_copy; int err; K_OOPS(K_SYSCALL_DRIVER_CAN(dev, get_core_clock)); K_OOPS(k_usermode_from_copy(&res_copy, res, sizeof(res_copy))); err = z_impl_can_calc_timing(dev, &res_copy, bitrate, sample_pnt); K_OOPS(k_usermode_to_copy(res, &res_copy, sizeof(*res))); return err; } #include <zephyr/syscalls/can_calc_timing_mrsh.c> static inline int z_vrfy_can_set_timing(const struct device *dev, const struct can_timing *timing) { struct can_timing timing_copy; K_OOPS(K_SYSCALL_DRIVER_CAN(dev, set_timing)); K_OOPS(k_usermode_from_copy(&timing_copy, timing, sizeof(timing_copy))); return z_impl_can_set_timing(dev, &timing_copy); } #include <zephyr/syscalls/can_set_timing_mrsh.c> static inline int z_vrfy_can_get_core_clock(const struct device *dev, uint32_t *rate) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, get_core_clock)); K_OOPS(K_SYSCALL_MEMORY_WRITE(rate, sizeof(*rate))); return z_impl_can_get_core_clock(dev, rate); } #include <zephyr/syscalls/can_get_core_clock_mrsh.c> static inline uint32_t z_vrfy_can_get_bitrate_min(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_bitrate_min(dev); } #include <zephyr/syscalls/can_get_bitrate_min_mrsh.c> static inline uint32_t z_vrfy_can_get_bitrate_max(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_bitrate_max(dev); } #include <zephyr/syscalls/can_get_bitrate_max_mrsh.c> static inline const struct can_timing *z_vrfy_can_get_timing_min(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_timing_min(dev); } #include <zephyr/syscalls/can_get_timing_min_mrsh.c> static inline const struct can_timing *z_vrfy_can_get_timing_max(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_timing_max(dev); } #include <zephyr/syscalls/can_get_timing_max_mrsh.c> #ifdef CONFIG_CAN_FD_MODE static int z_vrfy_can_calc_timing_data(const struct device *dev, struct can_timing *res, uint32_t bitrate, uint16_t sample_pnt) { struct can_timing res_copy; int err; K_OOPS(K_SYSCALL_DRIVER_CAN(dev, get_core_clock)); K_OOPS(k_usermode_from_copy(&res_copy, res, sizeof(res_copy))); err = z_impl_can_calc_timing_data(dev, &res_copy, bitrate, sample_pnt); K_OOPS(k_usermode_to_copy(res, &res_copy, sizeof(*res))); return err; } #include <zephyr/syscalls/can_calc_timing_data_mrsh.c> static inline const struct can_timing *z_vrfy_can_get_timing_data_min(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_timing_data_min(dev); } #include <zephyr/syscalls/can_get_timing_data_min_mrsh.c> static inline const struct can_timing *z_vrfy_can_get_timing_data_max(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_timing_data_max(dev); } #include <zephyr/syscalls/can_get_timing_data_max_mrsh.c> static inline int z_vrfy_can_set_timing_data(const struct device *dev, const struct can_timing *timing_data) { struct can_timing timing_data_copy; K_OOPS(K_SYSCALL_DRIVER_CAN(dev, set_timing_data)); K_OOPS(k_usermode_from_copy(&timing_data_copy, timing_data, sizeof(timing_data_copy))); return z_impl_can_set_timing_data(dev, &timing_data_copy); } #include <zephyr/syscalls/can_set_timing_data_mrsh.c> static inline int 
z_vrfy_can_set_bitrate_data(const struct device *dev, uint32_t bitrate_data) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, set_timing_data)); return z_impl_can_set_bitrate_data(dev, bitrate_data); } #include <zephyr/syscalls/can_set_bitrate_data_mrsh.c> #endif /* CONFIG_CAN_FD_MODE */ static inline int z_vrfy_can_get_max_filters(const struct device *dev, bool ide) { /* Optional API function */ K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_max_filters(dev, ide); } #include <zephyr/syscalls/can_get_max_filters_mrsh.c> static inline int z_vrfy_can_get_capabilities(const struct device *dev, can_mode_t *cap) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, get_capabilities)); K_OOPS(K_SYSCALL_MEMORY_WRITE(cap, sizeof(*cap))); return z_impl_can_get_capabilities(dev, cap); } #include <zephyr/syscalls/can_get_capabilities_mrsh.c> static inline const struct device *z_vrfy_can_get_transceiver(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_transceiver(dev); } #include <zephyr/syscalls/can_get_transceiver_mrsh.c> static inline int z_vrfy_can_start(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, start)); return z_impl_can_start(dev); } #include <zephyr/syscalls/can_start_mrsh.c> static inline int z_vrfy_can_stop(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, stop)); return z_impl_can_stop(dev); } #include <zephyr/syscalls/can_stop_mrsh.c> static inline int z_vrfy_can_set_mode(const struct device *dev, can_mode_t mode) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, set_mode)); return z_impl_can_set_mode(dev, mode); } #include <zephyr/syscalls/can_set_mode_mrsh.c> static inline can_mode_t z_vrfy_can_get_mode(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_get_mode(dev); } #include <zephyr/syscalls/can_get_mode_mrsh.c> static inline int z_vrfy_can_set_bitrate(const struct device *dev, uint32_t bitrate) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, set_timing)); return z_impl_can_set_bitrate(dev, bitrate); } #include <zephyr/syscalls/can_set_bitrate_mrsh.c> static inline int z_vrfy_can_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { struct can_frame frame_copy; K_OOPS(K_SYSCALL_DRIVER_CAN(dev, send)); K_OOPS(k_usermode_from_copy(&frame_copy, frame, sizeof(frame_copy))); K_OOPS(K_SYSCALL_VERIFY_MSG(callback == NULL, "callbacks may not be set from user mode")); return z_impl_can_send(dev, &frame_copy, timeout, callback, user_data); } #include <zephyr/syscalls/can_send_mrsh.c> static inline int z_vrfy_can_add_rx_filter_msgq(const struct device *dev, struct k_msgq *msgq, const struct can_filter *filter) { struct can_filter filter_copy; K_OOPS(K_SYSCALL_DRIVER_CAN(dev, add_rx_filter)); K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ)); K_OOPS(k_usermode_from_copy(&filter_copy, filter, sizeof(filter_copy))); return z_impl_can_add_rx_filter_msgq(dev, msgq, &filter_copy); } #include <zephyr/syscalls/can_add_rx_filter_msgq_mrsh.c> static inline void z_vrfy_can_remove_rx_filter(const struct device *dev, int filter_id) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, remove_rx_filter)); z_impl_can_remove_rx_filter(dev, filter_id); } #include <zephyr/syscalls/can_remove_rx_filter_mrsh.c> static inline int z_vrfy_can_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { K_OOPS(K_SYSCALL_DRIVER_CAN(dev, get_state)); if (state != NULL) { K_OOPS(K_SYSCALL_MEMORY_WRITE(state, sizeof(*state))); } if (err_cnt != NULL) { 
K_OOPS(K_SYSCALL_MEMORY_WRITE(err_cnt, sizeof(*err_cnt))); } return z_impl_can_get_state(dev, state, err_cnt); } #include <zephyr/syscalls/can_get_state_mrsh.c> #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE static inline int z_vrfy_can_recover(const struct device *dev, k_timeout_t timeout) { /* Optional API function */ K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_recover(dev, timeout); } #include <zephyr/syscalls/can_recover_mrsh.c> #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ #ifdef CONFIG_CAN_STATS static inline uint32_t z_vrfy_can_stats_get_bit_errors(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_stats_get_bit_errors(dev); } #include <zephyr/syscalls/can_stats_get_bit_errors_mrsh.c> static inline uint32_t z_vrfy_can_stats_get_bit0_errors(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_stats_get_bit0_errors(dev); } #include <zephyr/syscalls/can_stats_get_bit0_errors_mrsh.c> static inline uint32_t z_vrfy_can_stats_get_bit1_errors(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_stats_get_bit1_errors(dev); } #include <zephyr/syscalls/can_stats_get_bit1_errors_mrsh.c> static inline uint32_t z_vrfy_can_stats_get_stuff_errors(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_stats_get_stuff_errors(dev); } #include <zephyr/syscalls/can_stats_get_stuff_errors_mrsh.c> static inline uint32_t z_vrfy_can_stats_get_crc_errors(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_stats_get_crc_errors(dev); } #include <zephyr/syscalls/can_stats_get_crc_errors_mrsh.c> static inline uint32_t z_vrfy_can_stats_get_form_errors(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_stats_get_form_errors(dev); } #include <zephyr/syscalls/can_stats_get_form_errors_mrsh.c> static inline uint32_t z_vrfy_can_stats_get_ack_errors(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_stats_get_ack_errors(dev); } #include <zephyr/syscalls/can_stats_get_ack_errors_mrsh.c> static inline uint32_t z_vrfy_can_stats_get_rx_overruns(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_CAN)); return z_impl_can_stats_get_rx_overruns(dev); } #include <zephyr/syscalls/can_stats_get_rx_overruns_mrsh.c> #endif /* CONFIG_CAN_STATS */ ```
/content/code_sandbox/drivers/can/can_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,579
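The syscall verification handlers above explicitly reject TX/RX callbacks from user mode ("callbacks may not be set from user mode"), so user-mode code receives frames through a message-queue filter instead. The following sketch shows that path using only the public API; the identifier and mask values are arbitrary placeholders.

```c
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>

/* Statically define a k_msgq able to buffer two struct can_frame entries */
CAN_MSGQ_DEFINE(rx_msgq, 2);

int receive_one(const struct device *can_dev)
{
	const struct can_filter filter = {
		.flags = 0U,              /* match standard (11-bit) identifiers */
		.id = 0x123,
		.mask = CAN_STD_ID_MASK,  /* exact match on all 11 ID bits */
	};
	struct can_frame frame;
	int filter_id;

	/* Route matching frames into the message queue; no callback involved */
	filter_id = can_add_rx_filter_msgq(can_dev, &rx_msgq, &filter);
	if (filter_id < 0) {
		return filter_id;
	}

	/* Block until a matching frame arrives, then release the filter */
	k_msgq_get(&rx_msgq, &frame, K_FOREVER);
	can_remove_rx_filter(can_dev, filter_id);

	return 0;
}
```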
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/drivers/reset.h> LOG_MODULE_REGISTER(can_mcux_mcan, CONFIG_CAN_LOG_LEVEL); #define DT_DRV_COMPAT nxp_lpc_mcan /* Message RAM Base Address register */ #define MCUX_MCAN_MRBA 0x200 #define MCUX_MCAN_MRBA_BA GENMASK(31, 16) struct mcux_mcan_config { mm_reg_t base; mem_addr_t mram; const struct device *clock_dev; clock_control_subsys_t clock_subsys; void (*irq_config_func)(const struct device *dev); const struct pinctrl_dev_config *pincfg; const struct reset_dt_spec reset; }; static int mcux_mcan_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct can_mcan_config *mcan_config = dev->config; const struct mcux_mcan_config *mcux_config = mcan_config->custom; return can_mcan_sys_read_reg(mcux_config->base, reg, val); } static int mcux_mcan_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct can_mcan_config *mcan_config = dev->config; const struct mcux_mcan_config *mcux_config = mcan_config->custom; return can_mcan_sys_write_reg(mcux_config->base, reg, val); } static int mcux_mcan_read_mram(const struct device *dev, uint16_t offset, void *dst, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct mcux_mcan_config *mcux_config = mcan_config->custom; return can_mcan_sys_read_mram(mcux_config->mram, offset, dst, len); } static int mcux_mcan_write_mram(const struct device *dev, uint16_t offset, const void *src, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct mcux_mcan_config *mcux_config = mcan_config->custom; return can_mcan_sys_write_mram(mcux_config->mram, offset, src, len); } static int mcux_mcan_clear_mram(const struct device *dev, uint16_t offset, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct mcux_mcan_config *mcux_config = mcan_config->custom; return can_mcan_sys_clear_mram(mcux_config->mram, offset, len); } static int mcux_mcan_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_mcan_config *mcan_config = dev->config; const struct mcux_mcan_config *mcux_config = mcan_config->custom; return clock_control_get_rate(mcux_config->clock_dev, mcux_config->clock_subsys, rate); } static int mcux_mcan_init(const struct device *dev) { const struct can_mcan_config *mcan_config = dev->config; const struct mcux_mcan_config *mcux_config = mcan_config->custom; const uintptr_t mrba = mcux_config->mram & MCUX_MCAN_MRBA_BA; int err; if (!device_is_ready(mcux_config->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } if (!device_is_ready(mcux_config->reset.dev)) { LOG_ERR("Reset device not ready"); return -ENODEV; } err = reset_line_toggle(mcux_config->reset.dev, mcux_config->reset.id); if (err) { return err; } err = pinctrl_apply_state(mcux_config->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } err = clock_control_on(mcux_config->clock_dev, mcux_config->clock_subsys); if (err) { LOG_ERR("failed to enable clock (err %d)", err); return -EINVAL; } err = can_mcan_write_reg(dev, MCUX_MCAN_MRBA, (uint32_t)mrba); if (err != 0) { return -EIO; } err = can_mcan_configure_mram(dev, mrba, mcux_config->mram); if (err != 0) { return -EIO; } err = can_mcan_init(dev); if (err) { LOG_ERR("failed to initialize mcan (err %d)", err); return err; } 
mcux_config->irq_config_func(dev); return 0; } static const struct can_driver_api mcux_mcan_driver_api = { .get_capabilities = can_mcan_get_capabilities, .start = can_mcan_start, .stop = can_mcan_stop, .set_mode = can_mcan_set_mode, .set_timing = can_mcan_set_timing, .send = can_mcan_send, .add_rx_filter = can_mcan_add_rx_filter, .remove_rx_filter = can_mcan_remove_rx_filter, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_mcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .get_state = can_mcan_get_state, .set_state_change_callback = can_mcan_set_state_change_callback, .get_core_clock = mcux_mcan_get_core_clock, .get_max_filters = can_mcan_get_max_filters, /* * MCUX MCAN timing limits are specified in the "Nominal bit timing and * prescaler register (NBTP)" table in the SoC reference manual. * * Note that the values here are the "physical" timing limits, whereas * the register field limits are physical values minus 1 (which is * handled by the register assignments in the common MCAN driver code). * * Beware that at least some SoC reference manuals contain a bug * regarding the minimum values for nominal phase segments. Valid * register values are 1 and up. */ .timing_min = CAN_MCAN_TIMING_MIN_INITIALIZER, .timing_max = CAN_MCAN_TIMING_MAX_INITIALIZER, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_mcan_set_timing_data, /* * MCUX MCAN data timing limits are specified in the "Data bit timing * and prescaler register (DBTP)" table in the SoC reference manual. * * Note that the values here are the "physical" timing limits, whereas * the register field limits are physical values minus 1 (which is * handled by the register assignments in the common MCAN driver code). * * Beware that at least some SoC reference manuals contain a bug * regarding the maximum value for data phase segment 2. Valid register * values are 0 to 31. 
*/ .timing_data_min = CAN_MCAN_TIMING_DATA_MIN_INITIALIZER, .timing_data_max = CAN_MCAN_TIMING_DATA_MAX_INITIALIZER, #endif /* CONFIG_CAN_FD_MODE */ }; static const struct can_mcan_ops mcux_mcan_ops = { .read_reg = mcux_mcan_read_reg, .write_reg = mcux_mcan_write_reg, .read_mram = mcux_mcan_read_mram, .write_mram = mcux_mcan_write_mram, .clear_mram = mcux_mcan_clear_mram, }; #define MCUX_MCAN_INIT(n) \ CAN_MCAN_DT_INST_BUILD_ASSERT_MRAM_CFG(n); \ PINCTRL_DT_INST_DEFINE(n); \ \ static void mcux_mcan_irq_config_##n(const struct device *dev); \ \ CAN_MCAN_DT_INST_CALLBACKS_DEFINE(n, mcux_mcan_cbs_##n); \ CAN_MCAN_DT_INST_MRAM_DEFINE(n, mcux_mcan_mram_##n); \ \ static const struct mcux_mcan_config mcux_mcan_config_##n = { \ .base = CAN_MCAN_DT_INST_MCAN_ADDR(n), \ .mram = (mem_addr_t)POINTER_TO_UINT(&mcux_mcan_mram_##n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t) \ DT_INST_CLOCKS_CELL(n, name), \ .irq_config_func = mcux_mcan_irq_config_##n, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .reset = RESET_DT_SPEC_INST_GET(n), \ }; \ \ static const struct can_mcan_config can_mcan_config_##n = \ CAN_MCAN_DT_CONFIG_INST_GET(n, &mcux_mcan_config_##n, \ &mcux_mcan_ops, \ &mcux_mcan_cbs_##n); \ \ static struct can_mcan_data can_mcan_data_##n = \ CAN_MCAN_DATA_INITIALIZER(NULL); \ \ CAN_DEVICE_DT_INST_DEFINE(n, mcux_mcan_init, NULL, \ &can_mcan_data_##n, \ &can_mcan_config_##n, \ POST_KERNEL, \ CONFIG_CAN_INIT_PRIORITY, \ &mcux_mcan_driver_api); \ \ static void mcux_mcan_irq_config_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, int0, irq), \ DT_INST_IRQ_BY_NAME(n, int0, priority), \ can_mcan_line_0_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(n, int0, irq)); \ \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, int1, irq), \ DT_INST_IRQ_BY_NAME(n, int1, priority), \ can_mcan_line_1_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(n, int1, irq)); \ } DT_INST_FOREACH_STATUS_OKAY(MCUX_MCAN_INIT) ```
/content/code_sandbox/drivers/can/can_mcux_mcan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,297
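Several drivers in this directory (the XMC4XXX state-change handler and the MCAN-based front-ends above included) report bus state transitions through the state-change callback hook rather than by polling. A small sketch of registering such a callback from an application, assuming the controller device pointer is already resolved; the log output is purely illustrative.

```c
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static void on_state_change(const struct device *dev, enum can_state state,
			    struct can_bus_err_cnt err_cnt, void *user_data)
{
	ARG_UNUSED(user_data);

	/* Invoked by the driver on ERROR_WARNING/ERROR_PASSIVE/BUS_OFF etc. */
	printk("%s: state %d (tx_err=%u rx_err=%u)\n", dev->name, state,
	       err_cnt.tx_err_cnt, err_cnt.rx_err_cnt);
}

void monitor_bus(const struct device *can_dev)
{
	struct can_bus_err_cnt err_cnt;
	enum can_state state;

	can_set_state_change_callback(can_dev, on_state_change, NULL);

	/* The current state can also be polled on demand */
	if (can_get_state(can_dev, &state, &err_cnt) == 0) {
		printk("initial state: %d\n", state);
	}
}
```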
```c /* * */ #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include <soc.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(can_sam, CONFIG_CAN_LOG_LEVEL); #define DT_DRV_COMPAT atmel_sam_can struct can_sam_config { mm_reg_t base; mem_addr_t mram; void (*config_irq)(void); const struct atmel_sam_pmc_config clock_cfg; const struct pinctrl_dev_config *pcfg; int divider; mm_reg_t dma_base; }; static int can_sam_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam_config *sam_config = mcan_config->custom; return can_mcan_sys_read_reg(sam_config->base, reg, val); } static int can_sam_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam_config *sam_config = mcan_config->custom; return can_mcan_sys_write_reg(sam_config->base, reg, val); } static int can_sam_read_mram(const struct device *dev, uint16_t offset, void *dst, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam_config *sam_config = mcan_config->custom; return can_mcan_sys_read_mram(sam_config->mram, offset, dst, len); } static int can_sam_write_mram(const struct device *dev, uint16_t offset, const void *src, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam_config *sam_config = mcan_config->custom; return can_mcan_sys_write_mram(sam_config->mram, offset, src, len); } static int can_sam_clear_mram(const struct device *dev, uint16_t offset, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam_config *sam_config = mcan_config->custom; return can_mcan_sys_clear_mram(sam_config->mram, offset, len); } static int can_sam_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_sam_config *sam_cfg = mcan_cfg->custom; *rate = SOC_ATMEL_SAM_UPLLCK_FREQ_HZ / (sam_cfg->divider); return 0; } static void can_sam_clock_enable(const struct can_sam_config *sam_cfg) { REG_PMC_PCK5 = PMC_PCK_CSS_UPLL_CLK | PMC_PCK_PRES(sam_cfg->divider - 1); PMC->PMC_SCER |= PMC_SCER_PCK5; /* Enable CAN clock in PMC */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&sam_cfg->clock_cfg); } static int can_sam_init(const struct device *dev) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_sam_config *sam_cfg = mcan_cfg->custom; int ret; can_sam_clock_enable(sam_cfg); ret = pinctrl_apply_state(sam_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* get actual message ram base address */ uint32_t mrba = sam_cfg->mram & 0xFFFF0000; /* keep lower 16bit; update DMA Base Register */ sys_write32(sam_cfg->dma_base, (sys_read32(sam_cfg->dma_base) & 0x0000FFFF) | mrba); ret = can_mcan_configure_mram(dev, mrba, sam_cfg->mram); if (ret != 0) { return ret; } ret = can_mcan_init(dev); if (ret != 0) { return ret; } sam_cfg->config_irq(); return ret; } static const struct can_driver_api can_sam_driver_api = { .get_capabilities = can_mcan_get_capabilities, .start = can_mcan_start, .stop = can_mcan_stop, .set_mode = can_mcan_set_mode, .set_timing = can_mcan_set_timing, .send = can_mcan_send, .add_rx_filter = can_mcan_add_rx_filter, .remove_rx_filter = can_mcan_remove_rx_filter, .get_state = 
can_mcan_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_mcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .get_core_clock = can_sam_get_core_clock, .get_max_filters = can_mcan_get_max_filters, .set_state_change_callback = can_mcan_set_state_change_callback, .timing_min = CAN_MCAN_TIMING_MIN_INITIALIZER, .timing_max = CAN_MCAN_TIMING_MAX_INITIALIZER, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_mcan_set_timing_data, .timing_data_min = CAN_MCAN_TIMING_DATA_MIN_INITIALIZER, .timing_data_max = CAN_MCAN_TIMING_DATA_MAX_INITIALIZER, #endif /* CONFIG_CAN_FD_MODE */ }; static const struct can_mcan_ops can_sam_ops = { .read_reg = can_sam_read_reg, .write_reg = can_sam_write_reg, .read_mram = can_sam_read_mram, .write_mram = can_sam_write_mram, .clear_mram = can_sam_clear_mram, }; #define CAN_SAM_IRQ_CFG_FUNCTION(inst) \ static void config_can_##inst##_irq(void) \ { \ LOG_DBG("Enable CAN##inst## IRQ"); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, int0, irq), \ DT_INST_IRQ_BY_NAME(inst, int0, priority), can_mcan_line_0_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, int0, irq)); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, int1, irq), \ DT_INST_IRQ_BY_NAME(inst, int1, priority), can_mcan_line_1_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, int1, irq)); \ } #define CAN_SAM_CFG_INST(inst) \ CAN_MCAN_DT_INST_CALLBACKS_DEFINE(inst, can_sam_cbs_##inst); \ CAN_MCAN_DT_INST_MRAM_DEFINE(inst, can_sam_mram_##inst); \ \ static const struct can_sam_config can_sam_cfg_##inst = { \ .base = CAN_MCAN_DT_INST_MCAN_ADDR(inst), \ .mram = (mem_addr_t)POINTER_TO_UINT(&can_sam_mram_##inst), \ .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(inst), \ .divider = DT_INST_PROP(inst, divider), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .config_irq = config_can_##inst##_irq, \ .dma_base = (mm_reg_t) DT_INST_REG_ADDR_BY_NAME(inst, dma_base) \ }; \ \ static const struct can_mcan_config can_mcan_cfg_##inst = \ CAN_MCAN_DT_CONFIG_INST_GET(inst, &can_sam_cfg_##inst, \ &can_sam_ops, \ &can_sam_cbs_##inst); #define CAN_SAM_DATA_INST(inst) \ static struct can_mcan_data can_mcan_data_##inst = \ CAN_MCAN_DATA_INITIALIZER(NULL); #define CAN_SAM_DEVICE_INST(inst) \ CAN_DEVICE_DT_INST_DEFINE(inst, can_sam_init, NULL, \ &can_mcan_data_##inst, \ &can_mcan_cfg_##inst, \ POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_sam_driver_api); #define CAN_SAM_INST(inst) \ CAN_MCAN_DT_INST_BUILD_ASSERT_MRAM_CFG(inst); \ PINCTRL_DT_INST_DEFINE(inst); \ CAN_SAM_IRQ_CFG_FUNCTION(inst) \ CAN_SAM_CFG_INST(inst) \ CAN_SAM_DATA_INST(inst) \ CAN_SAM_DEVICE_INST(inst) DT_INST_FOREACH_STATUS_OKAY(CAN_SAM_INST) ```
/content/code_sandbox/drivers/can/can_sam.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,919
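Like `can_sam_get_core_clock()` above, every driver exposes its core clock, filter capacity, and supported modes through the generic API, which helps when writing portable application code. A short probing sketch, again assuming `can_dev` was obtained elsewhere:

```c
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

void probe_controller(const struct device *can_dev)
{
	uint32_t core_clock = 0U;
	can_mode_t caps = 0U;

	/* Core clock feeding the bit-timing prescaler (e.g. UPLL/divider on SAM) */
	if (can_get_core_clock(can_dev, &core_clock) == 0) {
		printk("core clock: %u Hz\n", core_clock);
	}

	/* Remaining filters for standard (11-bit) identifiers */
	printk("std filters: %d\n", can_get_max_filters(can_dev, false));

	/* Bitwise OR of the CAN_MODE_* values this driver supports */
	if (can_get_capabilities(can_dev, &caps) == 0) {
		printk("capabilities: 0x%08x\n", (uint32_t)caps);
	}
}
```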
```unknown # MCP2515 CAN configuration options config CAN_MCP2515 bool "MCP2515 CAN Driver" default y depends on DT_HAS_MICROCHIP_MCP2515_ENABLED select SPI help Enable MCP2515 CAN Driver if CAN_MCP2515 config CAN_MCP2515_INT_THREAD_STACK_SIZE int "Stack size for interrupt handler" default 1024 help Size of the stack used for the internal thread which is run for interrupt handling and incoming packets. config CAN_MCP2515_INT_THREAD_PRIO int "Priority for interrupt handler" default 2 help Priority level of the internal thread which is run for interrupt handling and incoming packets. config CAN_MAX_FILTER int "Maximum number of concurrent active filters" default 5 range 1 32 help Defines the array size of the callback/msgq pointers. Must be at least the size of concurrent reads. endif # CAN_MCP2515 ```
/content/code_sandbox/drivers/can/Kconfig.mcp2515
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
209
```unknown # NuMaker CAN(-FD) driver configuration options config CAN_NUMAKER bool "Nuvoton NuMaker CAN FD driver" default y select CAN_MCAN depends on DT_HAS_NUVOTON_NUMAKER_CANFD_ENABLED depends on SOC_SERIES_M46X || SOC_SERIES_M2L31X help Enables the Nuvoton NuMaker CAN FD driver, using Bosch M_CAN. ```
/content/code_sandbox/drivers/can/Kconfig.numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
88
```unknown # Renesas R-Car CAN configuration options config CAN_RCAR bool "Renesas R-Car CAN Driver" default y depends on DT_HAS_RENESAS_RCAR_CAN_ENABLED help Enable Renesas R-Car CAN Driver. config CAN_RCAR_MAX_FILTER int "Maximum number of concurrent active filters" depends on CAN_RCAR default 5 range 1 32 help Defines the array size of the callback/msgq pointers. Must be at least the size of concurrent reads. ```
/content/code_sandbox/drivers/can/Kconfig.rcar
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
115
```unknown # Fake CAN configuration options config CAN_FAKE bool "Fake CAN driver" default y depends on DT_HAS_ZEPHYR_FAKE_CAN_ENABLED help Enable support for the FFF-based fake CAN driver. ```
/content/code_sandbox/drivers/can/Kconfig.fake
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
49
```c /* * */ #define DT_DRV_COMPAT zephyr_native_linux_can #include <stdbool.h> #include <stdio.h> #include <string.h> #include <cmdline.h> #include <posix_native_task.h> #include <zephyr/drivers/can.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/socketcan.h> #include <zephyr/net/socketcan_utils.h> #include "can_native_linux_adapt.h" #include "nsi_host_trampolines.h" LOG_MODULE_REGISTER(can_native_linux, CONFIG_CAN_LOG_LEVEL); struct can_filter_context { can_rx_callback_t rx_cb; void *cb_arg; struct can_filter filter; }; struct can_native_linux_data { struct can_driver_data common; struct can_filter_context filters[CONFIG_CAN_MAX_FILTER]; struct k_mutex filter_mutex; struct k_sem tx_idle; can_tx_callback_t tx_callback; void *tx_user_data; int dev_fd; /* Linux socket file descriptor */ struct k_thread rx_thread; K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE); }; struct can_native_linux_config { const struct can_driver_config common; const char *if_name; }; static const char *if_name_cmd_opt; static void dispatch_frame(const struct device *dev, struct can_frame *frame) { struct can_native_linux_data *data = dev->data; can_rx_callback_t callback; struct can_frame tmp_frame; k_mutex_lock(&data->filter_mutex, K_FOREVER); for (int filter_id = 0; filter_id < ARRAY_SIZE(data->filters); filter_id++) { if (data->filters[filter_id].rx_cb == NULL) { continue; } if (!can_frame_matches_filter(frame, &data->filters[filter_id].filter)) { continue; } /* Make a temporary copy in case the user modifies the message */ tmp_frame = *frame; callback = data->filters[filter_id].rx_cb; callback(dev, &tmp_frame, data->filters[filter_id].cb_arg); } k_mutex_unlock(&data->filter_mutex); } static void rx_thread(void *arg1, void *arg2, void *arg3) { const struct device *dev = arg1; struct can_native_linux_data *data = dev->data; struct socketcan_frame sframe; struct can_frame frame; bool msg_confirm; int count; ARG_UNUSED(arg2); ARG_UNUSED(arg3); LOG_DBG("Starting Linux SocketCAN RX thread"); while (true) { while (linux_socketcan_poll_data(data->dev_fd) == 0) { count = linux_socketcan_read_data(data->dev_fd, (void *)(&sframe), sizeof(sframe), &msg_confirm); if (msg_confirm) { data->tx_callback(dev, 0, data->tx_user_data); k_sem_give(&data->tx_idle); if ((data->common.mode & CAN_MODE_LOOPBACK) == 0U) { continue; } } if ((count <= 0) || !data->common.started) { break; } socketcan_to_can_frame(&sframe, &frame); #ifndef CONFIG_CAN_ACCEPT_RTR if ((frame.flags & CAN_FRAME_RTR) != 0U) { continue; } #endif /* !CONFIG_CAN_ACCEPT_RTR*/ LOG_DBG("Received %d bytes. Id: 0x%x, ID type: %s %s", frame.dlc, frame.id, (frame.flags & CAN_FRAME_IDE) != 0 ? "extended" : "standard", (frame.flags & CAN_FRAME_RTR) != 0 ? ", RTR frame" : ""); dispatch_frame(dev, &frame); } /* short sleep required to avoid blocking the whole native process */ k_sleep(K_MSEC(1)); } } static int can_native_linux_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { struct can_native_linux_data *data = dev->data; struct socketcan_frame sframe; uint8_t max_dlc = CAN_MAX_DLC; size_t mtu = CAN_MTU; int ret = -EIO; LOG_DBG("Sending %d bytes on %s. Id: 0x%x, ID type: %s %s", frame->dlc, dev->name, frame->id, (frame->flags & CAN_FRAME_IDE) != 0 ? "extended" : "standard", (frame->flags & CAN_FRAME_RTR) != 0 ? 
", RTR frame" : ""); #ifdef CONFIG_CAN_FD_MODE if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR | CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if ((frame->flags & CAN_FRAME_FDF) != 0) { if ((data->common.mode & CAN_MODE_FD) == 0U) { return -ENOTSUP; } max_dlc = CANFD_MAX_DLC; mtu = CANFD_MTU; } #else /* CONFIG_CAN_FD_MODE */ if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } #endif /* !CONFIG_CAN_FD_MODE */ if (frame->dlc > max_dlc) { LOG_ERR("DLC of %d exceeds maximum (%d)", frame->dlc, max_dlc); return -EINVAL; } if (data->dev_fd <= 0) { LOG_ERR("No file descriptor: %d", data->dev_fd); return -EIO; } if (!data->common.started) { return -ENETDOWN; } socketcan_from_can_frame(frame, &sframe); if (k_sem_take(&data->tx_idle, timeout) != 0) { return -EAGAIN; } data->tx_callback = callback; data->tx_user_data = user_data; ret = nsi_host_write(data->dev_fd, &sframe, mtu); if (ret < 0) { LOG_ERR("Cannot send CAN data len %d (%d)", sframe.len, -errno); } return 0; } static int can_native_linux_add_rx_filter(const struct device *dev, can_rx_callback_t cb, void *cb_arg, const struct can_filter *filter) { struct can_native_linux_data *data = dev->data; struct can_filter_context *filter_ctx; int filter_id = -ENOSPC; LOG_DBG("Setting filter ID: 0x%x, mask: 0x%x", filter->id, filter->mask); if ((filter->flags & ~(CAN_FILTER_IDE)) != 0) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } k_mutex_lock(&data->filter_mutex, K_FOREVER); for (int i = 0; i < ARRAY_SIZE(data->filters); i++) { if (data->filters[i].rx_cb == NULL) { filter_id = i; break; } } if (filter_id < 0) { LOG_ERR("No free filter left"); k_mutex_unlock(&data->filter_mutex); return filter_id; } filter_ctx = &data->filters[filter_id]; filter_ctx->rx_cb = cb; filter_ctx->cb_arg = cb_arg; filter_ctx->filter = *filter; k_mutex_unlock(&data->filter_mutex); LOG_DBG("Filter added. ID: %d", filter_id); return filter_id; } static void can_native_linux_remove_rx_filter(const struct device *dev, int filter_id) { struct can_native_linux_data *data = dev->data; if (filter_id < 0 || filter_id >= ARRAY_SIZE(data->filters)) { LOG_ERR("filter ID %d out of bounds", filter_id); return; } k_mutex_lock(&data->filter_mutex, K_FOREVER); data->filters[filter_id].rx_cb = NULL; k_mutex_unlock(&data->filter_mutex); LOG_DBG("Filter removed. 
ID: %d", filter_id); } static int can_native_linux_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK; #if CONFIG_CAN_FD_MODE *cap |= CAN_MODE_FD; #endif /* CONFIG_CAN_FD_MODE */ return 0; } static int can_native_linux_start(const struct device *dev) { struct can_native_linux_data *data = dev->data; if (data->common.started) { return -EALREADY; } data->common.started = true; return 0; } static int can_native_linux_stop(const struct device *dev) { struct can_native_linux_data *data = dev->data; if (!data->common.started) { return -EALREADY; } data->common.started = false; return 0; } static int can_native_linux_set_mode(const struct device *dev, can_mode_t mode) { struct can_native_linux_data *data = dev->data; int err; #ifdef CONFIG_CAN_FD_MODE if ((mode & ~(CAN_MODE_LOOPBACK | CAN_MODE_FD)) != 0) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } #else if ((mode & ~(CAN_MODE_LOOPBACK)) != 0) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } #endif /* CONFIG_CAN_FD_MODE */ if (data->common.started) { return -EBUSY; } err = linux_socketcan_set_mode_fd(data->dev_fd, (mode & CAN_MODE_FD) != 0); if (err != 0) { LOG_ERR("failed to set mode"); return -EIO; } data->common.mode = mode; return 0; } static int can_native_linux_set_timing(const struct device *dev, const struct can_timing *timing) { struct can_native_linux_data *data = dev->data; ARG_UNUSED(timing); if (data->common.started) { return -EBUSY; } return 0; } #ifdef CONFIG_CAN_FD_MODE static int can_native_linux_set_timing_data(const struct device *dev, const struct can_timing *timing) { struct can_native_linux_data *data = dev->data; ARG_UNUSED(timing); if (data->common.started) { return -EBUSY; } return 0; } #endif /* CONFIG_CAN_FD_MODE */ static int can_native_linux_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { struct can_native_linux_data *data = dev->data; if (state != NULL) { if (!data->common.started) { *state = CAN_STATE_STOPPED; } else { /* SocketCAN does not forward error frames by default */ *state = CAN_STATE_ERROR_ACTIVE; } } if (err_cnt) { err_cnt->tx_err_cnt = 0; err_cnt->rx_err_cnt = 0; } return 0; } static void can_native_linux_set_state_change_callback(const struct device *dev, can_state_change_callback_t cb, void *user_data) { ARG_UNUSED(dev); ARG_UNUSED(cb); ARG_UNUSED(user_data); } static int can_native_linux_get_core_clock(const struct device *dev, uint32_t *rate) { /* Return 16MHz as an realistic value for the testcases */ *rate = 16000000; return 0; } static int can_native_linux_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(ide); return CONFIG_CAN_MAX_FILTER; } static const struct can_driver_api can_native_linux_driver_api = { .start = can_native_linux_start, .stop = can_native_linux_stop, .get_capabilities = can_native_linux_get_capabilities, .set_mode = can_native_linux_set_mode, .set_timing = can_native_linux_set_timing, .send = can_native_linux_send, .add_rx_filter = can_native_linux_add_rx_filter, .remove_rx_filter = can_native_linux_remove_rx_filter, .get_state = can_native_linux_get_state, .set_state_change_callback = can_native_linux_set_state_change_callback, .get_core_clock = can_native_linux_get_core_clock, .get_max_filters = can_native_linux_get_max_filters, .timing_min = { .sjw = 0x1, .prop_seg = 0x01, .phase_seg1 = 0x01, .phase_seg2 = 0x01, .prescaler = 0x01 }, .timing_max = { .sjw = 0x0F, .prop_seg = 0x0F, .phase_seg1 = 0x0F, .phase_seg2 
= 0x0F, .prescaler = 0xFFFF }, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_native_linux_set_timing_data, .timing_data_min = { .sjw = 0x1, .prop_seg = 0x01, .phase_seg1 = 0x01, .phase_seg2 = 0x01, .prescaler = 0x01 }, .timing_data_max = { .sjw = 0x0F, .prop_seg = 0x0F, .phase_seg1 = 0x0F, .phase_seg2 = 0x0F, .prescaler = 0xFFFF }, #endif /* CONFIG_CAN_FD_MODE */ }; static int can_native_linux_init(const struct device *dev) { const struct can_native_linux_config *cfg = dev->config; struct can_native_linux_data *data = dev->data; const char *if_name; k_mutex_init(&data->filter_mutex); k_sem_init(&data->tx_idle, 1, 1); if (if_name_cmd_opt != NULL) { if_name = if_name_cmd_opt; } else { if_name = cfg->if_name; } LOG_DBG("Opening %s", if_name); data->dev_fd = linux_socketcan_iface_open(if_name); if (data->dev_fd < 0) { LOG_ERR("Cannot open %s (%d)", if_name, data->dev_fd); return -ENODEV; } k_thread_create(&data->rx_thread, data->rx_thread_stack, K_KERNEL_STACK_SIZEOF(data->rx_thread_stack), rx_thread, (void *)dev, NULL, NULL, CONFIG_CAN_NATIVE_LINUX_RX_THREAD_PRIORITY, 0, K_NO_WAIT); LOG_DBG("Init of %s done", dev->name); return 0; } #define CAN_NATIVE_LINUX_INIT(inst) \ \ static const struct can_native_linux_config can_native_linux_cfg_##inst = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, 0), \ .if_name = DT_INST_PROP(inst, host_interface), \ }; \ \ static struct can_native_linux_data can_native_linux_data_##inst; \ \ CAN_DEVICE_DT_INST_DEFINE(inst, can_native_linux_init, NULL, \ &can_native_linux_data_##inst, \ &can_native_linux_cfg_##inst, \ POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_native_linux_driver_api); DT_INST_FOREACH_STATUS_OKAY(CAN_NATIVE_LINUX_INIT) static void add_native_options(void) { static struct args_struct_t can_native_options[] = { { .is_mandatory = false, .option = "can-if", .name = "name", .type = 's', .dest = (void *)&if_name_cmd_opt, .descript = "Name of the host CAN interface to use", }, ARG_TABLE_ENDMARKER, }; native_add_command_line_opts(can_native_options); } NATIVE_TASK(add_native_options, PRE_BOOT_1, 10); ```
/content/code_sandbox/drivers/can/can_native_linux.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,540
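The native Linux driver above advertises `CAN_MODE_LOOPBACK`, which makes it convenient for self-contained tests: a frame sent in loopback mode is delivered back to the sender's own filters without any other node on the bus. A sketch of such a round trip follows; the identifier is a placeholder, and the mode must be set while the controller is still stopped.

```c
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>
#include <errno.h>

CAN_MSGQ_DEFINE(loopback_msgq, 1);

int loopback_roundtrip(const struct device *can_dev)
{
	const struct can_filter accept_all = { .flags = 0U, .id = 0U, .mask = 0U };
	struct can_frame tx = { .id = 0x10, .dlc = 1, .data = { 0x42 } };
	struct can_frame rx;
	int filter_id;
	int err;

	/* Mode changes are only accepted while the controller is stopped */
	err = can_set_mode(can_dev, CAN_MODE_LOOPBACK);
	if (err != 0) {
		return err;
	}

	err = can_start(can_dev);
	if (err != 0 && err != -EALREADY) {
		return err;
	}

	filter_id = can_add_rx_filter_msgq(can_dev, &loopback_msgq, &accept_all);
	if (filter_id < 0) {
		return filter_id;
	}

	err = can_send(can_dev, &tx, K_MSEC(100), NULL, NULL);
	if (err != 0) {
		return err;
	}

	/* In loopback mode the frame comes straight back to our own filter */
	return k_msgq_get(&loopback_msgq, &rx, K_MSEC(100));
}
```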
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_twai #include <zephyr/drivers/can/can_sja1000.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/interrupt_controller/intc_esp32.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> #include <soc.h> LOG_MODULE_REGISTER(can_esp32_twai, CONFIG_CAN_LOG_LEVEL); /* * Newer ESP32-series MCUs like ESP32-C3 and ESP32-S2 have some slightly different registers * compared to the original ESP32, which is fully compatible with the SJA1000 controller. * * The names with TWAI_ prefixes from Espressif reference manuals are used for these incompatible * registers. */ #ifndef CONFIG_SOC_SERIES_ESP32 /* TWAI_BUS_TIMING_0_REG is incompatible with CAN_SJA1000_BTR0 */ #define TWAI_BUS_TIMING_0_REG (6U) #define TWAI_BAUD_PRESC_MASK GENMASK(12, 0) #define TWAI_SYNC_JUMP_WIDTH_MASK GENMASK(15, 14) #define TWAI_BAUD_PRESC_PREP(brp) FIELD_PREP(TWAI_BAUD_PRESC_MASK, brp) #define TWAI_SYNC_JUMP_WIDTH_PREP(sjw) FIELD_PREP(TWAI_SYNC_JUMP_WIDTH_MASK, sjw) /* * TWAI_BUS_TIMING_1_REG is compatible with CAN_SJA1000_BTR1, but needed here for the custom * set_timing() function. */ #define TWAI_BUS_TIMING_1_REG (7U) #define TWAI_TIME_SEG1_MASK GENMASK(3, 0) #define TWAI_TIME_SEG2_MASK GENMASK(6, 4) #define TWAI_TIME_SAMP BIT(7) #define TWAI_TIME_SEG1_PREP(seg1) FIELD_PREP(TWAI_TIME_SEG1_MASK, seg1) #define TWAI_TIME_SEG2_PREP(seg2) FIELD_PREP(TWAI_TIME_SEG2_MASK, seg2) /* TWAI_CLOCK_DIVIDER_REG is incompatible with CAN_SJA1000_CDR */ #define TWAI_CLOCK_DIVIDER_REG (31U) #define TWAI_CD_MASK GENMASK(7, 0) #define TWAI_CLOCK_OFF BIT(8) /* * Further incompatible registers currently not used by the driver: * - TWAI_STATUS_REG has new bit 8: TWAI_MISS_ST * - TWAI_INT_RAW_REG has new bit 8: TWAI_BUS_STATE_INT_ST * - TWAI_INT_ENA_REG has new bit 8: TWAI_BUS_STATE_INT_ENA */ #else /* Redefinitions of the SJA1000 CDR bits to simplify driver config */ #define TWAI_CD_MASK GENMASK(2, 0) #define TWAI_CLOCK_OFF BIT(3) #endif /* !CONFIG_SOC_SERIES_ESP32 */ struct can_esp32_twai_config { mm_reg_t base; const struct pinctrl_dev_config *pcfg; const struct device *clock_dev; const clock_control_subsys_t clock_subsys; int irq_source; #ifndef CONFIG_SOC_SERIES_ESP32 /* 32-bit variant of output clock divider register required for non-ESP32 MCUs */ uint32_t cdr32; #endif /* !CONFIG_SOC_SERIES_ESP32 */ }; static uint8_t can_esp32_twai_read_reg(const struct device *dev, uint8_t reg) { const struct can_sja1000_config *sja1000_config = dev->config; const struct can_esp32_twai_config *twai_config = sja1000_config->custom; mm_reg_t addr = twai_config->base + reg * sizeof(uint32_t); return sys_read32(addr) & 0xFF; } static void can_esp32_twai_write_reg(const struct device *dev, uint8_t reg, uint8_t val) { const struct can_sja1000_config *sja1000_config = dev->config; const struct can_esp32_twai_config *twai_config = sja1000_config->custom; mm_reg_t addr = twai_config->base + reg * sizeof(uint32_t); sys_write32(val & 0xFF, addr); } #ifndef CONFIG_SOC_SERIES_ESP32 /* * Required for newer ESP32-series MCUs which violate the original SJA1000 8-bit register size. 
*/ static void can_esp32_twai_write_reg32(const struct device *dev, uint8_t reg, uint32_t val) { const struct can_sja1000_config *sja1000_config = dev->config; const struct can_esp32_twai_config *twai_config = sja1000_config->custom; mm_reg_t addr = twai_config->base + reg * sizeof(uint32_t); sys_write32(val, addr); } /* * Custom implementation instead of can_sja1000_set_timing required because TWAI_BUS_TIMING_0_REG * is incompatible with CAN_SJA1000_BTR0. */ static int can_esp32_twai_set_timing(const struct device *dev, const struct can_timing *timing) { struct can_sja1000_data *data = dev->data; uint8_t btr0; uint8_t btr1; if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->mod_lock, K_FOREVER); btr0 = TWAI_BAUD_PRESC_PREP(timing->prescaler - 1) | TWAI_SYNC_JUMP_WIDTH_PREP(timing->sjw - 1); btr1 = TWAI_TIME_SEG1_PREP(timing->phase_seg1 - 1) | TWAI_TIME_SEG2_PREP(timing->phase_seg2 - 1); if ((data->common.mode & CAN_MODE_3_SAMPLES) != 0) { btr1 |= TWAI_TIME_SAMP; } can_esp32_twai_write_reg32(dev, TWAI_BUS_TIMING_0_REG, btr0); can_esp32_twai_write_reg32(dev, TWAI_BUS_TIMING_1_REG, btr1); k_mutex_unlock(&data->mod_lock); return 0; } #endif /* !CONFIG_SOC_SERIES_ESP32 */ static int can_esp32_twai_get_core_clock(const struct device *dev, uint32_t *rate) { ARG_UNUSED(dev); /* The internal clock operates at half of the oscillator frequency */ *rate = APB_CLK_FREQ / 2; return 0; } static void IRAM_ATTR can_esp32_twai_isr(void *arg) { const struct device *dev = (const struct device *)arg; can_sja1000_isr(dev); } static int can_esp32_twai_init(const struct device *dev) { const struct can_sja1000_config *sja1000_config = dev->config; const struct can_esp32_twai_config *twai_config = sja1000_config->custom; int err; if (!device_is_ready(twai_config->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } err = pinctrl_apply_state(twai_config->pcfg, PINCTRL_STATE_DEFAULT); if (err != 0) { LOG_ERR("failed to configure TWAI pins (err %d)", err); return err; } err = clock_control_on(twai_config->clock_dev, twai_config->clock_subsys); if (err != 0) { LOG_ERR("failed to enable CAN clock (err %d)", err); return err; } err = can_sja1000_init(dev); if (err != 0) { LOG_ERR("failed to initialize controller (err %d)", err); return err; } #ifndef CONFIG_SOC_SERIES_ESP32 /* * TWAI_CLOCK_DIVIDER_REG is incompatible with CAN_SJA1000_CDR for non-ESP32 MCUs * - TWAI_CD has length of 8 bits instead of 3 bits * - TWAI_CLOCK_OFF at BIT(8) instead of BIT(3) * - TWAI_EXT_MODE bit missing (always "extended" = PeliCAN mode) * * Overwrite with 32-bit register variant configured via devicetree. 
*/ can_esp32_twai_write_reg32(dev, TWAI_CLOCK_DIVIDER_REG, twai_config->cdr32); #endif /* !CONFIG_SOC_SERIES_ESP32 */ esp_intr_alloc(twai_config->irq_source, 0, can_esp32_twai_isr, (void *)dev, NULL); return 0; } const struct can_driver_api can_esp32_twai_driver_api = { .get_capabilities = can_sja1000_get_capabilities, .start = can_sja1000_start, .stop = can_sja1000_stop, .set_mode = can_sja1000_set_mode, #ifdef CONFIG_SOC_SERIES_ESP32 .set_timing = can_sja1000_set_timing, #else .set_timing = can_esp32_twai_set_timing, #endif /* CONFIG_SOC_SERIES_ESP32 */ .send = can_sja1000_send, .add_rx_filter = can_sja1000_add_rx_filter, .remove_rx_filter = can_sja1000_remove_rx_filter, .get_state = can_sja1000_get_state, .set_state_change_callback = can_sja1000_set_state_change_callback, .get_core_clock = can_esp32_twai_get_core_clock, .get_max_filters = can_sja1000_get_max_filters, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_sja1000_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .timing_min = CAN_SJA1000_TIMING_MIN_INITIALIZER, #ifdef CONFIG_SOC_SERIES_ESP32 .timing_max = CAN_SJA1000_TIMING_MAX_INITIALIZER, #else /* larger prescaler allowed for newer ESP32-series MCUs */ .timing_max = { .sjw = 0x4, .prop_seg = 0x0, .phase_seg1 = 0x10, .phase_seg2 = 0x8, .prescaler = 0x2000, } #endif /* CONFIG_SOC_SERIES_ESP32 */ }; #ifdef CONFIG_SOC_SERIES_ESP32 #define TWAI_CLKOUT_DIVIDER_MAX (14) #define TWAI_CDR32_INIT(inst) #else #define TWAI_CLKOUT_DIVIDER_MAX (490) #define TWAI_CDR32_INIT(inst) .cdr32 = CAN_ESP32_TWAI_DT_CDR_INST_GET(inst) #endif /* CONFIG_SOC_SERIES_ESP32 */ #define CAN_ESP32_TWAI_ASSERT_CLKOUT_DIVIDER(inst) \ BUILD_ASSERT(COND_CODE_0(DT_INST_NODE_HAS_PROP(inst, clkout_divider), (1), \ (DT_INST_PROP(inst, clkout_divider) == 1 || \ (DT_INST_PROP(inst, clkout_divider) % 2 == 0 && \ DT_INST_PROP(inst, clkout_divider) / 2 <= TWAI_CLKOUT_DIVIDER_MAX))), \ "TWAI clkout-divider from dts invalid") #define CAN_ESP32_TWAI_DT_CDR_INST_GET(inst) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, clkout_divider), \ COND_CODE_1(DT_INST_PROP(inst, clkout_divider) == 1, (TWAI_CD_MASK), \ ((DT_INST_PROP(inst, clkout_divider)) / 2 - 1)), \ (TWAI_CLOCK_OFF)) #define CAN_ESP32_TWAI_INIT(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ \ static const struct can_esp32_twai_config can_esp32_twai_config_##inst = { \ .base = DT_INST_REG_ADDR(inst), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(inst)), \ .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(inst, offset), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .irq_source = DT_INST_IRQN(inst), \ TWAI_CDR32_INIT(inst) \ }; \ CAN_ESP32_TWAI_ASSERT_CLKOUT_DIVIDER(inst); \ static const struct can_sja1000_config can_sja1000_config_##inst = \ CAN_SJA1000_DT_CONFIG_INST_GET(inst, &can_esp32_twai_config_##inst, \ can_esp32_twai_read_reg, can_esp32_twai_write_reg, \ CAN_SJA1000_OCR_OCMODE_BIPHASE, \ COND_CODE_0(IS_ENABLED(CONFIG_SOC_SERIES_ESP32), (0), \ (CAN_ESP32_TWAI_DT_CDR_INST_GET(inst))), 25000); \ \ static struct can_sja1000_data can_sja1000_data_##inst = \ CAN_SJA1000_DATA_INITIALIZER(NULL); \ \ CAN_DEVICE_DT_INST_DEFINE(inst, can_esp32_twai_init, NULL, &can_sja1000_data_##inst, \ &can_sja1000_config_##inst, POST_KERNEL, \ CONFIG_CAN_INIT_PRIORITY, &can_esp32_twai_driver_api); DT_INST_FOREACH_STATUS_OKAY(CAN_ESP32_TWAI_INIT) ```
/content/code_sandbox/drivers/can/can_esp32_twai.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,941
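The custom `can_esp32_twai_set_timing()` above exists because the TWAI timing registers diverge from the SJA1000 layout, but applications normally never touch raw register fields: they either pass a nominal bitrate or let the core compute a `struct can_timing` against the driver's advertised limits. A sketch of that portable timing path; 125 kbit/s and an 87.5 % sample point are placeholder values.

```c
#include <zephyr/drivers/can.h>

int configure_bitrate(const struct device *can_dev)
{
	struct can_timing timing;
	int err;

	/* Simple path: let the driver derive timing from a nominal bitrate */
	err = can_set_bitrate(can_dev, 125000);
	if (err == 0) {
		return 0;
	}

	/* Explicit path: compute timing against the driver's timing_min/max
	 * limits (sample point given in per mille, 875 == 87.5 %).
	 */
	err = can_calc_timing(can_dev, &timing, 125000, 875);
	if (err < 0) {
		return err;
	}

	return can_set_timing(can_dev, &timing);
}
```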
```unknown # Native Linux SocketCAN configuration options config CAN_NATIVE_LINUX bool "Native Linux SocketCAN Driver" default y depends on DT_HAS_ZEPHYR_NATIVE_LINUX_CAN_ENABLED depends on ARCH_POSIX help Enable native Linux SocketCAN Driver if CAN_NATIVE_LINUX config CAN_NATIVE_LINUX_RX_THREAD_PRIORITY int "Priority for internal RX thread" default 2 help Priority level of the internal thread which is run for handling of incoming packets. config CAN_MAX_FILTER int "Maximum number of concurrent active filters" default 5 range 1 32 help Defines the array size of the callback/msgq pointers. Must be at least the size of concurrent reads. endif # CAN_NATIVE_LINUX ```
/content/code_sandbox/drivers/can/Kconfig.native_linux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
156
```c /* * */ #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/kernel.h> #include <stm32_ll_rcc.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(can_stm32h7, CONFIG_CAN_LOG_LEVEL); #define DT_DRV_COMPAT st_stm32h7_fdcan /* This symbol takes the value 1 if one of the device instances */ /* is configured in dts with a domain clock */ #if STM32_DT_INST_DEV_DOMAIN_CLOCK_SUPPORT #define STM32H7_FDCAN_DOMAIN_CLOCK_SUPPORT 1 #else #define STM32H7_FDCAN_DOMAIN_CLOCK_SUPPORT 0 #endif #define VOS0_MAX_FREQ MHZ(125) struct can_stm32h7_config { mm_reg_t base; mem_addr_t mrba; mem_addr_t mram; void (*config_irq)(void); const struct pinctrl_dev_config *pcfg; size_t pclk_len; const struct stm32_pclken *pclken; uint8_t clock_divider; }; static int can_stm32h7_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32h7_config *stm32h7_cfg = mcan_cfg->custom; return can_mcan_sys_read_reg(stm32h7_cfg->base, reg, val); } static int can_stm32h7_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32h7_config *stm32h7_cfg = mcan_cfg->custom; return can_mcan_sys_write_reg(stm32h7_cfg->base, reg, val); } static int can_stm32h7_read_mram(const struct device *dev, uint16_t offset, void *dst, size_t len) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32h7_config *stm32h7_cfg = mcan_cfg->custom; return can_mcan_sys_read_mram(stm32h7_cfg->mram, offset, dst, len); } static int can_stm32h7_write_mram(const struct device *dev, uint16_t offset, const void *src, size_t len) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32h7_config *stm32h7_cfg = mcan_cfg->custom; return can_mcan_sys_write_mram(stm32h7_cfg->mram, offset, src, len); } static int can_stm32h7_clear_mram(const struct device *dev, uint16_t offset, size_t len) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32h7_config *stm32h7_cfg = mcan_cfg->custom; return can_mcan_sys_clear_mram(stm32h7_cfg->mram, offset, len); } static int can_stm32h7_get_core_clock(const struct device *dev, uint32_t *rate) { const uint32_t rate_tmp = LL_RCC_GetFDCANClockFreq(LL_RCC_FDCAN_CLKSOURCE); uint32_t cdiv; ARG_UNUSED(dev); if (rate_tmp == LL_RCC_PERIPH_FREQUENCY_NO) { LOG_ERR("Can't read core clock"); return -EIO; } cdiv = FIELD_GET(FDCANCCU_CCFG_CDIV, FDCAN_CCU->CCFG); if (cdiv == 0U) { *rate = rate_tmp; } else { *rate = rate_tmp / (cdiv << 1U); } return 0; } static int can_stm32h7_clock_enable(const struct device *dev) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32h7_config *stm32h7_cfg = mcan_cfg->custom; const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); uint32_t fdcan_clock = 0xffffffff; int ret; if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } if (IS_ENABLED(STM32H7_FDCAN_DOMAIN_CLOCK_SUPPORT) && (stm32h7_cfg->pclk_len > 1)) { ret = clock_control_configure(clk, (clock_control_subsys_t)&stm32h7_cfg->pclken[1], NULL); if (ret < 0) { LOG_ERR("Could not select can_stm32fd domain clock"); return ret; } /* Check if clock has correct range according to chosen regulator voltage * scaling (Table 62 of 
RM0399 Rev 4). * There is no need to test HSE case, since it's value is in range of * 4 to 50 MHz (please refer to CubeMX clock control). */ ret = clock_control_get_rate(clk, (clock_control_subsys_t)&stm32h7_cfg->pclken[1], &fdcan_clock); if (ret != 0) { LOG_ERR("failure getting clock rate"); return ret; } if (fdcan_clock > VOS0_MAX_FREQ) { LOG_ERR("FDCAN Clock source %d exceeds max allowed %d", fdcan_clock, VOS0_MAX_FREQ); return -ENODEV; } } ret = clock_control_on(clk, (clock_control_subsys_t)&stm32h7_cfg->pclken[0]); if (ret != 0) { LOG_ERR("failure enabling clock"); return ret; } if (stm32h7_cfg->clock_divider != 0U) { can_mcan_enable_configuration_change(dev); FDCAN_CCU->CCFG = FDCANCCU_CCFG_BCC | FIELD_PREP(FDCANCCU_CCFG_CDIV, stm32h7_cfg->clock_divider >> 1U); } return 0; } static int can_stm32h7_init(const struct device *dev) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32h7_config *stm32h7_cfg = mcan_cfg->custom; int ret; /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(stm32h7_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("CAN pinctrl setup failed (%d)", ret); return ret; } ret = can_stm32h7_clock_enable(dev); if (ret != 0) { return ret; } ret = can_mcan_configure_mram(dev, stm32h7_cfg->mrba, stm32h7_cfg->mram); if (ret != 0) { return ret; } ret = can_mcan_init(dev); if (ret != 0) { return ret; } stm32h7_cfg->config_irq(); return 0; } static const struct can_driver_api can_stm32h7_driver_api = { .get_capabilities = can_mcan_get_capabilities, .start = can_mcan_start, .stop = can_mcan_stop, .set_mode = can_mcan_set_mode, .set_timing = can_mcan_set_timing, .send = can_mcan_send, .add_rx_filter = can_mcan_add_rx_filter, .remove_rx_filter = can_mcan_remove_rx_filter, .get_state = can_mcan_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_mcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE*/ .get_core_clock = can_stm32h7_get_core_clock, .get_max_filters = can_mcan_get_max_filters, .set_state_change_callback = can_mcan_set_state_change_callback, /* Timing limits are per the STM32H7 Reference Manual (RM0433 Rev 7), * section 56.5.7, FDCAN nominal bit timing and prescaler register * (FDCAN_NBTP). * * Beware that the reference manual contains a bug regarding the minimum * values for nominal phase segments. Valid register values are 1 and up. */ .timing_min = CAN_MCAN_TIMING_MIN_INITIALIZER, .timing_max = CAN_MCAN_TIMING_MAX_INITIALIZER, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_mcan_set_timing_data, /* Data timing limits are per the STM32H7 Reference Manual * (RM0433 Rev 7), section 56.5.3, FDCAN data bit timing and prescaler * register (FDCAN_DBTP). 
*/ .timing_data_min = CAN_MCAN_TIMING_DATA_MIN_INITIALIZER, .timing_data_max = CAN_MCAN_TIMING_DATA_MAX_INITIALIZER, #endif }; static const struct can_mcan_ops can_stm32h7_ops = { .read_reg = can_stm32h7_read_reg, .write_reg = can_stm32h7_write_reg, .read_mram = can_stm32h7_read_mram, .write_mram = can_stm32h7_write_mram, .clear_mram = can_stm32h7_clear_mram, }; #define CAN_STM32H7_MCAN_INIT(n) \ CAN_MCAN_DT_INST_BUILD_ASSERT_MRAM_CFG(n); \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_ELEMENTS_SIZE(n) <= \ CAN_MCAN_DT_INST_MRAM_SIZE(n), \ "Insufficient Message RAM size to hold elements"); \ \ static void stm32h7_mcan_irq_config_##n(void); \ \ PINCTRL_DT_INST_DEFINE(n); \ CAN_MCAN_DT_INST_CALLBACKS_DEFINE(n, can_stm32h7_cbs_##n); \ \ static const struct stm32_pclken can_stm32h7_pclken_##n[] = \ STM32_DT_INST_CLOCKS(n); \ \ static const struct can_stm32h7_config can_stm32h7_cfg_##n = { \ .base = CAN_MCAN_DT_INST_MCAN_ADDR(n), \ .mrba = CAN_MCAN_DT_INST_MRBA(n), \ .mram = CAN_MCAN_DT_INST_MRAM_ADDR(n), \ .config_irq = stm32h7_mcan_irq_config_##n, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .pclken = can_stm32h7_pclken_##n, \ .pclk_len = DT_INST_NUM_CLOCKS(n), \ .clock_divider = DT_INST_PROP_OR(n, clk_divider, 0) \ }; \ \ static const struct can_mcan_config can_mcan_cfg_##n = \ CAN_MCAN_DT_CONFIG_INST_GET(n, &can_stm32h7_cfg_##n, \ &can_stm32h7_ops, \ &can_stm32h7_cbs_##n); \ \ static struct can_mcan_data can_mcan_data_##n = \ CAN_MCAN_DATA_INITIALIZER(NULL); \ \ CAN_DEVICE_DT_INST_DEFINE(n, can_stm32h7_init, NULL, \ &can_mcan_data_##n, \ &can_mcan_cfg_##n, \ POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_stm32h7_driver_api); \ \ static void stm32h7_mcan_irq_config_##n(void) \ { \ LOG_DBG("Enable CAN inst" #n " IRQ"); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, int0, irq), \ DT_INST_IRQ_BY_NAME(n, int0, priority), \ can_mcan_line_0_isr, DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(n, int0, irq)); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, int1, irq), \ DT_INST_IRQ_BY_NAME(n, int1, priority), \ can_mcan_line_1_isr, DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(n, int1, irq)); \ } DT_INST_FOREACH_STATUS_OKAY(CAN_STM32H7_MCAN_INIT) ```
/content/code_sandbox/drivers/can/can_stm32h7_fdcan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,820
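The STM32H7 front-end above only wires the Bosch M_CAN core into Zephyr's generic CAN API; applications never call the driver functions directly. Below is a minimal sketch, assuming a board devicetree that provides a `zephyr,canbus` chosen node pointing at one of these controllers, of how such an instance is started and a classic frame is queued through that API. The ID and payload are illustrative only.

```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>

int main(void)
{
	/* Assumption: the board devicetree sets a "zephyr,canbus" chosen node */
	const struct device *can_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
	struct can_frame frame = {
		.id = 0x123,
		.dlc = 2,
		.data = { 0x01, 0x02 },
	};
	int err;

	if (!device_is_ready(can_dev)) {
		return -ENODEV;
	}

	/* Leave the STOPPED state; the driver enables the transceiver (if any)
	 * and leaves init/freeze mode here.
	 */
	err = can_start(can_dev);
	if (err != 0) {
		return err;
	}

	/* Queue one classic CAN frame (no TX callback supplied) */
	return can_send(can_dev, &frame, K_MSEC(100), NULL, NULL);
}
```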
```c /* * */ #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <soc.h> LOG_MODULE_REGISTER(can_sam0, CONFIG_CAN_LOG_LEVEL); #define DT_DRV_COMPAT atmel_sam0_can struct can_sam0_config { mm_reg_t base; mem_addr_t mram; void (*config_irq)(void); const struct pinctrl_dev_config *pcfg; volatile uint32_t *mclk; uint32_t mclk_mask; uint16_t gclk_core_id; int divider; }; static int can_sam0_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam0_config *sam_config = mcan_config->custom; return can_mcan_sys_read_reg(sam_config->base, reg, val); } static int can_sam0_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam0_config *sam_config = mcan_config->custom; switch (reg) { case CAN_MCAN_ILS: /* All interrupts are assigned to MCAN_INT0 */ val = 0; break; case CAN_MCAN_ILE: /* SAM0 has only one line to handle interrupts */ val = CAN_MCAN_ILE_EINT0; break; default: /* No field remap needed */ break; }; return can_mcan_sys_write_reg(sam_config->base, reg, val); } static int can_sam0_read_mram(const struct device *dev, uint16_t offset, void *dst, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam0_config *sam_config = mcan_config->custom; return can_mcan_sys_read_mram(sam_config->mram, offset, dst, len); } static int can_sam0_write_mram(const struct device *dev, uint16_t offset, const void *src, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam0_config *sam_config = mcan_config->custom; return can_mcan_sys_write_mram(sam_config->mram, offset, src, len); } static int can_sam0_clear_mram(const struct device *dev, uint16_t offset, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_sam0_config *sam_config = mcan_config->custom; return can_mcan_sys_clear_mram(sam_config->mram, offset, len); } void can_sam0_line_x_isr(const struct device *dev) { can_mcan_line_0_isr(dev); can_mcan_line_1_isr(dev); } static int can_sam0_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_sam0_config *sam_cfg = mcan_cfg->custom; #if defined(CONFIG_SOC_SERIES_SAME51) || defined(CONFIG_SOC_SERIES_SAME54) /*DFFL has to be used as clock source for the ATSAME51/54 family of SoCs*/ *rate = SOC_ATMEL_SAM0_DFLL48_FREQ_HZ / (sam_cfg->divider); #elif defined(CONFIG_SOC_SERIES_SAMC21) /*OSC48M has to be used as clock source for the ATSAMC21 family of SoCs*/ *rate = SOC_ATMEL_SAM0_OSC48M_FREQ_HZ / (sam_cfg->divider); #endif return 0; } static void can_sam0_clock_enable(const struct can_sam0_config *cfg) { /* Enable the GLCK7 with DIV*/ #if defined(CONFIG_SOC_SERIES_SAME51) || defined(CONFIG_SOC_SERIES_SAME54) /*DFFL has to be used as clock source for the ATSAME51/54 family of SoCs*/ GCLK->GENCTRL[7].reg = GCLK_GENCTRL_SRC(GCLK_GENCTRL_SRC_DFLL) | GCLK_GENCTRL_DIV(cfg->divider) | GCLK_GENCTRL_GENEN; #elif defined(CONFIG_SOC_SERIES_SAMC21) /*OSC48M has to be used as clock source for the ATSAMC21 family of SoCs*/ GCLK->GENCTRL[7].reg = GCLK_GENCTRL_SRC(GCLK_GENCTRL_SRC_OSC48M) | GCLK_GENCTRL_DIV(cfg->divider) | GCLK_GENCTRL_GENEN; #endif /* Route channel */ GCLK->PCHCTRL[cfg->gclk_core_id].reg = GCLK_PCHCTRL_GEN_GCLK7 | 
GCLK_PCHCTRL_CHEN; /* Enable CAN clock in MCLK */ *cfg->mclk |= cfg->mclk_mask; } static int can_sam0_init(const struct device *dev) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_sam0_config *sam_cfg = mcan_cfg->custom; int ret; can_sam0_clock_enable(sam_cfg); ret = pinctrl_apply_state(sam_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("failed to apply pinctrl"); return ret; } ret = can_mcan_configure_mram(dev, 0U, sam_cfg->mram); if (ret != 0) { LOG_ERR("failed to configure message ram"); return ret; } ret = can_mcan_init(dev); if (ret != 0) { LOG_ERR("failed to mcan init"); return ret; } sam_cfg->config_irq(); return ret; } static const struct can_driver_api can_sam0_driver_api = { .get_capabilities = can_mcan_get_capabilities, .start = can_mcan_start, .stop = can_mcan_stop, .set_mode = can_mcan_set_mode, .set_timing = can_mcan_set_timing, .send = can_mcan_send, .add_rx_filter = can_mcan_add_rx_filter, .remove_rx_filter = can_mcan_remove_rx_filter, .get_state = can_mcan_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_mcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .get_core_clock = can_sam0_get_core_clock, .get_max_filters = can_mcan_get_max_filters, .set_state_change_callback = can_mcan_set_state_change_callback, .timing_min = CAN_MCAN_TIMING_MIN_INITIALIZER, .timing_max = CAN_MCAN_TIMING_MAX_INITIALIZER, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_mcan_set_timing_data, .timing_data_min = CAN_MCAN_TIMING_DATA_MIN_INITIALIZER, .timing_data_max = CAN_MCAN_TIMING_DATA_MAX_INITIALIZER, #endif /* CONFIG_CAN_FD_MODE */ }; static const struct can_mcan_ops can_sam0_ops = { .read_reg = can_sam0_read_reg, .write_reg = can_sam0_write_reg, .read_mram = can_sam0_read_mram, .write_mram = can_sam0_write_mram, .clear_mram = can_sam0_clear_mram, }; #define CAN_SAM0_IRQ_CFG_FUNCTION(inst) \ static void config_can_##inst##_irq(void) \ { \ LOG_DBG("Enable CAN##inst## IRQ"); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, int0, irq), \ DT_INST_IRQ_BY_NAME(inst, int0, priority), can_sam0_line_x_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, int0, irq)); \ } #define CAN_SAM0_CFG_INST(inst) \ CAN_MCAN_DT_INST_CALLBACKS_DEFINE(inst, can_sam0_cbs_##inst); \ CAN_MCAN_DT_INST_MRAM_DEFINE(inst, can_sam0_mram_##inst); \ \ static const struct can_sam0_config can_sam0_cfg_##inst = { \ .base = CAN_MCAN_DT_INST_MCAN_ADDR(inst), \ .mram = (mem_addr_t)POINTER_TO_UINT(&can_sam0_mram_##inst), \ .mclk = (volatile uint32_t *)MCLK_MASK_DT_INT_REG_ADDR(inst), \ .mclk_mask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(inst, mclk, bit)), \ .gclk_core_id = DT_INST_CLOCKS_CELL_BY_NAME(inst, gclk, periph_ch), \ .divider = DT_INST_PROP(inst, divider), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .config_irq = config_can_##inst##_irq, \ }; \ \ static const struct can_mcan_config can_mcan_cfg_##inst = \ CAN_MCAN_DT_CONFIG_INST_GET(inst, &can_sam0_cfg_##inst, &can_sam0_ops, \ &can_sam0_cbs_##inst); #define CAN_SAM0_DATA_INST(inst) \ static struct can_mcan_data can_mcan_data_##inst = \ CAN_MCAN_DATA_INITIALIZER(NULL); #define CAN_SAM0_DEVICE_INST(inst) \ CAN_DEVICE_DT_INST_DEFINE(inst, can_sam0_init, NULL, \ &can_mcan_data_##inst, \ &can_mcan_cfg_##inst, \ POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_sam0_driver_api); #define CAN_SAM0_INST(inst) \ CAN_MCAN_DT_INST_BUILD_ASSERT_MRAM_CFG(inst); \ PINCTRL_DT_INST_DEFINE(inst); \ CAN_SAM0_IRQ_CFG_FUNCTION(inst) \ CAN_SAM0_CFG_INST(inst) \ CAN_SAM0_DATA_INST(inst) \ CAN_SAM0_DEVICE_INST(inst) 
DT_INST_FOREACH_STATUS_OKAY(CAN_SAM0_INST) ```
/content/code_sandbox/drivers/can/can_sam0.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,276
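Both the SAM0 `get_core_clock` hook (48 MHz source divided by the GCLK divider) and the timing initializers used by these drivers feed the same relationship: the bit rate equals the core clock divided by the prescaler and the number of time quanta per bit. A small standalone sketch of that arithmetic follows; the concrete numbers are illustrative and not taken from any particular board.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: bit rate implied by a set of CAN timing parameters.
 * Time quanta per bit = 1 (sync seg) + prop_seg + phase_seg1 + phase_seg2.
 */
static uint32_t can_bitrate_from_timing(uint32_t core_clock_hz, uint32_t prescaler,
					uint32_t prop_seg, uint32_t phase_seg1,
					uint32_t phase_seg2)
{
	uint32_t tq_per_bit = 1U + prop_seg + phase_seg1 + phase_seg2;

	return core_clock_hz / (prescaler * tq_per_bit);
}

int main(void)
{
	/* Example: 48 MHz core clock, prescaler 6, 16 tq per bit -> 500 kbit/s */
	printf("%u bit/s\n", can_bitrate_from_timing(48000000U, 6U, 7U, 6U, 2U));
	return 0;
}
```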
```unknown
# Bosch M_CAN configuration options

config CAN_MCAN
	bool
	help
	  Enable the Bosch M_CAN CAN IP module driver backend.
```
/content/code_sandbox/drivers/can/Kconfig.mcan
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
29
```unknown
# Kvaser PCIcan configuration options

config CAN_KVASER_PCI
	bool "Kvaser PCIcan driver"
	default y
	depends on DT_HAS_KVASER_PCICAN_ENABLED
	select PCIE
	select CAN_SJA1000
	help
	  This enables support for the Kvaser PCIcan.
```
/content/code_sandbox/drivers/can/Kconfig.kvaser
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```c /* * */ #include <zephyr/drivers/can/can_sja1000.h> #include "can_sja1000_priv.h" #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(can_sja1000, CONFIG_CAN_LOG_LEVEL); /* Timeout for entering/leaving reset mode */ #define CAN_SJA1000_RESET_MODE_TIMEOUT_USEC 1000 #define CAN_SJA1000_RESET_MODE_RETRIES 100 #define CAN_SJA1000_RESET_MODE_DELAY \ K_USEC(CAN_SJA1000_RESET_MODE_TIMEOUT_USEC / CAN_SJA1000_RESET_MODE_RETRIES) static inline void can_sja1000_write_reg(const struct device *dev, uint8_t reg, uint8_t val) { const struct can_sja1000_config *config = dev->config; return config->write_reg(dev, reg, val); } static inline uint8_t can_sja1000_read_reg(const struct device *dev, uint8_t reg) { const struct can_sja1000_config *config = dev->config; return config->read_reg(dev, reg); } static inline int can_sja1000_enter_reset_mode(const struct device *dev) { int retries = CAN_SJA1000_RESET_MODE_RETRIES; uint8_t mod; mod = can_sja1000_read_reg(dev, CAN_SJA1000_MOD); while ((mod & CAN_SJA1000_MOD_RM) == 0) { if (--retries < 0) { return -EIO; } can_sja1000_write_reg(dev, CAN_SJA1000_MOD, mod | CAN_SJA1000_MOD_RM); k_sleep(CAN_SJA1000_RESET_MODE_DELAY); mod = can_sja1000_read_reg(dev, CAN_SJA1000_MOD); }; return 0; } static inline void can_sja1000_leave_reset_mode_nowait(const struct device *dev) { uint8_t mod; mod = can_sja1000_read_reg(dev, CAN_SJA1000_MOD); can_sja1000_write_reg(dev, CAN_SJA1000_MOD, mod & ~(CAN_SJA1000_MOD_RM)); } static inline int can_sja1000_leave_reset_mode(const struct device *dev) { int retries = CAN_SJA1000_RESET_MODE_RETRIES; uint8_t mod; mod = can_sja1000_read_reg(dev, CAN_SJA1000_MOD); while ((mod & CAN_SJA1000_MOD_RM) == 1) { if (--retries < 0) { return -EIO; } can_sja1000_write_reg(dev, CAN_SJA1000_MOD, mod & ~(CAN_SJA1000_MOD_RM)); k_sleep(CAN_SJA1000_RESET_MODE_DELAY); mod = can_sja1000_read_reg(dev, CAN_SJA1000_MOD); }; return 0; } static inline void can_sja1000_clear_errors(const struct device *dev) { /* Clear error counters */ can_sja1000_write_reg(dev, CAN_SJA1000_RXERR, 0); can_sja1000_write_reg(dev, CAN_SJA1000_TXERR, 0); /* Clear error capture */ (void)can_sja1000_read_reg(dev, CAN_SJA1000_ECC); } static void can_sja1000_tx_done(const struct device *dev, int status) { struct can_sja1000_data *data = dev->data; can_tx_callback_t callback = data->tx_callback; void *user_data = data->tx_user_data; if (callback != NULL) { data->tx_callback = NULL; callback(dev, status, user_data); } k_sem_give(&data->tx_idle); } int can_sja1000_set_timing(const struct device *dev, const struct can_timing *timing) { struct can_sja1000_data *data = dev->data; uint8_t btr0; uint8_t btr1; if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->mod_lock, K_FOREVER); btr0 = CAN_SJA1000_BTR0_BRP_PREP(timing->prescaler - 1) | CAN_SJA1000_BTR0_SJW_PREP(timing->sjw - 1); btr1 = CAN_SJA1000_BTR1_TSEG1_PREP(timing->phase_seg1 - 1) | CAN_SJA1000_BTR1_TSEG2_PREP(timing->phase_seg2 - 1); if ((data->common.mode & CAN_MODE_3_SAMPLES) != 0) { btr1 |= CAN_SJA1000_BTR1_SAM; } can_sja1000_write_reg(dev, CAN_SJA1000_BTR0, btr0); can_sja1000_write_reg(dev, CAN_SJA1000_BTR1, btr1); k_mutex_unlock(&data->mod_lock); return 0; } int can_sja1000_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_ONE_SHOT | CAN_MODE_3_SAMPLES; if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { *cap |= CAN_MODE_MANUAL_RECOVERY; } 
return 0; } int can_sja1000_start(const struct device *dev) { const struct can_sja1000_config *config = dev->config; struct can_sja1000_data *data = dev->data; int err; if (data->common.started) { return -EALREADY; } if (config->common.phy != NULL) { err = can_transceiver_enable(config->common.phy, data->common.mode); if (err != 0) { LOG_ERR("failed to enable CAN transceiver (err %d)", err); return err; } } can_sja1000_clear_errors(dev); CAN_STATS_RESET(dev); err = can_sja1000_leave_reset_mode(dev); if (err != 0) { if (config->common.phy != NULL) { /* Attempt to disable the CAN transceiver in case of error */ (void)can_transceiver_disable(config->common.phy); } return err; } data->common.started = true; return 0; } int can_sja1000_stop(const struct device *dev) { const struct can_sja1000_config *config = dev->config; struct can_sja1000_data *data = dev->data; int err; if (!data->common.started) { return -EALREADY; } /* Entering reset mode aborts current transmission, if any */ err = can_sja1000_enter_reset_mode(dev); if (err != 0) { return err; } if (config->common.phy != NULL) { err = can_transceiver_disable(config->common.phy); if (err != 0) { LOG_ERR("failed to disable CAN transceiver (err %d)", err); return err; } } data->common.started = false; can_sja1000_tx_done(dev, -ENETDOWN); return 0; } int can_sja1000_set_mode(const struct device *dev, can_mode_t mode) { can_mode_t supported = CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_ONE_SHOT | CAN_MODE_3_SAMPLES; struct can_sja1000_data *data = dev->data; uint8_t btr1; uint8_t mod; if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { supported |= CAN_MODE_MANUAL_RECOVERY; } if ((mode & ~(supported)) != 0) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->mod_lock, K_FOREVER); mod = can_sja1000_read_reg(dev, CAN_SJA1000_MOD); mod |= CAN_SJA1000_MOD_AFM; if ((mode & CAN_MODE_LOOPBACK) != 0) { /* (Local) self test mode */ mod |= CAN_SJA1000_MOD_STM; } else { mod &= ~(CAN_SJA1000_MOD_STM); } if ((mode & CAN_MODE_LISTENONLY) != 0) { mod |= CAN_SJA1000_MOD_LOM; } else { mod &= ~(CAN_SJA1000_MOD_LOM); } btr1 = can_sja1000_read_reg(dev, CAN_SJA1000_BTR1); if ((mode & CAN_MODE_3_SAMPLES) != 0) { btr1 |= CAN_SJA1000_BTR1_SAM; } else { btr1 &= ~(CAN_SJA1000_BTR1_SAM); } can_sja1000_write_reg(dev, CAN_SJA1000_MOD, mod); can_sja1000_write_reg(dev, CAN_SJA1000_BTR1, btr1); data->common.mode = mode; k_mutex_unlock(&data->mod_lock); return 0; } static void can_sja1000_read_frame(const struct device *dev, struct can_frame *frame) { uint8_t info; int i; memset(frame, 0, sizeof(*frame)); info = can_sja1000_read_reg(dev, CAN_SJA1000_FRAME_INFO); if ((info & CAN_SJA1000_FRAME_INFO_RTR) != 0) { frame->flags |= CAN_FRAME_RTR; } frame->dlc = CAN_SJA1000_FRAME_INFO_DLC_GET(info); if (frame->dlc > CAN_MAX_DLC) { LOG_ERR("RX frame DLC %u exceeds maximum (%d)", frame->dlc, CAN_MAX_DLC); return; } if ((info & CAN_SJA1000_FRAME_INFO_FF) != 0) { frame->flags |= CAN_FRAME_IDE; frame->id = FIELD_PREP(GENMASK(28, 21), can_sja1000_read_reg(dev, CAN_SJA1000_XFF_ID1)); frame->id |= FIELD_PREP(GENMASK(20, 13), can_sja1000_read_reg(dev, CAN_SJA1000_XFF_ID2)); frame->id |= FIELD_PREP(GENMASK(12, 5), can_sja1000_read_reg(dev, CAN_SJA1000_EFF_ID3)); frame->id |= FIELD_PREP(GENMASK(4, 0), can_sja1000_read_reg(dev, CAN_SJA1000_EFF_ID4) >> 3); if ((frame->flags & CAN_FRAME_RTR) == 0U) { for (i = 0; i < frame->dlc; i++) { frame->data[i] = can_sja1000_read_reg(dev, CAN_SJA1000_EFF_DATA + i); } } } else { 
frame->id = FIELD_PREP(GENMASK(10, 3), can_sja1000_read_reg(dev, CAN_SJA1000_XFF_ID1)); frame->id |= FIELD_PREP(GENMASK(2, 0), can_sja1000_read_reg(dev, CAN_SJA1000_XFF_ID2) >> 5); if ((frame->flags & CAN_FRAME_RTR) == 0U) { for (i = 0; i < frame->dlc; i++) { frame->data[i] = can_sja1000_read_reg(dev, CAN_SJA1000_SFF_DATA + i); } } } } void can_sja1000_write_frame(const struct device *dev, const struct can_frame *frame) { uint8_t info; int i; info = CAN_SJA1000_FRAME_INFO_DLC_PREP(frame->dlc); if ((frame->flags & CAN_FRAME_RTR) != 0) { info |= CAN_SJA1000_FRAME_INFO_RTR; } if ((frame->flags & CAN_FRAME_IDE) != 0) { info |= CAN_SJA1000_FRAME_INFO_FF; } can_sja1000_write_reg(dev, CAN_SJA1000_FRAME_INFO, info); if ((frame->flags & CAN_FRAME_IDE) != 0) { can_sja1000_write_reg(dev, CAN_SJA1000_XFF_ID1, FIELD_GET(GENMASK(28, 21), frame->id)); can_sja1000_write_reg(dev, CAN_SJA1000_XFF_ID2, FIELD_GET(GENMASK(20, 13), frame->id)); can_sja1000_write_reg(dev, CAN_SJA1000_EFF_ID3, FIELD_GET(GENMASK(12, 5), frame->id)); can_sja1000_write_reg(dev, CAN_SJA1000_EFF_ID4, FIELD_GET(GENMASK(4, 0), frame->id) << 3); if ((frame->flags & CAN_FRAME_RTR) == 0U) { for (i = 0; i < frame->dlc; i++) { can_sja1000_write_reg(dev, CAN_SJA1000_EFF_DATA + i, frame->data[i]); } } } else { can_sja1000_write_reg(dev, CAN_SJA1000_XFF_ID1, FIELD_GET(GENMASK(10, 3), frame->id)); can_sja1000_write_reg(dev, CAN_SJA1000_XFF_ID2, FIELD_GET(GENMASK(2, 0), frame->id) << 5); if ((frame->flags & CAN_FRAME_RTR) == 0U) { for (i = 0; i < frame->dlc; i++) { can_sja1000_write_reg(dev, CAN_SJA1000_SFF_DATA + i, frame->data[i]); } } } } int can_sja1000_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { struct can_sja1000_data *data = dev->data; uint8_t cmr; uint8_t sr; if (frame->dlc > CAN_MAX_DLC) { LOG_ERR("TX frame DLC %u exceeds maximum (%d)", frame->dlc, CAN_MAX_DLC); return -EINVAL; } if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if (!data->common.started) { return -ENETDOWN; } if (data->state == CAN_STATE_BUS_OFF) { LOG_DBG("transmit failed, bus-off"); return -ENETUNREACH; } if (k_sem_take(&data->tx_idle, timeout) != 0) { return -EAGAIN; } sr = can_sja1000_read_reg(dev, CAN_SJA1000_SR); if ((sr & CAN_SJA1000_SR_TBS) == 0) { LOG_ERR("transmit buffer locked, sr = 0x%02x", sr); return -EIO; } data->tx_callback = callback; data->tx_user_data = user_data; can_sja1000_write_frame(dev, frame); if ((data->common.mode & CAN_MODE_LOOPBACK) != 0) { cmr = CAN_SJA1000_CMR_SRR; } else { cmr = CAN_SJA1000_CMR_TR; } if ((data->common.mode & CAN_MODE_ONE_SHOT) != 0) { cmr |= CAN_SJA1000_CMR_AT; } can_sja1000_write_reg(dev, CAN_SJA1000_CMR, cmr); return 0; } int can_sja1000_add_rx_filter(const struct device *dev, can_rx_callback_t callback, void *user_data, const struct can_filter *filter) { struct can_sja1000_data *data = dev->data; int filter_id = -ENOSPC; int i; if ((filter->flags & ~(CAN_FILTER_IDE)) != 0) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } for (i = 0; i < ARRAY_SIZE(data->filters); i++) { if (!atomic_test_and_set_bit(data->rx_allocs, i)) { filter_id = i; break; } } if (filter_id >= 0) { data->filters[filter_id].filter = *filter; data->filters[filter_id].user_data = user_data; data->filters[filter_id].callback = callback; } return filter_id; } void can_sja1000_remove_rx_filter(const struct device *dev, int filter_id) { struct 
can_sja1000_data *data = dev->data; if (filter_id < 0 || filter_id >= ARRAY_SIZE(data->filters)) { LOG_ERR("filter ID %d out of bounds", filter_id); return; } if (atomic_test_and_clear_bit(data->rx_allocs, filter_id)) { data->filters[filter_id].callback = NULL; data->filters[filter_id].user_data = NULL; data->filters[filter_id].filter = (struct can_filter){0}; } } #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE int can_sja1000_recover(const struct device *dev, k_timeout_t timeout) { struct can_sja1000_data *data = dev->data; int64_t start_ticks; uint8_t sr; int err; if (!data->common.started) { return -ENETDOWN; } if ((data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { return -ENOTSUP; } sr = can_sja1000_read_reg(dev, CAN_SJA1000_SR); if ((sr & CAN_SJA1000_SR_BS) == 0) { return 0; } start_ticks = k_uptime_ticks(); err = k_mutex_lock(&data->mod_lock, timeout); if (err != 0) { LOG_WRN("failed to acquire MOD lock"); return err; } err = can_sja1000_leave_reset_mode(dev); if (err != 0) { LOG_ERR("failed to initiate bus recovery"); k_mutex_unlock(&data->mod_lock); return err; } k_mutex_unlock(&data->mod_lock); while ((sr & CAN_SJA1000_SR_BS) != 0) { if (k_uptime_ticks() - start_ticks > timeout.ticks) { LOG_WRN("bus recovery timed out"); return -EAGAIN; } sr = can_sja1000_read_reg(dev, CAN_SJA1000_SR); } return 0; } #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ int can_sja1000_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { struct can_sja1000_data *data = dev->data; if (state != NULL) { if (!data->common.started) { *state = CAN_STATE_STOPPED; } else { *state = data->state; } } if (err_cnt != NULL) { err_cnt->rx_err_cnt = can_sja1000_read_reg(dev, CAN_SJA1000_RXERR); err_cnt->tx_err_cnt = can_sja1000_read_reg(dev, CAN_SJA1000_TXERR); } return 0; } void can_sja1000_set_state_change_callback(const struct device *dev, can_state_change_callback_t callback, void *user_data) { struct can_sja1000_data *data = dev->data; data->common.state_change_cb = callback; data->common.state_change_cb_user_data = user_data; } int can_sja1000_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(dev); ARG_UNUSED(ide); return CONFIG_CAN_MAX_FILTER; } static void can_sja1000_handle_receive_irq(const struct device *dev) { struct can_sja1000_data *data = dev->data; struct can_frame frame; can_rx_callback_t callback; uint8_t sr; int i; do { can_sja1000_read_frame(dev, &frame); #ifndef CONFIG_CAN_ACCEPT_RTR if ((frame.flags & CAN_FRAME_RTR) == 0U) { #endif /* !CONFIG_CAN_ACCEPT_RTR */ for (i = 0; i < ARRAY_SIZE(data->filters); i++) { if (!atomic_test_bit(data->rx_allocs, i)) { continue; } if (can_frame_matches_filter(&frame, &data->filters[i].filter)) { callback = data->filters[i].callback; if (callback != NULL) { callback(dev, &frame, data->filters[i].user_data); } } } #ifndef CONFIG_CAN_ACCEPT_RTR } #endif /* !CONFIG_CAN_ACCEPT_RTR */ can_sja1000_write_reg(dev, CAN_SJA1000_CMR, CAN_SJA1000_CMR_RRB); sr = can_sja1000_read_reg(dev, CAN_SJA1000_SR); } while ((sr & CAN_SJA1000_SR_RBS) != 0); } static void can_sja1000_handle_transmit_irq(const struct device *dev) { int status = 0; uint8_t sr; sr = can_sja1000_read_reg(dev, CAN_SJA1000_SR); if ((sr & CAN_SJA1000_SR_TCS) == 0) { status = -EIO; } can_sja1000_tx_done(dev, status); } #ifdef CONFIG_CAN_STATS static void can_sja1000_handle_data_overrun_irq(const struct device *dev) { /* See NXP SJA1000 Application Note AN97076 (figure 18) for data overrun details */ CAN_STATS_RX_OVERRUN_INC(dev); can_sja1000_write_reg(dev, 
CAN_SJA1000_CMR, CAN_SJA1000_CMR_CDO); } static void can_sja1000_handle_bus_error_irq(const struct device *dev) { /* See NXP SJA1000 Application Note AN97076 (tables 6 and 7) for ECC details */ uint8_t ecc; /* Read the Error Code Capture register to re-activate it */ ecc = can_sja1000_read_reg(dev, CAN_SJA1000_ECC); if (ecc == (CAN_SJA1000_ECC_ERRC_OTHER_ERROR | CAN_SJA1000_ECC_DIR_TX | CAN_SJA1000_ECC_SEG_ACK_SLOT)) { /* Missing ACK is reported as a TX "other" error in the ACK slot */ CAN_STATS_ACK_ERROR_INC(dev); return; } if (ecc == (CAN_SJA1000_ECC_ERRC_FORM_ERROR | CAN_SJA1000_ECC_DIR_RX | CAN_SJA1000_ECC_SEG_ACK_DELIM)) { /* CRC error is reported as a RX "form" error in the ACK delimiter */ CAN_STATS_CRC_ERROR_INC(dev); return; } switch (ecc & CAN_SJA1000_ECC_ERRC_MASK) { case CAN_SJA1000_ECC_ERRC_BIT_ERROR: CAN_STATS_BIT_ERROR_INC(dev); break; case CAN_SJA1000_ECC_ERRC_FORM_ERROR: CAN_STATS_FORM_ERROR_INC(dev); break; case CAN_SJA1000_ECC_ERRC_STUFF_ERROR: CAN_STATS_STUFF_ERROR_INC(dev); break; case CAN_SJA1000_ECC_ERRC_OTHER_ERROR: __fallthrough; default: /* Other error not currently reported in CAN statistics */ break; } } #endif /* CONFIG_CAN_STATS */ static void can_sja1000_handle_error_warning_irq(const struct device *dev) { struct can_sja1000_data *data = dev->data; uint8_t sr; sr = can_sja1000_read_reg(dev, CAN_SJA1000_SR); if ((sr & CAN_SJA1000_SR_BS) != 0) { data->state = CAN_STATE_BUS_OFF; can_sja1000_tx_done(dev, -ENETUNREACH); if (data->common.started && (data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { can_sja1000_leave_reset_mode_nowait(dev); } } else if ((sr & CAN_SJA1000_SR_ES) != 0) { data->state = CAN_STATE_ERROR_WARNING; } else { data->state = CAN_STATE_ERROR_ACTIVE; } } static void can_sja1000_handle_error_passive_irq(const struct device *dev) { struct can_sja1000_data *data = dev->data; if (data->state == CAN_STATE_ERROR_PASSIVE) { data->state = CAN_STATE_ERROR_WARNING; } else { data->state = CAN_STATE_ERROR_PASSIVE; } } void can_sja1000_isr(const struct device *dev) { struct can_sja1000_data *data = dev->data; const can_state_change_callback_t cb = data->common.state_change_cb; void *cb_data = data->common.state_change_cb_user_data; enum can_state prev_state = data->state; struct can_bus_err_cnt err_cnt; uint8_t ir; ir = can_sja1000_read_reg(dev, CAN_SJA1000_IR); if ((ir & CAN_SJA1000_IR_TI) != 0) { can_sja1000_handle_transmit_irq(dev); } if ((ir & CAN_SJA1000_IR_RI) != 0) { can_sja1000_handle_receive_irq(dev); } #ifdef CONFIG_CAN_STATS if ((ir & CAN_SJA1000_IR_DOI) != 0) { can_sja1000_handle_data_overrun_irq(dev); } if ((ir & CAN_SJA1000_IR_BEI) != 0) { can_sja1000_handle_bus_error_irq(dev); } #endif /* CONFIG_CAN_STATS */ if ((ir & CAN_SJA1000_IR_EI) != 0) { can_sja1000_handle_error_warning_irq(dev); } if ((ir & CAN_SJA1000_IR_EPI) != 0) { can_sja1000_handle_error_passive_irq(dev); } if (prev_state != data->state && cb != NULL) { err_cnt.rx_err_cnt = can_sja1000_read_reg(dev, CAN_SJA1000_RXERR); err_cnt.tx_err_cnt = can_sja1000_read_reg(dev, CAN_SJA1000_TXERR); cb(dev, data->state, err_cnt, cb_data); } } int can_sja1000_init(const struct device *dev) { const struct can_sja1000_config *config = dev->config; struct can_sja1000_data *data = dev->data; struct can_timing timing = { 0 }; int err; __ASSERT_NO_MSG(config->read_reg != NULL); __ASSERT_NO_MSG(config->write_reg != NULL); if (config->common.phy != NULL) { if (!device_is_ready(config->common.phy)) { LOG_ERR("CAN transceiver not ready"); return -ENODEV; } } k_mutex_init(&data->mod_lock); 
k_sem_init(&data->tx_idle, 1, 1); data->state = CAN_STATE_ERROR_ACTIVE; /* See NXP SJA1000 Application Note AN97076 (figure 12) for initialization sequence */ /* Enter reset mode */ err = can_sja1000_enter_reset_mode(dev); if (err != 0) { return err; } /* Set PeliCAN mode */ can_sja1000_write_reg(dev, CAN_SJA1000_CDR, config->cdr | CAN_SJA1000_CDR_CAN_MODE); /* Set up acceptance code and mask to match any frame (software filtering) */ can_sja1000_write_reg(dev, CAN_SJA1000_ACR0, 0x00); can_sja1000_write_reg(dev, CAN_SJA1000_ACR1, 0x00); can_sja1000_write_reg(dev, CAN_SJA1000_ACR2, 0x00); can_sja1000_write_reg(dev, CAN_SJA1000_ACR3, 0x00); can_sja1000_write_reg(dev, CAN_SJA1000_AMR0, 0xFF); can_sja1000_write_reg(dev, CAN_SJA1000_AMR1, 0xFF); can_sja1000_write_reg(dev, CAN_SJA1000_AMR2, 0xFF); can_sja1000_write_reg(dev, CAN_SJA1000_AMR3, 0xFF); err = can_calc_timing(dev, &timing, config->common.bitrate, config->common.sample_point); if (err == -EINVAL) { LOG_ERR("bitrate/sample point cannot be met (err %d)", err); return err; } LOG_DBG("initial sample point error: %d", err); /* Configure timing */ err = can_set_timing(dev, &timing); if (err != 0) { LOG_ERR("timing parameters cannot be met (err %d)", err); return err; } /* Set output control */ can_sja1000_write_reg(dev, CAN_SJA1000_OCR, config->ocr); /* Clear error counters and error capture */ can_sja1000_clear_errors(dev); /* Set error warning limit */ can_sja1000_write_reg(dev, CAN_SJA1000_EWLR, 96); /* Set normal mode */ data->common.mode = CAN_MODE_NORMAL; err = can_sja1000_set_mode(dev, CAN_MODE_NORMAL); if (err != 0) { return err; } /* Enable interrupts */ can_sja1000_write_reg(dev, CAN_SJA1000_IER, #ifdef CONFIG_CAN_STATS CAN_SJA1000_IER_BEIE | CAN_SJA1000_IER_DOIE | #endif /* CONFIG_CAN_STATS */ CAN_SJA1000_IER_RIE | CAN_SJA1000_IER_TIE | CAN_SJA1000_IER_EIE | CAN_SJA1000_IER_EPIE); return 0; } ```
/content/code_sandbox/drivers/can/can_sja1000.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,737
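Because the SJA1000 driver programs its acceptance code/mask registers to match everything and filters purely in software (`can_frame_matches_filter()` over up to `CONFIG_CAN_MAX_FILTER` slots), RX filtering is driven entirely by what applications register through the generic API. The sketch below shows such a registration; the callback name and the ID value are made up for illustration.

```c
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/* Hypothetical RX callback: the driver invokes it for every received frame
 * that matches the registered filter.
 */
static void rx_cb(const struct device *dev, struct can_frame *frame, void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);

	printk("RX id 0x%03x dlc %u\n", frame->id, frame->dlc);
}

int register_example_filter(const struct device *can_dev)
{
	/* Accept standard ID 0x123 only (all standard ID bits significant) */
	const struct can_filter filter = {
		.id = 0x123,
		.mask = CAN_STD_ID_MASK,
		.flags = 0U,
	};

	/* Returns the filter ID (>= 0) or a negative errno, e.g. -ENOSPC when
	 * all software filter slots are in use.
	 */
	return can_add_rx_filter(can_dev, rx_cb, NULL, &filter);
}
```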
```c /* * */ /* Base driver compatible */ #define DT_DRV_COMPAT nxp_flexcan /* CAN FD extension compatible */ #define FLEXCAN_FD_DRV_COMPAT nxp_flexcan_fd #include <zephyr/kernel.h> #include <zephyr/sys/atomic.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/device.h> #include <zephyr/sys/byteorder.h> #include <fsl_flexcan.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/drivers/pinctrl.h> LOG_MODULE_REGISTER(can_mcux_flexcan, CONFIG_CAN_LOG_LEVEL); #if ((defined(FSL_FEATURE_FLEXCAN_HAS_ERRATA_5641) && FSL_FEATURE_FLEXCAN_HAS_ERRATA_5641) || \ (defined(FSL_FEATURE_FLEXCAN_HAS_ERRATA_5829) && FSL_FEATURE_FLEXCAN_HAS_ERRATA_5829)) /* the first valid MB should be occupied by ERRATA 5461 or 5829. */ #define RX_START_IDX 1 #else #define RX_START_IDX 0 #endif /* The maximum number of message buffers for concurrent active instances */ #ifdef CONFIG_CAN_MAX_MB #define MCUX_FLEXCAN_MAX_MB CONFIG_CAN_MAX_MB #else #define MCUX_FLEXCAN_MAX_MB FSL_FEATURE_FLEXCAN_HAS_MESSAGE_BUFFER_MAX_NUMBERn(0) #endif /* * RX message buffers (filters) will take up the first N message * buffers. The rest are available for TX use. */ #define MCUX_FLEXCAN_MAX_RX (CONFIG_CAN_MAX_FILTER + RX_START_IDX) #define MCUX_FLEXCAN_MAX_TX (MCUX_FLEXCAN_MAX_MB - MCUX_FLEXCAN_MAX_RX) /* * Convert from RX message buffer index to allocated filter ID and * vice versa. */ #define RX_MBIDX_TO_ALLOC_IDX(x) (x) #define ALLOC_IDX_TO_RXMB_IDX(x) (x) /* * Convert from TX message buffer index to allocated TX ID and vice * versa. */ #define TX_MBIDX_TO_ALLOC_IDX(x) (x - MCUX_FLEXCAN_MAX_RX) #define ALLOC_IDX_TO_TXMB_IDX(x) (x + MCUX_FLEXCAN_MAX_RX) /* Convert from back from FLEXCAN IDs to Zephyr CAN IDs. 
*/ #define FLEXCAN_ID_TO_CAN_ID_STD(id) \ ((uint32_t)((((uint32_t)(id)) & CAN_ID_STD_MASK) >> CAN_ID_STD_SHIFT)) #define FLEXCAN_ID_TO_CAN_ID_EXT(id) \ ((uint32_t)((((uint32_t)(id)) & (CAN_ID_STD_MASK | CAN_ID_EXT_MASK)) \ >> CAN_ID_EXT_SHIFT)) struct mcux_flexcan_config { const struct can_driver_config common; CAN_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; int clk_source; #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD bool flexcan_fd; #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ void (*irq_config_func)(const struct device *dev); void (*irq_enable_func)(void); void (*irq_disable_func)(void); const struct pinctrl_dev_config *pincfg; }; struct mcux_flexcan_rx_callback { flexcan_rx_mb_config_t mb_config; union { flexcan_frame_t classic; #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD flexcan_fd_frame_t fd; #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ } frame; can_rx_callback_t function; void *arg; }; struct mcux_flexcan_tx_callback { can_tx_callback_t function; void *arg; }; struct mcux_flexcan_data { struct can_driver_data common; const struct device *dev; flexcan_handle_t handle; ATOMIC_DEFINE(rx_allocs, MCUX_FLEXCAN_MAX_RX); struct k_mutex rx_mutex; struct mcux_flexcan_rx_callback rx_cbs[MCUX_FLEXCAN_MAX_RX]; ATOMIC_DEFINE(tx_allocs, MCUX_FLEXCAN_MAX_TX); struct k_sem tx_allocs_sem; struct k_mutex tx_mutex; struct mcux_flexcan_tx_callback tx_cbs[MCUX_FLEXCAN_MAX_TX]; enum can_state state; struct can_timing timing; #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD struct can_timing timing_data; #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ }; static int mcux_flexcan_get_core_clock(const struct device *dev, uint32_t *rate) { const struct mcux_flexcan_config *config = dev->config; return clock_control_get_rate(config->clock_dev, config->clock_subsys, rate); } static int mcux_flexcan_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(ide); return CONFIG_CAN_MAX_FILTER; } static int mcux_flexcan_set_timing(const struct device *dev, const struct can_timing *timing) { struct mcux_flexcan_data *data = dev->data; if (!timing) { return -EINVAL; } if (data->common.started) { return -EBUSY; } data->timing = *timing; return 0; } #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD static int mcux_flexcan_set_timing_data(const struct device *dev, const struct can_timing *timing_data) { struct mcux_flexcan_data *data = dev->data; if (!timing_data) { return -EINVAL; } if (data->common.started) { return -EBUSY; } data->timing_data = *timing_data; return 0; } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ static int mcux_flexcan_get_capabilities(const struct device *dev, can_mode_t *cap) { __maybe_unused const struct mcux_flexcan_config *config = dev->config; *cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_3_SAMPLES; if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { *cap |= CAN_MODE_MANUAL_RECOVERY; } if (UTIL_AND(IS_ENABLED(CONFIG_CAN_MCUX_FLEXCAN_FD), config->flexcan_fd)) { *cap |= CAN_MODE_FD; } return 0; } static status_t mcux_flexcan_mb_start(const struct device *dev, int alloc) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; flexcan_mb_transfer_t xfer; status_t status; __ASSERT_NO_MSG(alloc >= 0 && alloc < ARRAY_SIZE(data->rx_cbs)); xfer.mbIdx = ALLOC_IDX_TO_RXMB_IDX(alloc); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { xfer.framefd = &data->rx_cbs[alloc].frame.fd; FLEXCAN_SetFDRxMbConfig(config->base, ALLOC_IDX_TO_RXMB_IDX(alloc), &data->rx_cbs[alloc].mb_config, true); status = 
FLEXCAN_TransferFDReceiveNonBlocking(config->base, &data->handle, &xfer); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ xfer.frame = &data->rx_cbs[alloc].frame.classic; FLEXCAN_SetRxMbConfig(config->base, ALLOC_IDX_TO_RXMB_IDX(alloc), &data->rx_cbs[alloc].mb_config, true); status = FLEXCAN_TransferReceiveNonBlocking(config->base, &data->handle, &xfer); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ return status; } static void mcux_flexcan_mb_stop(const struct device *dev, int alloc) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; __ASSERT_NO_MSG(alloc >= 0 && alloc < ARRAY_SIZE(data->rx_cbs)); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { FLEXCAN_TransferFDAbortReceive(config->base, &data->handle, ALLOC_IDX_TO_RXMB_IDX(alloc)); FLEXCAN_SetFDRxMbConfig(config->base, ALLOC_IDX_TO_RXMB_IDX(alloc), NULL, false); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ FLEXCAN_TransferAbortReceive(config->base, &data->handle, ALLOC_IDX_TO_RXMB_IDX(alloc)); FLEXCAN_SetRxMbConfig(config->base, ALLOC_IDX_TO_RXMB_IDX(alloc), NULL, false); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ } static int mcux_flexcan_start(const struct device *dev) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; flexcan_timing_config_t timing; int err; if (data->common.started) { return -EALREADY; } if (config->common.phy != NULL) { err = can_transceiver_enable(config->common.phy, data->common.mode); if (err != 0) { LOG_ERR("failed to enable CAN transceiver (err %d)", err); return err; } } /* Reset statistics and clear error counters */ CAN_STATS_RESET(dev); config->base->ECR &= ~(CAN_ECR_TXERRCNT_MASK | CAN_ECR_RXERRCNT_MASK); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD status_t status; int alloc; if (config->flexcan_fd) { /* Re-add all RX filters using current mode */ k_mutex_lock(&data->rx_mutex, K_FOREVER); for (alloc = RX_START_IDX; alloc < MCUX_FLEXCAN_MAX_RX; alloc++) { if (atomic_test_bit(data->rx_allocs, alloc)) { status = mcux_flexcan_mb_start(dev, alloc); if (status != kStatus_Success) { LOG_ERR("Failed to re-add rx filter id %d (err = %d)", alloc, status); k_mutex_unlock(&data->rx_mutex); return -EIO; } } } k_mutex_unlock(&data->rx_mutex); } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ /* Delay this until start since setting the timing automatically exits freeze mode */ timing.preDivider = data->timing.prescaler - 1U; timing.rJumpwidth = data->timing.sjw - 1U; timing.phaseSeg1 = data->timing.phase_seg1 - 1U; timing.phaseSeg2 = data->timing.phase_seg2 - 1U; timing.propSeg = data->timing.prop_seg - 1U; FLEXCAN_SetTimingConfig(config->base, &timing); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if (config->flexcan_fd) { timing.fpreDivider = data->timing_data.prescaler - 1U; timing.frJumpwidth = data->timing_data.sjw - 1U; timing.fphaseSeg1 = data->timing_data.phase_seg1 - 1U; timing.fphaseSeg2 = data->timing_data.phase_seg2 - 1U; timing.fpropSeg = data->timing_data.prop_seg; FLEXCAN_SetFDTimingConfig(config->base, &timing); FLEXCAN_EnterFreezeMode(config->base); config->base->FDCTRL &= ~(CAN_FDCTRL_TDCOFF_MASK); config->base->FDCTRL |= FIELD_PREP(CAN_FDCTRL_TDCOFF_MASK, CAN_CALC_TDCO((&data->timing_data), 1U, 31U)); FLEXCAN_ExitFreezeMode(config->base); } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ data->common.started = true; return 0; } static int mcux_flexcan_stop(const struct device *dev) { const struct mcux_flexcan_config *config = dev->config; 
struct mcux_flexcan_data *data = dev->data; can_tx_callback_t function; void *arg; int alloc; int err; if (!data->common.started) { return -EALREADY; } data->common.started = false; /* Abort any pending TX frames before entering freeze mode */ for (alloc = 0; alloc < MCUX_FLEXCAN_MAX_TX; alloc++) { function = data->tx_cbs[alloc].function; arg = data->tx_cbs[alloc].arg; if (atomic_test_and_clear_bit(data->tx_allocs, alloc)) { #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { FLEXCAN_TransferFDAbortSend(config->base, &data->handle, ALLOC_IDX_TO_TXMB_IDX(alloc)); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ FLEXCAN_TransferAbortSend(config->base, &data->handle, ALLOC_IDX_TO_TXMB_IDX(alloc)); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ function(dev, -ENETDOWN, arg); k_sem_give(&data->tx_allocs_sem); } } FLEXCAN_EnterFreezeMode(config->base); if (UTIL_AND(IS_ENABLED(CONFIG_CAN_MCUX_FLEXCAN_FD), config->flexcan_fd)) { /* * Remove all RX filters and re-add them in start() since the mode may change * between stop()/start(). */ k_mutex_lock(&data->rx_mutex, K_FOREVER); for (alloc = RX_START_IDX; alloc < MCUX_FLEXCAN_MAX_RX; alloc++) { if (atomic_test_bit(data->rx_allocs, alloc)) { mcux_flexcan_mb_stop(dev, alloc); } } k_mutex_unlock(&data->rx_mutex); } if (config->common.phy != NULL) { err = can_transceiver_disable(config->common.phy); if (err != 0) { LOG_ERR("failed to disable CAN transceiver (err %d)", err); return err; } } return 0; } static int mcux_flexcan_set_mode(const struct device *dev, can_mode_t mode) { can_mode_t supported = CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_3_SAMPLES; const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; uint32_t ctrl1; uint32_t mcr; if (data->common.started) { return -EBUSY; } if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { supported |= CAN_MODE_MANUAL_RECOVERY; } if (UTIL_AND(IS_ENABLED(CONFIG_CAN_MCUX_FLEXCAN_FD), config->flexcan_fd)) { supported |= CAN_MODE_FD; } if ((mode & ~(supported)) != 0) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } if ((mode & CAN_MODE_FD) != 0 && (mode & CAN_MODE_3_SAMPLES) != 0) { LOG_ERR("triple samling is not supported in CAN FD mode"); return -ENOTSUP; } ctrl1 = config->base->CTRL1; mcr = config->base->MCR; if ((mode & CAN_MODE_LOOPBACK) != 0) { /* Enable loopback and self-reception */ ctrl1 |= CAN_CTRL1_LPB_MASK; mcr &= ~(CAN_MCR_SRXDIS_MASK); } else { /* Disable loopback and self-reception */ ctrl1 &= ~(CAN_CTRL1_LPB_MASK); mcr |= CAN_MCR_SRXDIS_MASK; } if ((mode & CAN_MODE_LISTENONLY) != 0) { /* Enable listen-only mode */ ctrl1 |= CAN_CTRL1_LOM_MASK; } else { /* Disable listen-only mode */ ctrl1 &= ~(CAN_CTRL1_LOM_MASK); } if ((mode & CAN_MODE_3_SAMPLES) != 0) { /* Enable triple sampling mode */ ctrl1 |= CAN_CTRL1_SMP_MASK; } else { /* Disable triple sampling mode */ ctrl1 &= ~(CAN_CTRL1_SMP_MASK); } if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { if ((mode & CAN_MODE_MANUAL_RECOVERY) != 0) { /* Disable auto-recovery from bus-off */ ctrl1 |= CAN_CTRL1_BOFFREC_MASK; } else { /* Enable auto-recovery from bus-off */ ctrl1 &= ~(CAN_CTRL1_BOFFREC_MASK); } } #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if (config->flexcan_fd) { if ((mode & CAN_MODE_FD) != 0) { /* Enable CAN FD mode */ mcr |= CAN_MCR_FDEN_MASK; /* Transceiver Delay Compensation must be disabled in loopback mode */ if ((mode & CAN_MODE_LOOPBACK) != 0) { config->base->FDCTRL &= ~(CAN_FDCTRL_TDCEN_MASK); } else { config->base->FDCTRL 
|= CAN_FDCTRL_TDCEN_MASK; } } else { /* Disable CAN FD mode */ mcr &= ~(CAN_MCR_FDEN_MASK); } } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ config->base->CTRL1 = ctrl1; config->base->MCR = mcr; data->common.mode = mode; return 0; } static void mcux_flexcan_from_can_frame(const struct can_frame *src, flexcan_frame_t *dest) { memset(dest, 0, sizeof(*dest)); if ((src->flags & CAN_FRAME_IDE) != 0) { dest->format = kFLEXCAN_FrameFormatExtend; dest->id = FLEXCAN_ID_EXT(src->id); } else { dest->format = kFLEXCAN_FrameFormatStandard; dest->id = FLEXCAN_ID_STD(src->id); } if ((src->flags & CAN_FRAME_RTR) != 0) { dest->type = kFLEXCAN_FrameTypeRemote; } else { dest->type = kFLEXCAN_FrameTypeData; dest->dataWord0 = sys_cpu_to_be32(src->data_32[0]); dest->dataWord1 = sys_cpu_to_be32(src->data_32[1]); } dest->length = src->dlc; } static void mcux_flexcan_to_can_frame(const flexcan_frame_t *src, struct can_frame *dest) { memset(dest, 0, sizeof(*dest)); if (src->format == kFLEXCAN_FrameFormatStandard) { dest->id = FLEXCAN_ID_TO_CAN_ID_STD(src->id); } else { dest->flags |= CAN_FRAME_IDE; dest->id = FLEXCAN_ID_TO_CAN_ID_EXT(src->id); } if (src->type == kFLEXCAN_FrameTypeRemote) { dest->flags |= CAN_FRAME_RTR; } else { dest->data_32[0] = sys_be32_to_cpu(src->dataWord0); dest->data_32[1] = sys_be32_to_cpu(src->dataWord1); } dest->dlc = src->length; #ifdef CONFIG_CAN_RX_TIMESTAMP dest->timestamp = src->timestamp; #endif /* CAN_RX_TIMESTAMP */ } #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD static void mcux_flexcan_fd_from_can_frame(const struct can_frame *src, flexcan_fd_frame_t *dest) { int i; memset(dest, 0, sizeof(*dest)); if ((src->flags & CAN_FRAME_IDE) != 0) { dest->format = kFLEXCAN_FrameFormatExtend; dest->id = FLEXCAN_ID_EXT(src->id); } else { dest->format = kFLEXCAN_FrameFormatStandard; dest->id = FLEXCAN_ID_STD(src->id); } if ((src->flags & CAN_FRAME_RTR) != 0) { dest->type = kFLEXCAN_FrameTypeRemote; } else { dest->type = kFLEXCAN_FrameTypeData; for (i = 0; i < ARRAY_SIZE(dest->dataWord); i++) { dest->dataWord[i] = sys_cpu_to_be32(src->data_32[i]); } } if ((src->flags & CAN_FRAME_FDF) != 0) { dest->edl = 1; } if ((src->flags & CAN_FRAME_BRS) != 0) { dest->brs = 1; } dest->length = src->dlc; } static void mcux_flexcan_fd_to_can_frame(const flexcan_fd_frame_t *src, struct can_frame *dest) { int i; memset(dest, 0, sizeof(*dest)); if (src->format == kFLEXCAN_FrameFormatStandard) { dest->id = FLEXCAN_ID_TO_CAN_ID_STD(src->id); } else { dest->flags |= CAN_FRAME_IDE; dest->id = FLEXCAN_ID_TO_CAN_ID_EXT(src->id); } if (src->type == kFLEXCAN_FrameTypeRemote) { dest->flags |= CAN_FRAME_RTR; } else { for (i = 0; i < ARRAY_SIZE(dest->data_32); i++) { dest->data_32[i] = sys_be32_to_cpu(src->dataWord[i]); } } if (src->edl != 0) { dest->flags |= CAN_FRAME_FDF; } if (src->brs != 0) { dest->flags |= CAN_FRAME_BRS; } if (src->esi != 0) { dest->flags |= CAN_FRAME_ESI; } dest->dlc = src->length; #ifdef CONFIG_CAN_RX_TIMESTAMP dest->timestamp = src->timestamp; #endif /* CAN_RX_TIMESTAMP */ } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ static void mcux_flexcan_can_filter_to_mbconfig(const struct can_filter *src, flexcan_rx_mb_config_t *dest, uint32_t *mask) { static const uint32_t ide_mask = 1U; static const uint32_t rtr_mask = !IS_ENABLED(CONFIG_CAN_ACCEPT_RTR); if ((src->flags & CAN_FILTER_IDE) != 0) { dest->format = kFLEXCAN_FrameFormatExtend; dest->id = FLEXCAN_ID_EXT(src->id); *mask = FLEXCAN_RX_MB_EXT_MASK(src->mask, rtr_mask, ide_mask); } else { dest->format = kFLEXCAN_FrameFormatStandard; dest->id = FLEXCAN_ID_STD(src->id); *mask 
= FLEXCAN_RX_MB_STD_MASK(src->mask, rtr_mask, ide_mask); } dest->type = kFLEXCAN_FrameTypeData; } static int mcux_flexcan_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; uint64_t status_flags; if (state != NULL) { if (!data->common.started) { *state = CAN_STATE_STOPPED; } else { status_flags = FLEXCAN_GetStatusFlags(config->base); if ((status_flags & CAN_ESR1_FLTCONF(2)) != 0U) { *state = CAN_STATE_BUS_OFF; } else if ((status_flags & CAN_ESR1_FLTCONF(1)) != 0U) { *state = CAN_STATE_ERROR_PASSIVE; } else if ((status_flags & (kFLEXCAN_TxErrorWarningFlag | kFLEXCAN_RxErrorWarningFlag)) != 0) { *state = CAN_STATE_ERROR_WARNING; } else { *state = CAN_STATE_ERROR_ACTIVE; } } } if (err_cnt != NULL) { FLEXCAN_GetBusErrCount(config->base, &err_cnt->tx_err_cnt, &err_cnt->rx_err_cnt); } return 0; } static int mcux_flexcan_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; flexcan_mb_transfer_t xfer; enum can_state state; status_t status = kStatus_Fail; uint8_t max_dlc = CAN_MAX_DLC; int alloc; if (UTIL_AND(IS_ENABLED(CONFIG_CAN_MCUX_FLEXCAN_FD), ((data->common.mode & CAN_MODE_FD) != 0U))) { if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR | CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if ((frame->flags & CAN_FRAME_FDF) != 0) { max_dlc = CANFD_MAX_DLC; } } else { if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } } if (frame->dlc > max_dlc) { LOG_ERR("DLC of %d exceeds maximum (%d)", frame->dlc, max_dlc); return -EINVAL; } if (!data->common.started) { return -ENETDOWN; } (void)mcux_flexcan_get_state(dev, &state, NULL); if (state == CAN_STATE_BUS_OFF) { LOG_DBG("Transmit failed, bus-off"); return -ENETUNREACH; } if (k_sem_take(&data->tx_allocs_sem, timeout) != 0) { return -EAGAIN; } for (alloc = 0; alloc < MCUX_FLEXCAN_MAX_TX; alloc++) { if (!atomic_test_and_set_bit(data->tx_allocs, alloc)) { break; } } data->tx_cbs[alloc].function = callback; data->tx_cbs[alloc].arg = user_data; xfer.mbIdx = ALLOC_IDX_TO_TXMB_IDX(alloc); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { FLEXCAN_SetFDTxMbConfig(config->base, xfer.mbIdx, true); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ FLEXCAN_SetTxMbConfig(config->base, xfer.mbIdx, true); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ k_mutex_lock(&data->tx_mutex, K_FOREVER); config->irq_disable_func(); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { flexcan_fd_frame_t flexcan_frame; mcux_flexcan_fd_from_can_frame(frame, &flexcan_frame); xfer.framefd = &flexcan_frame; status = FLEXCAN_TransferFDSendNonBlocking(config->base, &data->handle, &xfer); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ flexcan_frame_t flexcan_frame; mcux_flexcan_from_can_frame(frame, &flexcan_frame); xfer.frame = &flexcan_frame; status = FLEXCAN_TransferSendNonBlocking(config->base, &data->handle, &xfer); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ config->irq_enable_func(); k_mutex_unlock(&data->tx_mutex); if (status != kStatus_Success) { return -EIO; } return 0; } static int 
mcux_flexcan_add_rx_filter(const struct device *dev, can_rx_callback_t callback, void *user_data, const struct can_filter *filter) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; status_t status; uint32_t mask; int alloc = -ENOSPC; int i; if ((filter->flags & ~(CAN_FILTER_IDE)) != 0) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } k_mutex_lock(&data->rx_mutex, K_FOREVER); /* Find and allocate RX message buffer */ for (i = RX_START_IDX; i < MCUX_FLEXCAN_MAX_RX; i++) { if (!atomic_test_and_set_bit(data->rx_allocs, i)) { alloc = i; break; } } if (alloc == -ENOSPC) { goto unlock; } mcux_flexcan_can_filter_to_mbconfig(filter, &data->rx_cbs[alloc].mb_config, &mask); data->rx_cbs[alloc].arg = user_data; data->rx_cbs[alloc].function = callback; /* The indidual RX mask registers can only be written in freeze mode */ FLEXCAN_EnterFreezeMode(config->base); config->base->RXIMR[ALLOC_IDX_TO_RXMB_IDX(alloc)] = mask; if (data->common.started) { FLEXCAN_ExitFreezeMode(config->base); } #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD /* Defer starting FlexCAN FD MBs unless started */ if (!config->flexcan_fd || data->common.started) { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ status = mcux_flexcan_mb_start(dev, alloc); if (status != kStatus_Success) { LOG_ERR("Failed to start rx for filter id %d (err = %d)", alloc, status); alloc = -ENOSPC; } #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ unlock: k_mutex_unlock(&data->rx_mutex); return alloc; } static void mcux_flexcan_set_state_change_callback(const struct device *dev, can_state_change_callback_t callback, void *user_data) { struct mcux_flexcan_data *data = dev->data; data->common.state_change_cb = callback; data->common.state_change_cb_user_data = user_data; } #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE static int mcux_flexcan_recover(const struct device *dev, k_timeout_t timeout) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; enum can_state state; uint64_t start_time; int ret = 0; if (!data->common.started) { return -ENETDOWN; } if ((data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { return -ENOTSUP; } (void)mcux_flexcan_get_state(dev, &state, NULL); if (state != CAN_STATE_BUS_OFF) { return 0; } start_time = k_uptime_ticks(); config->base->CTRL1 &= ~CAN_CTRL1_BOFFREC_MASK; if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { (void)mcux_flexcan_get_state(dev, &state, NULL); while (state == CAN_STATE_BUS_OFF) { if (!K_TIMEOUT_EQ(timeout, K_FOREVER) && k_uptime_ticks() - start_time >= timeout.ticks) { ret = -EAGAIN; } (void)mcux_flexcan_get_state(dev, &state, NULL); } } config->base->CTRL1 |= CAN_CTRL1_BOFFREC_MASK; return ret; } #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ static void mcux_flexcan_remove_rx_filter(const struct device *dev, int filter_id) { struct mcux_flexcan_data *data = dev->data; if (filter_id < 0 || filter_id >= MCUX_FLEXCAN_MAX_RX) { LOG_ERR("filter ID %d out of bounds", filter_id); return; } k_mutex_lock(&data->rx_mutex, K_FOREVER); if (atomic_test_and_clear_bit(data->rx_allocs, filter_id)) { #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD const struct mcux_flexcan_config *config = dev->config; /* Stop FlexCAN FD MBs unless already in stopped mode */ if (!config->flexcan_fd || data->common.started) { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ mcux_flexcan_mb_stop(dev, filter_id); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ data->rx_cbs[filter_id].function = NULL; 
data->rx_cbs[filter_id].arg = NULL; } else { LOG_WRN("Filter ID %d already detached", filter_id); } k_mutex_unlock(&data->rx_mutex); } static inline void mcux_flexcan_transfer_error_status(const struct device *dev, uint64_t error) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; const can_state_change_callback_t cb = data->common.state_change_cb; void *cb_data = data->common.state_change_cb_user_data; can_tx_callback_t function; void *arg; int alloc; enum can_state state; struct can_bus_err_cnt err_cnt; if ((error & kFLEXCAN_Bit0Error) != 0U) { CAN_STATS_BIT0_ERROR_INC(dev); } if ((error & kFLEXCAN_Bit1Error) != 0U) { CAN_STATS_BIT1_ERROR_INC(dev); } if ((error & kFLEXCAN_AckError) != 0U) { CAN_STATS_ACK_ERROR_INC(dev); } if ((error & kFLEXCAN_StuffingError) != 0U) { CAN_STATS_STUFF_ERROR_INC(dev); } if ((error & kFLEXCAN_FormError) != 0U) { CAN_STATS_FORM_ERROR_INC(dev); } if ((error & kFLEXCAN_CrcError) != 0U) { CAN_STATS_CRC_ERROR_INC(dev); } (void)mcux_flexcan_get_state(dev, &state, &err_cnt); if (data->state != state) { data->state = state; if (cb != NULL) { cb(dev, state, err_cnt, cb_data); } } if (state == CAN_STATE_BUS_OFF) { /* Abort any pending TX frames in case of bus-off */ for (alloc = 0; alloc < MCUX_FLEXCAN_MAX_TX; alloc++) { /* Copy callback function and argument before clearing bit */ function = data->tx_cbs[alloc].function; arg = data->tx_cbs[alloc].arg; if (atomic_test_and_clear_bit(data->tx_allocs, alloc)) { #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { FLEXCAN_TransferFDAbortSend(config->base, &data->handle, ALLOC_IDX_TO_TXMB_IDX(alloc)); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ FLEXCAN_TransferAbortSend(config->base, &data->handle, ALLOC_IDX_TO_TXMB_IDX(alloc)); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ function(dev, -ENETUNREACH, arg); k_sem_give(&data->tx_allocs_sem); } } } } static inline void mcux_flexcan_transfer_tx_idle(const struct device *dev, uint32_t mb) { struct mcux_flexcan_data *data = dev->data; can_tx_callback_t function; void *arg; int alloc; alloc = TX_MBIDX_TO_ALLOC_IDX(mb); /* Copy callback function and argument before clearing bit */ function = data->tx_cbs[alloc].function; arg = data->tx_cbs[alloc].arg; if (atomic_test_and_clear_bit(data->tx_allocs, alloc)) { function(dev, 0, arg); k_sem_give(&data->tx_allocs_sem); } } static inline void mcux_flexcan_transfer_rx_idle(const struct device *dev, uint32_t mb) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; can_rx_callback_t function; flexcan_mb_transfer_t xfer; struct can_frame frame; status_t status = kStatus_Fail; void *arg; int alloc; alloc = RX_MBIDX_TO_ALLOC_IDX(mb); function = data->rx_cbs[alloc].function; arg = data->rx_cbs[alloc].arg; if (atomic_test_bit(data->rx_allocs, alloc)) { #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { mcux_flexcan_fd_to_can_frame(&data->rx_cbs[alloc].frame.fd, &frame); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ mcux_flexcan_to_can_frame(&data->rx_cbs[alloc].frame.classic, &frame); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ function(dev, &frame, arg); /* Setup RX message buffer to receive next message */ xfer.mbIdx = mb; #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { xfer.framefd = &data->rx_cbs[alloc].frame.fd; status = FLEXCAN_TransferFDReceiveNonBlocking(config->base, 
&data->handle, &xfer); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ xfer.frame = &data->rx_cbs[alloc].frame.classic; status = FLEXCAN_TransferReceiveNonBlocking(config->base, &data->handle, &xfer); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ if (status != kStatus_Success) { LOG_ERR("Failed to restart rx for filter id %d " "(err = %d)", alloc, status); } } } static FLEXCAN_CALLBACK(mcux_flexcan_transfer_callback) { struct mcux_flexcan_data *data = (struct mcux_flexcan_data *)userData; const struct mcux_flexcan_config *config = data->dev->config; /* * The result field can either be a MB index (which is limited to 32 bit * value) or a status flags value, which is 32 bit on some platforms but * 64 on others. To decouple the remaining functions from this, the * result field is always promoted to uint64_t. */ uint32_t mb = (uint32_t)result; uint64_t status_flags = result; ARG_UNUSED(base); switch (status) { case kStatus_FLEXCAN_UnHandled: /* Not all fault confinement state changes are handled by the HAL */ __fallthrough; case kStatus_FLEXCAN_ErrorStatus: mcux_flexcan_transfer_error_status(data->dev, status_flags); break; case kStatus_FLEXCAN_TxSwitchToRx: #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if ((data->common.mode & CAN_MODE_FD) != 0U) { FLEXCAN_TransferFDAbortReceive(config->base, &data->handle, mb); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ FLEXCAN_TransferAbortReceive(config->base, &data->handle, mb); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ __fallthrough; case kStatus_FLEXCAN_TxIdle: mcux_flexcan_transfer_tx_idle(data->dev, mb); break; case kStatus_FLEXCAN_RxOverflow: CAN_STATS_RX_OVERRUN_INC(data->dev); __fallthrough; case kStatus_Fail: /* If reading an RX MB failed mark it as idle to be reprocessed. 
*/ __fallthrough; case kStatus_FLEXCAN_RxIdle: mcux_flexcan_transfer_rx_idle(data->dev, mb); break; default: LOG_WRN("Unhandled status 0x%08x (result = 0x%016llx)", status, status_flags); } } static void mcux_flexcan_isr(const struct device *dev) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; FLEXCAN_TransferHandleIRQ(config->base, &data->handle); } static int mcux_flexcan_init(const struct device *dev) { const struct mcux_flexcan_config *config = dev->config; struct mcux_flexcan_data *data = dev->data; flexcan_config_t flexcan_config; uint32_t clock_freq; int err; if (config->common.phy != NULL) { if (!device_is_ready(config->common.phy)) { LOG_ERR("CAN transceiver not ready"); return -ENODEV; } } if (!device_is_ready(config->clock_dev)) { LOG_ERR("clock device not ready"); return -ENODEV; } k_mutex_init(&data->rx_mutex); k_mutex_init(&data->tx_mutex); k_sem_init(&data->tx_allocs_sem, MCUX_FLEXCAN_MAX_TX, MCUX_FLEXCAN_MAX_TX); err = can_calc_timing(dev, &data->timing, config->common.bitrate, config->common.sample_point); if (err == -EINVAL) { LOG_ERR("Can't find timing for given param"); return -EIO; } LOG_DBG("Presc: %d, Seg1S1: %d, Seg2: %d", data->timing.prescaler, data->timing.phase_seg1, data->timing.phase_seg2); LOG_DBG("Sample-point err : %d", err); /* Validate initial timing parameters */ err = can_set_timing(dev, &data->timing); if (err != 0) { LOG_ERR("failed to set timing (err %d)", err); return -ENODEV; } #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if (config->flexcan_fd) { err = can_calc_timing_data(dev, &data->timing_data, config->common.bitrate_data, config->common.sample_point_data); if (err == -EINVAL) { LOG_ERR("Can't find timing for given param"); return -EIO; } LOG_DBG("Presc: %d, Seg1S1: %d, Seg2: %d", data->timing_data.prescaler, data->timing_data.phase_seg1, data->timing_data.phase_seg2); LOG_DBG("Sample-point err : %d", err); /* Validate initial data phase timing parameters */ err = can_set_timing_data(dev, &data->timing_data); if (err != 0) { LOG_ERR("failed to set data phase timing (err %d)", err); return -ENODEV; } } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err != 0) { return err; } err = mcux_flexcan_get_core_clock(dev, &clock_freq); if (err != 0) { return -EIO; } data->dev = dev; FLEXCAN_GetDefaultConfig(&flexcan_config); flexcan_config.maxMbNum = MCUX_FLEXCAN_MAX_MB; flexcan_config.clkSrc = config->clk_source; flexcan_config.baudRate = clock_freq / (1U + data->timing.prop_seg + data->timing.phase_seg1 + data->timing.phase_seg2) / data->timing.prescaler; #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if (config->flexcan_fd) { flexcan_config.baudRateFD = clock_freq / (1U + data->timing_data.prop_seg + data->timing_data.phase_seg1 + data->timing_data.phase_seg2) / data->timing_data.prescaler; } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ flexcan_config.enableIndividMask = true; flexcan_config.enableLoopBack = false; flexcan_config.disableSelfReception = true; /* Initialize in listen-only mode since FLEXCAN_{FD}Init() exits freeze mode */ flexcan_config.enableListenOnlyMode = true; flexcan_config.timingConfig.rJumpwidth = data->timing.sjw - 1U; flexcan_config.timingConfig.propSeg = data->timing.prop_seg - 1U; flexcan_config.timingConfig.phaseSeg1 = data->timing.phase_seg1 - 1U; flexcan_config.timingConfig.phaseSeg2 = data->timing.phase_seg2 - 1U; #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD if (config->flexcan_fd) { flexcan_config.timingConfig.frJumpwidth = data->timing_data.sjw 
- 1U; flexcan_config.timingConfig.fpropSeg = data->timing_data.prop_seg; flexcan_config.timingConfig.fphaseSeg1 = data->timing_data.phase_seg1 - 1U; flexcan_config.timingConfig.fphaseSeg2 = data->timing_data.phase_seg2 - 1U; FLEXCAN_FDInit(config->base, &flexcan_config, clock_freq, kFLEXCAN_64BperMB, true); } else { #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ FLEXCAN_Init(config->base, &flexcan_config, clock_freq); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD } #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ FLEXCAN_TransferCreateHandle(config->base, &data->handle, mcux_flexcan_transfer_callback, data); /* Manually enter freeze mode, set normal mode, and clear error counters */ FLEXCAN_EnterFreezeMode(config->base); (void)mcux_flexcan_set_mode(dev, CAN_MODE_NORMAL); config->base->ECR &= ~(CAN_ECR_TXERRCNT_MASK | CAN_ECR_RXERRCNT_MASK); config->irq_config_func(dev); /* Enable auto-recovery from bus-off */ config->base->CTRL1 &= ~(CAN_CTRL1_BOFFREC_MASK); (void)mcux_flexcan_get_state(dev, &data->state, NULL); return 0; } __maybe_unused static const struct can_driver_api mcux_flexcan_driver_api = { .get_capabilities = mcux_flexcan_get_capabilities, .start = mcux_flexcan_start, .stop = mcux_flexcan_stop, .set_mode = mcux_flexcan_set_mode, .set_timing = mcux_flexcan_set_timing, .send = mcux_flexcan_send, .add_rx_filter = mcux_flexcan_add_rx_filter, .remove_rx_filter = mcux_flexcan_remove_rx_filter, .get_state = mcux_flexcan_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = mcux_flexcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .set_state_change_callback = mcux_flexcan_set_state_change_callback, .get_core_clock = mcux_flexcan_get_core_clock, .get_max_filters = mcux_flexcan_get_max_filters, /* * FlexCAN timing limits are specified in the "FLEXCANx_CTRL1 field * descriptions" table in the SoC reference manual. * * Note that the values here are the "physical" timing limits, whereas * the register field limits are physical values minus 1 (which is * handled by the flexcan_timing_config_t field assignments elsewhere * in this driver). */ .timing_min = { .sjw = 0x01, .prop_seg = 0x01, .phase_seg1 = 0x01, .phase_seg2 = 0x02, .prescaler = 0x01 }, .timing_max = { .sjw = 0x04, .prop_seg = 0x08, .phase_seg1 = 0x08, .phase_seg2 = 0x08, .prescaler = 0x100 } }; #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD static const struct can_driver_api mcux_flexcan_fd_driver_api = { .get_capabilities = mcux_flexcan_get_capabilities, .start = mcux_flexcan_start, .stop = mcux_flexcan_stop, .set_mode = mcux_flexcan_set_mode, .set_timing = mcux_flexcan_set_timing, .set_timing_data = mcux_flexcan_set_timing_data, .send = mcux_flexcan_send, .add_rx_filter = mcux_flexcan_add_rx_filter, .remove_rx_filter = mcux_flexcan_remove_rx_filter, .get_state = mcux_flexcan_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = mcux_flexcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .set_state_change_callback = mcux_flexcan_set_state_change_callback, .get_core_clock = mcux_flexcan_get_core_clock, .get_max_filters = mcux_flexcan_get_max_filters, /* * FlexCAN FD timing limits are specified in the "CAN Bit Timing * Register (CBT)" and "CAN FD Bit Timing Register" field description * tables in the SoC reference manual. * * Note that the values here are the "physical" timing limits, whereas * the register field limits are physical values minus 1 (which is * handled by the flexcan_timing_config_t field assignments elsewhere * in this driver). 
The exception to this are the prop_seg values for * the data phase, which correspond to the allowed register values. */ .timing_min = { .sjw = 0x01, .prop_seg = 0x01, .phase_seg1 = 0x01, .phase_seg2 = 0x02, .prescaler = 0x01 }, .timing_max = { .sjw = 0x20, .prop_seg = 0x40, .phase_seg1 = 0x20, .phase_seg2 = 0x20, .prescaler = 0x400 }, .timing_data_min = { .sjw = 0x01, .prop_seg = 0x01, .phase_seg1 = 0x01, .phase_seg2 = 0x02, .prescaler = 0x01 }, .timing_data_max = { .sjw = 0x08, .prop_seg = 0x1f, .phase_seg1 = 0x08, .phase_seg2 = 0x08, .prescaler = 0x400 }, }; #endif /* CONFIG_CAN_MCUX_FLEXCAN_FD */ #define FLEXCAN_IRQ_BY_IDX(node_id, prop, idx, cell) \ DT_IRQ_BY_NAME(node_id, \ DT_STRING_TOKEN_BY_IDX(node_id, prop, idx), cell) #define FLEXCAN_IRQ_ENABLE_CODE(node_id, prop, idx) \ irq_enable(FLEXCAN_IRQ_BY_IDX(node_id, prop, idx, irq)); #define FLEXCAN_IRQ_DISABLE_CODE(node_id, prop, idx) \ irq_disable(FLEXCAN_IRQ_BY_IDX(node_id, prop, idx, irq)); #define FLEXCAN_IRQ_CONFIG_CODE(node_id, prop, idx) \ do { \ IRQ_CONNECT(FLEXCAN_IRQ_BY_IDX(node_id, prop, idx, irq), \ FLEXCAN_IRQ_BY_IDX(node_id, prop, idx, priority), \ mcux_flexcan_isr, \ DEVICE_DT_GET(node_id), 0); \ FLEXCAN_IRQ_ENABLE_CODE(node_id, prop, idx); \ } while (false); #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD #define FLEXCAN_MAX_BITRATE(id) \ COND_CODE_1(DT_INST_NODE_HAS_COMPAT(id, FLEXCAN_FD_DRV_COMPAT), \ (8000000), (1000000)) #else /* CONFIG_CAN_MCUX_FLEXCAN_FD */ #define FLEXCAN_MAX_BITRATE(id) 1000000 #endif /* !CONFIG_CAN_MCUX_FLEXCAN_FD */ #ifdef CONFIG_CAN_MCUX_FLEXCAN_FD #define FLEXCAN_DRIVER_API(id) \ COND_CODE_1(DT_INST_NODE_HAS_COMPAT(id, FLEXCAN_FD_DRV_COMPAT), \ (mcux_flexcan_fd_driver_api), \ (mcux_flexcan_driver_api)) #else /* CONFIG_CAN_MCUX_FLEXCAN_FD */ #define FLEXCAN_DRIVER_API(id) mcux_flexcan_driver_api #endif /* !CONFIG_CAN_MCUX_FLEXCAN_FD */ #define FLEXCAN_DEVICE_INIT_MCUX(id) \ PINCTRL_DT_INST_DEFINE(id); \ \ static void mcux_flexcan_irq_config_##id(const struct device *dev); \ static void mcux_flexcan_irq_enable_##id(void); \ static void mcux_flexcan_irq_disable_##id(void); \ \ static const struct mcux_flexcan_config mcux_flexcan_config_##id = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(id, 0, FLEXCAN_MAX_BITRATE(id)), \ .base = (CAN_Type *)DT_INST_REG_ADDR(id), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)), \ .clock_subsys = (clock_control_subsys_t) \ DT_INST_CLOCKS_CELL(id, name), \ .clk_source = DT_INST_PROP(id, clk_source), \ IF_ENABLED(CONFIG_CAN_MCUX_FLEXCAN_FD, ( \ .flexcan_fd = DT_INST_NODE_HAS_COMPAT(id, FLEXCAN_FD_DRV_COMPAT), \ )) \ .irq_config_func = mcux_flexcan_irq_config_##id, \ .irq_enable_func = mcux_flexcan_irq_enable_##id, \ .irq_disable_func = mcux_flexcan_irq_disable_##id, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ }; \ \ static struct mcux_flexcan_data mcux_flexcan_data_##id; \ \ CAN_DEVICE_DT_INST_DEFINE(id, mcux_flexcan_init, \ NULL, &mcux_flexcan_data_##id, \ &mcux_flexcan_config_##id, \ POST_KERNEL, CONFIG_CAN_INIT_PRIORITY,\ &FLEXCAN_DRIVER_API(id)); \ \ static void mcux_flexcan_irq_config_##id(const struct device *dev) \ { \ DT_INST_FOREACH_PROP_ELEM(id, interrupt_names, FLEXCAN_IRQ_CONFIG_CODE); \ } \ \ static void mcux_flexcan_irq_enable_##id(void) \ { \ DT_INST_FOREACH_PROP_ELEM(id, interrupt_names, FLEXCAN_IRQ_ENABLE_CODE); \ } \ \ static void mcux_flexcan_irq_disable_##id(void) \ { \ DT_INST_FOREACH_PROP_ELEM(id, interrupt_names, FLEXCAN_IRQ_DISABLE_CODE); \ } DT_INST_FOREACH_STATUS_OKAY(FLEXCAN_DEVICE_INIT_MCUX) ```
/content/code_sandbox/drivers/can/can_mcux_flexcan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,435
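
The FlexCAN instance defined above is only reachable through Zephyr's generic CAN API (the `can_driver_api` table maps `.start`/`.send` to `mcux_flexcan_start()`/`mcux_flexcan_send()`), so applications never call the `mcux_flexcan_*` functions directly. A minimal sketch of that usage, assuming a board whose `zephyr,canbus` chosen node points at a FlexCAN instance; the frame ID and payload are illustrative only:

```c
#include <zephyr/kernel.h>
#include <zephyr/drivers/can.h>

int main(void)
{
	/* Assumption: the board devicetree sets zephyr,canbus to a FlexCAN node */
	const struct device *can_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
	struct can_frame frame = {
		.id = 0x123,            /* illustrative standard 11-bit ID */
		.dlc = 2,
		.data = { 0xde, 0xad },
	};
	int err;

	if (!device_is_ready(can_dev)) {
		return -ENODEV;
	}

	/* can_start() dispatches to the driver's .start op (mcux_flexcan_start) */
	err = can_start(can_dev);
	if (err != 0 && err != -EALREADY) {
		return err;
	}

	/* NULL callback makes can_send() block until TX completes or times out */
	return can_send(can_dev, &frame, K_MSEC(100), NULL, NULL);
}
```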
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #include <zephyr/logging/log.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(can_tcan4x5x, CONFIG_CAN_LOG_LEVEL); #define DT_DRV_COMPAT ti_tcan4x5x /* * The register definitions correspond to those found in the TI TCAN4550-Q1 datasheet, revision D * June 2022 (SLLSEZ5D). */ /* Device ID1 register */ #define CAN_TCAN4X5X_DEVICE_ID1 0x0000 /* Device ID2 register */ #define CAN_TCAN4X5X_DEVICE_ID2 0x0004 /* Revision register */ #define CAN_TCAN4X5X_REVISION 0x0008 #define CAN_TCAN4X5X_REVISION_SPI_2_REVISION GENMASK(31, 24) #define CAN_TCAN4X5X_REVISION_REV_ID_MAJOR GENMASK(15, 8) #define CAN_TCAN4X5X_REVISION_REV_ID_MINOR GENMASK(7, 0) /* Status register */ #define CAN_TCAN4X5X_STATUS 0x000c #define CAN_TCAN4X5X_STATUS_INTERNAL_READ_ERROR BIT(29) #define CAN_TCAN4X5X_STATUS_INTERNAL_WRITE_ERROR BIT(28) #define CAN_TCAN4X5X_STATUS_INTERNAL_ERROR_LOG_WRITE BIT(27) #define CAN_TCAN4X5X_STATUS_READ_FIFO_UNDERFLOW BIT(26) #define CAN_TCAN4X5X_STATUS_READ_FIFO_EMPTY BIT(25) #define CAN_TCAN4X5X_STATUS_WRITE_FIFO_OVERFLOW BIT(24) #define CAN_TCAN4X5X_STATUS_SPI_END_ERROR BIT(21) #define CAN_TCAN4X5X_STATUS_INVALID_COMMAND BIT(20) #define CAN_TCAN4X5X_STATUS_WRITE_OVERFLOW BIT(19) #define CAN_TCAN4X5X_STATUS_WRITE_UNDERFLOW BIT(18) #define CAN_TCAN4X5X_STATUS_READ_OVERFLOW BIT(17) #define CAN_TCAN4X5X_STATUS_READ_UNDERFLOW BIT(16) #define CAN_TCAN4X5X_STATUS_WRITE_FIFO_AVAILABLE BIT(5) #define CAN_TCAN4X5X_STATUS_READ_FIFO_AVAILABLE BIT(4) #define CAN_TCAN4X5X_STATUS_INTERNAL_ACCESS_ACTIVE BIT(3) #define CAN_TCAN4X5X_STATUS_INTERNAL_ERROR_INTERRUPT BIT(2) #define CAN_TCAN4X5X_STATUS_SPI_ERROR_INTERRUPT BIT(1) #define CAN_TCAN4X5X_STATUS_INTERRUPT BIT(0) /* Mask of clearable status register bits */ #define CAN_TCAN4X5X_STATUS_CLEAR_ALL \ (CAN_TCAN4X5X_STATUS_INTERNAL_READ_ERROR | CAN_TCAN4X5X_STATUS_INTERNAL_WRITE_ERROR | \ CAN_TCAN4X5X_STATUS_INTERNAL_ERROR_LOG_WRITE | CAN_TCAN4X5X_STATUS_READ_FIFO_UNDERFLOW | \ CAN_TCAN4X5X_STATUS_READ_FIFO_EMPTY | CAN_TCAN4X5X_STATUS_WRITE_FIFO_OVERFLOW | \ CAN_TCAN4X5X_STATUS_SPI_END_ERROR | CAN_TCAN4X5X_STATUS_INVALID_COMMAND | \ CAN_TCAN4X5X_STATUS_WRITE_OVERFLOW | CAN_TCAN4X5X_STATUS_WRITE_UNDERFLOW | \ CAN_TCAN4X5X_STATUS_READ_OVERFLOW | CAN_TCAN4X5X_STATUS_READ_UNDERFLOW) /* SPI Error Status Mask register */ #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK 0x0010 #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_INTERNAL_READ_ERROR BIT(29) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_INTERNAL_WRITE_ERROR BIT(28) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_INTERNAL_ERROR_LOG_WRITE BIT(27) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_READ_FIFO_UNDERFLOW BIT(26) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_READ_FIFO_EMPTY BIT(25) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_WRITE_FIFO_OVERFLOW BIT(24) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_SPI_END_ERROR BIT(21) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_INVALID_COMMAND BIT(20) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_WRITE_OVERFLOW BIT(19) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_WRITE_UNDERFLOW BIT(18) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_READ_OVERFLOW BIT(17) #define CAN_TCAN4X5X_SPI_ERROR_STATUS_MASK_READ_UNDERFLOW BIT(16) /* Modes of Operation and Pin Configurations register */ #define CAN_TCAN4X5X_MODE_CONFIG 0x0800 #define CAN_TCAN4X5X_MODE_CONFIG_WAKE_CONFIG GENMASK(31, 30) #define 
CAN_TCAN4X5X_MODE_CONFIG_WD_TIMER GENMASK(29, 28) #define CAN_TCAN4X5X_MODE_CONFIG_CLK_REF BIT(27) #define CAN_TCAN4X5X_MODE_CONFIG_GPO2_CONFIG GENMASK(23, 22) #define CAN_TCAN4X5X_MODE_CONFIG_TEST_MODE_EN BIT(21) #define CAN_TCAN4X5X_MODE_CONFIG_NWKRQ_VOLTAGE BIT(19) #define CAN_TCAN4X5X_MODE_CONFIG_WD_BIT_SET BIT(18) #define CAN_TCAN4X5X_MODE_CONFIG_WD_ACTION GENMASK(17, 16) #define CAN_TCAN4X5X_MODE_CONFIG_GPIO1_CONFIG GENMASK(15, 14) #define CAN_TCAN4X5X_MODE_CONFIG_FAIL_SAFE_EN BIT(13) #define CAN_TCAN4X5X_MODE_CONFIG_GPIO1_GPO_CONFIG GENMASK(11, 10) #define CAN_TCAN4X5X_MODE_CONFIG_INH_DIS BIT(9) #define CAN_TCAN4X5X_MODE_CONFIG_NWKRQ_CONFIG BIT(8) #define CAN_TCAN4X5X_MODE_CONFIG_MODE_SEL GENMASK(7, 6) #define CAN_TCAN4X5X_MODE_CONFIG_WD_EN BIT(3) #define CAN_TCAN4X5X_MODE_CONFIG_DEVICE_RESET BIT(2) #define CAN_TCAN4X5X_MODE_CONFIG_SWE_DIS BIT(1) #define CAN_TCAN4X5X_MODE_CONFIG_TEST_MODE_CONFIG BIT(0) /* Timestamp Prescaler register */ #define CAN_TCAN4X5X_TIMESTAMP_PRESCALER 0x0804 #define CAN_TCAN4X5X_TIMESTAMP_PRESCALER_MASK GENMASK(7, 0) /* Test Register and Scratch Pad */ #define CAN_TCAN4X5X_TEST_SCRATCH_PAD 0x0808 #define CAN_TCAN4X5X_TEST_SCRATCH_PAD_READ_WRITE GENMASK(31, 16) #define CAN_TCAN4X5X_TEST_SCRATCH_PAD_SCRATCH_PAD GENMASK(15, 0) /* Test register */ #define CAN_TCAN4X5X_TEST 0x0808 #define CAN_TCAN4X5X_TEST_ECC_ERR_FORCE_BIT_SEL GENMASK(21, 16) #define CAN_TCAN4X5X_TEST_ECC_ERR_FORCE BIT(12) #define CAN_TCAN4X5X_TEST_ECC_ERR_CHECK BIT(11) /* Interrupts register */ #define CAN_TCAN4X5X_IR 0x0820 #define CAN_TCAN4X5X_IR_CANBUSNOM BIT(31) #define CAN_TCAN4X5X_IR_SMS BIT(23) #define CAN_TCAN4X5X_IR_UVSUP BIT(22) #define CAN_TCAN4X5X_IR_UVIO BIT(21) #define CAN_TCAN4X5X_IR_PWRON BIT(20) #define CAN_TCAN4X5X_IR_TSD BIT(19) #define CAN_TCAN4X5X_IR_WDTO BIT(18) #define CAN_TCAN4X5X_IR_ECCERR BIT(16) #define CAN_TCAN4X5X_IR_CANINT BIT(15) #define CAN_TCAN4X5X_IR_LWU BIT(14) #define CAN_TCAN4X5X_IR_WKERR BIT(13) #define CAN_TCAN4X5X_IR_CANSLNT BIT(10) #define CAN_TCAN4X5X_IR_CANDOM BIT(8) #define CAN_TCAN4X5X_IR_GLOBALERR BIT(7) #define CAN_TCAN4X5X_IR_WKRQ BIT(6) #define CAN_TCAN4X5X_IR_CANERR BIT(5) #define CAN_TCAN4X5X_IR_SPIERR BIT(3) #define CAN_TCAN4X5X_IR_M_CAN_INT BIT(1) #define CAN_TCAN4X5X_IR_VTWD BIT(0) /* Mask of clearable interrupts register bits */ #define CAN_TCAN4X5X_IR_CLEAR_ALL \ (CAN_TCAN4X5X_IR_SMS | CAN_TCAN4X5X_IR_UVSUP | CAN_TCAN4X5X_IR_UVIO | \ CAN_TCAN4X5X_IR_PWRON | CAN_TCAN4X5X_IR_TSD | CAN_TCAN4X5X_IR_WDTO | \ CAN_TCAN4X5X_IR_ECCERR | CAN_TCAN4X5X_IR_CANINT | CAN_TCAN4X5X_IR_LWU | \ CAN_TCAN4X5X_IR_WKERR | CAN_TCAN4X5X_IR_CANSLNT | CAN_TCAN4X5X_IR_CANDOM) /* MCAN Interrupts register */ #define CAN_TCAN4X5X_MCAN_IR 0x0824 #define CAN_TCAN4X5X_MCAN_IR_ARA BIT(29) #define CAN_TCAN4X5X_MCAN_IR_PED BIT(28) #define CAN_TCAN4X5X_MCAN_IR_PEA BIT(27) #define CAN_TCAN4X5X_MCAN_IR_WDI BIT(26) #define CAN_TCAN4X5X_MCAN_IR_BO BIT(25) #define CAN_TCAN4X5X_MCAN_IR_EW BIT(24) #define CAN_TCAN4X5X_MCAN_IR_EP BIT(23) #define CAN_TCAN4X5X_MCAN_IR_ELO BIT(22) #define CAN_TCAN4X5X_MCAN_IR_BEU BIT(21) #define CAN_TCAN4X5X_MCAN_IR_BEC BIT(20) #define CAN_TCAN4X5X_MCAN_IR_DRX BIT(19) #define CAN_TCAN4X5X_MCAN_IR_TOO BIT(18) #define CAN_TCAN4X5X_MCAN_IR_MRAF BIT(17) #define CAN_TCAN4X5X_MCAN_IR_TSW BIT(16) #define CAN_TCAN4X5X_MCAN_IR_TEFL BIT(15) #define CAN_TCAN4X5X_MCAN_IR_TEFF BIT(14) #define CAN_TCAN4X5X_MCAN_IR_TEFW BIT(13) #define CAN_TCAN4X5X_MCAN_IR_TEFN BIT(12) #define CAN_TCAN4X5X_MCAN_IR_TFE BIT(11) #define CAN_TCAN4X5X_MCAN_IR_TCF BIT(10) #define 
CAN_TCAN4X5X_MCAN_IR_TC BIT(9) #define CAN_TCAN4X5X_MCAN_IR_HPM BIT(8) #define CAN_TCAN4X5X_MCAN_IR_RF1L BIT(7) #define CAN_TCAN4X5X_MCAN_IR_RF1F BIT(6) #define CAN_TCAN4X5X_MCAN_IR_RF1W BIT(5) #define CAN_TCAN4X5X_MCAN_IR_RF1N BIT(4) #define CAN_TCAN4X5X_MCAN_IR_RF0L BIT(3) #define CAN_TCAN4X5X_MCAN_IR_RF0F BIT(2) #define CAN_TCAN4X5X_MCAN_IR_RF0W BIT(1) #define CAN_TCAN4X5X_MCAN_IR_RF0N BIT(0) /* Interrupt Enables register */ #define CAN_TCAN4X5X_IE 0x0830 #define CAN_TCAN4X5X_IE_UVSUP BIT(22) #define CAN_TCAN4X5X_IE_UVIO BIT(21) #define CAN_TCAN4X5X_IE_TSD BIT(19) #define CAN_TCAN4X5X_IE_ECCERR BIT(16) #define CAN_TCAN4X5X_IE_CANINT BIT(15) #define CAN_TCAN4X5X_IE_LWU BIT(14) #define CAN_TCAN4X5X_IE_CANSLNT BIT(10) #define CAN_TCAN4X5X_IE_CANDOM BIT(8) /* Bosch M_CAN registers base address */ #define CAN_TCAN4X5X_MCAN_BASE 0x1000 /* Bosch M_CAN Message RAM base address and size */ #define CAN_TCAN4X5X_MRAM_BASE 0x8000 #define CAN_TCAN4X5X_MRAM_SIZE 2048 /* TCAN4x5x SPI OP codes */ #define CAN_TCAN4X5X_WRITE_B_FL 0x61 #define CAN_TCAN4X5X_READ_B_FL 0x41 /* TCAN4x5x timing requirements */ #define CAN_TCAN4X5X_T_MODE_STBY_NOM_US 70 #define CAN_TCAN4X5X_T_WAKE_US 50 #define CAN_TCAN4X5X_T_PULSE_WIDTH_US 30 #define CAN_TCAN4X5X_T_RESET_US 1000 /* * Only compile in support for the optional GPIOs if at least one enabled tcan4x5x device tree node * has them. Only the INT GPIO is required. */ #define TCAN4X5X_RST_GPIO_SUPPORT DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) #define TCAN4X5X_NWKRQ_GPIO_SUPPORT DT_ANY_INST_HAS_PROP_STATUS_OKAY(device_state_gpios) #define TCAN4X5X_WAKE_GPIO_SUPPORT DT_ANY_INST_HAS_PROP_STATUS_OKAY(device_wake_gpios) struct tcan4x5x_config { struct spi_dt_spec spi; #if TCAN4X5X_RST_GPIO_SUPPORT struct gpio_dt_spec rst_gpio; #endif /* TCAN4X5X_RST_GPIO_SUPPORT */ #if TCAN4X5X_NWKRQ_GPIO_SUPPORT struct gpio_dt_spec nwkrq_gpio; #endif /* TCAN4X5X_NWKRQ_GPIO_SUPPORT */ #if TCAN4X5X_WAKE_GPIO_SUPPORT struct gpio_dt_spec wake_gpio; #endif /* TCAN4X5X_WAKE_GPIO_SUPPORT */ struct gpio_dt_spec int_gpio; uint32_t clk_freq; }; struct tcan4x5x_data { struct gpio_callback int_gpio_cb; struct k_thread int_thread; struct k_sem int_sem; K_KERNEL_STACK_MEMBER(int_stack, CONFIG_CAN_TCAN4X5X_THREAD_STACK_SIZE); }; static int tcan4x5x_read(const struct device *dev, uint16_t addr, void *dst, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct tcan4x5x_config *tcan_config = mcan_config->custom; size_t len32 = len / sizeof(uint32_t); uint32_t *dst32 = (uint32_t *)dst; uint8_t cmd[4] = {CAN_TCAN4X5X_READ_B_FL, addr >> 8U & 0xFF, addr & 0xFF, len32 == 256 ? 
0U : len32}; uint8_t global_status; const struct spi_buf tx_bufs[] = { {.buf = &cmd, .len = sizeof(cmd)}, }; const struct spi_buf rx_bufs[] = { {.buf = &global_status, .len = sizeof(global_status)}, {.buf = NULL, .len = 3}, {.buf = dst, .len = len}, }; const struct spi_buf_set tx = { .buffers = tx_bufs, .count = ARRAY_SIZE(tx_bufs), }; const struct spi_buf_set rx = { .buffers = rx_bufs, .count = ARRAY_SIZE(rx_bufs), }; int err; int i; if (len == 0) { return 0; } /* Maximum transfer size is 256 32-bit words */ __ASSERT_NO_MSG(len % 4 == 0); __ASSERT_NO_MSG(len32 <= 256); err = spi_transceive_dt(&tcan_config->spi, &tx, &rx); if (err != 0) { LOG_ERR("failed to read addr %u, len %d (err %d)", addr, len, err); return err; } __ASSERT_NO_MSG((global_status & CAN_TCAN4X5X_IR_SPIERR) == 0U); for (i = 0; i < len32; i++) { dst32[i] = sys_be32_to_cpu(dst32[i]); } return 0; } static int tcan4x5x_write(const struct device *dev, uint16_t addr, const void *src, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct tcan4x5x_config *tcan_config = mcan_config->custom; size_t len32 = len / sizeof(uint32_t); uint32_t src32[len32]; uint8_t cmd[4] = {CAN_TCAN4X5X_WRITE_B_FL, addr >> 8U & 0xFF, addr & 0xFF, len32 == 256 ? 0U : len32}; uint8_t global_status; const struct spi_buf tx_bufs[] = { {.buf = &cmd, .len = sizeof(cmd)}, {.buf = &src32, .len = len}, }; const struct spi_buf rx_bufs[] = { {.buf = &global_status, .len = sizeof(global_status)}, }; const struct spi_buf_set tx = { .buffers = tx_bufs, .count = ARRAY_SIZE(tx_bufs), }; const struct spi_buf_set rx = { .buffers = rx_bufs, .count = ARRAY_SIZE(rx_bufs), }; int err; int i; if (len == 0) { return 0; } /* Maximum transfer size is 256 32-bit words */ __ASSERT_NO_MSG(len % 4 == 0); __ASSERT_NO_MSG(len32 <= 256); for (i = 0; i < len32; i++) { src32[i] = sys_cpu_to_be32(((uint32_t *)src)[i]); } err = spi_transceive_dt(&tcan_config->spi, &tx, &rx); if (err != 0) { LOG_ERR("failed to write addr %u, len %d (err %d)", addr, len, err); return err; } __ASSERT_NO_MSG((global_status & CAN_TCAN4X5X_IR_SPIERR) == 0U); return 0; } static inline int tcan4x5x_read_tcan_reg(const struct device *dev, uint16_t reg, uint32_t *val) { return tcan4x5x_read(dev, reg, val, sizeof(uint32_t)); } static inline int tcan4x5x_write_tcan_reg(const struct device *dev, uint16_t reg, uint32_t val) { return tcan4x5x_write(dev, reg, &val, sizeof(uint32_t)); } static int tcan4x5x_read_mcan_reg(const struct device *dev, uint16_t reg, uint32_t *val) { return tcan4x5x_read(dev, CAN_TCAN4X5X_MCAN_BASE + reg, val, sizeof(uint32_t)); } static int tcan4x5x_write_mcan_reg(const struct device *dev, uint16_t reg, uint32_t val) { return tcan4x5x_write(dev, CAN_TCAN4X5X_MCAN_BASE + reg, &val, sizeof(uint32_t)); } static int tcan4x5x_read_mcan_mram(const struct device *dev, uint16_t offset, void *dst, size_t len) { return tcan4x5x_read(dev, CAN_TCAN4X5X_MRAM_BASE + offset, dst, len); } static int tcan4x5x_write_mcan_mram(const struct device *dev, uint16_t offset, const void *src, size_t len) { return tcan4x5x_write(dev, CAN_TCAN4X5X_MRAM_BASE + offset, src, len); } static int tcan4x5x_clear_mcan_mram(const struct device *dev, uint16_t offset, size_t len) { static const uint8_t buf[256] = {0}; size_t pending; size_t upto; int err; for (upto = 0; upto < len; upto += pending) { pending = MIN(len - upto, sizeof(buf)); err = tcan4x5x_write_mcan_mram(dev, offset, &buf, pending); if (err != 0) { LOG_ERR("failed to clear message RAM (err %d)", err); return err; } offset += pending; } 
return 0; } static int tcan4x5x_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_mcan_config *mcan_config = dev->config; const struct tcan4x5x_config *tcan_config = mcan_config->custom; *rate = tcan_config->clk_freq; return 0; } static void tcan4x5x_int_gpio_callback_handler(const struct device *port, struct gpio_callback *cb, gpio_port_pins_t pins) { struct tcan4x5x_data *tcan_data = CONTAINER_OF(cb, struct tcan4x5x_data, int_gpio_cb); k_sem_give(&tcan_data->int_sem); } static void tcan4x5x_int_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); const struct device *dev = p1; struct can_mcan_data *mcan_data = dev->data; struct tcan4x5x_data *tcan_data = mcan_data->custom; uint32_t status; uint32_t ir; int err; while (true) { k_sem_take(&tcan_data->int_sem, K_FOREVER); err = tcan4x5x_read_tcan_reg(dev, CAN_TCAN4X5X_IR, &ir); if (err != 0) { LOG_ERR("failed to read interrupt register (err %d)", err); continue; } while (ir != 0U) { err = tcan4x5x_write_tcan_reg(dev, CAN_TCAN4X5X_IR, ir & CAN_TCAN4X5X_IR_CLEAR_ALL); if (err != 0) { LOG_ERR("failed to write interrupt register (err %d)", err); break; } if ((ir & CAN_TCAN4X5X_IR_SPIERR) != 0U) { err = tcan4x5x_read_tcan_reg(dev, CAN_TCAN4X5X_STATUS, &status); if (err != 0) { LOG_ERR("failed to read status register (err %d)", err); continue; } LOG_ERR("SPIERR, status = 0x%08x", status); err = tcan4x5x_write_tcan_reg(dev, CAN_TCAN4X5X_STATUS, status & CAN_TCAN4X5X_STATUS_CLEAR_ALL); if (err != 0) { LOG_ERR("failed to write status register (err %d)", err); continue; } } if ((ir & CAN_TCAN4X5X_IR_M_CAN_INT) != 0U) { can_mcan_line_0_isr(dev); can_mcan_line_1_isr(dev); } err = tcan4x5x_read_tcan_reg(dev, CAN_TCAN4X5X_IR, &ir); if (err != 0) { LOG_ERR("failed to read interrupt register (err %d)", err); break; } } } } static int tcan4x5x_wake(const struct device *dev) { #if TCAN4X5X_WAKE_GPIO_SUPPORT const struct can_mcan_config *mcan_config = dev->config; const struct tcan4x5x_config *tcan_config = mcan_config->custom; int wake_needed = 1; int err; #if TCAN4X5X_NWKRQ_GPIO_SUPPORT if (tcan_config->wake_gpio.port != NULL && tcan_config->nwkrq_gpio.port != NULL) { wake_needed = gpio_pin_get_dt(&tcan_config->nwkrq_gpio); if (wake_needed < 0) { LOG_ERR("failed to get nWKRQ status (err %d)", wake_needed); return wake_needed; }; } #endif /* TCAN4X5X_NWKRQ_GPIO_SUPPORT */ if (tcan_config->wake_gpio.port != NULL && wake_needed != 0) { err = gpio_pin_set_dt(&tcan_config->wake_gpio, 1); if (err != 0) { LOG_ERR("failed to assert WAKE GPIO (err %d)", err); return err; } k_busy_wait(CAN_TCAN4X5X_T_WAKE_US); err = gpio_pin_set_dt(&tcan_config->wake_gpio, 0); if (err != 0) { LOG_ERR("failed to deassert WAKE GPIO (err %d)", err); return err; } } #endif /* TCAN4X5X_WAKE_GPIO_SUPPORT*/ return 0; } static int tcan4x5x_reset(const struct device *dev) { const struct can_mcan_config *mcan_config = dev->config; const struct tcan4x5x_config *tcan_config = mcan_config->custom; int err; err = tcan4x5x_wake(dev); if (err != 0) { return err; } #if TCAN4X5X_RST_GPIO_SUPPORT if (tcan_config->rst_gpio.port != NULL) { err = gpio_pin_set_dt(&tcan_config->rst_gpio, 1); if (err != 0) { LOG_ERR("failed to assert RST GPIO (err %d)", err); return err; } k_busy_wait(CAN_TCAN4X5X_T_PULSE_WIDTH_US); err = gpio_pin_set_dt(&tcan_config->rst_gpio, 0); if (err != 0) { LOG_ERR("failed to deassert RST GPIO (err %d)", err); return err; } } else { #endif /* TCAN4X5X_RST_GPIO_SUPPORT */ err = tcan4x5x_write_tcan_reg(dev, CAN_TCAN4X5X_MODE_CONFIG, 
CAN_TCAN4X5X_MODE_CONFIG_DEVICE_RESET); if (err != 0) { LOG_ERR("failed to initiate SW reset (err %d)", err); return err; } #if TCAN4X5X_RST_GPIO_SUPPORT } #endif /* TCAN4X5X_RST_GPIO_SUPPORT */ k_busy_wait(CAN_TCAN4X5X_T_RESET_US); return 0; } static int tcan4x5x_init(const struct device *dev) { const struct can_mcan_config *mcan_config = dev->config; const struct tcan4x5x_config *tcan_config = mcan_config->custom; struct can_mcan_data *mcan_data = dev->data; struct tcan4x5x_data *tcan_data = mcan_data->custom; k_tid_t tid; uint32_t reg; int err; /* Initialize int_sem to 1 to ensure any pending IRQ is serviced */ k_sem_init(&tcan_data->int_sem, 1, 1); if (!spi_is_ready_dt(&tcan_config->spi)) { LOG_ERR("SPI bus not ready"); return -ENODEV; } #if TCAN4X5X_RST_GPIO_SUPPORT if (tcan_config->rst_gpio.port != NULL) { if (!gpio_is_ready_dt(&tcan_config->rst_gpio)) { LOG_ERR("RST GPIO not ready"); return -ENODEV; } err = gpio_pin_configure_dt(&tcan_config->rst_gpio, GPIO_OUTPUT_INACTIVE); if (err != 0) { LOG_ERR("failed to configure RST GPIO (err %d)", err); return -ENODEV; } } #endif /* TCAN4X5X_RST_GPIO_SUPPORT */ #if TCAN4X5X_NWKRQ_GPIO_SUPPORT if (tcan_config->nwkrq_gpio.port != NULL) { if (!gpio_is_ready_dt(&tcan_config->nwkrq_gpio)) { LOG_ERR("nWKRQ GPIO not ready"); return -ENODEV; } err = gpio_pin_configure_dt(&tcan_config->nwkrq_gpio, GPIO_INPUT); if (err != 0) { LOG_ERR("failed to configure nWKRQ GPIO (err %d)", err); return -ENODEV; } } #endif /* TCAN4X5X_NWKRQ_GPIO_SUPPORT */ #if TCAN4X5X_WAKE_GPIO_SUPPORT if (tcan_config->wake_gpio.port != NULL) { if (!gpio_is_ready_dt(&tcan_config->wake_gpio)) { LOG_ERR("WAKE GPIO not ready"); return -ENODEV; } err = gpio_pin_configure_dt(&tcan_config->wake_gpio, GPIO_OUTPUT_INACTIVE); if (err != 0) { LOG_ERR("failed to configure WAKE GPIO (err %d)", err); return -ENODEV; } } #endif /* TCAN4X5X_WAKE_GPIO_SUPPORT */ if (!gpio_is_ready_dt(&tcan_config->int_gpio)) { LOG_ERR("nINT GPIO not ready"); return -ENODEV; } err = gpio_pin_configure_dt(&tcan_config->int_gpio, GPIO_INPUT); if (err != 0) { LOG_ERR("failed to configure nINT GPIO (err %d)", err); return -ENODEV; } gpio_init_callback(&tcan_data->int_gpio_cb, tcan4x5x_int_gpio_callback_handler, BIT(tcan_config->int_gpio.pin)); err = gpio_add_callback_dt(&tcan_config->int_gpio, &tcan_data->int_gpio_cb); if (err != 0) { LOG_ERR("failed to add nINT GPIO callback (err %d)", err); return -ENODEV; } /* Initialize nINT GPIO callback and interrupt handler thread to ACK any early SPIERR */ err = gpio_pin_interrupt_configure_dt(&tcan_config->int_gpio, GPIO_INT_EDGE_TO_ACTIVE); if (err != 0) { LOG_ERR("failed to configure nINT GPIO interrupt (err %d)", err); return -ENODEV; } tid = k_thread_create(&tcan_data->int_thread, tcan_data->int_stack, K_KERNEL_STACK_SIZEOF(tcan_data->int_stack), tcan4x5x_int_thread, (void *)dev, NULL, NULL, CONFIG_CAN_TCAN4X5X_THREAD_PRIO, 0, K_NO_WAIT); k_thread_name_set(tid, "tcan4x5x"); /* Reset TCAN */ err = tcan4x5x_reset(dev); if (err != 0) { return -ENODEV; } #if CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG uint32_t info[3]; /* Read DEVICE_ID1, DEVICE_ID2, and REVISION registers */ err = tcan4x5x_read(dev, CAN_TCAN4X5X_DEVICE_ID1, &info, sizeof(info)); if (err != 0) { return -EIO; } LOG_DBG("%c%c%c%c%c%c%c%c, SPI 2 rev. %lu, device rev. 
ID %lu.%lu", (char)FIELD_GET(GENMASK(7, 0), info[0]), (char)FIELD_GET(GENMASK(15, 8), info[0]), (char)FIELD_GET(GENMASK(23, 16), info[0]), (char)FIELD_GET(GENMASK(31, 24), info[0]), (char)FIELD_GET(GENMASK(7, 0), info[1]), (char)FIELD_GET(GENMASK(15, 8), info[1]), (char)FIELD_GET(GENMASK(23, 16), info[1]), (char)FIELD_GET(GENMASK(31, 24), info[1]), FIELD_GET(GENMASK(31, 24), info[2]), FIELD_GET(GENMASK(15, 8), info[2]), FIELD_GET(GENMASK(7, 0), info[2])); #endif /* CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG */ /* Set TCAN4x5x mode normal */ err = tcan4x5x_read_tcan_reg(dev, CAN_TCAN4X5X_MODE_CONFIG, &reg); if (err != 0) { LOG_ERR("failed to read configuration register (err %d)", err); return -ENODEV; } reg &= ~(CAN_TCAN4X5X_MODE_CONFIG_MODE_SEL); reg |= FIELD_PREP(CAN_TCAN4X5X_MODE_CONFIG_MODE_SEL, 0x02); reg |= CAN_TCAN4X5X_MODE_CONFIG_WAKE_CONFIG; if (tcan_config->clk_freq == MHZ(20)) { /* 20 MHz frequency reference */ reg &= ~(CAN_TCAN4X5X_MODE_CONFIG_CLK_REF); } else { /* 40 MHz frequency reference */ reg |= CAN_TCAN4X5X_MODE_CONFIG_CLK_REF; } err = tcan4x5x_write_tcan_reg(dev, CAN_TCAN4X5X_MODE_CONFIG, reg); if (err != 0) { LOG_ERR("failed to write configuration register (err %d)", err); return -ENODEV; } /* Wait for standby to normal mode switch */ k_busy_wait(CAN_TCAN4X5X_T_MODE_STBY_NOM_US); /* Configure Message RAM */ err = can_mcan_configure_mram(dev, CAN_TCAN4X5X_MRAM_BASE, CAN_TCAN4X5X_MRAM_BASE); if (err != 0) { return -EIO; } /* Initialize M_CAN */ err = can_mcan_init(dev); if (err != 0) { LOG_ERR("failed to initialize mcan (err %d)", err); return err; } return 0; } static const struct can_driver_api tcan4x5x_driver_api = { .get_capabilities = can_mcan_get_capabilities, .start = can_mcan_start, .stop = can_mcan_stop, .set_mode = can_mcan_set_mode, .set_timing = can_mcan_set_timing, .send = can_mcan_send, .add_rx_filter = can_mcan_add_rx_filter, .remove_rx_filter = can_mcan_remove_rx_filter, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_mcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .get_state = can_mcan_get_state, .set_state_change_callback = can_mcan_set_state_change_callback, .get_core_clock = tcan4x5x_get_core_clock, .get_max_filters = can_mcan_get_max_filters, .timing_min = CAN_MCAN_TIMING_MIN_INITIALIZER, .timing_max = CAN_MCAN_TIMING_MAX_INITIALIZER, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_mcan_set_timing_data, .timing_data_min = CAN_MCAN_TIMING_DATA_MIN_INITIALIZER, .timing_data_max = CAN_MCAN_TIMING_DATA_MAX_INITIALIZER, #endif /* CONFIG_CAN_FD_MODE */ }; static const struct can_mcan_ops tcan4x5x_ops = { .read_reg = tcan4x5x_read_mcan_reg, .write_reg = tcan4x5x_write_mcan_reg, .read_mram = tcan4x5x_read_mcan_mram, .write_mram = tcan4x5x_write_mcan_mram, .clear_mram = tcan4x5x_clear_mcan_mram, }; #if TCAN4X5X_RST_GPIO_SUPPORT #define TCAN4X5X_RST_GPIO_INIT(inst) \ .rst_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, reset_gpios, {0}), #else /* TCAN4X5X_RST_GPIO_SUPPORT */ #define TCAN4X5X_RST_GPIO_INIT(inst) #endif /* !TCAN4X5X_RST_GPIO_SUPPORT */ #if TCAN4X5X_NWKRQ_GPIO_SUPPORT #define TCAN4X5X_NWKRQ_GPIO_INIT(inst) \ .nwkrq_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, device_state_gpios, {0}), #else /* TCAN4X5X_NWKRQ_GPIO_SUPPORT */ #define TCAN4X5X_NWKRQ_GPIO_INIT(inst) #endif /* !TCAN4X5X_NWKRQ_GPIO_SUPPORT */ #if TCAN4X5X_WAKE_GPIO_SUPPORT #define TCAN4X5X_WAKE_GPIO_INIT(inst) \ .wake_gpio = GPIO_DT_SPEC_INST_GET_OR(inst, device_wake_gpios, {0}), #else /* TCAN4X5X_WAKE_GPIO_SUPPORT */ #define TCAN4X5X_WAKE_GPIO_INIT(inst) #endif /* 
!TCAN4X5X_WAKE_GPIO_SUPPORT */ #define TCAN4X5X_INIT(inst) \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_OFFSET(inst) == 0, "MRAM offset must be 0"); \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_ELEMENTS_SIZE(inst) <= CAN_TCAN4X5X_MRAM_SIZE, \ "Insufficient Message RAM size to hold elements"); \ \ CAN_MCAN_DT_INST_BUILD_ASSERT_MRAM_CFG(inst); \ CAN_MCAN_DT_INST_CALLBACKS_DEFINE(inst, tcan4x5x_cbs_##inst); \ \ static const struct tcan4x5x_config tcan4x5x_config_##inst = { \ .spi = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8), 0), \ .int_gpio = GPIO_DT_SPEC_INST_GET(inst, int_gpios), \ .clk_freq = DT_INST_PROP(inst, clock_frequency), \ TCAN4X5X_RST_GPIO_INIT(inst) \ TCAN4X5X_NWKRQ_GPIO_INIT(inst) \ TCAN4X5X_WAKE_GPIO_INIT(inst) \ }; \ \ static const struct can_mcan_config can_mcan_config_##inst = CAN_MCAN_DT_CONFIG_INST_GET( \ inst, &tcan4x5x_config_##inst, &tcan4x5x_ops, &tcan4x5x_cbs_##inst); \ \ static struct tcan4x5x_data tcan4x5x_data_##inst; \ \ static struct can_mcan_data can_mcan_data_##inst = \ CAN_MCAN_DATA_INITIALIZER(&tcan4x5x_data_##inst); \ \ CAN_DEVICE_DT_INST_DEFINE(inst, tcan4x5x_init, NULL, &can_mcan_data_##inst, \ &can_mcan_config_##inst, POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &tcan4x5x_driver_api); DT_INST_FOREACH_STATUS_OKAY(TCAN4X5X_INIT) ```
/content/code_sandbox/drivers/can/can_tcan4x5x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,815
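
The SPI framing built in `tcan4x5x_read()`/`tcan4x5x_write()` above is a 4-byte header: the burst op-code (`0x41` read, `0x61` write), the 16-bit register address MSB first, and the payload length counted in 32-bit words, with a full 256-word burst encoded as 0. A minimal sketch of that header encoding, factored into a standalone helper for illustration only (the helper name and the self-test are not part of the driver):

```c
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define TCAN4X5X_OP_WRITE_B_FL 0x61u /* burst write, fixed length */
#define TCAN4X5X_OP_READ_B_FL  0x41u /* burst read, fixed length */

/* Encode the 4-byte SPI header for a burst transfer of `len` bytes.
 * `len` must be a non-zero multiple of 4 and at most 1024 (256 words).
 */
static void tcan4x5x_encode_header(uint8_t cmd[4], uint8_t opcode,
				   uint16_t addr, size_t len)
{
	size_t len32 = len / sizeof(uint32_t);

	assert(len % 4 == 0 && len32 >= 1 && len32 <= 256);

	cmd[0] = opcode;
	cmd[1] = (addr >> 8) & 0xFF;                  /* address, MSB first */
	cmd[2] = addr & 0xFF;
	cmd[3] = (len32 == 256) ? 0 : (uint8_t)len32; /* 256 words wraps to 0 */
}

int main(void)
{
	uint8_t cmd[4];

	/* Read one 32-bit register in the M_CAN window (base 0x1000) */
	tcan4x5x_encode_header(cmd, TCAN4X5X_OP_READ_B_FL, 0x1000, 4);
	assert(cmd[0] == 0x41 && cmd[1] == 0x10 && cmd[2] == 0x00 && cmd[3] == 1);

	return 0;
}
```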
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/sys/atomic.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/device.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <CanEXCEL_Ip_HwAccess.h> #include <CanEXCEL_Ip_Irq.h> #define DT_DRV_COMPAT nxp_s32_canxl /* * Convert from RX message buffer index to allocated filter ID and * vice versa. */ #ifdef CONFIG_CAN_NXP_S32_RX_FIFO #define RX_MBIDX_TO_ALLOC_IDX(x) (x) #define ALLOC_IDX_TO_RXMB_IDX(x) (x) #else #define RX_MBIDX_TO_ALLOC_IDX(x) (x - CONFIG_CAN_NXP_S32_MAX_TX) #define ALLOC_IDX_TO_RXMB_IDX(x) (x + CONFIG_CAN_NXP_S32_MAX_TX) #endif /* * Convert from TX message buffer index to allocated TX ID and vice * versa. */ #define TX_MBIDX_TO_ALLOC_IDX(x) (x) #define ALLOC_IDX_TO_TXMB_IDX(x) (x) #define CAN_NXP_S32_TIMEOUT_MS 1 #define CAN_NXP_S32_MAX_BITRATE 8000000 #define CAN_NXP_S32_DATA_LENGTH 64 #define CAN_NXP_S32_TDCO_MAX FIELD_GET(CANXL_SIC_BTDCC_FTDCOFF_MASK, CANXL_SIC_BTDCC_FTDCOFF_MASK) #ifdef CONFIG_CAN_NXP_S32_RX_FIFO /* RX FIFO depth is fixed to the maximum value */ #define CAN_NXP_S32_RX_FIFO_DEPTH 32 /* RX FIFO water mark equal 1 that allows the interrupt is generated after 1 message received */ #define CAN_NXP_S32_RX_FIFO_WATERMARK 1 #endif #if defined(CONFIG_CAN_FD_MODE) && defined(CONFIG_CAN_NXP_S32_RX_FIFO) #define CAN_NXP_S32_FD_MODE 1 #endif #define BCANXL_ST_STFERR_MASK BIT(4) #define BCANXL_ST_FRMERR_MASK BIT(5) #define BCANXL_ST_CRCERR_MASK BIT(6) #define BCANXL_ST_ACKERR_MASK BIT(7) #define BCANXL_ST_BIT0ERR_MASK BIT(8) #define BCANXL_ST_BIT1ERR_MASK BIT(9) #define BCANXL_ST_OVRERR_MASK BIT(15) #define BCANXL_ST_FLTCONF(x) (uint8_t)((x << BCANXL_ST_FLTCONF_SHIFT) & BCANXL_ST_FLTCONF_MASK) #define CANXL_CLEAR_ALL_ERRORS 0x7FFF0F00U LOG_MODULE_REGISTER(nxp_s32_canxl, CONFIG_CAN_LOG_LEVEL); struct can_nxp_s32_config { const struct can_driver_config common; CANXL_SIC_Type *base_sic; #ifdef CONFIG_CAN_NXP_S32_RX_FIFO CANXL_RXFIFO_Type * base_rx_fifo; CANXL_RXFIFO_CONTROL_Type *base_rx_fifo_ctrl; #endif CANXL_MRU_Type * base_mru; uint8 instance; const struct device *clock_dev; clock_control_subsys_t clock_subsys; const struct pinctrl_dev_config *pin_cfg; Canexcel_Ip_ConfigType *can_cfg; void (*irq_config_func)(void); }; struct can_nxp_s32_tx_callback { Canexcel_Ip_DataInfoType tx_info; can_tx_callback_t function; void *arg; }; struct can_nxp_s32_rx_callback { struct can_filter filter; #ifndef CONFIG_CAN_NXP_S32_RX_FIFO Canexcel_Ip_DataInfoType rx_info; #endif can_rx_callback_t function; void *arg; }; struct can_nxp_s32_data { struct can_driver_data common; Canexcel_Ip_StateType *can_state; ATOMIC_DEFINE(rx_allocs, CONFIG_CAN_NXP_S32_MAX_RX); struct k_mutex rx_mutex; struct can_nxp_s32_rx_callback rx_cbs[CONFIG_CAN_NXP_S32_MAX_RX]; #ifndef CONFIG_CAN_NXP_S32_RX_FIFO Canexcel_RxFdMsg *rx_msg; #endif ATOMIC_DEFINE(tx_allocs, CONFIG_CAN_NXP_S32_MAX_TX); struct k_sem tx_allocs_sem; struct k_mutex tx_mutex; struct can_nxp_s32_tx_callback tx_cbs[CONFIG_CAN_NXP_S32_MAX_TX]; Canexcel_TxFdMsgType *tx_msg; #ifdef CONFIG_CAN_NXP_S32_RX_FIFO Canexcel_Ip_RxFifoFilterID_ADDR * rx_fifo_filter; Canexcel_RxFdMsg *rx_fifo; #endif struct can_timing timing; #ifdef CAN_NXP_S32_FD_MODE struct can_timing timing_data; #endif enum can_state state; }; static int can_nxp_s32_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | 
CAN_MODE_LISTENONLY; if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { *cap |= CAN_MODE_MANUAL_RECOVERY; } if (IS_ENABLED(CAN_NXP_S32_FD_MODE)) { *cap |= CAN_MODE_FD; } return 0; } #ifdef CONFIG_CAN_NXP_S32_RX_FIFO static void can_nxp_s32_config_rx_fifo_filter(const struct device *dev, int filter_id) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; /* Lock the RxFIFO by System by reading register */ (void)config->base_rx_fifo_ctrl->RXFSYSLOCK; CanXL_ConfigIDFilter(config->base_rx_fifo, &data->rx_fifo_filter[filter_id], filter_id); if ((config->base_rx_fifo_ctrl->RXFCSTA & CANXL_RXFIFO_CONTROL_RXFCSTA_SYSLOCK_MASK) == CANXL_RXFIFO_CONTROL_RXFCSTA_SYSLOCK_MASK) { /* Clear the sys lock to enable transfers */ config->base_rx_fifo_ctrl->RXFSYSLOCK = CANXL_RXFIFO_CONTROL_RXFSYSLOCK_SYSLOCK_MASK; } } /* Get the RxFiFO filter matched with the received RxFIFO message queue */ static inline int can_nxp_s32_get_rx_fifo_filter(struct can_nxp_s32_data *data, uint8_t queue_idx) { int alloc = -ENOSPC; uint32_t mask; for (int filter_id = 0; filter_id < CONFIG_CAN_NXP_S32_MAX_RX; filter_id++) { mask = data->rx_fifo_filter[filter_id].idAddrFilterL; if (mask == 0) { continue; } if ((data->rx_fifo[queue_idx].Header.Id & mask) == (data->rx_fifo_filter[filter_id].idAddrFilterH & mask)) { alloc = filter_id; break; } } return alloc; } #endif static int can_nxp_s32_start(const struct device *dev) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; int err; if (data->common.started) { return -EALREADY; } if (config->common.phy != NULL) { err = can_transceiver_enable(config->common.phy, data->common.mode); if (err != 0) { LOG_ERR("failed to enable CAN transceiver (err %d)", err); return err; } } /* Reset statistics and clear all errors */ CAN_STATS_RESET(dev); Canexcel_Ip_ClearErrorStatus(config->instance, CANXL_CLEAR_ALL_ERRORS); data->common.started = true; return 0; } static int can_nxp_s32_stop(const struct device *dev) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; can_tx_callback_t function; void *arg; int alloc; int err; if (!data->common.started) { return -EALREADY; } data->common.started = false; /* Abort any pending TX frames before entering freeze mode */ for (alloc = 0; alloc < CONFIG_CAN_NXP_S32_MAX_TX; alloc++) { function = data->tx_cbs[alloc].function; arg = data->tx_cbs[alloc].arg; if (atomic_test_and_clear_bit(data->tx_allocs, alloc)) { if (Canexcel_Ip_DeactivateMD(config->instance, ALLOC_IDX_TO_TXMB_IDX(alloc))) { LOG_ERR("Can't abort message !"); }; function(dev, -ENETDOWN, arg); k_sem_give(&data->tx_allocs_sem); } } if (config->common.phy != NULL) { err = can_transceiver_disable(config->common.phy); if (err != 0) { LOG_ERR("failed to disable CAN transceiver (err %d)", err); return err; } } return 0; } static void can_nxp_s32_set_fd_mode(CANXL_SIC_Type *base, boolean enable_fd, boolean enable_brs) { base->BCFG2 = (base->BCFG2 & ~CANXL_SIC_BCFG2_FDEN_MASK) | CANXL_SIC_BCFG2_FDEN(enable_fd ? 1UL : 0UL); base->BCFG1 = (base->BCFG1 & ~CANXL_SIC_BCFG1_FDRSDIS_MASK) | CANXL_SIC_BCFG1_FDRSDIS(enable_brs ? 
0UL : 1UL); base->BTDCC &= ~(CANXL_SIC_BTDCC_FTDCEN_MASK | CANXL_SIC_BTDCC_FTDCOFF_MASK); } static int can_nxp_s32_set_mode(const struct device *dev, can_mode_t mode) { can_mode_t supported = CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY; const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; Canexcel_Ip_ModesType can_nxp_s32_mode = CAN_MODE_NORMAL; bool canfd = false; bool brs = false; if (data->common.started) { return -EBUSY; } if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { supported |= CAN_MODE_MANUAL_RECOVERY; } if (IS_ENABLED(CAN_NXP_S32_FD_MODE)) { supported |= CAN_MODE_FD; } if ((mode & ~(supported)) != 0) { LOG_ERR("unsupported mode: 0x%08x", mode); return -ENOTSUP; } if ((mode & (CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY)) == (CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY)) { LOG_ERR("unsupported mode loopback and " "mode listen-only at the same time: 0x%08x", mode); return -ENOTSUP; } canfd = !!(mode & CAN_MODE_FD); brs = canfd; if (mode & CAN_MODE_LISTENONLY) { can_nxp_s32_mode = CANEXCEL_LISTEN_ONLY_MODE; } else if (mode & CAN_MODE_LOOPBACK) { can_nxp_s32_mode = CANEXCEL_LOOPBACK_MODE; } Canexcel_Ip_EnterFreezeMode(config->instance); can_nxp_s32_set_fd_mode(config->base_sic, canfd, brs); if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { Canexcel_Ip_StatusType status; uint32_t options = 0U; if ((mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { options = CANXL_IP_BUSOFF_RECOVERY_U32; } status = CanXL_ConfigCtrlOptions(config->base_sic, options); if (status != CANEXCEL_STATUS_SUCCESS) { return -EIO; } } CanXL_SetOperationMode(config->base_sic, can_nxp_s32_mode); Canexcel_Ip_ExitFreezeMode(config->instance); data->common.mode = mode; return 0; } static int can_nxp_s32_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_nxp_s32_config *config = dev->config; __ASSERT_NO_MSG(rate != NULL); return clock_control_get_rate(config->clock_dev, config->clock_subsys, rate); } static int can_nxp_s32_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(ide); return CONFIG_CAN_NXP_S32_MAX_RX; } static int can_nxp_s32_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; uint8_t bcanxl_status; if (state) { if (!data->common.started) { *state = CAN_STATE_STOPPED; } else { Canexcel_Ip_GetControllerStatus(config->instance, &bcanxl_status); if (bcanxl_status & BCANXL_ST_FLTCONF(2)) { *state = CAN_STATE_BUS_OFF; } else if (bcanxl_status & BCANXL_ST_FLTCONF(1)) { *state = CAN_STATE_ERROR_PASSIVE; } else if (config->base_sic->SYSS & (CANXL_SIC_SYSS_CRXWRN_MASK | CANXL_SIC_SYSS_CTXWRN_MASK)) { *state = CAN_STATE_ERROR_WARNING; } else { *state = CAN_STATE_ERROR_ACTIVE; } } } if (err_cnt) { Canexcel_Ip_GetControllerTxErrorCounter(config->instance, &err_cnt->tx_err_cnt); Canexcel_Ip_GetControllerRxErrorCounter(config->instance, &err_cnt->rx_err_cnt); } return 0; } static void can_nxp_s32_set_state_change_callback(const struct device *dev, can_state_change_callback_t callback, void *user_data) { struct can_nxp_s32_data *data = dev->data; data->common.state_change_cb = callback; data->common.state_change_cb_user_data = user_data; } #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE static int can_nxp_s32_recover(const struct device *dev, k_timeout_t timeout) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; enum can_state state; uint64_t start_time; int ret = 0; if 
(!data->common.started) { return -ENETDOWN; } if ((data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { return -ENOTSUP; } can_nxp_s32_get_state(dev, &state, NULL); if (state != CAN_STATE_BUS_OFF) { return 0; } start_time = k_uptime_ticks(); config->base_sic->BCFG1 &= (~CANXL_SIC_BCFG1_ABRDIS_MASK); if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { can_nxp_s32_get_state(dev, &state, NULL); while (state == CAN_STATE_BUS_OFF) { if (!K_TIMEOUT_EQ(timeout, K_FOREVER) && k_uptime_ticks() - start_time >= timeout.ticks) { ret = -EAGAIN; } can_nxp_s32_get_state(dev, &state, NULL); } } config->base_sic->BCFG1 |= CANXL_SIC_BCFG1_ABRDIS_MASK; return ret; } #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ static void can_nxp_s32_remove_rx_filter(const struct device *dev, int filter_id) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; int mb_indx = ALLOC_IDX_TO_RXMB_IDX(filter_id); if (filter_id < 0 || filter_id >= CONFIG_CAN_NXP_S32_MAX_RX) { LOG_ERR("filter ID %d out of bounds", filter_id); return; } k_mutex_lock(&data->rx_mutex, K_FOREVER); if (atomic_test_and_clear_bit(data->rx_allocs, filter_id)) { #ifdef CONFIG_CAN_NXP_S32_RX_FIFO data->rx_fifo_filter[mb_indx].idAddrFilterL = 0; data->rx_fifo_filter[mb_indx].idAddrFilterH = 0; Canexcel_Ip_EnterFreezeMode(config->instance); can_nxp_s32_config_rx_fifo_filter(dev, mb_indx); Canexcel_Ip_ExitFreezeMode(config->instance); #else if (Canexcel_Ip_DeactivateMD(config->instance, mb_indx)) { LOG_ERR("Can't abort message !"); }; #endif data->rx_cbs[filter_id].function = NULL; data->rx_cbs[filter_id].arg = NULL; data->rx_cbs[filter_id].filter = (struct can_filter){0}; } else { LOG_WRN("Filter ID %d already detached", filter_id); } k_mutex_unlock(&data->rx_mutex); } static int can_nxp_s32_add_rx_filter(const struct device *dev, can_rx_callback_t callback, void *user_data, const struct can_filter *filter) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; int alloc = -ENOSPC; int mb_indx; uint32_t mask; if ((filter->flags & ~(CAN_FILTER_IDE)) != 0) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } k_mutex_lock(&data->rx_mutex, K_FOREVER); /* Find and allocate RX message buffer */ for (int i = 0; i < CONFIG_CAN_NXP_S32_MAX_RX; i++) { if (!atomic_test_and_set_bit(data->rx_allocs, i)) { alloc = i; break; } } if (alloc == -ENOSPC) { LOG_ERR("No free filter bank found"); goto unlock; } data->rx_cbs[alloc].function = callback; data->rx_cbs[alloc].arg = user_data; data->rx_cbs[alloc].filter = *filter; /* Set Rx Mb individual mask for */ mb_indx = ALLOC_IDX_TO_RXMB_IDX(alloc); if (!!(filter->flags & CAN_FILTER_IDE)) { mask = filter->mask & CANXL_IP_ID_EXT_MASK; } else { mask = (filter->mask << CANXL_IP_ID_STD_SHIFT) & CANXL_IP_ID_STD_MASK; } #ifndef CONFIG_CAN_ACCEPT_RTR mask |= CANXL_MSG_DESCRIPTORS_MDFLT1FD_RTRMSK_MASK; #endif /* !CONFIG_CAN_ACCEPT_RTR */ Canexcel_Ip_EnterFreezeMode(config->instance); #ifdef CONFIG_CAN_NXP_S32_RX_FIFO uint32_t filter_id; if (!!(filter->flags & CAN_FILTER_IDE)) { filter_id = filter->id & CANXL_IP_ID_EXT_MASK; } else { filter_id = (filter->id << CANXL_IP_ID_STD_SHIFT) & CANXL_IP_ID_STD_MASK; } data->rx_fifo_filter[mb_indx].filterType = CANEXCEL_IP_RX_FIFO_MASK_FILTER; data->rx_fifo_filter[mb_indx].idAddrFilterL = mask; data->rx_fifo_filter[mb_indx].idAddrFilterH = filter_id; can_nxp_s32_config_rx_fifo_filter(dev, mb_indx); #else data->rx_cbs[alloc].rx_info = (Canexcel_Ip_DataInfoType) { .frame = 
CANEXCEL_CLASIC_FRAME, .idType = !!(filter->flags & CAN_FILTER_IDE) ? CANEXCEL_MSG_ID_EXT : CANEXCEL_MSG_ID_STD, .dataLength = CAN_NXP_S32_DATA_LENGTH, }; Canexcel_Ip_SetRxIndividualMask(config->instance, mb_indx, data->rx_cbs[alloc].rx_info.frame, mask); Canexcel_Ip_ConfigRx(config->instance, mb_indx, filter->id, &data->rx_cbs[alloc].rx_info); Canexcel_Ip_ReceiveFD(config->instance, mb_indx, &data->rx_msg[alloc], FALSE); #endif Canexcel_Ip_ExitFreezeMode(config->instance); unlock: k_mutex_unlock(&data->rx_mutex); return alloc; } static int can_nxp_s32_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { const struct can_nxp_s32_config *config = dev->config; uint8_t data_length = can_dlc_to_bytes(frame->dlc); struct can_nxp_s32_data *data = dev->data; Canexcel_Ip_StatusType status; enum can_state state; int alloc, mb_indx; #ifdef CAN_NXP_S32_FD_MODE if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if ((frame->flags & CAN_FRAME_FDF) != 0 && (config->base_sic->BCFG2 & CANXL_SIC_BCFG2_FDEN_MASK) == 0) { LOG_ERR("CAN FD format not supported in non-FD mode"); return -ENOTSUP; } if ((frame->flags & CAN_FRAME_BRS) != 0 && ~(config->base_sic->BCFG1 & CANXL_SIC_BCFG1_FDRSDIS_MASK) == 0) { LOG_ERR("CAN FD BRS not supported in non-FD mode"); return -ENOTSUP; } #else if ((frame->flags & ~CAN_FRAME_IDE) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } #endif if (data_length > sizeof(frame->data)) { LOG_ERR("data length (%d) > max frame data length (%d)", data_length, sizeof(frame->data)); return -EINVAL; } if ((frame->flags & CAN_FRAME_FDF) == 0) { if (frame->dlc > CAN_MAX_DLC) { LOG_ERR("DLC of %d for non-FD format frame", frame->dlc); return -EINVAL; } #ifdef CAN_NXP_S32_FD_MODE } else { if (frame->dlc > CANFD_MAX_DLC) { LOG_ERR("DLC of %d for CAN FD format frame", frame->dlc); return -EINVAL; } #endif } if (!data->common.started) { return -ENETDOWN; } can_nxp_s32_get_state(dev, &state, NULL); if (state == CAN_STATE_BUS_OFF) { LOG_ERR("Transmit failed, bus-off"); return -ENETUNREACH; } if (k_sem_take(&data->tx_allocs_sem, timeout) != 0) { return -EAGAIN; } for (alloc = 0; alloc < CONFIG_CAN_NXP_S32_MAX_TX; alloc++) { if (!atomic_test_and_set_bit(data->tx_allocs, alloc)) { break; } } data->tx_cbs[alloc].function = callback; data->tx_cbs[alloc].arg = user_data; mb_indx = ALLOC_IDX_TO_TXMB_IDX(alloc); data->tx_cbs[alloc].tx_info = (Canexcel_Ip_DataInfoType) { .frame = !!(frame->flags & CAN_FRAME_FDF) ? CANEXCEL_FD_FRAME : CANEXCEL_CLASIC_FRAME, .enable_brs = !!(frame->flags & CAN_FRAME_BRS) ? TRUE : FALSE, .idType = !!(frame->flags & CAN_FRAME_IDE) ? CANEXCEL_MSG_ID_EXT : CANEXCEL_MSG_ID_STD, .priority = 0, .fd_padding = 0, .dataLength = data_length, .is_polling = FALSE }; LOG_DBG("%s: Sending %d bytes Tx Mb %d, " "Tx Id: 0x%x, " "Id type: %s %s %s %s", dev->name, data_length, mb_indx, frame->id, !!(frame->flags & CAN_FRAME_IDE) ? "extended" : "standard", !!(frame->flags & CAN_FRAME_RTR) ? "RTR" : "", !!(frame->flags & CAN_FRAME_FDF) ? "FD frame" : "", !!(frame->flags & CAN_FRAME_BRS) ? 
"BRS" : ""); k_mutex_lock(&data->tx_mutex, K_FOREVER); /* Send MB Interrupt */ status = Canexcel_Ip_SendFDMsg(config->instance, mb_indx, &data->tx_cbs[alloc].tx_info, frame->id, (uint8_t *)&frame->data, &data->tx_msg[alloc]); k_mutex_unlock(&data->tx_mutex); if (status != CANEXCEL_STATUS_SUCCESS) { return -EIO; } return 0; } static void nxp_s32_zcan_timing_to_canxl_timing(const struct can_timing *timing, Canexcel_Ip_TimeSegmentType *canxl_timing) { LOG_DBG("propSeg: %d, phase_seg1: %d, phase_seg2: %d, prescaler: %d, sjw: %d", timing->prop_seg, timing->phase_seg1, timing->phase_seg2, timing->prescaler, timing->sjw); canxl_timing->propSeg = timing->prop_seg - 1U; canxl_timing->phaseSeg1 = timing->phase_seg1 - 1U; canxl_timing->phaseSeg2 = timing->phase_seg2 - 1U; canxl_timing->preDivider = timing->prescaler - 1U; canxl_timing->rJumpwidth = timing->sjw - 1U; } static int can_nxp_s32_set_timing(const struct device *dev, const struct can_timing *timing) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; Canexcel_Ip_TimeSegmentType can_time_segment = {0}; if (data->common.started) { return -EBUSY; } nxp_s32_zcan_timing_to_canxl_timing(timing, &can_time_segment); /* Set timing for CAN instance*/ CanXL_SetBaudRate(config->base_sic, &can_time_segment); return 0; } #ifdef CAN_NXP_S32_FD_MODE static int can_nxp_s32_set_timing_data(const struct device *dev, const struct can_timing *timing_data) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; Canexcel_Ip_TimeSegmentType can_fd_time_segment = {0}; if (data->common.started) { return -EBUSY; } nxp_s32_zcan_timing_to_canxl_timing(timing_data, &can_fd_time_segment); /* Set timing for CAN FD instance*/ CanXL_SetFDBaudRate(config->base_sic, &can_fd_time_segment); Canexcel_Ip_SetTDCOffsetFD(config->instance, true, false, CAN_CALC_TDCO((timing_data), 0U, CAN_NXP_S32_TDCO_MAX)); return 0; } #endif static void can_nxp_s32_err_callback(const struct device *dev, Canexcel_Ip_EventType eventType, uint32_t u32SysStatus, const Canexcel_Ip_StateType *canexcelState) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; enum can_state state; struct can_bus_err_cnt err_cnt; void *cb_data = data->common.state_change_cb_user_data; can_tx_callback_t function; int alloc; void *arg; switch (eventType) { case CANEXCEL_EVENT_TX_WARNING: LOG_DBG("Tx Warning (error 0x%x)", u32SysStatus); break; case CANEXCEL_EVENT_RX_WARNING: LOG_DBG("Rx Warning (error 0x%x)", u32SysStatus); break; case CANEXCEL_EVENT_BUSOFF: LOG_DBG("Bus Off (error 0x%x)", u32SysStatus); break; case CANEXCEL_EVENT_ERROR: LOG_DBG("Error Format Frames (error 0x%x)", u32SysStatus); break; case CANEXCEL_EVENT_ERROR_FD: LOG_DBG("Error Data Phase (error 0x%x)", u32SysStatus); break; case CANEXCEL_EVENT_PASSIVE: LOG_DBG("Error Passive (error 0x%x)", u32SysStatus); break; default: break; } if (u32SysStatus & BCANXL_ST_BIT0ERR_MASK) { CAN_STATS_BIT0_ERROR_INC(dev); } if (u32SysStatus & BCANXL_ST_BIT1ERR_MASK) { CAN_STATS_BIT1_ERROR_INC(dev); } if (u32SysStatus & BCANXL_ST_ACKERR_MASK) { CAN_STATS_ACK_ERROR_INC(dev); } if (u32SysStatus & BCANXL_ST_STFERR_MASK) { CAN_STATS_STUFF_ERROR_INC(dev); } if (u32SysStatus & BCANXL_ST_FRMERR_MASK) { CAN_STATS_FORM_ERROR_INC(dev); } if (u32SysStatus & BCANXL_ST_CRCERR_MASK) { CAN_STATS_CRC_ERROR_INC(dev); } if (u32SysStatus & BCANXL_ST_OVRERR_MASK) { CAN_STATS_RX_OVERRUN_INC(dev); } can_nxp_s32_get_state(dev, &state, &err_cnt); if 
(data->state != state) { data->state = state; if (data->common.state_change_cb) { data->common.state_change_cb(dev, state, err_cnt, cb_data); } } if (state == CAN_STATE_BUS_OFF) { /* Abort any pending TX frames in case of bus-off */ for (alloc = 0; alloc < CONFIG_CAN_NXP_S32_MAX_TX; alloc++) { /* Copy callback function and argument before clearing bit */ function = data->tx_cbs[alloc].function; arg = data->tx_cbs[alloc].arg; if (atomic_test_and_clear_bit(data->tx_allocs, alloc)) { if (Canexcel_Ip_DeactivateMD(config->instance, ALLOC_IDX_TO_TXMB_IDX(alloc))) { LOG_ERR("Can't abort message !"); }; function(dev, -ENETUNREACH, arg); k_sem_give(&data->tx_allocs_sem); } } } } static void nxp_s32_msg_data_to_zcan_frame(Canexcel_RxFdMsg msg_data, struct can_frame *frame) { memset(frame, 0, sizeof(*frame)); if (!!(msg_data.Header.Id & CANXL_TX_HEADER_IDE_MASK)) { frame->flags |= CAN_FRAME_IDE; } if (!!(frame->flags & CAN_FRAME_IDE)) { frame->id = (msg_data.Header.Id & CANXL_IP_ID_EXT_MASK); } else { frame->id = ((msg_data.Header.Id & CANXL_IP_ID_STD_MASK) >> CANXL_IP_ID_STD_SHIFT); } frame->dlc = (msg_data.Header.Control & CANXL_TX_HEADER_DLC_MASK) >> CANXL_TX_HEADER_DLC_SHIFT; if (!!(msg_data.Header.Control & CANXL_TX_HEADER_FDF_MASK)) { frame->flags |= CAN_FRAME_FDF; } if (!!(msg_data.Header.Control & CANXL_TX_HEADER_BRS_MASK)) { frame->flags |= CAN_FRAME_BRS; } if (!!(msg_data.Header.Id & CANXL_TX_HEADER_RTR_MASK)) { frame->flags |= CAN_FRAME_RTR; } else { memcpy(frame->data, msg_data.data, can_dlc_to_bytes(frame->dlc)); } #ifdef CONFIG_CAN_RX_TIMESTAMP frame->timestamp = msg_data.timeStampL; #endif /* CAN_RX_TIMESTAMP */ } static void can_nxp_s32_ctrl_callback(const struct device *dev, Canexcel_Ip_EventType eventType, uint32_t buffidx, const Canexcel_Ip_StateType *canexcelState) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; struct can_frame frame = {0}; can_tx_callback_t tx_func; can_rx_callback_t rx_func; int alloc; if (eventType == CANEXCEL_EVENT_TX_COMPLETE) { alloc = TX_MBIDX_TO_ALLOC_IDX(buffidx); tx_func = data->tx_cbs[alloc].function; LOG_DBG("%s: Sent Tx Mb %d", dev->name, buffidx); if (atomic_test_and_clear_bit(data->tx_allocs, alloc)) { tx_func(dev, 0, data->tx_cbs[alloc].arg); k_sem_give(&data->tx_allocs_sem); } #ifdef CONFIG_CAN_NXP_S32_RX_FIFO } else if (eventType == CANEXCEL_EVENT_RXFIFO_COMPLETE) { uint8_t queue_idx = ((config->base_rx_fifo_ctrl->RXFCSTA & CANXL_RXFIFO_CONTROL_RXFCSTA_HWPOINTER_MASK) >> CANXL_RXFIFO_CONTROL_RXFCSTA_HWPOINTER_SHIFT) - 1; alloc = can_nxp_s32_get_rx_fifo_filter(data, queue_idx); if (alloc != -ENOSPC) { rx_func = data->rx_cbs[alloc].function; if (atomic_test_bit(data->rx_allocs, alloc)) { nxp_s32_msg_data_to_zcan_frame(data->rx_fifo[queue_idx], &frame); LOG_DBG("%s: Received %d bytes Rx FiFo %d, " "Rx Id: 0x%x, " "Id type: %s %s %s %s", dev->name, can_dlc_to_bytes(frame.dlc), alloc, frame.id, !!(frame.flags & CAN_FRAME_IDE) ? "extended" : "standard", !!(frame.flags & CAN_FRAME_RTR) ? "RTR" : "", !!(frame.flags & CAN_FRAME_FDF) ? "FD frame" : "", !!(frame.flags & CAN_FRAME_BRS) ? 
"BRS" : ""); rx_func(dev, &frame, data->rx_cbs[alloc].arg); } } /* Pop 1 (= RXFSYSPOP + 1) received RxFIFO message queue */ config->base_rx_fifo_ctrl->RXFSYSPOP = 0; #else } else if (eventType == CANEXCEL_EVENT_RX_COMPLETE) { alloc = RX_MBIDX_TO_ALLOC_IDX(buffidx); rx_func = data->rx_cbs[alloc].function; if (atomic_test_bit(data->rx_allocs, alloc)) { nxp_s32_msg_data_to_zcan_frame(data->rx_msg[alloc], &frame); LOG_DBG("%s: Received %d bytes Rx Mb %d, " "Rx Id: 0x%x, " "Id type: %s %s %s %s", dev->name, can_dlc_to_bytes(frame.dlc), buffidx, frame.id, !!(frame.flags & CAN_FRAME_IDE) ? "extended" : "standard", !!(frame.flags & CAN_FRAME_RTR) ? "RTR" : "", !!(frame.flags & CAN_FRAME_FDF) ? "FD frame" : "", !!(frame.flags & CAN_FRAME_BRS) ? "BRS" : ""); rx_func(dev, &frame, data->rx_cbs[alloc].arg); if (Canexcel_Ip_ReceiveFD(config->instance, buffidx, &data->rx_msg[alloc], FALSE) != CANEXCEL_STATUS_SUCCESS) { LOG_ERR("MB %d is not ready for receiving next message", buffidx); } } #endif } } static int can_nxp_s32_init(const struct device *dev) { const struct can_nxp_s32_config *config = dev->config; struct can_nxp_s32_data *data = dev->data; int err; #ifdef CONFIG_CAN_RX_TIMESTAMP Canexcel_Ip_TimeStampConf_Type time_stamp = { .ts64bit = FALSE, /* Time stamp size is 32 bits */ .capture = CANEXCEL_TIMESTAMPCAPTURE_END, .src = CANTBS_TIMESURCE_BUS1 }; #endif if (config->common.phy != NULL) { if (!device_is_ready(config->common.phy)) { LOG_ERR("CAN transceiver not ready"); return -ENODEV; } } if (!device_is_ready(config->clock_dev)) { LOG_ERR("Clock control device not ready"); return -ENODEV; } err = clock_control_on(config->clock_dev, config->clock_subsys); if (err) { LOG_ERR("Failed to enable clock"); return err; } k_mutex_init(&data->rx_mutex); k_mutex_init(&data->tx_mutex); k_sem_init(&data->tx_allocs_sem, CONFIG_CAN_NXP_S32_MAX_TX, CONFIG_CAN_NXP_S32_MAX_TX); err = pinctrl_apply_state(config->pin_cfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } /* Enable CANXL HW */ IP_MC_RGM->PRST_0[0].PRST_0 &= ~(MC_RGM_PRST_0_PERIPH_16_RST_MASK | MC_RGM_PRST_0_PERIPH_24_RST_MASK); err = can_calc_timing(dev, &data->timing, config->common.bitrate, config->common.sample_point); if (err == -EINVAL) { LOG_ERR("Can't find timing for given param"); return -EIO; } if (err > 0) { LOG_WRN("Sample-point error : %d", err); } LOG_DBG("Setting CAN bitrate %d:", config->common.bitrate); nxp_s32_zcan_timing_to_canxl_timing(&data->timing, &config->can_cfg->bitrate); #ifdef CAN_NXP_S32_FD_MODE err = can_calc_timing_data(dev, &data->timing_data, config->common.bitrate_data, config->common.sample_point_data); if (err == -EINVAL) { LOG_ERR("Can't find timing data for given param"); return -EIO; } if (err > 0) { LOG_WRN("Sample-point-data err : %d", err); } LOG_DBG("Setting CAN FD bitrate %d:", config->common.bitrate_data); nxp_s32_zcan_timing_to_canxl_timing(&data->timing_data, &config->can_cfg->Fd_bitrate); #endif /* Initialize CAN structure */ Canexcel_Ip_Init(config->instance, config->can_cfg, data->can_state); #ifdef CAN_NXP_S32_FD_MODE Canexcel_Ip_SetTDCOffsetFD(config->instance, true, false, CAN_CALC_TDCO((&data->timing_data), 0U, CAN_NXP_S32_TDCO_MAX)); #endif /* Configure time stamp */ #ifdef CONFIG_CAN_RX_TIMESTAMP Canexcel_Ip_ConfigTimeStamp(config->instance, &time_stamp); #endif /* Enable Interrupt */ Canexcel_Ip_EnableInterrupts(config->instance); /* Enable Error Interrupt */ CanXL_SetErrIntCmd(config->base_sic, CANXL_INT_RX_WARNING, TRUE); CanXL_SetErrIntCmd(config->base_sic, CANXL_INT_TX_WARNING, TRUE); 
CanXL_SetErrIntCmd(config->base_sic, CANXL_INT_ERR, TRUE); CanXL_SetErrIntCmd(config->base_sic, CANXL_INT_BUSOFF, TRUE); CanXL_SetErrIntCmd(config->base_sic, CANXL_INT_PASIVE_ERR, TRUE); CanXL_SetErrIntCmd(config->base_sic, CANXL_INT_RXSMB_OVER, TRUE); #ifdef CAN_NXP_S32_FD_MODE CanXL_SetErrIntCmd(config->base_sic, CANXL_INT_ERR_FAST, TRUE); #endif #ifdef CONFIG_CAN_NXP_S32_RX_FIFO CanXL_SetErrIntCmd(config->base_sic, CANXL_INT_RXFIFO_OVER, TRUE); /* Configure number of ID acceptance filters*/ config->base_rx_fifo->AFCFG = CANXL_RXFIFO_AFCFG_ACPTID(CONFIG_CAN_NXP_S32_MAX_RX - 1); #endif config->irq_config_func(); can_nxp_s32_get_state(dev, &data->state, NULL); return 0; } static void can_nxp_s32_isr_rx_tx_mru(const struct device *dev) { const struct can_nxp_s32_config *config = dev->config; Canexcel_Ip_RxTxIRQHandler(config->instance); if ((config->base_mru->CHXCONFIG[0u].CH_CFG0 & CANXL_MRU_CH_CFG0_CHE_MASK) == CANXL_MRU_CH_CFG0_CHE_MASK) { Canexcel_Ip_MruIRQHandler(config->instance); } } static void can_nxp_s32_isr_error(const struct device *dev) { const struct can_nxp_s32_config *config = dev->config; Canexcel_Ip_ErrIRQHandler(config->instance); } static const struct can_driver_api can_nxp_s32_driver_api = { .get_capabilities = can_nxp_s32_get_capabilities, .start = can_nxp_s32_start, .stop = can_nxp_s32_stop, .set_mode = can_nxp_s32_set_mode, .set_timing = can_nxp_s32_set_timing, .send = can_nxp_s32_send, .add_rx_filter = can_nxp_s32_add_rx_filter, .remove_rx_filter = can_nxp_s32_remove_rx_filter, .get_state = can_nxp_s32_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_nxp_s32_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .set_state_change_callback = can_nxp_s32_set_state_change_callback, .get_core_clock = can_nxp_s32_get_core_clock, .get_max_filters = can_nxp_s32_get_max_filters, .timing_min = { .sjw = 0x01, .prop_seg = 0x01, .phase_seg1 = 0x01, .phase_seg2 = 0x02, .prescaler = 0x01 }, .timing_max = { .sjw = 0x04, .prop_seg = 0x08, .phase_seg1 = 0x08, .phase_seg2 = 0x08, .prescaler = 0x100 }, #ifdef CAN_NXP_S32_FD_MODE .set_timing_data = can_nxp_s32_set_timing_data, .timing_data_min = { .sjw = 0x01, .prop_seg = 0x01, .phase_seg1 = 0x01, .phase_seg2 = 0x02, .prescaler = 0x01 }, .timing_data_max = { .sjw = 0x04, .prop_seg = 0x08, .phase_seg1 = 0x08, .phase_seg2 = 0x08, .prescaler = 0x100 } #endif }; #define _CAN_NXP_S32_IRQ_CONFIG(node_id, prop, idx) \ do { \ IRQ_CONNECT(DT_IRQ_BY_IDX(node_id, idx, irq), \ DT_IRQ_BY_IDX(node_id, idx, priority), \ UTIL_CAT(can_nxp_s32_isr_, \ DT_STRING_TOKEN_BY_IDX(node_id, prop, idx)), \ DEVICE_DT_GET(node_id), \ DT_IRQ_BY_IDX(node_id, idx, flags)); \ irq_enable(DT_IRQ_BY_IDX(node_id, idx, irq)); \ } while (false); #define CAN_NXP_S32_IRQ_CONFIG(n) \ static void can_irq_config_##n(void) \ { \ DT_INST_FOREACH_PROP_ELEM(n, interrupt_names, _CAN_NXP_S32_IRQ_CONFIG); \ } #define CAN_NXP_S32_ERR_CALLBACK(n) \ void nxp_s32_can_##n##_err_callback(uint8 instance, Canexcel_Ip_EventType eventType,\ uint32 u32SysStatus, const Canexcel_Ip_StateType *canexcelState) \ { \ const struct device *dev = DEVICE_DT_INST_GET(n); \ can_nxp_s32_err_callback(dev, eventType, u32SysStatus, canexcelState); \ } #define CAN_NXP_S32_CTRL_CALLBACK(n) \ void nxp_s32_can_##n##_ctrl_callback(uint8 instance, Canexcel_Ip_EventType eventType,\ uint32 buffIdx, const Canexcel_Ip_StateType *canexcelState) \ { \ const struct device *dev = DEVICE_DT_INST_GET(n); \ can_nxp_s32_ctrl_callback(dev, eventType, buffIdx, canexcelState); \ } #if defined(CAN_NXP_S32_FD_MODE) 
#define CAN_NXP_S32_BRS 1 #else #define CAN_NXP_S32_BRS 0 #endif #define CAN_NXP_S32_HW_INSTANCE_CHECK(i, n) \ ((DT_INST_REG_ADDR(n) == IP_CANXL_##i##__SIC_BASE) ? i : 0) #define CAN_NXP_S32_HW_INSTANCE(n) \ LISTIFY(__DEBRACKET CANXL_SIC_INSTANCE_COUNT, CAN_NXP_S32_HW_INSTANCE_CHECK, (|), n) #define CAN_NXP_S32_INIT_DEVICE(n) \ CAN_NXP_S32_CTRL_CALLBACK(n) \ CAN_NXP_S32_ERR_CALLBACK(n) \ CAN_NXP_S32_IRQ_CONFIG(n) \ PINCTRL_DT_INST_DEFINE(n); \ \ __nocache Canexcel_Ip_StateType can_nxp_s32_state##n; \ __nocache Canexcel_TxFdMsgType tx_msg##n[CONFIG_CAN_NXP_S32_MAX_TX]; \ IF_DISABLED(CONFIG_CAN_NXP_S32_RX_FIFO, \ (__nocache Canexcel_RxFdMsg rx_msg_##n[CONFIG_CAN_NXP_S32_MAX_RX];)) \ IF_ENABLED(CONFIG_CAN_NXP_S32_RX_FIFO, \ (__nocache Canexcel_RxFdMsg rx_fifo_##n[CAN_NXP_S32_RX_FIFO_DEPTH]; \ static Canexcel_Ip_RxFifoFilterID_ADDR \ rx_fifo_filter##n[CONFIG_CAN_NXP_S32_MAX_RX];)) \ Canexcel_Ip_ConfigType can_nxp_s32_default_config##n = { \ .rx_mbdesc = (uint8)IS_ENABLED(CONFIG_CAN_NXP_S32_RX_FIFO) ? \ 0 : CONFIG_CAN_NXP_S32_MAX_RX, \ .tx_mbdesc = (uint8)CONFIG_CAN_NXP_S32_MAX_TX, \ .CanxlMode = CANEXCEL_LISTEN_ONLY_MODE, \ .fd_enable = (boolean)IS_ENABLED(CAN_NXP_S32_FD_MODE), \ .bitRateSwitch = (boolean)CAN_NXP_S32_BRS, \ .ctrlOptions = CANXL_IP_BUSOFF_RECOVERY_U32, \ .Callback = nxp_s32_can_##n##_ctrl_callback, \ .ErrorCallback = nxp_s32_can_##n##_err_callback, \ IF_ENABLED(CONFIG_CAN_NXP_S32_RX_FIFO, \ (.is_rx_fifo_needed = (boolean)TRUE, \ .pRxFifoConfig = { \ .Rx_Fifo_Depth = CAN_NXP_S32_RX_FIFO_DEPTH, \ .Rx_Fifo_Watermark = CAN_NXP_S32_RX_FIFO_WATERMARK, \ .Rx_Fifo_Msg_Size = CAN_NXP_S32_DATA_LENGTH, \ .Rx_Fifo_KeepLast = (boolean)FALSE, \ .isPolling = (boolean)FALSE, \ .MsgBuffersPtr = (uint32 *)rx_fifo_##n, \ },)) \ }; \ static struct can_nxp_s32_data can_nxp_s32_data_##n = { \ .can_state = (Canexcel_Ip_StateType *)&can_nxp_s32_state##n, \ .tx_msg = tx_msg##n, \ IF_DISABLED(CONFIG_CAN_NXP_S32_RX_FIFO, \ (.rx_msg = rx_msg_##n,)) \ IF_ENABLED(CONFIG_CAN_NXP_S32_RX_FIFO, \ (.rx_fifo = rx_fifo_##n, \ .rx_fifo_filter = \ (Canexcel_Ip_RxFifoFilterID_ADDR *)&rx_fifo_filter##n,))\ }; \ static struct can_nxp_s32_config can_nxp_s32_config_##n = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(n, 0, CAN_NXP_S32_MAX_BITRATE), \ .base_sic = (CANXL_SIC_Type *)DT_INST_REG_ADDR_BY_NAME(n, sic), \ IF_ENABLED(CONFIG_CAN_NXP_S32_RX_FIFO, \ (.base_rx_fifo = (CANXL_RXFIFO_Type *) \ DT_INST_REG_ADDR_BY_NAME(n, rx_fifo), \ .base_rx_fifo_ctrl = (CANXL_RXFIFO_CONTROL_Type *) \ DT_INST_REG_ADDR_BY_NAME(n, rx_fifo_ctrl),)) \ .base_mru = (CANXL_MRU_Type *)DT_INST_REG_ADDR_BY_NAME(n, mru), \ .instance = CAN_NXP_S32_HW_INSTANCE(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t) \ DT_INST_CLOCKS_CELL(n, name), \ .pin_cfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .can_cfg = (Canexcel_Ip_ConfigType *)&can_nxp_s32_default_config##n, \ .irq_config_func = can_irq_config_##n \ }; \ static int can_nxp_s32_##n##_init(const struct device *dev) \ { \ return can_nxp_s32_init(dev); \ } \ CAN_DEVICE_DT_INST_DEFINE(n, \ can_nxp_s32_##n##_init, \ NULL, \ &can_nxp_s32_data_##n, \ &can_nxp_s32_config_##n, \ POST_KERNEL, \ CONFIG_CAN_INIT_PRIORITY, \ &can_nxp_s32_driver_api); DT_INST_FOREACH_STATUS_OKAY(CAN_NXP_S32_INIT_DEVICE) ```
/content/code_sandbox/drivers/can/can_nxp_s32_canxl.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,446
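The NXP S32 CANXL driver above sits behind Zephyr's generic CAN controller API, so applications never call the Canexcel_Ip_* layer directly. Below is a minimal, illustrative sketch of the consumer side, assuming the board's devicetree selects this controller as the `zephyr,canbus` chosen node; the function name `example_send_one` and the frame contents are invented for illustration, not taken from the tree.

```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>

/* Illustrative only: bring the controller out of stopped mode and send one
 * classic CAN frame, blocking until it is acknowledged or the timeout hits.
 */
int example_send_one(void)
{
	const struct device *dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
	const struct can_frame frame = {
		.id = 0x123,
		.dlc = 2,
		.data = {0xAA, 0x55},
	};
	int ret;

	if (!device_is_ready(dev)) {
		return -ENODEV;
	}

	/* can_start() leaves CAN_STATE_STOPPED; a second call returns
	 * -EALREADY, which is harmless here.
	 */
	ret = can_start(dev);
	if (ret != 0 && ret != -EALREADY) {
		return ret;
	}

	/* NULL callback: can_send() blocks until TX completion or timeout */
	return can_send(dev, &frame, K_MSEC(100), NULL, NULL);
}
```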
```c /* * */ #include <zephyr/device.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_fake.h> #include <zephyr/fff.h> #include <zephyr/sys/util.h> #ifdef CONFIG_ZTEST #include <zephyr/ztest.h> #endif /* CONFIG_ZTEST */ #define DT_DRV_COMPAT zephyr_fake_can struct fake_can_config { const struct can_driver_config common; }; struct fake_can_data { struct can_driver_data common; }; DEFINE_FAKE_VALUE_FUNC(int, fake_can_start, const struct device *); DEFINE_FAKE_VALUE_FUNC(int, fake_can_stop, const struct device *); DEFINE_FAKE_VALUE_FUNC(int, fake_can_set_timing, const struct device *, const struct can_timing *); DEFINE_FAKE_VALUE_FUNC(int, fake_can_set_timing_data, const struct device *, const struct can_timing *); DEFINE_FAKE_VALUE_FUNC(int, fake_can_get_capabilities, const struct device *, can_mode_t *); DEFINE_FAKE_VALUE_FUNC(int, fake_can_set_mode, const struct device *, can_mode_t); DEFINE_FAKE_VALUE_FUNC(int, fake_can_send, const struct device *, const struct can_frame *, k_timeout_t, can_tx_callback_t, void *); DEFINE_FAKE_VALUE_FUNC(int, fake_can_add_rx_filter, const struct device *, can_rx_callback_t, void *, const struct can_filter *); DEFINE_FAKE_VOID_FUNC(fake_can_remove_rx_filter, const struct device *, int); DEFINE_FAKE_VALUE_FUNC(int, fake_can_recover, const struct device *, k_timeout_t); DEFINE_FAKE_VALUE_FUNC(int, fake_can_get_state, const struct device *, enum can_state *, struct can_bus_err_cnt *); DEFINE_FAKE_VOID_FUNC(fake_can_set_state_change_callback, const struct device *, can_state_change_callback_t, void *); DEFINE_FAKE_VALUE_FUNC(int, fake_can_get_max_filters, const struct device *, bool); DEFINE_FAKE_VALUE_FUNC(int, fake_can_get_core_clock, const struct device *, uint32_t *); static int fake_can_get_core_clock_delegate(const struct device *dev, uint32_t *rate) { ARG_UNUSED(dev); /* Recommended CAN clock from CiA 601-3 */ *rate = MHZ(80); return 0; } #ifdef CONFIG_ZTEST static void fake_can_reset_rule_before(const struct ztest_unit_test *test, void *fixture) { ARG_UNUSED(test); ARG_UNUSED(fixture); RESET_FAKE(fake_can_start); RESET_FAKE(fake_can_stop); RESET_FAKE(fake_can_get_capabilities); RESET_FAKE(fake_can_set_mode); RESET_FAKE(fake_can_set_timing); RESET_FAKE(fake_can_set_timing_data); RESET_FAKE(fake_can_send); RESET_FAKE(fake_can_add_rx_filter); RESET_FAKE(fake_can_remove_rx_filter); RESET_FAKE(fake_can_get_state); RESET_FAKE(fake_can_recover); RESET_FAKE(fake_can_set_state_change_callback); RESET_FAKE(fake_can_get_max_filters); RESET_FAKE(fake_can_get_core_clock); /* Re-install default delegate for reporting the core clock */ fake_can_get_core_clock_fake.custom_fake = fake_can_get_core_clock_delegate; } ZTEST_RULE(fake_can_reset_rule, fake_can_reset_rule_before, NULL); #endif /* CONFIG_ZTEST */ static int fake_can_init(const struct device *dev) { /* Install default delegate for reporting the core clock */ fake_can_get_core_clock_fake.custom_fake = fake_can_get_core_clock_delegate; return 0; } static const struct can_driver_api fake_can_driver_api = { .start = fake_can_start, .stop = fake_can_stop, .get_capabilities = fake_can_get_capabilities, .set_mode = fake_can_set_mode, .set_timing = fake_can_set_timing, .send = fake_can_send, .add_rx_filter = fake_can_add_rx_filter, .remove_rx_filter = fake_can_remove_rx_filter, .get_state = fake_can_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = fake_can_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .set_state_change_callback = fake_can_set_state_change_callback, .get_core_clock 
= fake_can_get_core_clock, .get_max_filters = fake_can_get_max_filters, /* Recommended configuration ranges from CiA 601-2 */ .timing_min = { .sjw = 1, .prop_seg = 0, .phase_seg1 = 2, .phase_seg2 = 2, .prescaler = 1 }, .timing_max = { .sjw = 128, .prop_seg = 0, .phase_seg1 = 256, .phase_seg2 = 128, .prescaler = 32 }, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = fake_can_set_timing_data, /* Recommended configuration ranges from CiA 601-2 */ .timing_data_min = { .sjw = 1, .prop_seg = 0, .phase_seg1 = 1, .phase_seg2 = 1, .prescaler = 1 }, .timing_data_max = { .sjw = 16, .prop_seg = 0, .phase_seg1 = 32, .phase_seg2 = 16, .prescaler = 32 }, #endif /* CONFIG_CAN_FD_MODE */ }; #ifdef CONFIG_CAN_FD_MODE #define FAKE_CAN_MAX_BITRATE 8000000 #else /* CONFIG_CAN_FD_MODE */ #define FAKE_CAN_MAX_BITRATE 1000000 #endif /* !CONFIG_CAN_FD_MODE */ #define FAKE_CAN_INIT(inst) \ static const struct fake_can_config fake_can_config_##inst = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, FAKE_CAN_MAX_BITRATE), \ }; \ \ static struct fake_can_data fake_can_data_##inst; \ \ CAN_DEVICE_DT_INST_DEFINE(inst, fake_can_init, NULL, &fake_can_data_##inst, \ &fake_can_config_##inst, POST_KERNEL, \ CONFIG_CAN_INIT_PRIORITY, \ &fake_can_driver_api); DT_INST_FOREACH_STATUS_OKAY(FAKE_CAN_INIT) ```
/content/code_sandbox/drivers/can/can_fake.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,316
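Because can_fake.c only exposes FFF fakes behind the CAN driver API, its natural consumer is a ztest that programs fake return values and asserts on call counts. The sketch below is a rough example rather than code from the tree: it assumes a devicetree test overlay that instantiates a `zephyr,fake-can` node with the hypothetical label `fake_can`, and the suite, test, and helper names are invented.

```c
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/drivers/can/can_fake.h>
#include <zephyr/fff.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>
#include <zephyr/ztest.h>

DEFINE_FFF_GLOBALS;

/* Passing a real callback keeps can_send() non-blocking for this test */
static void tx_cb_stub(const struct device *dev, int error, void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(error);
	ARG_UNUSED(user_data);
}

ZTEST_SUITE(fake_can_example, NULL, NULL, NULL, NULL, NULL);

ZTEST(fake_can_example, test_send_reaches_fake)
{
	/* Hypothetical node label provided by a devicetree test overlay */
	const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(fake_can));
	struct can_frame frame = { .id = 0x010, .dlc = 0 };

	zassert_true(device_is_ready(dev));

	/* Program the fake to report success, then confirm it was invoked */
	fake_can_send_fake.return_val = 0;
	zassert_ok(can_send(dev, &frame, K_NO_WAIT, tx_cb_stub, NULL));
	zassert_equal(fake_can_send_fake.call_count, 1);
}
```

Note that the `fake_can_reset_rule` registered in the driver resets all fakes before each test, so per-test return values and custom fakes have to be re-programmed inside every test body.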
```c /* * */ #define DT_DRV_COMPAT renesas_rcar_can #include <zephyr/kernel.h> #include <errno.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/renesas_cpg_mssr.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(can_rcar, CONFIG_CAN_LOG_LEVEL); /* Control Register */ #define RCAR_CAN_CTLR 0x0840 /* Control Register bits */ #define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */ #define RCAR_CAN_CTLR_BOM_ENT BIT(11) /* Automatic halt mode entry at bus-off entry */ #define RCAR_CAN_CTLR_SLPM BIT(10) #define RCAR_CAN_CTLR_CANM_HALT BIT(9) #define RCAR_CAN_CTLR_CANM_RESET BIT(8) #define RCAR_CAN_CTLR_CANM_MASK (3 << 8) #define RCAR_CAN_CTLR_MLM BIT(3) /* Message Lost Mode Select */ #define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */ #define RCAR_CAN_CTLR_IDFM_MIXED BIT(2) /* Mixed ID mode */ #define RCAR_CAN_CTLR_MBM BIT(0) /* Mailbox Mode select */ /* Mask Register */ #define RCAR_CAN_MKR0 0x0430 #define RCAR_CAN_MKR1 0x0434 #define RCAR_CAN_MKR2 0x0400 #define RCAR_CAN_MKR3 0x0404 #define RCAR_CAN_MKR4 0x0408 #define RCAR_CAN_MKR5 0x040C #define RCAR_CAN_MKR6 0x0410 #define RCAR_CAN_MKR7 0x0414 #define RCAR_CAN_MKR8 0x0418 #define RCAR_CAN_MKR9 0x041C /* FIFO Received ID Compare Register 0 */ #define RCAR_CAN_FIDCR0 0x0420 /* FIFO Received ID Compare Register 1 */ #define RCAR_CAN_FIDCR1 0x0424 /* FIFO Received ID Compare Registers 0 and 1 bits */ #define RCAR_CAN_FIDCR_IDE BIT(31) /* ID Extension Bit */ #define RCAR_CAN_FIDCR_RTR BIT(30) /* RTR Bit */ /* Mask Invalid Register 0 */ #define RCAR_CAN_MKIVLR0 0x0438 /* Mask Invalid Register 1 */ #define RCAR_CAN_MKIVLR1 0x0428 /* Mailbox Interrupt Enable Registers*/ #define RCAR_CAN_MIER0 0x043C #define RCAR_CAN_MIER1 0x042C #define RCAR_CAN_MIER1_RXFIE BIT(28) /* Rx FIFO Interrupt Enable */ #define RCAR_CAN_MIER1_TXFIE BIT(24) /* Tx FIFO Interrupt Enable */ #define RCAR_CAN_STR 0x0842 /* Status Register */ #define RCAR_CAN_STR_RSTST BIT(8) /* Reset Status Bit */ #define RCAR_CAN_STR_HLTST BIT(9) /* Halt Status Bit */ #define RCAR_CAN_STR_SLPST BIT(10) /* Sleep Status Bit */ #define MAX_STR_READS 0x100 /* Bit Configuration Register */ #define RCAR_CAN_BCR 0x0844 /* Clock Select Register */ #define RCAR_CAN_CLKR 0x0847 #define RCAR_CAN_CLKR_EXT_CLOCK 0x3 /* External input clock */ #define RCAR_CAN_CLKR_CLKP2 0x1 #define RCAR_CAN_CLKR_CLKP1 0x0 /* Error Interrupt Enable Register */ #define RCAR_CAN_EIER 0x084C /* Interrupt Enable Register */ #define RCAR_CAN_IER 0x0860 #define RCAR_CAN_IER_ERSIE BIT(5) /* Error Interrupt Enable Bit */ #define RCAR_CAN_IER_RXFIE BIT(4) /* Rx FIFO Interrupt Enable Bit */ #define RCAR_CAN_IER_TXFIE BIT(3) /* Tx FIFO Interrupt Enable Bit */ /* Interrupt Status Register */ #define RCAR_CAN_ISR 0x0861 #define RCAR_CAN_ISR_ERSF BIT(5) /* Error (ERS) Interrupt */ #define RCAR_CAN_ISR_RXFF BIT(4) /* Reception FIFO Interrupt */ #define RCAR_CAN_ISR_TXFF BIT(3) /* Transmission FIFO Interrupt */ /* Receive FIFO Control Register */ #define RCAR_CAN_RFCR 0x0848 #define RCAR_CAN_RFCR_RFE BIT(0) /* Receive FIFO Enable */ #define RCAR_CAN_RFCR_RFEST BIT(7) /* Receive FIFO Empty Flag */ /* Receive FIFO Pointer Control Register */ #define RCAR_CAN_RFPCR 0x0849 /* Transmit FIFO Control Register */ #define RCAR_CAN_TFCR 0x084A #define RCAR_CAN_TFCR_TFE BIT(0) /* Transmit FIFO Enable */ #define RCAR_CAN_TFCR_TFUST (7 << 1) /* 
Transmit FIFO Unsent Msg Number Status Bits */ #define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Tx FIFO Unsent */ /* Transmit FIFO Pointer Control Register */ #define RCAR_CAN_TFPCR 0x084B /* Error Code Store Register*/ #define RCAR_CAN_ECSR 0x0850 /* Error Code Store Register */ #define RCAR_CAN_ECSR_EDPM BIT(7) /* Error Display Mode Select */ #define RCAR_CAN_ECSR_ADEF BIT(6) /* ACK Delimiter Error Flag */ #define RCAR_CAN_ECSR_BE0F BIT(5) /* Bit Error (dominant) Flag */ #define RCAR_CAN_ECSR_BE1F BIT(4) /* Bit Error (recessive) Flag */ #define RCAR_CAN_ECSR_CEF BIT(3) /* CRC Error Flag */ #define RCAR_CAN_ECSR_AEF BIT(2) /* ACK Error Flag */ #define RCAR_CAN_ECSR_FEF BIT(1) /* Form Error Flag */ #define RCAR_CAN_ECSR_SEF BIT(0) /* Stuff Error Flag */ /* Test Control Register */ #define RCAR_CAN_TCR 0x0858 #define RCAR_CAN_TCR_TSTE BIT(0) /* Test Mode Enable Bit*/ #define RCAR_CAN_TCR_LISTEN_ONLY BIT(1) #define RCAR_CAN_TCR_INT_LOOP (3 << 1) /* Internal loopback*/ /* Error Interrupt Factor Judge Register bits */ #define RCAR_CAN_EIFR 0x084D #define RCAR_CAN_EIFR_BLIF BIT(7) /* Bus Lock Detect Flag */ #define RCAR_CAN_EIFR_OLIF BIT(6) /* Overload Frame Transmission */ #define RCAR_CAN_EIFR_ORIF BIT(5) /* Receive Overrun Detect Flag */ #define RCAR_CAN_EIFR_BORIF BIT(4) /* Bus-Off Recovery Detect Flag */ #define RCAR_CAN_EIFR_BOEIF BIT(3) /* Bus-Off Entry Detect Flag */ #define RCAR_CAN_EIFR_EPIF BIT(2) /* Error Passive Detect Flag */ #define RCAR_CAN_EIFR_EWIF BIT(1) /* Error Warning Detect Flag */ #define RCAR_CAN_EIFR_BEIF BIT(0) /* Bus Error Detect Flag */ /* Receive Error Count Register */ #define RCAR_CAN_RECR 0x084D /* Transmit Error Count Register */ #define RCAR_CAN_TECR 0x084F /* Mailbox configuration: * mailbox 60 - 63 - Rx FIFO mailboxes * mailbox 56 - 59 - Tx FIFO mailboxes * non-FIFO mailboxes are not used */ #define RCAR_CAN_MB_56 0x0380 #define RCAR_CAN_MB_60 0x03C0 /* DLC must be accessed as a 16 bit register */ #define RCAR_CAN_MB_DLC_OFFSET 0x4 /* Data length code */ #define RCAR_CAN_MB_DATA_OFFSET 0x6 /* Data section */ #define RCAR_CAN_MB_TSH_OFFSET 0x14 /* Timestamp upper byte */ #define RCAR_CAN_MB_TSL_OFFSET 0x15 /* Timestamp lower byte */ #define RCAR_CAN_FIFO_DEPTH 4 #define RCAR_CAN_MB_SID_SHIFT 18 #define RCAR_CAN_MB_RTR BIT(30) #define RCAR_CAN_MB_IDE BIT(31) #define RCAR_CAN_MB_SID_MASK 0x1FFC0000 #define RCAR_CAN_MB_EID_MASK 0x1FFFFFFF typedef void (*init_func_t)(const struct device *dev); struct can_rcar_cfg { const struct can_driver_config common; uint32_t reg_addr; int reg_size; init_func_t init_func; const struct device *clock_dev; struct rcar_cpg_clk mod_clk; struct rcar_cpg_clk bus_clk; const struct pinctrl_dev_config *pcfg; }; struct can_rcar_tx_cb { can_tx_callback_t cb; void *cb_arg; }; struct can_rcar_data { struct can_driver_data common; struct k_mutex inst_mutex; struct k_sem tx_sem; struct can_rcar_tx_cb tx_cb[RCAR_CAN_FIFO_DEPTH]; uint8_t tx_head; uint8_t tx_tail; uint8_t tx_unsent; struct k_mutex rx_mutex; can_rx_callback_t rx_callback[CONFIG_CAN_RCAR_MAX_FILTER]; void *rx_callback_arg[CONFIG_CAN_RCAR_MAX_FILTER]; struct can_filter filter[CONFIG_CAN_RCAR_MAX_FILTER]; enum can_state state; }; static inline uint16_t can_rcar_read16(const struct can_rcar_cfg *config, uint32_t offs) { return sys_read16(config->reg_addr + offs); } static inline void can_rcar_write16(const struct can_rcar_cfg *config, uint32_t offs, uint16_t value) { sys_write16(value, config->reg_addr + offs); } static void can_rcar_tx_done(const struct device *dev, uint8_t err) { 
struct can_rcar_data *data = dev->data; struct can_rcar_tx_cb *tx_cb; tx_cb = &data->tx_cb[data->tx_tail]; data->tx_tail++; if (data->tx_tail >= RCAR_CAN_FIFO_DEPTH) { data->tx_tail = 0; } data->tx_unsent--; tx_cb->cb(dev, err, tx_cb->cb_arg); k_sem_give(&data->tx_sem); } static void can_rcar_get_error_count(const struct can_rcar_cfg *config, struct can_bus_err_cnt *err_cnt) { err_cnt->tx_err_cnt = sys_read8(config->reg_addr + RCAR_CAN_TECR); err_cnt->rx_err_cnt = sys_read8(config->reg_addr + RCAR_CAN_RECR); } static void can_rcar_state_change(const struct device *dev, uint32_t newstate) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; const can_state_change_callback_t cb = data->common.state_change_cb; void *state_change_cb_data = data->common.state_change_cb_user_data; struct can_bus_err_cnt err_cnt; if (data->state == newstate) { return; } LOG_DBG("Can state change new: %u old:%u\n", newstate, data->state); data->state = newstate; if (cb == NULL) { return; } can_rcar_get_error_count(config, &err_cnt); cb(dev, newstate, err_cnt, state_change_cb_data); } static void can_rcar_error(const struct device *dev) { const struct can_rcar_cfg *config = dev->config; uint8_t eifr, ecsr; eifr = sys_read8(config->reg_addr + RCAR_CAN_EIFR); if (eifr & RCAR_CAN_EIFR_BEIF) { ecsr = sys_read8(config->reg_addr + RCAR_CAN_ECSR); if (ecsr & RCAR_CAN_ECSR_ADEF) { CAN_STATS_ACK_ERROR_INC(dev); sys_write8((uint8_t)~RCAR_CAN_ECSR_ADEF, config->reg_addr + RCAR_CAN_ECSR); } if (ecsr & RCAR_CAN_ECSR_BE0F) { CAN_STATS_BIT0_ERROR_INC(dev); sys_write8((uint8_t)~RCAR_CAN_ECSR_BE0F, config->reg_addr + RCAR_CAN_ECSR); } if (ecsr & RCAR_CAN_ECSR_BE1F) { CAN_STATS_BIT1_ERROR_INC(dev); sys_write8((uint8_t)~RCAR_CAN_ECSR_BE1F, config->reg_addr + RCAR_CAN_ECSR); } if (ecsr & RCAR_CAN_ECSR_CEF) { CAN_STATS_CRC_ERROR_INC(dev); sys_write8((uint8_t)~RCAR_CAN_ECSR_CEF, config->reg_addr + RCAR_CAN_ECSR); } if (ecsr & RCAR_CAN_ECSR_AEF) { CAN_STATS_ACK_ERROR_INC(dev); sys_write8((uint8_t)~RCAR_CAN_ECSR_AEF, config->reg_addr + RCAR_CAN_ECSR); } if (ecsr & RCAR_CAN_ECSR_FEF) { CAN_STATS_FORM_ERROR_INC(dev); sys_write8((uint8_t)~RCAR_CAN_ECSR_FEF, config->reg_addr + RCAR_CAN_ECSR); } if (ecsr & RCAR_CAN_ECSR_SEF) { CAN_STATS_STUFF_ERROR_INC(dev); sys_write8((uint8_t)~RCAR_CAN_ECSR_SEF, config->reg_addr + RCAR_CAN_ECSR); } sys_write8((uint8_t)~RCAR_CAN_EIFR_BEIF, config->reg_addr + RCAR_CAN_EIFR); } if (eifr & RCAR_CAN_EIFR_EWIF) { LOG_DBG("Error warning interrupt\n"); /* Clear interrupt condition */ sys_write8((uint8_t)~RCAR_CAN_EIFR_EWIF, config->reg_addr + RCAR_CAN_EIFR); can_rcar_state_change(dev, CAN_STATE_ERROR_WARNING); } if (eifr & RCAR_CAN_EIFR_EPIF) { LOG_DBG("Error passive interrupt\n"); /* Clear interrupt condition */ sys_write8((uint8_t)~RCAR_CAN_EIFR_EPIF, config->reg_addr + RCAR_CAN_EIFR); can_rcar_state_change(dev, CAN_STATE_ERROR_PASSIVE); } if (eifr & RCAR_CAN_EIFR_BORIF) { LOG_DBG("Bus-off recovery interrupt\n"); sys_write8(RCAR_CAN_IER_ERSIE, config->reg_addr + RCAR_CAN_IER); /* Clear interrupt condition */ sys_write8((uint8_t)~RCAR_CAN_EIFR_BORIF, config->reg_addr + RCAR_CAN_EIFR); can_rcar_state_change(dev, CAN_STATE_BUS_OFF); } if (eifr & RCAR_CAN_EIFR_BOEIF) { LOG_DBG("Bus-off entry interrupt\n"); sys_write8(RCAR_CAN_IER_ERSIE, config->reg_addr + RCAR_CAN_IER); /* Clear interrupt condition */ sys_write8((uint8_t)~RCAR_CAN_EIFR_BOEIF, config->reg_addr + RCAR_CAN_EIFR); can_rcar_state_change(dev, CAN_STATE_BUS_OFF); } if (eifr & RCAR_CAN_EIFR_ORIF) { LOG_DBG("Receive 
overrun error interrupt\n"); CAN_STATS_RX_OVERRUN_INC(dev); sys_write8((uint8_t)~RCAR_CAN_EIFR_ORIF, config->reg_addr + RCAR_CAN_EIFR); } if (eifr & RCAR_CAN_EIFR_OLIF) { LOG_DBG("Overload Frame Transmission error interrupt\n"); sys_write8((uint8_t)~RCAR_CAN_EIFR_OLIF, config->reg_addr + RCAR_CAN_EIFR); } if (eifr & RCAR_CAN_EIFR_BLIF) { LOG_DBG("Bus lock detected interrupt\n"); sys_write8((uint8_t)~RCAR_CAN_EIFR_BLIF, config->reg_addr + RCAR_CAN_EIFR); } } static void can_rcar_rx_filter_isr(const struct device *dev, struct can_rcar_data *data, const struct can_frame *frame) { struct can_frame tmp_frame; uint8_t i; #ifndef CONFIG_CAN_ACCEPT_RTR if ((frame->flags & CAN_FRAME_RTR) != 0U) { return; } #endif /* !CONFIG_CAN_ACCEPT_RTR */ for (i = 0; i < CONFIG_CAN_RCAR_MAX_FILTER; i++) { if (data->rx_callback[i] == NULL) { continue; } if (!can_frame_matches_filter(frame, &data->filter[i])) { continue; /* filter did not match */ } /* Make a temporary copy in case the user * modifies the message. */ tmp_frame = *frame; data->rx_callback[i](dev, &tmp_frame, data->rx_callback_arg[i]); } } static void can_rcar_rx_isr(const struct device *dev) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; struct can_frame frame = {0}; uint32_t val; int i; val = sys_read32(config->reg_addr + RCAR_CAN_MB_60); if (val & RCAR_CAN_MB_IDE) { frame.flags |= CAN_FRAME_IDE; frame.id = val & RCAR_CAN_MB_EID_MASK; } else { frame.id = (val & RCAR_CAN_MB_SID_MASK) >> RCAR_CAN_MB_SID_SHIFT; } frame.dlc = sys_read16(config->reg_addr + RCAR_CAN_MB_60 + RCAR_CAN_MB_DLC_OFFSET) & 0xF; /* Be paranoid doc states that any value greater than 8 * should be considered as 8 bytes. */ if (frame.dlc > CAN_MAX_DLC) { frame.dlc = CAN_MAX_DLC; } if (val & RCAR_CAN_MB_RTR) { frame.flags |= CAN_FRAME_RTR; } else { for (i = 0; i < frame.dlc; i++) { frame.data[i] = sys_read8(config->reg_addr + RCAR_CAN_MB_60 + RCAR_CAN_MB_DATA_OFFSET + i); } } #if defined(CONFIG_CAN_RX_TIMESTAMP) /* read upper byte */ frame.timestamp = sys_read8(config->reg_addr + RCAR_CAN_MB_60 + RCAR_CAN_MB_TSH_OFFSET) << 8; /* and then read lower byte */ frame.timestamp |= sys_read8(config->reg_addr + RCAR_CAN_MB_60 + RCAR_CAN_MB_TSL_OFFSET); #endif /* Increment CPU side pointer */ sys_write8(0xff, config->reg_addr + RCAR_CAN_RFPCR); can_rcar_rx_filter_isr(dev, data, &frame); } static void can_rcar_isr(const struct device *dev) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; uint8_t isr, unsent; isr = sys_read8(config->reg_addr + RCAR_CAN_ISR); if (isr & RCAR_CAN_ISR_ERSF) { /* Clear the Error interrupt */ isr &= ~RCAR_CAN_ISR_ERSF; sys_write8(isr, config->reg_addr + RCAR_CAN_ISR); can_rcar_error(dev); } if (isr & RCAR_CAN_ISR_TXFF) { /* Check for sent messages */ while (1) { unsent = sys_read8(config->reg_addr + RCAR_CAN_TFCR); unsent = (unsent & RCAR_CAN_TFCR_TFUST) >> RCAR_CAN_TFCR_TFUST_SHIFT; if (data->tx_unsent <= unsent) { break; } can_rcar_tx_done(dev, 0); } /* Clear the Tx interrupt */ isr = sys_read8(config->reg_addr + RCAR_CAN_ISR); isr &= ~RCAR_CAN_ISR_TXFF; sys_write8(isr, config->reg_addr + RCAR_CAN_ISR); } if (isr & RCAR_CAN_ISR_RXFF) { /* while there is unread messages */ while (!(sys_read8(config->reg_addr + RCAR_CAN_RFCR) & RCAR_CAN_RFCR_RFEST)) { can_rcar_rx_isr(dev); } /* Clear the Rx interrupt */ isr = sys_read8(config->reg_addr + RCAR_CAN_ISR); isr &= ~RCAR_CAN_ISR_RXFF; sys_write8(isr, config->reg_addr + RCAR_CAN_ISR); } } static int can_rcar_leave_sleep_mode(const struct 
can_rcar_cfg *config) { uint16_t ctlr, str; int i; ctlr = can_rcar_read16(config, RCAR_CAN_CTLR); ctlr &= ~RCAR_CAN_CTLR_SLPM; can_rcar_write16(config, RCAR_CAN_CTLR, ctlr); for (i = 0; i < MAX_STR_READS; i++) { str = can_rcar_read16(config, RCAR_CAN_STR); if (!(str & RCAR_CAN_STR_SLPST)) { return 0; } } return -EAGAIN; } static int can_rcar_enter_reset_mode(const struct can_rcar_cfg *config, bool force) { uint16_t ctlr; int i; ctlr = can_rcar_read16(config, RCAR_CAN_CTLR); ctlr &= ~RCAR_CAN_CTLR_CANM_MASK; ctlr |= RCAR_CAN_CTLR_CANM_RESET; if (force) { ctlr |= RCAR_CAN_CTLR_CANM_HALT; } can_rcar_write16(config, RCAR_CAN_CTLR, ctlr); for (i = 0; i < MAX_STR_READS; i++) { if (can_rcar_read16(config, RCAR_CAN_STR) & RCAR_CAN_STR_RSTST) { return 0; } } return -EAGAIN; } static int can_rcar_enter_halt_mode(const struct can_rcar_cfg *config) { uint16_t ctlr; int i; ctlr = can_rcar_read16(config, RCAR_CAN_CTLR); ctlr &= ~RCAR_CAN_CTLR_CANM_MASK; ctlr |= RCAR_CAN_CTLR_CANM_HALT; can_rcar_write16(config, RCAR_CAN_CTLR, ctlr); /* Wait for controller to apply high bit timing settings */ k_usleep(1); for (i = 0; i < MAX_STR_READS; i++) { if (can_rcar_read16(config, RCAR_CAN_STR) & RCAR_CAN_STR_HLTST) { return 0; } } return -EAGAIN; } static int can_rcar_enter_operation_mode(const struct can_rcar_cfg *config) { uint16_t ctlr, str; int i; ctlr = can_rcar_read16(config, RCAR_CAN_CTLR); ctlr &= ~RCAR_CAN_CTLR_CANM_MASK; can_rcar_write16(config, RCAR_CAN_CTLR, ctlr); /* Wait for controller to apply high bit timing settings */ k_usleep(1); for (i = 0; i < MAX_STR_READS; i++) { str = can_rcar_read16(config, RCAR_CAN_STR); if (!(str & RCAR_CAN_CTLR_CANM_MASK)) { break; } } if (i == MAX_STR_READS) { return -EAGAIN; } /* Enable Rx and Tx FIFO */ sys_write8(RCAR_CAN_RFCR_RFE, config->reg_addr + RCAR_CAN_RFCR); sys_write8(RCAR_CAN_TFCR_TFE, config->reg_addr + RCAR_CAN_TFCR); return 0; } static int can_rcar_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY; return 0; } static int can_rcar_start(const struct device *dev) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; int ret; if (data->common.started) { return -EALREADY; } if (config->common.phy != NULL) { ret = can_transceiver_enable(config->common.phy, data->common.mode); if (ret != 0) { LOG_ERR("failed to enable CAN transceiver (err %d)", ret); return ret; } } k_mutex_lock(&data->inst_mutex, K_FOREVER); CAN_STATS_RESET(dev); ret = can_rcar_enter_operation_mode(config); if (ret != 0) { LOG_ERR("failed to enter operation mode (err %d)", ret); if (config->common.phy != NULL) { /* Attempt to disable the CAN transceiver in case of error */ (void)can_transceiver_disable(config->common.phy); } } else { data->common.started = true; } k_mutex_unlock(&data->inst_mutex); return ret; } static int can_rcar_stop(const struct device *dev) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; int ret; if (!data->common.started) { return -EALREADY; } k_mutex_lock(&data->inst_mutex, K_FOREVER); ret = can_rcar_enter_halt_mode(config); if (ret != 0) { LOG_ERR("failed to enter halt mode (err %d)", ret); k_mutex_unlock(&data->inst_mutex); return ret; } data->common.started = false; k_mutex_unlock(&data->inst_mutex); if (config->common.phy != NULL) { ret = can_transceiver_disable(config->common.phy); if (ret != 0) { LOG_ERR("failed to disable CAN transceiver (err %d)", ret); return ret; } } /* Resetting TX FIFO, 
emptying it */ sys_write8((uint8_t)~RCAR_CAN_TFCR_TFE, config->reg_addr + RCAR_CAN_TFCR); sys_write8(RCAR_CAN_TFCR_TFE, config->reg_addr + RCAR_CAN_TFCR); /* Empty TX msgq, returning an error for each message */ while (data->tx_unsent) { can_rcar_tx_done(dev, -ENETDOWN); } return 0; } static int can_rcar_set_mode(const struct device *dev, can_mode_t mode) { can_mode_t supported = CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY; const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; uint8_t tcr = 0; int ret = 0; if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { supported |= CAN_MODE_MANUAL_RECOVERY; } if ((mode & ~(supported)) != 0) { LOG_ERR("Unsupported mode: 0x%08x", mode); return -ENOTSUP; } if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->inst_mutex, K_FOREVER); if ((mode & (CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY)) == (CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY)) { LOG_ERR("Combination of loopback and listenonly modes not supported"); ret = -ENOTSUP; goto unlock; } else if ((mode & CAN_MODE_LOOPBACK) != 0) { /* Loopback mode */ tcr = RCAR_CAN_TCR_INT_LOOP | RCAR_CAN_TCR_TSTE; } else if ((mode & CAN_MODE_LISTENONLY) != 0) { /* Listen-only mode */ tcr = RCAR_CAN_TCR_LISTEN_ONLY | RCAR_CAN_TCR_TSTE; } else { /* Normal mode */ tcr = 0; } sys_write8(tcr, config->reg_addr + RCAR_CAN_TCR); if (IS_ENABLED(CONFIG_CAN_MANUAL_RECOVERY_MODE)) { uint16_t ctlr = can_rcar_read16(config, RCAR_CAN_CTLR); if ((mode & CAN_MODE_MANUAL_RECOVERY) != 0U) { /* Set entry to halt automatically at bus-off */ ctlr |= RCAR_CAN_CTLR_BOM_ENT; } else { /* Clear entry to halt automatically at bus-off */ ctlr &= ~RCAR_CAN_CTLR_BOM_ENT; } can_rcar_write16(config, RCAR_CAN_CTLR, ctlr); } data->common.mode = mode; unlock: k_mutex_unlock(&data->inst_mutex); return ret; } /* Bit Configuration Register settings */ #define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20) #define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8) #define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4) #define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07) static void can_rcar_set_bittiming(const struct can_rcar_cfg *config, const struct can_timing *timing) { uint32_t bcr; bcr = RCAR_CAN_BCR_TSEG1(timing->phase_seg1 + timing->prop_seg - 1) | RCAR_CAN_BCR_BPR(timing->prescaler - 1) | RCAR_CAN_BCR_SJW(timing->sjw - 1) | RCAR_CAN_BCR_TSEG2(timing->phase_seg2 - 1); /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access. * All the registers are big-endian but they get byte-swapped on 32-bit * read/write (but not on 8-bit, contrary to the manuals)... */ sys_write32((bcr << 8) | RCAR_CAN_CLKR_CLKP2, config->reg_addr + RCAR_CAN_BCR); } static int can_rcar_set_timing(const struct device *dev, const struct can_timing *timing) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; int ret = 0; struct reg_backup { uint32_t address; uint8_t value; }; struct reg_backup regs[3] = { { RCAR_CAN_TCR, 0 }, { RCAR_CAN_TFCR, 0 } , { RCAR_CAN_RFCR, 0 } }; if (data->common.started) { return -EBUSY; } k_mutex_lock(&data->inst_mutex, K_FOREVER); /* Changing bittiming should be done in reset mode. * Switching to reset mode is resetting loopback mode (TCR), * transmit and receive FIFOs (TFCR and RFCR). * Storing these reg values to restore them once back in halt mode. 
*/ for (int i = 0; i < 3; i++) { regs[i].value = sys_read8(config->reg_addr + regs[i].address); } /* Switching to reset mode */ ret = can_rcar_enter_reset_mode(config, true); if (ret != 0) { goto unlock; } /* Setting bit timing */ can_rcar_set_bittiming(config, timing); /* Restoring registers must be done in halt mode */ ret = can_rcar_enter_halt_mode(config); if (ret) { goto unlock; } /* Restoring registers */ for (int i = 0; i < 3; i++) { sys_write8(regs[i].value, config->reg_addr + regs[i].address); } unlock: k_mutex_unlock(&data->inst_mutex); return ret; } static void can_rcar_set_state_change_callback(const struct device *dev, can_state_change_callback_t cb, void *user_data) { struct can_rcar_data *data = dev->data; data->common.state_change_cb = cb; data->common.state_change_cb_user_data = user_data; } static int can_rcar_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; if (state != NULL) { if (!data->common.started) { *state = CAN_STATE_STOPPED; } else { *state = data->state; } } if (err_cnt != NULL) { can_rcar_get_error_count(config, err_cnt); } return 0; } #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE static int can_rcar_recover(const struct device *dev, k_timeout_t timeout) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; int64_t start_time; int ret; if (!data->common.started) { return -ENETDOWN; } if ((data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { return -ENOTSUP; } if (data->state != CAN_STATE_BUS_OFF) { return 0; } if (k_mutex_lock(&data->inst_mutex, K_FOREVER)) { return -EAGAIN; } start_time = k_uptime_ticks(); while (data->state == CAN_STATE_BUS_OFF) { ret = can_rcar_enter_operation_mode(config); if (ret != 0) { goto done; } if (!K_TIMEOUT_EQ(timeout, K_FOREVER) && k_uptime_ticks() - start_time >= timeout.ticks) { ret = -EAGAIN; goto done; } } done: k_mutex_unlock(&data->inst_mutex); return ret; } #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ static int can_rcar_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; struct can_rcar_tx_cb *tx_cb; uint32_t identifier; int i; LOG_DBG("Sending %d bytes on %s. " "Id: 0x%x, " "ID type: %s, " "Remote Frame: %s" , frame->dlc, dev->name , frame->id , (frame->flags & CAN_FRAME_IDE) != 0 ? "extended" : "standard" , (frame->flags & CAN_FRAME_RTR) != 0 ? 
"yes" : "no"); if (frame->dlc > CAN_MAX_DLC) { LOG_ERR("DLC of %d exceeds maximum (%d)", frame->dlc, CAN_MAX_DLC); return -EINVAL; } if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0) { LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); return -ENOTSUP; } if (!data->common.started) { return -ENETDOWN; } /* Wait for a slot into the tx FIFO */ if (k_sem_take(&data->tx_sem, timeout) != 0) { return -EAGAIN; } k_mutex_lock(&data->inst_mutex, K_FOREVER); tx_cb = &data->tx_cb[data->tx_head]; tx_cb->cb = callback; tx_cb->cb_arg = user_data; data->tx_head++; if (data->tx_head >= RCAR_CAN_FIFO_DEPTH) { data->tx_head = 0; } if ((frame->flags & CAN_FRAME_IDE) != 0) { identifier = frame->id | RCAR_CAN_MB_IDE; } else { identifier = frame->id << RCAR_CAN_MB_SID_SHIFT; } if ((frame->flags & CAN_FRAME_RTR) != 0) { identifier |= RCAR_CAN_MB_RTR; } sys_write32(identifier, config->reg_addr + RCAR_CAN_MB_56); sys_write16(frame->dlc, config->reg_addr + RCAR_CAN_MB_56 + RCAR_CAN_MB_DLC_OFFSET); if ((frame->flags & CAN_FRAME_RTR) == 0) { for (i = 0; i < frame->dlc; i++) { sys_write8(frame->data[i], config->reg_addr + RCAR_CAN_MB_56 + RCAR_CAN_MB_DATA_OFFSET + i); } } compiler_barrier(); data->tx_unsent++; /* Start Tx: increment the CPU-side pointer for the transmit FIFO * to the next mailbox location */ sys_write8(0xff, config->reg_addr + RCAR_CAN_TFPCR); k_mutex_unlock(&data->inst_mutex); return 0; } static inline int can_rcar_add_rx_filter_unlocked(const struct device *dev, can_rx_callback_t cb, void *cb_arg, const struct can_filter *filter) { struct can_rcar_data *data = dev->data; int i; for (i = 0; i < CONFIG_CAN_RCAR_MAX_FILTER; i++) { if (data->rx_callback[i] == NULL) { data->rx_callback_arg[i] = cb_arg; data->filter[i] = *filter; compiler_barrier(); data->rx_callback[i] = cb; return i; } } return -ENOSPC; } static int can_rcar_add_rx_filter(const struct device *dev, can_rx_callback_t cb, void *cb_arg, const struct can_filter *filter) { struct can_rcar_data *data = dev->data; int filter_id; if ((filter->flags & ~(CAN_FILTER_IDE)) != 0) { LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); return -ENOTSUP; } k_mutex_lock(&data->rx_mutex, K_FOREVER); filter_id = can_rcar_add_rx_filter_unlocked(dev, cb, cb_arg, filter); k_mutex_unlock(&data->rx_mutex); return filter_id; } static void can_rcar_remove_rx_filter(const struct device *dev, int filter_id) { struct can_rcar_data *data = dev->data; if (filter_id < 0 || filter_id >= CONFIG_CAN_RCAR_MAX_FILTER) { LOG_ERR("filter ID %d out of bounds", filter_id); return; } k_mutex_lock(&data->rx_mutex, K_FOREVER); compiler_barrier(); data->rx_callback[filter_id] = NULL; k_mutex_unlock(&data->rx_mutex); } static int can_rcar_init(const struct device *dev) { const struct can_rcar_cfg *config = dev->config; struct can_rcar_data *data = dev->data; struct can_timing timing = { 0 }; int ret; uint16_t ctlr; k_mutex_init(&data->inst_mutex); k_mutex_init(&data->rx_mutex); k_sem_init(&data->tx_sem, RCAR_CAN_FIFO_DEPTH, RCAR_CAN_FIFO_DEPTH); data->tx_head = 0; data->tx_tail = 0; data->tx_unsent = 0; memset(data->rx_callback, 0, sizeof(data->rx_callback)); data->state = CAN_STATE_ERROR_ACTIVE; data->common.state_change_cb = NULL; data->common.state_change_cb_user_data = NULL; if (config->common.phy != NULL) { if (!device_is_ready(config->common.phy)) { LOG_ERR("CAN transceiver not ready"); return -ENODEV; } } if (!device_is_ready(config->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* Configure dt provided device signals 
when available */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* reset the registers */ ret = clock_control_off(config->clock_dev, (clock_control_subsys_t)&config->mod_clk); if (ret < 0) { return ret; } ret = clock_control_on(config->clock_dev, (clock_control_subsys_t)&config->mod_clk); if (ret < 0) { return ret; } ret = clock_control_on(config->clock_dev, (clock_control_subsys_t)&config->bus_clk); if (ret < 0) { return ret; } ret = can_rcar_enter_reset_mode(config, false); __ASSERT(!ret, "Fail to set CAN controller to reset mode"); if (ret) { return ret; } ret = can_rcar_leave_sleep_mode(config); __ASSERT(!ret, "Fail to leave CAN controller from sleep mode"); if (ret) { return ret; } ret = can_calc_timing(dev, &timing, config->common.bitrate, config->common.sample_point); if (ret == -EINVAL) { LOG_ERR("Can't find timing for given param"); return -EIO; } LOG_DBG("Presc: %d, TS1: %d, TS2: %d", timing.prescaler, timing.phase_seg1, timing.phase_seg2); LOG_DBG("Sample-point err : %d", ret); ret = can_set_timing(dev, &timing); if (ret) { return ret; } ret = can_rcar_set_mode(dev, CAN_MODE_NORMAL); if (ret) { return ret; } ctlr = can_rcar_read16(config, RCAR_CAN_CTLR); ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */ ctlr &= ~RCAR_CAN_CTLR_BOM_ENT; /* Clear entry to halt automatically at bus-off */ ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */ ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */ ctlr &= ~RCAR_CAN_CTLR_SLPM; /* Clear CAN Sleep mode */ can_rcar_write16(config, RCAR_CAN_CTLR, ctlr); /* Accept all SID and EID */ sys_write32(0, config->reg_addr + RCAR_CAN_MKR8); sys_write32(0, config->reg_addr + RCAR_CAN_MKR9); /* In FIFO mailbox mode, write "0" to bits 24 to 31 */ sys_write32(0, config->reg_addr + RCAR_CAN_MKIVLR0); sys_write32(0, config->reg_addr + RCAR_CAN_MKIVLR1); /* Accept standard and extended ID frames, but not * remote frame. 
*/ sys_write32(0, config->reg_addr + RCAR_CAN_FIDCR0); sys_write32(RCAR_CAN_FIDCR_IDE, config->reg_addr + RCAR_CAN_FIDCR1); /* Enable and configure FIFO mailbox interrupts Rx and Tx */ sys_write32(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, config->reg_addr + RCAR_CAN_MIER1); sys_write8(RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE | RCAR_CAN_IER_TXFIE, config->reg_addr + RCAR_CAN_IER); /* Accumulate error codes */ sys_write8(RCAR_CAN_ECSR_EDPM, config->reg_addr + RCAR_CAN_ECSR); /* Enable interrupts for all type of errors */ sys_write8(0xFF, config->reg_addr + RCAR_CAN_EIER); config->init_func(dev); return 0; } static int can_rcar_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_rcar_cfg *config = dev->config; *rate = config->bus_clk.rate; return 0; } static int can_rcar_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(ide); return CONFIG_CAN_RCAR_MAX_FILTER; } static const struct can_driver_api can_rcar_driver_api = { .get_capabilities = can_rcar_get_capabilities, .start = can_rcar_start, .stop = can_rcar_stop, .set_mode = can_rcar_set_mode, .set_timing = can_rcar_set_timing, .send = can_rcar_send, .add_rx_filter = can_rcar_add_rx_filter, .remove_rx_filter = can_rcar_remove_rx_filter, .get_state = can_rcar_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_rcar_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .set_state_change_callback = can_rcar_set_state_change_callback, .get_core_clock = can_rcar_get_core_clock, .get_max_filters = can_rcar_get_max_filters, .timing_min = { .sjw = 0x1, .prop_seg = 0x00, .phase_seg1 = 0x04, .phase_seg2 = 0x02, .prescaler = 0x01 }, .timing_max = { .sjw = 0x4, .prop_seg = 0x00, .phase_seg1 = 0x10, .phase_seg2 = 0x08, .prescaler = 0x400 } }; /* Device Instantiation */ #define CAN_RCAR_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static void can_rcar_##n##_init(const struct device *dev); \ static const struct can_rcar_cfg can_rcar_cfg_##n = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(n, 0, 1000000), \ .reg_addr = DT_INST_REG_ADDR(n), \ .reg_size = DT_INST_REG_SIZE(n), \ .init_func = can_rcar_##n##_init, \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .mod_clk.module = \ DT_INST_CLOCKS_CELL_BY_IDX(n, 0, module), \ .mod_clk.domain = \ DT_INST_CLOCKS_CELL_BY_IDX(n, 0, domain), \ .bus_clk.module = \ DT_INST_CLOCKS_CELL_BY_IDX(n, 1, module), \ .bus_clk.domain = \ DT_INST_CLOCKS_CELL_BY_IDX(n, 1, domain), \ .bus_clk.rate = 40000000, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ }; \ static struct can_rcar_data can_rcar_data_##n; \ \ CAN_DEVICE_DT_INST_DEFINE(n, can_rcar_init, \ NULL, \ &can_rcar_data_##n, \ &can_rcar_cfg_##n, \ POST_KERNEL, \ CONFIG_CAN_INIT_PRIORITY, \ &can_rcar_driver_api \ ); \ static void can_rcar_##n##_init(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ 0, \ can_rcar_isr, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } DT_INST_FOREACH_STATUS_OKAY(CAN_RCAR_INIT) ```
/content/code_sandbox/drivers/can/can_rcar.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,709
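The R-Car driver translates a struct can_timing into the BCR register, and its init path logs the values chosen by can_calc_timing(). The relation it relies on is bitrate = core_clock / (prescaler * (1 + prop_seg + phase_seg1 + phase_seg2)), with the core clock fixed at 40 MHz by the config above. Below is a small, hypothetical helper (the name is illustrative) that recomputes the nominal bitrate from a timing struct through the public API:

```c
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>

/* Illustrative helper: derive the nominal bitrate that a given
 * struct can_timing will produce on this controller.
 */
static uint32_t example_timing_to_bitrate(const struct device *dev,
					  const struct can_timing *timing)
{
	uint32_t core_clock;
	uint32_t tq_per_bit;

	if (can_get_core_clock(dev, &core_clock) != 0) {
		return 0U;
	}

	/* One sync segment plus the three programmed segments */
	tq_per_bit = 1U + timing->prop_seg + timing->phase_seg1 + timing->phase_seg2;

	return core_clock / (timing->prescaler * tq_per_bit);
}
```

As a worked example, with the 40 MHz bus clock, a prescaler of 5, prop_seg + phase_seg1 = 11, and phase_seg2 = 4, this gives 40 MHz / (5 * 16) = 500 kbit/s.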
```c /* */ #define DT_DRV_COMPAT nordic_nrf_can #include <stdint.h> #include <zephyr/arch/cpu.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> /* nRF CAN wrapper offsets */ #define CAN_TASKS_START offsetof(NRF_CAN_Type, TASKS_START) #define CAN_EVENTS_CORE_0 offsetof(NRF_CAN_Type, EVENTS_CORE[0]) #define CAN_EVENTS_CORE_1 offsetof(NRF_CAN_Type, EVENTS_CORE[1]) #define CAN_INTEN offsetof(NRF_CAN_Type, INTEN) struct can_nrf_config { uint32_t wrapper; uint32_t mcan; uint32_t mrba; uint32_t mram; const struct device *clock; const struct pinctrl_dev_config *pcfg; void (*irq_configure)(void); uint16_t irq; }; static void can_nrf_irq_handler(const struct device *dev) { const struct can_mcan_config *mcan_config = dev->config; const struct can_nrf_config *config = mcan_config->custom; if (sys_read32(config->wrapper + CAN_EVENTS_CORE_0) == 1U) { sys_write32(0U, config->wrapper + CAN_EVENTS_CORE_0); can_mcan_line_0_isr(dev); } if (sys_read32(config->wrapper + CAN_EVENTS_CORE_1) == 1U) { sys_write32(0U, config->wrapper + CAN_EVENTS_CORE_1); can_mcan_line_1_isr(dev); } } static int can_nrf_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_mcan_config *mcan_config = dev->config; const struct can_nrf_config *config = mcan_config->custom; return clock_control_get_rate(config->clock, NULL, rate); } static const struct can_driver_api can_nrf_api = { .get_capabilities = can_mcan_get_capabilities, .start = can_mcan_start, .stop = can_mcan_stop, .set_mode = can_mcan_set_mode, .set_timing = can_mcan_set_timing, .send = can_mcan_send, .add_rx_filter = can_mcan_add_rx_filter, .remove_rx_filter = can_mcan_remove_rx_filter, .get_state = can_mcan_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_mcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .get_core_clock = can_nrf_get_core_clock, .get_max_filters = can_mcan_get_max_filters, .set_state_change_callback = can_mcan_set_state_change_callback, .timing_min = CAN_MCAN_TIMING_MIN_INITIALIZER, .timing_max = CAN_MCAN_TIMING_MAX_INITIALIZER, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_mcan_set_timing_data, .timing_data_min = CAN_MCAN_TIMING_DATA_MIN_INITIALIZER, .timing_data_max = CAN_MCAN_TIMING_DATA_MAX_INITIALIZER, #endif /* CONFIG_CAN_FD_MODE */ }; static int can_nrf_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct can_mcan_config *mcan_config = dev->config; const struct can_nrf_config *config = mcan_config->custom; return can_mcan_sys_read_reg(config->mcan, reg, val); } static int can_nrf_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct can_mcan_config *mcan_config = dev->config; const struct can_nrf_config *config = mcan_config->custom; return can_mcan_sys_write_reg(config->mcan, reg, val); } static int can_nrf_read_mram(const struct device *dev, uint16_t offset, void *dst, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_nrf_config *config = mcan_config->custom; return can_mcan_sys_read_mram(config->mram, offset, dst, len); } static int can_nrf_write_mram(const struct device *dev, uint16_t offset, const void *src, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_nrf_config *config = mcan_config->custom; return can_mcan_sys_write_mram(config->mram, offset, src, len); } static int 
can_nrf_clear_mram(const struct device *dev, uint16_t offset, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_nrf_config *config = mcan_config->custom; return can_mcan_sys_clear_mram(config->mram, offset, len); } static const struct can_mcan_ops can_mcan_nrf_ops = { .read_reg = can_nrf_read_reg, .write_reg = can_nrf_write_reg, .read_mram = can_nrf_read_mram, .write_mram = can_nrf_write_mram, .clear_mram = can_nrf_clear_mram, }; static int can_nrf_init(const struct device *dev) { const struct can_mcan_config *mcan_config = dev->config; const struct can_nrf_config *config = mcan_config->custom; int ret; if (!device_is_ready(config->clock)) { return -ENODEV; } ret = clock_control_on(config->clock, NULL); if (ret < 0) { return ret; } ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } sys_write32(0U, config->wrapper + CAN_EVENTS_CORE_0); sys_write32(0U, config->wrapper + CAN_EVENTS_CORE_1); sys_write32(CAN_INTEN_CORE0_Msk | CAN_INTEN_CORE1_Msk, config->wrapper + CAN_INTEN); sys_write32(1U, config->wrapper + CAN_TASKS_START); config->irq_configure(); ret = can_mcan_configure_mram(dev, config->mrba, config->mram); if (ret < 0) { return ret; } ret = can_mcan_init(dev); if (ret < 0) { return ret; } return 0; } #define CAN_NRF_DEFINE(n) \ PINCTRL_DT_INST_DEFINE(n); \ \ static inline void can_nrf_irq_configure##n(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), can_nrf_irq_handler, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } \ \ static const struct can_nrf_config can_nrf_config##n = { \ .wrapper = DT_INST_REG_ADDR_BY_NAME(n, wrapper), \ .mcan = CAN_MCAN_DT_INST_MCAN_ADDR(n), \ .mrba = CAN_MCAN_DT_INST_MRBA(n), \ .mram = CAN_MCAN_DT_INST_MRAM_ADDR(n), \ .clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .irq = DT_INST_IRQN(n), \ .irq_configure = can_nrf_irq_configure##n, \ }; \ \ CAN_MCAN_DT_INST_CALLBACKS_DEFINE(n, can_mcan_nrf_cbs##n); \ \ static const struct can_mcan_config can_mcan_nrf_config##n = CAN_MCAN_DT_CONFIG_INST_GET( \ n, &can_nrf_config##n, &can_mcan_nrf_ops, &can_mcan_nrf_cbs##n); \ \ static struct can_mcan_data can_mcan_nrf_data##n = CAN_MCAN_DATA_INITIALIZER(NULL); \ \ DEVICE_DT_INST_DEFINE(n, can_nrf_init, NULL, &can_mcan_nrf_data##n, \ &can_mcan_nrf_config##n, POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_nrf_api); DT_INST_FOREACH_STATUS_OKAY(CAN_NRF_DEFINE) ```
/content/code_sandbox/drivers/can/can_nrf.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,830
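The nRF file above is a thin Bosch M_CAN front end: it only wires up clocks, pins, the wrapper's CORE[0]/CORE[1] interrupt events, and the register/message-RAM accessors in struct can_mcan_ops, delegating filtering, transmission, and state handling to the shared can_mcan core. Application code therefore goes through the same generic calls as with any other M_CAN-based controller; below is a minimal sketch of registering a receive filter, assuming this controller is the `zephyr,canbus` chosen node (callback and function names are illustrative):

```c
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>

/* Illustrative RX callback: invoked by the driver for each matching frame */
static void example_rx_cb(const struct device *dev, struct can_frame *frame,
			  void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);

	printk("RX id 0x%03x dlc %d\n", (unsigned int)frame->id, frame->dlc);
}

/* Illustrative filter registration: match standard ID 0x123 exactly */
int example_add_filter(const struct device *dev)
{
	const struct can_filter filter = {
		.id = 0x123,
		.mask = CAN_STD_ID_MASK,
		.flags = 0U,
	};

	/* Returns the assigned filter ID (>= 0) or a negative errno */
	return can_add_rx_filter(dev, example_rx_cb, NULL, &filter);
}
```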
```c /* * */ #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/kernel.h> #include <zephyr/sys/__assert.h> #include <soc.h> #include <stm32_ll_rcc.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(can_stm32fd, CONFIG_CAN_LOG_LEVEL); #define DT_DRV_COMPAT st_stm32_fdcan /* * The STMicroelectronics STM32 FDCAN definitions correspond to those found in the * STMicroelectronics STM32G4 Series Reference manual (RM0440), Rev 7. * * This controller uses a Bosch M_CAN like register layout, but some registers are unimplemented, * some registers are mapped to other register offsets, and some registers have had their bit fields * remapped. * * Apart from the definitions below please note the following limitations: * - TEST register SVAL, TXBNS, PVAL, and TXBNP bits are not available. * - CCCR register VMM and UTSU bits are not available. * - TXBC register TFQS, NDTB, and TBSA fields are not available. */ /* Interrupt register */ #define CAN_STM32FD_IR_ARA BIT(23) #define CAN_STM32FD_IR_PED BIT(22) #define CAN_STM32FD_IR_PEA BIT(21) #define CAN_STM32FD_IR_WDI BIT(20) #define CAN_STM32FD_IR_BO BIT(19) #define CAN_STM32FD_IR_EW BIT(18) #define CAN_STM32FD_IR_EP BIT(17) #define CAN_STM32FD_IR_ELO BIT(16) #define CAN_STM32FD_IR_TOO BIT(15) #define CAN_STM32FD_IR_MRAF BIT(14) #define CAN_STM32FD_IR_TSW BIT(13) #define CAN_STM32FD_IR_TEFL BIT(12) #define CAN_STM32FD_IR_TEFF BIT(11) #define CAN_STM32FD_IR_TEFN BIT(10) #define CAN_STM32FD_IR_TFE BIT(9) #define CAN_STM32FD_IR_TCF BIT(8) #define CAN_STM32FD_IR_TC BIT(7) #define CAN_STM32FD_IR_HPM BIT(6) #define CAN_STM32FD_IR_RF1L BIT(5) #define CAN_STM32FD_IR_RF1F BIT(4) #define CAN_STM32FD_IR_RF1N BIT(3) #define CAN_STM32FD_IR_RF0L BIT(2) #define CAN_STM32FD_IR_RF0F BIT(1) #define CAN_STM32FD_IR_RF0N BIT(0) /* Interrupt Enable register */ #define CAN_STM32FD_IE_ARAE BIT(23) #define CAN_STM32FD_IE_PEDE BIT(22) #define CAN_STM32FD_IE_PEAE BIT(21) #define CAN_STM32FD_IE_WDIE BIT(20) #define CAN_STM32FD_IE_BOE BIT(19) #define CAN_STM32FD_IE_EWE BIT(18) #define CAN_STM32FD_IE_EPE BIT(17) #define CAN_STM32FD_IE_ELOE BIT(16) #define CAN_STM32FD_IE_TOOE BIT(15) #define CAN_STM32FD_IE_MRAFE BIT(14) #define CAN_STM32FD_IE_TSWE BIT(13) #define CAN_STM32FD_IE_TEFLE BIT(12) #define CAN_STM32FD_IE_TEFFE BIT(11) #define CAN_STM32FD_IE_TEFNE BIT(10) #define CAN_STM32FD_IE_TFEE BIT(9) #define CAN_STM32FD_IE_TCFE BIT(8) #define CAN_STM32FD_IE_TCE BIT(7) #define CAN_STM32FD_IE_HPME BIT(6) #define CAN_STM32FD_IE_RF1LE BIT(5) #define CAN_STM32FD_IE_RF1FE BIT(4) #define CAN_STM32FD_IE_RF1NE BIT(3) #define CAN_STM32FD_IE_RF0LE BIT(2) #define CAN_STM32FD_IE_RF0FE BIT(1) #define CAN_STM32FD_IE_RF0NE BIT(0) /* Interrupt Line Select register */ #define CAN_STM32FD_ILS_PERR BIT(6) #define CAN_STM32FD_ILS_BERR BIT(5) #define CAN_STM32FD_ILS_MISC BIT(4) #define CAN_STM32FD_ILS_TFERR BIT(3) #define CAN_STM32FD_ILS_SMSG BIT(2) #define CAN_STM32FD_ILS_RXFIFO1 BIT(1) #define CAN_STM32FD_ILS_RXFIFO0 BIT(0) /* Global filter configuration register */ #define CAN_STM32FD_RXGFC 0x080 #define CAN_STM32FD_RXGFC_LSE GENMASK(27, 24) #define CAN_STM32FD_RXGFC_LSS GENMASK(20, 16) #define CAN_STM32FD_RXGFC_F0OM BIT(9) #define CAN_STM32FD_RXGFC_F1OM BIT(8) #define CAN_STM32FD_RXGFC_ANFS GENMASK(5, 4) #define CAN_STM32FD_RXGFC_ANFE GENMASK(3, 2) #define CAN_STM32FD_RXGFC_RRFS BIT(1) #define 
CAN_STM32FD_RXGFC_RRFE BIT(0) /* Extended ID AND Mask register */ #define CAN_STM32FD_XIDAM 0x084 /* High Priority Message Status register */ #define CAN_STM32FD_HPMS 0x088 /* Rx FIFO 0 Status register */ #define CAN_STM32FD_RXF0S 0x090 /* Rx FIFO 0 Acknowledge register */ #define CAN_STM32FD_RXF0A 0x094 /* Rx FIFO 1 Status register */ #define CAN_STM32FD_RXF1S 0x098 /* Rx FIFO 1 Acknowledge register */ #define CAN_STM32FD_RXF1A 0x09C /* Tx Buffer Configuration register */ #define CAN_STM32FD_TXBC_TFQM BIT(24) /* Tx Buffer Request Pending register */ #define CAN_STM32FD_TXBRP 0x0C8 /* Tx Buffer Add Request register */ #define CAN_STM32FD_TXBAR 0x0CC /* Tx Buffer Cancellation Request register */ #define CAN_STM32FD_TXBCR 0x0D0 /* Tx Buffer Transmission Occurred register */ #define CAN_STM32FD_TXBTO 0x0D4 /* Tx Buffer Cancellation Finished register */ #define CAN_STM32FD_TXBCF 0x0D8 /* Tx Buffer Transmission Interrupt Enable register */ #define CAN_STM32FD_TXBTIE 0x0DC /* Tx Buffer Cancellation Finished Interrupt Enable register */ #define CAN_STM32FD_TXBCIE 0x0E0 /* Tx Event FIFO Status register */ #define CAN_STM32FD_TXEFS 0x0E4 /* Tx Event FIFO Acknowledge register */ #define CAN_STM32FD_TXEFA 0x0E8 /* Register address indicating unsupported register */ #define CAN_STM32FD_REGISTER_UNSUPPORTED UINT16_MAX /* This symbol takes the value 1 if one of the device instances */ /* is configured in dts with a domain clock */ #if STM32_DT_INST_DEV_DOMAIN_CLOCK_SUPPORT #define STM32_CANFD_DOMAIN_CLOCK_SUPPORT 1 #else #define STM32_CANFD_DOMAIN_CLOCK_SUPPORT 0 #endif struct can_stm32fd_config { mm_reg_t base; mem_addr_t mram; size_t pclk_len; const struct stm32_pclken *pclken; void (*config_irq)(void); const struct pinctrl_dev_config *pcfg; uint8_t clock_divider; }; static inline uint16_t can_stm32fd_remap_reg(uint16_t reg) { uint16_t remap; switch (reg) { case CAN_MCAN_SIDFC: __fallthrough; case CAN_MCAN_XIDFC: __fallthrough; case CAN_MCAN_NDAT1: __fallthrough; case CAN_MCAN_NDAT2: __fallthrough; case CAN_MCAN_RXF0C: __fallthrough; case CAN_MCAN_RXBC: __fallthrough; case CAN_MCAN_RXF1C: __fallthrough; case CAN_MCAN_RXESC: __fallthrough; case CAN_MCAN_TXESC: __fallthrough; case CAN_MCAN_TXEFC: __ASSERT_NO_MSG(false); remap = CAN_STM32FD_REGISTER_UNSUPPORTED; break; case CAN_MCAN_XIDAM: remap = CAN_STM32FD_XIDAM; break; case CAN_MCAN_RXF0S: remap = CAN_STM32FD_RXF0S; break; case CAN_MCAN_RXF0A: remap = CAN_STM32FD_RXF0A; break; case CAN_MCAN_RXF1S: remap = CAN_STM32FD_RXF1S; break; case CAN_MCAN_RXF1A: remap = CAN_STM32FD_RXF1A; break; case CAN_MCAN_TXBRP: remap = CAN_STM32FD_TXBRP; break; case CAN_MCAN_TXBAR: remap = CAN_STM32FD_TXBAR; break; case CAN_MCAN_TXBCR: remap = CAN_STM32FD_TXBCR; break; case CAN_MCAN_TXBTO: remap = CAN_STM32FD_TXBTO; break; case CAN_MCAN_TXBCF: remap = CAN_STM32FD_TXBCF; break; case CAN_MCAN_TXBTIE: remap = CAN_STM32FD_TXBTIE; break; case CAN_MCAN_TXBCIE: remap = CAN_STM32FD_TXBCIE; break; case CAN_MCAN_TXEFS: remap = CAN_STM32FD_TXEFS; break; case CAN_MCAN_TXEFA: remap = CAN_STM32FD_TXEFA; break; default: /* No register address remap needed */ remap = reg; break; }; return remap; } static int can_stm32fd_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct can_mcan_config *mcan_config = dev->config; const struct can_stm32fd_config *stm32fd_config = mcan_config->custom; uint16_t remap; uint32_t bits; int err; remap = can_stm32fd_remap_reg(reg); if (remap == CAN_STM32FD_REGISTER_UNSUPPORTED) { return -ENOTSUP; } err = 
can_mcan_sys_read_reg(stm32fd_config->base, remap, &bits); if (err != 0) { return err; } *val = 0U; switch (reg) { case CAN_MCAN_IR: __fallthrough; case CAN_MCAN_IE: /* Remap IR/IE bits, ignoring unsupported bits */ /* Group 1 map bits 23-16 (stm32fd) to 29-22 (mcan) */ *val |= ((bits & GENMASK(23, 16)) << 6); /* Group 2 map bits 15-11 (stm32fd) to 18-14 (mcan) */ *val |= ((bits & GENMASK(15, 11)) << 3); /* Group 3 map bits 10-4 (stm32fd) to 12-6 (mcan) */ *val |= ((bits & GENMASK(10, 4)) << 2); /* Group 4 map bits 3-1 (stm32fd) to 4-2 (mcan) */ *val |= ((bits & GENMASK(3, 1)) << 1); /* Group 5 map bits 0 (mcan) to 0 (stm32fd) */ *val |= ((bits & GENMASK(0, 0)) << 0); break; case CAN_MCAN_ILS: /* Only remap ILS groups used in can_mcan.c */ if ((bits & CAN_STM32FD_ILS_RXFIFO1) != 0U) { *val |= CAN_MCAN_ILS_RF1LL | CAN_MCAN_ILS_RF1FL | CAN_MCAN_ILS_RF1NL; } if ((bits & CAN_STM32FD_ILS_RXFIFO0) != 0U) { *val |= CAN_MCAN_ILS_RF0LL | CAN_MCAN_ILS_RF0FL | CAN_MCAN_ILS_RF0NL; } break; case CAN_MCAN_GFC: /* Map fields from RXGFC excluding STM32 FDCAN LSS and LSE fields */ *val = bits & (CAN_MCAN_GFC_ANFS | CAN_MCAN_GFC_ANFE | CAN_MCAN_GFC_RRFS | CAN_MCAN_GFC_RRFE); break; default: /* No field remap needed */ *val = bits; break; }; return 0; } static int can_stm32fd_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct can_mcan_config *mcan_config = dev->config; const struct can_stm32fd_config *stm32fd_config = mcan_config->custom; uint32_t bits = 0U; uint16_t remap; remap = can_stm32fd_remap_reg(reg); if (remap == CAN_STM32FD_REGISTER_UNSUPPORTED) { return -ENOTSUP; } switch (reg) { case CAN_MCAN_IR: __fallthrough; case CAN_MCAN_IE: /* Remap IR/IE bits, ignoring unsupported bits */ /* Group 1 map bits 29-22 (mcan) to 23-16 (stm32fd) */ bits |= ((val & GENMASK(29, 22)) >> 6); /* Group 2 map bits 18-14 (mcan) to 15-11 (stm32fd) */ bits |= ((val & GENMASK(18, 14)) >> 3); /* Group 3 map bits 12-6 (mcan) to 10-4 (stm32fd) */ bits |= ((val & GENMASK(12, 6)) >> 2); /* Group 4 map bits 4-2 (mcan) to 3-1 (stm32fd) */ bits |= ((val & GENMASK(4, 2)) >> 1); /* Group 5 map bits 0 (mcan) to 0 (stm32fd) */ bits |= ((val & GENMASK(0, 0)) >> 0); break; case CAN_MCAN_ILS: /* Only remap ILS groups used in can_mcan.c */ if ((val & (CAN_MCAN_ILS_RF1LL | CAN_MCAN_ILS_RF1FL | CAN_MCAN_ILS_RF1NL)) != 0U) { bits |= CAN_STM32FD_ILS_RXFIFO1; } if ((val & (CAN_MCAN_ILS_RF0LL | CAN_MCAN_ILS_RF0FL | CAN_MCAN_ILS_RF0NL)) != 0U) { bits |= CAN_STM32FD_ILS_RXFIFO0; } break; case CAN_MCAN_GFC: /* Map fields to RXGFC including STM32 FDCAN LSS and LSE fields */ bits |= FIELD_PREP(CAN_STM32FD_RXGFC_LSS, CONFIG_CAN_MAX_STD_ID_FILTER) | FIELD_PREP(CAN_STM32FD_RXGFC_LSE, CONFIG_CAN_MAX_EXT_ID_FILTER); bits |= val & (CAN_MCAN_GFC_ANFS | CAN_MCAN_GFC_ANFE | CAN_MCAN_GFC_RRFS | CAN_MCAN_GFC_RRFE); break; default: /* No field remap needed */ bits = val; break; }; return can_mcan_sys_write_reg(stm32fd_config->base, remap, bits); } static int can_stm32fd_read_mram(const struct device *dev, uint16_t offset, void *dst, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_stm32fd_config *stm32fd_config = mcan_config->custom; return can_mcan_sys_read_mram(stm32fd_config->mram, offset, dst, len); } static int can_stm32fd_write_mram(const struct device *dev, uint16_t offset, const void *src, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_stm32fd_config *stm32fd_config = mcan_config->custom; return can_mcan_sys_write_mram(stm32fd_config->mram, offset, 
src, len); } static int can_stm32fd_clear_mram(const struct device *dev, uint16_t offset, size_t len) { const struct can_mcan_config *mcan_config = dev->config; const struct can_stm32fd_config *stm32fd_config = mcan_config->custom; return can_mcan_sys_clear_mram(stm32fd_config->mram, offset, len); } static int can_stm32fd_get_core_clock(const struct device *dev, uint32_t *rate) { const uint32_t rate_tmp = LL_RCC_GetFDCANClockFreq(LL_RCC_FDCAN_CLKSOURCE); ARG_UNUSED(dev); if (rate_tmp == LL_RCC_PERIPH_FREQUENCY_NO) { LOG_ERR("Can't read core clock"); return -EIO; } if (FDCAN_CONFIG->CKDIV == 0) { *rate = rate_tmp; } else { *rate = rate_tmp / (FDCAN_CONFIG->CKDIV << 1); } return 0; } static int can_stm32fd_clock_enable(const struct device *dev) { int ret; const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32fd_config *stm32fd_cfg = mcan_cfg->custom; const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { return -ENODEV; } if (IS_ENABLED(STM32_CANFD_DOMAIN_CLOCK_SUPPORT) && (stm32fd_cfg->pclk_len > 1)) { ret = clock_control_configure(clk, (clock_control_subsys_t)&stm32fd_cfg->pclken[1], NULL); if (ret < 0) { LOG_ERR("Could not select can_stm32fd domain clock"); return ret; } } ret = clock_control_on(clk, (clock_control_subsys_t)&stm32fd_cfg->pclken[0]); if (ret < 0) { return ret; } if (stm32fd_cfg->clock_divider != 0) { can_mcan_enable_configuration_change(dev); FDCAN_CONFIG->CKDIV = stm32fd_cfg->clock_divider >> 1; } return 0; } static int can_stm32fd_init(const struct device *dev) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_stm32fd_config *stm32fd_cfg = mcan_cfg->custom; uint32_t rxgfc; int ret; /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(stm32fd_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("CAN pinctrl setup failed (%d)", ret); return ret; } ret = can_stm32fd_clock_enable(dev); if (ret < 0) { LOG_ERR("Could not turn on CAN clock (%d)", ret); return ret; } can_mcan_enable_configuration_change(dev); /* Setup STM32 FDCAN Global Filter Configuration register */ ret = can_mcan_read_reg(dev, CAN_STM32FD_RXGFC, &rxgfc); if (ret != 0) { return ret; } rxgfc |= FIELD_PREP(CAN_STM32FD_RXGFC_LSS, CONFIG_CAN_MAX_STD_ID_FILTER) | FIELD_PREP(CAN_STM32FD_RXGFC_LSE, CONFIG_CAN_MAX_EXT_ID_FILTER); ret = can_mcan_write_reg(dev, CAN_STM32FD_RXGFC, rxgfc); if (ret != 0) { return ret; } /* Setup STM32 FDCAN Tx buffer configuration register */ ret = can_mcan_write_reg(dev, CAN_MCAN_TXBC, CAN_STM32FD_TXBC_TFQM); if (ret != 0) { return ret; } ret = can_mcan_init(dev); if (ret != 0) { return ret; } stm32fd_cfg->config_irq(); return ret; } static const struct can_driver_api can_stm32fd_driver_api = { .get_capabilities = can_mcan_get_capabilities, .start = can_mcan_start, .stop = can_mcan_stop, .set_mode = can_mcan_set_mode, .set_timing = can_mcan_set_timing, .send = can_mcan_send, .add_rx_filter = can_mcan_add_rx_filter, .remove_rx_filter = can_mcan_remove_rx_filter, .get_state = can_mcan_get_state, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_mcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .get_core_clock = can_stm32fd_get_core_clock, .get_max_filters = can_mcan_get_max_filters, .set_state_change_callback = can_mcan_set_state_change_callback, .timing_min = CAN_MCAN_TIMING_MIN_INITIALIZER, .timing_max = CAN_MCAN_TIMING_MAX_INITIALIZER, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_mcan_set_timing_data, .timing_data_min = 
CAN_MCAN_TIMING_DATA_MIN_INITIALIZER, .timing_data_max = CAN_MCAN_TIMING_DATA_MAX_INITIALIZER, #endif /* CONFIG_CAN_FD_MODE */ }; static const struct can_mcan_ops can_stm32fd_ops = { .read_reg = can_stm32fd_read_reg, .write_reg = can_stm32fd_write_reg, .read_mram = can_stm32fd_read_mram, .write_mram = can_stm32fd_write_mram, .clear_mram = can_stm32fd_clear_mram, }; #define CAN_STM32FD_BUILD_ASSERT_MRAM_CFG(inst) \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_STD_FILTER_ELEMENTS(inst) == 28, \ "Standard filter elements must be 28"); \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_EXT_FILTER_ELEMENTS(inst) == 8, \ "Extended filter elements must be 8"); \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_RX_FIFO0_ELEMENTS(inst) == 3, \ "Rx FIFO 0 elements must be 3"); \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_RX_FIFO1_ELEMENTS(inst) == 3, \ "Rx FIFO 1 elements must be 3"); \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_RX_BUFFER_ELEMENTS(inst) == 0, \ "Rx Buffer elements must be 0"); \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_TX_EVENT_FIFO_ELEMENTS(inst) == 3, \ "Tx Event FIFO elements must be 3"); \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_TX_BUFFER_ELEMENTS(inst) == 3, \ "Tx Buffer elements must be 3"); #define CAN_STM32FD_IRQ_CFG_FUNCTION(inst) \ static void config_can_##inst##_irq(void) \ { \ LOG_DBG("Enable CAN" #inst " IRQ"); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, int0, irq), \ DT_INST_IRQ_BY_NAME(inst, int0, priority), \ can_mcan_line_0_isr, DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, int0, irq)); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, int1, irq), \ DT_INST_IRQ_BY_NAME(inst, int1, priority), \ can_mcan_line_1_isr, DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, int1, irq)); \ } #define CAN_STM32FD_CFG_INST(inst) \ BUILD_ASSERT(CAN_MCAN_DT_INST_MRAM_ELEMENTS_SIZE(inst) <= \ CAN_MCAN_DT_INST_MRAM_SIZE(inst), \ "Insufficient Message RAM size to hold elements"); \ \ PINCTRL_DT_INST_DEFINE(inst); \ CAN_MCAN_CALLBACKS_DEFINE(can_stm32fd_cbs_##inst, \ CAN_MCAN_DT_INST_MRAM_TX_BUFFER_ELEMENTS(inst), \ CONFIG_CAN_MAX_STD_ID_FILTER, \ CONFIG_CAN_MAX_EXT_ID_FILTER); \ \ static const struct stm32_pclken can_stm32fd_pclken_##inst[] = \ STM32_DT_INST_CLOCKS(inst); \ \ static const struct can_stm32fd_config can_stm32fd_cfg_##inst = { \ .base = CAN_MCAN_DT_INST_MCAN_ADDR(inst), \ .mram = CAN_MCAN_DT_INST_MRAM_ADDR(inst), \ .pclken = can_stm32fd_pclken_##inst, \ .pclk_len = DT_INST_NUM_CLOCKS(inst), \ .config_irq = config_can_##inst##_irq, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .clock_divider = DT_INST_PROP_OR(inst, clk_divider, 0) \ }; \ \ static const struct can_mcan_config can_mcan_cfg_##inst = \ CAN_MCAN_DT_CONFIG_INST_GET(inst, &can_stm32fd_cfg_##inst, \ &can_stm32fd_ops, \ &can_stm32fd_cbs_##inst); #define CAN_STM32FD_DATA_INST(inst) \ static struct can_mcan_data can_mcan_data_##inst = \ CAN_MCAN_DATA_INITIALIZER(NULL); #define CAN_STM32FD_DEVICE_INST(inst) \ CAN_DEVICE_DT_INST_DEFINE(inst, can_stm32fd_init, NULL, \ &can_mcan_data_##inst, &can_mcan_cfg_##inst, \ POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &can_stm32fd_driver_api); #define CAN_STM32FD_INST(inst) \ CAN_STM32FD_BUILD_ASSERT_MRAM_CFG(inst) \ CAN_STM32FD_IRQ_CFG_FUNCTION(inst) \ CAN_STM32FD_CFG_INST(inst) \ CAN_STM32FD_DATA_INST(inst) \ CAN_STM32FD_DEVICE_INST(inst) DT_INST_FOREACH_STATUS_OKAY(CAN_STM32FD_INST) ```
/content/code_sandbox/drivers/can/can_stm32_fdcan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,110
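The IR/IE handling in can_stm32fd_read_reg()/can_stm32fd_write_reg() above is a pair of inverse bit-group shifts between the STM32 FDCAN register layout and the standard M_CAN layout. The sketch below is only a host-side sanity check of that mapping: BIT()/GENMASK() are redefined locally and the helper names (mcan_to_stm32, stm32_to_mcan) are hypothetical, not part of the driver.

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for Zephyr's BIT()/GENMASK() macros (host build only). */
#define BIT(n)        (1UL << (n))
#define GENMASK(h, l) ((((1UL << ((h) - (l) + 1)) - 1)) << (l))

/* Pack the supported M_CAN IR/IE bits into the STM32 FDCAN layout. */
static uint32_t mcan_to_stm32(uint32_t val)
{
	uint32_t bits = 0;

	bits |= (val & GENMASK(29, 22)) >> 6; /* -> bits 23-16 */
	bits |= (val & GENMASK(18, 14)) >> 3; /* -> bits 15-11 */
	bits |= (val & GENMASK(12, 6)) >> 2;  /* -> bits 10-4 */
	bits |= (val & GENMASK(4, 2)) >> 1;   /* -> bits 3-1 */
	bits |= (val & BIT(0));               /* -> bit 0 */
	return bits;
}

/* Expand the STM32 FDCAN layout back to M_CAN bit positions. */
static uint32_t stm32_to_mcan(uint32_t bits)
{
	uint32_t val = 0;

	val |= (bits & GENMASK(23, 16)) << 6;
	val |= (bits & GENMASK(15, 11)) << 3;
	val |= (bits & GENMASK(10, 4)) << 2;
	val |= (bits & GENMASK(3, 1)) << 1;
	val |= (bits & BIT(0));
	return val;
}

int main(void)
{
	/* All M_CAN bits that have an STM32 FDCAN counterpart. */
	uint32_t supported = GENMASK(29, 22) | GENMASK(18, 14) |
			     GENMASK(12, 6) | GENMASK(4, 2) | BIT(0);
	uint32_t stm32 = mcan_to_stm32(supported);

	/* Round-tripping should print the same M_CAN value at both ends. */
	printf("mcan 0x%08x -> stm32 0x%08x -> mcan 0x%08x\n",
	       (unsigned int)supported, (unsigned int)stm32,
	       (unsigned int)stm32_to_mcan(stm32));
	return 0;
}
```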
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_CAN_SJA1000_PRIV_H_ #define ZEPHYR_DRIVERS_CAN_SJA1000_PRIV_H_ #include <zephyr/sys/util.h> /* SJA1000 register "CAN addresses", PeliCAN mode */ #define CAN_SJA1000_MOD (0U) #define CAN_SJA1000_CMR (1U) #define CAN_SJA1000_SR (2U) #define CAN_SJA1000_IR (3U) #define CAN_SJA1000_IER (4U) #define CAN_SJA1000_BTR0 (6U) #define CAN_SJA1000_BTR1 (7U) #define CAN_SJA1000_OCR (8U) #define CAN_SJA1000_ALC (11U) #define CAN_SJA1000_ECC (12U) #define CAN_SJA1000_EWLR (13U) #define CAN_SJA1000_RXERR (14U) #define CAN_SJA1000_TXERR (15U) /* Reset Mode access (acceptance codes/masks) */ #define CAN_SJA1000_ACR0 (16U) #define CAN_SJA1000_ACR1 (17U) #define CAN_SJA1000_ACR2 (18U) #define CAN_SJA1000_ACR3 (19U) #define CAN_SJA1000_AMR0 (20U) #define CAN_SJA1000_AMR1 (21U) #define CAN_SJA1000_AMR2 (22U) #define CAN_SJA1000_AMR3 (23U) /* Operation Mode access (RX/TX SFF/EFF frame) */ #define CAN_SJA1000_FRAME_INFO (16U) #define CAN_SJA1000_XFF_ID1 (17U) #define CAN_SJA1000_XFF_ID2 (18U) #define CAN_SJA1000_EFF_ID3 (19U) #define CAN_SJA1000_EFF_ID4 (20U) #define CAN_SJA1000_SFF_DATA (19U) #define CAN_SJA1000_EFF_DATA (21U) #define CAN_SJA1000_RMC (29U) #define CAN_SJA1000_RBSA (30U) #define CAN_SJA1000_CDR (31U) /* Mode register (MOD) bits */ #define CAN_SJA1000_MOD_RM BIT(0) #define CAN_SJA1000_MOD_LOM BIT(1) #define CAN_SJA1000_MOD_STM BIT(2) #define CAN_SJA1000_MOD_AFM BIT(3) #define CAN_SJA1000_MOD_SM BIT(4) /* Command Register (CMR) bits */ #define CAN_SJA1000_CMR_TR BIT(0) #define CAN_SJA1000_CMR_AT BIT(1) #define CAN_SJA1000_CMR_RRB BIT(2) #define CAN_SJA1000_CMR_CDO BIT(3) #define CAN_SJA1000_CMR_SRR BIT(4) /* Status Register (SR) bits */ #define CAN_SJA1000_SR_RBS BIT(0) #define CAN_SJA1000_SR_DOS BIT(1) #define CAN_SJA1000_SR_TBS BIT(2) #define CAN_SJA1000_SR_TCS BIT(3) #define CAN_SJA1000_SR_RS BIT(4) #define CAN_SJA1000_SR_TS BIT(5) #define CAN_SJA1000_SR_ES BIT(6) #define CAN_SJA1000_SR_BS BIT(7) /* Interrupt Register (IR) bits */ #define CAN_SJA1000_IR_RI BIT(0) #define CAN_SJA1000_IR_TI BIT(1) #define CAN_SJA1000_IR_EI BIT(2) #define CAN_SJA1000_IR_DOI BIT(3) #define CAN_SJA1000_IR_WUI BIT(4) #define CAN_SJA1000_IR_EPI BIT(5) #define CAN_SJA1000_IR_ALI BIT(6) #define CAN_SJA1000_IR_BEI BIT(7) /* Interrupt Enable Register (IER) bits */ #define CAN_SJA1000_IER_RIE BIT(0) #define CAN_SJA1000_IER_TIE BIT(1) #define CAN_SJA1000_IER_EIE BIT(2) #define CAN_SJA1000_IER_DOIE BIT(3) #define CAN_SJA1000_IER_WUIE BIT(4) #define CAN_SJA1000_IER_EPIE BIT(5) #define CAN_SJA1000_IER_ALIE BIT(6) #define CAN_SJA1000_IER_BEIE BIT(7) /* Bus Timing Register 0 (BTR0) bits */ #define CAN_SJA1000_BTR0_BRP_MASK GENMASK(5, 0) #define CAN_SJA1000_BTR0_SJW_MASK GENMASK(7, 6) #define CAN_SJA1000_BTR0_BRP_PREP(brp) FIELD_PREP(CAN_SJA1000_BTR0_BRP_MASK, brp) #define CAN_SJA1000_BTR0_SJW_PREP(sjw) FIELD_PREP(CAN_SJA1000_BTR0_SJW_MASK, sjw) /* Bus Timing Register 1 (BTR1) bits */ #define CAN_SJA1000_BTR1_TSEG1_MASK GENMASK(3, 0) #define CAN_SJA1000_BTR1_TSEG2_MASK GENMASK(6, 4) #define CAN_SJA1000_BTR1_SAM BIT(7) #define CAN_SJA1000_BTR1_TSEG1_PREP(tseg1) FIELD_PREP(CAN_SJA1000_BTR1_TSEG1_MASK, tseg1) #define CAN_SJA1000_BTR1_TSEG2_PREP(tseg2) FIELD_PREP(CAN_SJA1000_BTR1_TSEG2_MASK, tseg2) /* Error Code Capture register (ECC) bits */ #define CAN_SJA1000_ECC_SEG_MASK GENMASK(4, 0) #define CAN_SJA1000_ECC_DIR_MASK BIT(5) #define CAN_SJA1000_ECC_ERRC_MASK GENMASK(7, 6) #define CAN_SJA1000_ECC_SEG_SOF FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 3U) #define CAN_SJA1000_ECC_SEG_ID28_TO_ID21 
FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 2U) #define CAN_SJA1000_ECC_SEG_ID20_TO_ID18 FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 6U) #define CAN_SJA1000_ECC_SEG_SRTR FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 4U) #define CAN_SJA1000_ECC_SEG_IDE FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 5U) #define CAN_SJA1000_ECC_SEG_ID17_TO_ID13 FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 7U) #define CAN_SJA1000_ECC_SEG_ID12_TO_ID5 FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 15U) #define CAN_SJA1000_ECC_SEG_ID4_TO_ID0 FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 14U) #define CAN_SJA1000_ECC_SEG_RTR FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 12U) #define CAN_SJA1000_ECC_SEG_RES1 FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 13U) #define CAN_SJA1000_ECC_SEG_RES0 FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 9U) #define CAN_SJA1000_ECC_SEG_DLC FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 11U) #define CAN_SJA1000_ECC_SEG_DATA FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 10U) #define CAN_SJA1000_ECC_SEG_CRC_SEQ FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 8U) #define CAN_SJA1000_ECC_SEG_CRC_DELIM FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 24U) #define CAN_SJA1000_ECC_SEG_ACK_SLOT FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 25U) #define CAN_SJA1000_ECC_SEG_ACK_DELIM FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 27U) #define CAN_SJA1000_ECC_SEG_EOF FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 26U) #define CAN_SJA1000_ECC_SEG_INTERMISSION FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 18U) #define CAN_SJA1000_ECC_SEG_ACTIVE_ERROR_FLAG FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 17U) #define CAN_SJA1000_ECC_SEG_PASSIVE_ERROR_FLAG FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 22U) #define CAN_SJA1000_ECC_SEG_TOLERATE_DOM_BITS FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 19U) #define CAN_SJA1000_ECC_SEG_ERROR_DELIM FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 23U) #define CAN_SJA1000_ECC_SEG_OVERLOAD_FLAG FIELD_PREP(CAN_SJA1000_ECC_SEG_MASK, 28U) #define CAN_SJA1000_ECC_DIR_TX FIELD_PREP(CAN_SJA1000_ECC_DIR_MASK, 0U) #define CAN_SJA1000_ECC_DIR_RX FIELD_PREP(CAN_SJA1000_ECC_DIR_MASK, 1U) #define CAN_SJA1000_ECC_ERRC_BIT_ERROR FIELD_PREP(CAN_SJA1000_ECC_ERRC_MASK, 0U) #define CAN_SJA1000_ECC_ERRC_FORM_ERROR FIELD_PREP(CAN_SJA1000_ECC_ERRC_MASK, 1U) #define CAN_SJA1000_ECC_ERRC_STUFF_ERROR FIELD_PREP(CAN_SJA1000_ECC_ERRC_MASK, 2U) #define CAN_SJA1000_ECC_ERRC_OTHER_ERROR FIELD_PREP(CAN_SJA1000_ECC_ERRC_MASK, 3U) /* RX/TX SFF/EFF Frame Information bits */ #define CAN_SJA1000_FRAME_INFO_DLC_MASK GENMASK(3, 0) #define CAN_SJA1000_FRAME_INFO_RTR BIT(6) #define CAN_SJA1000_FRAME_INFO_FF BIT(7) #define CAN_SJA1000_FRAME_INFO_DLC_PREP(dlc) FIELD_PREP(CAN_SJA1000_FRAME_INFO_DLC_MASK, dlc) #define CAN_SJA1000_FRAME_INFO_DLC_GET(info) FIELD_GET(CAN_SJA1000_FRAME_INFO_DLC_MASK, info) #endif /* ZEPHYR_DRIVERS_CAN_SJA1000_PRIV_H_ */ ```
/content/code_sandbox/drivers/can/can_sja1000_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,466
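As a worked example of the BTR0/BTR1 layout defined above (BRP in bits 5-0 and SJW in bits 7-6 of BTR0; TSEG1 in bits 3-0 and TSEG2 in bits 6-4 of BTR1), the following host-side sketch packs a set of made-up field values using local shift/mask helpers instead of Zephyr's FIELD_PREP(). The field values themselves are hypothetical; a real driver derives them from a can_timing structure.

```c
#include <stdint.h>
#include <stdio.h>

/* Local equivalents of the masks in can_sja1000_priv.h (host build only). */
#define BTR0_BRP_MASK    0x3FU /* bits 5..0 */
#define BTR0_SJW_SHIFT   6U    /* bits 7..6 */
#define BTR1_TSEG1_MASK  0x0FU /* bits 3..0 */
#define BTR1_TSEG2_SHIFT 4U    /* bits 6..4 */

int main(void)
{
	/* Hypothetical register field values (typically the timing value minus one). */
	uint8_t brp = 3U;
	uint8_t sjw = 0U;
	uint8_t tseg1 = 12U;
	uint8_t tseg2 = 1U;

	uint8_t btr0 = (uint8_t)((sjw << BTR0_SJW_SHIFT) | (brp & BTR0_BRP_MASK));
	uint8_t btr1 = (uint8_t)((tseg2 << BTR1_TSEG2_SHIFT) | (tseg1 & BTR1_TSEG1_MASK));

	/* Prints BTR0=0x03 BTR1=0x1c for the values above. */
	printf("BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
	return 0;
}
```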
```unknown
# CAN configuration options

#
# CAN options
#
menuconfig CAN
	bool "Controller Area Network (CAN) drivers"
	help
	  Enable CAN Driver Configuration

if CAN

module = CAN
module-str = CAN
source "subsys/logging/Kconfig.template.log_config"

config CAN_INIT_PRIORITY
	int "CAN driver init priority"
	default 80
	help
	  CAN driver device initialization priority.

config CAN_DEFAULT_BITRATE
	int "Default CAN bitrate"
	default 125000
	help
	  Default initial CAN bitrate in bits/s. This can be overridden per CAN
	  controller using the "bitrate" devicetree property.

config CAN_DEFAULT_BITRATE_DATA
	int "Default CAN data phase bitrate"
	default 1000000
	depends on CAN_FD_MODE
	help
	  Default initial CAN data phase bitrate in bits/s. This can be
	  overridden per CAN controller using the "bitrate-data" devicetree
	  property.

config CAN_SHELL
	bool "CAN shell"
	depends on SHELL
	select POLL
	help
	  Enable CAN Shell for testing.

if CAN_SHELL

config CAN_SHELL_TX_QUEUE_SIZE
	int "CAN shell transmit queue size"
	default 5
	range 1 256
	help
	  Maximum number of events in the CAN shell transmit queue.

config CAN_SHELL_RX_QUEUE_SIZE
	int "CAN shell receive queue size"
	default 5
	range 1 256
	help
	  Maximum number of CAN frames in the CAN shell receive queue.

config CAN_SHELL_SCRIPTING_FRIENDLY
	bool "Produce scripting-friendly output"
	help
	  Enable scripting-friendly/machine-readable output from the CAN shell.
	  Enabling this bypasses the shell when printing certain outputs to
	  ensure the output is not interleaved with the shell prompt.

endif # CAN_SHELL

config CAN_STATS
	bool "CAN controller device statistics"
	depends on STATS
	help
	  Enable CAN controller device statistics.

config CAN_ACCEPT_RTR
	bool "Accept Remote Transmission Requests (RTR) frames"
	help
	  Accept incoming Remote Transmission Request (RTR) frames matching CAN
	  RX filters. Unless enabled, all incoming Remote Transmission Request
	  (RTR) frames are rejected at the driver level.

config CAN_FD_MODE
	bool "CAN FD support"
	help
	  Enable CAN FD support. Not all CAN controllers support CAN FD.

config CAN_MANUAL_RECOVERY_MODE
	bool "Manual bus-off recovery support"
	help
	  Enable support for manual (non-automatic) recovery from bus-off
	  state. Not all CAN controllers support manual recovery mode.

config CAN_RX_TIMESTAMP
	bool "Receiving timestamps"
	help
	  This option enables a timestamp value of the CAN free running timer.
	  The value is incremented every bit time and starts when the
	  controller is initialized. Not all CAN controllers support
	  timestamps.

config CAN_QEMU_IFACE_NAME
	string "SocketCAN interface name for QEMU"
	default ""
	depends on QEMU_TARGET
	help
	  The SocketCAN interface name for QEMU. This value, if set, is given
	  as "if" parameter to the "-object can-host-socketcan" qemu command
	  line option. The CAN interface must be configured before starting
	  QEMU.

source "drivers/can/Kconfig.sam"
source "drivers/can/Kconfig.sam0"
source "drivers/can/Kconfig.stm32"
source "drivers/can/Kconfig.mcux"
source "drivers/can/Kconfig.mcp2515"
source "drivers/can/Kconfig.mcan"
source "drivers/can/Kconfig.rcar"
source "drivers/can/Kconfig.numaker"
source "drivers/can/Kconfig.loopback"
source "drivers/can/Kconfig.native_linux"
source "drivers/can/Kconfig.sja1000"
source "drivers/can/Kconfig.esp32"
source "drivers/can/Kconfig.kvaser"
source "drivers/can/Kconfig.fake"
source "drivers/can/Kconfig.nxp_s32"
source "drivers/can/Kconfig.tcan4x5x"
source "drivers/can/Kconfig.mcp251xfd"
source "drivers/can/Kconfig.xmc4xxx"
source "drivers/can/Kconfig.nrf"
source "drivers/can/transceiver/Kconfig"

endif # CAN
```
/content/code_sandbox/drivers/can/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
882
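For context, a minimal Zephyr application consuming the options above might look like the sketch below. It assumes the board devicetree provides a zephyr,canbus chosen node and that CONFIG_CAN is enabled; the error handling is illustrative rather than taken from any sample.

```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/can.h>

int main(void)
{
	/* Assumes the board devicetree defines a zephyr,canbus chosen node. */
	const struct device *dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));

	if (!device_is_ready(dev)) {
		return -ENODEV;
	}

#ifdef CONFIG_CAN_FD_MODE
	/* Only attempt FD mode when the option above is enabled; must be done
	 * while the controller is still stopped.
	 */
	(void)can_set_mode(dev, CAN_MODE_FD);
#endif

	/* The controller comes up with CONFIG_CAN_DEFAULT_BITRATE unless the
	 * "bitrate" devicetree property overrides it.
	 */
	return can_start(dev);
}
```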
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_canfd #include <zephyr/drivers/reset.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/can/can_mcan.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/logging/log.h> #include <soc.h> #include <NuMicro.h> LOG_MODULE_REGISTER(can_numaker, CONFIG_CAN_LOG_LEVEL); /* CANFD Clock Source Selection */ #define NUMAKER_CANFD_CLKSEL_HXT 0 #define NUMAKER_CANFD_CLKSEL_PLL_DIV2 1 #define NUMAKER_CANFD_CLKSEL_HCLK 2 #define NUMAKER_CANFD_CLKSEL_HIRC 3 /* Implementation notes * 1. Use Bosch M_CAN driver (m_can) as backend * 2. Need to modify can_numaker_get_core_clock() for new SOC support */ struct can_numaker_config { mm_reg_t canfd_base; mem_addr_t mrba; mem_addr_t mram; const struct reset_dt_spec reset; uint32_t clk_modidx; uint32_t clk_src; uint32_t clk_div; const struct device *clk_dev; void (*irq_config_func)(const struct device *dev); const struct pinctrl_dev_config *pincfg; }; static int can_numaker_get_core_clock(const struct device *dev, uint32_t *rate) { const struct can_mcan_config *mcan_config = dev->config; const struct can_numaker_config *config = mcan_config->custom; uint32_t clksrc_rate_idx; uint32_t clkdiv_divider; /* Module clock source rate */ clksrc_rate_idx = CLK_GetModuleClockSource(config->clk_modidx); /* Module clock divider */ clkdiv_divider = CLK_GetModuleClockDivider(config->clk_modidx) + 1; switch (clksrc_rate_idx) { case NUMAKER_CANFD_CLKSEL_HXT: *rate = __HXT / clkdiv_divider; break; case NUMAKER_CANFD_CLKSEL_PLL_DIV2: *rate = (CLK_GetPLLClockFreq() / 2) / clkdiv_divider; break; case NUMAKER_CANFD_CLKSEL_HCLK: *rate = CLK_GetHCLKFreq() / clkdiv_divider; break; case NUMAKER_CANFD_CLKSEL_HIRC: *rate = __HIRC / clkdiv_divider; break; default: LOG_ERR("Invalid clock source rate index"); return -EIO; } LOG_DBG("Clock rate index/divider: %d/%d", clksrc_rate_idx, clkdiv_divider); return 0; } static inline int can_numaker_init_unlocked(const struct device *dev) { const struct can_mcan_config *mcan_config = dev->config; const struct can_numaker_config *config = mcan_config->custom; struct numaker_scc_subsys scc_subsys; int rc; memset(&scc_subsys, 0x00, sizeof(scc_subsys)); scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC; scc_subsys.pcc.clk_modidx = config->clk_modidx; scc_subsys.pcc.clk_src = config->clk_src; scc_subsys.pcc.clk_div = config->clk_div; /* To enable clock */ rc = clock_control_on(config->clk_dev, (clock_control_subsys_t) &scc_subsys); if (rc < 0) { return rc; } /* To set module clock */ rc = clock_control_configure(config->clk_dev, (clock_control_subsys_t)&scc_subsys, NULL); if (rc < 0) { return rc; } /* Configure pinmux (NuMaker's SYS MFP) */ rc = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (rc < 0) { return rc; } /* Reset CAN to default state, same as BSP's SYS_ResetModule(id_rst) */ reset_line_toggle_dt(&config->reset); config->irq_config_func(dev); rc = can_mcan_configure_mram(dev, config->mrba, config->mram); if (rc != 0) { return rc; } rc = can_mcan_init(dev); if (rc < 0) { LOG_ERR("Failed to initialize mcan: %d", rc); return rc; } #if CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG uint32_t rate; rc = can_numaker_get_core_clock(dev, &rate); if (rc < 0) { return rc; } LOG_DBG("CAN core clock: %d", rate); #endif return rc; } static int can_numaker_init(const struct device *dev) { const struct can_mcan_config *mcan_config = dev->config; const struct can_numaker_config *config = 
mcan_config->custom; int rc; if (!device_is_ready(config->reset.dev)) { LOG_ERR("reset controller not ready"); return -ENODEV; } if (!device_is_ready(config->clk_dev)) { LOG_ERR("clock controller not ready"); return -ENODEV; } SYS_UnlockReg(); rc = can_numaker_init_unlocked(dev); SYS_LockReg(); return rc; } static const struct can_driver_api can_numaker_driver_api = { .get_capabilities = can_mcan_get_capabilities, .start = can_mcan_start, .stop = can_mcan_stop, .set_mode = can_mcan_set_mode, .set_timing = can_mcan_set_timing, .send = can_mcan_send, .add_rx_filter = can_mcan_add_rx_filter, .remove_rx_filter = can_mcan_remove_rx_filter, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_mcan_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .get_state = can_mcan_get_state, .set_state_change_callback = can_mcan_set_state_change_callback, .get_core_clock = can_numaker_get_core_clock, .get_max_filters = can_mcan_get_max_filters, .timing_min = CAN_MCAN_TIMING_MIN_INITIALIZER, .timing_max = CAN_MCAN_TIMING_MAX_INITIALIZER, #ifdef CONFIG_CAN_FD_MODE .set_timing_data = can_mcan_set_timing_data, .timing_data_min = CAN_MCAN_TIMING_DATA_MIN_INITIALIZER, .timing_data_max = CAN_MCAN_TIMING_DATA_MAX_INITIALIZER, #endif /* CONFIG_CAN_FD_MODE */ }; static int can_numaker_read_reg(const struct device *dev, uint16_t reg, uint32_t *val) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_numaker_config *numaker_cfg = mcan_cfg->custom; return can_mcan_sys_read_reg(numaker_cfg->canfd_base, reg, val); } static int can_numaker_write_reg(const struct device *dev, uint16_t reg, uint32_t val) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_numaker_config *numaker_cfg = mcan_cfg->custom; return can_mcan_sys_write_reg(numaker_cfg->canfd_base, reg, val); } static int can_numaker_read_mram(const struct device *dev, uint16_t offset, void *dst, size_t len) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_numaker_config *numaker_cfg = mcan_cfg->custom; return can_mcan_sys_read_mram(numaker_cfg->mram, offset, dst, len); } static int can_numaker_write_mram(const struct device *dev, uint16_t offset, const void *src, size_t len) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_numaker_config *numaker_cfg = mcan_cfg->custom; return can_mcan_sys_write_mram(numaker_cfg->mram, offset, src, len); } static int can_numaker_clear_mram(const struct device *dev, uint16_t offset, size_t len) { const struct can_mcan_config *mcan_cfg = dev->config; const struct can_numaker_config *numaker_cfg = mcan_cfg->custom; return can_mcan_sys_clear_mram(numaker_cfg->mram, offset, len); } static const struct can_mcan_ops can_numaker_ops = { .read_reg = can_numaker_read_reg, .write_reg = can_numaker_write_reg, .read_mram = can_numaker_read_mram, .write_mram = can_numaker_write_mram, .clear_mram = can_numaker_clear_mram, }; #define NUMAKER_CLKCTRL_DEV_INIT(inst) \ .clk_dev = DEVICE_DT_GET(DT_PARENT(DT_INST_CLOCKS_CTLR(inst))), #define NUMAKER_PINCTRL_DEFINE(inst) \ PINCTRL_DT_INST_DEFINE(inst); #define NUMAKER_PINCTRL_INIT(inst) \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), #define CAN_NUMAKER_INIT(inst) \ NUMAKER_PINCTRL_DEFINE(inst); \ CAN_MCAN_DT_INST_CALLBACKS_DEFINE(inst, can_numaker_cbs_##inst); \ \ static void can_numaker_irq_config_func_##inst(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, int0, irq), \ DT_INST_IRQ_BY_NAME(inst, int0, priority), \ can_mcan_line_0_isr, \ DEVICE_DT_INST_GET(inst), \ 0); \ 
irq_enable(DT_INST_IRQ_BY_NAME(inst, int0, irq)); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, int1, irq), \ DT_INST_IRQ_BY_NAME(inst, int1, priority), \ can_mcan_line_1_isr, \ DEVICE_DT_INST_GET(inst), \ 0); \ irq_enable(DT_INST_IRQ_BY_NAME(inst, int1, irq)); \ } \ \ static const struct can_numaker_config can_numaker_config_##inst = { \ .canfd_base = CAN_MCAN_DT_INST_MCAN_ADDR(inst), \ .mrba = CAN_MCAN_DT_INST_MRBA(inst), \ .mram = CAN_MCAN_DT_INST_MRAM_ADDR(inst), \ .reset = RESET_DT_SPEC_INST_GET(inst), \ .clk_modidx = DT_INST_CLOCKS_CELL(inst, clock_module_index), \ .clk_src = DT_INST_CLOCKS_CELL(inst, clock_source), \ .clk_div = DT_INST_CLOCKS_CELL(inst, clock_divider), \ NUMAKER_CLKCTRL_DEV_INIT(inst) \ .irq_config_func = can_numaker_irq_config_func_##inst, \ NUMAKER_PINCTRL_INIT(inst) \ }; \ \ static const struct can_mcan_config can_mcan_config_##inst = \ CAN_MCAN_DT_CONFIG_INST_GET(inst, \ &can_numaker_config_##inst, \ &can_numaker_ops, \ &can_numaker_cbs_##inst); \ \ static uint32_t can_numaker_data_##inst; \ \ static struct can_mcan_data can_mcan_data_##inst = \ CAN_MCAN_DATA_INITIALIZER(&can_numaker_data_ ## inst); \ \ CAN_DEVICE_DT_INST_DEFINE(inst, \ can_numaker_init, \ NULL, \ &can_mcan_data_##inst, \ &can_mcan_config_##inst, \ POST_KERNEL, \ CONFIG_CAN_INIT_PRIORITY, \ &can_numaker_driver_api); \ DT_INST_FOREACH_STATUS_OKAY(CAN_NUMAKER_INIT); ```
/content/code_sandbox/drivers/can/can_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,593
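The clock math in can_numaker_get_core_clock() above reduces to dividing the selected source frequency by the hardware divider field plus one. A standalone sketch of just that arithmetic follows; the source frequencies are placeholder values, not read from a real NuMaker part.

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder source frequencies for illustration only. */
#define HXT_HZ  12000000U
#define HIRC_HZ 48000000U

/* The register divider field encodes the divider minus one, as in the driver. */
static uint32_t canfd_core_clock(uint32_t src_hz, uint32_t clkdiv_field)
{
	return src_hz / (clkdiv_field + 1U);
}

int main(void)
{
	printf("HXT  /1: %u Hz\n", (unsigned int)canfd_core_clock(HXT_HZ, 0U));
	printf("HIRC /4: %u Hz\n", (unsigned int)canfd_core_clock(HIRC_HZ, 3U));
	return 0;
}
```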
```c /* * */ #include <stdlib.h> #include <stdio.h> #include <zephyr/device.h> #include <zephyr/drivers/can.h> #include <zephyr/logging/log.h> #include <zephyr/shell/shell.h> LOG_MODULE_REGISTER(can_shell, CONFIG_CAN_LOG_LEVEL); struct can_shell_tx_event { unsigned int frame_no; int error; }; struct can_shell_rx_event { struct can_frame frame; const struct device *dev; }; struct can_shell_mode_mapping { const char *name; can_mode_t mode; }; #define CAN_SHELL_MODE_MAPPING(_name, _mode) { .name = _name, .mode = _mode } static const struct can_shell_mode_mapping can_shell_mode_map[] = { /* zephyr-keep-sorted-start */ CAN_SHELL_MODE_MAPPING("fd", CAN_MODE_FD), CAN_SHELL_MODE_MAPPING("listen-only", CAN_MODE_LISTENONLY), CAN_SHELL_MODE_MAPPING("loopback", CAN_MODE_LOOPBACK), CAN_SHELL_MODE_MAPPING("manual-recovery", CAN_MODE_MANUAL_RECOVERY), CAN_SHELL_MODE_MAPPING("normal", CAN_MODE_NORMAL), CAN_SHELL_MODE_MAPPING("one-shot", CAN_MODE_ONE_SHOT), CAN_SHELL_MODE_MAPPING("triple-sampling", CAN_MODE_3_SAMPLES), /* zephyr-keep-sorted-stop */ }; K_MSGQ_DEFINE(can_shell_tx_msgq, sizeof(struct can_shell_tx_event), CONFIG_CAN_SHELL_TX_QUEUE_SIZE, 4); const struct shell *can_shell_tx_msgq_sh; static struct k_work_poll can_shell_tx_msgq_work; static struct k_poll_event can_shell_tx_msgq_events[] = { K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_MSGQ_DATA_AVAILABLE, K_POLL_MODE_NOTIFY_ONLY, &can_shell_tx_msgq, 0) }; K_MSGQ_DEFINE(can_shell_rx_msgq, sizeof(struct can_shell_rx_event), CONFIG_CAN_SHELL_RX_QUEUE_SIZE, 4); const struct shell *can_shell_rx_msgq_sh; static struct k_work_poll can_shell_rx_msgq_work; static struct k_poll_event can_shell_rx_msgq_events[] = { K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_MSGQ_DATA_AVAILABLE, K_POLL_MODE_NOTIFY_ONLY, &can_shell_rx_msgq, 0) }; /* Forward declarations */ static void can_shell_tx_msgq_triggered_work_handler(struct k_work *work); static void can_shell_rx_msgq_triggered_work_handler(struct k_work *work); #ifdef CONFIG_CAN_SHELL_SCRIPTING_FRIENDLY static void can_shell_dummy_bypass_cb(const struct shell *sh, uint8_t *data, size_t len) { ARG_UNUSED(sh); ARG_UNUSED(data); ARG_UNUSED(len); } #endif /* CONFIG_CAN_SHELL_SCRIPTING_FRIENDLY */ static void can_shell_print_frame(const struct shell *sh, const struct device *dev, const struct can_frame *frame) { uint8_t nbytes = can_dlc_to_bytes(frame->dlc); int i; #ifdef CONFIG_CAN_SHELL_SCRIPTING_FRIENDLY /* Bypass the shell to avoid breaking up the line containing the frame */ shell_set_bypass(sh, can_shell_dummy_bypass_cb); #endif /* CONFIG_CAN_SHELL_SCRIPTING_FRIENDLY */ #ifdef CONFIG_CAN_RX_TIMESTAMP /* Timestamp */ shell_fprintf(sh, SHELL_NORMAL, "(%05d) ", frame->timestamp); #endif /* CONFIG_CAN_RX_TIMESTAMP */ shell_fprintf(sh, SHELL_NORMAL, "%s ", dev->name); #ifdef CONFIG_CAN_FD_MODE /* Flags */ shell_fprintf(sh, SHELL_NORMAL, "%c%c ", (frame->flags & CAN_FRAME_BRS) == 0 ? '-' : 'B', (frame->flags & CAN_FRAME_ESI) == 0 ? '-' : 'P'); #endif /* CONFIG_CAN_FD_MODE */ /* CAN ID */ shell_fprintf(sh, SHELL_NORMAL, "%*s%0*x ", (frame->flags & CAN_FRAME_IDE) != 0 ? 0 : 5, "", (frame->flags & CAN_FRAME_IDE) != 0 ? 8 : 3, (frame->flags & CAN_FRAME_IDE) != 0 ? frame->id & CAN_EXT_ID_MASK : frame->id & CAN_STD_ID_MASK); /* DLC as number of bytes */ shell_fprintf(sh, SHELL_NORMAL, "%s[%0*d] ", (frame->flags & CAN_FRAME_FDF) != 0 ? "" : " ", (frame->flags & CAN_FRAME_FDF) != 0 ? 
2 : 1, nbytes); /* Data payload */ if ((frame->flags & CAN_FRAME_RTR) != 0) { shell_fprintf(sh, SHELL_NORMAL, "remote transmission request"); } else { for (i = 0; i < nbytes; i++) { shell_fprintf(sh, SHELL_NORMAL, "%02x ", frame->data[i]); } } shell_fprintf(sh, SHELL_NORMAL, "\n"); #ifdef CONFIG_CAN_SHELL_SCRIPTING_FRIENDLY shell_set_bypass(sh, NULL); #endif /* CONFIG_CAN_SHELL_SCRIPTING_FRIENDLY */ } static int can_shell_tx_msgq_poll_submit(const struct shell *sh) { int err; if (can_shell_tx_msgq_sh == NULL) { can_shell_tx_msgq_sh = sh; k_work_poll_init(&can_shell_tx_msgq_work, can_shell_tx_msgq_triggered_work_handler); } err = k_work_poll_submit(&can_shell_tx_msgq_work, can_shell_tx_msgq_events, ARRAY_SIZE(can_shell_tx_msgq_events), K_FOREVER); if (err != 0) { shell_error(can_shell_tx_msgq_sh, "failed to submit tx msgq polling (err %d)", err); } return err; } static void can_shell_tx_msgq_triggered_work_handler(struct k_work *work) { struct can_shell_tx_event event; while (k_msgq_get(&can_shell_tx_msgq, &event, K_NO_WAIT) == 0) { if (event.error == 0) { shell_print(can_shell_tx_msgq_sh, "CAN frame #%u successfully sent", event.frame_no); } else { shell_error(can_shell_tx_msgq_sh, "failed to send CAN frame #%u (err %d)", event.frame_no, event.error); } } (void)can_shell_tx_msgq_poll_submit(can_shell_tx_msgq_sh); } static void can_shell_tx_callback(const struct device *dev, int error, void *user_data) { struct can_shell_tx_event event; int err; ARG_UNUSED(dev); event.frame_no = POINTER_TO_UINT(user_data); event.error = error; err = k_msgq_put(&can_shell_tx_msgq, &event, K_NO_WAIT); if (err != 0) { LOG_ERR("CAN shell tx event queue full"); } } static void can_shell_rx_callback(const struct device *dev, struct can_frame *frame, void *user_data) { struct can_shell_rx_event event; int err; ARG_UNUSED(user_data); event.frame = *frame; event.dev = dev; err = k_msgq_put(&can_shell_rx_msgq, &event, K_NO_WAIT); if (err != 0) { LOG_ERR("CAN shell rx event queue full"); } } static int can_shell_rx_msgq_poll_submit(const struct shell *sh) { int err; if (can_shell_rx_msgq_sh == NULL) { can_shell_rx_msgq_sh = sh; k_work_poll_init(&can_shell_rx_msgq_work, can_shell_rx_msgq_triggered_work_handler); } err = k_work_poll_submit(&can_shell_rx_msgq_work, can_shell_rx_msgq_events, ARRAY_SIZE(can_shell_rx_msgq_events), K_FOREVER); if (err != 0) { shell_error(can_shell_rx_msgq_sh, "failed to submit rx msgq polling (err %d)", err); } return err; } static void can_shell_rx_msgq_triggered_work_handler(struct k_work *work) { struct can_shell_rx_event event; while (k_msgq_get(&can_shell_rx_msgq, &event, K_NO_WAIT) == 0) { can_shell_print_frame(can_shell_rx_msgq_sh, event.dev, &event.frame); } (void)can_shell_rx_msgq_poll_submit(can_shell_rx_msgq_sh); } static const char *can_shell_state_to_string(enum can_state state) { switch (state) { case CAN_STATE_ERROR_ACTIVE: return "error-active"; case CAN_STATE_ERROR_WARNING: return "error-warning"; case CAN_STATE_ERROR_PASSIVE: return "error-passive"; case CAN_STATE_BUS_OFF: return "bus-off"; case CAN_STATE_STOPPED: return "stopped"; default: return "unknown"; } } static void can_shell_print_extended_modes(const struct shell *sh, can_mode_t cap) { int bit; int i; for (bit = 0; bit < sizeof(cap) * 8; bit++) { /* Skip unset bits */ if ((cap & BIT(bit)) == 0) { continue; } /* Lookup symbolic mode name */ for (i = 0; i < ARRAY_SIZE(can_shell_mode_map); i++) { if (BIT(bit) == can_shell_mode_map[i].mode) { shell_fprintf(sh, SHELL_NORMAL, "%s ", can_shell_mode_map[i].name); break; } 
} if (i == ARRAY_SIZE(can_shell_mode_map)) { /* Symbolic name not found, use raw mode */ shell_fprintf(sh, SHELL_NORMAL, "0x%08x ", (can_mode_t)BIT(bit)); } } } static int cmd_can_start(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } shell_print(sh, "starting %s", argv[1]); err = can_start(dev); if (err != 0) { shell_error(sh, "failed to start CAN controller (err %d)", err); return err; } return 0; } static int cmd_can_stop(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } shell_print(sh, "stopping %s", argv[1]); err = can_stop(dev); if (err != 0) { shell_error(sh, "failed to stop CAN controller (err %d)", err); return err; } return 0; } static int cmd_can_show(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); const struct device *phy; const struct can_timing *timing_min; const struct can_timing *timing_max; struct can_bus_err_cnt err_cnt; enum can_state state; uint32_t bitrate_max; int max_std_filters; int max_ext_filters; uint32_t core_clock; can_mode_t cap; int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } err = can_get_core_clock(dev, &core_clock); if (err != 0) { shell_error(sh, "failed to get CAN core clock (err %d)", err); return err; } bitrate_max = can_get_bitrate_max(dev); max_std_filters = can_get_max_filters(dev, false); if (max_std_filters < 0 && max_std_filters != -ENOSYS) { shell_error(sh, "failed to get maximum standard (11-bit) filters (err %d)", err); return err; } max_ext_filters = can_get_max_filters(dev, true); if (max_ext_filters < 0 && max_ext_filters != -ENOSYS) { shell_error(sh, "failed to get maximum extended (29-bit) filters (err %d)", err); return err; } err = can_get_capabilities(dev, &cap); if (err != 0) { shell_error(sh, "failed to get CAN controller capabilities (err %d)", err); return err; } err = can_get_state(dev, &state, &err_cnt); if (err != 0) { shell_error(sh, "failed to get CAN controller state (%d)", err); return err; } shell_print(sh, "core clock: %d Hz", core_clock); shell_print(sh, "max bitrate: %d bps", bitrate_max); shell_print(sh, "max std filters: %d", max_std_filters); shell_print(sh, "max ext filters: %d", max_ext_filters); shell_fprintf(sh, SHELL_NORMAL, "capabilities: normal "); can_shell_print_extended_modes(sh, cap); shell_fprintf(sh, SHELL_NORMAL, "\n"); shell_fprintf(sh, SHELL_NORMAL, "mode: normal "); can_shell_print_extended_modes(sh, can_get_mode(dev)); shell_fprintf(sh, SHELL_NORMAL, "\n"); shell_print(sh, "state: %s", can_shell_state_to_string(state)); shell_print(sh, "rx errors: %d", err_cnt.rx_err_cnt); shell_print(sh, "tx errors: %d", err_cnt.tx_err_cnt); timing_min = can_get_timing_min(dev); timing_max = can_get_timing_max(dev); shell_print(sh, "timing: sjw %u..%u, prop_seg %u..%u, " "phase_seg1 %u..%u, phase_seg2 %u..%u, prescaler %u..%u", timing_min->sjw, timing_max->sjw, timing_min->prop_seg, timing_max->prop_seg, timing_min->phase_seg1, timing_max->phase_seg1, timing_min->phase_seg2, timing_max->phase_seg2, timing_min->prescaler, timing_max->prescaler); if (IS_ENABLED(CONFIG_CAN_FD_MODE) && (cap & CAN_MODE_FD) != 0) { timing_min = can_get_timing_data_min(dev); timing_max = 
can_get_timing_data_max(dev); shell_print(sh, "timing data: sjw %u..%u, prop_seg %u..%u, " "phase_seg1 %u..%u, phase_seg2 %u..%u, prescaler %u..%u", timing_min->sjw, timing_max->sjw, timing_min->prop_seg, timing_max->prop_seg, timing_min->phase_seg1, timing_max->phase_seg1, timing_min->phase_seg2, timing_max->phase_seg2, timing_min->prescaler, timing_max->prescaler); } phy = can_get_transceiver(dev); shell_print(sh, "transceiver: %s", phy != NULL ? phy->name : "passive/none"); #ifdef CONFIG_CAN_STATS shell_print(sh, "statistics:"); shell_print(sh, " bit errors: %u", can_stats_get_bit_errors(dev)); shell_print(sh, " bit0 errors: %u", can_stats_get_bit0_errors(dev)); shell_print(sh, " bit1 errors: %u", can_stats_get_bit1_errors(dev)); shell_print(sh, " stuff errors: %u", can_stats_get_stuff_errors(dev)); shell_print(sh, " crc errors: %u", can_stats_get_crc_errors(dev)); shell_print(sh, " form errors: %u", can_stats_get_form_errors(dev)); shell_print(sh, " ack errors: %u", can_stats_get_ack_errors(dev)); shell_print(sh, " rx overruns: %u", can_stats_get_rx_overruns(dev)); #endif /* CONFIG_CAN_STATS */ return 0; } static int cmd_can_bitrate_set(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); struct can_timing timing = { 0 }; uint16_t sample_pnt; uint32_t bitrate; char *endptr; int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } bitrate = (uint32_t)strtoul(argv[2], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse bitrate"); return -EINVAL; } if (argc >= 4) { sample_pnt = (uint32_t)strtoul(argv[3], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse sample point"); return -EINVAL; } err = can_calc_timing(dev, &timing, bitrate, sample_pnt); if (err < 0) { shell_error(sh, "failed to calculate timing for " "bitrate %d bps, sample point %d.%d%% (err %d)", bitrate, sample_pnt / 10, sample_pnt % 10, err); return err; } if (argc >= 5) { /* Overwrite calculated default SJW with user-provided value */ timing.sjw = (uint16_t)strtoul(argv[4], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse SJW"); return -EINVAL; } } shell_print(sh, "setting bitrate to %d bps, sample point %d.%d%% " "(+/- %d.%d%%), sjw %d", bitrate, sample_pnt / 10, sample_pnt % 10, err / 10, err % 10, timing.sjw); LOG_DBG("sjw %u, prop_seg %u, phase_seg1 %u, phase_seg2 %u, prescaler %u", timing.sjw, timing.prop_seg, timing.phase_seg1, timing.phase_seg2, timing.prescaler); err = can_set_timing(dev, &timing); if (err != 0) { shell_error(sh, "failed to set timing (err %d)", err); return err; } } else { shell_print(sh, "setting bitrate to %d bps", bitrate); err = can_set_bitrate(dev, bitrate); if (err != 0) { shell_error(sh, "failed to set bitrate (err %d)", err); return err; } } return 0; } static int cmd_can_dbitrate_set(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); struct can_timing timing = { 0 }; uint16_t sample_pnt; uint32_t bitrate; char *endptr; int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } bitrate = (uint32_t)strtoul(argv[2], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse data bitrate"); return -EINVAL; } if (argc >= 4) { sample_pnt = (uint32_t)strtoul(argv[3], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse sample point"); return -EINVAL; } err = can_calc_timing_data(dev, &timing, bitrate, 
sample_pnt); if (err < 0) { shell_error(sh, "failed to calculate timing for " "data bitrate %d bps, sample point %d.%d%% (err %d)", bitrate, sample_pnt / 10, sample_pnt % 10, err); return err; } if (argc >= 5) { /* Overwrite calculated default SJW with user-provided value */ timing.sjw = (uint16_t)strtoul(argv[4], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse SJW"); return -EINVAL; } } shell_print(sh, "setting data bitrate to %d bps, sample point %d.%d%% " "(+/- %d.%d%%), sjw %d", bitrate, sample_pnt / 10, sample_pnt % 10, err / 10, err % 10, timing.sjw); LOG_DBG("sjw %u, prop_seg %u, phase_seg1 %u, phase_seg2 %u, prescaler %u", timing.sjw, timing.prop_seg, timing.phase_seg1, timing.phase_seg2, timing.prescaler); err = can_set_timing_data(dev, &timing); if (err != 0) { shell_error(sh, "failed to set data timing (err %d)", err); return err; } } else { shell_print(sh, "setting data bitrate to %d bps", bitrate); err = can_set_bitrate_data(dev, bitrate); if (err != 0) { shell_error(sh, "failed to set data bitrate (err %d)", err); return err; } } return 0; } static int can_shell_parse_timing(const struct shell *sh, size_t argc, char **argv, struct can_timing *timing) { char *endptr; timing->sjw = (uint32_t)strtoul(argv[2], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse sjw"); return -EINVAL; } timing->prop_seg = (uint32_t)strtoul(argv[3], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse prop_seg"); return -EINVAL; } timing->phase_seg1 = (uint32_t)strtoul(argv[4], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse phase_seg1"); return -EINVAL; } timing->phase_seg2 = (uint32_t)strtoul(argv[5], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse phase_seg2"); return -EINVAL; } timing->prescaler = (uint32_t)strtoul(argv[6], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse prescaler"); return -EINVAL; } return 0; } static int cmd_can_timing_set(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); struct can_timing timing = { 0 }; int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } err = can_shell_parse_timing(sh, argc, argv, &timing); if (err < 0) { return err; } shell_print(sh, "setting timing to sjw %u, prop_seg %u, phase_seg1 %u, phase_seg2 %u, " "prescaler %u", timing.sjw, timing.prop_seg, timing.phase_seg1, timing.phase_seg2, timing.prescaler); err = can_set_timing(dev, &timing); if (err != 0) { shell_error(sh, "failed to set timing (err %d)", err); return err; } return 0; } static int cmd_can_dtiming_set(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); struct can_timing timing = { 0 }; int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } err = can_shell_parse_timing(sh, argc, argv, &timing); if (err < 0) { return err; } shell_print(sh, "setting data phase timing to sjw %u, prop_seg %u, phase_seg1 %u, " "phase_seg2 %u, prescaler %u", timing.sjw, timing.prop_seg, timing.phase_seg1, timing.phase_seg2, timing.prescaler); err = can_set_timing_data(dev, &timing); if (err != 0) { shell_error(sh, "failed to set data phase timing (err %d)", err); return err; } return 0; } static int cmd_can_mode_set(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); can_mode_t mode = CAN_MODE_NORMAL; can_mode_t raw; char 
*endptr; int err; int i; int j; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } for (i = 2; i < argc; i++) { /* Lookup symbolic mode name */ for (j = 0; j < ARRAY_SIZE(can_shell_mode_map); j++) { if (strcmp(argv[i], can_shell_mode_map[j].name) == 0) { mode |= can_shell_mode_map[j].mode; break; } } if (j == ARRAY_SIZE(can_shell_mode_map)) { /* Symbolic name not found, use raw mode if hex number */ raw = (can_mode_t)strtoul(argv[i], &endptr, 16); if (*endptr == '\0') { mode |= raw; continue; } shell_error(sh, "failed to parse mode"); return -EINVAL; } } shell_print(sh, "setting mode 0x%08x", mode); err = can_set_mode(dev, mode); if (err != 0) { shell_error(sh, "failed to set mode 0x%08x (err %d)", mode, err); return err; } return 0; } static int cmd_can_send(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); static unsigned int frame_counter; unsigned int frame_no; struct can_frame frame = { 0 }; uint32_t id_mask; int argidx = 2; uint32_t val; char *endptr; int nbytes; int err; int i; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } /* Defaults */ id_mask = CAN_STD_ID_MASK; frame.flags = 0; frame.dlc = 0; /* Parse options */ while (argidx < argc && strncmp(argv[argidx], "-", 1) == 0) { if (strcmp(argv[argidx], "--") == 0) { argidx++; break; } else if (strcmp(argv[argidx], "-e") == 0) { frame.flags |= CAN_FRAME_IDE; id_mask = CAN_EXT_ID_MASK; argidx++; } else if (strcmp(argv[argidx], "-r") == 0) { frame.flags |= CAN_FRAME_RTR; argidx++; } else if (strcmp(argv[argidx], "-f") == 0) { frame.flags |= CAN_FRAME_FDF; argidx++; } else if (strcmp(argv[argidx], "-b") == 0) { frame.flags |= CAN_FRAME_BRS; argidx++; } else { shell_error(sh, "unsupported option %s", argv[argidx]); shell_help(sh); return SHELL_CMD_HELP_PRINTED; } } /* Parse CAN ID */ if (argidx >= argc) { shell_error(sh, "missing CAN ID parameter"); shell_help(sh); return SHELL_CMD_HELP_PRINTED; } val = (uint32_t)strtoul(argv[argidx++], &endptr, 16); if (*endptr != '\0') { shell_error(sh, "failed to parse CAN ID"); return -EINVAL; } if (val > id_mask) { shell_error(sh, "CAN ID 0x%0*x out of range", (frame.flags & CAN_FRAME_IDE) != 0 ? 8 : 3, val); return -EINVAL; } frame.id = val; nbytes = argc - argidx; if (nbytes > ARRAY_SIZE(frame.data)) { shell_error(sh, "excessive amount of data (%d bytes)", nbytes); return -EINVAL; } frame.dlc = can_bytes_to_dlc(nbytes); /* Parse data */ for (i = 0; i < nbytes; i++) { val = (uint32_t)strtoul(argv[argidx++], &endptr, 16); if (*endptr != '\0') { shell_error(sh, "failed to parse data %s", argv[argidx++]); return -EINVAL; } if (val > 0xff) { shell_error(sh, "data 0x%x out of range", val); return -EINVAL; } frame.data[i] = val; } err = can_shell_tx_msgq_poll_submit(sh); if (err != 0) { return err; } frame_no = frame_counter++; shell_print(sh, "enqueuing CAN frame #%u with %s (%d-bit) CAN ID 0x%0*x, " "RTR %d, CAN FD %d, BRS %d, DLC %d", frame_no, (frame.flags & CAN_FRAME_IDE) != 0 ? "extended" : "standard", (frame.flags & CAN_FRAME_IDE) != 0 ? 29 : 11, (frame.flags & CAN_FRAME_IDE) != 0 ? 8 : 3, frame.id, (frame.flags & CAN_FRAME_RTR) != 0 ? 1 : 0, (frame.flags & CAN_FRAME_FDF) != 0 ? 1 : 0, (frame.flags & CAN_FRAME_BRS) != 0 ? 
1 : 0, frame.dlc); err = can_send(dev, &frame, K_NO_WAIT, can_shell_tx_callback, UINT_TO_POINTER(frame_no)); if (err != 0) { shell_error(sh, "failed to enqueue CAN frame #%u (err %d)", frame_no, err); return err; } return 0; } static int cmd_can_filter_add(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); struct can_filter filter; uint32_t id_mask; int argidx = 2; uint32_t val; char *endptr; int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } /* Defaults */ id_mask = CAN_STD_ID_MASK; filter.flags = 0U; /* Parse options */ while (argidx < argc && strncmp(argv[argidx], "-", 1) == 0) { if (strcmp(argv[argidx], "--") == 0) { argidx++; break; } else if (strcmp(argv[argidx], "-e") == 0) { filter.flags |= CAN_FILTER_IDE; id_mask = CAN_EXT_ID_MASK; argidx++; } else { shell_error(sh, "unsupported argument %s", argv[argidx]); shell_help(sh); return SHELL_CMD_HELP_PRINTED; } } /* Parse CAN ID */ if (argidx >= argc) { shell_error(sh, "missing CAN ID parameter"); shell_help(sh); return SHELL_CMD_HELP_PRINTED; } val = (uint32_t)strtoul(argv[argidx++], &endptr, 16); if (*endptr != '\0') { shell_error(sh, "failed to parse CAN ID"); return -EINVAL; } if (val > id_mask) { shell_error(sh, "CAN ID 0x%0*x out of range", (filter.flags & CAN_FILTER_IDE) != 0 ? 8 : 3, val); return -EINVAL; } filter.id = val; if (argidx < argc) { /* Parse CAN ID mask */ val = (uint32_t)strtoul(argv[argidx++], &endptr, 16); if (*endptr != '\0') { shell_error(sh, "failed to parse CAN ID mask"); return -EINVAL; } if (val > id_mask) { shell_error(sh, "CAN ID mask 0x%0*x out of range", (filter.flags & CAN_FILTER_IDE) != 0 ? 8 : 3, val); return -EINVAL; } } else { val = id_mask; } filter.mask = val; err = can_shell_rx_msgq_poll_submit(sh); if (err != 0) { return err; } shell_print(sh, "adding filter with %s (%d-bit) CAN ID 0x%0*x, CAN ID mask 0x%0*x", (filter.flags & CAN_FILTER_IDE) != 0 ? "extended" : "standard", (filter.flags & CAN_FILTER_IDE) != 0 ? 29 : 11, (filter.flags & CAN_FILTER_IDE) != 0 ? 8 : 3, filter.id, (filter.flags & CAN_FILTER_IDE) != 0 ? 
8 : 3, filter.mask); err = can_add_rx_filter(dev, can_shell_rx_callback, NULL, &filter); if (err < 0) { shell_error(sh, "failed to add filter (err %d)", err); return err; } shell_print(sh, "filter ID: %d", err); return 0; } static int cmd_can_filter_remove(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); int filter_id; char *endptr; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } /* Parse filter ID */ filter_id = (int)strtol(argv[2], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse filter ID"); return -EINVAL; } shell_print(sh, "removing filter with ID %d", filter_id); can_remove_rx_filter(dev, filter_id); return 0; } static int cmd_can_recover(const struct shell *sh, size_t argc, char **argv) { const struct device *dev = device_get_binding(argv[1]); k_timeout_t timeout = K_FOREVER; int millisec; char *endptr; int err; if (!device_is_ready(dev)) { shell_error(sh, "device %s not ready", argv[1]); return -ENODEV; } if (argc >= 3) { /* Parse timeout */ millisec = (int)strtol(argv[2], &endptr, 10); if (*endptr != '\0') { shell_error(sh, "failed to parse timeout"); return -EINVAL; } timeout = K_MSEC(millisec); shell_print(sh, "recovering, timeout %d ms", millisec); } else { shell_print(sh, "recovering, no timeout"); } err = can_recover(dev, timeout); if (err != 0) { shell_error(sh, "failed to recover CAN controller from bus-off (err %d)", err); return err; } return 0; } static void cmd_can_device_name(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } SHELL_DYNAMIC_CMD_CREATE(dsub_can_device_name, cmd_can_device_name); static void cmd_can_mode(size_t idx, struct shell_static_entry *entry); SHELL_DYNAMIC_CMD_CREATE(dsub_can_mode, cmd_can_mode); static void cmd_can_mode(size_t idx, struct shell_static_entry *entry) { if (idx < ARRAY_SIZE(can_shell_mode_map)) { entry->syntax = can_shell_mode_map[idx].name; } else { entry->syntax = NULL; } entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_can_mode; } static void cmd_can_device_name_mode(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? 
dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = &dsub_can_mode; } SHELL_DYNAMIC_CMD_CREATE(dsub_can_device_name_mode, cmd_can_device_name_mode); SHELL_STATIC_SUBCMD_SET_CREATE(sub_can_filter_cmds, SHELL_CMD_ARG(add, &dsub_can_device_name, "Add rx filter\n" "Usage: can filter add <device> [-e] <CAN ID> [CAN ID mask]\n" "-e use extended (29-bit) CAN ID/CAN ID mask\n", cmd_can_filter_add, 3, 2), SHELL_CMD_ARG(remove, &dsub_can_device_name, "Remove rx filter\n" "Usage: can filter remove <device> <filter_id>", cmd_can_filter_remove, 3, 0), SHELL_SUBCMD_SET_END ); SHELL_STATIC_SUBCMD_SET_CREATE(sub_can_cmds, SHELL_CMD_ARG(start, &dsub_can_device_name, "Start CAN controller\n" "Usage: can start <device>", cmd_can_start, 2, 0), SHELL_CMD_ARG(stop, &dsub_can_device_name, "Stop CAN controller\n" "Usage: can stop <device>", cmd_can_stop, 2, 0), SHELL_CMD_ARG(show, &dsub_can_device_name, "Show CAN controller information\n" "Usage: can show <device>", cmd_can_show, 2, 0), SHELL_CMD_ARG(bitrate, &dsub_can_device_name, "Set CAN controller bitrate (sample point and SJW optional)\n" "Usage: can bitrate <device> <bitrate> [sample point] [sjw]", cmd_can_bitrate_set, 3, 2), SHELL_COND_CMD_ARG(CONFIG_CAN_FD_MODE, dbitrate, &dsub_can_device_name, "Set CAN controller data phase bitrate (sample point and SJW optional)\n" "Usage: can dbitrate <device> <data phase bitrate> [sample point] [sjw]", cmd_can_dbitrate_set, 3, 2), SHELL_CMD_ARG(timing, &dsub_can_device_name, "Set CAN controller timing\n" "Usage: can timing <device> <sjw> <prop_seg> <phase_seg1> <phase_seg2> <prescaler>", cmd_can_timing_set, 7, 0), SHELL_COND_CMD_ARG(CONFIG_CAN_FD_MODE, dtiming, &dsub_can_device_name, "Set CAN controller data phase timing\n" "Usage: can dtiming <device> <sjw> <prop_seg> <phase_seg1> <phase_seg2> <prescaler>", cmd_can_dtiming_set, 7, 0), SHELL_CMD_ARG(mode, &dsub_can_device_name_mode, "Set CAN controller mode\n" "Usage: can mode <device> <mode> [mode] [mode] [...]", cmd_can_mode_set, 3, SHELL_OPT_ARG_CHECK_SKIP), SHELL_CMD_ARG(send, &dsub_can_device_name, "Enqueue a CAN frame for sending\n" "Usage: can send <device> [-e] [-r] [-f] [-b] <CAN ID> [data] [...]\n" "-e use extended (29-bit) CAN ID\n" "-r send Remote Transmission Request (RTR) frame\n" "-f use CAN FD frame format\n" "-b use CAN FD Bit Rate Switching (BRS)", cmd_can_send, 3, SHELL_OPT_ARG_CHECK_SKIP), SHELL_CMD(filter, &sub_can_filter_cmds, "CAN rx filter commands\n" "Usage: can filter <add|remove> <device> ...", NULL), SHELL_COND_CMD_ARG(CONFIG_CAN_MANUAL_RECOVERY_MODE, recover, &dsub_can_device_name, "Manually recover CAN controller from bus-off state\n" "Usage: can recover <device> [timeout ms]", cmd_can_recover, 2, 1), SHELL_SUBCMD_SET_END ); SHELL_CMD_REGISTER(can, &sub_can_cmds, "CAN controller commands", NULL); ```
/content/code_sandbox/drivers/can/can_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,192
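The CAN shell above fills in `<device>` arguments through Zephyr's dynamic subcommand completion (`shell_device_lookup()` plus `SHELL_DYNAMIC_CMD_CREATE()`). A minimal sketch of that pattern in isolation follows; the `demo` command, the `cmd_demo_show()` handler and the other `demo_` names are hypothetical and not part of the CAN shell.

```c
/* Hypothetical "demo <device>" shell command with dynamic device-name
 * completion, mirroring the pattern used by the CAN shell.
 */
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/shell/shell.h>

static int cmd_demo_show(const struct shell *sh, size_t argc, char **argv)
{
	const struct device *dev = device_get_binding(argv[1]);

	ARG_UNUSED(argc);

	if (!device_is_ready(dev)) {
		shell_error(sh, "device %s not ready", argv[1]);
		return -ENODEV;
	}

	shell_print(sh, "device %s is ready", dev->name);

	return 0;
}

/* Completion callback: offer every known device name for the first argument */
static void demo_device_name_get(size_t idx, struct shell_static_entry *entry)
{
	const struct device *dev = shell_device_lookup(idx, NULL);

	entry->syntax = (dev != NULL) ? dev->name : NULL;
	entry->handler = NULL;
	entry->help = NULL;
	entry->subcmd = NULL;
}

SHELL_DYNAMIC_CMD_CREATE(dsub_demo_device_name, demo_device_name_get);

SHELL_CMD_ARG_REGISTER(demo, &dsub_demo_device_name,
		       "Show whether a device is ready\n"
		       "Usage: demo <device>",
		       cmd_demo_show, 2, 0);
```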
```unknown
# MCP25XXFD CAN configuration options

config CAN_MCP251XFD
	bool "MCP25XXFD CAN Driver"
	default y
	depends on DT_HAS_MICROCHIP_MCP251XFD_ENABLED
	select CRC
	select SPI
	help
	  Enable MCP25XXFD CAN Driver

if CAN_MCP251XFD

config CAN_MCP251XFD_MAX_TX_QUEUE
	int "Maximum number of queued messages"
	default 8
	range 1 32
	help
	  Defines the array size of transmit callback pointers and semaphores,
	  as well as the number of messages in the TX queue.

config CAN_MCP251XFD_RX_FIFO_ITEMS
	int "Number of CAN messages in the RX fifo"
	default 16
	range 1 32
	help
	  Defines the number of CAN messages in the RX fifo.

config CAN_MCP251XFD_INT_THREAD_STACK_SIZE
	int "Stack size for interrupt handler"
	default 768
	help
	  Size of the stack used for the internal thread that handles
	  interrupt processing and incoming packets.

config CAN_MCP251XFD_INT_THREAD_PRIO
	int "Priority for interrupt handler"
	default 2
	help
	  Thread priority of the interrupt handler. A higher number implies a
	  higher priority. The thread is cooperative and will not be preempted
	  by another thread until it yields.

config CAN_MCP251XFD_READ_CRC_RETRIES
	int "Number of retries during SFR register read"
	default 5
	help
	  Number of retries during an SFR register read if the CRC check fails.

config CAN_MAX_FILTER
	int "Maximum number of concurrent active filters"
	default 5
	range 1 32
	help
	  Maximum number of filters supported by the can_add_rx_filter() API call.

endif # CAN_MCP251XFD
```
/content/code_sandbox/drivers/can/Kconfig.mcp251xfd
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
377
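The CAN_MCP251XFD_MAX_TX_QUEUE help text says the value sizes the transmit-callback array, the TX semaphore count and the TX queue. A minimal sketch of that relationship, assuming a hypothetical `demo_driver_data` layout rather than the driver's actual structures:

```c
/* Sketch only: shows how an int Kconfig symbol typically sizes driver state.
 * demo_tx_mailbox/demo_driver_data are illustrative, not the real driver types.
 */
#include <zephyr/kernel.h>
#include <zephyr/drivers/can.h>

struct demo_tx_mailbox {
	can_tx_callback_t cb;
	void *cb_arg;
};

struct demo_driver_data {
	/* One callback slot per message that can be queued for transmission */
	struct demo_tx_mailbox mailbox[CONFIG_CAN_MCP251XFD_MAX_TX_QUEUE];
	/* Counting semaphore limiting concurrently queued transmissions */
	struct k_sem tx_sem;
};

static void demo_driver_data_init(struct demo_driver_data *data)
{
	k_sem_init(&data->tx_sem, CONFIG_CAN_MCP251XFD_MAX_TX_QUEUE,
		   CONFIG_CAN_MCP251XFD_MAX_TX_QUEUE);
}
```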
```c /* * */ #define DT_DRV_COMPAT kvaser_pcican #include <zephyr/drivers/can/can_sja1000.h> #include <zephyr/drivers/can.h> #include <zephyr/drivers/pcie/pcie.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(can_kvaser_pci, CONFIG_CAN_LOG_LEVEL); /* AMCC S5920 I/O BAR registers */ #define S5920_INTCSR_REG 0x38 #define S5920_INTCSR_ADDINT_EN BIT(13) #define S5920_PTCR_REG 0x60 /* Xilinx I/O BAR registers */ #define XLNX_VERINT_REG 0x07 #define XLNX_VERINT_VERSION_POS 4U struct can_kvaser_pci_config { void (*irq_config_func)(const struct device *dev); struct pcie_dev *pcie; }; struct can_kvaser_pci_data { io_port_t sja1000_base; }; static uint8_t can_kvaser_pci_read_reg(const struct device *dev, uint8_t reg) { struct can_sja1000_data *sja1000_data = dev->data; struct can_kvaser_pci_data *kvaser_data = sja1000_data->custom; io_port_t addr = kvaser_data->sja1000_base + reg; return sys_in8(addr); } static void can_kvaser_pci_write_reg(const struct device *dev, uint8_t reg, uint8_t val) { struct can_sja1000_data *sja1000_data = dev->data; struct can_kvaser_pci_data *kvaser_data = sja1000_data->custom; io_port_t addr = kvaser_data->sja1000_base + reg; sys_out8(val, addr); } static int can_kvaser_pci_get_core_clock(const struct device *dev, uint32_t *rate) { ARG_UNUSED(dev); /* The internal clock operates at half of the oscillator frequency */ *rate = MHZ(16) / 2; return 0; } static int can_kvaser_pci_init(const struct device *dev) { const struct can_sja1000_config *sja1000_config = dev->config; const struct can_kvaser_pci_config *kvaser_config = sja1000_config->custom; struct can_sja1000_data *sja1000_data = dev->data; struct can_kvaser_pci_data *kvaser_data = sja1000_data->custom; struct pcie_bar iobar; static io_port_t amcc_base; static io_port_t xlnx_base; uint32_t intcsr; int err; if (kvaser_config->pcie->bdf == PCIE_BDF_NONE) { LOG_ERR("failed to find PCIe device"); return -ENODEV; } pcie_set_cmd(kvaser_config->pcie->bdf, PCIE_CONF_CMDSTAT_IO, true); /* AMCC S5920 registers */ if (!pcie_probe_iobar(kvaser_config->pcie->bdf, 0, &iobar)) { LOG_ERR("failed to probe AMCC S5920 I/O BAR"); return -ENODEV; } amcc_base = iobar.phys_addr; /* SJA1000 registers */ if (!pcie_probe_iobar(kvaser_config->pcie->bdf, 1, &iobar)) { LOG_ERR("failed to probe SJA1000 I/O BAR"); return -ENODEV; } kvaser_data->sja1000_base = iobar.phys_addr; /* Xilinx registers */ if (!pcie_probe_iobar(kvaser_config->pcie->bdf, 2, &iobar)) { LOG_ERR("failed to probe Xilinx I/O BAR"); return -ENODEV; } xlnx_base = iobar.phys_addr; LOG_DBG("Xilinx version: %d", sys_in8(xlnx_base + XLNX_VERINT_REG) >> XLNX_VERINT_VERSION_POS); /* * Initialization sequence as per Kvaser PCIcan Hardware Reference Manual (UG 98048 * v3.0.0). 
*/ /* AMCC S5920 PCI Pass-Thru Configuration Register (PTCR) */ sys_out32(0x80808080UL, amcc_base + S5920_PTCR_REG); /* AMCC S5920 PCI Interrupt Control/Status Register (INTCSR) */ intcsr = sys_in32(amcc_base + S5920_INTCSR_REG); intcsr |= S5920_INTCSR_ADDINT_EN; sys_out32(intcsr, amcc_base + S5920_INTCSR_REG); err = can_sja1000_init(dev); if (err != 0) { LOG_ERR("failed to initialize controller (err %d)", err); return err; } kvaser_config->irq_config_func(dev); return 0; } const struct can_driver_api can_kvaser_pci_driver_api = { .get_capabilities = can_sja1000_get_capabilities, .start = can_sja1000_start, .stop = can_sja1000_stop, .set_mode = can_sja1000_set_mode, .set_timing = can_sja1000_set_timing, .send = can_sja1000_send, .add_rx_filter = can_sja1000_add_rx_filter, .remove_rx_filter = can_sja1000_remove_rx_filter, .get_state = can_sja1000_get_state, .set_state_change_callback = can_sja1000_set_state_change_callback, .get_core_clock = can_kvaser_pci_get_core_clock, .get_max_filters = can_sja1000_get_max_filters, #ifdef CONFIG_CAN_MANUAL_RECOVERY_MODE .recover = can_sja1000_recover, #endif /* CONFIG_CAN_MANUAL_RECOVERY_MODE */ .timing_min = CAN_SJA1000_TIMING_MIN_INITIALIZER, .timing_max = CAN_SJA1000_TIMING_MAX_INITIALIZER, }; #define CAN_KVASER_PCI_OCR \ (CAN_SJA1000_OCR_OCMODE_NORMAL | CAN_SJA1000_OCR_OCTN0 | CAN_SJA1000_OCR_OCTP0 | \ CAN_SJA1000_OCR_OCTN1 | CAN_SJA1000_OCR_OCTP1) #define CAN_KVASER_PCI_CDR (CAN_SJA1000_CDR_CD_DIV2 | CAN_SJA1000_CDR_CLOCK_OFF) #define CAN_KVASER_PCI_INIT(inst) \ static void can_kvaser_pci_config_func_##inst(const struct device *dev); \ DEVICE_PCIE_INST_DECLARE(inst); \ \ static const struct can_kvaser_pci_config can_kvaser_pci_config_##inst = { \ DEVICE_PCIE_INST_INIT(inst, pcie), \ .irq_config_func = can_kvaser_pci_config_func_##inst \ }; \ \ static const struct can_sja1000_config can_sja1000_config_##inst = \ CAN_SJA1000_DT_CONFIG_INST_GET(inst, &can_kvaser_pci_config_##inst, \ can_kvaser_pci_read_reg, can_kvaser_pci_write_reg, \ CAN_KVASER_PCI_OCR, CAN_KVASER_PCI_CDR, 0); \ \ static struct can_kvaser_pci_data can_kvaser_pci_data_##inst; \ \ static struct can_sja1000_data can_sja1000_data_##inst = \ CAN_SJA1000_DATA_INITIALIZER(&can_kvaser_pci_data_##inst); \ \ CAN_DEVICE_DT_INST_DEFINE(inst, can_kvaser_pci_init, NULL, &can_sja1000_data_##inst, \ &can_sja1000_config_##inst, POST_KERNEL, \ CONFIG_CAN_INIT_PRIORITY, &can_kvaser_pci_driver_api); \ \ static void can_kvaser_pci_config_func_##inst(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), can_sja1000_isr, \ DEVICE_DT_INST_GET(inst), DT_INST_IRQ(inst, sense)); \ irq_enable(DT_INST_IRQN(inst)); \ } DT_INST_FOREACH_STATUS_OKAY(CAN_KVASER_PCI_INIT) ```
/content/code_sandbox/drivers/can/can_kvaser_pci.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,821
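The Kvaser driver reaches the SJA1000 core through byte-wide registers at consecutive I/O-port addresses obtained from a PCI I/O BAR. A stripped-down sketch of that accessor pattern, with hypothetical `demo_` names in place of the driver's per-instance data:

```c
/* Sketch of indexed I/O-port register access: each SJA1000 register is one
 * byte wide at base + reg. demo_sja1000_base would come from pcie_probe_iobar().
 */
#include <zephyr/sys/sys_io.h>

static io_port_t demo_sja1000_base;

static uint8_t demo_read_reg(uint8_t reg)
{
	return sys_in8(demo_sja1000_base + reg);
}

static void demo_write_reg(uint8_t reg, uint8_t val)
{
	sys_out8(val, demo_sja1000_base + reg);
}
```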
```c /* * */ #define DT_DRV_COMPAT microchip_mcp251xfd #include "can_mcp251xfd.h" #include <zephyr/device.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/crc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(can_mcp251xfd, CONFIG_CAN_LOG_LEVEL); static void mcp251xfd_canframe_to_txobj(const struct can_frame *src, int mailbox_idx, struct mcp251xfd_txobj *dst) { memset(dst, 0, sizeof(*dst)); if ((src->flags & CAN_FRAME_IDE) != 0) { dst->id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, src->id >> 18); dst->id |= FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, src->id); dst->flags |= MCP251XFD_OBJ_FLAGS_IDE; } else { dst->id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, src->id); } if ((src->flags & CAN_FRAME_BRS) != 0) { dst->flags |= MCP251XFD_OBJ_FLAGS_BRS; } dst->flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, src->dlc); #if defined(CONFIG_CAN_FD_MODE) if ((src->flags & CAN_FRAME_FDF) != 0) { dst->flags |= MCP251XFD_OBJ_FLAGS_FDF; } #endif dst->flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MASK, mailbox_idx); dst->id = sys_cpu_to_le32(dst->id); dst->flags = sys_cpu_to_le32(dst->flags); if ((src->flags & CAN_FRAME_RTR) != 0) { dst->flags |= MCP251XFD_OBJ_FLAGS_RTR; } else { memcpy(dst->data, src->data, MIN(can_dlc_to_bytes(src->dlc), CAN_MAX_DLEN)); } } static void *mcp251xfd_read_reg(const struct device *dev, uint16_t addr, int len) { const struct mcp251xfd_config *dev_cfg = dev->config; struct mcp251xfd_data *dev_data = dev->data; struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data; uint16_t spi_cmd; int ret; spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ | addr); memcpy(&spi_data->header[1], &spi_cmd, sizeof(spi_cmd)); struct spi_buf tx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len}; struct spi_buf rx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len}; const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1}; const struct spi_buf_set rx = {.buffers = &rx_buf, .count = 1}; ret = spi_transceive_dt(&dev_cfg->bus, &tx, &rx); if (ret < 0) { return NULL; } return &spi_data->buf[0]; } static void *mcp251xfd_read_crc(const struct device *dev, uint16_t addr, int len) { const struct mcp251xfd_config *dev_cfg = dev->config; struct mcp251xfd_data *dev_data = dev->data; struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data; int num_retries = CONFIG_CAN_MCP251XFD_READ_CRC_RETRIES + 1; int ret; while (num_retries-- > 0) { uint16_t crc_in, crc, spi_cmd; struct spi_buf tx_buf = {.buf = &spi_data->header[0], .len = MCP251XFD_SPI_CMD_LEN + MCP251XFD_SPI_LEN_FIELD_LEN + len + MCP251XFD_SPI_CRC_LEN}; struct spi_buf rx_buf = {.buf = &spi_data->header[0], .len = MCP251XFD_SPI_CMD_LEN + MCP251XFD_SPI_LEN_FIELD_LEN + len + MCP251XFD_SPI_CRC_LEN}; const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1}; const struct spi_buf_set rx = {.buffers = &rx_buf, .count = 1}; spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ_CRC | addr); memcpy(&spi_data->header[0], &spi_cmd, sizeof(spi_cmd)); spi_data->header[2] = len; /* * Evaluate initial crc over spi_cmd and length as these value will change after * spi transaction is finished. 
*/ crc_in = crc16(MCP251XFD_CRC_POLY, MCP251XFD_CRC_SEED, (uint8_t *)(&spi_data->header[0]), MCP251XFD_SPI_CMD_LEN + MCP251XFD_SPI_LEN_FIELD_LEN); ret = spi_transceive_dt(&dev_cfg->bus, &tx, &rx); if (ret < 0) { continue; } /* Continue crc calculation over the data field and the crc field */ crc = crc16(MCP251XFD_CRC_POLY, crc_in, &spi_data->buf[0], len + MCP251XFD_SPI_CRC_LEN); if (crc == 0) { return &spi_data->buf[0]; } } return NULL; } static inline void *mcp251xfd_get_spi_buf_ptr(const struct device *dev) { struct mcp251xfd_data *dev_data = dev->data; struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data; return &spi_data->buf[0]; } static int mcp251xfd_write(const struct device *dev, uint16_t addr, int len) { const struct mcp251xfd_config *dev_cfg = dev->config; struct mcp251xfd_data *dev_data = dev->data; struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data; uint16_t spi_cmd; struct spi_buf tx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len}; const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1}; spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | addr); memcpy(&spi_data->header[1], &spi_cmd, sizeof(spi_cmd)); return spi_write_dt(&dev_cfg->bus, &tx); } static int mcp251xfd_fifo_write(const struct device *dev, int mailbox_idx, const struct can_frame *msg) { uint32_t *regs; struct mcp251xfd_txobj *txobj; uint8_t *reg_byte; uint16_t address; int tx_len; int ret; /* read fifosta and ua at the same time */ regs = mcp251xfd_read_crc(dev, MCP251XFD_REG_TXQSTA, MCP251XFD_REG_SIZE * 2); if (!regs) { LOG_ERR("Failed to read 8 bytes from REG_TXQSTA"); return -EINVAL; } /* check if fifo is full */ if (!(regs[0] & MCP251XFD_REG_TXQSTA_TXQNIF)) { return -ENOMEM; } address = MCP251XFD_RAM_START_ADDR + regs[1]; txobj = mcp251xfd_get_spi_buf_ptr(dev); mcp251xfd_canframe_to_txobj(msg, mailbox_idx, txobj); tx_len = MCP251XFD_OBJ_HEADER_SIZE; if ((msg->flags & CAN_FRAME_RTR) == 0) { tx_len += ROUND_UP(can_dlc_to_bytes(msg->dlc), MCP251XFD_RAM_ALIGNMENT); } ret = mcp251xfd_write(dev, address, tx_len); if (ret < 0) { return ret; } reg_byte = mcp251xfd_get_spi_buf_ptr(dev); *reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_TXQCON_UINC | MCP251XFD_REG_TXQCON_TXREQ); return mcp251xfd_write(dev, MCP251XFD_REG_TXQCON + 1, 1); } static void mcp251xfd_rxobj_to_canframe(struct mcp251xfd_rxobj *src, struct can_frame *dst) { memset(dst, 0, sizeof(*dst)); src->id = sys_le32_to_cpu(src->id); src->flags = sys_le32_to_cpu(src->flags); if ((src->flags & MCP251XFD_OBJ_FLAGS_IDE) != 0) { dst->id = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, src->id); dst->id |= FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, src->id) << 18; dst->flags |= CAN_FRAME_IDE; } else { dst->id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, src->id); } if ((src->flags & MCP251XFD_OBJ_FLAGS_BRS) != 0) { dst->flags |= CAN_FRAME_BRS; } #if defined(CONFIG_CAN_FD_MODE) if ((src->flags & MCP251XFD_OBJ_FLAGS_FDF) != 0) { dst->flags |= CAN_FRAME_FDF; } #endif dst->dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, src->flags); #if defined(CONFIG_CAN_RX_TIMESTAMP) dst->timestamp = sys_le32_to_cpu(src->timestamp); #endif if ((src->flags & MCP251XFD_OBJ_FLAGS_RTR) != 0) { dst->flags |= CAN_FRAME_RTR; } else { memcpy(dst->data, src->data, MIN(can_dlc_to_bytes(dst->dlc), CAN_MAX_DLEN)); } } static int mcp251xfd_get_mode_internal(const struct device *dev, uint8_t *mode) { uint8_t *reg_byte; uint32_t mask = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_OPMOD_MASK); reg_byte = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON_B2, 1); 
if (!reg_byte) { return -EINVAL; } *mode = FIELD_GET(mask, *reg_byte); return 0; } static int mcp251xfd_reg_check_value_wtimeout(const struct device *dev, uint16_t addr, uint32_t value, uint32_t mask, uint32_t timeout_usec, int retries, bool allow_yield) { uint32_t *reg; uint32_t delay = timeout_usec / retries; for (;;) { reg = mcp251xfd_read_crc(dev, addr, MCP251XFD_REG_SIZE); if (!reg) { return -EINVAL; } *reg = sys_le32_to_cpu(*reg); if ((*reg & mask) == value) { return 0; } if (--retries < 0) { LOG_ERR("Timeout validing 0x%x", addr); return -EIO; } if (allow_yield) { k_sleep(K_USEC(delay)); } else { k_busy_wait(delay); } } return 0; } static int mcp251xfd_set_tdc(const struct device *dev, bool is_enabled) { uint32_t *reg; uint32_t tmp; struct mcp251xfd_data *dev_data = dev->data; reg = mcp251xfd_get_spi_buf_ptr(dev); if (is_enabled) { tmp = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, MCP251XFD_REG_TDC_TDCMOD_AUTO); tmp |= FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, dev_data->tdco); } else { tmp = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, MCP251XFD_REG_TDC_TDCMOD_DISABLED); } *reg = sys_cpu_to_le32(tmp); return mcp251xfd_write(dev, MCP251XFD_REG_TDC, MCP251XFD_REG_SIZE); } static int mcp251xfd_set_mode_internal(const struct device *dev, uint8_t requested_mode) { struct mcp251xfd_data *dev_data = dev->data; uint32_t *reg; uint32_t opmod, reg_con; int ret = 0; k_mutex_lock(&dev_data->mutex, K_FOREVER); reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE); if (!reg) { ret = -EINVAL; goto done; } reg_con = sys_le32_to_cpu(*reg); opmod = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, reg_con); if (opmod == requested_mode) { goto done; } #if defined(CONFIG_CAN_FD_MODE) if (dev_data->current_mcp251xfd_mode == MCP251XFD_REG_CON_MODE_CONFIG) { if (requested_mode == MCP251XFD_REG_CON_MODE_CAN2_0 || requested_mode == MCP251XFD_REG_CON_MODE_EXT_LOOPBACK || requested_mode == MCP251XFD_REG_CON_MODE_INT_LOOPBACK) { ret = mcp251xfd_set_tdc(dev, false); } else if (requested_mode == MCP251XFD_REG_CON_MODE_MIXED) { ret = mcp251xfd_set_tdc(dev, true); } if (ret < 0) { goto done; } } #endif reg_con &= ~MCP251XFD_REG_CON_REQOP_MASK; reg_con |= FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, requested_mode); *reg = sys_cpu_to_le32(reg_con); ret = mcp251xfd_write(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE); if (ret < 0) { LOG_ERR("Failed to write REG_CON register [%d]", MCP251XFD_REG_CON); goto done; } ret = mcp251xfd_reg_check_value_wtimeout( dev, MCP251XFD_REG_CON, FIELD_PREP(MCP251XFD_REG_CON_OPMOD_MASK, requested_mode), MCP251XFD_REG_CON_OPMOD_MASK, MCP251XFD_MODE_CHANGE_TIMEOUT_USEC, MCP251XFD_MODE_CHANGE_RETRIES, true); done: k_mutex_unlock(&dev_data->mutex); return ret; } static int mcp251xfd_set_mode(const struct device *dev, can_mode_t mode) { struct mcp251xfd_data *dev_data = dev->data; if (dev_data->common.started) { return -EBUSY; } /* todo: Add CAN_MODE_ONE_SHOT support */ if ((mode & (CAN_MODE_3_SAMPLES | CAN_MODE_ONE_SHOT)) != 0) { return -ENOTSUP; } if (mode == CAN_MODE_NORMAL) { dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_CAN2_0; } if ((mode & CAN_MODE_FD) != 0) { #if defined(CONFIG_CAN_FD_MODE) dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_MIXED; #else return -ENOTSUP; #endif } if ((mode & CAN_MODE_LISTENONLY) != 0) { dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_LISTENONLY; } if ((mode & CAN_MODE_LOOPBACK) != 0) { dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_EXT_LOOPBACK; } dev_data->common.mode = mode; return 0; } static int mcp251xfd_set_timing(const 
struct device *dev, const struct can_timing *timing) { struct mcp251xfd_data *dev_data = dev->data; uint32_t *reg; uint32_t tmp; int ret; if (!timing) { return -EINVAL; } if (dev_data->common.started) { return -EBUSY; } k_mutex_lock(&dev_data->mutex, K_FOREVER); reg = mcp251xfd_get_spi_buf_ptr(dev); tmp = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, timing->prescaler - 1); tmp |= FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK, timing->prop_seg + timing->phase_seg1 - 1); tmp |= FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK, timing->phase_seg2 - 1); tmp |= FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, timing->sjw - 1); *reg = tmp; ret = mcp251xfd_write(dev, MCP251XFD_REG_NBTCFG, MCP251XFD_REG_SIZE); if (ret < 0) { LOG_ERR("Failed to write NBTCFG register [%d]", ret); } k_mutex_unlock(&dev_data->mutex); return ret; } #if defined(CONFIG_CAN_FD_MODE) static int mcp251xfd_set_timing_data(const struct device *dev, const struct can_timing *timing) { struct mcp251xfd_data *dev_data = dev->data; uint32_t *reg; uint32_t tmp; int ret; if (!timing) { return -EINVAL; } if (dev_data->common.started) { return -EBUSY; } k_mutex_lock(&dev_data->mutex, K_FOREVER); reg = mcp251xfd_get_spi_buf_ptr(dev); tmp = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, timing->prescaler - 1); tmp |= FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK, timing->prop_seg + timing->phase_seg1 - 1); tmp |= FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK, timing->phase_seg2 - 1); tmp |= FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, timing->sjw - 1); *reg = sys_cpu_to_le32(tmp); /* actual TDCO minimum is -64 but driver implementation only sets >= 0 values */ dev_data->tdco = CAN_CALC_TDCO(timing, 0U, MCP251XFD_REG_TDC_TDCO_MAX); ret = mcp251xfd_write(dev, MCP251XFD_REG_DBTCFG, MCP251XFD_REG_SIZE); if (ret < 0) { LOG_ERR("Failed to write DBTCFG register [%d]", ret); } k_mutex_unlock(&dev_data->mutex); return ret; } #endif static int mcp251xfd_send(const struct device *dev, const struct can_frame *msg, k_timeout_t timeout, can_tx_callback_t callback, void *callback_arg) { struct mcp251xfd_data *dev_data = dev->data; uint8_t mailbox_idx; int ret = 0; LOG_DBG("Sending %d bytes. Id: 0x%x, ID type: %s %s %s %s", can_dlc_to_bytes(msg->dlc), msg->id, msg->flags & CAN_FRAME_IDE ? "extended" : "standard", msg->flags & CAN_FRAME_RTR ? "RTR" : "", msg->flags & CAN_FRAME_FDF ? "FD frame" : "", msg->flags & CAN_FRAME_BRS ? 
"BRS" : ""); if (!dev_data->common.started) { return -ENETDOWN; } if (dev_data->state == CAN_STATE_BUS_OFF) { return -ENETUNREACH; } if ((msg->flags & CAN_FRAME_FDF) == 0 && msg->dlc > CAN_MAX_DLC) { LOG_ERR("DLC of %d without fd flag set.", msg->dlc); return -EINVAL; } if ((msg->flags & CAN_FRAME_FDF) && !(dev_data->common.mode & CAN_MODE_FD)) { return -ENOTSUP; } if (k_sem_take(&dev_data->tx_sem, timeout) != 0) { return -EAGAIN; } k_mutex_lock(&dev_data->mutex, K_FOREVER); for (mailbox_idx = 0; mailbox_idx < MCP251XFD_TX_QUEUE_ITEMS; mailbox_idx++) { if ((BIT(mailbox_idx) & dev_data->mailbox_usage) == 0) { dev_data->mailbox_usage |= BIT(mailbox_idx); break; } } if (mailbox_idx >= MCP251XFD_TX_QUEUE_ITEMS) { k_sem_give(&dev_data->tx_sem); ret = -EIO; goto done; } dev_data->mailbox[mailbox_idx].cb = callback; dev_data->mailbox[mailbox_idx].cb_arg = callback_arg; ret = mcp251xfd_fifo_write(dev, mailbox_idx, msg); if (ret < 0) { dev_data->mailbox_usage &= ~BIT(mailbox_idx); dev_data->mailbox[mailbox_idx].cb = NULL; k_sem_give(&dev_data->tx_sem); } done: k_mutex_unlock(&dev_data->mutex); return ret; } static int mcp251xfd_add_rx_filter(const struct device *dev, can_rx_callback_t rx_cb, void *cb_arg, const struct can_filter *filter) { struct mcp251xfd_data *dev_data = dev->data; uint32_t *reg; uint32_t tmp; uint8_t *reg_byte; int filter_idx; int ret; k_mutex_lock(&dev_data->mutex, K_FOREVER); for (filter_idx = 0; filter_idx < CONFIG_CAN_MAX_FILTER ; filter_idx++) { if ((BIT(filter_idx) & dev_data->filter_usage) == 0) { break; } } if (filter_idx >= CONFIG_CAN_MAX_FILTER) { filter_idx = -ENOSPC; goto done; } reg = mcp251xfd_get_spi_buf_ptr(dev); if ((filter->flags & CAN_FILTER_IDE) != 0) { tmp = FIELD_PREP(MCP251XFD_REG_FLTOBJ_SID_MASK, filter->id >> 18); tmp |= FIELD_PREP(MCP251XFD_REG_FLTOBJ_EID_MASK, filter->id); tmp |= MCP251XFD_REG_FLTOBJ_EXIDE; } else { tmp = FIELD_PREP(MCP251XFD_REG_FLTOBJ_SID_MASK, filter->id); } *reg = sys_cpu_to_le32(tmp); ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTOBJ(filter_idx), MCP251XFD_REG_SIZE); if (ret < 0) { LOG_ERR("Failed to write FLTOBJ register [%d]", ret); goto done; } reg = mcp251xfd_get_spi_buf_ptr(dev); if ((filter->flags & CAN_FILTER_IDE) != 0) { tmp = FIELD_PREP(MCP251XFD_REG_MASK_MSID_MASK, filter->mask >> 18); tmp |= FIELD_PREP(MCP251XFD_REG_MASK_MEID_MASK, filter->mask); } else { tmp = FIELD_PREP(MCP251XFD_REG_MASK_MSID_MASK, filter->mask); } tmp |= MCP251XFD_REG_MASK_MIDE; *reg = sys_cpu_to_le32(tmp); ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTMASK(filter_idx), MCP251XFD_REG_SIZE); if (ret < 0) { LOG_ERR("Failed to write FLTMASK register [%d]", ret); goto done; } reg_byte = mcp251xfd_get_spi_buf_ptr(dev); *reg_byte = MCP251XFD_REG_BYTE_FLTCON_FLTEN; *reg_byte |= FIELD_PREP(MCP251XFD_REG_BYTE_FLTCON_FBP_MASK, MCP251XFD_RX_FIFO_IDX); ret = mcp251xfd_write(dev, MCP251XFD_REG_BYTE_FLTCON(filter_idx), 1); if (ret < 0) { LOG_ERR("Failed to write FLTCON register [%d]", ret); goto done; } dev_data->filter_usage |= BIT(filter_idx); dev_data->filter[filter_idx] = *filter; dev_data->rx_cb[filter_idx] = rx_cb; dev_data->cb_arg[filter_idx] = cb_arg; done: k_mutex_unlock(&dev_data->mutex); return filter_idx; } static void mcp251xfd_remove_rx_filter(const struct device *dev, int filter_idx) { struct mcp251xfd_data *dev_data = dev->data; uint8_t *reg_byte; uint32_t *reg; int ret; if (filter_idx < 0 || filter_idx >= CONFIG_CAN_MAX_FILTER) { LOG_ERR("Filter ID %d out of bounds", filter_idx); return; } k_mutex_lock(&dev_data->mutex, K_FOREVER); reg_byte = 
mcp251xfd_get_spi_buf_ptr(dev); *reg_byte = 0; ret = mcp251xfd_write(dev, MCP251XFD_REG_BYTE_FLTCON(filter_idx), 1); if (ret < 0) { LOG_ERR("Failed to write FLTCON register [%d]", ret); goto done; } dev_data->filter_usage &= ~BIT(filter_idx); reg = mcp251xfd_get_spi_buf_ptr(dev); reg[0] = 0; ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTCON(filter_idx), MCP251XFD_REG_SIZE); if (ret < 0) { LOG_ERR("Failed to write FLTCON register [%d]", ret); } done: k_mutex_unlock(&dev_data->mutex); } static void mcp251xfd_set_state_change_callback(const struct device *dev, can_state_change_callback_t cb, void *user_data) { struct mcp251xfd_data *dev_data = dev->data; dev_data->common.state_change_cb = cb; dev_data->common.state_change_cb_user_data = user_data; } static int mcp251xfd_get_state(const struct device *dev, enum can_state *state, struct can_bus_err_cnt *err_cnt) { struct mcp251xfd_data *dev_data = dev->data; uint32_t *reg; uint32_t tmp; int ret = 0; k_mutex_lock(&dev_data->mutex, K_FOREVER); reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_TREC, MCP251XFD_REG_SIZE); if (!reg) { ret = -EINVAL; goto done; } tmp = sys_le32_to_cpu(*reg); if (err_cnt != NULL) { err_cnt->tx_err_cnt = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, tmp); err_cnt->rx_err_cnt = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, tmp); } if (state == NULL) { goto done; } if (!dev_data->common.started) { *state = CAN_STATE_STOPPED; goto done; } if ((tmp & MCP251XFD_REG_TREC_TXBO) != 0) { *state = CAN_STATE_BUS_OFF; } else if ((tmp & MCP251XFD_REG_TREC_TXBP) != 0) { *state = CAN_STATE_ERROR_PASSIVE; } else if ((tmp & MCP251XFD_REG_TREC_RXBP) != 0) { *state = CAN_STATE_ERROR_PASSIVE; } else if ((tmp & MCP251XFD_REG_TREC_TXWARN) != 0) { *state = CAN_STATE_ERROR_WARNING; } else if ((tmp & MCP251XFD_REG_TREC_RXWARN) != 0) { *state = CAN_STATE_ERROR_WARNING; } else { *state = CAN_STATE_ERROR_ACTIVE; } done: k_mutex_unlock(&dev_data->mutex); return 0; } static int mcp251xfd_get_core_clock(const struct device *dev, uint32_t *rate) { const struct mcp251xfd_config *dev_cfg = dev->config; *rate = dev_cfg->osc_freq; return 0; } static int mcp251xfd_get_max_filters(const struct device *dev, bool ide) { ARG_UNUSED(ide); return CONFIG_CAN_MAX_FILTER; } static int mcp251xfd_handle_fifo_read(const struct device *dev, const struct mcp251xfd_fifo *fifo, uint8_t fifo_type) { int ret = 0; struct mcp251xfd_data *dev_data = dev->data; uint32_t *regs, fifosta, ua; uint8_t *reg_byte; int len; int fetch_total = 0; int ui_inc = 0; uint32_t fifo_tail_index, fifo_tail_addr; uint8_t fifo_head_index; k_mutex_lock(&dev_data->mutex, K_FOREVER); /* read in FIFOSTA and FIFOUA at the same time */ regs = mcp251xfd_read_crc(dev, MCP251XFD_REG_FIFOCON_TO_STA(fifo->reg_fifocon_addr), 2 * MCP251XFD_REG_SIZE); if (!regs) { ret = -EINVAL; goto done; } fifosta = sys_le32_to_cpu(regs[0]); ua = sys_le32_to_cpu(regs[1]); /* is there any data in the fifo? */ if (!(fifosta & MCP251XFD_REG_FIFOSTA_TFNRFNIF)) { goto done; } fifo_tail_addr = ua; fifo_tail_index = (fifo_tail_addr - fifo->ram_start_addr) / fifo->item_size; if (fifo_type == MCP251XFD_FIFO_TYPE_RX) { /* * fifo_head_index points where the next message will be written. * It points to one past the end of the fifo. 
*/ fifo_head_index = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifosta); if (fifo_head_index == 0) { fifo_head_index = fifo->capacity - 1; } else { fifo_head_index -= 1; } if (fifo_tail_index > fifo_head_index) { /* fetch to the end of the memory and then wrap to the start */ fetch_total = fifo->capacity - 1 - fifo_tail_index + 1; fetch_total += fifo_head_index + 1; } else { fetch_total = fifo_head_index - fifo_tail_index + 1; } } else if (fifo_type == MCP251XFD_FIFO_TYPE_TEF) { /* FIFOCI doesn't exist for TEF queues, so fetch one message at a time */ fifo_head_index = fifo_tail_index; fetch_total = 1; } else { ret = -EINVAL; goto done; } while (fetch_total > 0) { uint16_t memory_addr; uint8_t *data; if (fifo_tail_index > fifo_head_index) { len = fifo->capacity - 1 - fifo_tail_index + 1; } else { len = fifo_head_index - fifo_tail_index + 1; } memory_addr = MCP251XFD_RAM_START_ADDR + fifo->ram_start_addr + fifo_tail_index * fifo->item_size; data = mcp251xfd_read_reg(dev, memory_addr, len * fifo->item_size); if (!data) { LOG_ERR("Error fetching batch message"); ret = -EINVAL; goto done; } for (int i = 0; i < len; i++) { fifo->msg_handler(dev, (void *)(&data[i * fifo->item_size])); } fifo_tail_index = (fifo_tail_index + len) % fifo->capacity; fetch_total -= len; ui_inc += len; } reg_byte = mcp251xfd_get_spi_buf_ptr(dev); *reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_FIFOCON_UINC); for (int i = 0; i < ui_inc; i++) { ret = mcp251xfd_write(dev, fifo->reg_fifocon_addr + 1, 1); if (ret < 0) { LOG_ERR("Failed to increment pointer"); goto done; } } done: k_mutex_unlock(&dev_data->mutex); return ret; } static void mcp251xfd_reset_tx_fifos(const struct device *dev, int status) { struct mcp251xfd_data *dev_data = dev->data; LOG_INF("All FIFOs Reset"); k_mutex_lock(&dev_data->mutex, K_FOREVER); for (int i = 0; i < MCP251XFD_TX_QUEUE_ITEMS; i++) { can_tx_callback_t callback; if (!(dev_data->mailbox_usage & BIT(i))) { continue; } callback = dev_data->mailbox[i].cb; if (callback) { callback(dev, status, dev_data->mailbox[i].cb_arg); } dev_data->mailbox_usage &= ~BIT(i); dev_data->mailbox[i].cb = NULL; k_sem_give(&dev_data->tx_sem); } k_mutex_unlock(&dev_data->mutex); } /* * CERRIF will be set each time a threshold in the TEC/REC counter is crossed by the following * conditions: * TEC or REC exceeds the Error Warning state threshold * The transmitter or receiver transitions to Error Passive state * The transmitter transitions to Bus Off state * The transmitter or receiver transitions from Error Passive to Error Active state * The module transitions from Bus Off to Error Active state, after the bus off recovery * sequence * When the user clears CERRIF, it will remain clear until a new counter crossing occurs. */ static int mcp251xfd_handle_cerrif(const struct device *dev) { enum can_state new_state; struct mcp251xfd_data *dev_data = dev->data; struct can_bus_err_cnt err_cnt; int ret; k_mutex_lock(&dev_data->mutex, K_FOREVER); ret = mcp251xfd_get_state(dev, &new_state, &err_cnt); if (ret < 0) { goto done; } if (new_state == dev_data->state) { goto done; } LOG_INF("State %d -> %d (tx: %d, rx: %d)", dev_data->state, new_state, err_cnt.tx_err_cnt, err_cnt.rx_err_cnt); /* Upon entering bus-off, all the fifos are reset. 
*/ dev_data->state = new_state; if (new_state == CAN_STATE_BUS_OFF) { mcp251xfd_reset_tx_fifos(dev, -ENETDOWN); } if (dev_data->common.state_change_cb) { dev_data->common.state_change_cb(dev, new_state, err_cnt, dev_data->common.state_change_cb_user_data); } done: k_mutex_unlock(&dev_data->mutex); return ret; } static int mcp251xfd_handle_modif(const struct device *dev) { struct mcp251xfd_data *dev_data = dev->data; uint8_t mode; int ret; k_mutex_lock(&dev_data->mutex, K_FOREVER); ret = mcp251xfd_get_mode_internal(dev, &mode); if (ret < 0) { goto finish; } dev_data->current_mcp251xfd_mode = mode; LOG_INF("Switched to mode %d", mode); if (mode == dev_data->next_mcp251xfd_mode) { ret = 0; goto finish; } /* try to transition back into our target mode */ if (dev_data->common.started) { LOG_INF("Switching back into mode %d", dev_data->next_mcp251xfd_mode); ret = mcp251xfd_set_mode_internal(dev, dev_data->next_mcp251xfd_mode); } finish: k_mutex_unlock(&dev_data->mutex); return ret; } static int mcp251xfd_handle_ivmif(const struct device *dev) { uint32_t *reg; struct mcp251xfd_data *dev_data = dev->data; int ret; uint32_t tmp; k_mutex_lock(&dev_data->mutex, K_FOREVER); reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_BDIAG1, MCP251XFD_REG_SIZE); if (!reg) { ret = -EINVAL; goto done; } tmp = sys_le32_to_cpu(*reg); if ((tmp & MCP251XFD_REG_BDIAG1_TXBOERR) != 0) { LOG_INF("ivmif bus-off error"); mcp251xfd_reset_tx_fifos(dev, -ENETDOWN); } /* Clear the values in diag */ reg = mcp251xfd_get_spi_buf_ptr(dev); reg[0] = 0; ret = mcp251xfd_write(dev, MCP251XFD_REG_BDIAG1, MCP251XFD_REG_SIZE); if (ret < 0) { goto done; } /* There's no flag for DACKERR */ if ((tmp & MCP251XFD_REG_BDIAG1_NACKERR) != 0) { CAN_STATS_ACK_ERROR_INC(dev); } if ((tmp & (MCP251XFD_REG_BDIAG1_NBIT0ERR | MCP251XFD_REG_BDIAG1_DBIT0ERR)) != 0) { CAN_STATS_BIT0_ERROR_INC(dev); } if ((tmp & (MCP251XFD_REG_BDIAG1_NBIT1ERR | MCP251XFD_REG_BDIAG1_DBIT1ERR)) != 0) { CAN_STATS_BIT1_ERROR_INC(dev); } if ((tmp & (MCP251XFD_REG_BDIAG1_NCRCERR | MCP251XFD_REG_BDIAG1_DCRCERR)) != 0) { CAN_STATS_CRC_ERROR_INC(dev); } if ((tmp & (MCP251XFD_REG_BDIAG1_NFORMERR | MCP251XFD_REG_BDIAG1_DFORMERR)) != 0) { CAN_STATS_FORM_ERROR_INC(dev); } if ((tmp & (MCP251XFD_REG_BDIAG1_NSTUFERR | MCP251XFD_REG_BDIAG1_DSTUFERR)) != 0) { CAN_STATS_STUFF_ERROR_INC(dev); } done: k_mutex_unlock(&dev_data->mutex); return ret; } #if defined(CONFIG_CAN_STATS) static int mcp251xfd_handle_rxovif(const struct device *dev) { uint8_t *reg_byte; struct mcp251xfd_data *dev_data = dev->data; int ret; k_mutex_lock(&dev_data->mutex, K_FOREVER); reg_byte = mcp251xfd_get_spi_buf_ptr(dev); *reg_byte = 0; ret = mcp251xfd_write(dev, MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO_IDX), 1); if (ret < 0) { goto done; } CAN_STATS_RX_OVERRUN_INC(dev); done: k_mutex_unlock(&dev_data->mutex); return ret; } #endif static void mcp251xfd_handle_interrupts(const struct device *dev) { const struct mcp251xfd_config *dev_cfg = dev->config; struct mcp251xfd_data *dev_data = dev->data; uint16_t *reg_int_hw; uint32_t reg_int; int ret; uint8_t consecutive_calls = 0; while (1) { k_mutex_lock(&dev_data->mutex, K_FOREVER); reg_int_hw = mcp251xfd_read_crc(dev, MCP251XFD_REG_INT, sizeof(*reg_int_hw)); if (!reg_int_hw) { k_mutex_unlock(&dev_data->mutex); continue; } *reg_int_hw = sys_le16_to_cpu(*reg_int_hw); reg_int = *reg_int_hw; /* these interrupt flags need to be explicitly cleared */ if (reg_int & MCP251XFD_REG_INT_IF_CLEARABLE_MASK) { *reg_int_hw &= ~MCP251XFD_REG_INT_IF_CLEARABLE_MASK; *reg_int_hw = 
sys_cpu_to_le16(*reg_int_hw); ret = mcp251xfd_write(dev, MCP251XFD_REG_INT, sizeof(*reg_int_hw)); if (ret) { LOG_ERR("Error clearing REG_INT interrupts [%d]", ret); } } k_mutex_unlock(&dev_data->mutex); if ((reg_int & MCP251XFD_REG_INT_RXIF) != 0) { ret = mcp251xfd_handle_fifo_read(dev, &dev_cfg->rx_fifo, MCP251XFD_FIFO_TYPE_RX); if (ret < 0) { LOG_ERR("Error handling RXIF [%d]", ret); } } if ((reg_int & MCP251XFD_REG_INT_TEFIF) != 0) { ret = mcp251xfd_handle_fifo_read(dev, &dev_cfg->tef_fifo, MCP251XFD_FIFO_TYPE_TEF); if (ret < 0) { LOG_ERR("Error handling TEFIF [%d]", ret); } } if ((reg_int & MCP251XFD_REG_INT_IVMIF) != 0) { ret = mcp251xfd_handle_ivmif(dev); if (ret < 0) { LOG_ERR("Error handling IVMIF [%d]", ret); } } if ((reg_int & MCP251XFD_REG_INT_MODIF) != 0) { ret = mcp251xfd_handle_modif(dev); if (ret < 0) { LOG_ERR("Error handling MODIF [%d]", ret); } } /* * From Linux mcp251xfd driver * On the MCP2527FD and MCP2518FD, we don't get a CERRIF IRQ on the transition * TX ERROR_WARNING -> TX ERROR_ACTIVE. */ if ((reg_int & MCP251XFD_REG_INT_CERRIF) || dev_data->state > CAN_STATE_ERROR_ACTIVE) { ret = mcp251xfd_handle_cerrif(dev); if (ret < 0) { LOG_ERR("Error handling CERRIF [%d]", ret); } } #if defined(CONFIG_CAN_STATS) if ((reg_int & MCP251XFD_REG_INT_RXOVIF) != 0) { ret = mcp251xfd_handle_rxovif(dev); if (ret < 0) { LOG_ERR("Error handling RXOVIF [%d]", ret); } } #endif /* Break from loop if INT pin is inactive */ consecutive_calls++; ret = gpio_pin_get_dt(&dev_cfg->int_gpio_dt); if (ret < 0) { LOG_ERR("Couldn't read INT pin [%d]", ret); } else if (ret == 0) { /* All interrupt flags handled */ break; } else if (consecutive_calls % MCP251XFD_MAX_INT_HANDLER_CALLS == 0) { /* If there are clock problems, then MODIF cannot be cleared. */ /* This is detected if there are too many consecutive calls. */ /* Sleep this thread if this happens. 
*/ k_sleep(K_USEC(MCP251XFD_INT_HANDLER_SLEEP_USEC)); } } } static void mcp251xfd_int_thread(const struct device *dev) { const struct mcp251xfd_config *dev_cfg = dev->config; struct mcp251xfd_data *dev_data = dev->data; while (1) { int ret; k_sem_take(&dev_data->int_sem, K_FOREVER); mcp251xfd_handle_interrupts(dev); /* Re-enable pin interrupts */ ret = gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_LEVEL_ACTIVE); if (ret < 0) { LOG_ERR("Couldn't enable pin interrupt [%d]", ret); k_oops(); } } } static void mcp251xfd_int_gpio_callback(const struct device *dev_gpio, struct gpio_callback *cb, uint32_t pins) { ARG_UNUSED(dev_gpio); struct mcp251xfd_data *dev_data = CONTAINER_OF(cb, struct mcp251xfd_data, int_gpio_cb); const struct device *dev = dev_data->dev; const struct mcp251xfd_config *dev_cfg = dev->config; int ret; /* Disable pin interrupts */ ret = gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_DISABLE); if (ret < 0) { LOG_ERR("Couldn't disable pin interrupt [%d]", ret); k_oops(); } k_sem_give(&dev_data->int_sem); } static int mcp251xfd_get_capabilities(const struct device *dev, can_mode_t *cap) { ARG_UNUSED(dev); *cap = CAN_MODE_NORMAL | CAN_MODE_LISTENONLY | CAN_MODE_LOOPBACK; #if defined(CONFIG_CAN_FD_MODE) *cap |= CAN_MODE_FD; #endif return 0; } static int mcp251xfd_start(const struct device *dev) { struct mcp251xfd_data *dev_data = dev->data; const struct mcp251xfd_config *dev_cfg = dev->config; int ret; if (dev_data->common.started) { return -EALREADY; } /* in case of a race between mcp251xfd_send() and mcp251xfd_stop() */ mcp251xfd_reset_tx_fifos(dev, -ENETDOWN); if (dev_cfg->common.phy != NULL) { ret = can_transceiver_enable(dev_cfg->common.phy, dev_data->common.mode); if (ret < 0) { LOG_ERR("Failed to enable CAN transceiver [%d]", ret); return ret; } } k_mutex_lock(&dev_data->mutex, K_FOREVER); CAN_STATS_RESET(dev); ret = mcp251xfd_set_mode_internal(dev, dev_data->next_mcp251xfd_mode); if (ret < 0) { LOG_ERR("Failed to set the mode [%d]", ret); if (dev_cfg->common.phy != NULL) { /* Attempt to disable the CAN transceiver in case of error */ (void)can_transceiver_disable(dev_cfg->common.phy); } } else { dev_data->common.started = true; } k_mutex_unlock(&dev_data->mutex); return ret; } static int mcp251xfd_stop(const struct device *dev) { struct mcp251xfd_data *dev_data = dev->data; const struct mcp251xfd_config *dev_cfg = dev->config; uint8_t *reg_byte; int ret; if (!dev_data->common.started) { return -EALREADY; } k_mutex_lock(&dev_data->mutex, K_FOREVER); /* abort all transmissions */ reg_byte = mcp251xfd_get_spi_buf_ptr(dev); *reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_ABAT); ret = mcp251xfd_write(dev, MCP251XFD_REG_CON_B3, 1); if (ret < 0) { k_mutex_unlock(&dev_data->mutex); return ret; } /* wait for all the messages to be aborted */ while (1) { reg_byte = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON_B3, 1); if (!reg_byte || (*reg_byte & MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_ABAT)) == 0) { break; } } mcp251xfd_reset_tx_fifos(dev, -ENETDOWN); ret = mcp251xfd_set_mode_internal(dev, MCP251XFD_REG_CON_MODE_CONFIG); if (ret < 0) { k_mutex_unlock(&dev_data->mutex); return ret; } dev_data->common.started = false; k_mutex_unlock(&dev_data->mutex); if (dev_cfg->common.phy != NULL) { ret = can_transceiver_disable(dev_cfg->common.phy); if (ret < 0) { LOG_ERR("Failed to disable CAN transceiver [%d]", ret); return ret; } } return 0; } static void mcp251xfd_rx_fifo_handler(const struct device *dev, void *data) { struct 
can_frame dst; struct mcp251xfd_data *dev_data = dev->data; struct mcp251xfd_rxobj *rxobj = data; uint32_t filhit; mcp251xfd_rxobj_to_canframe(rxobj, &dst); #ifndef CONFIG_CAN_ACCEPT_RTR if ((dst.flags & CAN_FRAME_RTR) != 0U) { return; } #endif /* !CONFIG_CAN_ACCEPT_RTR */ filhit = FIELD_GET(MCP251XFD_OBJ_FILHIT_MASK, rxobj->flags); if ((dev_data->filter_usage & BIT(filhit)) != 0) { LOG_DBG("Received msg CAN id: 0x%x", dst.id); dev_data->rx_cb[filhit](dev, &dst, dev_data->cb_arg[filhit]); } } static void mcp251xfd_tef_fifo_handler(const struct device *dev, void *data) { struct mcp251xfd_data *dev_data = dev->data; can_tx_callback_t callback; struct mcp251xfd_tefobj *tefobj = data; uint8_t mailbox_idx; mailbox_idx = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MASK, tefobj->flags); if (mailbox_idx >= MCP251XFD_TX_QUEUE_ITEMS) { mcp251xfd_reset_tx_fifos(dev, -EIO); LOG_ERR("Invalid mailbox index"); return; } callback = dev_data->mailbox[mailbox_idx].cb; if (callback != NULL) { callback(dev, 0, dev_data->mailbox[mailbox_idx].cb_arg); } dev_data->mailbox_usage &= ~BIT(mailbox_idx); dev_data->mailbox[mailbox_idx].cb = NULL; k_sem_give(&dev_data->tx_sem); } static inline int mcp251xfd_init_con_reg(const struct device *dev) { uint32_t *reg; uint32_t tmp; reg = mcp251xfd_get_spi_buf_ptr(dev); tmp = MCP251XFD_REG_CON_ISOCRCEN | MCP251XFD_REG_CON_WAKFIL | MCP251XFD_REG_CON_TXQEN | MCP251XFD_REG_CON_STEF; tmp |= FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK, MCP251XFD_REG_CON_WFT_T11FILTER) | FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, MCP251XFD_REG_CON_MODE_CONFIG); *reg = tmp; return mcp251xfd_write(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE); } static inline int mcp251xfd_init_osc_reg(const struct device *dev) { int ret; const struct mcp251xfd_config *dev_cfg = dev->config; uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev); uint32_t reg_value = MCP251XFD_REG_OSC_OSCRDY; uint32_t tmp; tmp = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, dev_cfg->clko_div); if (dev_cfg->pll_enable) { tmp |= MCP251XFD_REG_OSC_PLLEN; reg_value |= MCP251XFD_REG_OSC_PLLRDY; } *reg = sys_cpu_to_le32(tmp); ret = mcp251xfd_write(dev, MCP251XFD_REG_OSC, MCP251XFD_REG_SIZE); if (ret < 0) { return ret; } return mcp251xfd_reg_check_value_wtimeout(dev, MCP251XFD_REG_OSC, reg_value, reg_value, MCP251XFD_PLLRDY_TIMEOUT_USEC, MCP251XFD_PLLRDY_RETRIES, false); } static inline int mcp251xfd_init_iocon_reg(const struct device *dev) { const struct mcp251xfd_config *dev_cfg = dev->config; uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev); uint32_t tmp; /* * MCP2518FD Errata: DS80000789 * Writing Byte 2/3 of the IOCON register using single SPI write cleat LAT0 and LAT1. * This has no effect in the current version since LAT0/1 are set to zero anyway. * However, it needs to be properly handled if other values are needed. Errata suggests * to do single byte writes instead. 
*/ tmp = MCP251XFD_REG_IOCON_TRIS0 | MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_PM1; if (dev_cfg->sof_on_clko) { tmp |= MCP251XFD_REG_IOCON_SOF; } *reg = sys_cpu_to_le32(tmp); return mcp251xfd_write(dev, MCP251XFD_REG_IOCON, MCP251XFD_REG_SIZE); } static inline int mcp251xfd_init_int_reg(const struct device *dev) { uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev); uint32_t tmp; tmp = MCP251XFD_REG_INT_RXIE | MCP251XFD_REG_INT_MODIE | MCP251XFD_REG_INT_TEFIE | MCP251XFD_REG_INT_CERRIE; #if defined(CONFIG_CAN_STATS) tmp |= MCP251XFD_REG_INT_RXOVIE; #endif *reg = sys_cpu_to_le32(tmp); return mcp251xfd_write(dev, MCP251XFD_REG_INT, MCP251XFD_REG_SIZE); } static inline int mcp251xfd_init_tef_fifo(const struct device *dev) { uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev); uint32_t tmp; tmp = MCP251XFD_REG_TEFCON_TEFNEIE | MCP251XFD_REG_TEFCON_FRESET; tmp |= FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, MCP251XFD_TX_QUEUE_ITEMS - 1); *reg = sys_cpu_to_le32(tmp); return mcp251xfd_write(dev, MCP251XFD_REG_TEFCON, MCP251XFD_REG_SIZE); } static inline int mcp251xfd_init_tx_queue(const struct device *dev) { uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev); uint32_t tmp; tmp = MCP251XFD_REG_TXQCON_TXEN | MCP251XFD_REG_TXQCON_FRESET; tmp |= FIELD_PREP(MCP251XFD_REG_TXQCON_TXAT_MASK, MCP251XFD_REG_TXQCON_TXAT_UNLIMITED); tmp |= FIELD_PREP(MCP251XFD_REG_TXQCON_FSIZE_MASK, MCP251XFD_TX_QUEUE_ITEMS - 1); tmp |= FIELD_PREP(MCP251XFD_REG_TXQCON_PLSIZE_MASK, can_bytes_to_dlc(MCP251XFD_PAYLOAD_SIZE) - 8); *reg = sys_cpu_to_le32(tmp); return mcp251xfd_write(dev, MCP251XFD_REG_TXQCON, MCP251XFD_REG_SIZE); } static inline int mcp251xfd_init_rx_fifo(const struct device *dev) { uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev); uint32_t tmp; tmp = MCP251XFD_REG_FIFOCON_TFNRFNIE | MCP251XFD_REG_FIFOCON_FRESET; #if defined(CONFIG_CAN_STATS) tmp |= MCP251XFD_REG_FIFOCON_RXOVIE; #endif tmp |= FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, MCP251XFD_RX_FIFO_ITEMS - 1); tmp |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, can_bytes_to_dlc(MCP251XFD_PAYLOAD_SIZE) - 8); #if defined(CONFIG_CAN_RX_TIMESTAMP) tmp |= MCP251XFD_REG_FIFOCON_RXTSEN; #endif *reg = sys_cpu_to_le32(tmp); return mcp251xfd_write(dev, MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO_IDX), MCP251XFD_REG_SIZE); } #if defined(CONFIG_CAN_RX_TIMESTAMP) static int mcp251xfd_init_tscon(const struct device *dev) { uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev); const struct mcp251xfd_config *dev_cfg = dev->config; uint32_t tmp; tmp = MCP251XFD_REG_TSCON_TBCEN; tmp |= FIELD_PREP(MCP251XFD_REG_TSCON_TBCPRE_MASK, dev_cfg->timestamp_prescaler - 1); *reg = sys_cpu_to_le32(tmp); return mcp251xfd_write(dev, MCP251XFD_REG_TSCON, MCP251XFD_REG_SIZE); } #endif static int mcp251xfd_reset(const struct device *dev) { const struct mcp251xfd_config *dev_cfg = dev->config; uint16_t cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_RESET); const struct spi_buf tx_buf = {.buf = &cmd, .len = sizeof(cmd),}; const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1}; int ret; /* device can only be reset when in configuration mode */ ret = mcp251xfd_set_mode_internal(dev, MCP251XFD_REG_CON_MODE_CONFIG); if (ret < 0) { return ret; } return spi_write_dt(&dev_cfg->bus, &tx); } static int mcp251xfd_init(const struct device *dev) { const struct mcp251xfd_config *dev_cfg = dev->config; struct mcp251xfd_data *dev_data = dev->data; uint32_t *reg; uint8_t opmod; int ret; struct can_timing timing = { 0 }; #if defined(CONFIG_CAN_FD_MODE) struct can_timing timing_data = { 0 }; 
#endif dev_data->dev = dev; if (dev_cfg->clk_dev != NULL) { uint32_t clk_id = dev_cfg->clk_id; if (!device_is_ready(dev_cfg->clk_dev)) { LOG_ERR("Clock controller not ready"); return -ENODEV; } ret = clock_control_on(dev_cfg->clk_dev, (clock_control_subsys_t)clk_id); if (ret < 0) { LOG_ERR("Failed to enable clock [%d]", ret); return ret; } } k_sem_init(&dev_data->int_sem, 0, 1); k_sem_init(&dev_data->tx_sem, MCP251XFD_TX_QUEUE_ITEMS, MCP251XFD_TX_QUEUE_ITEMS); k_mutex_init(&dev_data->mutex); if (!spi_is_ready_dt(&dev_cfg->bus)) { LOG_ERR("SPI bus %s not ready", dev_cfg->bus.bus->name); return -ENODEV; } if (!gpio_is_ready_dt(&dev_cfg->int_gpio_dt)) { LOG_ERR("GPIO port not ready"); return -ENODEV; } if (gpio_pin_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INPUT) < 0) { LOG_ERR("Unable to configure GPIO pin"); return -EINVAL; } gpio_init_callback(&dev_data->int_gpio_cb, mcp251xfd_int_gpio_callback, BIT(dev_cfg->int_gpio_dt.pin)); if (gpio_add_callback_dt(&dev_cfg->int_gpio_dt, &dev_data->int_gpio_cb) < 0) { return -EINVAL; } if (gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_LEVEL_ACTIVE) < 0) { return -EINVAL; } k_thread_create(&dev_data->int_thread, dev_data->int_thread_stack, CONFIG_CAN_MCP251XFD_INT_THREAD_STACK_SIZE, (k_thread_entry_t)mcp251xfd_int_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(CONFIG_CAN_MCP251XFD_INT_THREAD_PRIO), 0, K_NO_WAIT); (void)k_thread_name_set(&dev_data->int_thread, "MCP251XFD interrupt thread"); ret = mcp251xfd_reset(dev); if (ret < 0) { LOG_ERR("Failed to reset the device [%d]", ret); goto done; } ret = can_calc_timing(dev, &timing, dev_cfg->common.bitrate, dev_cfg->common.sample_point); if (ret < 0) { LOG_ERR("Can't find timing for given param"); goto done; } LOG_DBG("Presc: %d, BS1: %d, BS2: %d", timing.prescaler, timing.phase_seg1, timing.phase_seg2); LOG_DBG("Sample-point err : %d", ret); #if defined(CONFIG_CAN_FD_MODE) ret = can_calc_timing_data(dev, &timing_data, dev_cfg->common.bitrate_data, dev_cfg->common.sample_point_data); if (ret < 0) { LOG_ERR("Can't find data timing for given param"); goto done; } LOG_DBG("Data phase Presc: %d, BS1: %d, BS2: %d", timing_data.prescaler, timing_data.phase_seg1, timing_data.phase_seg2); LOG_DBG("Data phase Sample-point err : %d", ret); #endif reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE); if (!reg) { ret = -EINVAL; goto done; } *reg = sys_le32_to_cpu(*reg); opmod = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, *reg); if (opmod != MCP251XFD_REG_CON_MODE_CONFIG) { LOG_ERR("Device did not reset into configuration mode [%d]", opmod); ret = -EIO; goto done; } dev_data->current_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_CONFIG; ret = mcp251xfd_init_con_reg(dev); if (ret < 0) { goto done; } ret = mcp251xfd_init_osc_reg(dev); if (ret < 0) { goto done; } ret = mcp251xfd_init_iocon_reg(dev); if (ret < 0) { goto done; } ret = mcp251xfd_init_int_reg(dev); if (ret < 0) { goto done; } ret = mcp251xfd_set_tdc(dev, false); if (ret < 0) { goto done; } #if defined(CONFIG_CAN_RX_TIMESTAMP) ret = mcp251xfd_init_tscon(dev); if (ret < 0) { goto done; } #endif ret = mcp251xfd_init_tef_fifo(dev); if (ret < 0) { goto done; } ret = mcp251xfd_init_tx_queue(dev); if (ret < 0) { goto done; } ret = mcp251xfd_init_rx_fifo(dev); if (ret < 0) { goto done; } LOG_DBG("%d TX FIFOS: 1 element", MCP251XFD_TX_QUEUE_ITEMS); LOG_DBG("1 RX FIFO: %d elements", MCP251XFD_RX_FIFO_ITEMS); LOG_DBG("%db of %db RAM Allocated", MCP251XFD_TEF_FIFO_SIZE + MCP251XFD_TX_QUEUE_SIZE + MCP251XFD_RX_FIFO_SIZE, MCP251XFD_RAM_SIZE); done: ret = 
can_set_timing(dev, &timing); if (ret < 0) { return ret; } #if defined(CONFIG_CAN_FD_MODE) ret = can_set_timing_data(dev, &timing_data); if (ret < 0) { return ret; } #endif return ret; } static const struct can_driver_api mcp251xfd_api_funcs = { .get_capabilities = mcp251xfd_get_capabilities, .set_mode = mcp251xfd_set_mode, .set_timing = mcp251xfd_set_timing, #if defined(CONFIG_CAN_FD_MODE) .set_timing_data = mcp251xfd_set_timing_data, #endif .start = mcp251xfd_start, .stop = mcp251xfd_stop, .send = mcp251xfd_send, .add_rx_filter = mcp251xfd_add_rx_filter, .remove_rx_filter = mcp251xfd_remove_rx_filter, .get_state = mcp251xfd_get_state, .set_state_change_callback = mcp251xfd_set_state_change_callback, .get_core_clock = mcp251xfd_get_core_clock, .get_max_filters = mcp251xfd_get_max_filters, .timing_min = { .sjw = 1, .prop_seg = 0, .phase_seg1 = 2, .phase_seg2 = 1, .prescaler = 1, }, .timing_max = { .sjw = 128, .prop_seg = 0, .phase_seg1 = 256, .phase_seg2 = 128, .prescaler = 256, }, #if defined(CONFIG_CAN_FD_MODE) .timing_data_min = { .sjw = 1, .prop_seg = 0, .phase_seg1 = 1, .phase_seg2 = 1, .prescaler = 1, }, .timing_data_max = { .sjw = 16, .prop_seg = 0, .phase_seg1 = 32, .phase_seg2 = 16, .prescaler = 256, }, #endif }; #define MCP251XFD_SET_CLOCK(inst) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, clocks), \ (.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(inst)), \ .clk_id = DT_INST_CLOCKS_CELL(inst, id)), \ ()) #define MCP251XFD_INIT(inst) \ static K_KERNEL_STACK_DEFINE(mcp251xfd_int_stack_##inst, \ CONFIG_CAN_MCP251XFD_INT_THREAD_STACK_SIZE); \ \ static struct mcp251xfd_data mcp251xfd_data_##inst = { \ .int_thread_stack = mcp251xfd_int_stack_##inst, \ }; \ static const struct mcp251xfd_config mcp251xfd_config_##inst = { \ .common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, 8000000), \ .bus = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8), 0), \ .int_gpio_dt = GPIO_DT_SPEC_INST_GET(inst, int_gpios), \ \ .sof_on_clko = DT_INST_PROP(inst, sof_on_clko), \ .clko_div = DT_INST_ENUM_IDX(inst, clko_div), \ .pll_enable = DT_INST_PROP(inst, pll_enable), \ .timestamp_prescaler = DT_INST_PROP(inst, timestamp_prescaler), \ \ .osc_freq = DT_INST_PROP(inst, osc_freq), \ \ .rx_fifo = {.ram_start_addr = MCP251XFD_RX_FIFO_START_ADDR, \ .reg_fifocon_addr = MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO_IDX), \ .capacity = MCP251XFD_RX_FIFO_ITEMS, \ .item_size = MCP251XFD_RX_FIFO_ITEM_SIZE, \ .msg_handler = mcp251xfd_rx_fifo_handler}, \ .tef_fifo = {.ram_start_addr = MCP251XFD_TEF_FIFO_START_ADDR, \ .reg_fifocon_addr = MCP251XFD_REG_TEFCON, \ .capacity = MCP251XFD_TEF_FIFO_ITEMS, \ .item_size = MCP251XFD_TEF_FIFO_ITEM_SIZE, \ .msg_handler = mcp251xfd_tef_fifo_handler}, \ MCP251XFD_SET_CLOCK(inst) \ }; \ \ CAN_DEVICE_DT_INST_DEFINE(inst, mcp251xfd_init, NULL, &mcp251xfd_data_##inst, \ &mcp251xfd_config_##inst, POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \ &mcp251xfd_api_funcs); DT_INST_FOREACH_STATUS_OKAY(MCP251XFD_INIT) ```
/content/code_sandbox/drivers/can/can_mcp251xfd.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
14,918
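`mcp251xfd_canframe_to_txobj()` above packs a 29-bit extended CAN ID into an 11-bit SID field (upper bits) plus an 18-bit EID field (lower bits) with `FIELD_PREP()`. A standalone sketch of the same split, using made-up `DEMO_*` masks rather than the driver's register definitions:

```c
/* Illustrative only: DEMO_ID_SID_MASK/DEMO_ID_EID_MASK mimic a TX object
 * layout with SID in bits 0..10 and EID in bits 11..28; they are not the
 * driver's actual macros.
 */
#include <zephyr/drivers/can.h>
#include <zephyr/sys/util.h>

#define DEMO_ID_SID_MASK GENMASK(10, 0)
#define DEMO_ID_EID_MASK GENMASK(28, 11)

static uint32_t demo_pack_id(const struct can_frame *frame)
{
	uint32_t id;

	if ((frame->flags & CAN_FRAME_IDE) != 0) {
		/* Upper 11 bits of the 29-bit ID form the SID, lower 18 the EID */
		id = FIELD_PREP(DEMO_ID_SID_MASK, frame->id >> 18);
		id |= FIELD_PREP(DEMO_ID_EID_MASK, frame->id);
	} else {
		/* Standard frames carry the 11-bit ID directly in the SID field */
		id = FIELD_PREP(DEMO_ID_SID_MASK, frame->id);
	}

	return id;
}
```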
```unknown
# Infineon XMC4xxx CAN configuration options

config CAN_XMC4XXX
	bool "Infineon XMC4xxx CAN Driver"
	default y
	depends on DT_HAS_INFINEON_XMC4XXX_CAN_NODE_ENABLED
	help
	  Enable Infineon XMC4xxx CAN Driver

if CAN_XMC4XXX

config CAN_XMC4XXX_MAX_TX_QUEUE
	int "Maximum number of queued messages"
	default 8
	range 1 32
	help
	  Defines the array size of transmit callback pointers and semaphores,
	  as well as the number of messages in the TX queue.

config CAN_XMC4XXX_RX_FIFO_ITEMS
	int "Number of CAN messages allocated to each RX FIFO"
	default 8
	range 1 32
	help
	  Defines the number of CAN messages in each RX FIFO. A separate
	  RX FIFO is created for each RX filter.

config CAN_XMC4XXX_INTERNAL_BUS_MODE
	bool "Internal bus mode"
	help
	  Connects all XMC4XXX CAN devices to an internal bus. Enables message
	  exchange between MCU CAN devices without any external connectors.

config CAN_MAX_FILTER
	int "Maximum number of concurrent active filters"
	default 4
	range 1 32
	help
	  Maximum number of filters supported by the can_add_rx_filter() API call.

endif # CAN_XMC4XXX
```
/content/code_sandbox/drivers/can/Kconfig.xmc4xxx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
285
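`CAN_MAX_FILTER` caps how many filters `can_add_rx_filter()` can hand out at a time; once the slots are used up the call fails with a negative error (the MCP251xFD driver above returns -ENOSPC, for example). A short usage sketch with example device, ID and callback names:

```c
/* Registering one RX filter consumes one CONFIG_CAN_MAX_FILTER slot. */
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>

static void demo_rx_cb(const struct device *dev, struct can_frame *frame, void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(user_data);

	printk("got CAN ID 0x%03x, dlc %u\n", frame->id, frame->dlc);
}

int demo_add_filter(const struct device *can_dev)
{
	const struct can_filter filter = {
		.flags = 0U,             /* standard 11-bit CAN ID */
		.id = 0x123,
		.mask = CAN_STD_ID_MASK, /* exact match */
	};
	int filter_id;

	/* Negative return (e.g. -ENOSPC) once all filter slots are in use */
	filter_id = can_add_rx_filter(can_dev, demo_rx_cb, NULL, &filter);

	return filter_id;
}
```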
```unknown
# SAM CAN configuration options

config CAN_SAM0
	bool "Atmel SAM0 CAN driver"
	default y
	depends on DT_HAS_ATMEL_SAM0_CAN_ENABLED
	select CAN_MCAN
```
/content/code_sandbox/drivers/can/Kconfig.sam0
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
44
```unknown
# CAN transceiver configuration options

menu "CAN transceiver drivers"

config CAN_TRANSCEIVER_INIT_PRIORITY
	int "CAN transceiver driver init priority"
	default 45
	help
	  CAN transceiver device driver initialization priority.

config CAN_TRANSCEIVER_GPIO
	bool "GPIO controlled CAN transceiver"
	default y
	depends on DT_HAS_CAN_TRANSCEIVER_GPIO_ENABLED
	select GPIO
	help
	  Enable support for GPIO controlled CAN transceivers.

endmenu
```
/content/code_sandbox/drivers/can/transceiver/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
96
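Controller drivers drive an optional transceiver such as the GPIO-controlled one above through the generic transceiver API, as the MCP251xFD driver does in its start/stop paths. A minimal sketch, assuming a hypothetical `demo_cfg` that stores the optional phy handle:

```c
/* Sketch only: demo_cfg/demo_start are illustrative names, not a real driver. */
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/drivers/can/transceiver.h>

struct demo_cfg {
	const struct device *phy; /* NULL when no transceiver is described in DT */
};

static int demo_start(const struct device *dev, can_mode_t mode)
{
	const struct demo_cfg *cfg = dev->config;
	int err;

	if (cfg->phy != NULL) {
		/* Power up / enable the transceiver before going on-bus */
		err = can_transceiver_enable(cfg->phy, mode);
		if (err != 0) {
			return err;
		}
	}

	/* ...bring the CAN controller out of configuration mode here... */

	return 0;
}
```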
```c /* * */ #include <zephyr/drivers/can.h> #include <zephyr/kernel.h> #include <zephyr/sys/check.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(can_common, CONFIG_CAN_LOG_LEVEL); /* Maximum acceptable deviation in sample point location (permille) */ #define SAMPLE_POINT_MARGIN 50 /* CAN sync segment is always one time quantum */ #define CAN_SYNC_SEG 1 struct can_tx_default_cb_ctx { struct k_sem done; int status; }; static void can_tx_default_cb(const struct device *dev, int error, void *user_data) { struct can_tx_default_cb_ctx *ctx = user_data; ctx->status = error; k_sem_give(&ctx->done); } int z_impl_can_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout, can_tx_callback_t callback, void *user_data) { const struct can_driver_api *api = (const struct can_driver_api *)dev->api; uint32_t id_mask; CHECKIF(frame == NULL) { return -EINVAL; } if ((frame->flags & CAN_FRAME_IDE) != 0U) { id_mask = CAN_EXT_ID_MASK; } else { id_mask = CAN_STD_ID_MASK; } CHECKIF((frame->id & ~(id_mask)) != 0U) { LOG_ERR("invalid frame with %s (%d-bit) CAN ID 0x%0*x", (frame->flags & CAN_FRAME_IDE) != 0 ? "extended" : "standard", (frame->flags & CAN_FRAME_IDE) != 0 ? 29 : 11, (frame->flags & CAN_FRAME_IDE) != 0 ? 8 : 3, frame->id); return -EINVAL; } if (callback == NULL) { struct can_tx_default_cb_ctx ctx; int err; k_sem_init(&ctx.done, 0, 1); err = api->send(dev, frame, timeout, can_tx_default_cb, &ctx); if (err != 0) { return err; } k_sem_take(&ctx.done, K_FOREVER); return ctx.status; } return api->send(dev, frame, timeout, callback, user_data); } int can_add_rx_filter(const struct device *dev, can_rx_callback_t callback, void *user_data, const struct can_filter *filter) { const struct can_driver_api *api = (const struct can_driver_api *)dev->api; uint32_t id_mask; CHECKIF(callback == NULL || filter == NULL) { return -EINVAL; } if ((filter->flags & CAN_FILTER_IDE) != 0U) { id_mask = CAN_EXT_ID_MASK; } else { id_mask = CAN_STD_ID_MASK; } CHECKIF(((filter->id & ~(id_mask)) != 0U) || ((filter->mask & ~(id_mask)) != 0U)) { LOG_ERR("invalid filter with %s (%d-bit) CAN ID 0x%0*x, CAN ID mask 0x%0*x", (filter->flags & CAN_FILTER_IDE) != 0 ? "extended" : "standard", (filter->flags & CAN_FILTER_IDE) != 0 ? 29 : 11, (filter->flags & CAN_FILTER_IDE) != 0 ? 8 : 3, filter->id, (filter->flags & CAN_FILTER_IDE) != 0 ? 8 : 3, filter->mask); return -EINVAL; } return api->add_rx_filter(dev, callback, user_data, filter); } static void can_msgq_put(const struct device *dev, struct can_frame *frame, void *user_data) { struct k_msgq *msgq = (struct k_msgq *)user_data; int ret; ARG_UNUSED(dev); __ASSERT_NO_MSG(msgq); ret = k_msgq_put(msgq, frame, K_NO_WAIT); if (ret) { LOG_ERR("Msgq %p overflowed. Frame ID: 0x%x", msgq, frame->id); } } int z_impl_can_add_rx_filter_msgq(const struct device *dev, struct k_msgq *msgq, const struct can_filter *filter) { const struct can_driver_api *api = dev->api; return api->add_rx_filter(dev, can_msgq_put, msgq, filter); } /** * @brief Update the timing given a total number of time quanta and a sample point. 
* * @code{.text} * * +---------------------------------------------------+ * | Nominal bit time in time quanta (total_tq) | * +--------------+----------+------------+------------+ * | sync_seg | prop_seg | phase_seg1 | phase_seg2 | * +--------------+----------+------------+------------+ * | CAN_SYNG_SEG | tseg1 | tseg2 | * +--------------+-----------------------+------------+ * ^ * sample_pnt * @endcode * * @see @a can_timing * * @param total_tq Total number of time quanta. * @param sample_pnt Sample point in permille of the entire bit time. * @param[out] res Result is written into the @a can_timing struct provided. * @param min Pointer to the minimum supported timing parameter values. * @param max Pointer to the maximum supported timing parameter values. * @retval 0 or positive sample point error on success. * @retval -ENOTSUP if the requested sample point cannot be met. */ static int update_sample_pnt(uint32_t total_tq, uint32_t sample_pnt, struct can_timing *res, const struct can_timing *min, const struct can_timing *max) { uint16_t tseg1_max = max->phase_seg1 + max->prop_seg; uint16_t tseg1_min = min->phase_seg1 + min->prop_seg; uint32_t sample_pnt_res; uint16_t tseg1, tseg2; /* Calculate number of time quanta in tseg2 for given sample point */ tseg2 = total_tq - (total_tq * sample_pnt) / 1000; tseg2 = CLAMP(tseg2, min->phase_seg2, max->phase_seg2); /* Calculate number of time quanta in tseg1 */ tseg1 = total_tq - CAN_SYNC_SEG - tseg2; if (tseg1 > tseg1_max) { /* Sample point location must be decreased */ tseg1 = tseg1_max; tseg2 = total_tq - CAN_SYNC_SEG - tseg1; if (tseg2 > max->phase_seg2) { return -ENOTSUP; } } else if (tseg1 < tseg1_min) { /* Sample point location must be increased */ tseg1 = tseg1_min; tseg2 = total_tq - CAN_SYNC_SEG - tseg1; if (tseg2 < min->phase_seg2) { return -ENOTSUP; } } res->phase_seg2 = tseg2; /* Attempt to distribute tseg1 evenly between prop_seq and phase_seg1 */ res->prop_seg = CLAMP(tseg1 / 2, min->prop_seg, max->prop_seg); res->phase_seg1 = tseg1 - res->prop_seg; if (res->phase_seg1 > max->phase_seg1) { /* Even tseg1 distribution not possible, decrease phase_seg1 */ res->phase_seg1 = max->phase_seg1; res->prop_seg = tseg1 - res->phase_seg1; } else if (res->phase_seg1 < min->phase_seg1) { /* Even tseg1 distribution not possible, increase phase_seg1 */ res->phase_seg1 = min->phase_seg1; res->prop_seg = tseg1 - res->phase_seg1; } /* Calculate the resulting sample point */ sample_pnt_res = (CAN_SYNC_SEG + tseg1) * 1000 / total_tq; /* Return the absolute sample point error */ return sample_pnt_res > sample_pnt ? sample_pnt_res - sample_pnt : sample_pnt - sample_pnt_res; } /** * @brief Get the sample point location for a given bitrate * * @param bitrate The bitrate in bits/second. * @return The sample point in permille. */ static uint16_t sample_point_for_bitrate(uint32_t bitrate) { uint16_t sample_pnt; if (bitrate > 800000) { /* 75.0% */ sample_pnt = 750; } else if (bitrate > 500000) { /* 80.0% */ sample_pnt = 800; } else { /* 87.5% */ sample_pnt = 875; } return sample_pnt; } /** * @brief Internal function for calculating CAN timing parameters. * * @param dev Pointer to the device structure for the driver instance. * @param[out] res Result is written into the @a can_timing struct provided. * @param min Pointer to the minimum supported timing parameter values. * @param max Pointer to the maximum supported timing parameter values. * @param bitrate Target bitrate in bits/s. * @param sample_pnt Sample point in permille of the entire bit time. 
* * @retval 0 or positive sample point error on success. * @retval -EINVAL if the requested bitrate or sample point is out of range. * @retval -ENOTSUP if the requested bitrate is not supported. * @retval -EIO if @a can_get_core_clock() is not available. */ static int can_calc_timing_internal(const struct device *dev, struct can_timing *res, const struct can_timing *min, const struct can_timing *max, uint32_t bitrate, uint16_t sample_pnt) { uint32_t total_tq = CAN_SYNC_SEG + max->prop_seg + max->phase_seg1 + max->phase_seg2; struct can_timing tmp_res = { 0 }; int err_min = INT_MAX; uint32_t core_clock; int prescaler; int err; if (bitrate == 0 || sample_pnt >= 1000) { return -EINVAL; } err = can_get_core_clock(dev, &core_clock); if (err != 0) { return -EIO; } if (sample_pnt == 0U) { sample_pnt = sample_point_for_bitrate(bitrate); } for (prescaler = MAX(core_clock / (total_tq * bitrate), min->prescaler); prescaler <= max->prescaler; prescaler++) { if (core_clock % (prescaler * bitrate)) { /* No integer total_tq for this prescaler setting */ continue; } total_tq = core_clock / (prescaler * bitrate); err = update_sample_pnt(total_tq, sample_pnt, &tmp_res, min, max); if (err < 0) { /* Sample point cannot be met for this prescaler setting */ continue; } if (err < err_min) { /* Improved sample point match */ err_min = err; res->prop_seg = tmp_res.prop_seg; res->phase_seg1 = tmp_res.phase_seg1; res->phase_seg2 = tmp_res.phase_seg2; res->prescaler = (uint16_t)prescaler; if (err == 0) { /* Perfect sample point match */ break; } } } if (err_min != 0U) { LOG_DBG("Sample point error: %d 1/1000", err_min); } /* Calculate default sjw as phase_seg2 / 2 and clamp the result */ res->sjw = MIN(res->phase_seg1, res->phase_seg2 / 2); res->sjw = CLAMP(res->sjw, min->sjw, max->sjw); return err_min == INT_MAX ? 
-ENOTSUP : err_min; } int z_impl_can_calc_timing(const struct device *dev, struct can_timing *res, uint32_t bitrate, uint16_t sample_pnt) { const struct can_timing *min = can_get_timing_min(dev); const struct can_timing *max = can_get_timing_max(dev); if (bitrate > 1000000) { return -EINVAL; } return can_calc_timing_internal(dev, res, min, max, bitrate, sample_pnt); } #ifdef CONFIG_CAN_FD_MODE int z_impl_can_calc_timing_data(const struct device *dev, struct can_timing *res, uint32_t bitrate, uint16_t sample_pnt) { const struct can_timing *min = can_get_timing_data_min(dev); const struct can_timing *max = can_get_timing_data_max(dev); if (bitrate > 8000000) { return -EINVAL; } return can_calc_timing_internal(dev, res, min, max, bitrate, sample_pnt); } #endif /* CONFIG_CAN_FD_MODE */ int can_calc_prescaler(const struct device *dev, struct can_timing *timing, uint32_t bitrate) { uint32_t ts = timing->prop_seg + timing->phase_seg1 + timing->phase_seg2 + CAN_SYNC_SEG; uint32_t core_clock; int ret; ret = can_get_core_clock(dev, &core_clock); if (ret != 0) { return ret; } timing->prescaler = core_clock / (bitrate * ts); return core_clock % (ts * timing->prescaler); } static int check_timing_in_range(const struct can_timing *timing, const struct can_timing *min, const struct can_timing *max) { if (!IN_RANGE(timing->sjw, min->sjw, max->sjw) || !IN_RANGE(timing->prop_seg, min->prop_seg, max->prop_seg) || !IN_RANGE(timing->phase_seg1, min->phase_seg1, max->phase_seg1) || !IN_RANGE(timing->phase_seg2, min->phase_seg2, max->phase_seg2) || !IN_RANGE(timing->prescaler, min->prescaler, max->prescaler)) { return -ENOTSUP; } if ((timing->sjw > timing->phase_seg1) || (timing->sjw > timing->phase_seg2)) { return -ENOTSUP; } return 0; } int z_impl_can_set_timing(const struct device *dev, const struct can_timing *timing) { const struct can_driver_api *api = (const struct can_driver_api *)dev->api; const struct can_timing *min = can_get_timing_min(dev); const struct can_timing *max = can_get_timing_max(dev); int err; err = check_timing_in_range(timing, min, max); if (err != 0) { return err; } return api->set_timing(dev, timing); } int z_impl_can_set_bitrate(const struct device *dev, uint32_t bitrate) { struct can_timing timing = { 0 }; uint32_t min = can_get_bitrate_min(dev); uint32_t max = can_get_bitrate_max(dev); uint16_t sample_pnt; int ret; if ((bitrate < min) || (bitrate > max)) { return -ENOTSUP; } sample_pnt = sample_point_for_bitrate(bitrate); ret = can_calc_timing(dev, &timing, bitrate, sample_pnt); if (ret < 0) { return ret; } if (ret > SAMPLE_POINT_MARGIN) { return -ERANGE; } return can_set_timing(dev, &timing); } #ifdef CONFIG_CAN_FD_MODE int z_impl_can_set_timing_data(const struct device *dev, const struct can_timing *timing_data) { const struct can_driver_api *api = (const struct can_driver_api *)dev->api; const struct can_timing *min = can_get_timing_data_min(dev); const struct can_timing *max = can_get_timing_data_max(dev); int err; if (api->set_timing_data == NULL) { return -ENOSYS; } err = check_timing_in_range(timing_data, min, max); if (err != 0) { return err; } return api->set_timing_data(dev, timing_data); } int z_impl_can_set_bitrate_data(const struct device *dev, uint32_t bitrate_data) { struct can_timing timing_data = { 0 }; uint32_t min = can_get_bitrate_min(dev); uint32_t max = can_get_bitrate_max(dev); uint16_t sample_pnt; int ret; if ((bitrate_data < min) || (bitrate_data > max)) { return -ENOTSUP; } sample_pnt = sample_point_for_bitrate(bitrate_data); ret = 
can_calc_timing_data(dev, &timing_data, bitrate_data, sample_pnt); if (ret < 0) { return ret; } if (ret > SAMPLE_POINT_MARGIN) { return -ERANGE; } return can_set_timing_data(dev, &timing_data); } #endif /* CONFIG_CAN_FD_MODE */ ```
/content/code_sandbox/drivers/can/can_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,751
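The timing helpers in can_common.c above are normally reached through can_set_bitrate() and can_send(). A minimal consumer-side sketch, assuming a hypothetical devicetree alias `can0` and a 125 kbit/s classic CAN bus (the alias, frame ID and payload are illustrative, not taken from the source):

```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/kernel.h>

/* Hypothetical node: any enabled CAN controller aliased as can0 in devicetree. */
static const struct device *const can_dev = DEVICE_DT_GET(DT_ALIAS(can0));

int send_one_frame(void)
{
	struct can_frame frame = {
		.id = 0x123,           /* 11-bit standard identifier */
		.dlc = 2,
		.data = { 0xAB, 0xCD },
	};
	int err;

	if (!device_is_ready(can_dev)) {
		return -ENODEV;
	}

	/* Picks a default sample point for the bitrate and calls can_set_timing()
	 * internally, as implemented in can_common.c above.
	 */
	err = can_set_bitrate(can_dev, 125000);
	if (err != 0) {
		return err;
	}

	err = can_start(can_dev);
	if (err != 0) {
		return err;
	}

	/* NULL callback: can_send() blocks until the frame is transmitted,
	 * via can_tx_default_cb() and the internal semaphore shown above.
	 */
	return can_send(can_dev, &frame, K_MSEC(100), NULL, NULL);
}
```

Passing a NULL callback takes the blocking path shown in z_impl_can_send() above, which is the simplest way to use the API from thread context.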
```c /* * */ #define DT_DRV_COMPAT can_transceiver_gpio #include <zephyr/device.h> #include <zephyr/drivers/can/transceiver.h> #include <zephyr/drivers/gpio.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(can_transceiver_gpio, CONFIG_CAN_LOG_LEVEL); /* Does any devicetree instance have an enable-gpios property? */ #define INST_HAS_ENABLE_GPIOS_OR(inst) DT_INST_NODE_HAS_PROP(inst, enable_gpios) || #define ANY_INST_HAS_ENABLE_GPIOS DT_INST_FOREACH_STATUS_OKAY(INST_HAS_ENABLE_GPIOS_OR) 0 /* Does any devicetree instance have a standby-gpios property? */ #define INST_HAS_STANDBY_GPIOS_OR(inst) DT_INST_NODE_HAS_PROP(inst, standby_gpios) || #define ANY_INST_HAS_STANDBY_GPIOS DT_INST_FOREACH_STATUS_OKAY(INST_HAS_STANDBY_GPIOS_OR) 0 struct can_transceiver_gpio_config { #if ANY_INST_HAS_ENABLE_GPIOS struct gpio_dt_spec enable_gpio; #endif /* ANY_INST_HAS_ENABLE_GPIOS */ #if ANY_INST_HAS_STANDBY_GPIOS struct gpio_dt_spec standby_gpio; #endif /* ANY_INST_HAS_STANDBY_GPIOS */ }; static int can_transceiver_gpio_set_state(const struct device *dev, bool enabled) { const struct can_transceiver_gpio_config *config = dev->config; int err; #if ANY_INST_HAS_ENABLE_GPIOS if (config->enable_gpio.port != NULL) { err = gpio_pin_set_dt(&config->enable_gpio, enabled ? 1 : 0); if (err != 0) { LOG_ERR("failed to set enable GPIO pin (err %d)", err); return -EIO; } } #endif /* ANY_INST_HAS_ENABLE_GPIOS */ #if ANY_INST_HAS_STANDBY_GPIOS if (config->standby_gpio.port != NULL) { err = gpio_pin_set_dt(&config->standby_gpio, enabled ? 0 : 1); if (err != 0) { LOG_ERR("failed to set standby GPIO pin (err %d)", err); return -EIO; } } #endif /* ANY_INST_HAS_STANDBY_GPIOS */ return 0; } static int can_transceiver_gpio_enable(const struct device *dev, can_mode_t mode) { ARG_UNUSED(mode); return can_transceiver_gpio_set_state(dev, true); } static int can_transceiver_gpio_disable(const struct device *dev) { return can_transceiver_gpio_set_state(dev, false); } static int can_transceiver_gpio_init(const struct device *dev) { const struct can_transceiver_gpio_config *config = dev->config; int err; #if ANY_INST_HAS_ENABLE_GPIOS if (config->enable_gpio.port != NULL) { if (!gpio_is_ready_dt(&config->enable_gpio)) { LOG_ERR("enable pin GPIO device not ready"); return -EINVAL; } /* CAN transceiver is disabled during initialization */ err = gpio_pin_configure_dt(&config->enable_gpio, GPIO_OUTPUT_INACTIVE); if (err != 0) { LOG_ERR("failed to configure enable GPIO pin (err %d)", err); return err; } } #endif /* ANY_INST_HAS_ENABLE_GPIOS */ #if ANY_INST_HAS_STANDBY_GPIOS if (config->standby_gpio.port != NULL) { if (!gpio_is_ready_dt(&config->standby_gpio)) { LOG_ERR("standby pin GPIO device not ready"); return -EINVAL; } /* CAN transceiver is put in standby during initialization */ err = gpio_pin_configure_dt(&config->standby_gpio, GPIO_OUTPUT_ACTIVE); if (err != 0) { LOG_ERR("failed to configure standby GPIO pin (err %d)", err); return err; } } #endif /* ANY_INST_HAS_STANDBY_GPIOS */ return 0; } static const struct can_transceiver_driver_api can_transceiver_gpio_driver_api = { .enable = can_transceiver_gpio_enable, .disable = can_transceiver_gpio_disable, }; #define CAN_TRANSCEIVER_GPIO_COND(inst, name) \ IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, name##_gpios), \ (.name##_gpio = GPIO_DT_SPEC_INST_GET(inst, name##_gpios),)) #define CAN_TRANSCEIVER_GPIO_INIT(inst) \ BUILD_ASSERT(DT_INST_NODE_HAS_PROP(inst, enable_gpios) || \ DT_INST_NODE_HAS_PROP(inst, standby_gpios), \ "Missing GPIO property on " \ DT_NODE_FULL_NAME(DT_DRV_INST(inst))); \ \ 
static const struct can_transceiver_gpio_config can_transceiver_gpio_config_##inst = { \ CAN_TRANSCEIVER_GPIO_COND(inst, enable) \ CAN_TRANSCEIVER_GPIO_COND(inst, standby) \ }; \ \ DEVICE_DT_INST_DEFINE(inst, &can_transceiver_gpio_init, \ NULL, NULL, &can_transceiver_gpio_config_##inst,\ POST_KERNEL, CONFIG_CAN_TRANSCEIVER_INIT_PRIORITY, \ &can_transceiver_gpio_driver_api); \ DT_INST_FOREACH_STATUS_OKAY(CAN_TRANSCEIVER_GPIO_INIT) ```
/content/code_sandbox/drivers/can/transceiver/can_transceiver_gpio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,141
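The GPIO transceiver driver above only exposes enable/disable operations; a CAN controller driver drives them through the transceiver API. A rough sketch of that call path, assuming the controller node carries a `phys` phandle to the transceiver (the node label and wiring here are assumptions for illustration, not taken from the source):

```c
#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/drivers/can/transceiver.h>

/* Assumed: 'my_can' is a CAN controller node with phys = <&transceiver0>; */
#define MY_CAN_NODE DT_NODELABEL(my_can)

static const struct device *const phy =
	DEVICE_DT_GET(DT_PHANDLE(MY_CAN_NODE, phys));

static int my_can_start(const struct device *dev)
{
	int err;

	ARG_UNUSED(dev);

	/* Drives the enable GPIO active and takes the transceiver out of
	 * standby (maps to can_transceiver_gpio_enable() above).
	 */
	err = can_transceiver_enable(phy, CAN_MODE_NORMAL);
	if (err != 0) {
		return err;
	}

	/* ... controller-specific start-up would follow here ... */
	return 0;
}
```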
```unknown
config RESET_AST10X0
	bool "ASPEED reset driver"
	default y
	depends on DT_HAS_ASPEED_AST10X0_RESET_ENABLED
	help
	  This option enables the reset driver for ASPEED AST10X0 series SOC.
```
/content/code_sandbox/drivers/reset/Kconfig.aspeed
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
52
```c /* * */ #define DT_DRV_COMPAT gd_gd32_rctl #include <zephyr/arch/cpu.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/reset.h> /** RCU offset (from id field) */ #define GD32_RESET_ID_OFFSET(id) (((id) >> 6U) & 0xFFU) /** RCU configuration bit (from id field) */ #define GD32_RESET_ID_BIT(id) ((id) & 0x1FU) struct reset_gd32_config { uint32_t base; }; static int reset_gd32_status(const struct device *dev, uint32_t id, uint8_t *status) { const struct reset_gd32_config *config = dev->config; *status = !!sys_test_bit(config->base + GD32_RESET_ID_OFFSET(id), GD32_RESET_ID_BIT(id)); return 0; } static int reset_gd32_line_assert(const struct device *dev, uint32_t id) { const struct reset_gd32_config *config = dev->config; sys_set_bit(config->base + GD32_RESET_ID_OFFSET(id), GD32_RESET_ID_BIT(id)); return 0; } static int reset_gd32_line_deassert(const struct device *dev, uint32_t id) { const struct reset_gd32_config *config = dev->config; sys_clear_bit(config->base + GD32_RESET_ID_OFFSET(id), GD32_RESET_ID_BIT(id)); return 0; } static int reset_gd32_line_toggle(const struct device *dev, uint32_t id) { (void)reset_gd32_line_assert(dev, id); (void)reset_gd32_line_deassert(dev, id); return 0; } static const struct reset_driver_api reset_gd32_driver_api = { .status = reset_gd32_status, .line_assert = reset_gd32_line_assert, .line_deassert = reset_gd32_line_deassert, .line_toggle = reset_gd32_line_toggle, }; static const struct reset_gd32_config config = { .base = DT_REG_ADDR(DT_INST_PARENT(0)), }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, &config, PRE_KERNEL_1, CONFIG_RESET_INIT_PRIORITY, &reset_gd32_driver_api); ```
/content/code_sandbox/drivers/reset/reset_gd32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
493
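In reset_gd32.c above, each reset id packs the RCU register offset and bit position into a single cell. A compile-time sketch of that encoding, mirroring the driver's decode macros (the encode macro and example values are made up for illustration; real ids come from the GD32 reset dt-bindings headers):

```c
#include <zephyr/sys/util.h>

/* Mirrors GD32_RESET_ID_OFFSET()/GD32_RESET_ID_BIT() from the driver above. */
#define EXAMPLE_RESET_ID(offset, bit) ((((offset) & 0xFFU) << 6U) | ((bit) & 0x1FU))

/* Example: a peripheral whose reset bit lives at RCU offset 0x0C, bit 17. */
#define EXAMPLE_ID EXAMPLE_RESET_ID(0x0C, 17)

BUILD_ASSERT(((EXAMPLE_ID >> 6U) & 0xFFU) == 0x0C); /* register offset recovered */
BUILD_ASSERT((EXAMPLE_ID & 0x1FU) == 17);           /* bit position recovered */
```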
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_rst #include <zephyr/kernel.h> #include <zephyr/drivers/reset.h> #if defined(CONFIG_SOC_SERIES_NPCX7) #include <zephyr/dt-bindings/reset/npcx7_reset.h> #elif defined(CONFIG_SOC_SERIES_NPCX9) #include <zephyr/dt-bindings/reset/npcx9_reset.h> #elif defined(CONFIG_SOC_SERIES_NPCX4) #include <zephyr/dt-bindings/reset/npcx4_reset.h> #endif #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(rst_npcx); #define NPCX_RESET_CTL_REG_BYTE_SIZE 4 #define NPCX_RESET_CTL_REG_OFFSET(id) ((id) >> (NPCX_RESET_CTL_REG_BYTE_SIZE + 1)) #define NPCX_RESET_CTL_REG_BIT(id) (((id) & ((1 << (NPCX_RESET_CTL_REG_BYTE_SIZE + 1)) - 1))) #define NPCX_SWRST_TRG_WORD_START 0xC183 #define NPCX_SWRST_TRG_WORD_CLEAR 0x0 #define NPCX_SWRST_TRG_WORD_DONE 0xFFFF #define NPCX_SWRST_DONE_TIMEOUT_US 100 struct reset_npcx_dev_config { struct swrst_reg *reg_base; }; static int reset_npcx_line_toggle(const struct device *dev, uint32_t id) { const struct reset_npcx_dev_config *const config = dev->config; struct swrst_reg *const reg = config->reg_base; unsigned int key; uint8_t reg_offset; uint8_t reg_bit; int ret = 0; if (!IN_RANGE(id, NPCX_RESET_ID_START, NPCX_RESET_ID_END)) { LOG_ERR("Invalid Reset ID"); return -EINVAL; } reg_offset = NPCX_RESET_CTL_REG_OFFSET(id); reg_bit = NPCX_RESET_CTL_REG_BIT(id); key = irq_lock(); reg->SWRST_CTL[reg_offset] |= BIT(reg_bit); reg->SWRST_TRG = NPCX_SWRST_TRG_WORD_CLEAR; reg->SWRST_TRG = NPCX_SWRST_TRG_WORD_START; if (!WAIT_FOR((reg->SWRST_TRG == NPCX_SWRST_TRG_WORD_DONE), NPCX_SWRST_DONE_TIMEOUT_US, NULL)) { LOG_ERR("Reset trig timeout"); ret = -EBUSY; } irq_unlock(key); return ret; } static const struct reset_driver_api reset_npcx_driver_api = { .line_toggle = reset_npcx_line_toggle, }; static const struct reset_npcx_dev_config reset_npcx_config = { .reg_base = (struct swrst_reg *)DT_INST_REG_ADDR(0), }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, &reset_npcx_config, PRE_KERNEL_1, CONFIG_RESET_INIT_PRIORITY, &reset_npcx_driver_api); ```
/content/code_sandbox/drivers/reset/reset_npcx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
615
```c /* * */ #define DT_DRV_COMPAT raspberrypi_pico_reset #include <limits.h> #include <zephyr/arch/cpu.h> #include <zephyr/device.h> #include <zephyr/drivers/reset.h> struct reset_rpi_config { DEVICE_MMIO_ROM; uint8_t reg_width; uint8_t active_low; uintptr_t base_address; }; static int reset_rpi_read_register(const struct device *dev, uint16_t offset, uint32_t *value) { const struct reset_rpi_config *config = dev->config; uint32_t base_address = config->base_address; switch (config->reg_width) { case 1: *value = sys_read8(base_address + offset); break; case 2: *value = sys_read16(base_address + offset); break; case 4: *value = sys_read32(base_address + offset); break; default: return -EINVAL; } return 0; } static int reset_rpi_write_register(const struct device *dev, uint16_t offset, uint32_t value) { const struct reset_rpi_config *config = dev->config; uint32_t base_address = config->base_address; switch (config->reg_width) { case 1: sys_write8(value, base_address + offset); break; case 2: sys_write16(value, base_address + offset); break; case 4: sys_write32(value, base_address + offset); break; default: return -EINVAL; } return 0; } static int reset_rpi_status(const struct device *dev, uint32_t id, uint8_t *status) { const struct reset_rpi_config *config = dev->config; uint16_t offset; uint32_t value; uint8_t regbit; int ret; offset = id / (config->reg_width * CHAR_BIT); regbit = id % (config->reg_width * CHAR_BIT); ret = reset_rpi_read_register(dev, offset, &value); if (ret) { return ret; } *status = !(value & BIT(regbit)) ^ !config->active_low; return ret; } static int reset_rpi_update(const struct device *dev, uint32_t id, uint8_t assert) { const struct reset_rpi_config *config = dev->config; uint16_t offset; uint32_t value; uint8_t regbit; int ret; offset = id / (config->reg_width * CHAR_BIT); regbit = id % (config->reg_width * CHAR_BIT); ret = reset_rpi_read_register(dev, offset, &value); if (ret) { return ret; } if (assert ^ config->active_low) { value |= BIT(regbit); } else { value &= ~BIT(regbit); } return reset_rpi_write_register(dev, offset, value); } static int reset_rpi_line_assert(const struct device *dev, uint32_t id) { return reset_rpi_update(dev, id, 1); } static int reset_rpi_line_deassert(const struct device *dev, uint32_t id) { return reset_rpi_update(dev, id, 0); } static int reset_rpi_line_toggle(const struct device *dev, uint32_t id) { int ret; ret = reset_rpi_line_assert(dev, id); if (ret) { return ret; } return reset_rpi_line_deassert(dev, id); } static int reset_rpi_init(const struct device *dev) { DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); return 0; } static const struct reset_driver_api reset_rpi_driver_api = { .status = reset_rpi_status, .line_assert = reset_rpi_line_assert, .line_deassert = reset_rpi_line_deassert, .line_toggle = reset_rpi_line_toggle, }; #define RPI_RESET_INIT(idx) \ static const struct reset_rpi_config reset_rpi_config_##idx = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(idx)), \ .reg_width = DT_INST_PROP_OR(idx, reg_width, 4), \ .active_low = DT_INST_PROP_OR(idx, active_low, 0), \ .base_address = DT_INST_REG_ADDR(idx), \ }; \ \ DEVICE_DT_INST_DEFINE(idx, reset_rpi_init, \ NULL, NULL, \ &reset_rpi_config_##idx, PRE_KERNEL_1, \ CONFIG_RESET_INIT_PRIORITY, \ &reset_rpi_driver_api); DT_INST_FOREACH_STATUS_OKAY(RPI_RESET_INIT); ```
/content/code_sandbox/drivers/reset/reset_rpi_pico.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
993
```unknown
config RESET_STM32
	bool "STM32 Reset Controller Driver"
	default y
	depends on DT_HAS_ST_STM32_RCC_RCTL_ENABLED
```
/content/code_sandbox/drivers/reset/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
33
```unknown
config RESET_INTEL_SOCFPGA
	bool "Intel SoC FPGA Reset Controller driver"
	default y
	depends on DT_HAS_INTEL_SOCFPGA_RESET_ENABLED
	help
	  Enable the Reset driver for Intel SoC FPGA devices.
```
/content/code_sandbox/drivers/reset/Kconfig.intel_socfpga
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
50
```c /* * */ #define DT_DRV_COMPAT nxp_rstctl #include <zephyr/device.h> #include <zephyr/drivers/reset.h> #include <zephyr/sys/util.h> #include <fsl_device_registers.h> #define NXP_RSTCTL_OFFSET(id) ((id >> 16) * sizeof(uint32_t)) #define NXP_RSTCTL_BIT(id) (BIT(id & 0xFFFF)) #define NXP_RSTCTL_CTL(id) (NXP_RSTCTL_OFFSET(id) + 0x10) #define NXP_RSTCTL_SET(id) (NXP_RSTCTL_OFFSET(id) + 0x40) #define NXP_RSTCTL_CLR(id) (NXP_RSTCTL_OFFSET(id) + 0x70) static int reset_nxp_rstctl_status(const struct device *dev, uint32_t id, uint8_t *status) { const uint32_t *base = dev->config; volatile const uint32_t *ctl_reg = base+(NXP_RSTCTL_CTL(id)/sizeof(uint32_t)); uint32_t val = *ctl_reg; *status = (uint8_t)FIELD_GET(NXP_RSTCTL_BIT(id), val); return 0; } static int reset_nxp_rstctl_line_assert(const struct device *dev, uint32_t id) { const uint32_t *base = dev->config; volatile uint32_t *set_reg = (uint32_t *)base+(NXP_RSTCTL_SET(id)/sizeof(uint32_t)); *set_reg = FIELD_PREP(NXP_RSTCTL_BIT(id), 0b1); return 0; } static int reset_nxp_rstctl_line_deassert(const struct device *dev, uint32_t id) { const uint32_t *base = dev->config; volatile uint32_t *clr_reg = (uint32_t *)base+(NXP_RSTCTL_CLR(id)/sizeof(uint32_t)); *clr_reg = FIELD_PREP(NXP_RSTCTL_BIT(id), 0b1); return 0; } static int reset_nxp_rstctl_line_toggle(const struct device *dev, uint32_t id) { uint8_t status = 0; reset_nxp_rstctl_line_assert(dev, id); do { reset_nxp_rstctl_status(dev, id, &status); } while (status != 0b1); reset_nxp_rstctl_line_deassert(dev, id); return 0; } static const struct reset_driver_api reset_nxp_rstctl_driver_api = { .status = reset_nxp_rstctl_status, .line_assert = reset_nxp_rstctl_line_assert, .line_deassert = reset_nxp_rstctl_line_deassert, .line_toggle = reset_nxp_rstctl_line_toggle, }; #define NXP_RSTCTL_INIT(n) \ DEVICE_DT_INST_DEFINE(n, NULL, NULL, NULL, \ (void *)DT_INST_REG_ADDR(n), \ PRE_KERNEL_1, CONFIG_RESET_INIT_PRIORITY, \ &reset_nxp_rstctl_driver_api); DT_INST_FOREACH_STATUS_OKAY(NXP_RSTCTL_INIT) ```
/content/code_sandbox/drivers/reset/reset_nxp_rstctl.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
649
```unknown
config RESET_NXP_RSTCTL
	bool "NXP RSTCTL driver"
	default y
	depends on DT_HAS_NXP_RSTCTL_ENABLED
	help
	  Enable the driver for the NXP RSTCTL, a peripheral reset controller.
```
/content/code_sandbox/drivers/reset/Kconfig.nxp_rstctl
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
52
```unknown
# Nuvoton NuMaker Reset Controller configuration options

config RESET_NUMAKER
	bool "Nuvoton NuMaker reset controller driver"
	default y
	depends on DT_HAS_NUVOTON_NUMAKER_RST_ENABLED
	help
	  This option enables the reset controller driver for Nuvoton NuMaker MCUs.
```
/content/code_sandbox/drivers/reset/Kconfig.numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
64
```c /* * */ #define DT_DRV_COMPAT aspeed_ast10x0_reset #include <errno.h> #include <zephyr/dt-bindings/reset/ast10x0_reset.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/syscon.h> #include <zephyr/sys/util.h> /* * RESET_CTRL0/1_ASSERT registers: * - Each bit in these registers controls a reset line * - Write '1' to a bit: assert the corresponding reset line * - Write '0' to a bit: no effect * RESET_CTRL0/1_DEASSERT register: * - Write '1' to a bit: clear the corresponding bit in RESET_CTRL0/1_ASSERT. * (deassert the corresponding reset line) */ #define RESET_CTRL0_ASSERT 0x40 #define RESET_CTRL0_DEASSERT 0x44 #define RESET_CTRL1_ASSERT 0x50 #define RESET_CTRL1_DEASSERT 0x54 struct reset_aspeed_config { const struct device *syscon; }; static int aspeed_reset_line_assert(const struct device *dev, uint32_t id) { const struct reset_aspeed_config *config = dev->config; const struct device *syscon = config->syscon; uint32_t addr = RESET_CTRL0_ASSERT; if (id >= ASPEED_RESET_GRP_1_OFFSET) { id -= ASPEED_RESET_GRP_1_OFFSET; addr = RESET_CTRL1_ASSERT; } return syscon_write_reg(syscon, addr, BIT(id)); } static int aspeed_reset_line_deassert(const struct device *dev, uint32_t id) { const struct reset_aspeed_config *config = dev->config; const struct device *syscon = config->syscon; uint32_t addr = RESET_CTRL0_DEASSERT; if (id >= ASPEED_RESET_GRP_1_OFFSET) { id -= ASPEED_RESET_GRP_1_OFFSET; addr = RESET_CTRL1_DEASSERT; } return syscon_write_reg(syscon, addr, BIT(id)); } static int aspeed_reset_status(const struct device *dev, uint32_t id, uint8_t *status) { const struct reset_aspeed_config *config = dev->config; const struct device *syscon = config->syscon; uint32_t addr = RESET_CTRL0_ASSERT; uint32_t reg_value; int ret; if (id >= ASPEED_RESET_GRP_1_OFFSET) { id -= ASPEED_RESET_GRP_1_OFFSET; addr = RESET_CTRL1_ASSERT; } ret = syscon_read_reg(syscon, addr, &reg_value); if (ret == 0) { *status = !!(reg_value & BIT(id)); } return ret; } static int aspeed_reset_line_toggle(const struct device *dev, uint32_t id) { int ret; ret = aspeed_reset_line_assert(dev, id); if (ret == 0) { ret = aspeed_reset_line_deassert(dev, id); } return ret; } static const struct reset_driver_api aspeed_reset_api = { .status = aspeed_reset_status, .line_assert = aspeed_reset_line_assert, .line_deassert = aspeed_reset_line_deassert, .line_toggle = aspeed_reset_line_toggle }; #define ASPEED_RESET_INIT(n) \ static const struct reset_aspeed_config reset_aspeed_cfg_##n = { \ .syscon = DEVICE_DT_GET(DT_NODELABEL(syscon)), \ }; \ DEVICE_DT_INST_DEFINE(n, NULL, NULL, NULL, &reset_aspeed_cfg_##n, \ PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &aspeed_reset_api); DT_INST_FOREACH_STATUS_OKAY(ASPEED_RESET_INIT) ```
/content/code_sandbox/drivers/reset/reset_ast10x0.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
793
```unknown
config RESET_NXP_SYSCON
	bool "NXP Syscon Reset controller driver"
	default y
	depends on DT_HAS_NXP_LPC_SYSCON_RESET_ENABLED
	help
	  Enable the NXP syscon reset controller driver. Syscon is found on
	  LPC parts and LPC heritage parts.
```
/content/code_sandbox/drivers/reset/Kconfig.lpc_syscon
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
64
```c /* * */ #define DT_DRV_COMPAT st_stm32_rcc_rctl #include <zephyr/arch/cpu.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/reset.h> #define STM32_RESET_CLR_OFFSET(id) (((id) >> 17U) & 0xFFFU) #define STM32_RESET_SET_OFFSET(id) (((id) >> 5U) & 0xFFFU) #define STM32_RESET_REG_BIT(id) ((id)&0x1FU) struct reset_stm32_config { uintptr_t base; }; static int reset_stm32_status(const struct device *dev, uint32_t id, uint8_t *status) { const struct reset_stm32_config *config = dev->config; *status = !!sys_test_bit(config->base + STM32_RESET_SET_OFFSET(id), STM32_RESET_REG_BIT(id)); return 0; } static int reset_stm32_line_assert(const struct device *dev, uint32_t id) { const struct reset_stm32_config *config = dev->config; sys_set_bit(config->base + STM32_RESET_SET_OFFSET(id), STM32_RESET_REG_BIT(id)); return 0; } static int reset_stm32_line_deassert(const struct device *dev, uint32_t id) { const struct reset_stm32_config *config = dev->config; #if DT_INST_PROP(0, set_bit_to_deassert) sys_set_bit(config->base + STM32_RESET_CLR_OFFSET(id), STM32_RESET_REG_BIT(id)); #else sys_clear_bit(config->base + STM32_RESET_SET_OFFSET(id), STM32_RESET_REG_BIT(id)); #endif return 0; } static int reset_stm32_line_toggle(const struct device *dev, uint32_t id) { reset_stm32_line_assert(dev, id); reset_stm32_line_deassert(dev, id); return 0; } static const struct reset_driver_api reset_stm32_driver_api = { .status = reset_stm32_status, .line_assert = reset_stm32_line_assert, .line_deassert = reset_stm32_line_deassert, .line_toggle = reset_stm32_line_toggle, }; static const struct reset_stm32_config reset_stm32_config = { .base = DT_REG_ADDR(DT_INST_PARENT(0)), }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, &reset_stm32_config, PRE_KERNEL_1, CONFIG_RESET_INIT_PRIORITY, &reset_stm32_driver_api); ```
/content/code_sandbox/drivers/reset/reset_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
535
```unknown
config RESET_GD32
	bool "GD32 Reset Controller Driver"
	default y
	depends on DT_HAS_GD_GD32_RCTL_ENABLED
```
/content/code_sandbox/drivers/reset/Kconfig.gd32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
33
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_rst #include <zephyr/arch/cpu.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/reset.h> /* Reset controller module IPRST offset */ #define NUMAKER_RESET_IPRST0_OFFSET (8UL) #define NUMAKER_RESET_IP_OFFSET(id) (NUMAKER_RESET_IPRST0_OFFSET + (((id) >> 24UL) & 0xffUL)) /* Reset controller module configuration bit */ #define NUMAKER_RESET_IP_BIT(id) (id & 0x00ffffffUL) struct reset_numaker_config { uint32_t base; }; static int reset_numaker_status(const struct device *dev, uint32_t id, uint8_t *status) { const struct reset_numaker_config *config = dev->config; *status = !!sys_test_bit(config->base + NUMAKER_RESET_IP_OFFSET(id), NUMAKER_RESET_IP_BIT(id)); return 0; } static int reset_numaker_line_assert(const struct device *dev, uint32_t id) { const struct reset_numaker_config *config = dev->config; /* Generate reset signal to the corresponding module */ sys_set_bit(config->base + NUMAKER_RESET_IP_OFFSET(id), NUMAKER_RESET_IP_BIT(id)); return 0; } static int reset_numaker_line_deassert(const struct device *dev, uint32_t id) { const struct reset_numaker_config *config = dev->config; /* Release corresponding module from reset state */ sys_clear_bit(config->base + NUMAKER_RESET_IP_OFFSET(id), NUMAKER_RESET_IP_BIT(id)); return 0; } static int reset_numaker_line_toggle(const struct device *dev, uint32_t id) { (void)reset_numaker_line_assert(dev, id); (void)reset_numaker_line_deassert(dev, id); return 0; } static const struct reset_driver_api reset_numaker_driver_api = { .status = reset_numaker_status, .line_assert = reset_numaker_line_assert, .line_deassert = reset_numaker_line_deassert, .line_toggle = reset_numaker_line_toggle, }; static const struct reset_numaker_config config = { .base = (uint32_t)DT_INST_REG_ADDR(0), }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, &config, PRE_KERNEL_1, CONFIG_RESET_INIT_PRIORITY, &reset_numaker_driver_api); ```
/content/code_sandbox/drivers/reset/reset_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
513
```c /* * */ #define DT_DRV_COMPAT nxp_lpc_syscon_reset #include <zephyr/device.h> #include <zephyr/drivers/reset.h> #include <zephyr/sys/util.h> #include <fsl_device_registers.h> #define LPC_RESET_OFFSET(id) (id >> 16) #define LPC_RESET_BIT(id) (BIT(id & 0xFFFF)) static int reset_nxp_syscon_status(const struct device *dev, uint32_t id, uint8_t *status) { const volatile uint32_t *ctrl_reg = ((uint32_t *)dev->config)+(LPC_RESET_OFFSET(id)); *status = (uint8_t)FIELD_GET((uint32_t)LPC_RESET_BIT(id), *ctrl_reg); return 0; } static int reset_nxp_syscon_line_assert(const struct device *dev, uint32_t id) { SYSCON->PRESETCTRLSET[LPC_RESET_OFFSET(id)] = FIELD_PREP(LPC_RESET_BIT(id), 0b1); return 0; } static int reset_nxp_syscon_line_deassert(const struct device *dev, uint32_t id) { SYSCON->PRESETCTRLCLR[LPC_RESET_OFFSET(id)] = FIELD_PREP(LPC_RESET_BIT(id), 0b1); return 0; } static int reset_nxp_syscon_line_toggle(const struct device *dev, uint32_t id) { uint8_t status = 0; reset_nxp_syscon_line_assert(dev, id); do { reset_nxp_syscon_status(dev, id, &status); } while (status != 0b1); reset_nxp_syscon_line_deassert(dev, id); return 0; } static const struct reset_driver_api reset_nxp_syscon_driver_api = { .status = reset_nxp_syscon_status, .line_assert = reset_nxp_syscon_line_assert, .line_deassert = reset_nxp_syscon_line_deassert, .line_toggle = reset_nxp_syscon_line_toggle, }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, (void *)(DT_REG_ADDR(DT_INST_PARENT(0)) + 0x100), PRE_KERNEL_1, CONFIG_RESET_INIT_PRIORITY, &reset_nxp_syscon_driver_api); ```
/content/code_sandbox/drivers/reset/reset_lpc_syscon.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
473
```unknown
# NPCX reset controller driver configuration options

config RESET_NPCX
	bool "Nuvoton NPCX embedded controller (EC) reset controller driver"
	default y
	depends on DT_HAS_NUVOTON_NPCX_RST_ENABLED
	help
	  This option enables the reset controller driver for Nuvoton NPCX MCUs.
```
/content/code_sandbox/drivers/reset/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```unknown
# Reset Controller driver configuration options

#
# Reset Controller options
#
menuconfig RESET
	bool "Reset Controller drivers"
	help
	  Reset Controller drivers. A reset node represents a region containing
	  information about the reset controller device. The typical use-case is
	  for some other node's driver to acquire a reference to the reset
	  controller node together with some reset information.

if RESET

config RESET_INIT_PRIORITY
	int "Reset Controller driver init priority"
	default 35
	help
	  This option controls the priority of the reset controller device
	  initialization. Higher priority ensures that the device is initialized
	  earlier in the startup cycle. If unsure, leave at default value.

comment "Reset Controller Drivers"

rsource "Kconfig.rpi_pico"
rsource "Kconfig.gd32"
rsource "Kconfig.aspeed"
rsource "Kconfig.stm32"
rsource "Kconfig.numaker"
rsource "Kconfig.intel_socfpga"
rsource "Kconfig.npcx"
rsource "Kconfig.lpc_syscon"
rsource "Kconfig.nxp_rstctl"

endif # RESET
```
/content/code_sandbox/drivers/reset/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
234
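The help text above describes the consumer pattern: a peripheral driver grabs a reset spec from its own devicetree node and pulses the line through one of the controller drivers in this directory. A minimal sketch of that pattern, assuming a hypothetical node labelled `my_periph` with a `resets` property:

```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/reset.h>

/* Assumed devicetree fragment:
 *   my_periph: periph@4000 {
 *       resets = <&rctl 42>;
 *   };
 */
static const struct reset_dt_spec periph_reset =
	RESET_DT_SPEC_GET(DT_NODELABEL(my_periph));

static int periph_release_from_reset(void)
{
	if (!device_is_ready(periph_reset.dev)) {
		return -ENODEV;
	}

	/* Asserts, then deasserts the line via the controller's line_toggle op. */
	return reset_line_toggle_dt(&periph_reset);
}
```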
```unknown
config RESET_RPI_PICO
	bool "Raspberry Pi Reset Controller driver"
	default y
	depends on DT_HAS_RASPBERRYPI_PICO_RESET_ENABLED
```
/content/code_sandbox/drivers/reset/Kconfig.rpi_pico
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
36
```c /* * */ #define DT_DRV_COMPAT intel_socfpga_reset #include <zephyr/device.h> #include <zephyr/drivers/reset.h> #include <zephyr/kernel.h> /** regwidth 4 for 32 bit register */ #define RESET_REG_WIDTH 4 struct reset_intel_config { DEVICE_MMIO_ROM; /* check peripheral active low / active high */ bool active_low; }; struct reset_intel_soc_data { DEVICE_MMIO_RAM; }; static int32_t reset_intel_soc_status(const struct device *dev, uint32_t id, uint8_t *status) { const struct reset_intel_config *config = (const struct reset_intel_config *)dev->config; uintptr_t base_address = DEVICE_MMIO_GET(dev); uint32_t value; uint16_t offset; uint8_t regbit; regbit = (id & ((RESET_REG_WIDTH << (RESET_REG_WIDTH - 1)) - 1)); offset = (id >> (RESET_REG_WIDTH + 1)) << (RESET_REG_WIDTH >> 1); value = sys_read32(base_address + offset); *status = !(value & BIT(regbit)) ^ config->active_low; return 0; } static void reset_intel_soc_update(const struct device *dev, uint32_t id, bool assert) { const struct reset_intel_config *config = (const struct reset_intel_config *)dev->config; uintptr_t base_address = DEVICE_MMIO_GET(dev); uint16_t offset; uint8_t regbit; regbit = (id & ((RESET_REG_WIDTH << (RESET_REG_WIDTH - 1)) - 1)); offset = (id >> (RESET_REG_WIDTH + 1)) << (RESET_REG_WIDTH >> 1); if (assert ^ !config->active_low) { if (sys_test_bit(base_address + offset, regbit) == 0) { sys_set_bit(base_address + offset, regbit); } } else { if (sys_test_bit(base_address + offset, regbit) != 0) { sys_clear_bit(base_address + offset, regbit); } } } static int32_t reset_intel_soc_line_assert(const struct device *dev, uint32_t id) { reset_intel_soc_update(dev, id, true); return 0; } static int32_t reset_intel_soc_line_deassert(const struct device *dev, uint32_t id) { reset_intel_soc_update(dev, id, false); return 0; } static int32_t reset_intel_soc_line_toggle(const struct device *dev, uint32_t id) { (void)reset_intel_soc_line_assert(dev, id); /* TODO: Add required delay once tested on Emulator/Hardware platform. */ (void)reset_intel_soc_line_deassert(dev, id); return 0; } static int32_t reset_intel_soc_init(const struct device *dev) { DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); return 0; } static const struct reset_driver_api reset_intel_soc_driver_api = { .status = reset_intel_soc_status, .line_assert = reset_intel_soc_line_assert, .line_deassert = reset_intel_soc_line_deassert, .line_toggle = reset_intel_soc_line_toggle, }; #define INTEL_SOC_RESET_INIT(_inst) \ static struct reset_intel_soc_data reset_intel_soc_data_##_inst; \ static const struct reset_intel_config reset_intel_config_##_inst = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(_inst)), \ .active_low = DT_INST_PROP(_inst, active_low), \ }; \ \ DEVICE_DT_INST_DEFINE(_inst, \ reset_intel_soc_init, \ NULL, \ &reset_intel_soc_data_##_inst, \ &reset_intel_config_##_inst, \ PRE_KERNEL_1, \ CONFIG_RESET_INIT_PRIORITY, \ &reset_intel_soc_driver_api); DT_INST_FOREACH_STATUS_OKAY(INTEL_SOC_RESET_INIT); ```
/content/code_sandbox/drivers/reset/reset_intel_socfpga.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
876
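The offset/bit arithmetic in reset_intel_socfpga.c above is expressed in terms of RESET_REG_WIDTH and is easy to misread; with RESET_REG_WIDTH = 4 it reduces to 32 reset lines per 32-bit register, spaced 4 bytes apart. A compile-time sketch of that equivalence (the example id is made up):

```c
#include <zephyr/toolchain.h>

#define RESET_REG_WIDTH 4

/* As written in the driver:
 *   regbit = id & ((RESET_REG_WIDTH << (RESET_REG_WIDTH - 1)) - 1)
 *          = id & ((4 << 3) - 1) = id & 31
 *   offset = (id >> (RESET_REG_WIDTH + 1)) << (RESET_REG_WIDTH >> 1)
 *          = (id >> 5) << 2 = (id / 32) * 4
 */
#define EXAMPLE_ID 70U /* hypothetical reset line number */

BUILD_ASSERT((EXAMPLE_ID & ((RESET_REG_WIDTH << (RESET_REG_WIDTH - 1)) - 1)) ==
	     EXAMPLE_ID % 32);
BUILD_ASSERT(((EXAMPLE_ID >> (RESET_REG_WIDTH + 1)) << (RESET_REG_WIDTH >> 1)) ==
	     (EXAMPLE_ID / 32) * 4);
```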
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_peci #include <errno.h> #include <soc.h> #include <zephyr/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/peci.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(peci_npcx, CONFIG_PECI_LOG_LEVEL); #define PECI_TIMEOUT K_MSEC(300) #define PECI_NPCX_MAX_TX_BUF_LEN 65 #define PECI_NPCX_MAX_RX_BUF_LEN 64 struct peci_npcx_config { /* peci controller base address */ struct peci_reg *base; struct npcx_clk_cfg clk_cfg; const struct pinctrl_dev_config *pcfg; }; struct peci_npcx_data { struct k_sem trans_sync_sem; struct k_sem lock; uint32_t peci_src_clk_freq; int trans_error; }; enum npcx_peci_error_code { NPCX_PECI_NO_ERROR, NPCX_PECI_WR_ABORT_ERROR, NPCX_PECI_RD_CRC_ERROR, }; static int peci_npcx_check_bus_idle(struct peci_reg *reg) { if (IS_BIT_SET(reg->PECI_CTL_STS, NPCX_PECI_CTL_STS_START_BUSY)) { return -EBUSY; } return 0; } static int peci_npcx_wait_completion(const struct device *dev) { struct peci_npcx_data *const data = dev->data; int ret; ret = k_sem_take(&data->trans_sync_sem, PECI_TIMEOUT); if (ret != 0) { LOG_ERR("%s: Timeout", __func__); return -ETIMEDOUT; } if (data->trans_error != NPCX_PECI_NO_ERROR) { return -EIO; } return 0; } static int peci_npcx_configure(const struct device *dev, uint32_t bitrate) { const struct peci_npcx_config *const config = dev->config; struct peci_npcx_data *const data = dev->data; struct peci_reg *const reg = config->base; uint8_t bit_rate_divider; k_sem_take(&data->lock, K_FOREVER); /* * The unit of the bitrate is in Kbps, need to convert it to bps when * calculate the divider */ bit_rate_divider = DIV_ROUND_UP(data->peci_src_clk_freq, bitrate * 1000 * 4) - 1; /* * Make sure the divider doesn't exceed the max valid value and is not lower than the * minimal valid value. 
*/ bit_rate_divider = CLAMP(bit_rate_divider, PECI_MAX_BIT_RATE_VALID_MIN, NPCX_PECI_RATE_MAX_BIT_RATE_MASK); if (bit_rate_divider < PECI_HIGH_SPEED_MIN_VAL) { reg->PECI_RATE |= BIT(NPCX_PECI_RATE_EHSP); } else { reg->PECI_RATE &= ~BIT(NPCX_PECI_RATE_EHSP); } SET_FIELD(reg->PECI_RATE, NPCX_PECI_RATE_MAX_BIT_RATE, bit_rate_divider); k_sem_give(&data->lock); return 0; } static int peci_npcx_disable(const struct device *dev) { struct peci_npcx_data *const data = dev->data; k_sem_take(&data->lock, K_FOREVER); irq_disable(DT_INST_IRQN(0)); k_sem_give(&data->lock); return 0; } static int peci_npcx_enable(const struct device *dev) { const struct peci_npcx_config *const config = dev->config; struct peci_npcx_data *const data = dev->data; struct peci_reg *const reg = config->base; k_sem_take(&data->lock, K_FOREVER); reg->PECI_CTL_STS = BIT(NPCX_PECI_CTL_STS_DONE) | BIT(NPCX_PECI_CTL_STS_CRC_ERR) | BIT(NPCX_PECI_CTL_STS_ABRT_ERR); NVIC_ClearPendingIRQ(DT_INST_IRQN(0)); irq_enable(DT_INST_IRQN(0)); k_sem_give(&data->lock); return 0; } static int peci_npcx_transfer(const struct device *dev, struct peci_msg *msg) { const struct peci_npcx_config *const config = dev->config; struct peci_npcx_data *const data = dev->data; struct peci_reg *const reg = config->base; struct peci_buf *peci_rx_buf = &msg->rx_buffer; struct peci_buf *peci_tx_buf = &msg->tx_buffer; enum peci_command_code cmd_code = msg->cmd_code; int ret = 0; k_sem_take(&data->lock, K_FOREVER); if (peci_tx_buf->len > PECI_NPCX_MAX_TX_BUF_LEN || peci_rx_buf->len > PECI_NPCX_MAX_RX_BUF_LEN) { ret = -EINVAL; goto out; } ret = peci_npcx_check_bus_idle(reg); if (ret != 0) { goto out; } reg->PECI_ADDR = msg->addr; reg->PECI_WR_LENGTH = peci_tx_buf->len; reg->PECI_RD_LENGTH = peci_rx_buf->len; reg->PECI_CMD = cmd_code; /* * If command = PING command: * Tx buffer length = 0. * Otherwise: * Tx buffer length = N-bytes data + 1 byte command code. */ if (peci_tx_buf->len != 0) { for (int i = 0; i < (peci_tx_buf->len - 1); i++) { reg->PECI_DATA_OUT[i] = peci_tx_buf->buf[i]; } } /* Enable PECI transaction done interrupt */ reg->PECI_CTL_STS |= BIT(NPCX_PECI_CTL_STS_DONE_EN); /* Start PECI transaction */ reg->PECI_CTL_STS |= BIT(NPCX_PECI_CTL_STS_START_BUSY); ret = peci_npcx_wait_completion(dev); if (ret == 0) { int i; for (i = 0; i < peci_rx_buf->len; i++) { peci_rx_buf->buf[i] = reg->PECI_DATA_IN[i]; } /* * The application allocates N+1 bytes for rx_buffer. * The read data block is stored at the offset 0 ~ (N-1). * The read block FCS is stored at offset N. */ peci_rx_buf->buf[i] = reg->PECI_RD_FCS; LOG_DBG("Wr FCS:0x%02x|Rd FCS:0x%02x", reg->PECI_WR_FCS, reg->PECI_RD_FCS); } out: k_sem_give(&data->lock); return ret; } static void peci_npcx_isr(const struct device *dev) { const struct peci_npcx_config *const config = dev->config; struct peci_npcx_data *const data = dev->data; struct peci_reg *const reg = config->base; uint8_t status; status = reg->PECI_CTL_STS; LOG_DBG("PECI ISR status: 0x%02x", status); /* * Disable the transaction done interrupt, also clear the status bits * if they were set. 
*/ reg->PECI_CTL_STS &= ~BIT(NPCX_PECI_CTL_STS_DONE_EN); if (IS_BIT_SET(status, NPCX_PECI_CTL_STS_ABRT_ERR)) { data->trans_error = NPCX_PECI_WR_ABORT_ERROR; LOG_ERR("PECI Nego or Wr FCS(0x%02x) error", reg->PECI_WR_FCS); } else if (IS_BIT_SET(status, NPCX_PECI_CTL_STS_CRC_ERR)) { data->trans_error = NPCX_PECI_RD_CRC_ERROR; LOG_ERR("PECI Rd FCS(0x%02x) error", reg->PECI_WR_FCS); } else { data->trans_error = NPCX_PECI_NO_ERROR; } k_sem_give(&data->trans_sync_sem); } static const struct peci_driver_api peci_npcx_driver_api = { .config = peci_npcx_configure, .enable = peci_npcx_enable, .disable = peci_npcx_disable, .transfer = peci_npcx_transfer, }; static int peci_npcx_init(const struct device *dev) { const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE); const struct peci_npcx_config *const config = dev->config; struct peci_npcx_data *const data = dev->data; int ret; if (!device_is_ready(clk_dev)) { LOG_ERR("%s device not ready", clk_dev->name); return -ENODEV; } ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg); if (ret < 0) { LOG_ERR("Turn on PECI clock fail %d", ret); return ret; } ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->clk_cfg, &data->peci_src_clk_freq); if (ret < 0) { LOG_ERR("Get PECI source clock rate error %d", ret); return ret; } ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("NPCX PECI pinctrl init failed (%d)", ret); return ret; } k_sem_init(&data->trans_sync_sem, 0, 1); k_sem_init(&data->lock, 1, 1); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), peci_npcx_isr, DEVICE_DT_INST_GET(0), 0); return 0; } static struct peci_npcx_data peci_npcx_data0; PINCTRL_DT_INST_DEFINE(0); static const struct peci_npcx_config peci_npcx_config0 = { .base = (struct peci_reg *)DT_INST_REG_ADDR(0), .clk_cfg = NPCX_DT_CLK_CFG_ITEM(0), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; DEVICE_DT_INST_DEFINE(0, &peci_npcx_init, NULL, &peci_npcx_data0, &peci_npcx_config0, POST_KERNEL, CONFIG_PECI_INIT_PRIORITY, &peci_npcx_driver_api); BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1, "only one 'nuvoton_npcx_peci' compatible node can be supported"); ```
/content/code_sandbox/drivers/peci/peci_npcx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,383
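A minimal consumer-side sketch of the PECI API that peci_npcx.c implements, issuing GetTemp() to the default host client address. The device pointer, bitrate and buffer lengths below are assumptions for illustration (Zephyr's peci.h may provide its own length macros); the extra FCS byte in the receive buffer follows the driver comment above:

```c
#include <stdint.h>
#include <zephyr/device.h>
#include <zephyr/drivers/peci.h>

#define PECI_HOST_ADDR      0x30U /* default PECI client address */
#define PECI_GETTEMP_WR_LEN 1U    /* command code only */
#define PECI_GETTEMP_RD_LEN 2U    /* 16-bit temperature reading */

static int read_cpu_temp(const struct device *peci_dev, int16_t *raw_temp)
{
	uint8_t rx_buf[PECI_GETTEMP_RD_LEN + 1]; /* +1 for read-block FCS (see driver) */
	struct peci_msg msg = {
		.addr = PECI_HOST_ADDR,
		.cmd_code = PECI_CMD_GET_TEMP0,
		.tx_buffer = { .buf = NULL, .len = PECI_GETTEMP_WR_LEN },
		.rx_buffer = { .buf = rx_buf, .len = PECI_GETTEMP_RD_LEN },
	};
	int err;

	err = peci_config(peci_dev, 1000U); /* 1 Mbps; unit is kbps per the driver above */
	if (err != 0) {
		return err;
	}

	err = peci_enable(peci_dev);
	if (err != 0) {
		return err;
	}

	err = peci_transfer(peci_dev, &msg);
	if (err != 0) {
		return err;
	}

	/* GetTemp() returns a signed 16-bit reading, LSB first. */
	*raw_temp = (int16_t)(rx_buf[0] | (rx_buf[1] << 8));

	return 0;
}
```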
```c /* * */ #define DT_DRV_COMPAT microchip_xec_peci #include <errno.h> #include <zephyr/device.h> #include <zephyr/kernel.h> #ifdef CONFIG_SOC_SERIES_MEC172X #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h> #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h> #endif #include <zephyr/drivers/peci.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <soc.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(peci_mchp_xec, CONFIG_PECI_LOG_LEVEL); /* Maximum PECI core clock is the main clock 48Mhz */ #define MAX_PECI_CORE_CLOCK 48000u /* 1 ms */ #define PECI_RESET_DELAY 1000u #define PECI_RESET_DELAY_MS 1u /* 100 us */ #define PECI_IDLE_DELAY 100u /* 5 ms */ #define PECI_IDLE_TIMEOUT 50u /* Maximum retries */ #define PECI_TIMEOUT_RETRIES 3u /* Maximum read buffer fill wait retries */ #define PECI_RX_BUF_FILL_WAIT_RETRY 100u /* 10 us */ #define PECI_IO_DELAY 10 #define OPT_BIT_TIME_MSB_OFS 8u #define PECI_FCS_LEN 2 struct peci_xec_config { struct peci_regs * const regs; uint8_t irq_num; uint8_t girq; uint8_t girq_pos; uint8_t pcr_idx; uint8_t pcr_pos; const struct pinctrl_dev_config *pcfg; }; enum peci_pm_policy_state_flag { PECI_PM_POLICY_FLAG, PECI_PM_POLICY_FLAG_COUNT, }; struct peci_xec_data { struct k_sem tx_lock; uint32_t bitrate; int timeout_retries; #ifdef CONFIG_PM_DEVICE ATOMIC_DEFINE(pm_policy_state_flag, PECI_PM_POLICY_FLAG_COUNT); #endif }; #ifdef CONFIG_PM_DEVICE static void peci_xec_pm_policy_state_lock_get(struct peci_xec_data *data, enum peci_pm_policy_state_flag flag) { if (atomic_test_and_set_bit(data->pm_policy_state_flag, flag) == 0) { pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } } static void peci_xec_pm_policy_state_lock_put(struct peci_xec_data *data, enum peci_pm_policy_state_flag flag) { if (atomic_test_and_clear_bit(data->pm_policy_state_flag, flag) == 1) { pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } } #endif #ifdef CONFIG_SOC_SERIES_MEC172X static inline void peci_girq_enable(const struct device *dev) { const struct peci_xec_config * const cfg = dev->config; mchp_xec_ecia_girq_src_en(cfg->girq, cfg->girq_pos); } static inline void peci_girq_status_clear(const struct device *dev) { const struct peci_xec_config * const cfg = dev->config; mchp_soc_ecia_girq_src_clr(cfg->girq, cfg->girq_pos); } static inline void peci_clr_slp_en(const struct device *dev) { const struct peci_xec_config * const cfg = dev->config; z_mchp_xec_pcr_periph_sleep(cfg->pcr_idx, cfg->pcr_pos, 0); } #else static inline void peci_girq_enable(const struct device *dev) { const struct peci_xec_config * const cfg = dev->config; MCHP_GIRQ_ENSET(cfg->girq) = BIT(cfg->girq_pos); } static inline void peci_girq_status_clear(const struct device *dev) { const struct peci_xec_config * const cfg = dev->config; MCHP_GIRQ_SRC(cfg->girq) = BIT(cfg->girq_pos); } static inline void peci_clr_slp_en(const struct device *dev) { ARG_UNUSED(dev); mchp_pcr_periph_slp_ctrl(PCR_PECI, 0); } #endif static int check_bus_idle(struct peci_regs * const regs) { uint8_t delay_cnt = PECI_IDLE_TIMEOUT; /* Wait until PECI bus becomes idle. * Note that when IDLE bit in the status register changes, HW do not * generate an interrupt, so need to poll. 
*/ while (!(regs->STATUS2 & MCHP_PECI_STS2_IDLE)) { k_busy_wait(PECI_IDLE_DELAY); delay_cnt--; if (!delay_cnt) { LOG_WRN("Bus is busy"); return -EBUSY; } } return 0; } static int peci_xec_configure(const struct device *dev, uint32_t bitrate) { const struct peci_xec_config * const cfg = dev->config; struct peci_xec_data * const data = dev->data; struct peci_regs * const regs = cfg->regs; uint16_t value; data->bitrate = bitrate; /* Power down PECI interface */ regs->CONTROL = MCHP_PECI_CTRL_PD; /* Adjust bitrate */ value = MAX_PECI_CORE_CLOCK / bitrate; regs->OPT_BIT_TIME_LSB = value & MCHP_PECI_OPT_BT_LSB_MASK; regs->OPT_BIT_TIME_MSB = ((value >> OPT_BIT_TIME_MSB_OFS) & MCHP_PECI_OPT_BT_MSB_MASK); /* Power up PECI interface */ regs->CONTROL &= ~MCHP_PECI_CTRL_PD; return 0; } static int peci_xec_disable(const struct device *dev) { const struct peci_xec_config * const cfg = dev->config; struct peci_regs * const regs = cfg->regs; int ret; /* Make sure no transaction is interrupted before disabling the HW */ ret = check_bus_idle(regs); if (ret) { return ret; } #ifdef CONFIG_PECI_INTERRUPT_DRIVEN peci_girq_status_clear(dev); NVIC_ClearPendingIRQ(cfg->irq_num); irq_disable(cfg->irq_num); #endif regs->CONTROL |= MCHP_PECI_CTRL_PD; return 0; } static int peci_xec_enable(const struct device *dev) { const struct peci_xec_config * const cfg = dev->config; struct peci_regs * const regs = cfg->regs; regs->CONTROL &= ~MCHP_PECI_CTRL_PD; #ifdef CONFIG_PECI_INTERRUPT_DRIVEN peci_girq_status_clear(dev); peci_girq_enable(dev); irq_enable(cfg->irq_num); #endif return 0; } static void peci_xec_bus_recovery(const struct device *dev, bool full_reset) { const struct peci_xec_config * const cfg = dev->config; struct peci_xec_data * const data = dev->data; struct peci_regs * const regs = cfg->regs; LOG_WRN("%s full_reset:%d", __func__, full_reset); if (full_reset) { regs->CONTROL = MCHP_PECI_CTRL_PD | MCHP_PECI_CTRL_RST; if (k_is_in_isr()) { k_busy_wait(PECI_RESET_DELAY_MS); } else { k_msleep(PECI_RESET_DELAY); } regs->CONTROL &= ~MCHP_PECI_CTRL_RST; peci_xec_configure(dev, data->bitrate); } else { /* Only reset internal FIFOs */ regs->CONTROL |= MCHP_PECI_CTRL_FRST; } } static int peci_xec_write(const struct device *dev, struct peci_msg *msg) { const struct peci_xec_config * const cfg = dev->config; struct peci_xec_data * const data = dev->data; struct peci_regs * const regs = cfg->regs; int i; int ret; struct peci_buf *tx_buf = &msg->tx_buffer; struct peci_buf *rx_buf = &msg->rx_buffer; /* Check if FIFO is full */ if (regs->STATUS2 & MCHP_PECI_STS2_WFF) { LOG_WRN("%s FIFO is full", __func__); return -EIO; } regs->CONTROL &= ~MCHP_PECI_CTRL_FRST; /* Add PECI transaction header to TX FIFO */ regs->WR_DATA = msg->addr; regs->WR_DATA = tx_buf->len; regs->WR_DATA = rx_buf->len; /* Add PECI payload to Tx FIFO only if write length is valid */ if (tx_buf->len) { regs->WR_DATA = msg->cmd_code; for (i = 0; i < tx_buf->len - 1; i++) { if (!(regs->STATUS2 & MCHP_PECI_STS2_WFF)) { regs->WR_DATA = tx_buf->buf[i]; } } } /* Check bus is idle before starting a new transfer */ ret = check_bus_idle(regs); if (ret) { return ret; } regs->CONTROL |= MCHP_PECI_CTRL_TXEN; k_busy_wait(PECI_IO_DELAY); /* Wait for transmission to complete */ #ifdef CONFIG_PECI_INTERRUPT_DRIVEN if (k_sem_take(&data->tx_lock, PECI_IO_DELAY * tx_buf->len)) { return -ETIMEDOUT; } #else /* In worst case, overall timeout will be 1msec (100 * 10usec) */ uint8_t wait_timeout_cnt = 100; while (!(regs->STATUS1 & MCHP_PECI_STS1_EOF)) { k_busy_wait(PECI_IO_DELAY); 
wait_timeout_cnt--; if (!wait_timeout_cnt) { LOG_WRN("Tx timeout"); data->timeout_retries++; /* Full reset only if multiple consecutive failures */ if (data->timeout_retries > PECI_TIMEOUT_RETRIES) { peci_xec_bus_recovery(dev, true); } else { peci_xec_bus_recovery(dev, false); } return -ETIMEDOUT; } } #endif data->timeout_retries = 0; return 0; } static int peci_xec_read(const struct device *dev, struct peci_msg *msg) { const struct peci_xec_config * const cfg = dev->config; struct peci_regs * const regs = cfg->regs; int i; int ret; uint8_t tx_fcs; uint8_t bytes_rcvd; uint8_t wait_timeout_cnt; struct peci_buf *rx_buf = &msg->rx_buffer; /* Attempt to read data from RX FIFO */ bytes_rcvd = 0; for (i = 0; i < (rx_buf->len + PECI_FCS_LEN); i++) { /* Worst case timeout will be 1msec (100 * 10usec) */ wait_timeout_cnt = PECI_RX_BUF_FILL_WAIT_RETRY; /* Wait for read buffer to fill up */ while (regs->STATUS2 & MCHP_PECI_STS2_RFE) { k_usleep(PECI_IO_DELAY); wait_timeout_cnt--; if (!wait_timeout_cnt) { LOG_WRN("Rx buffer empty"); return -ETIMEDOUT; } } if (i == 0) { /* Get write block FCS just for debug */ tx_fcs = regs->RD_DATA; LOG_DBG("TX FCS %x", tx_fcs); /* If a Ping is done, write Tx fcs to rx buffer*/ if (msg->cmd_code == PECI_CMD_PING) { rx_buf->buf[0] = tx_fcs; break; } } else if (i == (rx_buf->len + 1)) { /* Get read block FCS, but don't count it */ rx_buf->buf[i-1] = regs->RD_DATA; } else { /* Get response */ rx_buf->buf[i-1] = regs->RD_DATA; bytes_rcvd++; } } /* Check if transaction is as expected */ if (rx_buf->len != bytes_rcvd) { LOG_INF("Incomplete %x vs %x", bytes_rcvd, rx_buf->len); } /* Once write-read transaction is complete, ensure bus is idle * before resetting the internal FIFOs */ ret = check_bus_idle(regs); if (ret) { return ret; } return 0; } static int peci_xec_transfer(const struct device *dev, struct peci_msg *msg) { const struct peci_xec_config * const cfg = dev->config; struct peci_regs * const regs = cfg->regs; int ret = 0; uint8_t err_val = 0; #ifdef CONFIG_PM_DEVICE struct peci_xec_data *data = dev->data; peci_xec_pm_policy_state_lock_get(data, PECI_PM_POLICY_FLAG); #endif do { ret = peci_xec_write(dev, msg); if (ret) { break; } /* If a PECI transmission is successful, it may or not involve * a read operation, check if transaction expects a response * Also perform a read when PECI cmd is Ping to get Write FCS */ if (msg->rx_buffer.len || (msg->cmd_code == PECI_CMD_PING)) { ret = peci_xec_read(dev, msg); if (ret) { break; } } /* Cleanup */ if (regs->STATUS1 & MCHP_PECI_STS1_EOF) { regs->STATUS1 |= MCHP_PECI_STS1_EOF; } /* Check for error conditions and perform bus recovery if necessary */ err_val = regs->ERROR; if (err_val) { if (err_val & MCHP_PECI_ERR_RDOV) { LOG_ERR("Read buffer is not empty"); } if (err_val & MCHP_PECI_ERR_WRUN) { LOG_ERR("Write buffer is not empty"); } if (err_val & MCHP_PECI_ERR_BERR) { LOG_ERR("PECI bus error"); } LOG_DBG("PECI err %x", err_val); LOG_DBG("PECI sts1 %x", regs->STATUS1); LOG_DBG("PECI sts2 %x", regs->STATUS2); /* ERROR is a clear-on-write register, need to clear errors * occurring at the end of a transaction. 
A temp variable is * used to overcome complaints by the static code analyzer */ regs->ERROR = err_val; peci_xec_bus_recovery(dev, false); ret = -EIO; break; } } while (0); #ifdef CONFIG_PM_DEVICE peci_xec_pm_policy_state_lock_put(data, PECI_PM_POLICY_FLAG); #endif return ret; } #ifdef CONFIG_PM_DEVICE static int peci_xec_pm_action(const struct device *dev, enum pm_device_action action) { const struct peci_xec_config *const devcfg = dev->config; struct peci_regs * const regs = devcfg->regs; struct ecs_regs * const ecs_regs = (struct ecs_regs *)(DT_REG_ADDR(DT_NODELABEL(ecs))); int ret; switch (action) { case PM_DEVICE_ACTION_RESUME: ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT); /* VREF_VTT function is enabled*/ ecs_regs->PECI_DIS = 0x00u; /* Power up PECI interface */ regs->CONTROL &= ~MCHP_PECI_CTRL_PD; break; case PM_DEVICE_ACTION_SUSPEND: regs->CONTROL |= MCHP_PECI_CTRL_PD; /* This bit reduces leakage current through the CPU voltage reference * pin if PECI is not used. VREF_VTT function is disabled. */ ecs_regs->PECI_DIS = 0x01u; /* If application does not want to turn off PECI pins it will * not define pinctrl-1 for this node. */ ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP); if (ret == -ENOENT) { /* pinctrl-1 does not exist. */ ret = 0; } break; default: ret = -ENOTSUP; } return ret; } #endif /* CONFIG_PM_DEVICE */ #ifdef CONFIG_PECI_INTERRUPT_DRIVEN static void peci_xec_isr(const void *arg) { const struct device *dev = arg; struct peci_xec_config * const cfg = dev->config; struct peci_xec_data * const data = dev->data; struct peci_regs * const regs = cfg->regs; uint8_t peci_error = regs->ERROR; uint8_t peci_status2 = regs->STATUS2; peci_girq_status_clear(dev); if (peci_error) { regs->ERROR = peci_error; } if (peci_status2 & MCHP_PECI_STS2_WFE) { LOG_WRN("TX FIFO empty ST2:%x", peci_status2); k_sem_give(&data->tx_lock); } if (peci_status2 & MCHP_PECI_STS2_RFE) { LOG_WRN("RX FIFO full ST2:%x", peci_status2); } } #endif static const struct peci_driver_api peci_xec_driver_api = { .config = peci_xec_configure, .enable = peci_xec_enable, .disable = peci_xec_disable, .transfer = peci_xec_transfer, }; static int peci_xec_init(const struct device *dev) { const struct peci_xec_config * const cfg = dev->config; struct peci_regs * const regs = cfg->regs; struct ecs_regs * const ecs_regs = (struct ecs_regs *)(DT_REG_ADDR(DT_NODELABEL(ecs))); int ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("XEC PECI pinctrl init failed (%d)", ret); return ret; } #ifdef CONFIG_PECI_INTERRUPT_DRIVEN k_sem_init(&data->tx_lock, 0, 1); #endif peci_clr_slp_en(dev); ecs_regs->PECI_DIS = 0x00u; /* Reset PECI interface */ regs->CONTROL |= MCHP_PECI_CTRL_RST; k_msleep(PECI_RESET_DELAY_MS); regs->CONTROL &= ~MCHP_PECI_CTRL_RST; #ifdef CONFIG_PECI_INTERRUPT_DRIVEN /* Enable interrupt for errors */ regs->INT_EN1 = (MCHP_PECI_IEN1_EREN | MCHP_PECI_IEN1_EIEN); /* Enable interrupt for Tx FIFO is empty */ regs->INT_EN2 |= MCHP_PECI_IEN2_ENWFE; /* Enable interrupt for Rx FIFO is full */ regs->INT_EN2 |= MCHP_PECI_IEN2_ENRFF; regs->CONTROL |= MCHP_PECI_CTRL_MIEN; /* Direct NVIC */ IRQ_CONNECT(cfg->irq_num, DT_INST_IRQ(0, priority), peci_xec_isr, NULL, 0); #endif return 0; } static struct peci_xec_data peci_data; PINCTRL_DT_INST_DEFINE(0); static const struct peci_xec_config peci_xec_config = { .regs = (struct peci_regs * const)(DT_INST_REG_ADDR(0)), .irq_num = DT_INST_IRQN(0), .girq = DT_INST_PROP_BY_IDX(0, girqs, 0), .girq_pos = DT_INST_PROP_BY_IDX(0, girqs, 
1), .pcr_idx = DT_INST_PROP_BY_IDX(0, pcrs, 0), .pcr_pos = DT_INST_PROP_BY_IDX(0, pcrs, 1), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; PM_DEVICE_DT_INST_DEFINE(0, peci_xec_pm_action); DEVICE_DT_INST_DEFINE(0, &peci_xec_init, PM_DEVICE_DT_INST_GET(0), &peci_data, &peci_xec_config, POST_KERNEL, CONFIG_PECI_INIT_PRIORITY, &peci_xec_driver_api); ```
/content/code_sandbox/drivers/peci/peci_mchp_xec.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,583
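The XEC driver above implements the generic Zephyr PECI API (config, enable, disable, transfer), whose entry points are visible in its `peci_xec_driver_api` table. The sketch below shows how an application would drive a GetTemp() transaction through that API; it is a minimal illustration, not code from the repository. The 0x30 client address, the local `GETTEMP_*` length constants, the 1000 kHz bit rate, and the assumption that the driver appends one FCS byte to the receive buffer (as `peci_xec_read()` above does) are all assumptions made for the example.

```c
/* Minimal sketch of a PECI GetTemp() transaction through the generic
 * Zephyr PECI API implemented by the drivers in this directory.
 * Address, lengths and bit rate are illustrative assumptions.
 */
#include <zephyr/kernel.h>
#include <zephyr/drivers/peci.h>

#define CPU_CLIENT_ADDR  0x30u /* typical PECI client address (assumed) */
#define GETTEMP_WR_LEN   1u    /* write length counts the command byte */
#define GETTEMP_RD_LEN   2u    /* GetTemp returns a 2-byte temperature word */

int read_cpu_temp(const struct device *peci_dev, int16_t *temp_raw)
{
	/* One extra byte for the read FCS the driver stores after the payload */
	uint8_t rx[GETTEMP_RD_LEN + 1];
	struct peci_msg msg = {
		.addr = CPU_CLIENT_ADDR,
		.cmd_code = PECI_CMD_GET_TEMP0,
		.tx_buffer = { .buf = NULL, .len = GETTEMP_WR_LEN },
		.rx_buffer = { .buf = rx, .len = GETTEMP_RD_LEN },
	};
	int ret;

	ret = peci_config(peci_dev, 1000u); /* 1000 kHz bit rate */
	if (ret == 0) {
		ret = peci_enable(peci_dev);
	}
	if (ret == 0) {
		ret = peci_transfer(peci_dev, &msg);
	}
	if (ret == 0) {
		/* PECI reports temperature as a signed offset below Tjmax, LSB first */
		*temp_raw = (int16_t)(rx[0] | (rx[1] << 8));
	}

	return ret;
}
```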
```c
/*
 *
 */

#include <zephyr/drivers/peci.h>
#include <zephyr/internal/syscall_handler.h>

/* Syscall verification handlers: each z_vrfy_* shim validates that the
 * user-mode caller has access to a PECI driver instance before forwarding
 * to the corresponding z_impl_* implementation.
 */

static inline int z_vrfy_peci_config(const struct device *dev, uint32_t bitrate)
{
	K_OOPS(K_SYSCALL_DRIVER_PECI(dev, config));

	return z_impl_peci_config(dev, bitrate);
}
#include <zephyr/syscalls/peci_config_mrsh.c>

static inline int z_vrfy_peci_enable(const struct device *dev)
{
	K_OOPS(K_SYSCALL_DRIVER_PECI(dev, enable));

	return z_impl_peci_enable(dev);
}
#include <zephyr/syscalls/peci_enable_mrsh.c>

static inline int z_vrfy_peci_disable(const struct device *dev)
{
	K_OOPS(K_SYSCALL_DRIVER_PECI(dev, disable));

	return z_impl_peci_disable(dev);
}
#include <zephyr/syscalls/peci_disable_mrsh.c>

static inline int z_vrfy_peci_transfer(const struct device *dev, struct peci_msg *msg)
{
	struct peci_msg msg_copy;

	K_OOPS(K_SYSCALL_DRIVER_PECI(dev, transfer));
	/* Copy the message descriptor out of user memory before use */
	K_OOPS(k_usermode_from_copy(&msg_copy, msg, sizeof(*msg)));

	return z_impl_peci_transfer(dev, &msg_copy);
}
#include <zephyr/syscalls/peci_transfer_mrsh.c>
```
/content/code_sandbox/drivers/peci/peci_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
299
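The verification shims above only succeed for a user-mode thread that has been granted access to the PECI device kernel object; otherwise `K_OOPS()` terminates the call. A minimal sketch of that grant is shown below; the function and parameter names are placeholders, not part of the file above.

```c
/* Sketch: grant a user-mode thread access to a PECI device so the
 * verified syscalls above are permitted. Names are placeholders.
 */
#include <zephyr/kernel.h>
#include <zephyr/drivers/peci.h>

void grant_peci_access(const struct device *peci_dev, struct k_thread *app_thread)
{
	/* Without this grant, K_SYSCALL_DRIVER_PECI() in the verifiers
	 * above rejects the call from user mode.
	 */
	k_object_access_grant(peci_dev, app_thread);
}
```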
```unknown
# ITE IT8XXX2 PECI configuration options

config PECI_ITE_IT8XXX2
	bool "ITE IT8XXX2 PECI driver"
	default y
	depends on DT_HAS_ITE_IT8XXX2_PECI_ENABLED
	select PECI_INTERRUPT_DRIVEN
	help
	  Enable the ITE IT8XXX2 PECI IO driver.
```
/content/code_sandbox/drivers/peci/Kconfig.it8xxx2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
79
```unknown
# Microchip XEC PECI configuration options

config PECI_XEC
	bool "XEC Microchip PECI driver"
	default y
	depends on DT_HAS_MICROCHIP_XEC_PECI_ENABLED
	select PINCTRL
	help
	  Enable the Microchip XEC PECI IO driver.
```
/content/code_sandbox/drivers/peci/Kconfig.xec
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```unknown
# NPCX PECI driver configuration options

config PECI_NPCX
	bool "NPCX PECI driver"
	default y
	depends on DT_HAS_NUVOTON_NPCX_PECI_ENABLED
	select PECI_INTERRUPT_DRIVEN
	help
	  Enable the NPCX PECI IO driver.
```
/content/code_sandbox/drivers/peci/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```c /* * */ #define DT_DRV_COMPAT ite_it8xxx2_peci #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/peci.h> #include <zephyr/kernel.h> #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/peci.h> #include <soc.h> #include <soc_dt.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(peci_ite_it8xxx2, CONFIG_PECI_LOG_LEVEL); BUILD_ASSERT(IS_ENABLED(CONFIG_PECI_INTERRUPT_DRIVEN), "Please enable the option CONFIG_PECI_INTERRUPT_DRIVEN"); /* * This driver is single-instance. If the devicetree contains multiple * instances, this will fail and the driver needs to be revisited. */ BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) <= 1, "Unsupported PECI Instance"); /* The following constants describes the bitrate of it8xxx2 PECI, * for the frequency are 2000KHz, 1000KHz, and 1600KHz. (Unit: KHz) */ #define PECI_IT8XXX2_BITRATE_2MHZ 2000 #define PECI_IT8XXX2_BITRATE_1MHZ 1000 #define PECI_IT8XXX2_BITRATE_1P6MHZ 1600 /* The following masks are designed for the PECI bitrate settings, * for the bits[7:3] are not related to this features. */ #define PECI_IT8XXX2_BITRATE_BITS_MASK 0x07 #define PECI_IT8XXX2_BITRATE_2MHZ_BITS 0x00 #define PECI_IT8XXX2_BITRATE_1MHZ_BITS 0x01 #define PECI_IT8XXX2_BITRATE_1P6MHZ_BITS 0x04 /* The Transaction Timeout */ #define PECI_TIMEOUT_MS 30 /* PECI interface 0 */ #define PECI0 0 /* HOSTAR (F02C00h) */ #define HOBY BIT(0) #define FINISH BIT(1) #define RD_FCS_ERR BIT(2) #define WR_FCS_ERR BIT(3) #define EXTERR BIT(5) #define BUS_ER BIT(6) #define TEMPERR BIT(7) #define HOSTAR_RST_ANYBIT \ (TEMPERR|BUS_ER|EXTERR|WR_FCS_ERR|RD_FCS_ERR|FINISH) /* HOCTLR (F02C01h) */ #define START BIT(0) #define AWFCS_EN BIT(1) #define CONTROL BIT(2) #define PECIHEN BIT(3) #define FCSERR_ABT BIT(4) #define FIFOCLR BIT(5) /* * TODO: The Voltage Configuration * Related DTSi and registers settings should be fulfilled * in the future. */ /* PADCTLR (F02C0Eh) */ #define PECI_DVIE 0x04 enum peci_vtts { HOVTTS0P85V = 0x00, HOVTTS0P90V = 0x01, HOVTTS0P95V = 0x02, HOVTTS1P00V = 0x03, HOVTTS1P05V = 0x08, HOVTTS1P10V = 0x09, HOVTTS1P15V = 0x0A, HOVTTS1P20V = 0x0B, HOVTTS1P25V = 0x10, }; struct peci_it8xxx2_config { uintptr_t base_addr; uint8_t irq_no; const struct pinctrl_dev_config *pcfg; }; struct peci_it8xxx2_data { struct peci_msg *msgs; struct k_sem device_sync_sem; uint32_t bitrate; }; PINCTRL_DT_INST_DEFINE(0); static const struct peci_it8xxx2_config peci_it8xxx2_config0 = { .base_addr = DT_INST_REG_ADDR(0), .irq_no = DT_INST_IRQN(0), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; static struct peci_it8xxx2_data peci_it8xxx2_data0; /* ITE IT8XXX2 PECI Functions */ static void peci_it8xxx2_init_vtts(struct peci_it8xxx2_regs *reg_base, enum peci_vtts vol_opt) { reg_base->PADCTLR = (reg_base->PADCTLR & PECI_DVIE) | vol_opt; } static void peci_it8xxx2_rst_status(struct peci_it8xxx2_regs *reg_base) { reg_base->HOSTAR = HOSTAR_RST_ANYBIT; } static int peci_it8xxx2_check_host_busy(struct peci_it8xxx2_regs *reg_base) { return (reg_base->HOSTAR & HOBY) ? 
(-EBUSY) : 0; } static int peci_it8xxx2_check_host_finish(const struct device *dev) { struct peci_it8xxx2_data *data = dev->data; const struct peci_it8xxx2_config *config = dev->config; struct peci_it8xxx2_regs *const peci_regs = (struct peci_it8xxx2_regs *)config->base_addr; int ret = k_sem_take(&data->device_sync_sem, K_MSEC(PECI_TIMEOUT_MS)); if (ret == -EAGAIN) { LOG_ERR("%s: Timeout", __func__); return -ETIMEDOUT; } if (peci_regs->HOSTAR != FINISH) { LOG_ERR("[PECI] Error: HOSTAR=0x%02X\r\n", peci_regs->HOSTAR); return -EIO; } return 0; } static int peci_it8xxx2_configure(const struct device *dev, uint32_t bitrate) { struct peci_it8xxx2_data *data = dev->data; const struct peci_it8xxx2_config *config = dev->config; struct peci_it8xxx2_regs *const peci_regs = (struct peci_it8xxx2_regs *)config->base_addr; uint8_t hoctl2r_to_write; data->bitrate = bitrate; hoctl2r_to_write = (peci_regs->HOCTL2R) & (~(PECI_IT8XXX2_BITRATE_BITS_MASK)); switch (bitrate) { case PECI_IT8XXX2_BITRATE_2MHZ: break; case PECI_IT8XXX2_BITRATE_1MHZ: hoctl2r_to_write |= PECI_IT8XXX2_BITRATE_1MHZ_BITS; break; case PECI_IT8XXX2_BITRATE_1P6MHZ: hoctl2r_to_write |= PECI_IT8XXX2_BITRATE_1P6MHZ_BITS; break; default: LOG_ERR("[PECI] Error: Specified Bitrate Not Supported\r\n"); hoctl2r_to_write |= PECI_IT8XXX2_BITRATE_1MHZ_BITS; data->bitrate = PECI_IT8XXX2_BITRATE_1MHZ; peci_regs->HOCTL2R = hoctl2r_to_write; return -ENOTSUP; } peci_regs->HOCTL2R = hoctl2r_to_write; return 0; } static int peci_it8xxx2_enable(const struct device *dev) { const struct peci_it8xxx2_config *config = dev->config; struct peci_it8xxx2_regs *const peci_regs = (struct peci_it8xxx2_regs *)config->base_addr; peci_regs->HOCTLR |= (FIFOCLR|FCSERR_ABT|PECIHEN|CONTROL); return 0; } static int peci_it8xxx2_disable(const struct device *dev) { const struct peci_it8xxx2_config *config = dev->config; struct peci_it8xxx2_regs *const peci_regs = (struct peci_it8xxx2_regs *)config->base_addr; peci_regs->HOCTLR &= ~(PECIHEN); return 0; } static void peci_it8xxx2_rst_module(const struct device *dev) { const struct peci_it8xxx2_config *config = dev->config; struct peci_it8xxx2_regs *const peci_regs = (struct peci_it8xxx2_regs *)config->base_addr; struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE; LOG_ERR("[PECI] Module Reset for Status Error.\r\n"); /* Reset IT8XXX2 PECI Module Thoroughly */ gctrl_regs->GCTRL_RSTC4 |= IT8XXX2_GCTRL_RPECI; /* * Due to the fact that we've checked if the peci_enable() * called before calling the peci_transfer(), so the peci * were definitely enabled before the error occurred. * Here is the recovery mechanism for recovering the PECI * bus when the errors occur. 
*/ peci_regs->PADCTLR |= PECI_DVIE; peci_it8xxx2_init_vtts(peci_regs, HOVTTS0P95V); peci_it8xxx2_configure(dev, PECI_IT8XXX2_BITRATE_1MHZ); peci_it8xxx2_enable(dev); LOG_ERR("[PECI] Reinitialization Finished.\r\n"); } static int peci_it8xxx2_transfer(const struct device *dev, struct peci_msg *msg) { const struct peci_it8xxx2_config *config = dev->config; struct peci_it8xxx2_regs *const peci_regs = (struct peci_it8xxx2_regs *)config->base_addr; struct peci_buf *peci_rx_buf = &msg->rx_buffer; struct peci_buf *peci_tx_buf = &msg->tx_buffer; int cnt, ret_code; ret_code = 0; if (!(peci_regs->HOCTLR & PECIHEN)) { LOG_ERR("[PECI] Please call the peci_enable() first.\r\n"); return -ECONNREFUSED; } if (peci_it8xxx2_check_host_busy(peci_regs) != 0) { return -EBUSY; } peci_regs->HOTRADDR = msg->addr; peci_regs->HOWRLR = peci_tx_buf->len; peci_regs->HORDLR = peci_rx_buf->len; peci_regs->HOCMDR = msg->cmd_code; if (msg->cmd_code != PECI_CMD_PING) { for (cnt = 0; cnt < (peci_tx_buf->len - 1); cnt++) { peci_regs->HOWRDR = peci_tx_buf->buf[cnt]; } } /* Host Available */ irq_enable(config->irq_no); peci_regs->HOCTLR |= START; ret_code = peci_it8xxx2_check_host_finish(dev); if (!ret_code) { /* Host Transactions Finished, Fetch Data from the regs */ if (peci_rx_buf->len) { for (cnt = 0; cnt < (peci_rx_buf->len); cnt++) { peci_rx_buf->buf[cnt] = peci_regs->HORDDR; } } peci_it8xxx2_rst_status(peci_regs); } else { /* Host Transactions Failure */ peci_it8xxx2_rst_module(dev); } return (ret_code); } static void peci_it8xxx2_isr(const struct device *dev) { struct peci_it8xxx2_data *data = dev->data; const struct peci_it8xxx2_config *config = dev->config; irq_disable(config->irq_no); k_sem_give(&data->device_sync_sem); } static const struct peci_driver_api peci_it8xxx2_driver_api = { .config = peci_it8xxx2_configure, .enable = peci_it8xxx2_enable, .disable = peci_it8xxx2_disable, .transfer = peci_it8xxx2_transfer, }; static int peci_it8xxx2_init(const struct device *dev) { struct peci_it8xxx2_data *data = dev->data; const struct peci_it8xxx2_config *config = dev->config; struct peci_it8xxx2_regs *const peci_regs = (struct peci_it8xxx2_regs *)config->base_addr; int status; /* Initialize Semaphore */ k_sem_init(&data->device_sync_sem, 0, 1); /* Configure the GPF6 to Alternative Function 3: PECI */ status = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (status < 0) { LOG_ERR("Failed to configure PECI pins"); return status; } peci_regs->PADCTLR |= PECI_DVIE; peci_it8xxx2_init_vtts(peci_regs, HOVTTS0P95V); peci_it8xxx2_configure(dev, PECI_IT8XXX2_BITRATE_1MHZ); /* Interrupt Assignment */ IRQ_CONNECT(DT_INST_IRQN(0), 0, peci_it8xxx2_isr, DEVICE_DT_INST_GET(0), 0); return 0; } DEVICE_DT_INST_DEFINE(0, &peci_it8xxx2_init, NULL, &peci_it8xxx2_data0, &peci_it8xxx2_config0, POST_KERNEL, CONFIG_PECI_INIT_PRIORITY, &peci_it8xxx2_driver_api); ```
/content/code_sandbox/drivers/peci/peci_ite_it8xxx2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,079
```unknown
# Hidden option to enable the vnd,serial dummy driver used in testing.

config SERIAL_TEST
	def_bool DT_HAS_VND_SERIAL_ENABLED
	depends on DT_HAS_VND_SERIAL_ENABLED
	select SERIAL_HAS_DRIVER
	select SERIAL_SUPPORT_INTERRUPT
	select SERIAL_SUPPORT_ASYNC
	select RING_BUFFER if (UART_INTERRUPT_DRIVEN || UART_ASYNC_API)
```
/content/code_sandbox/drivers/serial/Kconfig.test
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
71
```unknown
# PECI configuration options

menuconfig PECI
	bool "Platform Environment Control Interface (PECI) drivers"
	help
	  Include PECI drivers in the system configuration.

if PECI

source "drivers/peci/Kconfig.xec"
source "drivers/peci/Kconfig.it8xxx2"
source "drivers/peci/Kconfig.npcx"

module = PECI
module-str = peci
source "subsys/logging/Kconfig.template.log_config"

config PECI_INIT_PRIORITY
	int "PECI driver init priority"
	default 40
	help
	  PECI device driver initialization priority. There isn't any
	  critical component relying on this priority at the moment.

config PECI_INTERRUPT_DRIVEN
	bool "PECI driver interrupt support"
	help
	  Option selected by an individual PECI driver to indicate that
	  the driver and hardware support interrupts.

endif # PECI
```
/content/code_sandbox/drivers/peci/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
198
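Taken together, the Kconfig fragments above mean an application normally only has to switch on the subsystem: each SoC driver (PECI_XEC, PECI_ITE_IT8XXX2, PECI_NPCX) defaults to y once its devicetree compatible is enabled, and the drivers that need it select PECI_INTERRUPT_DRIVEN themselves. A typical prj.conf fragment would therefore be just the following (shown as an illustrative example, not a file from the repository):

```
# prj.conf: enable the PECI subsystem; the matching SoC driver
# (e.g. CONFIG_PECI_XEC) then defaults to y via its devicetree dependency.
CONFIG_PECI=y
```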
```c /* * */ #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/uart.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/drivers/clock_control/adi_max32_clock_control.h> #include <wrap_max32_uart.h> #define DT_DRV_COMPAT adi_max32_uart LOG_MODULE_REGISTER(uart_max32, CONFIG_UART_LOG_LEVEL); struct max32_uart_config { mxc_uart_regs_t *regs; const struct pinctrl_dev_config *pctrl; const struct device *clock; struct max32_perclk perclk; struct uart_config uart_conf; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; struct max32_uart_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t cb; /* Interrupt callback */ void *cb_data; /* Interrupt callback arg */ uint32_t flags; /* Cached interrupt flags */ uint32_t status; /* Cached status flags */ #endif struct uart_config conf; /* baudrate, stopbits, ... */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_max32_isr(const struct device *dev); #endif static void api_poll_out(const struct device *dev, unsigned char c) { const struct max32_uart_config *cfg = dev->config; MXC_UART_WriteCharacter(cfg->regs, c); } static int api_poll_in(const struct device *dev, unsigned char *c) { int val; const struct max32_uart_config *cfg = dev->config; val = MXC_UART_ReadCharacterRaw(cfg->regs); if (val >= 0) { *c = (unsigned char)val; } else { return -1; } return 0; } static int api_err_check(const struct device *dev) { int err = 0; uint32_t flags; const struct max32_uart_config *cfg = dev->config; flags = MXC_UART_GetFlags(cfg->regs); if (flags & ADI_MAX32_UART_ERROR_FRAMING) { err |= UART_ERROR_FRAMING; } if (flags & ADI_MAX32_UART_ERROR_PARITY) { err |= UART_ERROR_PARITY; } if (flags & ADI_MAX32_UART_ERROR_OVERRUN) { err |= UART_ERROR_OVERRUN; } return err; } static int api_configure(const struct device *dev, const struct uart_config *uart_cfg) { int err; const struct max32_uart_config *const cfg = dev->config; mxc_uart_regs_t *regs = cfg->regs; struct max32_uart_data *data = dev->data; /* * Set parity */ if (data->conf.parity != uart_cfg->parity) { mxc_uart_parity_t mxc_parity; switch (uart_cfg->parity) { case UART_CFG_PARITY_NONE: mxc_parity = ADI_MAX32_UART_CFG_PARITY_NONE; break; case UART_CFG_PARITY_ODD: mxc_parity = ADI_MAX32_UART_CFG_PARITY_ODD; break; case UART_CFG_PARITY_EVEN: mxc_parity = ADI_MAX32_UART_CFG_PARITY_EVEN; break; case UART_CFG_PARITY_MARK: #if defined(ADI_MAX32_UART_CFG_PARITY_MARK) mxc_parity = ADI_MAX32_UART_CFG_PARITY_MARK; break; #else return -ENOTSUP; #endif case UART_CFG_PARITY_SPACE: #if defined(ADI_MAX32_UART_CFG_PARITY_SPACE) mxc_parity = ADI_MAX32_UART_CFG_PARITY_SPACE; break; #else return -ENOTSUP; #endif default: return -EINVAL; } err = MXC_UART_SetParity(regs, mxc_parity); if (err < 0) { return -ENOTSUP; } /* incase of success keep configuration */ data->conf.parity = uart_cfg->parity; } /* * Set stop bit */ if (data->conf.stop_bits != uart_cfg->stop_bits) { if (uart_cfg->stop_bits == UART_CFG_STOP_BITS_1) { err = MXC_UART_SetStopBits(regs, MXC_UART_STOP_1); } else if (uart_cfg->stop_bits == UART_CFG_STOP_BITS_2) { err = MXC_UART_SetStopBits(regs, MXC_UART_STOP_2); } else { return -ENOTSUP; } if (err < 0) { return -ENOTSUP; } /* incase of success keep configuration */ data->conf.stop_bits = uart_cfg->stop_bits; } /* * Set data bit * Valid data for MAX32 is 5-6-7-8 * Valid data for Zepyhr is 0-1-2-3 * Added +5 to index match. 
*/ if (data->conf.data_bits != uart_cfg->data_bits) { err = MXC_UART_SetDataSize(regs, (5 + uart_cfg->data_bits)); if (err < 0) { return -ENOTSUP; } /* incase of success keep configuration */ data->conf.data_bits = uart_cfg->data_bits; } /* * Set flow control * Flow control not implemented yet so that only support no flow mode */ if (data->conf.flow_ctrl != uart_cfg->flow_ctrl) { if (uart_cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) { return -ENOTSUP; } data->conf.flow_ctrl = uart_cfg->flow_ctrl; } /* * Set baudrate */ if (data->conf.baudrate != uart_cfg->baudrate) { err = Wrap_MXC_UART_SetFrequency(regs, uart_cfg->baudrate, cfg->perclk.clk_src); if (err < 0) { return -ENOTSUP; } /* In case of success keep configuration */ data->conf.baudrate = uart_cfg->baudrate; } return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int api_config_get(const struct device *dev, struct uart_config *uart_cfg) { struct max32_uart_data *data = dev->data; /* copy configs from global setting */ *uart_cfg = data->conf; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ static int uart_max32_init(const struct device *dev) { int ret; const struct max32_uart_config *const cfg = dev->config; mxc_uart_regs_t *regs = cfg->regs; if (!device_is_ready(cfg->clock)) { LOG_ERR("Clock control device not ready"); return -ENODEV; } ret = MXC_UART_Shutdown(regs); if (ret) { return ret; } ret = clock_control_on(cfg->clock, (clock_control_subsys_t)&cfg->perclk); if (ret != 0) { LOG_ERR("Cannot enable UART clock"); return ret; } ret = pinctrl_apply_state(cfg->pctrl, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } ret = api_configure(dev, &cfg->uart_conf); if (ret) { return ret; } ret = Wrap_MXC_UART_Init(regs); if (ret) { return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* Clear any pending UART RX/TX interrupts */ MXC_UART_ClearFlags(regs, (ADI_MAX32_UART_INT_RX | ADI_MAX32_UART_INT_TX)); cfg->irq_config_func(dev); #endif return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int api_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { unsigned int num_tx = 0; const struct max32_uart_config *cfg = dev->config; num_tx = MXC_UART_WriteTXFIFO(cfg->regs, (unsigned char *)tx_data, size); return (int)num_tx; } static int api_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { unsigned int num_rx = 0; const struct max32_uart_config *cfg = dev->config; num_rx = MXC_UART_ReadRXFIFO(cfg->regs, (unsigned char *)rx_data, size); if (num_rx == 0) { MXC_UART_ClearFlags(cfg->regs, ADI_MAX32_UART_INT_RX); } return num_rx; } static void api_irq_tx_enable(const struct device *dev) { const struct max32_uart_config *cfg = dev->config; unsigned int key; MXC_UART_EnableInt(cfg->regs, ADI_MAX32_UART_INT_TX | ADI_MAX32_UART_INT_TX_OEM); key = irq_lock(); uart_max32_isr(dev); irq_unlock(key); } static void api_irq_tx_disable(const struct device *dev) { const struct max32_uart_config *cfg = dev->config; MXC_UART_DisableInt(cfg->regs, ADI_MAX32_UART_INT_TX | ADI_MAX32_UART_INT_TX_OEM); } static int api_irq_tx_ready(const struct device *dev) { struct max32_uart_data *const data = dev->data; const struct max32_uart_config *cfg = dev->config; uint32_t inten = Wrap_MXC_UART_GetRegINTEN(cfg->regs); return ((inten & (ADI_MAX32_UART_INT_TX | ADI_MAX32_UART_INT_TX_OEM)) && !(data->status & MXC_F_UART_STATUS_TX_FULL)); } static void api_irq_rx_enable(const struct device *dev) { const struct max32_uart_config *cfg = dev->config; MXC_UART_EnableInt(cfg->regs, ADI_MAX32_UART_INT_RX); } static void 
api_irq_rx_disable(const struct device *dev) { const struct max32_uart_config *cfg = dev->config; MXC_UART_DisableInt(cfg->regs, ADI_MAX32_UART_INT_RX); } static int api_irq_tx_complete(const struct device *dev) { const struct max32_uart_config *cfg = dev->config; if (MXC_UART_GetActive(cfg->regs) == E_BUSY) { return 0; } else { return 1; /* transmission completed */ } } static int api_irq_rx_ready(const struct device *dev) { struct max32_uart_data *const data = dev->data; const struct max32_uart_config *cfg = dev->config; uint32_t inten = Wrap_MXC_UART_GetRegINTEN(cfg->regs); return ((inten & ADI_MAX32_UART_INT_RX) && !(data->status & ADI_MAX32_UART_RX_EMPTY)); } static void api_irq_err_enable(const struct device *dev) { const struct max32_uart_config *cfg = dev->config; MXC_UART_EnableInt(cfg->regs, ADI_MAX32_UART_ERROR_INTERRUPTS); } static void api_irq_err_disable(const struct device *dev) { const struct max32_uart_config *cfg = dev->config; MXC_UART_DisableInt(cfg->regs, ADI_MAX32_UART_ERROR_INTERRUPTS); } static int api_irq_is_pending(const struct device *dev) { struct max32_uart_data *const data = dev->data; return (data->flags & (ADI_MAX32_UART_INT_RX | ADI_MAX32_UART_INT_TX)); } static int api_irq_update(const struct device *dev) { struct max32_uart_data *const data = dev->data; const struct max32_uart_config *const cfg = dev->config; data->flags = MXC_UART_GetFlags(cfg->regs); data->status = MXC_UART_GetStatus(cfg->regs); MXC_UART_ClearFlags(cfg->regs, data->flags); return 1; } static void api_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct max32_uart_data *const data = dev->data; data->cb = cb; data->cb_data = cb_data; } static void uart_max32_isr(const struct device *dev) { struct max32_uart_data *data = dev->data; if (data->cb) { data->cb(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_max32_driver_api = { .poll_in = api_poll_in, .poll_out = api_poll_out, .err_check = api_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = api_configure, .config_get = api_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = api_fifo_fill, .fifo_read = api_fifo_read, .irq_tx_enable = api_irq_tx_enable, .irq_tx_disable = api_irq_tx_disable, .irq_tx_ready = api_irq_tx_ready, .irq_rx_enable = api_irq_rx_enable, .irq_rx_disable = api_irq_rx_disable, .irq_tx_complete = api_irq_tx_complete, .irq_rx_ready = api_irq_rx_ready, .irq_err_enable = api_irq_err_enable, .irq_err_disable = api_irq_err_disable, .irq_is_pending = api_irq_is_pending, .irq_update = api_irq_update, .irq_callback_set = api_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #define MAX32_UART_INIT(_num) \ PINCTRL_DT_INST_DEFINE(_num); \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \ (static void uart_max32_irq_init_##_num(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(_num), DT_INST_IRQ(_num, priority), \ uart_max32_isr, DEVICE_DT_INST_GET(_num), 0); \ irq_enable(DT_INST_IRQN(_num)); \ })); \ static const struct max32_uart_config max32_uart_config_##_num = { \ .regs = (mxc_uart_regs_t *)DT_INST_REG_ADDR(_num), \ .pctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(_num), \ .clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(_num)), \ .perclk.bus = DT_INST_CLOCKS_CELL(_num, offset), \ .perclk.bit = DT_INST_CLOCKS_CELL(_num, bit), \ .perclk.clk_src = \ DT_INST_PROP_OR(_num, clock_source, ADI_MAX32_PRPH_CLK_SRC_PCLK), \ .uart_conf.baudrate = DT_INST_PROP_OR(_num, 
current_speed, 115200), \ .uart_conf.parity = DT_INST_ENUM_IDX_OR(_num, parity, UART_CFG_PARITY_NONE), \ .uart_conf.data_bits = DT_INST_ENUM_IDX_OR(_num, data_bits, UART_CFG_DATA_BITS_8), \ .uart_conf.stop_bits = DT_INST_ENUM_IDX_OR(_num, stop_bits, UART_CFG_STOP_BITS_1), \ .uart_conf.flow_ctrl = \ DT_INST_PROP_OR(_num, hw_flow_control, UART_CFG_FLOW_CTRL_NONE), \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \ (.irq_config_func = uart_max32_irq_init_##_num,))}; \ static struct max32_uart_data max32_uart_data##_num = { \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (.cb = NULL,))}; \ DEVICE_DT_INST_DEFINE(_num, uart_max32_init, NULL, &max32_uart_data##_num, \ &max32_uart_config_##_num, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, (void *)&uart_max32_driver_api); DT_INST_FOREACH_STATUS_OKAY(MAX32_UART_INIT) ```
/content/code_sandbox/drivers/serial/uart_max32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,425
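In the MAX32 driver above, `api_configure()` only touches settings that differ from the cached configuration and returns -ENOTSUP for anything the hardware (or the driver, in the case of flow control) does not support. The sketch below exercises that path through the generic Zephyr UART API; the chosen device (the `zephyr,console` chosen node) and the settings are illustrative assumptions.

```c
/* Sketch: runtime reconfiguration via the generic UART API, which lands
 * in api_configure() above when CONFIG_UART_USE_RUNTIME_CONFIGURE is set.
 * Device selection and settings are illustrative assumptions.
 */
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/uart.h>

int reconfigure_console(void)
{
	const struct device *uart = DEVICE_DT_GET(DT_CHOSEN(zephyr_console));
	struct uart_config cfg = {
		.baudrate  = 115200,
		.parity    = UART_CFG_PARITY_NONE,
		.stop_bits = UART_CFG_STOP_BITS_1,
		.data_bits = UART_CFG_DATA_BITS_8,
		.flow_ctrl = UART_CFG_FLOW_CTRL_NONE, /* only mode this driver accepts */
	};

	if (!device_is_ready(uart)) {
		return -ENODEV;
	}

	/* Fields matching the cached configuration are skipped by the driver;
	 * unsupported values return -ENOTSUP.
	 */
	return uart_configure(uart, &cfg);
}
```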
```c /* uart_xlnx_ps.c - Xilinx Zynq family serial driver */ /* * */ #define DT_DRV_COMPAT xlnx_xuartps /** * @brief Xilinx Zynq Family Serial Driver * * This is the driver for the Xilinx Zynq family cadence serial device. * * Before individual UART port can be used, uart_xlnx_ps_init() has to be * called to setup the port. * * - the following macro for the number of bytes between register addresses: * * UART_REG_ADDR_INTERVAL */ #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/types.h> #include <soc.h> #include <zephyr/init.h> #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/drivers/uart.h> #include <zephyr/sys/sys_io.h> #include <zephyr/irq.h> #ifdef CONFIG_PINCTRL #include <zephyr/drivers/pinctrl.h> #endif /* For all register offsets and bits / bit masks: * Comp. Xilinx Zynq-7000 Technical Reference Manual (ug585), chap. B.33 */ /* Register offsets within the UART device's register space */ #define XUARTPS_CR_OFFSET 0x0000U /**< Control Register [8:0] */ #define XUARTPS_MR_OFFSET 0x0004U /**< Mode Register [9:0] */ #define XUARTPS_IER_OFFSET 0x0008U /**< Interrupt Enable [12:0] */ #define XUARTPS_IDR_OFFSET 0x000CU /**< Interrupt Disable [12:0] */ #define XUARTPS_IMR_OFFSET 0x0010U /**< Interrupt Mask [12:0] */ #define XUARTPS_ISR_OFFSET 0x0014U /**< Interrupt Status [12:0]*/ #define XUARTPS_BAUDGEN_OFFSET 0x0018U /**< Baud Rate Generator [15:0] */ #define XUARTPS_RXTOUT_OFFSET 0x001CU /**< RX Timeout [7:0] */ #define XUARTPS_RXWM_OFFSET 0x0020U /**< RX FIFO Trigger Level [5:0] */ #define XUARTPS_MODEMCR_OFFSET 0x0024U /**< Modem Control [5:0] */ #define XUARTPS_MODEMSR_OFFSET 0x0028U /**< Modem Status [8:0] */ #define XUARTPS_SR_OFFSET 0x002CU /**< Channel Status [14:0] */ #define XUARTPS_FIFO_OFFSET 0x0030U /**< FIFO [7:0] */ #define XUARTPS_BAUDDIV_OFFSET 0x0034U /**< Baud Rate Divider [7:0] */ #define XUARTPS_FLOWDEL_OFFSET 0x0038U /**< Flow Delay [5:0] */ #define XUARTPS_TXWM_OFFSET 0x0044U /**< TX FIFO Trigger Level [5:0] */ #define XUARTPS_RXBS_OFFSET 0x0048U /**< RX FIFO Byte Status [11:0] */ /* Control Register Bits Definition */ #define XUARTPS_CR_STOPBRK 0x00000100U /**< Stop transmission of break */ #define XUARTPS_CR_STARTBRK 0x00000080U /**< Set break */ #define XUARTPS_CR_TORST 0x00000040U /**< RX timeout counter restart */ #define XUARTPS_CR_TX_DIS 0x00000020U /**< TX disabled. */ #define XUARTPS_CR_TX_EN 0x00000010U /**< TX enabled */ #define XUARTPS_CR_RX_DIS 0x00000008U /**< RX disabled. 
*/ #define XUARTPS_CR_RX_EN 0x00000004U /**< RX enabled */ #define XUARTPS_CR_EN_DIS_MASK 0x0000003CU /**< Enable/disable Mask */ #define XUARTPS_CR_TXRST 0x00000002U /**< TX logic reset */ #define XUARTPS_CR_RXRST 0x00000001U /**< RX logic reset */ /* Mode Register Bits Definition */ #define XUARTPS_MR_CCLK 0x00000400U /**< Input clock select */ #define XUARTPS_MR_CHMODE_R_LOOP 0x00000300U /**< Remote loopback mode */ #define XUARTPS_MR_CHMODE_L_LOOP 0x00000200U /**< Local loopback mode */ #define XUARTPS_MR_CHMODE_ECHO 0x00000100U /**< Auto echo mode */ #define XUARTPS_MR_CHMODE_NORM 0x00000000U /**< Normal mode */ #define XUARTPS_MR_CHMODE_SHIFT 8U /**< Mode shift */ #define XUARTPS_MR_CHMODE_MASK 0x00000300U /**< Mode mask */ #define XUARTPS_MR_STOPMODE_2_BIT 0x00000080U /**< 2 stop bits */ #define XUARTPS_MR_STOPMODE_1_5_BIT 0x00000040U /**< 1.5 stop bits */ #define XUARTPS_MR_STOPMODE_1_BIT 0x00000000U /**< 1 stop bit */ #define XUARTPS_MR_STOPMODE_SHIFT 6U /**< Stop bits shift */ #define XUARTPS_MR_STOPMODE_MASK 0x000000A0U /**< Stop bits mask */ #define XUARTPS_MR_PARITY_NONE 0x00000020U /**< No parity mode */ #define XUARTPS_MR_PARITY_MARK 0x00000018U /**< Mark parity mode */ #define XUARTPS_MR_PARITY_SPACE 0x00000010U /**< Space parity mode */ #define XUARTPS_MR_PARITY_ODD 0x00000008U /**< Odd parity mode */ #define XUARTPS_MR_PARITY_EVEN 0x00000000U /**< Even parity mode */ #define XUARTPS_MR_PARITY_SHIFT 3U /**< Parity setting shift */ #define XUARTPS_MR_PARITY_MASK 0x00000038U /**< Parity mask */ #define XUARTPS_MR_CHARLEN_6_BIT 0x00000006U /**< 6 bits data */ #define XUARTPS_MR_CHARLEN_7_BIT 0x00000004U /**< 7 bits data */ #define XUARTPS_MR_CHARLEN_8_BIT 0x00000000U /**< 8 bits data */ #define XUARTPS_MR_CHARLEN_SHIFT 1U /**< Data Length shift */ #define XUARTPS_MR_CHARLEN_MASK 0x00000006U /**< Data length mask */ #define XUARTPS_MR_CLKSEL 0x00000001U /**< Input clock select */ /* Interrupt Register Bits Definition */ #define XUARTPS_IXR_RBRK 0x00002000U /**< Rx FIFO break detect interrupt */ #define XUARTPS_IXR_TOVR 0x00001000U /**< Tx FIFO Overflow interrupt */ #define XUARTPS_IXR_TNFUL 0x00000800U /**< Tx FIFO Nearly Full interrupt */ #define XUARTPS_IXR_TTRIG 0x00000400U /**< Tx Trig interrupt */ #define XUARTPS_IXR_DMS 0x00000200U /**< Modem status change interrupt */ #define XUARTPS_IXR_TOUT 0x00000100U /**< Timeout error interrupt */ #define XUARTPS_IXR_PARITY 0x00000080U /**< Parity error interrupt */ #define XUARTPS_IXR_FRAMING 0x00000040U /**< Framing error interrupt */ #define XUARTPS_IXR_RXOVR 0x00000020U /**< Overrun error interrupt */ #define XUARTPS_IXR_TXFULL 0x00000010U /**< TX FIFO full interrupt. */ #define XUARTPS_IXR_TXEMPTY 0x00000008U /**< TX FIFO empty interrupt. */ #define XUARTPS_IXR_RXFULL 0x00000004U /**< RX FIFO full interrupt. */ #define XUARTPS_IXR_RXEMPTY 0x00000002U /**< RX FIFO empty interrupt. */ #define XUARTPS_IXR_RTRIG 0x00000001U /**< RX FIFO trigger interrupt. */ #define XUARTPS_IXR_MASK 0x00003FFFU /**< Valid bit mask */ /* Modem Control Register Bits Definition */ #define XUARTPS_MODEMCR_FCM_RTS_CTS 0x00000020 /**< RTS/CTS hardware flow control. */ #define XUARTPS_MODEMCR_FCM_NONE 0x00000000 /**< No hardware flow control. */ #define XUARTPS_MODEMCR_FCM_MASK 0x00000020 /**< Hardware flow control mask. 
*/ #define XUARTPS_MODEMCR_RTS_SHIFT 1U /**< RTS bit shift */ #define XUARTPS_MODEMCR_DTR_SHIFT 0U /**< DTR bit shift */ /* Channel Status Register */ #define XUARTPS_SR_TNFUL 0x00004000U /**< TX FIFO Nearly Full Status */ #define XUARTPS_SR_TTRIG 0x00002000U /**< TX FIFO Trigger Status */ #define XUARTPS_SR_FLOWDEL 0x00001000U /**< RX FIFO fill over flow delay */ #define XUARTPS_SR_TACTIVE 0x00000800U /**< TX active */ #define XUARTPS_SR_RACTIVE 0x00000400U /**< RX active */ #define XUARTPS_SR_TXFULL 0x00000010U /**< TX FIFO full */ #define XUARTPS_SR_TXEMPTY 0x00000008U /**< TX FIFO empty */ #define XUARTPS_SR_RXFULL 0x00000004U /**< RX FIFO full */ #define XUARTPS_SR_RXEMPTY 0x00000002U /**< RX FIFO empty */ #define XUARTPS_SR_RTRIG 0x00000001U /**< RX FIFO fill over trigger */ /** Device configuration structure */ struct uart_xlnx_ps_dev_config { DEVICE_MMIO_ROM; uint32_t sys_clk_freq; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif #ifdef CONFIG_PINCTRL const struct pinctrl_dev_config *pincfg; #endif uint32_t baud_rate; }; /** Device data structure */ struct uart_xlnx_ps_dev_data_t { DEVICE_MMIO_RAM; uint32_t parity; uint32_t stopbits; uint32_t databits; uint32_t flowctrl; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t user_cb; void *user_data; #endif }; /** * @brief Disables the UART's RX and TX function. * * Writes 'Disable RX' and 'Disable TX' command bits into the respective * UART's Command Register, thus disabling the operation of the UART. * * While writing the disable command bits, the opposing enable command * bits, which are set when enabling the UART, are cleared. * * This function must be called before any configuration parameters * of the UART are modified at run-time. * * @param reg_base Base address of the respective UART's register space. */ static void xlnx_ps_disable_uart(uintptr_t reg_base) { uint32_t reg_val = sys_read32(reg_base + XUARTPS_CR_OFFSET); reg_val &= (~XUARTPS_CR_EN_DIS_MASK); /* Set control register bits [5]: TX_DIS and [3]: RX_DIS */ reg_val |= XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS; sys_write32(reg_val, reg_base + XUARTPS_CR_OFFSET); } /** * @brief Enables the UART's RX and TX function. * * Writes 'Enable RX' and 'Enable TX' command bits into the respective * UART's Command Register, thus enabling the operation of the UART. * * While writing the enable command bits, the opposing disable command * bits, which are set when disabling the UART, are cleared. * * This function must not be called while any configuration parameters * of the UART are being modified at run-time. * * @param reg_base Base address of the respective UART's register space. */ static void xlnx_ps_enable_uart(uintptr_t reg_base) { uint32_t reg_val = sys_read32(reg_base + XUARTPS_CR_OFFSET); reg_val &= (~XUARTPS_CR_EN_DIS_MASK); /* Set control register bits [4]: TX_EN and [2]: RX_EN */ reg_val |= XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN; sys_write32(reg_val, reg_base + XUARTPS_CR_OFFSET); } /** * @brief Calculates and sets the values of the BAUDDIV and BAUDGEN registers. * * Calculates and sets the values of the BAUDDIV and BAUDGEN registers, which * determine the prescaler applied to the clock driving the UART, based on * the target baud rate, which is provided as a decimal value. * * The calculation of the values to be written to the BAUDDIV and BAUDGEN * registers is described in the Zynq-7000 TRM, chapter 19.2.3 'Baud Rate * Generator'. 
* * @param dev UART device struct * @param baud_rate The desired baud rate as a decimal value */ static void set_baudrate(const struct device *dev, uint32_t baud_rate) { const struct uart_xlnx_ps_dev_config *dev_cfg = dev->config; uint32_t baud = dev_cfg->baud_rate; uint32_t clk_freq = dev_cfg->sys_clk_freq; uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t divisor, generator; /* Calculate divisor and baud rate generator value */ if ((baud != 0) && (clk_freq != 0)) { /* Covering case where input clock is so slow */ if (clk_freq < 1000000U && baud > 4800U) { baud = 4800; } for (divisor = 4; divisor < 255; divisor++) { uint32_t tmpbaud, bauderr; generator = clk_freq / (baud * (divisor + 1)); if (generator < 2 || generator > 65535) { continue; } tmpbaud = clk_freq / (generator * (divisor + 1)); if (baud > tmpbaud) { bauderr = baud - tmpbaud; } else { bauderr = tmpbaud - baud; } if (((bauderr * 100) / baud) < 3) { break; } } /* * Set baud rate divisor and generator. * -> This function is always called from a context in which * the receiver/transmitter is disabled, the baud rate can * be changed safely at this time. */ sys_write32(divisor, reg_base + XUARTPS_BAUDDIV_OFFSET); sys_write32(generator, reg_base + XUARTPS_BAUDGEN_OFFSET); } } /** * @brief Initialize individual UART port * * This routine is called to reset the chip in a quiescent state. * * @param dev UART device struct * * @return 0 if successful, failed otherwise */ static int uart_xlnx_ps_init(const struct device *dev) { const struct uart_xlnx_ps_dev_config *dev_cfg = dev->config; uint32_t reg_val; #ifdef CONFIG_PINCTRL int err; #endif DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); uintptr_t reg_base = DEVICE_MMIO_GET(dev); /* Disable RX/TX before changing any configuration data */ xlnx_ps_disable_uart(reg_base); #ifdef CONFIG_PINCTRL err = pinctrl_apply_state(dev_cfg->pincfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } #endif /* Set initial character length / start/stop bit / parity configuration */ reg_val = sys_read32(reg_base + XUARTPS_MR_OFFSET); reg_val &= (~(XUARTPS_MR_CHARLEN_MASK | XUARTPS_MR_STOPMODE_MASK | XUARTPS_MR_PARITY_MASK)); reg_val |= XUARTPS_MR_CHARLEN_8_BIT | XUARTPS_MR_STOPMODE_1_BIT | XUARTPS_MR_PARITY_NONE; sys_write32(reg_val, reg_base + XUARTPS_MR_OFFSET); /* Set RX FIFO trigger at 1 data bytes. */ sys_write32(0x01U, reg_base + XUARTPS_RXWM_OFFSET); /* Disable all interrupts, polling mode is default */ sys_write32(XUARTPS_IXR_MASK, reg_base + XUARTPS_IDR_OFFSET); /* Set the baud rate */ set_baudrate(dev, dev_cfg->baud_rate); #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* Clear any pending interrupt flags */ sys_write32(XUARTPS_IXR_MASK, reg_base + XUARTPS_ISR_OFFSET); /* Attach to & unmask the corresponding interrupt vector */ dev_cfg->irq_config_func(dev); #endif xlnx_ps_enable_uart(reg_base); return 0; } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer if empty. */ static int uart_xlnx_ps_poll_in(const struct device *dev, unsigned char *c) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t reg_val = sys_read32(reg_base + XUARTPS_SR_OFFSET); if ((reg_val & XUARTPS_SR_RXEMPTY) == 0) { *c = (unsigned char)sys_read32(reg_base + XUARTPS_FIFO_OFFSET); return 0; } else { return -1; } } /** * @brief Output a character in polled mode. * * Checks if the transmitter is empty. If empty, a character is written to * the data register. 
* * If the hardware flow control is enabled then the handshake signal CTS has to * be asserted in order to send a character. * * @param dev UART device struct * @param c Character to send * * @return Sent character */ static void uart_xlnx_ps_poll_out(const struct device *dev, unsigned char c) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t reg_val; /* wait for transmitter to ready to accept a character */ do { reg_val = sys_read32(reg_base + XUARTPS_SR_OFFSET); } while ((reg_val & XUARTPS_SR_TXEMPTY) == 0); sys_write32((uint32_t)(c & 0xFF), reg_base + XUARTPS_FIFO_OFFSET); do { reg_val = sys_read32(reg_base + XUARTPS_SR_OFFSET); } while ((reg_val & XUARTPS_SR_TXEMPTY) == 0); } /** * @brief Converts a parity enum value to a Mode Register bit mask. * * Converts a value of an enumeration type provided by the driver * framework for the configuration of the UART's parity setting * into a bit mask within the Mode Register. * * It is assumed that the Mode Register contents that are being * modified within this function come with the bits modified by * this function already masked out by the caller. * * @param mode_reg Pointer to the Mode Register contents to which * the parity configuration shall be added. * @param parity Enumeration value to be converted to a bit mask. * * @return Indication of success, always true for this function * as all parity modes supported by the API are also supported * by the hardware. */ static inline bool uart_xlnx_ps_cfg2ll_parity( uint32_t *mode_reg, enum uart_config_parity parity) { /* * Translate the new parity configuration to the mode register's * bits [5..3] (PAR): * 000b : even * 001b : odd * 010b : space * 011b : mark * 1xxb : none */ switch (parity) { default: case UART_CFG_PARITY_EVEN: *mode_reg |= XUARTPS_MR_PARITY_EVEN; break; case UART_CFG_PARITY_ODD: *mode_reg |= XUARTPS_MR_PARITY_ODD; break; case UART_CFG_PARITY_SPACE: *mode_reg |= XUARTPS_MR_PARITY_SPACE; break; case UART_CFG_PARITY_MARK: *mode_reg |= XUARTPS_MR_PARITY_MARK; break; case UART_CFG_PARITY_NONE: *mode_reg |= XUARTPS_MR_PARITY_NONE; break; } return true; } /** * @brief Converts a stop bit enum value to a Mode Register bit mask. * * Converts a value of an enumeration type provided by the driver * framework for the configuration of the UART's stop bit setting * into a bit mask within the Mode Register. * * It is assumed that the Mode Register contents that are being * modified within this function come with the bits modified by * this function already masked out by the caller. * * @param mode_reg Pointer to the Mode Register contents to which * the stop bit configuration shall be added. * @param stopbits Enumeration value to be converted to a bit mask. * * @return Indication of success or failure in case of an unsupported * stop bit configuration being provided by the caller. */ static inline bool uart_xlnx_ps_cfg2ll_stopbits( uint32_t *mode_reg, enum uart_config_stop_bits stopbits) { /* * Translate the new stop bit configuration to the mode register's * bits [7..6] (NBSTOP): * 00b : 1 stop bit * 01b : 1.5 stop bits * 10b : 2 stop bits * 11b : reserved */ switch (stopbits) { case UART_CFG_STOP_BITS_0_5: /* Controller doesn't support 0.5 stop bits */ return false; default: case UART_CFG_STOP_BITS_1: *mode_reg |= XUARTPS_MR_STOPMODE_1_BIT; break; case UART_CFG_STOP_BITS_1_5: *mode_reg |= XUARTPS_MR_STOPMODE_1_5_BIT; break; case UART_CFG_STOP_BITS_2: *mode_reg |= XUARTPS_MR_STOPMODE_2_BIT; break; } return true; } /** * @brief Converts a data bit enum value to a Mode Register bit mask. 
* * Converts a value of an enumeration type provided by the driver * framework for the configuration of the UART's data bit setting * into a bit mask within the Mode Register. * * It is assumed that the Mode Register contents that are being * modified within this function come with the bits modified by * this function already masked out by the caller. * * @param mode_reg Pointer to the Mode Register contents to which * the data bit configuration shall be added. * @param databits Enumeration value to be converted to a bit mask. * * @return Indication of success or failure in case of an unsupported * data bit configuration being provided by the caller. */ static inline bool uart_xlnx_ps_cfg2ll_databits( uint32_t *mode_reg, enum uart_config_data_bits databits) { /* * Translate the new data bit configuration to the mode register's * bits [2..1] (CHRL): * 0xb : 8 data bits * 10b : 7 data bits * 11b : 6 data bits */ switch (databits) { case UART_CFG_DATA_BITS_5: case UART_CFG_DATA_BITS_9: /* Controller doesn't support 5 or 9 data bits */ return false; default: case UART_CFG_DATA_BITS_8: *mode_reg |= XUARTPS_MR_CHARLEN_8_BIT; break; case UART_CFG_DATA_BITS_7: *mode_reg |= XUARTPS_MR_CHARLEN_7_BIT; break; case UART_CFG_DATA_BITS_6: *mode_reg |= XUARTPS_MR_CHARLEN_6_BIT; break; } return true; } /** * @brief Converts a flow control enum value to a Modem Control * Register bit mask. * * Converts a value of an enumeration type provided by the driver * framework for the configuration of the UART's flow control * setting into a bit mask within the Modem Control Register. * * It is assumed that the Modem Control Register contents that are * being modified within this function come with the bits modified * by this function already masked out by the caller. * * @param modemcr_reg Pointer to the Modem Control Register contents * to which the flow control configuration shall * be added. * @param hwctrl Enumeration value to be converted to a bit mask. * * @return Indication of success or failure in case of an unsupported * flow control configuration being provided by the caller. */ static inline bool uart_xlnx_ps_cfg2ll_hwctrl( uint32_t *modemcr_reg, enum uart_config_flow_control hwctrl) { /* * Translate the new flow control configuration to the modem * control register's bit [5] (FCM): * 0b : no flow control * 1b : RTS/CTS */ if (hwctrl == UART_CFG_FLOW_CTRL_RTS_CTS) { *modemcr_reg |= XUARTPS_MODEMCR_FCM_RTS_CTS; } else if (hwctrl == UART_CFG_FLOW_CTRL_NONE) { *modemcr_reg |= XUARTPS_MODEMCR_FCM_NONE; } else { /* Only no flow control or RTS/CTS is supported. */ return false; } return true; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE /** * @brief Configures the UART device at run-time. * * Configures the UART device at run-time according to the * configuration data provided by the caller. * * @param dev UART device struct * @param cfg The configuration parameters to be applied. * * @return 0 if the configuration completed successfully, ENOTSUP * error if an unsupported configuration parameter is detected. 
*/ static int uart_xlnx_ps_configure(const struct device *dev, const struct uart_config *cfg) { struct uart_xlnx_ps_dev_config *dev_cfg = (struct uart_xlnx_ps_dev_config *)dev->config; uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t mode_reg = 0; uint32_t modemcr_reg = 0; /* Read the current mode register & modem control register values */ mode_reg = sys_read32(reg_base + XUARTPS_MR_OFFSET); modemcr_reg = sys_read32(reg_base + XUARTPS_MODEMCR_OFFSET); /* Mask out all items that might be re-configured */ mode_reg &= (~XUARTPS_MR_PARITY_MASK); mode_reg &= (~XUARTPS_MR_STOPMODE_MASK); mode_reg &= (~XUARTPS_MR_CHARLEN_MASK); modemcr_reg &= (~XUARTPS_MODEMCR_FCM_MASK); /* Assemble the updated registers, validity checks contained within */ if ((!uart_xlnx_ps_cfg2ll_parity(&mode_reg, cfg->parity)) || (!uart_xlnx_ps_cfg2ll_stopbits(&mode_reg, cfg->stop_bits)) || (!uart_xlnx_ps_cfg2ll_databits(&mode_reg, cfg->data_bits)) || (!uart_xlnx_ps_cfg2ll_hwctrl(&modemcr_reg, cfg->flow_ctrl))) { return -ENOTSUP; } /* Disable the controller before modifying any config registers */ xlnx_ps_disable_uart(reg_base); /* Set the baud rate */ set_baudrate(dev, cfg->baudrate); dev_cfg->baud_rate = cfg->baudrate; /* Write the two control registers */ sys_write32(mode_reg, reg_base + XUARTPS_MR_OFFSET); sys_write32(modemcr_reg, reg_base + XUARTPS_MODEMCR_OFFSET); /* Re-enable the controller */ xlnx_ps_enable_uart(reg_base); return 0; }; #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ /** * @brief Converts a Mode Register bit mask to a parity configuration * enum value. * * Converts a bit mask representing the UART's parity setting within * the UART's Mode Register into a value of an enumeration type provided * by the UART driver API. * * @param mode_reg The current Mode Register contents from which the * parity setting shall be extracted. * * @return The current parity setting mapped to the UART driver API's * enum type. */ static inline enum uart_config_parity uart_xlnx_ps_ll2cfg_parity( uint32_t mode_reg) { /* * Obtain the current parity configuration from the mode register's * bits [5..3] (PAR): * 000b : even -> reset value * 001b : odd * 010b : space * 011b : mark * 1xxb : none */ switch ((mode_reg & XUARTPS_MR_PARITY_MASK)) { case XUARTPS_MR_PARITY_EVEN: default: return UART_CFG_PARITY_EVEN; case XUARTPS_MR_PARITY_ODD: return UART_CFG_PARITY_ODD; case XUARTPS_MR_PARITY_SPACE: return UART_CFG_PARITY_SPACE; case XUARTPS_MR_PARITY_MARK: return UART_CFG_PARITY_MARK; case XUARTPS_MR_PARITY_NONE: return UART_CFG_PARITY_NONE; } } /** * @brief Converts a Mode Register bit mask to a stop bit configuration * enum value. * * Converts a bit mask representing the UART's stop bit setting within * the UART's Mode Register into a value of an enumeration type provided * by the UART driver API. * * @param mode_reg The current Mode Register contents from which the * stop bit setting shall be extracted. * * @return The current stop bit setting mapped to the UART driver API's * enum type. 
*/ static inline enum uart_config_stop_bits uart_xlnx_ps_ll2cfg_stopbits( uint32_t mode_reg) { /* * Obtain the current stop bit configuration from the mode register's * bits [7..6] (NBSTOP): * 00b : 1 stop bit -> reset value * 01b : 1.5 stop bits * 10b : 2 stop bits * 11b : reserved */ switch ((mode_reg & XUARTPS_MR_STOPMODE_MASK)) { case XUARTPS_MR_STOPMODE_1_BIT: default: return UART_CFG_STOP_BITS_1; case XUARTPS_MR_STOPMODE_1_5_BIT: return UART_CFG_STOP_BITS_1_5; case XUARTPS_MR_STOPMODE_2_BIT: return UART_CFG_STOP_BITS_2; } } /** * @brief Converts a Mode Register bit mask to a data bit configuration * enum value. * * Converts a bit mask representing the UART's data bit setting within * the UART's Mode Register into a value of an enumeration type provided * by the UART driver API. * * @param mode_reg The current Mode Register contents from which the * data bit setting shall be extracted. * * @return The current data bit setting mapped to the UART driver API's * enum type. */ static inline enum uart_config_data_bits uart_xlnx_ps_ll2cfg_databits( uint32_t mode_reg) { /* * Obtain the current data bit configuration from the mode register's * bits [2..1] (CHRL): * 0xb : 8 data bits -> reset value * 10b : 7 data bits * 11b : 6 data bits */ switch ((mode_reg & XUARTPS_MR_CHARLEN_MASK)) { case XUARTPS_MR_CHARLEN_8_BIT: default: return UART_CFG_DATA_BITS_8; case XUARTPS_MR_CHARLEN_7_BIT: return UART_CFG_DATA_BITS_7; case XUARTPS_MR_CHARLEN_6_BIT: return UART_CFG_DATA_BITS_6; } } /** * @brief Converts a Modem Control Register bit mask to a flow control * configuration enum value. * * Converts a bit mask representing the UART's flow control setting within * the UART's Modem Control Register into a value of an enumeration type * provided by the UART driver API. * * @param modemcr_reg The current Modem Control Register contents from * which the parity setting shall be extracted. * * @return The current flow control setting mapped to the UART driver API's * enum type. */ static inline enum uart_config_flow_control uart_xlnx_ps_ll2cfg_hwctrl( uint32_t modemcr_reg) { /* * Obtain the current flow control configuration from the modem * control register's bit [5] (FCM): * 0b : no flow control -> reset value * 1b : RTS/CTS */ if ((modemcr_reg & XUARTPS_MODEMCR_FCM_MASK) == XUARTPS_MODEMCR_FCM_RTS_CTS) { return UART_CFG_FLOW_CTRL_RTS_CTS; } return UART_CFG_FLOW_CTRL_NONE; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE /** * @brief Returns the current configuration of the UART at run-time. * * Returns the current configuration of the UART at run-time by obtaining * the current configuration from the UART's Mode and Modem Control Registers * (exception: baud rate). * * @param dev UART device struct * @param cfg Pointer to the data structure to which the current configuration * shall be written. * * @return always 0. */ static int uart_xlnx_ps_config_get(const struct device *dev, struct uart_config *cfg) { const struct uart_xlnx_ps_dev_config *dev_cfg = dev->config; uintptr_t reg_base = DEVICE_MMIO_GET(dev); /* * Read the Mode & Modem control registers - they contain * the current data / stop bit and parity settings (Mode * Register) and the current flow control setting (Modem * Control register). 
*/ uint32_t mode_reg = sys_read32(reg_base + XUARTPS_MR_OFFSET); uint32_t modemcr_reg = sys_read32(reg_base + XUARTPS_MODEMCR_OFFSET); cfg->baudrate = dev_cfg->baud_rate; cfg->parity = uart_xlnx_ps_ll2cfg_parity(mode_reg); cfg->stop_bits = uart_xlnx_ps_ll2cfg_stopbits(mode_reg); cfg->data_bits = uart_xlnx_ps_ll2cfg_databits(mode_reg); cfg->flow_ctrl = uart_xlnx_ps_ll2cfg_hwctrl(modemcr_reg); return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #if CONFIG_UART_INTERRUPT_DRIVEN /** * @brief Fill FIFO with data * * @param dev UART device struct * @param tx_data Data to transmit * @param size Number of bytes to send * * @return Number of bytes sent */ static int uart_xlnx_ps_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t data_iter = 0; sys_write32(XUARTPS_IXR_TXEMPTY, reg_base + XUARTPS_IDR_OFFSET); while (size--) { while ((sys_read32(reg_base + XUARTPS_SR_OFFSET) & XUARTPS_SR_TXFULL) != 0) { } sys_write32((uint32_t)tx_data[data_iter++], reg_base + XUARTPS_FIFO_OFFSET); } sys_write32(XUARTPS_IXR_TXEMPTY, reg_base + XUARTPS_IER_OFFSET); return data_iter; } /** * @brief Read data from FIFO * * @param dev UART device struct * @param rxData Data container * @param size Container size * * @return Number of bytes read */ static int uart_xlnx_ps_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t reg_val = sys_read32(reg_base + XUARTPS_SR_OFFSET); int inum = 0; while (inum < size && (reg_val & XUARTPS_SR_RXEMPTY) == 0) { rx_data[inum] = (uint8_t)sys_read32(reg_base + XUARTPS_FIFO_OFFSET); inum++; reg_val = sys_read32(reg_base + XUARTPS_SR_OFFSET); } return inum; } /** * @brief Enable TX interrupt in IER * * @param dev UART device struct */ static void uart_xlnx_ps_irq_tx_enable(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); sys_write32( (XUARTPS_IXR_TTRIG | XUARTPS_IXR_TXEMPTY), reg_base + XUARTPS_IER_OFFSET); } /** * @brief Disable TX interrupt in IER * * @param dev UART device struct */ static void uart_xlnx_ps_irq_tx_disable(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); sys_write32( (XUARTPS_IXR_TTRIG | XUARTPS_IXR_TXEMPTY), reg_base + XUARTPS_IDR_OFFSET); } /** * @brief Check if Tx IRQ has been raised * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_xlnx_ps_irq_tx_ready(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t reg_val = sys_read32(reg_base + XUARTPS_SR_OFFSET); if ((reg_val & (XUARTPS_SR_TTRIG | XUARTPS_SR_TXEMPTY)) == 0) { return 0; } else { return 1; } } /** * @brief Check if nothing remains to be transmitted * * @param dev UART device struct * * @return 1 if nothing remains to be transmitted, 0 otherwise */ static int uart_xlnx_ps_irq_tx_complete(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t reg_val = sys_read32(reg_base + XUARTPS_SR_OFFSET); if ((reg_val & XUARTPS_SR_TXEMPTY) == 0) { return 0; } else { return 1; } } /** * @brief Enable RX interrupt in IER * * @param dev UART device struct */ static void uart_xlnx_ps_irq_rx_enable(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); sys_write32(XUARTPS_IXR_RTRIG, reg_base + XUARTPS_IER_OFFSET); } /** * @brief Disable RX interrupt in IER * * @param dev UART device struct */ static void uart_xlnx_ps_irq_rx_disable(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); 
sys_write32(XUARTPS_IXR_RTRIG, reg_base + XUARTPS_IDR_OFFSET); } /** * @brief Check if Rx IRQ has been raised * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_xlnx_ps_irq_rx_ready(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t reg_val = sys_read32(reg_base + XUARTPS_ISR_OFFSET); if ((reg_val & XUARTPS_IXR_RTRIG) == 0) { return 0; } else { sys_write32(XUARTPS_IXR_RTRIG, reg_base + XUARTPS_ISR_OFFSET); return 1; } } /** * @brief Enable error interrupt in IER * * @param dev UART device struct */ static void uart_xlnx_ps_irq_err_enable(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); sys_write32( XUARTPS_IXR_TOVR /* [12] Transmitter FIFO Overflow */ | XUARTPS_IXR_TOUT /* [8] Receiver Timerout */ | XUARTPS_IXR_PARITY /* [7] Parity Error */ | XUARTPS_IXR_FRAMING /* [6] Receiver Framing Error */ | XUARTPS_IXR_RXOVR, /* [5] Receiver Overflow Error */ reg_base + XUARTPS_IER_OFFSET); } /** * @brief Disable error interrupt in IER * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static void uart_xlnx_ps_irq_err_disable(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); sys_write32( XUARTPS_IXR_TOVR /* [12] Transmitter FIFO Overflow */ | XUARTPS_IXR_TOUT /* [8] Receiver Timerout */ | XUARTPS_IXR_PARITY /* [7] Parity Error */ | XUARTPS_IXR_FRAMING /* [6] Receiver Framing Error */ | XUARTPS_IXR_RXOVR, /* [5] Receiver Overflow Error */ reg_base + XUARTPS_IDR_OFFSET); } /** * @brief Check if any IRQ is pending * * @param dev UART device struct * * @return 1 if an IRQ is pending, 0 otherwise */ static int uart_xlnx_ps_irq_is_pending(const struct device *dev) { uintptr_t reg_base = DEVICE_MMIO_GET(dev); uint32_t reg_imr = sys_read32(reg_base + XUARTPS_IMR_OFFSET); uint32_t reg_isr = sys_read32(reg_base + XUARTPS_ISR_OFFSET); if ((reg_imr & reg_isr) != 0) { return 1; } else { return 0; } } /** * @brief Update cached contents of IIR * * @param dev UART device struct * * @return Always 1 */ static int uart_xlnx_ps_irq_update(const struct device *dev) { ARG_UNUSED(dev); return 1; } /** * @brief Set the callback function pointer for IRQ. * * @param dev UART device struct * @param cb Callback function pointer. */ static void uart_xlnx_ps_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_xlnx_ps_dev_data_t *dev_data = dev->data; dev_data->user_cb = cb; dev_data->user_data = cb_data; } /** * @brief Interrupt ce routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. 
*/ static void uart_xlnx_ps_isr(const struct device *dev) { const struct uart_xlnx_ps_dev_data_t *data = dev->data; if (data->user_cb) { data->user_cb(dev, data->user_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_xlnx_ps_driver_api = { .poll_in = uart_xlnx_ps_poll_in, .poll_out = uart_xlnx_ps_poll_out, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_xlnx_ps_configure, .config_get = uart_xlnx_ps_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_xlnx_ps_fifo_fill, .fifo_read = uart_xlnx_ps_fifo_read, .irq_tx_enable = uart_xlnx_ps_irq_tx_enable, .irq_tx_disable = uart_xlnx_ps_irq_tx_disable, .irq_tx_ready = uart_xlnx_ps_irq_tx_ready, .irq_tx_complete = uart_xlnx_ps_irq_tx_complete, .irq_rx_enable = uart_xlnx_ps_irq_rx_enable, .irq_rx_disable = uart_xlnx_ps_irq_rx_disable, .irq_rx_ready = uart_xlnx_ps_irq_rx_ready, .irq_err_enable = uart_xlnx_ps_irq_err_enable, .irq_err_disable = uart_xlnx_ps_irq_err_disable, .irq_is_pending = uart_xlnx_ps_irq_is_pending, .irq_update = uart_xlnx_ps_irq_update, .irq_callback_set = uart_xlnx_ps_irq_callback_set, #endif }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_XLNX_PS_IRQ_CONF_FUNC_SET(port) \ .irq_config_func = uart_xlnx_ps_irq_config_##port, #define UART_XLNX_PS_IRQ_CONF_FUNC(port) \ static void uart_xlnx_ps_irq_config_##port(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(port), \ DT_INST_IRQ(port, priority), \ uart_xlnx_ps_isr, DEVICE_DT_INST_GET(port), \ 0); \ irq_enable(DT_INST_IRQN(port)); \ } #else #define UART_XLNX_PS_IRQ_CONF_FUNC_SET(port) #define UART_XLNX_PS_IRQ_CONF_FUNC(port) #endif /*CONFIG_UART_INTERRUPT_DRIVEN */ #define UART_XLNX_PS_DEV_DATA(port) \ static struct uart_xlnx_ps_dev_data_t uart_xlnx_ps_dev_data_##port #if CONFIG_PINCTRL #define UART_XLNX_PS_PINCTRL_DEFINE(port) PINCTRL_DT_INST_DEFINE(port); #define UART_XLNX_PS_PINCTRL_INIT(port) .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(port), #else #define UART_XLNX_PS_PINCTRL_DEFINE(port) #define UART_XLNX_PS_PINCTRL_INIT(port) #endif /* CONFIG_PINCTRL */ #define UART_XLNX_PS_DEV_CFG(port) \ static struct uart_xlnx_ps_dev_config uart_xlnx_ps_dev_cfg_##port = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(port)), \ .sys_clk_freq = DT_INST_PROP(port, clock_frequency), \ .baud_rate = DT_INST_PROP(port, current_speed), \ UART_XLNX_PS_IRQ_CONF_FUNC_SET(port) \ UART_XLNX_PS_PINCTRL_INIT(port) \ } #define UART_XLNX_PS_INIT(port) \ DEVICE_DT_INST_DEFINE(port, \ uart_xlnx_ps_init, \ NULL, \ &uart_xlnx_ps_dev_data_##port, \ &uart_xlnx_ps_dev_cfg_##port, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_xlnx_ps_driver_api) #define UART_XLNX_INSTANTIATE(inst) \ UART_XLNX_PS_PINCTRL_DEFINE(inst) \ UART_XLNX_PS_IRQ_CONF_FUNC(inst); \ UART_XLNX_PS_DEV_DATA(inst); \ UART_XLNX_PS_DEV_CFG(inst); \ UART_XLNX_PS_INIT(inst); DT_INST_FOREACH_STATUS_OKAY(UART_XLNX_INSTANTIATE) ```
/content/code_sandbox/drivers/serial/uart_xlnx_ps.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,228
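For readers skimming the driver above, here is a minimal consumer-side sketch of the interrupt-driven UART API it implements (callback registration, `irq_update`, `fifo_read`). The `uart0` node label, the 16-byte scratch buffer and the echo behaviour are illustrative assumptions, not taken from the driver; `CONFIG_UART_INTERRUPT_DRIVEN=y` is assumed.

```c
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

/* Assumed devicetree label; replace with the node that maps to this driver. */
static const struct device *const uart_dev = DEVICE_DT_GET(DT_NODELABEL(uart0));

static void rx_echo_cb(const struct device *dev, void *user_data)
{
	uint8_t buf[16];

	ARG_UNUSED(user_data);

	/* Latch pending interrupt sources before querying rx/tx state. */
	if (!uart_irq_update(dev)) {
		return;
	}

	while (uart_irq_rx_ready(dev)) {
		int n = uart_fifo_read(dev, buf, sizeof(buf));

		if (n <= 0) {
			break;
		}
		for (int i = 0; i < n; i++) {
			uart_poll_out(dev, buf[i]); /* echo via the polled TX path */
		}
	}
}

int main(void)
{
	if (!device_is_ready(uart_dev)) {
		return 0;
	}

	uart_irq_callback_user_data_set(uart_dev, rx_echo_cb, NULL);
	uart_irq_rx_enable(uart_dev);

	return 0;
}
```

Note that this driver's `irq_update` always returns 1, so the call mainly honours the API contract of refreshing interrupt state before the `*_ready` queries; the RTRIG acknowledgement happens inside `irq_rx_ready`.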
```c /* * */ #define DT_DRV_COMPAT ene_kb1200_uart #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <reg/ser.h> struct kb1200_uart_config { #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_cfg_func)(void); #endif struct serial_regs *ser; const struct pinctrl_dev_config *pcfg; }; struct kb1200_uart_data { uart_irq_callback_user_data_t callback; struct uart_config current_config; void *callback_data; uint8_t pending_flag_data; }; static int kb1200_uart_err_check(const struct device *dev) { const struct kb1200_uart_config *config = dev->config; int err = 0; if (config->ser->SERSTS & SERSTS_RX_OVERRUN) { err |= UART_ERROR_OVERRUN; } if (config->ser->SERSTS & SERSTS_PARITY_ERROR) { err |= UART_ERROR_PARITY; } if (config->ser->SERSTS & SERSTS_FRAME_ERROR) { err |= UART_ERROR_FRAMING; } return err; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int kb1200_uart_configure(const struct device *dev, const struct uart_config *cfg) { uint16_t reg_baudrate = 0; uint8_t reg_parity = 0; int ret = 0; const struct kb1200_uart_config *config = dev->config; struct kb1200_uart_data *data = dev->data; reg_baudrate = (DIVIDER_BASE_CLK / cfg->baudrate) - 1; switch (cfg->parity) { case UART_CFG_PARITY_NONE: reg_parity = SERCFG_PARITY_NONE; break; case UART_CFG_PARITY_ODD: reg_parity = SERCFG_PARITY_ODD; break; case UART_CFG_PARITY_EVEN: reg_parity = SERCFG_PARITY_EVEN; break; case UART_CFG_PARITY_MARK: case UART_CFG_PARITY_SPACE: default: ret = -ENOTSUP; break; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: break; case UART_CFG_STOP_BITS_0_5: case UART_CFG_STOP_BITS_1_5: case UART_CFG_STOP_BITS_2: default: ret = -ENOTSUP; break; } switch (cfg->data_bits) { case UART_CFG_DATA_BITS_8: break; case UART_CFG_DATA_BITS_5: case UART_CFG_DATA_BITS_6: case UART_CFG_DATA_BITS_7: case UART_CFG_DATA_BITS_9: default: ret = -ENOTSUP; break; } switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: break; case UART_CFG_FLOW_CTRL_RTS_CTS: case UART_CFG_FLOW_CTRL_DTR_DSR: case UART_CFG_FLOW_CTRL_RS485: default: ret = -ENOTSUP; break; } config->ser->SERCFG = (reg_baudrate << 16) | (reg_parity << 2) | (SERIE_RX_ENABLE | SERIE_TX_ENABLE); config->ser->SERCTRL = SERCTRL_MODE1; data->current_config = *cfg; return ret; } static int kb1200_uart_config_get(const struct device *dev, struct uart_config *cfg) { struct kb1200_uart_data *data = dev->data; *cfg = data->current_config; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int kb1200_uart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct kb1200_uart_config *config = dev->config; uint16_t tx_bytes = 0U; while ((size - tx_bytes) > 0) { /* Check Tx FIFO not Full*/ while (config->ser->SERSTS & SERSTS_TX_FULL) ; /* Put a character into Tx FIFO */ config->ser->SERTBUF = tx_data[tx_bytes]; tx_bytes++; } return tx_bytes; } static int kb1200_uart_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct kb1200_uart_config *config = dev->config; uint16_t rx_bytes = 0U; /* Check Rx FIFO not Empty*/ while ((size - rx_bytes > 0) && (!(config->ser->SERSTS & SERSTS_RX_EMPTY))) { /* Get a character from the Rx FIFO */ rx_data[rx_bytes] = config->ser->SERRBUF; rx_bytes++; } return rx_bytes; } static void kb1200_uart_irq_tx_enable(const struct device *dev) { const struct kb1200_uart_config *config = dev->config; config->ser->SERPF = SERPF_TX_EMPTY; config->ser->SERIE |= SERIE_TX_ENABLE; } static void 
kb1200_uart_irq_tx_disable(const struct device *dev) { const struct kb1200_uart_config *config = dev->config; config->ser->SERIE &= ~SERIE_TX_ENABLE; config->ser->SERPF = SERPF_TX_EMPTY; } static int kb1200_uart_irq_tx_ready(const struct device *dev) { struct kb1200_uart_data *data = dev->data; return (data->pending_flag_data & SERPF_TX_EMPTY) ? 1 : 0; } static void kb1200_uart_irq_rx_enable(const struct device *dev) { const struct kb1200_uart_config *config = dev->config; config->ser->SERPF = SERPF_RX_CNT_FULL; config->ser->SERIE |= SERIE_RX_ENABLE; } static void kb1200_uart_irq_rx_disable(const struct device *dev) { const struct kb1200_uart_config *config = dev->config; config->ser->SERIE &= (~SERIE_RX_ENABLE); config->ser->SERPF = SERPF_RX_CNT_FULL; } static int kb1200_uart_irq_rx_ready(const struct device *dev) { struct kb1200_uart_data *data = dev->data; return (data->pending_flag_data & SERPF_RX_CNT_FULL) ? 1 : 0; } static void kb1200_uart_irq_err_enable(const struct device *dev) { const struct kb1200_uart_config *config = dev->config; config->ser->SERPF = SERPF_RX_ERROR; config->ser->SERIE |= SERIE_RX_ERROR; } static void kb1200_uart_irq_err_disable(const struct device *dev) { const struct kb1200_uart_config *config = dev->config; config->ser->SERIE &= (~SERIE_RX_ERROR); config->ser->SERPF = SERPF_RX_ERROR; } static int kb1200_uart_irq_is_pending(const struct device *dev) { struct kb1200_uart_data *data = dev->data; return (data->pending_flag_data) ? 1 : 0; } static int kb1200_uart_irq_update(const struct device *dev) { struct kb1200_uart_data *data = dev->data; const struct kb1200_uart_config *config = dev->config; data->pending_flag_data = (config->ser->SERPF) & (config->ser->SERIE); /*clear pending flag*/ config->ser->SERPF = data->pending_flag_data; return 1; } static void kb1200_uart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct kb1200_uart_data *data = dev->data; data->callback = cb; data->callback_data = cb_data; } static void kb1200_uart_irq_handler(const struct device *dev) { struct kb1200_uart_data *data = dev->data; if (data->callback) { data->callback(dev, data->callback_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int kb1200_uart_poll_in(const struct device *dev, unsigned char *c) { #ifdef CONFIG_UART_INTERRUPT_DRIVEN return kb1200_uart_fifo_read(dev, c, 1) ? 
0 : -1; #else const struct kb1200_uart_config *config = dev->config; /* Check Rx FIFO not Empty*/ if (config->ser->SERSTS & SERSTS_RX_EMPTY) { return -1; } /* Get a character from the Rx FIFO */ *c = config->ser->SERRBUF; return 0; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ } static void kb1200_uart_poll_out(const struct device *dev, unsigned char c) { #ifdef CONFIG_UART_INTERRUPT_DRIVEN kb1200_uart_fifo_fill(dev, &c, 1); #else const struct kb1200_uart_config *config = dev->config; /* Wait Tx FIFO not Full*/ while (config->ser->SERSTS & SERSTS_TX_FULL) { ; } /* Put a character into Tx FIFO */ config->ser->SERTBUF = c; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ } static const struct uart_driver_api kb1200_uart_api = { .poll_in = kb1200_uart_poll_in, .poll_out = kb1200_uart_poll_out, .err_check = kb1200_uart_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = kb1200_uart_configure, .config_get = kb1200_uart_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = kb1200_uart_fifo_fill, .fifo_read = kb1200_uart_fifo_read, .irq_tx_enable = kb1200_uart_irq_tx_enable, .irq_tx_disable = kb1200_uart_irq_tx_disable, .irq_tx_ready = kb1200_uart_irq_tx_ready, .irq_rx_enable = kb1200_uart_irq_rx_enable, .irq_rx_disable = kb1200_uart_irq_rx_disable, .irq_rx_ready = kb1200_uart_irq_rx_ready, .irq_err_enable = kb1200_uart_irq_err_enable, .irq_err_disable = kb1200_uart_irq_err_disable, .irq_is_pending = kb1200_uart_irq_is_pending, .irq_update = kb1200_uart_irq_update, .irq_callback_set = kb1200_uart_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* UART module instances */ #define KB1200_UART_DEV(inst) DEVICE_DT_INST_GET(inst), static const struct device *const uart_devices[] = {DT_INST_FOREACH_STATUS_OKAY(KB1200_UART_DEV)}; static void kb1200_uart_isr_wrap(const struct device *dev) { for (size_t i = 0; i < ARRAY_SIZE(uart_devices); i++) { const struct device *dev_ = uart_devices[i]; const struct kb1200_uart_config *config = dev_->config; if (config->ser->SERIE & config->ser->SERPF) { kb1200_uart_irq_handler(dev_); } } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int kb1200_uart_init(const struct device *dev) { int ret; const struct kb1200_uart_config *config = dev->config; struct kb1200_uart_data *data = dev->data; ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { return ret; } kb1200_uart_configure(dev, &data->current_config); #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_cfg_func(); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static bool init_irq = true; static void kb1200_uart_irq_init(void) { if (init_irq) { init_irq = false; IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), kb1200_uart_isr_wrap, NULL, 0); irq_enable(DT_INST_IRQN(0)); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #define KB1200_UART_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static struct kb1200_uart_data kb1200_uart_data_##n = { \ .current_config = { \ .baudrate = DT_INST_PROP(n, current_speed), \ .parity = UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = UART_CFG_FLOW_CTRL_NONE, \ }, \ }; \ static const struct kb1200_uart_config kb1200_uart_config_##n = { \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (.irq_cfg_func = kb1200_uart_irq_init,)) \ .ser = (struct serial_regs *)DT_INST_REG_ADDR(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n)}; \ DEVICE_DT_INST_DEFINE(n, kb1200_uart_init, NULL, 
&kb1200_uart_data_##n, \ &kb1200_uart_config_##n, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &kb1200_uart_api); DT_INST_FOREACH_STATUS_OKAY(KB1200_UART_INIT) ```
/content/code_sandbox/drivers/serial/uart_ene_kb1200.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,858
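Since the kb1200 port above implements `configure`/`config_get` (it derives the divisor as `DIVIDER_BASE_CLK / baudrate - 1` and returns `-ENOTSUP` for anything other than 8-N-1 with no flow control), a hedged sketch of a runtime baud-rate change follows. The helper name and the 115200 value are illustrative only; `CONFIG_UART_USE_RUNTIME_CONFIGURE=y` is assumed.

```c
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

/* Hypothetical helper: re-program only the baud rate, keeping 8-N-1. */
static int switch_to_115200(const struct device *dev)
{
	struct uart_config cfg;
	int ret = uart_config_get(dev, &cfg); /* start from the current settings */

	if (ret < 0) {
		return ret;
	}

	cfg.baudrate = 115200;
	cfg.parity = UART_CFG_PARITY_NONE;     /* anything else is -ENOTSUP here */
	cfg.stop_bits = UART_CFG_STOP_BITS_1;
	cfg.data_bits = UART_CFG_DATA_BITS_8;
	cfg.flow_ctrl = UART_CFG_FLOW_CTRL_NONE;

	return uart_configure(dev, &cfg);
}
```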
```c /* * */ #define DT_DRV_COMPAT nxp_kinetis_lpuart #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <zephyr/pm/policy.h> #include <zephyr/drivers/pinctrl.h> #ifdef CONFIG_UART_ASYNC_API #include <zephyr/drivers/dma.h> #endif #include <zephyr/logging/log.h> #include <fsl_lpuart.h> #if CONFIG_NXP_LP_FLEXCOMM #include <zephyr/drivers/mfd/nxp_lp_flexcomm.h> #endif LOG_MODULE_REGISTER(uart_mcux_lpuart, LOG_LEVEL_ERR); #define PINCTRL_STATE_FLOWCONTROL PINCTRL_STATE_PRIV_START #if defined(CONFIG_UART_ASYNC_API) && defined(CONFIG_UART_INTERRUPT_DRIVEN) /* there are already going to be build errors, but at least this message will * be the first error from this driver making the reason clear */ BUILD_ASSERT(IS_ENABLED(CONFIG_UART_EXCLUSIVE_API_CALLBACKS), "" "LPUART must use exclusive api callbacks"); #endif #ifdef CONFIG_UART_ASYNC_API struct lpuart_dma_config { const struct device *dma_dev; const uint32_t dma_channel; struct dma_config dma_cfg; }; #endif /* CONFIG_UART_ASYNC_API */ struct mcux_lpuart_config { LPUART_Type *base; #ifdef CONFIG_NXP_LP_FLEXCOMM const struct device *parent_dev; #endif const struct device *clock_dev; const struct pinctrl_dev_config *pincfg; clock_control_subsys_t clock_subsys; uint32_t baud_rate; uint8_t flow_ctrl; uint8_t parity; bool rs485_de_active_low; bool loopback_en; bool single_wire; bool tx_invert; bool rx_invert; #ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT void (*irq_config_func)(const struct device *dev); #endif #ifdef CONFIG_UART_ASYNC_API const struct lpuart_dma_config rx_dma_config; const struct lpuart_dma_config tx_dma_config; #endif /* CONFIG_UART_ASYNC_API */ }; #ifdef CONFIG_UART_ASYNC_API struct mcux_lpuart_rx_dma_params { struct dma_block_config active_dma_block; uint8_t *buf; size_t buf_len; size_t offset; size_t counter; struct k_work_delayable timeout_work; size_t timeout_us; }; struct mcux_lpuart_tx_dma_params { struct dma_block_config active_dma_block; const uint8_t *buf; size_t buf_len; struct k_work_delayable timeout_work; size_t timeout_us; }; struct mcux_lpuart_async_data { const struct device *uart_dev; struct mcux_lpuart_tx_dma_params tx_dma_params; struct mcux_lpuart_rx_dma_params rx_dma_params; uint8_t *next_rx_buffer; size_t next_rx_buffer_len; uart_callback_t user_callback; void *user_data; }; #endif #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) enum mcux_lpuart_api { LPUART_NONE, LPUART_IRQ_DRIVEN, LPUART_ASYNC }; #endif struct mcux_lpuart_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif #ifdef CONFIG_PM bool pm_state_lock_on; bool tx_poll_stream_on; bool tx_int_stream_on; #endif /* CONFIG_PM */ #ifdef CONFIG_UART_ASYNC_API struct mcux_lpuart_async_data async; #endif struct uart_config uart_config; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) enum mcux_lpuart_api api_type; #endif }; #ifdef CONFIG_PM static void mcux_lpuart_pm_policy_state_lock_get(const struct device *dev) { struct mcux_lpuart_data *data = dev->data; if (!data->pm_state_lock_on) { data->pm_state_lock_on = true; pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } } static void mcux_lpuart_pm_policy_state_lock_put(const struct device *dev) { struct mcux_lpuart_data *data = dev->data; if (data->pm_state_lock_on) { data->pm_state_lock_on = false; pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } } #endif /* CONFIG_PM */ static int 
mcux_lpuart_poll_in(const struct device *dev, unsigned char *c) { const struct mcux_lpuart_config *config = dev->config; uint32_t flags = LPUART_GetStatusFlags(config->base); int ret = -1; if (flags & kLPUART_RxDataRegFullFlag) { *c = LPUART_ReadByte(config->base); ret = 0; } return ret; } static void mcux_lpuart_poll_out(const struct device *dev, unsigned char c) { const struct mcux_lpuart_config *config = dev->config; unsigned int key; #ifdef CONFIG_PM struct mcux_lpuart_data *data = dev->data; #endif while (!(LPUART_GetStatusFlags(config->base) & LPUART_STAT_TDRE_MASK)) { } /* Lock interrupts while we send data */ key = irq_lock(); #ifdef CONFIG_PM /* * We must keep the part from entering lower power mode until the * transmission completes. Set the power constraint, and enable * the transmission complete interrupt so we know when transmission is * completed. */ if (!data->tx_poll_stream_on && !data->tx_int_stream_on) { data->tx_poll_stream_on = true; mcux_lpuart_pm_policy_state_lock_get(dev); /* Enable TC interrupt */ LPUART_EnableInterrupts(config->base, kLPUART_TransmissionCompleteInterruptEnable); } #endif /* CONFIG_PM */ LPUART_WriteByte(config->base, c); irq_unlock(key); } static int mcux_lpuart_err_check(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t flags = LPUART_GetStatusFlags(config->base); int err = 0; if (flags & kLPUART_RxOverrunFlag) { err |= UART_ERROR_OVERRUN; } if (flags & kLPUART_ParityErrorFlag) { err |= UART_ERROR_PARITY; } if (flags & kLPUART_FramingErrorFlag) { err |= UART_ERROR_FRAMING; } if (flags & kLPUART_NoiseErrorFlag) { err |= UART_ERROR_PARITY; } LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag | kLPUART_ParityErrorFlag | kLPUART_FramingErrorFlag | kLPUART_NoiseErrorFlag); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int mcux_lpuart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct mcux_lpuart_config *config = dev->config; uint8_t num_tx = 0U; while ((len - num_tx > 0) && (LPUART_GetStatusFlags(config->base) & LPUART_STAT_TDRE_MASK)) { LPUART_WriteByte(config->base, tx_data[num_tx++]); } return num_tx; } static int mcux_lpuart_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { const struct mcux_lpuart_config *config = dev->config; uint8_t num_rx = 0U; while ((len - num_rx > 0) && (LPUART_GetStatusFlags(config->base) & kLPUART_RxDataRegFullFlag)) { rx_data[num_rx++] = LPUART_ReadByte(config->base); } return num_rx; } static void mcux_lpuart_irq_tx_enable(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable; #ifdef CONFIG_PM struct mcux_lpuart_data *data = dev->data; unsigned int key; #endif #ifdef CONFIG_PM key = irq_lock(); data->tx_poll_stream_on = false; data->tx_int_stream_on = true; /* Transmission complete interrupt no longer required */ LPUART_DisableInterrupts(config->base, kLPUART_TransmissionCompleteInterruptEnable); /* Do not allow system to sleep while UART tx is ongoing */ mcux_lpuart_pm_policy_state_lock_get(dev); #endif LPUART_EnableInterrupts(config->base, mask); #ifdef CONFIG_PM irq_unlock(key); #endif } static void mcux_lpuart_irq_tx_disable(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable; #ifdef CONFIG_PM struct mcux_lpuart_data *data = dev->data; unsigned int key; key = irq_lock(); #endif LPUART_DisableInterrupts(config->base, mask); #ifdef 
CONFIG_PM data->tx_int_stream_on = false; /* * If transmission IRQ is no longer enabled, * transmission is complete. Release pm constraint. */ mcux_lpuart_pm_policy_state_lock_put(dev); irq_unlock(key); #endif } static int mcux_lpuart_irq_tx_complete(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t flags = LPUART_GetStatusFlags(config->base); return (flags & kLPUART_TransmissionCompleteFlag) != 0U; } static int mcux_lpuart_irq_tx_ready(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t mask = kLPUART_TxDataRegEmptyInterruptEnable; uint32_t flags = LPUART_GetStatusFlags(config->base); return (LPUART_GetEnabledInterrupts(config->base) & mask) && (flags & LPUART_STAT_TDRE_MASK); } static void mcux_lpuart_irq_rx_enable(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t mask = kLPUART_RxDataRegFullInterruptEnable; LPUART_EnableInterrupts(config->base, mask); } static void mcux_lpuart_irq_rx_disable(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t mask = kLPUART_RxDataRegFullInterruptEnable; LPUART_DisableInterrupts(config->base, mask); } static int mcux_lpuart_irq_rx_full(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t flags = LPUART_GetStatusFlags(config->base); return (flags & kLPUART_RxDataRegFullFlag) != 0U; } static int mcux_lpuart_irq_rx_pending(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t mask = kLPUART_RxDataRegFullInterruptEnable; return (LPUART_GetEnabledInterrupts(config->base) & mask) && mcux_lpuart_irq_rx_full(dev); } static void mcux_lpuart_irq_err_enable(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t mask = kLPUART_NoiseErrorInterruptEnable | kLPUART_FramingErrorInterruptEnable | kLPUART_ParityErrorInterruptEnable; LPUART_EnableInterrupts(config->base, mask); } static void mcux_lpuart_irq_err_disable(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; uint32_t mask = kLPUART_NoiseErrorInterruptEnable | kLPUART_FramingErrorInterruptEnable | kLPUART_ParityErrorInterruptEnable; LPUART_DisableInterrupts(config->base, mask); } static int mcux_lpuart_irq_is_pending(const struct device *dev) { return (mcux_lpuart_irq_tx_ready(dev) || mcux_lpuart_irq_rx_pending(dev)); } static int mcux_lpuart_irq_update(const struct device *dev) { return 1; } static void mcux_lpuart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct mcux_lpuart_data *data = dev->data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) if (data->api_type == LPUART_ASYNC) { LOG_ERR("UART irq and async api are exclusive"); } #endif data->callback = cb; data->cb_data = cb_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->async.user_callback = NULL; data->async.user_data = NULL; data->api_type = LPUART_IRQ_DRIVEN; #endif } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API static inline void async_timer_start(struct k_work_delayable *work, size_t timeout_us) { if ((timeout_us != SYS_FOREVER_US) && (timeout_us != 0)) { LOG_DBG("async timer started for %d us", timeout_us); k_work_reschedule(work, K_USEC(timeout_us)); } } static void async_user_callback(const struct device *dev, struct uart_event *evt) { const struct mcux_lpuart_data *data = dev->data; if (data->async.user_callback) { data->async.user_callback(dev, evt, 
data->async.user_data); } } static void async_evt_tx_done(struct device *dev) { struct mcux_lpuart_data *data = dev->data; (void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work); LOG_DBG("TX done: %d", data->async.tx_dma_params.buf_len); struct uart_event event = { .type = UART_TX_DONE, .data.tx.buf = data->async.tx_dma_params.buf, .data.tx.len = data->async.tx_dma_params.buf_len }; /* Reset TX Buffer */ data->async.tx_dma_params.buf = NULL; data->async.tx_dma_params.buf_len = 0U; async_user_callback(dev, &event); } static void async_evt_rx_rdy(const struct device *dev) { struct mcux_lpuart_data *data = dev->data; struct mcux_lpuart_rx_dma_params *dma_params = &data->async.rx_dma_params; struct uart_event event = { .type = UART_RX_RDY, .data.rx.buf = dma_params->buf, .data.rx.len = dma_params->counter - dma_params->offset, .data.rx.offset = dma_params->offset }; LOG_DBG("RX Ready: (len: %d off: %d buf: %x)", event.data.rx.len, event.data.rx.offset, (uint32_t)event.data.rx.buf); /* Update the current pos for new data */ dma_params->offset = dma_params->counter; /* Only send event for new data */ if (event.data.rx.len > 0) { async_user_callback(dev, &event); } } static void async_evt_rx_buf_request(const struct device *dev) { struct uart_event evt = { .type = UART_RX_BUF_REQUEST, }; async_user_callback(dev, &evt); } static void async_evt_rx_buf_release(const struct device *dev) { struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data; struct uart_event evt = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = data->async.rx_dma_params.buf, }; async_user_callback(dev, &evt); data->async.rx_dma_params.buf = NULL; data->async.rx_dma_params.buf_len = 0U; data->async.rx_dma_params.offset = 0U; data->async.rx_dma_params.counter = 0U; } static void mcux_lpuart_async_rx_flush(const struct device *dev) { struct dma_status status; struct mcux_lpuart_data *data = dev->data; const struct mcux_lpuart_config *config = dev->config; const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel, &status); if (get_status_result == 0) { const size_t rx_rcv_len = data->async.rx_dma_params.buf_len - status.pending_length; if (rx_rcv_len > data->async.rx_dma_params.counter) { data->async.rx_dma_params.counter = rx_rcv_len; async_evt_rx_rdy(dev); } } else { LOG_ERR("Error getting DMA status"); } } static int mcux_lpuart_rx_disable(const struct device *dev) { LOG_INF("Disabling UART RX DMA"); const struct mcux_lpuart_config *config = dev->config; struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data; LPUART_Type *lpuart = config->base; const unsigned int key = irq_lock(); LPUART_EnableRx(lpuart, false); (void)k_work_cancel_delayable(&data->async.rx_dma_params.timeout_work); LPUART_DisableInterrupts(lpuart, kLPUART_IdleLineInterruptEnable); LPUART_ClearStatusFlags(lpuart, kLPUART_IdleLineFlag); LPUART_EnableRxDMA(lpuart, false); /* No active RX buffer, cannot disable */ if (!data->async.rx_dma_params.buf) { LOG_ERR("No buffers to release from RX DMA!"); } else { mcux_lpuart_async_rx_flush(dev); async_evt_rx_buf_release(dev); if (data->async.next_rx_buffer != NULL) { data->async.rx_dma_params.buf = data->async.next_rx_buffer; data->async.rx_dma_params.buf_len = data->async.next_rx_buffer_len; data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0; /* Release the next buffer as well */ async_evt_rx_buf_release(dev); } } const int ret = dma_stop(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel); if 
(ret != 0) { LOG_ERR("Error stopping rx DMA. Reason: %x", ret); } LOG_DBG("RX: Disabled"); struct uart_event disabled_event = { .type = UART_RX_DISABLED }; async_user_callback(dev, &disabled_event); irq_unlock(key); return ret; } static void prepare_rx_dma_block_config(const struct device *dev) { struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data; const struct mcux_lpuart_config *config = dev->config; LPUART_Type *lpuart = config->base; struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; assert(rx_dma_params->buf != NULL); assert(rx_dma_params->buf_len > 0); struct dma_block_config *head_block_config = &rx_dma_params->active_dma_block; head_block_config->dest_address = (uint32_t)rx_dma_params->buf; head_block_config->source_address = LPUART_GetDataRegisterAddress(lpuart); head_block_config->block_size = rx_dma_params->buf_len; head_block_config->dest_scatter_en = true; } static int configure_and_start_rx_dma( const struct mcux_lpuart_config *config, struct mcux_lpuart_data *data, LPUART_Type *lpuart) { LOG_DBG("Configuring and Starting UART RX DMA"); int ret = dma_config(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel, (struct dma_config *)&config->rx_dma_config.dma_cfg); if (ret != 0) { LOG_ERR("Failed to Configure RX DMA: err: %d", ret); return ret; } ret = dma_start(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel); if (ret < 0) { LOG_ERR("Failed to start DMA(Rx) Ch %d(%d)", config->rx_dma_config.dma_channel, ret); } LPUART_EnableRxDMA(lpuart, true); return ret; } static int uart_mcux_lpuart_dma_replace_rx_buffer(const struct device *dev) { struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data; const struct mcux_lpuart_config *config = dev->config; LPUART_Type *lpuart = config->base; LOG_DBG("Replacing RX buffer, new length: %d", data->async.next_rx_buffer_len); /* There must be a buffer to replace this one with */ assert(data->async.next_rx_buffer != NULL); assert(data->async.next_rx_buffer_len != 0U); const int success = dma_reload(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel, LPUART_GetDataRegisterAddress(lpuart), (uint32_t)data->async.next_rx_buffer, data->async.next_rx_buffer_len); if (success != 0) { LOG_ERR("Error %d reloading DMA with next RX buffer", success); } return success; } static void dma_callback(const struct device *dma_dev, void *callback_arg, uint32_t channel, int dma_status) { struct device *dev = (struct device *)callback_arg; const struct mcux_lpuart_config *config = dev->config; LPUART_Type *lpuart = config->base; struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data; LOG_DBG("DMA call back on channel %d", channel); struct dma_status status; const int get_status_result = dma_get_status(dma_dev, channel, &status); if (get_status_result < 0) { LOG_ERR("error on status get: %d", get_status_result); } else { LOG_DBG("DMA Status: b: %d dir: %d len_remain: %d", status.busy, status.dir, status.pending_length); } if (dma_status < 0) { LOG_ERR("Got error : %d", dma_status); } if (channel == config->tx_dma_config.dma_channel) { LOG_DBG("TX Channel"); LPUART_EnableTxDMA(lpuart, false); async_evt_tx_done(dev); } else if (channel == config->rx_dma_config.dma_channel) { LOG_DBG("RX Channel"); struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; /* The RX Event indicates DMA transfer is complete and full buffer is available. 
*/ rx_dma_params->counter = rx_dma_params->buf_len; LOG_DBG("Current Buf (%x) full, swapping to new buf: %x", (uint32_t)rx_dma_params->buf, (uint32_t)data->async.next_rx_buffer); async_evt_rx_rdy(dev); async_evt_rx_buf_release(dev); rx_dma_params->buf = data->async.next_rx_buffer; rx_dma_params->buf_len = data->async.next_rx_buffer_len; data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0U; /* A new buffer was available (and already loaded into the DMA engine) */ if (rx_dma_params->buf != NULL && rx_dma_params->buf_len > 0) { /* Request the next buffer */ async_evt_rx_buf_request(dev); } else { /* Buffer full without valid next buffer, disable RX DMA */ LOG_INF("Disabled RX DMA, no valid next buffer "); mcux_lpuart_rx_disable(dev); } } else { LOG_ERR("Got unexpected DMA Channel: %d", channel); } } static int mcux_lpuart_configure_async(const struct device *dev); static int mcux_lpuart_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { struct mcux_lpuart_data *data = dev->data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) if (data->api_type == LPUART_IRQ_DRIVEN) { LOG_ERR("UART irq and async api are exclusive"); return -ENOTSUP; } #endif data->async.user_callback = callback; data->async.user_data = user_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->callback = NULL; data->cb_data = NULL; data->api_type = LPUART_ASYNC; #endif return mcux_lpuart_configure_async(dev); } static int mcux_lpuart_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout_us) { struct mcux_lpuart_data *data = dev->data; const struct mcux_lpuart_config *config = dev->config; LPUART_Type *lpuart = config->base; unsigned int key = irq_lock(); /* Check for an ongoing transfer and abort if it is pending */ struct dma_status status; const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev, config->tx_dma_config.dma_channel, &status); if (get_status_result < 0 || status.busy) { irq_unlock(key); LOG_ERR("Unable to submit UART DMA Transfer."); return get_status_result < 0 ? 
get_status_result : -EBUSY; } int ret; LPUART_EnableTxDMA(lpuart, false); data->async.tx_dma_params.buf = buf; data->async.tx_dma_params.buf_len = len; data->async.tx_dma_params.active_dma_block.source_address = (uint32_t)buf; data->async.tx_dma_params.active_dma_block.dest_address = LPUART_GetDataRegisterAddress(lpuart); data->async.tx_dma_params.active_dma_block.block_size = len; data->async.tx_dma_params.active_dma_block.next_block = NULL; ret = dma_config(config->tx_dma_config.dma_dev, config->tx_dma_config.dma_channel, (struct dma_config *)&config->tx_dma_config.dma_cfg); if (ret == 0) { LOG_DBG("Starting UART DMA TX Ch %u", config->tx_dma_config.dma_channel); ret = dma_start(config->tx_dma_config.dma_dev, config->tx_dma_config.dma_channel); LPUART_EnableTxDMA(lpuart, true); if (ret != 0) { LOG_ERR("Failed to start DMA(Tx) Ch %d", config->tx_dma_config.dma_channel); } async_timer_start(&data->async.tx_dma_params.timeout_work, timeout_us); } else { LOG_ERR("Error configuring UART DMA: %x", ret); } irq_unlock(key); return ret; } static int mcux_lpuart_tx_abort(const struct device *dev) { struct mcux_lpuart_data *data = dev->data; const struct mcux_lpuart_config *config = dev->config; LPUART_Type *lpuart = config->base; LPUART_EnableTxDMA(lpuart, false); (void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work); struct dma_status status; const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev, config->tx_dma_config.dma_channel, &status); if (get_status_result < 0) { LOG_ERR("Error querying TX DMA Status during abort."); } const size_t bytes_transmitted = (get_status_result == 0) ? data->async.tx_dma_params.buf_len - status.pending_length : 0; const int ret = dma_stop(config->tx_dma_config.dma_dev, config->tx_dma_config.dma_channel); if (ret == 0) { struct uart_event tx_aborted_event = { .type = UART_TX_ABORTED, .data.tx.buf = data->async.tx_dma_params.buf, .data.tx.len = bytes_transmitted }; async_user_callback(dev, &tx_aborted_event); } return ret; } static int mcux_lpuart_rx_enable(const struct device *dev, uint8_t *buf, const size_t len, const int32_t timeout_us) { LOG_DBG("Enabling UART RX DMA"); struct mcux_lpuart_data *data = dev->data; const struct mcux_lpuart_config *config = dev->config; LPUART_Type *lpuart = config->base; struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; unsigned int key = irq_lock(); struct dma_status status; const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel, &status); if (get_status_result < 0 || status.busy) { LOG_ERR("Unable to start receive on UART."); irq_unlock(key); return get_status_result < 0 ? 
get_status_result : -EBUSY; } rx_dma_params->timeout_us = timeout_us; rx_dma_params->buf = buf; rx_dma_params->buf_len = len; LPUART_EnableInterrupts(config->base, kLPUART_IdleLineInterruptEnable); prepare_rx_dma_block_config(dev); const int ret = configure_and_start_rx_dma(config, data, lpuart); /* Request the next buffer for when this buffer is full for continuous reception */ async_evt_rx_buf_request(dev); /* Clear these status flags as they can prevent the UART device from receiving data */ LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag | kLPUART_ParityErrorFlag | kLPUART_FramingErrorFlag | kLPUART_NoiseErrorFlag); LPUART_EnableRx(lpuart, true); irq_unlock(key); return ret; } static int mcux_lpuart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { struct mcux_lpuart_data *data = dev->data; assert(data->async.next_rx_buffer == NULL); assert(data->async.next_rx_buffer_len == 0); data->async.next_rx_buffer = buf; data->async.next_rx_buffer_len = len; uart_mcux_lpuart_dma_replace_rx_buffer(dev); return 0; } static void mcux_lpuart_async_rx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct mcux_lpuart_rx_dma_params *rx_params = CONTAINER_OF(dwork, struct mcux_lpuart_rx_dma_params, timeout_work); struct mcux_lpuart_async_data *async_data = CONTAINER_OF(rx_params, struct mcux_lpuart_async_data, rx_dma_params); const struct device *dev = async_data->uart_dev; LOG_DBG("RX timeout"); mcux_lpuart_async_rx_flush(dev); } static void mcux_lpuart_async_tx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct mcux_lpuart_tx_dma_params *tx_params = CONTAINER_OF(dwork, struct mcux_lpuart_tx_dma_params, timeout_work); struct mcux_lpuart_async_data *async_data = CONTAINER_OF(tx_params, struct mcux_lpuart_async_data, tx_dma_params); const struct device *dev = async_data->uart_dev; LOG_DBG("TX timeout"); (void)mcux_lpuart_tx_abort(dev); } #endif /* CONFIG_UART_ASYNC_API */ #if CONFIG_UART_MCUX_LPUART_ISR_SUPPORT #ifdef CONFIG_UART_INTERRUPT_DRIVEN static inline void mcux_lpuart_irq_driven_isr(const struct device *dev, struct mcux_lpuart_data *data, const struct mcux_lpuart_config *config, const uint32_t status) { if (data->callback) { data->callback(dev, data->cb_data); } if (status & kLPUART_RxOverrunFlag) { LPUART_ClearStatusFlags(config->base, kLPUART_RxOverrunFlag); } } #endif #ifdef CONFIG_UART_ASYNC_API static inline void mcux_lpuart_async_isr(struct mcux_lpuart_data *data, const struct mcux_lpuart_config *config, const uint32_t status) { if (status & kLPUART_IdleLineFlag) { async_timer_start(&data->async.rx_dma_params.timeout_work, data->async.rx_dma_params.timeout_us); LPUART_ClearStatusFlags(config->base, kLPUART_IdleLineFlag); } } #endif static void mcux_lpuart_isr(const struct device *dev) { struct mcux_lpuart_data *data = dev->data; const struct mcux_lpuart_config *config = dev->config; const uint32_t status = LPUART_GetStatusFlags(config->base); #if CONFIG_PM if (status & kLPUART_TransmissionCompleteFlag) { if (data->tx_poll_stream_on) { /* Poll transmission complete. 
Allow system to sleep */ LPUART_DisableInterrupts(config->base, kLPUART_TransmissionCompleteInterruptEnable); data->tx_poll_stream_on = false; mcux_lpuart_pm_policy_state_lock_put(dev); } } #endif /* CONFIG_PM */ #if defined(CONFIG_UART_ASYNC_API) && defined(CONFIG_UART_INTERRUPT_DRIVEN) if (data->api_type == LPUART_IRQ_DRIVEN) { mcux_lpuart_irq_driven_isr(dev, data, config, status); } else if (data->api_type == LPUART_ASYNC) { mcux_lpuart_async_isr(data, config, status); } #elif defined(CONFIG_UART_INTERRUPT_DRIVEN) mcux_lpuart_irq_driven_isr(dev, data, config, status); #elif defined(CONFIG_UART_ASYNC_API) mcux_lpuart_async_isr(data, config, status); #endif /* API */ } #endif /* CONFIG_UART_MCUX_LPUART_ISR_SUPPORT */ static int mcux_lpuart_configure_basic(const struct device *dev, const struct uart_config *cfg, lpuart_config_t *uart_config) { /* Translate UART API enum to LPUART enum from HAL */ switch (cfg->parity) { case UART_CFG_PARITY_NONE: uart_config->parityMode = kLPUART_ParityDisabled; break; case UART_CFG_PARITY_ODD: uart_config->parityMode = kLPUART_ParityOdd; break; case UART_CFG_PARITY_EVEN: uart_config->parityMode = kLPUART_ParityEven; break; default: return -ENOTSUP; } switch (cfg->data_bits) { #if defined(FSL_FEATURE_LPUART_HAS_7BIT_DATA_SUPPORT) && \ FSL_FEATURE_LPUART_HAS_7BIT_DATA_SUPPORT case UART_CFG_DATA_BITS_7: uart_config->dataBitsCount = kLPUART_SevenDataBits; break; #endif case UART_CFG_DATA_BITS_8: uart_config->dataBitsCount = kLPUART_EightDataBits; break; default: return -ENOTSUP; } #if defined(FSL_FEATURE_LPUART_HAS_STOP_BIT_CONFIG_SUPPORT) && \ FSL_FEATURE_LPUART_HAS_STOP_BIT_CONFIG_SUPPORT switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: uart_config->stopBitCount = kLPUART_OneStopBit; break; case UART_CFG_STOP_BITS_2: uart_config->stopBitCount = kLPUART_TwoStopBit; break; default: return -ENOTSUP; } #endif #if defined(FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT) && \ FSL_FEATURE_LPUART_HAS_MODEM_SUPPORT switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: case UART_CFG_FLOW_CTRL_RS485: uart_config->enableTxCTS = false; uart_config->enableRxRTS = false; break; case UART_CFG_FLOW_CTRL_RTS_CTS: uart_config->enableTxCTS = true; uart_config->enableRxRTS = true; break; default: return -ENOTSUP; } #endif uart_config->baudRate_Bps = cfg->baudrate; uart_config->enableRx = true; /* Tx will be enabled manually after set tx-rts */ uart_config->enableTx = false; return 0; } #ifdef CONFIG_UART_ASYNC_API static int mcux_lpuart_configure_async(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; struct mcux_lpuart_data *data = dev->data; lpuart_config_t uart_config; int ret; LPUART_GetDefaultConfig(&uart_config); ret = mcux_lpuart_configure_basic(dev, &data->uart_config, &uart_config); if (ret) { return ret; } uart_config.rxIdleType = kLPUART_IdleTypeStopBit; uart_config.rxIdleConfig = kLPUART_IdleCharacter1; data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0; data->async.uart_dev = dev; k_work_init_delayable(&data->async.rx_dma_params.timeout_work, mcux_lpuart_async_rx_timeout); k_work_init_delayable(&data->async.tx_dma_params.timeout_work, mcux_lpuart_async_tx_timeout); /* Disable the UART Receiver until the async API provides a buffer to * receive into with rx_enable */ uart_config.enableRx = false; /* Clearing the fifo of any junk received before the async rx enable was called */ while (LPUART_GetRxFifoCount(config->base) > 0) { LPUART_ReadByte(config->base); } return 0; } #endif static int mcux_lpuart_configure_init(const 
struct device *dev, const struct uart_config *cfg) { const struct mcux_lpuart_config *config = dev->config; struct mcux_lpuart_data *data = dev->data; lpuart_config_t uart_config; uint32_t clock_freq; int ret; if (!device_is_ready(config->clock_dev)) { return -ENODEV; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } LPUART_GetDefaultConfig(&uart_config); ret = mcux_lpuart_configure_basic(dev, cfg, &uart_config); if (ret) { return ret; } LPUART_Init(config->base, &uart_config, clock_freq); if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RS485) { /* Set the LPUART into RS485 mode (tx driver enable using RTS) */ config->base->MODIR |= LPUART_MODIR_TXRTSE(true); if (!config->rs485_de_active_low) { config->base->MODIR |= LPUART_MODIR_TXRTSPOL(1); } } /* Now can enable tx */ config->base->CTRL |= LPUART_CTRL_TE(true); if (config->loopback_en) { /* Set the LPUART into loopback mode */ config->base->CTRL |= LPUART_CTRL_LOOPS_MASK; config->base->CTRL &= ~LPUART_CTRL_RSRC_MASK; } else if (config->single_wire) { /* Enable the single wire / half-duplex mode, only possible when * loopback is disabled. We need a critical section to prevent * the UART firing an interrupt during mode switch */ unsigned int key = irq_lock(); config->base->CTRL |= (LPUART_CTRL_LOOPS_MASK | LPUART_CTRL_RSRC_MASK); irq_unlock(key); } else { #ifdef LPUART_CTRL_TXINV /* Only invert TX in full-duplex mode */ if (config->tx_invert) { config->base->CTRL |= LPUART_CTRL_TXINV(1); } #endif } #ifdef LPUART_STAT_RXINV if (config->rx_invert) { config->base->STAT |= LPUART_STAT_RXINV(1); } #endif /* update internal uart_config */ data->uart_config = *cfg; return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int mcux_lpuart_config_get(const struct device *dev, struct uart_config *cfg) { struct mcux_lpuart_data *data = dev->data; *cfg = data->uart_config; return 0; } static int mcux_lpuart_configure(const struct device *dev, const struct uart_config *cfg) { const struct mcux_lpuart_config *config = dev->config; /* Make sure that RSRC is de-asserted otherwise deinit will hang. */ config->base->CTRL &= ~LPUART_CTRL_RSRC_MASK; /* disable LPUART */ LPUART_Deinit(config->base); int ret = mcux_lpuart_configure_init(dev, cfg); if (ret) { return ret; } /* wait for hardware init */ k_sleep(K_MSEC(1)); return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ static int mcux_lpuart_init(const struct device *dev) { const struct mcux_lpuart_config *config = dev->config; struct mcux_lpuart_data *data = dev->data; struct uart_config *uart_api_config = &data->uart_config; int err; uart_api_config->baudrate = config->baud_rate; uart_api_config->parity = config->parity; uart_api_config->stop_bits = UART_CFG_STOP_BITS_1; uart_api_config->data_bits = UART_CFG_DATA_BITS_8; uart_api_config->flow_ctrl = config->flow_ctrl; /* set initial configuration */ mcux_lpuart_configure_init(dev, uart_api_config); if (config->flow_ctrl) { const struct pinctrl_state *state; err = pinctrl_lookup_state(config->pincfg, PINCTRL_STATE_FLOWCONTROL, &state); if (err < 0) { err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); } } else { err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); } if (err < 0) { return err; } #ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT #if CONFIG_NXP_LP_FLEXCOMM /* When using LP Flexcomm driver, register the interrupt handler * so we receive notification from the LP Flexcomm interrupt handler. 
*/ nxp_lp_flexcomm_setirqhandler(config->parent_dev, dev, LP_FLEXCOMM_PERIPH_LPUART, mcux_lpuart_isr); #else /* Interrupt is managed by this driver */ config->irq_config_func(dev); #endif #ifdef CONFIG_UART_EXCLUSIVE_API_CALLBACKS data->api_type = LPUART_NONE; #endif #endif #ifdef CONFIG_PM data->pm_state_lock_on = false; data->tx_poll_stream_on = false; data->tx_int_stream_on = false; #endif return 0; } static const struct uart_driver_api mcux_lpuart_driver_api = { .poll_in = mcux_lpuart_poll_in, .poll_out = mcux_lpuart_poll_out, .err_check = mcux_lpuart_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = mcux_lpuart_configure, .config_get = mcux_lpuart_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = mcux_lpuart_fifo_fill, .fifo_read = mcux_lpuart_fifo_read, .irq_tx_enable = mcux_lpuart_irq_tx_enable, .irq_tx_disable = mcux_lpuart_irq_tx_disable, .irq_tx_complete = mcux_lpuart_irq_tx_complete, .irq_tx_ready = mcux_lpuart_irq_tx_ready, .irq_rx_enable = mcux_lpuart_irq_rx_enable, .irq_rx_disable = mcux_lpuart_irq_rx_disable, .irq_rx_ready = mcux_lpuart_irq_rx_full, .irq_err_enable = mcux_lpuart_irq_err_enable, .irq_err_disable = mcux_lpuart_irq_err_disable, .irq_is_pending = mcux_lpuart_irq_is_pending, .irq_update = mcux_lpuart_irq_update, .irq_callback_set = mcux_lpuart_irq_callback_set, #endif #ifdef CONFIG_UART_ASYNC_API .callback_set = mcux_lpuart_callback_set, .tx = mcux_lpuart_tx, .tx_abort = mcux_lpuart_tx_abort, .rx_enable = mcux_lpuart_rx_enable, .rx_buf_rsp = mcux_lpuart_rx_buf_rsp, .rx_disable = mcux_lpuart_rx_disable, #endif /* CONFIG_UART_ASYNC_API */ }; #ifdef CONFIG_UART_MCUX_LPUART_ISR_SUPPORT #define MCUX_LPUART_IRQ_INSTALL(n, i) \ do { \ IRQ_CONNECT(DT_INST_IRQN_BY_IDX(n, i), \ DT_INST_IRQ_BY_IDX(n, i, priority), \ mcux_lpuart_isr, DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq)); \ } while (false) #define MCUX_LPUART_IRQ_INIT(n) .irq_config_func = mcux_lpuart_config_func_##n, #define MCUX_LPUART_IRQ_DEFINE(n) \ static void mcux_lpuart_config_func_##n(const struct device *dev) \ { \ IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0), \ (MCUX_LPUART_IRQ_INSTALL(n, 0);)) \ \ IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 1), \ (MCUX_LPUART_IRQ_INSTALL(n, 1);)) \ } #else #define MCUX_LPUART_IRQ_INIT(n) #define MCUX_LPUART_IRQ_DEFINE(n) #endif /* CONFIG_UART_MCUX_LPUART_ISR_SUPPORT */ #ifdef CONFIG_UART_ASYNC_API #define TX_DMA_CONFIG(id) \ .tx_dma_config = { \ .dma_dev = \ DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \ .dma_channel = \ DT_INST_DMAS_CELL_BY_NAME(id, tx, mux), \ .dma_cfg = { \ .source_burst_length = 1, \ .dest_burst_length = 1, \ .source_data_size = 1, \ .dest_data_size = 1, \ .complete_callback_en = 1, \ .error_callback_dis = 0, \ .block_count = 1, \ .head_block = \ &mcux_lpuart_##id##_data.async.tx_dma_params.active_dma_block, \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME( \ id, tx, source), \ .dma_callback = dma_callback, \ .user_data = (void *)DEVICE_DT_INST_GET(id) \ }, \ }, #define RX_DMA_CONFIG(id) \ .rx_dma_config = { \ .dma_dev = \ DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \ .dma_channel = \ DT_INST_DMAS_CELL_BY_NAME(id, rx, mux), \ .dma_cfg = { \ .source_burst_length = 1, \ .dest_burst_length = 1, \ .source_data_size = 1, \ .dest_data_size = 1, \ .complete_callback_en = 1, \ .error_callback_dis = 0, \ .block_count = 1, \ .head_block = \ &mcux_lpuart_##id##_data.async.rx_dma_params.active_dma_block, \ .channel_direction = PERIPHERAL_TO_MEMORY, \ .dma_slot = 
DT_INST_DMAS_CELL_BY_NAME( \ id, rx, source), \ .dma_callback = dma_callback, \ .user_data = (void *)DEVICE_DT_INST_GET(id) \ }, \ }, #else #define RX_DMA_CONFIG(n) #define TX_DMA_CONFIG(n) #endif /* CONFIG_UART_ASYNC_API */ #define FLOW_CONTROL(n) \ DT_INST_PROP(n, hw_flow_control) \ ? UART_CFG_FLOW_CTRL_RTS_CTS \ : DT_INST_PROP(n, nxp_rs485_mode)\ ? UART_CFG_FLOW_CTRL_RS485 \ : UART_CFG_FLOW_CTRL_NONE #ifdef CONFIG_NXP_LP_FLEXCOMM #define PARENT_DEV(n) \ .parent_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), #else #define PARENT_DEV(n) #endif /* CONFIG_NXP_LP_FLEXCOMM */ #define LPUART_MCUX_DECLARE_CFG(n) \ static const struct mcux_lpuart_config mcux_lpuart_##n##_config = { \ .base = (LPUART_Type *) DT_INST_REG_ADDR(n), \ PARENT_DEV(n) \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ .baud_rate = DT_INST_PROP(n, current_speed), \ .flow_ctrl = FLOW_CONTROL(n), \ .parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE), \ .rs485_de_active_low = DT_INST_PROP(n, nxp_rs485_de_active_low), \ .loopback_en = DT_INST_PROP(n, nxp_loopback), \ .single_wire = DT_INST_PROP(n, single_wire), \ .rx_invert = DT_INST_PROP(n, rx_invert), \ .tx_invert = DT_INST_PROP(n, tx_invert), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ MCUX_LPUART_IRQ_INIT(n) \ RX_DMA_CONFIG(n) \ TX_DMA_CONFIG(n) \ }; #define LPUART_MCUX_INIT(n) \ \ static struct mcux_lpuart_data mcux_lpuart_##n##_data; \ \ PINCTRL_DT_INST_DEFINE(n); \ MCUX_LPUART_IRQ_DEFINE(n) \ \ LPUART_MCUX_DECLARE_CFG(n) \ \ DEVICE_DT_INST_DEFINE(n, \ mcux_lpuart_init, \ NULL, \ &mcux_lpuart_##n##_data, \ &mcux_lpuart_##n##_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &mcux_lpuart_driver_api); \ DT_INST_FOREACH_STATUS_OKAY(LPUART_MCUX_INIT) ```
/content/code_sandbox/drivers/serial/uart_mcux_lpuart.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,299
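The LPUART port above also implements the asynchronous (DMA-backed) UART API. A minimal consumer-side sketch follows, assuming `CONFIG_UART_ASYNC_API=y` and a devicetree node with `rx`/`tx` DMA channels; the buffer sizes, the two-buffer rotation and the idle timeout value are illustrative choices, not requirements of the driver.

```c
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

#define RX_CHUNK      64
#define RX_TIMEOUT_US 1000 /* idle-line timeout passed to uart_rx_enable() */

static uint8_t rx_bufs[2][RX_CHUNK];
static int next_buf = 1;

static void async_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	ARG_UNUSED(user_data);

	switch (evt->type) {
	case UART_RX_RDY:
		/* evt->data.rx.buf + evt->data.rx.offset holds evt->data.rx.len new bytes */
		break;
	case UART_RX_BUF_REQUEST:
		/* Hand the driver the spare buffer so DMA reception keeps running */
		uart_rx_buf_rsp(dev, rx_bufs[next_buf], RX_CHUNK);
		next_buf ^= 1;
		break;
	case UART_RX_BUF_RELEASED:
	case UART_RX_DISABLED:
	case UART_TX_DONE:
	default:
		break;
	}
}

int start_async_io(const struct device *dev)
{
	static const uint8_t msg[] = "hello over DMA\r\n";
	int ret = uart_callback_set(dev, async_cb, NULL);

	if (ret < 0) {
		return ret;
	}

	ret = uart_rx_enable(dev, rx_bufs[0], RX_CHUNK, RX_TIMEOUT_US);
	if (ret < 0) {
		return ret;
	}

	return uart_tx(dev, msg, sizeof(msg) - 1, SYS_FOREVER_US);
}
```

The `UART_RX_BUF_REQUEST` path matches how the driver reloads its DMA channel in `uart_mcux_lpuart_dma_replace_rx_buffer`: the next buffer must be supplied before the current one fills, otherwise the driver disables reception.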
```c /* * */ #define DT_DRV_COMPAT litex_uart #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/init.h> #include <zephyr/irq.h> #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/types.h> #include <soc.h> #define UART_EV_TX BIT(0) #define UART_EV_RX BIT(1) struct uart_litex_device_config { uint32_t rxtx_addr; uint32_t txfull_addr; uint32_t rxempty_addr; uint32_t ev_status_addr; uint32_t ev_pending_addr; uint32_t ev_enable_addr; uint32_t txempty_addr; uint32_t rxfull_addr; uint32_t baud_rate; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*config_func)(const struct device *dev); #endif }; struct uart_litex_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN struct k_timer timer; uart_irq_callback_user_data_t callback; void *cb_data; #endif }; /** * @brief Output a character in polled mode. * * Writes data to tx register. Waits for space if transmitter is full. * * @param dev UART device struct * @param c Character to send */ static void uart_litex_poll_out(const struct device *dev, unsigned char c) { const struct uart_litex_device_config *config = dev->config; /* wait for space */ while (litex_read8(config->txfull_addr)) { } litex_write8(c, config->rxtx_addr); } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer is empty. */ static int uart_litex_poll_in(const struct device *dev, unsigned char *c) { const struct uart_litex_device_config *config = dev->config; if (!litex_read8(config->rxempty_addr)) { *c = litex_read8(config->rxtx_addr); /* refresh UART_RXEMPTY by writing UART_EV_RX * to UART_EV_PENDING */ litex_write8(UART_EV_RX, config->ev_pending_addr); return 0; } else { return -1; } } #ifdef CONFIG_UART_INTERRUPT_DRIVEN /** * @brief Enable TX interrupt in event register * * @param dev UART device struct */ static void uart_litex_irq_tx_enable(const struct device *dev) { const struct uart_litex_device_config *config = dev->config; struct uart_litex_data *data = dev->data; uint8_t enable = litex_read8(config->ev_enable_addr); litex_write8(enable | UART_EV_TX, config->ev_enable_addr); if (!litex_read8(config->txfull_addr)) { /* * TX done event already generated an edge interrupt. Generate a * soft interrupt and have it call the callback function in * timer isr context. 
*/ k_timer_start(&data->timer, K_NO_WAIT, K_NO_WAIT); } } /** * @brief Disable TX interrupt in event register * * @param dev UART device struct */ static void uart_litex_irq_tx_disable(const struct device *dev) { const struct uart_litex_device_config *config = dev->config; uint8_t enable = litex_read8(config->ev_enable_addr); litex_write8(enable & ~(UART_EV_TX), config->ev_enable_addr); } /** * @brief Enable RX interrupt in event register * * @param dev UART device struct */ static void uart_litex_irq_rx_enable(const struct device *dev) { const struct uart_litex_device_config *config = dev->config; uint8_t enable = litex_read8(config->ev_enable_addr); litex_write8(enable | UART_EV_RX, config->ev_enable_addr); } /** * @brief Disable RX interrupt in event register * * @param dev UART device struct */ static void uart_litex_irq_rx_disable(const struct device *dev) { const struct uart_litex_device_config *config = dev->config; uint8_t enable = litex_read8(config->ev_enable_addr); litex_write8(enable & ~(UART_EV_RX), config->ev_enable_addr); } /** * @brief Check if Tx IRQ has been raised and UART is ready to accept new data * * @param dev UART device struct * * @return 1 if an IRQ has been raised, 0 otherwise */ static int uart_litex_irq_tx_ready(const struct device *dev) { const struct uart_litex_device_config *config = dev->config; uint8_t val = litex_read8(config->txfull_addr); return !val; } /** * @brief Check if Rx IRQ has been raised and there's data to be read from UART * * @param dev UART device struct * * @return 1 if an IRQ has been raised, 0 otherwise */ static int uart_litex_irq_rx_ready(const struct device *dev) { const struct uart_litex_device_config *config = dev->config; uint8_t pending; pending = litex_read8(config->ev_pending_addr); if (pending & UART_EV_RX) { return 1; } else { return 0; } } /** * @brief Fill FIFO with data * * @param dev UART device struct * @param tx_data Data to transmit * @param size Number of bytes to send * * @return Number of bytes sent */ static int uart_litex_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct uart_litex_device_config *config = dev->config; int i; for (i = 0; i < size && !litex_read8(config->txfull_addr); i++) { litex_write8(tx_data[i], config->rxtx_addr); } return i; } /** * @brief Read data from FIFO * * @param dev UART device struct * @param rxData Data container * @param size Container size * * @return Number of bytes read */ static int uart_litex_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_litex_device_config *config = dev->config; int i; for (i = 0; i < size && !litex_read8(config->rxempty_addr); i++) { rx_data[i] = litex_read8(config->rxtx_addr); /* refresh UART_RXEMPTY by writing UART_EV_RX * to UART_EV_PENDING */ litex_write8(UART_EV_RX, config->ev_pending_addr); } return i; } static void uart_litex_irq_err(const struct device *dev) { ARG_UNUSED(dev); } /** * @brief Check if any IRQ is pending * * @param dev UART device struct * * @return 1 if an IRQ is pending, 0 otherwise */ static int uart_litex_irq_is_pending(const struct device *dev) { return (uart_litex_irq_tx_ready(dev) || uart_litex_irq_rx_ready(dev)); } static int uart_litex_irq_update(const struct device *dev) { return 1; } /** * @brief Set the callback function pointer for IRQ. * * @param dev UART device struct * @param cb Callback function pointer. 
*/ static void uart_litex_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_litex_data *data; data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void uart_litex_irq_handler(const struct device *dev) { const struct uart_litex_device_config *config = dev->config; struct uart_litex_data *data = dev->data; unsigned int key = irq_lock(); if (data->callback) { data->callback(dev, data->cb_data); } /* Clear RX events, TX events still needed to enqueue the next transfer */ litex_write8(UART_EV_RX, config->ev_pending_addr); irq_unlock(key); } static void uart_litex_tx_soft_isr(struct k_timer *timer) { const struct device *dev = k_timer_user_data_get(timer); uart_litex_irq_handler(dev); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_litex_driver_api = { .poll_in = uart_litex_poll_in, .poll_out = uart_litex_poll_out, .err_check = NULL, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_litex_fifo_fill, .fifo_read = uart_litex_fifo_read, .irq_tx_enable = uart_litex_irq_tx_enable, .irq_tx_disable = uart_litex_irq_tx_disable, .irq_tx_ready = uart_litex_irq_tx_ready, .irq_rx_enable = uart_litex_irq_rx_enable, .irq_rx_disable = uart_litex_irq_rx_disable, .irq_rx_ready = uart_litex_irq_rx_ready, .irq_err_enable = uart_litex_irq_err, .irq_err_disable = uart_litex_irq_err, .irq_is_pending = uart_litex_irq_is_pending, .irq_update = uart_litex_irq_update, .irq_callback_set = uart_litex_irq_callback_set #endif }; static int uart_litex_init(const struct device *dev) { const struct uart_litex_device_config *config = dev->config; litex_write8(UART_EV_TX | UART_EV_RX, config->ev_pending_addr); #ifdef CONFIG_UART_INTERRUPT_DRIVEN struct uart_litex_data *data = dev->data; k_timer_init(&data->timer, &uart_litex_tx_soft_isr, NULL); k_timer_user_data_set(&data->timer, (void *)dev); config->config_func(dev); #endif return 0; } #define LITEX_UART_IRQ_INIT(n) \ static void uart_irq_config##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), uart_litex_irq_handler, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } #define LITEX_UART_INIT(n) \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (LITEX_UART_IRQ_INIT(n))) \ \ static struct uart_litex_data uart_litex_data_##n; \ \ static const struct uart_litex_device_config uart_litex_dev_cfg_##n = { \ .rxtx_addr = DT_INST_REG_ADDR_BY_NAME(n, rxtx), \ .txfull_addr = DT_INST_REG_ADDR_BY_NAME(n, txfull), \ .rxempty_addr = DT_INST_REG_ADDR_BY_NAME(n, rxempty), \ .ev_status_addr = DT_INST_REG_ADDR_BY_NAME(n, ev_status), \ .ev_pending_addr = DT_INST_REG_ADDR_BY_NAME(n, ev_pending), \ .ev_enable_addr = DT_INST_REG_ADDR_BY_NAME(n, ev_enable), \ .txempty_addr = DT_INST_REG_ADDR_BY_NAME(n, txempty), \ .rxfull_addr = DT_INST_REG_ADDR_BY_NAME(n, rxfull), \ .baud_rate = DT_INST_PROP(n, current_speed), \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (.config_func = uart_irq_config##n,))}; \ \ DEVICE_DT_INST_DEFINE(n, uart_litex_init, NULL, &uart_litex_data_##n, \ &uart_litex_dev_cfg_##n, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ (void *)&uart_litex_driver_api); DT_INST_FOREACH_STATUS_OKAY(LITEX_UART_INIT) ```
/content/code_sandbox/drivers/serial/uart_litex.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,584
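The LiteX file above exposes Zephyr's interrupt-driven UART hooks (`fifo_fill`/`fifo_read` plus the `irq_*` callbacks). A minimal sketch of how application code consumes that API is below; the chosen console node and buffer size are assumptions for illustration, not part of the driver.

```c
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

/* ISR-context callback: drain the RX FIFO through the driver's fifo_read hook */
static void rx_cb(const struct device *dev, void *user_data)
{
	uint8_t buf[16];

	/* uart_irq_update() must be called once per callback before the
	 * other irq_* queries are valid.
	 */
	if (!uart_irq_update(dev)) {
		return;
	}

	while (uart_irq_rx_ready(dev)) {
		int n = uart_fifo_read(dev, buf, sizeof(buf));

		ARG_UNUSED(n); /* process n bytes from buf here */
	}
}

int console_rx_setup(void)
{
	const struct device *uart = DEVICE_DT_GET(DT_CHOSEN(zephyr_console));

	if (!device_is_ready(uart)) {
		return -ENODEV;
	}

	uart_irq_callback_set(uart, rx_cb);
	uart_irq_rx_enable(uart);

	return 0;
}
```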
```c /* */ #define DT_DRV_COMPAT cdns_uart /** * @brief Serial Driver for cadence UART IP6528 */ #include "uart_cdns.h" #define DEV_UART(dev) ((struct uart_cdns_regs *) \ ((const struct uart_cdns_device_config *const)(dev)->config)->port) /** Check if tx FIFO is full */ bool uart_cdns_is_tx_fifo_full(struct uart_cdns_regs *uart_regs) { return ((uart_regs->channel_status & CSR_TFUL_MASK) != 0); } /** Check if tx FIFO is empty */ bool uart_cdns_is_tx_fifo_empty(struct uart_cdns_regs *uart_regs) { return ((uart_regs->channel_status & CSR_TEMPTY_MASK) != 0); } /** Check if rx FIFO is empty */ bool uart_cdns_is_rx_fifo_empty(struct uart_cdns_regs *uart_regs) { return ((uart_regs->channel_status & CSR_REMPTY_MASK) != 0); } /** Set the baudrate */ void uart_cdns_set_baudrate(struct uart_cdns_regs *uart_regs, const struct uart_cdns_device_config *const dev_cfg, uint32_t baud_rate) { uart_regs->baud_rate_div = dev_cfg->bdiv; /* * baud_rate is calculated by hardware as below * * baud_rate = sel_clk / ((bdiv + 1) * clock_divisor) * i.e. clock_divisor = sel_clk / ((bdiv + 1) * baud_rate) * * However to round to a nearest integer we use this: * clock_divisor = (sel_clk + ((bdiv + 1) * baud_rate) / 2) / ((bdiv + 1) * baud_rate) */ uart_regs->baud_rate_gen = (dev_cfg->sys_clk_freq + ((dev_cfg->bdiv + 1) * baud_rate) / 2) / ((dev_cfg->bdiv + 1) * baud_rate); } static void uart_cdns_poll_out(const struct device *dev, unsigned char out_char) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); /* Wait while TX FIFO is full */ while (uart_cdns_is_tx_fifo_full(uart_regs)) { } uart_regs->rx_tx_fifo = (uint32_t)out_char; } /** @brief Poll the device for input. */ int uart_cdns_poll_in(const struct device *dev, unsigned char *p_char) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); if (uart_cdns_is_rx_fifo_empty(uart_regs)) { return -1; } *p_char = (unsigned char)(uart_regs->rx_tx_fifo & RXDATA_MASK); return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_cdns_fill_fifo(const struct device *dev, const uint8_t *tx_data, int len) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); int i = 0; for (i = 0; i < len && (!uart_cdns_is_tx_fifo_full(uart_regs)); i++) { uart_regs->rx_tx_fifo = tx_data[i]; } return i; } static int uart_cdns_read_fifo(const struct device *dev, uint8_t *rx_data, const int size) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); int i = 0; for (i = 0; i < size && (!uart_cdns_is_rx_fifo_empty(uart_regs)); i++) { rx_data[i] = uart_regs->rx_tx_fifo; } if (i > 0) { uart_regs->ctrl |= CTRL_RSTTO_MASK; } return i; } void uart_cdns_enable_tx_irq(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); /* * TX empty interrupt only triggered when TX removes the last byte from the * TX FIFO. We need another way generate the first interrupt. 
This is why * we have the timer involved here */ uart_regs->rx_timeout = DEFAULT_RTO_PERIODS_FACTOR; uart_regs->intr_enable = CSR_TEMPTY_MASK | CSR_TOUT_MASK; } void uart_cdns_disable_tx_irq(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); uart_regs->intr_disable = CSR_TEMPTY_MASK | CSR_TOUT_MASK; } static int uart_cdns_irq_tx_ready(const struct device *dev) { return !uart_cdns_is_tx_fifo_full(DEV_UART(dev)); } static int uart_cdns_irq_tx_complete(const struct device *dev) { return uart_cdns_is_tx_fifo_empty(DEV_UART(dev)); } void uart_cdns_enable_rx_irq(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); uart_regs->rx_fifo_trigger_level = 1; uart_regs->intr_enable = CSR_RTRIG_MASK; } /** Disable RX UART interrupt */ void uart_cdns_disable_rx_irq(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); uart_regs->intr_disable = CSR_RTRIG_MASK; } static int uart_cdns_irq_rx_ready(const struct device *dev) { return !uart_cdns_is_rx_fifo_empty(DEV_UART(dev)); } static void uart_cdns_enable_irq_err(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); uart_regs->intr_enable |= (CSR_TOVR_MASK | CSR_TOUT_MASK | CSR_PARE_MASK | CSR_FRAME_MASK | CSR_ROVR_MASK); } static void uart_cdns_disable_irq_err(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); uart_regs->intr_disable |= (CSR_TOVR_MASK | CSR_TOUT_MASK | CSR_PARE_MASK | CSR_FRAME_MASK | CSR_ROVR_MASK); } static int uart_cdns_is_irq_pending(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); return (uart_regs->channel_intr_status != 0); } /** Check for IRQ updates */ static int uart_cdns_update_irq(const struct device *dev) { return 1; } /** Set the callback function pointer for IRQ. 
*/ void uart_cdns_set_irq_callback(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_cdns_data *data; data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void uart_cdns_irq_handler(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); uint32_t key = irq_lock(); uint32_t isr_status; struct uart_cdns_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } /* clear events */ /* need to make local copy of the status */ isr_status = uart_regs->channel_intr_status; if (isr_status & CSR_TOUT_MASK) { uart_regs->intr_disable = CSR_TOUT_MASK; } irq_unlock(key); } #endif static const struct uart_driver_api uart_cdns_driver_api = { .poll_in = uart_cdns_poll_in, .poll_out = uart_cdns_poll_out, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_cdns_fill_fifo, .fifo_read = uart_cdns_read_fifo, .irq_tx_enable = uart_cdns_enable_tx_irq, .irq_tx_disable = uart_cdns_disable_tx_irq, .irq_tx_ready = uart_cdns_irq_tx_ready, .irq_tx_complete = uart_cdns_irq_tx_complete, .irq_rx_enable = uart_cdns_enable_rx_irq, .irq_rx_disable = uart_cdns_disable_rx_irq, .irq_rx_ready = uart_cdns_irq_rx_ready, .irq_err_enable = uart_cdns_enable_irq_err, .irq_err_disable = uart_cdns_disable_irq_err, .irq_is_pending = uart_cdns_is_irq_pending, .irq_update = uart_cdns_update_irq, .irq_callback_set = uart_cdns_set_irq_callback #endif }; /** Initialize the UART */ static int uart_cdns_init(const struct device *dev) { struct uart_cdns_regs *uart_regs = DEV_UART(dev); const struct uart_cdns_device_config *const dev_cfg = dev->config; /* Reset RX and TX path */ uart_regs->ctrl = (CTRL_RXRES_MASK | CTRL_TXRES_MASK); /* Disable TX and RX channels */ uart_regs->ctrl = (CTRL_STPBRK_MASK | CTRL_TXDIS_MASK | CTRL_RXDIS_MASK); /* Configure Baud rate */ uart_cdns_set_baudrate(uart_regs, dev_cfg, dev_cfg->baud_rate); /* Configure the mode */ uart_regs->mode = (SET_VAL32(MODE_WSIZE, 1) | SET_VAL32(MODE_UCLKEN, 1) | SET_VAL32(MODE_PAR, dev_cfg->parity)); /* Disable all interrupts */ uart_regs->intr_disable = 0xFFFFFFFF; /* Enable TX and RX Channels */ uart_regs->ctrl = (CTRL_TXEN_MASK | CTRL_RXEN_MASK | CTRL_STPBRK_MASK); if (dev_cfg->cfg_func) { /* Setup IRQ handler */ dev_cfg->cfg_func(); } return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_CDNS_IRQ_CFG_FUNC(n) \ static void uart_cdns_irq_cfg_func_##n(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), uart_cdns_irq_handler, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } #define UART_CDNS_IRQ_CFG_FUNC_INIT(n) .cfg_func = uart_cdns_irq_cfg_func_##n, #else #define UART_CDNS_IRQ_CFG_FUNC(n) #define UART_CDNS_IRQ_CFG_FUNC_INIT(n) #endif #define UART_CDNS_INIT(n) \ static struct uart_cdns_data uart_cdns_data_##n; \ \ UART_CDNS_IRQ_CFG_FUNC(n) \ \ static const struct uart_cdns_device_config uart_cdns_dev_cfg_##n = { \ .port = DT_INST_REG_ADDR(n), \ .bdiv = DT_INST_PROP(n, bdiv), \ .sys_clk_freq = DT_INST_PROP(n, clock_frequency), \ .baud_rate = DT_INST_PROP(n, current_speed), \ .parity = CDNS_PARTITY_MAP(DT_ENUM_IDX(DT_DRV_INST(n), parity)), \ UART_CDNS_IRQ_CFG_FUNC_INIT(n)}; \ \ DEVICE_DT_INST_DEFINE(n, uart_cdns_init, NULL, &uart_cdns_data_##n, \ &uart_cdns_dev_cfg_##n, PRE_KERNEL_1, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &uart_cdns_driver_api); DT_INST_FOREACH_STATUS_OKAY(UART_CDNS_INIT) ```
/content/code_sandbox/drivers/serial/uart_cdns.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,374
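The comment in `uart_cdns_set_baudrate()` derives a rounded clock divisor, `CD = (sel_clk + ((bdiv + 1) * baud) / 2) / ((bdiv + 1) * baud)`. A small worked example of that arithmetic follows; the 50 MHz reference clock and `bdiv` value are assumptions chosen only to show the rounding.

```c
#include <stdint.h>
#include <stdio.h>

/* Same rounding-to-nearest trick used by the driver: add half the divisor
 * before the integer division so the result does not always truncate down.
 */
static uint32_t cdns_baud_div(uint32_t sel_clk, uint32_t bdiv, uint32_t baud)
{
	uint32_t denom = (bdiv + 1) * baud;

	return (sel_clk + denom / 2) / denom;
}

int main(void)
{
	/* 50000000 / ((15 + 1) * 115200) = 27.13..., so CD rounds to 27 */
	printf("CD = %u\n", cdns_baud_div(50000000u, 15u, 115200u));

	return 0;
}
```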
```unknown
# MCUXpresso SDK UART

config UART_MCUX
	bool "MCUX uart driver"
	default y
	depends on DT_HAS_NXP_KINETIS_UART_ENABLED
	depends on CLOCK_CONTROL
	select SERIAL_HAS_DRIVER
	select SERIAL_SUPPORT_INTERRUPT
	select PINCTRL
	help
	  Enable the MCUX uart driver.
```
/content/code_sandbox/drivers/serial/Kconfig.mcux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
66
```c /* */ #define DT_DRV_COMPAT atmel_sam_usart /** @file * @brief USART driver for Atmel SAM MCU family. */ #include <errno.h> #include <zephyr/sys/__assert.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <soc.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include <zephyr/irq.h> /* Device constant configuration parameters */ struct usart_sam_dev_cfg { Usart *regs; const struct atmel_sam_pmc_config clock_cfg; const struct pinctrl_dev_config *pcfg; bool hw_flow_control; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif }; /* Device run time data */ struct usart_sam_dev_data { uint32_t baud_rate; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t irq_cb; /* Interrupt Callback */ void *cb_data; /* Interrupt Callback Arg */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; static int usart_sam_poll_in(const struct device *dev, unsigned char *c) { const struct usart_sam_dev_cfg *config = dev->config; Usart * const usart = config->regs; if (!(usart->US_CSR & US_CSR_RXRDY)) { return -1; } /* got a character */ *c = (unsigned char)usart->US_RHR; return 0; } static void usart_sam_poll_out(const struct device *dev, unsigned char c) { const struct usart_sam_dev_cfg *config = dev->config; Usart * const usart = config->regs; /* Wait for transmitter to be ready */ while (!(usart->US_CSR & US_CSR_TXRDY)) { } /* send a character */ usart->US_THR = (uint32_t)c; } static int usart_sam_err_check(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; int errors = 0; if (usart->US_CSR & US_CSR_OVRE) { errors |= UART_ERROR_OVERRUN; } if (usart->US_CSR & US_CSR_PARE) { errors |= UART_ERROR_PARITY; } if (usart->US_CSR & US_CSR_FRAME) { errors |= UART_ERROR_FRAMING; } return errors; } static int usart_sam_baudrate_set(const struct device *dev, uint32_t baudrate) { struct usart_sam_dev_data *const dev_data = dev->data; const struct usart_sam_dev_cfg *const config = dev->config; volatile Usart * const usart = config->regs; uint32_t divisor; __ASSERT(baudrate, "baud rate has to be bigger than 0"); __ASSERT(SOC_ATMEL_SAM_MCK_FREQ_HZ/16U >= baudrate, "MCK frequency is too small to set required baud rate"); divisor = SOC_ATMEL_SAM_MCK_FREQ_HZ / 16U / baudrate; if (divisor > 0xFFFF) { return -EINVAL; } usart->US_BRGR = US_BRGR_CD(divisor); dev_data->baud_rate = baudrate; return 0; } static uint32_t usart_sam_cfg2sam_parity(uint8_t parity) { switch (parity) { case UART_CFG_PARITY_EVEN: return US_MR_PAR_EVEN; case UART_CFG_PARITY_ODD: return US_MR_PAR_ODD; case UART_CFG_PARITY_SPACE: return US_MR_PAR_SPACE; case UART_CFG_PARITY_MARK: return US_MR_PAR_MARK; case UART_CFG_PARITY_NONE: default: return US_MR_PAR_NO; } } static uint8_t usart_sam_get_parity(const struct device *dev) { const struct usart_sam_dev_cfg *const config = dev->config; volatile Usart * const usart = config->regs; switch (usart->US_MR & US_MR_PAR_Msk) { case US_MR_PAR_EVEN: return UART_CFG_PARITY_EVEN; case US_MR_PAR_ODD: return UART_CFG_PARITY_ODD; case US_MR_PAR_SPACE: return UART_CFG_PARITY_SPACE; case US_MR_PAR_MARK: return UART_CFG_PARITY_MARK; case US_MR_PAR_NO: default: return UART_CFG_PARITY_NONE; } } static uint32_t usart_sam_cfg2sam_stop_bits(uint8_t stop_bits) { switch (stop_bits) { case UART_CFG_STOP_BITS_1_5: return US_MR_NBSTOP_1_5_BIT; case UART_CFG_STOP_BITS_2: return US_MR_NBSTOP_2_BIT; case UART_CFG_STOP_BITS_1: default: return 
US_MR_NBSTOP_1_BIT; } } static uint8_t usart_sam_get_stop_bits(const struct device *dev) { const struct usart_sam_dev_cfg *const config = dev->config; volatile Usart * const usart = config->regs; switch (usart->US_MR & US_MR_NBSTOP_Msk) { case US_MR_NBSTOP_1_5_BIT: return UART_CFG_STOP_BITS_1_5; case US_MR_NBSTOP_2_BIT: return UART_CFG_STOP_BITS_2; case US_MR_NBSTOP_1_BIT: default: return UART_CFG_STOP_BITS_1; } } static uint32_t usart_sam_cfg2sam_data_bits(uint8_t data_bits) { switch (data_bits) { case UART_CFG_DATA_BITS_5: return US_MR_CHRL_5_BIT; case UART_CFG_DATA_BITS_6: return US_MR_CHRL_6_BIT; case UART_CFG_DATA_BITS_7: return US_MR_CHRL_7_BIT; case UART_CFG_DATA_BITS_8: default: return US_MR_CHRL_8_BIT; } } static uint8_t usart_sam_get_data_bits(const struct device *dev) { const struct usart_sam_dev_cfg *const config = dev->config; volatile Usart * const usart = config->regs; switch (usart->US_MR & US_MR_CHRL_Msk) { case US_MR_CHRL_5_BIT: return UART_CFG_DATA_BITS_5; case US_MR_CHRL_6_BIT: return UART_CFG_DATA_BITS_6; case US_MR_CHRL_7_BIT: return UART_CFG_DATA_BITS_7; case US_MR_CHRL_8_BIT: default: return UART_CFG_DATA_BITS_8; } } static uint32_t usart_sam_cfg2sam_flow_ctrl(uint8_t flow_ctrl) { switch (flow_ctrl) { case UART_CFG_FLOW_CTRL_RTS_CTS: return US_MR_USART_MODE_HW_HANDSHAKING; case UART_CFG_FLOW_CTRL_NONE: default: return US_MR_USART_MODE_NORMAL; } } static uint8_t usart_sam_get_flow_ctrl(const struct device *dev) { const struct usart_sam_dev_cfg *const config = dev->config; volatile Usart * const usart = config->regs; switch (usart->US_MR & US_MR_USART_MODE_Msk) { case US_MR_USART_MODE_HW_HANDSHAKING: return UART_CFG_FLOW_CTRL_RTS_CTS; case US_MR_USART_MODE_NORMAL: default: return UART_CFG_FLOW_CTRL_NONE; } } static int usart_sam_configure(const struct device *dev, const struct uart_config *cfg) { int retval; const struct usart_sam_dev_cfg *const config = dev->config; volatile Usart * const usart = config->regs; /* Driver doesn't support 9 data bits, 0.5 stop bits, or DTR DSR flow control */ if (cfg->data_bits == UART_CFG_DATA_BITS_9 || cfg->stop_bits == UART_CFG_STOP_BITS_0_5 || cfg->flow_ctrl == UART_CFG_FLOW_CTRL_DTR_DSR) { return -ENOTSUP; } /* Reset and disable USART */ usart->US_CR = US_CR_RSTRX | US_CR_RSTTX | US_CR_RXDIS | US_CR_TXDIS | US_CR_RSTSTA; /* normal UART mode, baud rate driven by peripheral clock, all * other values chosen by config */ usart->US_MR = US_MR_CHMODE_NORMAL | US_MR_USCLKS_MCK | usart_sam_cfg2sam_parity(cfg->parity) | usart_sam_cfg2sam_stop_bits(cfg->stop_bits) | usart_sam_cfg2sam_data_bits(cfg->data_bits) | usart_sam_cfg2sam_flow_ctrl(cfg->flow_ctrl); /* Set baud rate */ retval = usart_sam_baudrate_set(dev, cfg->baudrate); if (retval != 0) { return retval; } /* Enable receiver and transmitter */ usart->US_CR = US_CR_RXEN | US_CR_TXEN; return 0; } static int usart_sam_config_get(const struct device *dev, struct uart_config *cfg) { struct usart_sam_dev_data *const dev_data = dev->data; cfg->baudrate = dev_data->baud_rate; cfg->parity = usart_sam_get_parity(dev); cfg->stop_bits = usart_sam_get_stop_bits(dev); cfg->data_bits = usart_sam_get_data_bits(dev); cfg->flow_ctrl = usart_sam_get_flow_ctrl(dev); return 0; } #if CONFIG_UART_INTERRUPT_DRIVEN static int usart_sam_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; /* Wait for transmitter to be ready. 
*/ while ((usart->US_CSR & US_CSR_TXRDY) == 0) { } usart->US_THR = *tx_data; return 1; } static int usart_sam_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; int bytes_read; bytes_read = 0; while (bytes_read < size) { if (usart->US_CSR & US_CSR_RXRDY) { rx_data[bytes_read] = usart->US_RHR; bytes_read++; } else { break; } } return bytes_read; } static void usart_sam_irq_tx_enable(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; usart->US_IER = US_IER_TXRDY; } static void usart_sam_irq_tx_disable(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; usart->US_IDR = US_IDR_TXRDY; } static int usart_sam_irq_tx_ready(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; /* Check that the transmitter is ready but only * return true if the interrupt is also enabled */ return (usart->US_CSR & US_CSR_TXRDY && usart->US_IMR & US_IMR_TXRDY); } static void usart_sam_irq_rx_enable(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; usart->US_IER = US_IER_RXRDY; } static void usart_sam_irq_rx_disable(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; usart->US_IDR = US_IDR_RXRDY; } static int usart_sam_irq_tx_complete(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; return (usart->US_CSR & US_CSR_TXRDY && usart->US_CSR & US_CSR_TXEMPTY); } static int usart_sam_irq_rx_ready(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; return (usart->US_CSR & US_CSR_RXRDY); } static void usart_sam_irq_err_enable(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; usart->US_IER = US_IER_OVRE | US_IER_FRAME | US_IER_PARE; } static void usart_sam_irq_err_disable(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; usart->US_IDR = US_IDR_OVRE | US_IDR_FRAME | US_IDR_PARE; } static int usart_sam_irq_is_pending(const struct device *dev) { const struct usart_sam_dev_cfg *config = dev->config; volatile Usart * const usart = config->regs; return (usart->US_IMR & (US_IMR_TXRDY | US_IMR_RXRDY)) & (usart->US_CSR & (US_CSR_TXRDY | US_CSR_RXRDY)); } static int usart_sam_irq_update(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void usart_sam_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct usart_sam_dev_data *const dev_data = dev->data; dev_data->irq_cb = cb; dev_data->cb_data = cb_data; } static void usart_sam_isr(const struct device *dev) { struct usart_sam_dev_data *const dev_data = dev->data; if (dev_data->irq_cb) { dev_data->irq_cb(dev, dev_data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int usart_sam_init(const struct device *dev) { int retval; const struct usart_sam_dev_cfg *const cfg = dev->config; struct usart_sam_dev_data *const dev_data = dev->data; Usart * const usart = cfg->regs; /* Enable USART clock in PMC */ 
(void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&cfg->clock_cfg); /* Connect pins to the peripheral */ retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (retval < 0) { return retval; } /* Disable Interrupts */ usart->US_IDR = 0xFFFFFFFF; #ifdef CONFIG_UART_INTERRUPT_DRIVEN cfg->irq_config_func(dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ struct uart_config uart_config = { .baudrate = dev_data->baud_rate, .parity = UART_CFG_PARITY_NONE, .stop_bits = UART_CFG_STOP_BITS_1, .data_bits = UART_CFG_DATA_BITS_8, .flow_ctrl = UART_CFG_FLOW_CTRL_NONE, }; if (cfg->hw_flow_control) { uart_config.flow_ctrl = UART_CFG_FLOW_CTRL_RTS_CTS; } return usart_sam_configure(dev, &uart_config); } static const struct uart_driver_api usart_sam_driver_api = { .poll_in = usart_sam_poll_in, .poll_out = usart_sam_poll_out, .err_check = usart_sam_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = usart_sam_configure, .config_get = usart_sam_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = usart_sam_fifo_fill, .fifo_read = usart_sam_fifo_read, .irq_tx_enable = usart_sam_irq_tx_enable, .irq_tx_disable = usart_sam_irq_tx_disable, .irq_tx_ready = usart_sam_irq_tx_ready, .irq_rx_enable = usart_sam_irq_rx_enable, .irq_rx_disable = usart_sam_irq_rx_disable, .irq_tx_complete = usart_sam_irq_tx_complete, .irq_rx_ready = usart_sam_irq_rx_ready, .irq_err_enable = usart_sam_irq_err_enable, .irq_err_disable = usart_sam_irq_err_disable, .irq_is_pending = usart_sam_irq_is_pending, .irq_update = usart_sam_irq_update, .irq_callback_set = usart_sam_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #define USART_SAM_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct usart_sam_dev_cfg usart##n##_sam_config = { \ .regs = (Usart *)DT_INST_REG_ADDR(n), \ .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(n), \ .hw_flow_control = DT_INST_PROP(n, hw_flow_control), \ \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ \ IRQ_FUNC_INIT \ } #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define USART_SAM_CONFIG_FUNC(n) \ static void usart##n##_sam_irq_config_func(const struct device *port) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ usart_sam_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } #define USART_SAM_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = usart##n##_sam_irq_config_func #define USART_SAM_INIT_CFG(n) \ USART_SAM_DECLARE_CFG(n, USART_SAM_IRQ_CFG_FUNC_INIT(n)) #else #define USART_SAM_CONFIG_FUNC(n) #define USART_SAM_IRQ_CFG_FUNC_INIT #define USART_SAM_INIT_CFG(n) \ USART_SAM_DECLARE_CFG(n, USART_SAM_IRQ_CFG_FUNC_INIT) #endif #define USART_SAM_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static struct usart_sam_dev_data usart##n##_sam_data = { \ .baud_rate = DT_INST_PROP(n, current_speed), \ }; \ \ static const struct usart_sam_dev_cfg usart##n##_sam_config; \ \ DEVICE_DT_INST_DEFINE(n, \ usart_sam_init, NULL, \ &usart##n##_sam_data, \ &usart##n##_sam_config, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &usart_sam_driver_api); \ \ USART_SAM_CONFIG_FUNC(n) \ \ USART_SAM_INIT_CFG(n); DT_INST_FOREACH_STATUS_OKAY(USART_SAM_INIT) ```
/content/code_sandbox/drivers/serial/usart_sam.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,257
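Because the SAM USART driver fills in the `configure`/`config_get` hooks, its settings can be changed at runtime when `CONFIG_UART_USE_RUNTIME_CONFIGURE` is enabled. A hedged sketch of such a call from application code:

```c
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

/* Reconfigure a port to 115200 8N1 without flow control; starting from
 * uart_config_get() keeps any field we do not touch unchanged.
 */
int switch_to_115200_8n1(const struct device *uart)
{
	struct uart_config cfg;
	int err;

	err = uart_config_get(uart, &cfg);
	if (err) {
		return err;
	}

	cfg.baudrate = 115200;
	cfg.parity = UART_CFG_PARITY_NONE;
	cfg.stop_bits = UART_CFG_STOP_BITS_1;
	cfg.data_bits = UART_CFG_DATA_BITS_8;
	cfg.flow_ctrl = UART_CFG_FLOW_CTRL_NONE;

	return uart_configure(uart, &cfg);
}
```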
```c
/*
 *
 */

#define DT_DRV_COMPAT efinix_sapphire_uart0

#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/types.h>

#define UART_IRQ        DT_INST_IRQN(0)
#define UART0_BASE_ADDR DT_INST_REG_ADDR(0)

#define BSP_UART_DATA          0x00
#define BSP_UART_STATUS        0x04
#define BSP_UART_CLOCK_DIVIDER 0x08
#define BSP_UART_FRAME_CONFIG  0x0C

#define BSP_UART_WRITE_AVAILABILITY_MASK GENMASK(23, 16)
#define BSP_UART_READ_OCCUPANCY_MASK     GENMASK(31, 24)

#define UART0_DATA_REG_ADDR   UART0_BASE_ADDR + BSP_UART_DATA
#define UART0_STATUS_REG_ADDR UART0_BASE_ADDR + BSP_UART_STATUS
#define UART0_CLOCK_REG_ADDR  UART0_BASE_ADDR + BSP_UART_CLOCK_DIVIDER
#define UART0_FRAME_REG_ADDR  UART0_BASE_ADDR + BSP_UART_FRAME_CONFIG

#define UART0_SAMPLE_PER_BAUD 8
#define UART0_PARITY          0 /* Off */
#define UART0_STOP            0 /* 1 stop bit */

struct uart_efinix_sapphire_config {
	uint32_t baudrate;
};

static void uart_efinix_sapphire_poll_out(const struct device *dev, unsigned char c)
{
	/* uart_writeAvailability */
	while ((sys_read32(UART0_STATUS_REG_ADDR) & BSP_UART_WRITE_AVAILABILITY_MASK) == 0) {
	}

	sys_write8(c, UART0_DATA_REG_ADDR);
}

static int uart_efinix_sapphire_poll_in(const struct device *dev, unsigned char *c)
{
	if ((sys_read32(UART0_STATUS_REG_ADDR) & BSP_UART_READ_OCCUPANCY_MASK) != 0) {
		*c = (unsigned char)sys_read8(UART0_DATA_REG_ADDR);
		return 0;
	}

	return -1;
}

static const struct uart_driver_api uart_efinix_sapphire_api = {
	.poll_in = uart_efinix_sapphire_poll_in,
	.poll_out = uart_efinix_sapphire_poll_out,
	.err_check = NULL,
};

static const struct uart_efinix_sapphire_config uart_efinix_sapphire_cfg_0 = {
	.baudrate = DT_INST_PROP(0, current_speed),
};

static int uart_efinix_sapphire_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	uint32_t prescaler = ((CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC /
			       (uart_efinix_sapphire_cfg_0.baudrate * UART0_SAMPLE_PER_BAUD)) -
			      1) &
			     0xFFFFF;

	sys_write32(prescaler, UART0_CLOCK_REG_ADDR);

	/* 8 data bits, no parity, 1 stop bit */
	uint32_t frame_config = (UART0_SAMPLE_PER_BAUD - 1) | UART0_PARITY << 8 | UART0_STOP << 16;

	sys_write32(frame_config, UART0_FRAME_REG_ADDR);

	return 0;
}

/* Device tree instance 0 init */
DEVICE_DT_INST_DEFINE(0, uart_efinix_sapphire_init, NULL, NULL, &uart_efinix_sapphire_cfg_0,
		      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY,
		      (void *)&uart_efinix_sapphire_api);
```
/content/code_sandbox/drivers/serial/uart_efinix_sapphire.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
714
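The Sapphire driver implements only the polling API (`poll_in`/`poll_out`, no interrupts), so callers have to busy-poll. A minimal echo-loop sketch under that assumption:

```c
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

/* Echo every received character back out; uart_poll_in() returns 0 when a
 * character was read and -1 when there is nothing to read.
 */
void echo_forever(const struct device *uart)
{
	unsigned char c;

	for (;;) {
		if (uart_poll_in(uart, &c) == 0) {
			uart_poll_out(uart, c);
		}
	}
}
```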
```c /* * */ #define DT_DRV_COMPAT renesas_ra_sci_uart #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #include <soc.h> #include "r_sci_uart.h" #include "r_dtc.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ra_sci_uart); #define SCI_UART_SSR_FIFO_DR_RDF (R_SCI0_SSR_FIFO_DR_Msk | R_SCI0_SSR_FIFO_RDF_Msk) #define SCI_UART_SSR_FIFO_TDFE_TEND (R_SCI0_SSR_FIFO_TDFE_Msk | R_SCI0_SSR_FIFO_TEND_Msk) #define SCI_UART_SSR_TDRE_TEND (R_SCI0_SSR_TDRE_Msk | R_SCI0_SSR_TEND_Msk) #define SCI_UART_SSR_ERR_MSK (R_SCI0_SSR_ORER_Msk | R_SCI0_SSR_FER_Msk | R_SCI0_SSR_PER_Msk) #define SCI_UART_SSR_FIFO_ERR_MSK \ (R_SCI0_SSR_FIFO_ORER_Msk | R_SCI0_SSR_FIFO_FER_Msk | R_SCI0_SSR_FIFO_PER_Msk) #if defined(CONFIG_UART_ASYNC_API) void sci_uart_rxi_isr(void); void sci_uart_txi_isr(void); void sci_uart_tei_isr(void); void sci_uart_eri_isr(void); #endif struct uart_ra_sci_config { const struct pinctrl_dev_config *pcfg; R_SCI0_Type * const regs; }; struct uart_ra_sci_data { const struct device *dev; struct st_sci_uart_instance_ctrl sci; struct uart_config uart_config; struct st_uart_cfg fsp_config; struct st_sci_uart_extended_cfg fsp_config_extend; struct st_baud_setting_t fsp_baud_setting; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) uart_irq_callback_user_data_t user_cb; void *user_cb_data; uint32_t ssr; #endif #if defined(CONFIG_UART_ASYNC_API) uart_callback_t async_user_cb; void *async_user_cb_data; struct k_work_delayable rx_timeout_work; size_t rx_timeout; size_t rx_buf_len; size_t rx_buf_offset; size_t rx_buf_cap; uint8_t *rx_buffer; size_t rx_next_buf_cap; uint8_t *rx_next_buf; struct st_transfer_instance rx_transfer; struct st_dtc_instance_ctrl rx_transfer_ctrl; struct st_transfer_info rx_transfer_info; struct st_transfer_cfg rx_transfer_cfg; struct st_dtc_extended_cfg rx_transfer_cfg_extend; struct k_work_delayable tx_timeout; size_t tx_buf_cap; struct st_transfer_instance tx_transfer; struct st_dtc_instance_ctrl tx_transfer_ctrl; struct st_transfer_info tx_transfer_info; struct st_transfer_cfg tx_transfer_cfg; struct st_dtc_extended_cfg tx_transfer_cfg_extend; #endif }; static int uart_ra_sci_poll_in(const struct device *dev, unsigned char *c) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; if (IS_ENABLED(CONFIG_UART_ASYNC_API) && cfg->regs->SCR_b.RIE) { /* This function cannot be used if async reception was enabled */ return -EBUSY; } if (IS_ENABLED(CONFIG_UART_RA_SCI_UART_FIFO_ENABLE) && data->sci.fifo_depth > 0 ? cfg->regs->FDR_b.R == 0U : cfg->regs->SSR_b.RDRF == 0U) { /* There are no characters available to read. */ return -1; } /* got a character */ *c = IS_ENABLED(CONFIG_UART_RA_SCI_UART_FIFO_ENABLE) && data->sci.fifo_depth > 0 ? 
cfg->regs->FRDRL : cfg->regs->RDR; return 0; } static void uart_ra_sci_poll_out(const struct device *dev, unsigned char c) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth > 0) { while (cfg->regs->FDR_b.T > 0x8) { } cfg->regs->FTDRL = c; } else #endif { while (cfg->regs->SSR_b.TDRE == 0U) { } cfg->regs->TDR = c; } } static int uart_ra_sci_err_check(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; int errors = 0; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth > 0) { const uint8_t status = cfg->regs->SSR_FIFO; uint8_t ssr_fifo = 0; if (status & R_SCI0_SSR_FIFO_ORER_Msk) { errors |= UART_ERROR_OVERRUN; ssr_fifo |= R_SCI0_SSR_FIFO_ORER_Msk; } if (status & R_SCI0_SSR_FIFO_PER_Msk) { errors |= UART_ERROR_PARITY; ssr_fifo |= R_SCI0_SSR_FIFO_PER_Msk; } if (status & R_SCI0_SSR_FIFO_FER_Msk) { errors |= UART_ERROR_FRAMING; ssr_fifo |= R_SCI0_SSR_FIFO_FER_Msk; } cfg->regs->SSR_FIFO &= ~ssr_fifo; } else #endif { const uint8_t status = cfg->regs->SSR; uint8_t ssr = 0; if (status & R_SCI0_SSR_ORER_Msk) { errors |= UART_ERROR_OVERRUN; ssr |= R_SCI0_SSR_ORER_Msk; } if (status & R_SCI0_SSR_PER_Msk) { errors |= UART_ERROR_PARITY; ssr |= R_SCI0_SSR_PER_Msk; } if (status & R_SCI0_SSR_FER_Msk) { errors |= UART_ERROR_FRAMING; ssr |= R_SCI0_SSR_FER_Msk; } cfg->regs->SSR &= ~ssr; } return errors; } static int uart_ra_sci_apply_config(const struct uart_config *config, struct st_uart_cfg *fsp_config, struct st_sci_uart_extended_cfg *fsp_config_extend, struct st_baud_setting_t *fsp_baud_setting) { fsp_err_t fsp_err; fsp_err = R_SCI_UART_BaudCalculate(config->baudrate, true, 5000, fsp_baud_setting); if (fsp_err != FSP_SUCCESS) { LOG_DBG("drivers: uart: baud calculate error"); return -EINVAL; } switch (config->parity) { case UART_CFG_PARITY_NONE: fsp_config->parity = UART_PARITY_OFF; break; case UART_CFG_PARITY_ODD: fsp_config->parity = UART_PARITY_ODD; break; case UART_CFG_PARITY_EVEN: fsp_config->parity = UART_PARITY_EVEN; break; case UART_CFG_PARITY_MARK: return -ENOTSUP; case UART_CFG_PARITY_SPACE: return -ENOTSUP; default: return -EINVAL; } switch (config->stop_bits) { case UART_CFG_STOP_BITS_0_5: return -ENOTSUP; case UART_CFG_STOP_BITS_1: fsp_config->stop_bits = UART_STOP_BITS_1; break; case UART_CFG_STOP_BITS_1_5: return -ENOTSUP; case UART_CFG_STOP_BITS_2: fsp_config->stop_bits = UART_STOP_BITS_2; break; default: return -EINVAL; } switch (config->data_bits) { case UART_CFG_DATA_BITS_5: return -ENOTSUP; case UART_CFG_DATA_BITS_6: return -ENOTSUP; case UART_CFG_DATA_BITS_7: fsp_config->data_bits = UART_DATA_BITS_7; break; case UART_CFG_DATA_BITS_8: fsp_config->data_bits = UART_DATA_BITS_8; break; case UART_CFG_DATA_BITS_9: fsp_config->data_bits = UART_DATA_BITS_9; break; default: return -EINVAL; } #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE fsp_config_extend->rx_fifo_trigger = 0x8; #endif switch (config->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: fsp_config_extend->flow_control = 0; fsp_config_extend->rs485_setting.enable = false; break; case UART_CFG_FLOW_CTRL_RTS_CTS: fsp_config_extend->flow_control = SCI_UART_FLOW_CONTROL_HARDWARE_CTSRTS; fsp_config_extend->rs485_setting.enable = false; break; case UART_CFG_FLOW_CTRL_DTR_DSR: return -ENOTSUP; case UART_CFG_FLOW_CTRL_RS485: /* TODO: implement this config */ return -ENOTSUP; default: return -EINVAL; } return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static 
int uart_ra_sci_configure(const struct device *dev, const struct uart_config *config) { int err; fsp_err_t fsp_err; struct uart_ra_sci_data *data = dev->data; err = uart_ra_sci_apply_config(config, &data->fsp_config, &data->fsp_config_extend, &data->fsp_baud_setting); if (err) { return err; } fsp_err = R_SCI_UART_Close(&data->sci); fsp_err |= R_SCI_UART_Open(&data->sci, &data->fsp_config); if (fsp_err != FSP_SUCCESS) { LOG_DBG("drivers: serial: uart configure failed"); return -EIO; } memcpy(&data->uart_config, config, sizeof(*config)); return 0; } static int uart_ra_sci_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_ra_sci_data *data = dev->data; memcpy(cfg, &data->uart_config, sizeof(*cfg)); return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_ra_sci_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; uint8_t num_tx = 0U; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { while ((size - num_tx > 0) && cfg->regs->FDR_b.T < data->sci.fifo_depth) { /* Send a character (8bit , parity none) */ cfg->regs->FTDRL = tx_data[num_tx++]; } cfg->regs->SSR_FIFO &= (uint8_t)~SCI_UART_SSR_FIFO_TDFE_TEND; } else #endif { if (size > 0 && cfg->regs->SSR_b.TDRE) { /* Send a character (8bit , parity none) */ cfg->regs->TDR = tx_data[num_tx++]; } }; return num_tx; } static int uart_ra_sci_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; uint8_t num_rx = 0U; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { while ((size - num_rx > 0) && cfg->regs->FDR_b.R > 0) { /* Receive a character (8bit , parity none) */ rx_data[num_rx++] = cfg->regs->FRDRL; } cfg->regs->SSR_FIFO &= (uint8_t)~SCI_UART_SSR_FIFO_DR_RDF; } else #endif { if (size > 0 && cfg->regs->SSR_b.RDRF) { /* Receive a character (8bit , parity none) */ rx_data[num_rx++] = cfg->regs->RDR; } cfg->regs->SSR &= (uint8_t)~R_SCI0_SSR_RDRF_Msk; } return num_rx; } static void uart_ra_sci_irq_tx_enable(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { cfg->regs->SSR_FIFO &= (uint8_t)~SCI_UART_SSR_FIFO_TDFE_TEND; } else #endif { cfg->regs->SSR = (uint8_t)~SCI_UART_SSR_TDRE_TEND; } cfg->regs->SCR |= (R_SCI0_SCR_TIE_Msk | R_SCI0_SCR_TEIE_Msk); } static void uart_ra_sci_irq_tx_disable(const struct device *dev) { const struct uart_ra_sci_config *cfg = dev->config; cfg->regs->SCR &= ~(R_SCI0_SCR_TIE_Msk | R_SCI0_SCR_TEIE_Msk); } static int uart_ra_sci_irq_tx_ready(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; int ret; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { ret = (cfg->regs->SCR_b.TIE == 1U) && (data->ssr & R_SCI0_SSR_FIFO_TDFE_Msk); } else #endif { ret = (cfg->regs->SCR_b.TIE == 1U) && (data->ssr & R_SCI0_SSR_TDRE_Msk); } return ret; } static int uart_ra_sci_irq_tx_complete(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; return (cfg->regs->SCR_b.TEIE == 1U) && (data->ssr & BIT(R_SCI0_SSR_TEND_Pos)); } static void uart_ra_sci_irq_rx_enable(const struct device *dev) { struct 
uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { cfg->regs->SSR_FIFO &= (uint8_t) ~(SCI_UART_SSR_FIFO_DR_RDF); } else #endif { cfg->regs->SSR_b.RDRF = 0U; } cfg->regs->SCR_b.RIE = 1U; } static void uart_ra_sci_irq_rx_disable(const struct device *dev) { const struct uart_ra_sci_config *cfg = dev->config; cfg->regs->SCR_b.RIE = 0U; } static int uart_ra_sci_irq_rx_ready(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; int ret; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { ret = (cfg->regs->SCR_b.RIE == 1U) && (data->ssr & SCI_UART_SSR_FIFO_DR_RDF); } else #endif { ret = (cfg->regs->SCR_b.RIE == 1U) && (data->ssr & R_SCI0_SSR_RDRF_Msk); } return ret; } static void uart_ra_sci_irq_err_enable(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; NVIC_EnableIRQ(data->fsp_config.eri_irq); } static void uart_ra_sci_irq_err_disable(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; NVIC_DisableIRQ(data->fsp_config.eri_irq); } static int uart_ra_sci_irq_is_pending(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; uint8_t scr; uint8_t ssr; int ret; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { scr = cfg->regs->SCR; ssr = cfg->regs->SSR_FIFO; ret = ((scr & R_SCI0_SCR_TIE_Msk) && (ssr & (R_SCI0_SSR_FIFO_TEND_Msk | R_SCI0_SSR_FIFO_TDFE_Msk))) || ((scr & R_SCI0_SCR_RIE_Msk) && ((ssr & (R_SCI0_SSR_FIFO_RDF_Msk | R_SCI0_SSR_FIFO_DR_Msk | R_SCI0_SSR_FIFO_FER_Msk | R_SCI0_SSR_FIFO_ORER_Msk | R_SCI0_SSR_FIFO_PER_Msk)))); } else #endif { scr = cfg->regs->SCR; ssr = cfg->regs->SSR; ret = ((scr & R_SCI0_SCR_TIE_Msk) && (ssr & (R_SCI0_SSR_TEND_Msk | R_SCI0_SSR_TDRE_Msk))) || ((scr & R_SCI0_SCR_RIE_Msk) && (ssr & (R_SCI0_SSR_RDRF_Msk | R_SCI0_SSR_PER_Msk | R_SCI0_SSR_FER_Msk | R_SCI0_SSR_ORER_Msk))); } return ret; } static int uart_ra_sci_irq_update(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { data->ssr = cfg->regs->SSR_FIFO; uint8_t ssr = data->ssr ^ (R_SCI0_SSR_FIFO_ORER_Msk | R_SCI0_SSR_FIFO_FER_Msk | R_SCI0_SSR_FIFO_PER_Msk); cfg->regs->SSR_FIFO &= ssr; } else #endif { data->ssr = cfg->regs->SSR; uint8_t ssr = data->ssr ^ (R_SCI0_SSR_ORER_Msk | R_SCI0_SSR_FER_Msk | R_SCI0_SSR_PER_Msk); cfg->regs->SSR_FIFO &= ssr; } return 1; } static void uart_ra_sci_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_ra_sci_data *data = dev->data; data->user_cb = cb; data->user_cb_data = cb_data; #if CONFIG_UART_EXCLUSIVE_API_CALLBACKS data->async_user_cb = NULL; data->async_user_cb_data = NULL; #endif } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API static int fsp_err_to_errno(fsp_err_t fsp_err) { switch (fsp_err) { case FSP_ERR_INVALID_ARGUMENT: return -EINVAL; case FSP_ERR_NOT_OPEN: return -EIO; case FSP_ERR_IN_USE: return -EBUSY; case FSP_ERR_UNSUPPORTED: return -ENOTSUP; case 0: return 0; default: return -EINVAL; } } static int uart_ra_sci_async_callback_set(const struct device *dev, uart_callback_t cb, void *cb_data) { struct uart_ra_sci_data *data = dev->data; data->async_user_cb = cb; data->async_user_cb_data = cb_data; #if 
CONFIG_UART_EXCLUSIVE_API_CALLBACKS data->user_cb = NULL; data->user_cb_data = NULL; #endif return 0; } static int uart_ra_sci_async_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) { struct uart_ra_sci_data *data = dev->data; int err; err = fsp_err_to_errno(R_SCI_UART_Write(&data->sci, buf, len)); if (err) { return err; } data->tx_buf_cap = len; if (timeout != SYS_FOREVER_US && timeout != 0) { k_work_reschedule(&data->tx_timeout, Z_TIMEOUT_US(timeout)); } return 0; } static inline void async_user_callback(const struct device *dev, struct uart_event *event) { struct uart_ra_sci_data *data = dev->data; if (data->async_user_cb) { data->async_user_cb(dev, event, data->async_user_cb_data); } } static inline void async_rx_release_buf(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; struct uart_event event = { .type = UART_RX_BUF_RELEASED, .data.rx.buf = (uint8_t *)data->rx_buffer, }; async_user_callback(dev, &event); data->rx_buffer = NULL; data->rx_buf_offset = 0; data->rx_buf_len = 0; data->rx_buf_cap = 0; } static inline void async_rx_release_next_buf(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; struct uart_event event = { .type = UART_RX_BUF_RELEASED, .data.rx.buf = (uint8_t *)data->rx_next_buf, }; async_user_callback(dev, &event); data->rx_next_buf = NULL; } static inline void async_rx_req_buf(const struct device *dev) { struct uart_event event = { .type = UART_RX_BUF_REQUEST, }; async_user_callback(dev, &event); } static inline void async_rx_disable(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; struct uart_event event = { .type = UART_RX_DISABLED, }; async_user_callback(dev, &event); /* Disable the RXI request and clear the status flag to be ready for the next reception */ cfg->regs->SCR_b.RIE = 0; #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth != 0) { cfg->regs->SSR_FIFO &= (uint8_t)~SCI_UART_SSR_FIFO_DR_RDF; } else #endif { cfg->regs->SSR_b.RDRF = 0; } } static inline void async_rx_ready(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; if (!data->rx_buf_len) { return; } struct uart_event event = { .type = UART_RX_RDY, .data.rx.buf = (uint8_t *)data->rx_buffer, .data.rx.offset = data->rx_buf_offset, .data.rx.len = data->rx_buf_len, }; async_user_callback(data->dev, &event); data->rx_buf_offset += data->rx_buf_len; data->rx_buf_len = 0; } static inline void disable_tx(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; /* Transmit interrupts must be disabled to start with. */ cfg->regs->SCR &= (uint8_t) ~(R_SCI0_SCR_TIE_Msk | R_SCI0_SCR_TEIE_Msk); /* * Make sure no transmission is in progress. Setting CCR0_b.TE to 0 when CSR_b.TEND * is 0 causes SCI peripheral to work abnormally. */ while (IS_ENABLED(CONFIG_UART_RA_SCI_UART_FIFO_ENABLE) && data->sci.fifo_depth ? 
cfg->regs->SSR_FIFO_b.TEND != 1U : cfg->regs->SSR_b.TEND != 1U) { } cfg->regs->SCR_b.TE = 0; } static inline void enable_tx(const struct device *dev) { const struct uart_ra_sci_config *cfg = dev->config; cfg->regs->SCR_b.TE = 1; } static int uart_ra_sci_async_tx_abort(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; int err = 0; if (!data->sci.p_tx_src) { return -EFAULT; } disable_tx(dev); if (FSP_SUCCESS != R_SCI_UART_Abort(&data->sci, UART_DIR_TX)) { LOG_DBG("drivers: serial: uart abort tx failed"); err = -EIO; goto unlock; } transfer_properties_t tx_properties = {0}; if (FSP_SUCCESS != R_DTC_InfoGet(data->tx_transfer.p_ctrl, &tx_properties)) { LOG_DBG("drivers: serial: uart abort tx failed"); err = -EIO; goto unlock; } struct uart_event event = { .type = UART_TX_ABORTED, .data.tx.buf = (uint8_t *)data->sci.p_tx_src, .data.tx.len = data->tx_buf_cap - tx_properties.transfer_length_remaining, }; async_user_callback(dev, &event); k_work_cancel_delayable(&data->tx_timeout); unlock: enable_tx(dev); return err; } static int uart_ra_sci_async_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) { struct uart_ra_sci_data *data = dev->data; const struct uart_ra_sci_config *cfg = dev->config; int err = 0; unsigned int key = irq_lock(); if (data->rx_buffer) { err = -EAGAIN; goto unlock; } #if CONFIG_UART_RA_SCI_UART_FIFO_ENABLE if (data->sci.fifo_depth) { cfg->regs->SSR_FIFO &= (uint8_t) ~(SCI_UART_SSR_FIFO_ERR_MSK); } else #endif { cfg->regs->SSR = (uint8_t)~SCI_UART_SSR_ERR_MSK; } err = fsp_err_to_errno(R_SCI_UART_Read(&data->sci, buf, len)); if (err) { goto unlock; } data->rx_timeout = timeout; data->rx_buffer = buf; data->rx_buf_cap = len; data->rx_buf_len = 0; data->rx_buf_offset = 0; /* Call buffer request user callback */ async_rx_req_buf(dev); cfg->regs->SCR_b.RIE = 1; unlock: irq_unlock(key); return err; } static int uart_ra_sci_async_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { struct uart_ra_sci_data *data = dev->data; data->rx_next_buf = buf; data->rx_next_buf_cap = len; return 0; } static int uart_ra_sci_async_rx_disable(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; uint32_t remaining_byte = 0; int err = 0; unsigned int key = irq_lock(); if (!data->rx_buffer) { err = -EAGAIN; goto unlock; } k_work_cancel_delayable(&data->rx_timeout_work); if (FSP_SUCCESS != R_SCI_UART_ReadStop(&data->sci, &remaining_byte)) { LOG_DBG("drivers: serial: uart stop reading failed"); err = -EIO; goto unlock; } async_rx_ready(dev); async_rx_release_buf(dev); async_rx_release_next_buf(dev); async_rx_disable(dev); unlock: irq_unlock(key); return err; } static inline void async_evt_rx_err(const struct device *dev, enum uart_rx_stop_reason reason) { struct uart_ra_sci_data *data = dev->data; k_work_cancel_delayable(&data->rx_timeout_work); struct uart_event event = { .type = UART_RX_STOPPED, .data.rx_stop.reason = reason, .data.rx_stop.data.buf = (uint8_t *)data->sci.p_rx_dest, .data.rx_stop.data.offset = 0, .data.rx_stop.data.len = data->rx_buf_cap - data->rx_buf_offset - data->sci.rx_dest_bytes, }; async_user_callback(dev, &event); } static inline void async_evt_rx_complete(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; unsigned int key = irq_lock(); async_rx_ready(dev); async_rx_release_buf(dev); if (data->rx_next_buf) { data->rx_buffer = data->rx_next_buf; data->rx_buf_offset = 0; data->rx_buf_cap = data->rx_next_buf_cap; data->rx_next_buf = NULL; R_SCI_UART_Read(&data->sci, data->rx_buffer, 
data->rx_buf_cap); async_rx_req_buf(dev); } else { async_rx_disable(dev); } irq_unlock(key); } static inline void async_evt_tx_done(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; k_work_cancel_delayable(&data->tx_timeout); struct uart_event event = { .type = UART_TX_DONE, .data.tx.buf = (uint8_t *)data->sci.p_tx_src, .data.tx.len = data->tx_buf_cap, }; async_user_callback(dev, &event); } static void uart_ra_sci_callback_adapter(struct st_uart_callback_arg *fsp_args) { const struct device *dev = fsp_args->p_context; switch (fsp_args->event) { case UART_EVENT_TX_COMPLETE: return async_evt_tx_done(dev); case UART_EVENT_RX_COMPLETE: async_evt_rx_complete(dev); case UART_EVENT_ERR_PARITY: return async_evt_rx_err(dev, UART_ERROR_PARITY); case UART_EVENT_ERR_FRAMING: return async_evt_rx_err(dev, UART_ERROR_FRAMING); case UART_EVENT_ERR_OVERFLOW: return async_evt_rx_err(dev, UART_ERROR_OVERRUN); case UART_EVENT_BREAK_DETECT: return async_evt_rx_err(dev, UART_BREAK); case UART_EVENT_TX_DATA_EMPTY: case UART_EVENT_RX_CHAR: break; } } static void uart_ra_sci_rx_timeout_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_ra_sci_data *data = CONTAINER_OF(dwork, struct uart_ra_sci_data, rx_timeout_work); unsigned int key = irq_lock(); async_rx_ready(data->dev); irq_unlock(key); } static void uart_ra_sci_tx_timeout_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_ra_sci_data *data = CONTAINER_OF(dwork, struct uart_ra_sci_data, tx_timeout); uart_ra_sci_async_tx_abort(data->dev); } #endif /* CONFIG_UART_ASYNC_API */ static const struct uart_driver_api uart_ra_sci_driver_api = { .poll_in = uart_ra_sci_poll_in, .poll_out = uart_ra_sci_poll_out, .err_check = uart_ra_sci_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_ra_sci_configure, .config_get = uart_ra_sci_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_ra_sci_fifo_fill, .fifo_read = uart_ra_sci_fifo_read, .irq_tx_enable = uart_ra_sci_irq_tx_enable, .irq_tx_disable = uart_ra_sci_irq_tx_disable, .irq_tx_ready = uart_ra_sci_irq_tx_ready, .irq_rx_enable = uart_ra_sci_irq_rx_enable, .irq_rx_disable = uart_ra_sci_irq_rx_disable, .irq_tx_complete = uart_ra_sci_irq_tx_complete, .irq_rx_ready = uart_ra_sci_irq_rx_ready, .irq_err_enable = uart_ra_sci_irq_err_enable, .irq_err_disable = uart_ra_sci_irq_err_disable, .irq_is_pending = uart_ra_sci_irq_is_pending, .irq_update = uart_ra_sci_irq_update, .irq_callback_set = uart_ra_sci_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #if CONFIG_UART_ASYNC_API .callback_set = uart_ra_sci_async_callback_set, .tx = uart_ra_sci_async_tx, .tx_abort = uart_ra_sci_async_tx_abort, .rx_enable = uart_ra_sci_async_rx_enable, .rx_buf_rsp = uart_ra_sci_async_rx_buf_rsp, .rx_disable = uart_ra_sci_async_rx_disable, #endif /* CONFIG_UART_ASYNC_API */ }; static int uart_ra_sci_init(const struct device *dev) { const struct uart_ra_sci_config *config = dev->config; struct uart_ra_sci_data *data = dev->data; int ret; fsp_err_t fsp_err; /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Setup fsp sci_uart setting */ ret = uart_ra_sci_apply_config(&data->uart_config, &data->fsp_config, &data->fsp_config_extend, &data->fsp_baud_setting); if (ret != 0) { return ret; } data->fsp_config_extend.p_baud_setting = &data->fsp_baud_setting; #if 
defined(CONFIG_UART_ASYNC_API) data->fsp_config.p_callback = uart_ra_sci_callback_adapter; data->fsp_config.p_context = dev; k_work_init_delayable(&data->tx_timeout, uart_ra_sci_tx_timeout_handler); k_work_init_delayable(&data->rx_timeout_work, uart_ra_sci_rx_timeout_handler); #endif /* defined(CONFIG_UART_ASYNC_API) */ data->fsp_config.p_extend = &data->fsp_config_extend; fsp_err = R_SCI_UART_Open(&data->sci, &data->fsp_config); if (fsp_err != FSP_SUCCESS) { LOG_DBG("drivers: uart: initialize failed"); return -EIO; } irq_disable(data->fsp_config.eri_irq); return 0; } #if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API static void uart_ra_sci_rxi_isr(const struct device *dev) { struct uart_ra_sci_data *data = dev->data; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) if (data->user_cb != NULL) { data->user_cb(dev, data->user_cb_data); goto out; } #endif #if defined(CONFIG_UART_ASYNC_API) if (data->rx_timeout != SYS_FOREVER_US && data->rx_timeout != 0) { k_work_reschedule(&data->rx_timeout_work, Z_TIMEOUT_US(data->rx_timeout)); } data->rx_buf_len++; if (data->rx_buf_len + data->rx_buf_offset == data->rx_buf_cap) { sci_uart_rxi_isr(); } else { goto out; } #endif out: R_ICU->IELSR_b[data->fsp_config.rxi_irq].IR = 0U; } static void uart_ra_sci_txi_isr(const struct device *dev) { #if defined(CONFIG_UART_INTERRUPT_DRIVEN) struct uart_ra_sci_data *data = dev->data; if (data->user_cb != NULL) { data->user_cb(dev, data->user_cb_data); R_ICU->IELSR_b[data->fsp_config.txi_irq].IR = 0U; return; } #endif #if defined(CONFIG_UART_ASYNC_API) sci_uart_txi_isr(); #endif } static void uart_ra_sci_tei_isr(const struct device *dev) { #if defined(CONFIG_UART_INTERRUPT_DRIVEN) struct uart_ra_sci_data *data = dev->data; if (data->user_cb != NULL) { data->user_cb(dev, data->user_cb_data); R_ICU->IELSR_b[data->fsp_config.tei_irq].IR = 0U; return; } #endif #if defined(CONFIG_UART_ASYNC_API) sci_uart_tei_isr(); #endif } static void uart_ra_sci_eri_isr(const struct device *dev) { #if defined(CONFIG_UART_INTERRUPT_DRIVEN) struct uart_ra_sci_data *data = dev->data; if (data->user_cb != NULL) { data->user_cb(dev, data->user_cb_data); R_ICU->IELSR_b[data->fsp_config.eri_irq].IR = 0U; return; } #endif #if defined(CONFIG_UART_ASYNC_API) sci_uart_eri_isr(); #endif } #endif #define _ELC_EVENT_SCI_RXI(channel) ELC_EVENT_SCI##channel##_RXI #define _ELC_EVENT_SCI_TXI(channel) ELC_EVENT_SCI##channel##_TXI #define _ELC_EVENT_SCI_TEI(channel) ELC_EVENT_SCI##channel##_TEI #define _ELC_EVENT_SCI_ERI(channel) ELC_EVENT_SCI##channel##_ERI #define ELC_EVENT_SCI_RXI(channel) _ELC_EVENT_SCI_RXI(channel) #define ELC_EVENT_SCI_TXI(channel) _ELC_EVENT_SCI_TXI(channel) #define ELC_EVENT_SCI_TEI(channel) _ELC_EVENT_SCI_TEI(channel) #define ELC_EVENT_SCI_ERI(channel) _ELC_EVENT_SCI_ERI(channel) #if CONFIG_UART_ASYNC_API #define UART_RA_SCI_ASYNC_INIT(index) \ .rx_transfer_info = \ { \ .transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED, \ .transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_DESTINATION, \ .transfer_settings_word_b.irq = TRANSFER_IRQ_EACH, \ .transfer_settings_word_b.chain_mode = TRANSFER_CHAIN_MODE_DISABLED, \ .transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_FIXED, \ .transfer_settings_word_b.size = TRANSFER_SIZE_1_BYTE, \ .transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL, \ .p_dest = (void *)NULL, \ .p_src = (void const *)NULL, \ .num_blocks = 0, \ .length = 0, \ }, \ .rx_transfer_cfg_extend = {.activation_source = \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq)}, \ 
.rx_transfer_cfg = \ { \ .p_info = &uart_ra_sci_data_##index.rx_transfer_info, \ .p_extend = &uart_ra_sci_data_##index.rx_transfer_cfg_extend, \ }, \ .rx_transfer = \ { \ .p_ctrl = &uart_ra_sci_data_##index.rx_transfer_ctrl, \ .p_cfg = &uart_ra_sci_data_##index.rx_transfer_cfg, \ .p_api = &g_transfer_on_dtc, \ }, \ .tx_transfer_info = \ { \ .transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_FIXED, \ .transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_SOURCE, \ .transfer_settings_word_b.irq = TRANSFER_IRQ_END, \ .transfer_settings_word_b.chain_mode = TRANSFER_CHAIN_MODE_DISABLED, \ .transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED, \ .transfer_settings_word_b.size = TRANSFER_SIZE_1_BYTE, \ .transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL, \ .p_dest = (void *)NULL, \ .p_src = (void const *)NULL, \ .num_blocks = 0, \ .length = 0, \ }, \ .tx_transfer_cfg_extend = {.activation_source = \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq)}, \ .tx_transfer_cfg = \ { \ .p_info = &uart_ra_sci_data_##index.tx_transfer_info, \ .p_extend = &uart_ra_sci_data_##index.tx_transfer_cfg_extend, \ }, \ .tx_transfer = { \ .p_ctrl = &uart_ra_sci_data_##index.tx_transfer_ctrl, \ .p_cfg = &uart_ra_sci_data_##index.tx_transfer_cfg, \ .p_api = &g_transfer_on_dtc, \ }, #define UART_RA_SCI_DTC_INIT(index) \ { \ uart_ra_sci_data_##index.fsp_config.p_transfer_rx = \ &uart_ra_sci_data_##index.rx_transfer; \ uart_ra_sci_data_##index.fsp_config.p_transfer_tx = \ &uart_ra_sci_data_##index.tx_transfer; \ } #else #define UART_RA_SCI_ASYNC_INIT(index) #define UART_RA_SCI_DTC_INIT(index) #endif #if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API #define UART_RA_SCI_IRQ_INIT(index) \ { \ R_ICU->IELSR[DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq)] = \ ELC_EVENT_SCI_RXI(DT_INST_PROP(index, channel)); \ R_ICU->IELSR[DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq)] = \ ELC_EVENT_SCI_TXI(DT_INST_PROP(index, channel)); \ R_ICU->IELSR[DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, irq)] = \ ELC_EVENT_SCI_TEI(DT_INST_PROP(index, channel)); \ R_ICU->IELSR[DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, irq)] = \ ELC_EVENT_SCI_ERI(DT_INST_PROP(index, channel)); \ \ IRQ_CONNECT(DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, priority), \ uart_ra_sci_rxi_isr, DEVICE_DT_INST_GET(index), 0); \ IRQ_CONNECT(DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, priority), \ uart_ra_sci_txi_isr, DEVICE_DT_INST_GET(index), 0); \ IRQ_CONNECT(DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, priority), \ uart_ra_sci_tei_isr, DEVICE_DT_INST_GET(index), 0); \ IRQ_CONNECT(DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, priority), \ uart_ra_sci_eri_isr, DEVICE_DT_INST_GET(index), 0); \ \ irq_enable(DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq)); \ irq_enable(DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq)); \ irq_enable(DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, irq)); \ } #else #define UART_RA_SCI_IRQ_INIT(index) #endif #define UART_RA_SCI_INIT(index) \ PINCTRL_DT_DEFINE(DT_INST_PARENT(index)); \ static const struct uart_ra_sci_config uart_ra_sci_config_##index = { \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(DT_INST_PARENT(index)), \ .regs = (R_SCI0_Type *)DT_REG_ADDR(DT_INST_PARENT(index)), \ }; \ \ static struct uart_ra_sci_data uart_ra_sci_data_##index = { \ .uart_config = \ { \ .baudrate = DT_INST_PROP(index, current_speed), \ .parity = 
UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = COND_CODE_1(DT_NODE_HAS_PROP(idx, hw_flow_control), \ (UART_CFG_FLOW_CTRL_RTS_CTS), \ (UART_CFG_FLOW_CTRL_NONE)), \ }, \ .fsp_config = \ { \ .channel = DT_INST_PROP(index, channel), \ .rxi_ipl = DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, priority), \ .rxi_irq = DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq), \ .txi_ipl = DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, priority), \ .txi_irq = DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq), \ .tei_ipl = DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, priority), \ .tei_irq = DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, irq), \ .eri_ipl = DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, priority), \ .eri_irq = DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, irq), \ }, \ .fsp_config_extend = {}, \ .fsp_baud_setting = {}, \ .dev = DEVICE_DT_INST_GET(index), \ UART_RA_SCI_ASYNC_INIT(index)}; \ \ static int uart_ra_sci_init##index(const struct device *dev) \ { \ UART_RA_SCI_IRQ_INIT(index); \ UART_RA_SCI_DTC_INIT(index); \ int err = uart_ra_sci_init(dev); \ if (err != 0) { \ return err; \ } \ return 0; \ } \ DEVICE_DT_INST_DEFINE(index, uart_ra_sci_init##index, NULL, &uart_ra_sci_data_##index, \ &uart_ra_sci_config_##index, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, &uart_ra_sci_driver_api); DT_INST_FOREACH_STATUS_OKAY(UART_RA_SCI_INIT) ```
/content/code_sandbox/drivers/serial/uart_renesas_ra_sci.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,241
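The instance macro in the record above fixes the line settings (8N1, flow control only if the devicetree property is set) at build time; applications that need different settings at run time go through the generic `uart_configure()` call rather than touching the driver structures. The sketch below is illustrative only: the `uart0` node label is an assumption, and it requires a driver built with runtime configuration support (`CONFIG_UART_USE_RUNTIME_CONFIGURE`).

```c
/* Illustrative sketch, not part of the driver: change line settings at run
 * time through the generic API. The "uart0" node label is an assumption.
 */
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

static int reconfigure_uart(void)
{
	const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));
	const struct uart_config cfg = {
		.baudrate = 921600,
		.parity = UART_CFG_PARITY_NONE,
		.stop_bits = UART_CFG_STOP_BITS_1,
		.data_bits = UART_CFG_DATA_BITS_8,
		.flow_ctrl = UART_CFG_FLOW_CTRL_NONE,
	};

	if (!device_is_ready(uart)) {
		return -ENODEV;
	}

	/* Fails with an error code if the driver was built without
	 * CONFIG_UART_USE_RUNTIME_CONFIGURE or does not implement configure.
	 */
	return uart_configure(uart, &cfg);
}
```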
```objective-c /* * */ /** * @brief Driver for UART port on STM32 family processor. * */ #ifndef ZEPHYR_DRIVERS_SERIAL_UART_STM32_H_ #define ZEPHYR_DRIVERS_SERIAL_UART_STM32_H_ #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/uart.h> #include <stm32_ll_usart.h> #define STM32_UART_DEFAULT_BAUDRATE 115200 #define STM32_UART_DEFAULT_PARITY UART_CFG_PARITY_NONE #define STM32_UART_DEFAULT_STOP_BITS UART_CFG_STOP_BITS_1 #define STM32_UART_DEFAULT_DATA_BITS UART_CFG_DATA_BITS_8 /* device config */ struct uart_stm32_config { /* USART instance */ USART_TypeDef *usart; /* Reset controller device configuration */ const struct reset_dt_spec reset; /* clock subsystem driving this peripheral */ const struct stm32_pclken *pclken; /* number of clock subsystems */ size_t pclk_len; /* switch to enable single wire / half duplex feature */ bool single_wire; /* enable tx/rx pin swap */ bool tx_rx_swap; /* enable rx pin inversion */ bool rx_invert; /* enable tx pin inversion */ bool tx_invert; /* enable de signal */ bool de_enable; /* de signal assertion time in 1/16 of a bit */ uint8_t de_assert_time; /* de signal deassertion time in 1/16 of a bit */ uint8_t de_deassert_time; /* enable de pin inversion */ bool de_invert; /* enable fifo */ bool fifo_enable; /* pin muxing */ const struct pinctrl_dev_config *pcfg; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) || \ defined(CONFIG_PM) uart_irq_config_func_t irq_config_func; #endif #if defined(CONFIG_PM) /* Device defined as wake-up source */ bool wakeup_source; uint32_t wakeup_line; #endif /* CONFIG_PM */ }; #ifdef CONFIG_UART_ASYNC_API struct uart_dma_stream { const struct device *dma_dev; uint32_t dma_channel; struct dma_config dma_cfg; uint8_t priority; bool src_addr_increment; bool dst_addr_increment; int fifo_threshold; struct dma_block_config blk_cfg; uint8_t *buffer; size_t buffer_length; size_t offset; volatile size_t counter; int32_t timeout; struct k_work_delayable timeout_work; bool enabled; }; #endif /* driver data */ struct uart_stm32_data { /* clock device */ const struct device *clock; /* uart config */ struct uart_config *uart_cfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t user_cb; void *user_data; #endif #ifdef CONFIG_UART_ASYNC_API const struct device *uart_dev; uart_callback_t async_cb; void *async_user_data; struct uart_dma_stream dma_rx; struct uart_dma_stream dma_tx; uint8_t *rx_next_buffer; size_t rx_next_buffer_len; #endif #ifdef CONFIG_PM bool tx_poll_stream_on; bool tx_int_stream_on; bool pm_policy_state_on; #endif }; #endif /* ZEPHYR_DRIVERS_SERIAL_UART_STM32_H_ */ ```
/content/code_sandbox/drivers/serial/uart_stm32.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
678
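The header above splits each UART instance into an immutable `uart_stm32_config` (registers, clocks, pinctrl) and a mutable `uart_stm32_data` (runtime `uart_config`, callbacks, DMA streams), which is the standard Zephyr device model used by every driver in this directory. The skeleton below sketches that pattern in isolation; the `vnd,example-uart` compatible, the struct fields, and the empty API table are placeholders, not the real STM32 driver.

```c
/* Minimal sketch of the const-config / mutable-data pattern declared above:
 * an immutable config struct placed in ROM and a per-instance data struct in
 * RAM, both wired up by DEVICE_DT_INST_DEFINE(). The "vnd,example-uart"
 * compatible and every field here are placeholders.
 */
#define DT_DRV_COMPAT vnd_example_uart

#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>

struct example_uart_config {
	uintptr_t base;                 /* register base from devicetree (ROM) */
};

struct example_uart_data {
	struct uart_config uart_cfg;    /* runtime-mutable line settings (RAM) */
};

static int example_uart_init(const struct device *dev)
{
	const struct example_uart_config *config = dev->config;
	struct example_uart_data *data = dev->data;

	ARG_UNUSED(config);
	ARG_UNUSED(data);
	return 0;
}

/* A real driver would fill in poll_in/poll_out/... here. */
static const struct uart_driver_api example_uart_api = {0};

#define EXAMPLE_UART_INIT(n)                                                       \
	static const struct example_uart_config example_uart_cfg_##n = {          \
		.base = DT_INST_REG_ADDR(n),                                       \
	};                                                                         \
	static struct example_uart_data example_uart_data_##n;                    \
	DEVICE_DT_INST_DEFINE(n, example_uart_init, NULL, &example_uart_data_##n, \
			      &example_uart_cfg_##n, PRE_KERNEL_1,                 \
			      CONFIG_SERIAL_INIT_PRIORITY, &example_uart_api);

DT_INST_FOREACH_STATUS_OKAY(EXAMPLE_UART_INIT)
```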
```unknown # LPC11U6X UART configuration config UART_LPC11U6X bool "LPC11U6X serial driver" default y depends on DT_HAS_NXP_LPC11U6X_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help Enable UART driver for LPC11U6X series ```
/content/code_sandbox/drivers/serial/Kconfig.lpc11u6x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
73
```c /* * */ #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/pm/device.h> #include "sedi_driver_uart.h" #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_sedi_isr(void *arg); static void uart_sedi_cb(struct device *port); #endif #define DT_DRV_COMPAT intel_sedi_uart #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* UART IRQ handler declaration. */ #define UART_IRQ_HANDLER_DECL(n) \ static void irq_config_uart_##n(const struct device *dev) /* Setting configuration function. */ #define UART_CONFIG_IRQ_HANDLER_SET(n) \ .uart_irq_config_func = irq_config_uart_##n #define UART_IRQ_HANDLER_DEFINE(n) \ static void irq_config_uart_##n(const struct device *dev) \ { \ ARG_UNUSED(dev); \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), uart_sedi_isr, \ DEVICE_DT_GET(DT_NODELABEL(uart##n)), \ DT_INST_IRQ(n, sense)); \ irq_enable(DT_INST_IRQN(n)); \ } #else /*CONFIG_UART_INTERRUPT_DRIVEN */ #define UART_IRQ_HANDLER_DECL(n) #define UART_CONFIG_IRQ_HANDLER_SET(n) (0) #define UART_IRQ_HANDLER_DEFINE(n) #endif /* !CONFIG_UART_INTERRUPT_DRIVEN */ /* Device init macro for UART instance. As multiple uart instances follow a * similar definition of data structures differing only in the instance * number.This macro makes adding instances simpler. */ #define UART_SEDI_DEVICE_INIT(n) \ UART_IRQ_HANDLER_DECL(n); \ static K_MUTEX_DEFINE(uart_##n##_mutex); \ static K_SEM_DEFINE(uart_##n##_tx_sem, 1, 1); \ static K_SEM_DEFINE(uart_##n##_rx_sem, 1, 1); \ static K_SEM_DEFINE(uart_##n##_sync_read_sem, 0, 1); \ static const struct uart_sedi_config_info config_info_##n = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \ .instance = DT_INST_PROP(n, peripheral_id), \ .baud_rate = DT_INST_PROP(n, current_speed), \ .hw_fc = DT_INST_PROP(n, hw_flow_control), \ .line_ctrl = SEDI_UART_LC_8N1, \ .mutex = &uart_##n##_mutex, \ UART_CONFIG_IRQ_HANDLER_SET(n) \ }; \ \ static struct uart_sedi_drv_data drv_data_##n; \ PM_DEVICE_DT_DEFINE(DT_NODELABEL(uart##n), \ uart_sedi_pm_action); \ DEVICE_DT_DEFINE(DT_NODELABEL(uart##n), \ &uart_sedi_init, \ PM_DEVICE_DT_GET(DT_NODELABEL(uart##n)), \ &drv_data_##n, &config_info_##n, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, &api); \ UART_IRQ_HANDLER_DEFINE(n) /* Convenient macro to get the controller instance. */ #define GET_CONTROLLER_INSTANCE(dev) \ (((const struct uart_sedi_config_info *) \ dev->config)->instance) #define GET_MUTEX(dev) \ (((const struct uart_sedi_config_info *) \ dev->config)->mutex) struct uart_sedi_config_info { DEVICE_MMIO_ROM; /* Specifies the uart instance for configuration. */ sedi_uart_t instance; /* Specifies the baudrate for the uart instance. */ uint32_t baud_rate; /* Specifies the port line control settings */ sedi_uart_lc_t line_ctrl; struct k_mutex *mutex; /* Enable / disable hardware flow control for UART. */ bool hw_fc; /* UART irq configuration function when supporting interrupt * mode. 
*/ uart_irq_config_func_t uart_irq_config_func; }; static int uart_sedi_init(const struct device *dev); struct uart_sedi_drv_data { DEVICE_MMIO_RAM; uart_irq_callback_user_data_t user_cb; void *unsol_rx_usr_cb_param; uint32_t sync_rx_len; uint32_t sync_rx_status; void *user_data; void *usr_rx_buff; uint32_t usr_rx_size; uint8_t iir_cache; uint8_t busy_count; }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_busy_set(const struct device *dev) { struct uart_sedi_drv_data *context = dev->data; context->busy_count++; if (context->busy_count == 1) { pm_device_busy_set(dev); } } static void uart_busy_clear(const struct device *dev) { struct uart_sedi_drv_data *context = dev->data; context->busy_count--; if (context->busy_count == 0) { pm_device_busy_clear(dev); } } #endif #ifdef CONFIG_PM_DEVICE #ifndef CONFIG_UART_CONSOLE static int uart_suspend_device(const struct device *dev) { const struct uart_sedi_config_info *config = dev->config; if (pm_device_is_busy(dev)) { return -EBUSY; } int ret = sedi_uart_set_power(config->instance, SEDI_POWER_SUSPEND); if (ret != SEDI_DRIVER_OK) { return -EIO; } return 0; } static int uart_resume_device_from_suspend(const struct device *dev) { const struct uart_sedi_config_info *config = dev->config; int ret; ret = sedi_uart_set_power(config->instance, SEDI_POWER_FULL); if (ret != SEDI_DRIVER_OK) { return -EIO; } return 0; } static int uart_sedi_pm_action(const struct device *dev, enum pm_device_action action) { int ret = 0; switch (action) { case PM_DEVICE_ACTION_SUSPEND: ret = uart_suspend_device(dev); break; case PM_DEVICE_ACTION_RESUME: ret = uart_resume_device_from_suspend(dev); break; default: ret = -ENOTSUP; } return ret; } #else static int uart_sedi_pm_action(const struct device *dev, enum pm_device_action action) { /* do nothing if using UART print log to avoid clock gating * pm driver already handled power management for uart. */ return 0; } #endif /* CONFIG_UART_CONSOLE */ #endif /* CONFIG_PM_DEVICE */ static int uart_sedi_poll_in(const struct device *dev, unsigned char *data) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); uint32_t status; int ret = 0; sedi_uart_get_status(instance, (uint32_t *) &status); /* In order to check if there is any data to read from UART * controller we should check if the SEDI_UART_RX_BUSY bit from * 'status' is not set. This bit is set only if there is any * pending character to read. 
*/ if (!(status & SEDI_UART_RX_BUSY)) { ret = -1; } else { if (sedi_uart_read(instance, data, (uint32_t *)&status)) { ret = -1; } } return ret; } static void uart_sedi_poll_out(const struct device *dev, unsigned char data) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_write(instance, data); } #ifdef CONFIG_UART_LINE_CTRL static int get_xfer_error(int bsp_err) { int err; switch (bsp_err) { case SEDI_DRIVER_OK: err = 0; break; case SEDI_USART_ERROR_CANCELED: err = -ECANCELED; break; case SEDI_DRIVER_ERROR: err = -EIO; break; case SEDI_DRIVER_ERROR_PARAMETER: err = -EINVAL; break; case SEDI_DRIVER_ERROR_UNSUPPORTED: err = -ENOTSUP; break; default: err = -EFAULT; } return err; } #endif /* CONFIG_UART_LINE_CTRL */ static int uart_sedi_err_check(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); uint32_t status; int ret_status = 0; sedi_uart_get_status(instance, (uint32_t *const)&status); if (status & SEDI_UART_RX_OE) { ret_status = UART_ERROR_OVERRUN; } if (status & SEDI_UART_RX_PE) { ret_status = UART_ERROR_PARITY; } if (status & SEDI_UART_RX_FE) { ret_status = UART_ERROR_FRAMING; } if (status & SEDI_UART_RX_BI) { ret_status = UART_BREAK; } return ret_status; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_sedi_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); return sedi_uart_fifo_fill(instance, tx_data, size); } static int uart_sedi_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); return sedi_uart_fifo_read(instance, rx_data, size); } static void uart_sedi_irq_tx_enable(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_irq_tx_enable(instance); } static void uart_sedi_irq_tx_disable(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_irq_tx_disable(instance); } static int uart_sedi_irq_tx_ready(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); return sedi_uart_irq_tx_ready(instance); } static int uart_sedi_irq_tx_complete(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); return sedi_uart_is_tx_complete(instance); } static void uart_sedi_irq_rx_enable(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); uart_busy_set(dev); sedi_uart_irq_rx_enable(instance); } static void uart_sedi_irq_rx_disable(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_irq_rx_disable(instance); uart_busy_clear(dev); } static int uart_sedi_irq_rx_ready(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); return sedi_uart_is_irq_rx_ready(instance); } static void uart_sedi_irq_err_enable(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_irq_err_enable(instance); } static void uart_sedi_irq_err_disable(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_irq_err_disable(instance); } static int uart_sedi_irq_is_pending(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); return sedi_uart_is_irq_pending(instance); } static int uart_sedi_irq_update(const struct device *dev) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_update_irq_cache(instance); return 1; } static void uart_sedi_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void 
*user_data) { struct uart_sedi_drv_data *drv_data = dev->data; drv_data->user_cb = cb; drv_data->user_data = user_data; } static void uart_sedi_isr(void *arg) { struct device *dev = arg; struct uart_sedi_drv_data *drv_data = dev->data; if (drv_data->user_cb) { drv_data->user_cb(dev, drv_data->user_data); } else { uart_sedi_cb(dev); } } /* Called from generic callback of zephyr , set by set_cb. */ static void uart_sedi_cb(struct device *port) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(port); sedi_uart_isr_handler(instance); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_LINE_CTRL static int uart_sedi_line_ctrl_set(struct device *dev, uint32_t ctrl, uint32_t val) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_config_t cfg; uint32_t mask; int ret; k_mutex_lock(GET_MUTEX(dev), K_FOREVER); switch (ctrl) { case UART_LINE_CTRL_BAUD_RATE: sedi_uart_get_config(instance, &cfg); cfg.baud_rate = val; ret = sedi_uart_set_config(instance, &cfg); break; default: ret = -ENODEV; } k_mutex_unlock(GET_MUTEX(dev)); ret = get_xfer_error(ret); return ret; } static int uart_sedi_line_ctrl_get(struct device *dev, uint32_t ctrl, uint32_t *val) { sedi_uart_t instance = GET_CONTROLLER_INSTANCE(dev); sedi_uart_config_t cfg; uint32_t mask; int ret; k_mutex_lock(GET_MUTEX(dev), K_FOREVER); switch (ctrl) { case UART_LINE_CTRL_BAUD_RATE: ret = sedi_uart_get_config(instance, &cfg); *val = cfg.baud_rate; break; case UART_LINE_CTRL_LOOPBACK: ret = sedi_uart_get_loopback_mode(instance, (uint32_t *)val); break; case UART_LINE_CTRL_AFCE: ret = sedi_uart_get_config(instance, &cfg); *val = cfg.hw_fc; break; case UART_LINE_CTRL_LINE_STATUS_REPORT_MASK: mask = 0; *val = 0; ret = sedi_get_ln_status_report_mask(instance, (uint32_t *)&mask); *val |= ((mask & SEDI_UART_RX_OE) ? UART_ERROR_OVERRUN : 0); *val |= ((mask & SEDI_UART_RX_PE) ? UART_ERROR_PARITY : 0); *val |= ((mask & SEDI_UART_RX_FE) ? UART_ERROR_FRAMING : 0); *val |= ((mask & SEDI_UART_RX_BI) ? 
UART_BREAK : 0); break; case UART_LINE_CTRL_RTS: ret = sedi_uart_read_rts(instance, (uint32_t *)val); break; case UART_LINE_CTRL_CTS: ret = sedi_uart_read_cts(instance, (uint32_t *)val); break; default: ret = -ENODEV; } k_mutex_unlock(GET_MUTEX(dev)); ret = get_xfer_error(ret); return ret; } #endif /* CONFIG_UART_LINE_CTRL */ static const struct uart_driver_api api = { .poll_in = uart_sedi_poll_in, .poll_out = uart_sedi_poll_out, .err_check = uart_sedi_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_sedi_fifo_fill, .fifo_read = uart_sedi_fifo_read, .irq_tx_enable = uart_sedi_irq_tx_enable, .irq_tx_disable = uart_sedi_irq_tx_disable, .irq_tx_ready = uart_sedi_irq_tx_ready, .irq_tx_complete = uart_sedi_irq_tx_complete, .irq_rx_enable = uart_sedi_irq_rx_enable, .irq_rx_disable = uart_sedi_irq_rx_disable, .irq_rx_ready = uart_sedi_irq_rx_ready, .irq_err_enable = uart_sedi_irq_err_enable, .irq_err_disable = uart_sedi_irq_err_disable, .irq_is_pending = uart_sedi_irq_is_pending, .irq_update = uart_sedi_irq_update, .irq_callback_set = uart_sedi_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_LINE_CTRL .line_ctrl_set = uart_sedi_line_ctrl_set, .line_ctrl_get = uart_sedi_line_ctrl_get, #endif /* CONFIG_UART_LINE_CTRL */ }; static int uart_sedi_init(const struct device *dev) { const struct uart_sedi_config_info *config = dev->config; sedi_uart_config_t cfg; DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); sedi_uart_init(config->instance, (void *)DEVICE_MMIO_GET(dev)); cfg.line_control = config->line_ctrl; cfg.baud_rate = config->baud_rate; cfg.hw_fc = config->hw_fc; /* Setting to full power and enabling clk. */ sedi_uart_set_power(config->instance, SEDI_POWER_FULL); sedi_uart_set_config(config->instance, &cfg); #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->uart_irq_config_func(dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ return 0; } DT_INST_FOREACH_STATUS_OKAY(UART_SEDI_DEVICE_INIT) ```
/content/code_sandbox/drivers/serial/uart_sedi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,548
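The `poll_in`/`poll_out` pair implemented in the record above is the smallest useful surface of the UART API: `uart_poll_in()` returns 0 when a byte was fetched and -1 when the receiver is empty, while `uart_poll_out()` busy-waits until the byte is accepted. A minimal, illustrative echo loop using that contract is sketched below; the chosen `zephyr,console` node is an assumption of the example.

```c
/* Illustrative sketch, not part of the driver: echo every received byte back
 * out using only the polling calls implemented above. Assumes the chosen
 * zephyr,console node points at a UART.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>

void echo_forever(void)
{
	const struct device *uart = DEVICE_DT_GET(DT_CHOSEN(zephyr_console));
	unsigned char c;

	if (!device_is_ready(uart)) {
		return;
	}

	while (1) {
		/* 0 means a byte was read; -1 means the receiver is empty */
		if (uart_poll_in(uart, &c) == 0) {
			uart_poll_out(uart, c);
		} else {
			k_msleep(1);    /* nothing pending, let other threads run */
		}
	}
}
```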
```unknown config UART_MSP432P4XX bool "MSP432P4XX UART driver" default y depends on DT_HAS_TI_MSP432P4XX_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the MSP432P4XX UART driver, for UART_0. ```
/content/code_sandbox/drivers/serial/Kconfig.msp432p4xx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
70
```unknown # LiteX UART configuration option config UART_LITEX bool "LiteX serial driver" default y depends on DT_HAS_LITEX_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables LiteX UART serial driver. ```
/content/code_sandbox/drivers/serial/Kconfig.litex
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
57
```c /* * */ /** * @brief Driver for Nordic Semiconductor nRF5X UART */ #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/uart.h> #include <zephyr/pm/device.h> #include <zephyr/irq.h> #include <soc.h> #include <hal/nrf_uart.h> /* * Extract information from devicetree. * * This driver only supports one instance of this IP block, so the * instance number is always 0. */ #define DT_DRV_COMPAT nordic_nrf_uart #define PROP(prop) DT_INST_PROP(0, prop) #define HAS_PROP(prop) DT_INST_NODE_HAS_PROP(0, prop) #define BAUDRATE PROP(current_speed) #define DISABLE_RX PROP(disable_rx) #define HW_FLOW_CONTROL_AVAILABLE PROP(hw_flow_control) #define IRQN DT_INST_IRQN(0) #define IRQ_PRIO DT_INST_IRQ(0, priority) static NRF_UART_Type *const uart0_addr = (NRF_UART_Type *)DT_INST_REG_ADDR(0); struct uart_nrfx_config { const struct pinctrl_dev_config *pcfg; }; /* Device data structure */ struct uart_nrfx_data { struct uart_config uart_config; }; #ifdef CONFIG_UART_0_ASYNC static struct { uart_callback_t callback; void *user_data; uint8_t *rx_buffer; uint8_t *rx_secondary_buffer; size_t rx_buffer_length; size_t rx_secondary_buffer_length; volatile size_t rx_counter; volatile size_t rx_offset; int32_t rx_timeout; struct k_timer rx_timeout_timer; bool rx_enabled; bool tx_abort; const uint8_t *volatile tx_buffer; /* note: this is aliased with atomic_t in uart_nrfx_poll_out() */ unsigned long tx_buffer_length; volatile size_t tx_counter; #if HW_FLOW_CONTROL_AVAILABLE int32_t tx_timeout; struct k_timer tx_timeout_timer; #endif } uart0_cb; #endif /* CONFIG_UART_0_ASYNC */ #ifdef CONFIG_UART_0_INTERRUPT_DRIVEN static uart_irq_callback_user_data_t irq_callback; /**< Callback function pointer */ static void *irq_cb_data; /**< Callback function arg */ /* Variable used to override the state of the TXDRDY event in the initial state * of the driver. This event is not set by the hardware until a first byte is * sent, and we want to use it as an indication if the transmitter is ready * to accept a new byte. */ static volatile uint8_t uart_sw_event_txdrdy; static volatile bool disable_tx_irq; #endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */ static bool event_txdrdy_check(void) { return (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY) #ifdef CONFIG_UART_0_INTERRUPT_DRIVEN || uart_sw_event_txdrdy #endif ); } static void event_txdrdy_clear(void) { nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY); #ifdef CONFIG_UART_0_INTERRUPT_DRIVEN uart_sw_event_txdrdy = 0U; #endif } /** * @brief Set the baud rate * * This routine set the given baud rate for the UART. * * @param dev UART device struct * @param baudrate Baud rate * * @retval 0 on success. * @retval -EINVAL for invalid baudrate. 
*/ static int baudrate_set(const struct device *dev, uint32_t baudrate) { nrf_uart_baudrate_t nrf_baudrate; /* calculated baudrate divisor */ switch (baudrate) { case 300: /* value not supported by Nordic HAL */ nrf_baudrate = 0x00014000; break; case 600: /* value not supported by Nordic HAL */ nrf_baudrate = 0x00027000; break; case 1200: nrf_baudrate = NRF_UART_BAUDRATE_1200; break; case 2400: nrf_baudrate = NRF_UART_BAUDRATE_2400; break; case 4800: nrf_baudrate = NRF_UART_BAUDRATE_4800; break; case 9600: nrf_baudrate = NRF_UART_BAUDRATE_9600; break; case 14400: nrf_baudrate = NRF_UART_BAUDRATE_14400; break; case 19200: nrf_baudrate = NRF_UART_BAUDRATE_19200; break; case 28800: nrf_baudrate = NRF_UART_BAUDRATE_28800; break; #if defined(UART_BAUDRATE_BAUDRATE_Baud31250) case 31250: nrf_baudrate = NRF_UART_BAUDRATE_31250; break; #endif case 38400: nrf_baudrate = NRF_UART_BAUDRATE_38400; break; #if defined(UART_BAUDRATE_BAUDRATE_Baud56000) case 56000: nrf_baudrate = NRF_UART_BAUDRATE_56000; break; #endif case 57600: nrf_baudrate = NRF_UART_BAUDRATE_57600; break; case 76800: nrf_baudrate = NRF_UART_BAUDRATE_76800; break; case 115200: nrf_baudrate = NRF_UART_BAUDRATE_115200; break; case 230400: nrf_baudrate = NRF_UART_BAUDRATE_230400; break; case 250000: nrf_baudrate = NRF_UART_BAUDRATE_250000; break; case 460800: nrf_baudrate = NRF_UART_BAUDRATE_460800; break; case 921600: nrf_baudrate = NRF_UART_BAUDRATE_921600; break; case 1000000: nrf_baudrate = NRF_UART_BAUDRATE_1000000; break; default: return -EINVAL; } nrf_uart_baudrate_set(uart0_addr, nrf_baudrate); return 0; } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer if empty. */ static int uart_nrfx_poll_in(const struct device *dev, unsigned char *c) { if (!nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) { return -1; } /* Clear the interrupt */ nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY); /* got a character */ *c = nrf_uart_rxd_get(uart0_addr); return 0; } #ifdef CONFIG_UART_0_ASYNC static void uart_nrfx_isr(const struct device *dev); #endif /** * @brief Output a character in polled mode. * * @param dev UART device struct * @param c Character to send */ static void uart_nrfx_poll_out(const struct device *dev, unsigned char c) { atomic_t *lock; #ifdef CONFIG_UART_0_ASYNC while (uart0_cb.tx_buffer) { /* If there is ongoing asynchronous transmission, and we are in * ISR, then call uart interrupt routine, otherwise * busy wait until transmission is finished. */ if (k_is_in_isr()) { uart_nrfx_isr(dev); } } /* Use tx_buffer_length as lock, this way uart_nrfx_tx will * return -EBUSY during poll_out. */ lock = &uart0_cb.tx_buffer_length; #else static atomic_val_t poll_out_lock; lock = &poll_out_lock; #endif if (!k_is_in_isr()) { uint8_t safety_cnt = 100; while (atomic_cas((atomic_t *) lock, (atomic_val_t) 0, (atomic_val_t) 1) == false) { if (IS_ENABLED(CONFIG_MULTITHREADING)) { /* k_sleep allows other threads to execute and finish * their transactions. */ k_msleep(1); } else { k_busy_wait(1000); } if (--safety_cnt == 0) { break; } } } else { *lock = 1; } /* Reset the transmitter ready state. */ event_txdrdy_clear(); /* Activate the transmitter. */ nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX); /* Send the provided character. */ nrf_uart_txd_set(uart0_addr, (uint8_t)c); /* Wait until the transmitter is ready, i.e. the character is sent. 
*/ bool res; NRFX_WAIT_FOR(event_txdrdy_check(), 1000, 1, res); /* Deactivate the transmitter so that it does not needlessly * consume power. */ nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX); /* Release the lock. */ *lock = 0; } /** Console I/O function */ static int uart_nrfx_err_check(const struct device *dev) { /* register bitfields maps to the defines in uart.h */ return nrf_uart_errorsrc_get_and_clear(uart0_addr); } static int uart_nrfx_configure(const struct device *dev, const struct uart_config *cfg) { struct uart_nrfx_data *data = dev->data; nrf_uart_config_t uart_cfg; #if defined(UART_CONFIG_STOP_Msk) switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: uart_cfg.stop = NRF_UART_STOP_ONE; break; case UART_CFG_STOP_BITS_2: uart_cfg.stop = NRF_UART_STOP_TWO; break; default: return -ENOTSUP; } #else if (cfg->stop_bits != UART_CFG_STOP_BITS_1) { return -ENOTSUP; } #endif if (cfg->data_bits != UART_CFG_DATA_BITS_8) { return -ENOTSUP; } switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: uart_cfg.hwfc = NRF_UART_HWFC_DISABLED; break; case UART_CFG_FLOW_CTRL_RTS_CTS: if (HW_FLOW_CONTROL_AVAILABLE) { uart_cfg.hwfc = NRF_UART_HWFC_ENABLED; } else { return -ENOTSUP; } break; default: return -ENOTSUP; } #if defined(UART_CONFIG_PARITYTYPE_Msk) uart_cfg.paritytype = NRF_UART_PARITYTYPE_EVEN; #endif switch (cfg->parity) { case UART_CFG_PARITY_NONE: uart_cfg.parity = NRF_UART_PARITY_EXCLUDED; break; case UART_CFG_PARITY_EVEN: uart_cfg.parity = NRF_UART_PARITY_INCLUDED; break; #if defined(UART_CONFIG_PARITYTYPE_Msk) case UART_CFG_PARITY_ODD: uart_cfg.parity = NRF_UART_PARITY_INCLUDED; uart_cfg.paritytype = NRF_UART_PARITYTYPE_ODD; break; #endif default: return -ENOTSUP; } if (baudrate_set(dev, cfg->baudrate) != 0) { return -ENOTSUP; } nrf_uart_configure(uart0_addr, &uart_cfg); data->uart_config = *cfg; return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_nrfx_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_nrfx_data *data = dev->data; *cfg = data->uart_config; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_0_ASYNC static void user_callback(const struct device *dev, struct uart_event *event) { if (uart0_cb.callback) { uart0_cb.callback(dev, event, uart0_cb.user_data); } } static int uart_nrfx_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { uart0_cb.callback = callback; uart0_cb.user_data = user_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) && defined(CONFIG_UART_0_INTERRUPT_DRIVEN) irq_callback = NULL; irq_cb_data = NULL; #endif return 0; } static int uart_nrfx_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) { if (atomic_cas((atomic_t *) &uart0_cb.tx_buffer_length, (atomic_val_t) 0, (atomic_val_t) len) == false) { return -EBUSY; } uart0_cb.tx_buffer = buf; #if HW_FLOW_CONTROL_AVAILABLE uart0_cb.tx_timeout = timeout; #endif nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY); nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX); nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_TXDRDY); uint8_t txd = uart0_cb.tx_buffer[uart0_cb.tx_counter]; nrf_uart_txd_set(uart0_addr, txd); return 0; } static int uart_nrfx_tx_abort(const struct device *dev) { if (uart0_cb.tx_buffer_length == 0) { return -EINVAL; } #if HW_FLOW_CONTROL_AVAILABLE if (uart0_cb.tx_timeout != SYS_FOREVER_US) { k_timer_stop(&uart0_cb.tx_timeout_timer); } #endif nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX); struct uart_event evt = { .type = UART_TX_ABORTED, 
.data.tx.buf = uart0_cb.tx_buffer, .data.tx.len = uart0_cb.tx_counter }; uart0_cb.tx_buffer_length = 0; uart0_cb.tx_counter = 0; user_callback(dev, &evt); return 0; } static int uart_nrfx_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) { if (DISABLE_RX) { __ASSERT(false, "TX only UART instance"); return -ENOTSUP; } if (uart0_cb.rx_buffer_length != 0) { return -EBUSY; } uart0_cb.rx_enabled = 1; uart0_cb.rx_buffer = buf; uart0_cb.rx_buffer_length = len; uart0_cb.rx_counter = 0; uart0_cb.rx_secondary_buffer_length = 0; uart0_cb.rx_timeout = timeout; nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR); nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY); nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXTO); nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTRX); nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_RXDRDY | NRF_UART_INT_MASK_ERROR | NRF_UART_INT_MASK_RXTO); return 0; } static int uart_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { int err; unsigned int key = irq_lock(); if (!uart0_cb.rx_enabled) { err = -EACCES; } else if (uart0_cb.rx_secondary_buffer_length != 0) { err = -EBUSY; } else { uart0_cb.rx_secondary_buffer = buf; uart0_cb.rx_secondary_buffer_length = len; err = 0; } irq_unlock(key); return err; } static int uart_nrfx_rx_disable(const struct device *dev) { if (uart0_cb.rx_buffer_length == 0) { return -EFAULT; } uart0_cb.rx_enabled = 0; if (uart0_cb.rx_timeout != SYS_FOREVER_US) { k_timer_stop(&uart0_cb.rx_timeout_timer); } nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX); return 0; } static void rx_rdy_evt(const struct device *dev) { struct uart_event event; size_t rx_cnt = uart0_cb.rx_counter; event.type = UART_RX_RDY; event.data.rx.buf = uart0_cb.rx_buffer; event.data.rx.len = rx_cnt - uart0_cb.rx_offset; event.data.rx.offset = uart0_cb.rx_offset; uart0_cb.rx_offset = rx_cnt; user_callback(dev, &event); } static void buf_released_evt(const struct device *dev) { struct uart_event event = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = uart0_cb.rx_buffer }; user_callback(dev, &event); } static void rx_disabled_evt(const struct device *dev) { struct uart_event event = { .type = UART_RX_DISABLED }; user_callback(dev, &event); } static void rx_reset_state(void) { nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_RXDRDY | NRF_UART_INT_MASK_ERROR | NRF_UART_INT_MASK_RXTO); uart0_cb.rx_buffer_length = 0; uart0_cb.rx_enabled = 0; uart0_cb.rx_counter = 0; uart0_cb.rx_offset = 0; uart0_cb.rx_secondary_buffer_length = 0; } static void rx_isr(const struct device *dev) { struct uart_event event; nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY); if (!uart0_cb.rx_buffer_length || !uart0_cb.rx_enabled) { /* Byte received when receiving is disabled - data lost. 
*/ nrf_uart_rxd_get(uart0_addr); } else { if (uart0_cb.rx_counter == 0 && uart0_cb.rx_secondary_buffer_length == 0) { event.type = UART_RX_BUF_REQUEST; user_callback(dev, &event); } uart0_cb.rx_buffer[uart0_cb.rx_counter] = nrf_uart_rxd_get(uart0_addr); uart0_cb.rx_counter++; if (uart0_cb.rx_timeout == 0) { rx_rdy_evt(dev); } else if (uart0_cb.rx_timeout != SYS_FOREVER_US) { k_timer_start(&uart0_cb.rx_timeout_timer, K_USEC(uart0_cb.rx_timeout), K_NO_WAIT); } } if (uart0_cb.rx_buffer_length == uart0_cb.rx_counter) { if (uart0_cb.rx_timeout != SYS_FOREVER_US) { k_timer_stop(&uart0_cb.rx_timeout_timer); } rx_rdy_evt(dev); unsigned int key = irq_lock(); if (uart0_cb.rx_secondary_buffer_length == 0) { uart0_cb.rx_enabled = 0; } irq_unlock(key); if (uart0_cb.rx_secondary_buffer_length) { buf_released_evt(dev); /* Switch to secondary buffer. */ uart0_cb.rx_buffer_length = uart0_cb.rx_secondary_buffer_length; uart0_cb.rx_buffer = uart0_cb.rx_secondary_buffer; uart0_cb.rx_secondary_buffer_length = 0; uart0_cb.rx_counter = 0; uart0_cb.rx_offset = 0; event.type = UART_RX_BUF_REQUEST; user_callback(dev, &event); } else { uart_nrfx_rx_disable(dev); } } } static void tx_isr(const struct device *dev) { uart0_cb.tx_counter++; if (uart0_cb.tx_counter < uart0_cb.tx_buffer_length && !uart0_cb.tx_abort) { #if HW_FLOW_CONTROL_AVAILABLE if (uart0_cb.tx_timeout != SYS_FOREVER_US) { k_timer_start(&uart0_cb.tx_timeout_timer, K_USEC(uart0_cb.tx_timeout), K_NO_WAIT); } #endif nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY); uint8_t txd = uart0_cb.tx_buffer[uart0_cb.tx_counter]; nrf_uart_txd_set(uart0_addr, txd); } else { #if HW_FLOW_CONTROL_AVAILABLE if (uart0_cb.tx_timeout != SYS_FOREVER_US) { k_timer_stop(&uart0_cb.tx_timeout_timer); } #endif nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX); struct uart_event event = { .type = UART_TX_DONE, .data.tx.buf = uart0_cb.tx_buffer, .data.tx.len = uart0_cb.tx_counter }; nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY); uart0_cb.tx_buffer_length = 0; uart0_cb.tx_counter = 0; uart0_cb.tx_buffer = NULL; nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_TXDRDY); user_callback(dev, &event); } } #define UART_ERROR_FROM_MASK(mask) \ (mask & NRF_UART_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN \ : mask & NRF_UART_ERROR_PARITY_MASK ? UART_ERROR_PARITY \ : mask & NRF_UART_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING \ : mask & NRF_UART_ERROR_BREAK_MASK ? UART_BREAK \ : 0) static void error_isr(const struct device *dev) { if (uart0_cb.rx_timeout != SYS_FOREVER_US) { k_timer_stop(&uart0_cb.rx_timeout_timer); } nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR); if (!uart0_cb.rx_enabled) { nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX); } struct uart_event event = { .type = UART_RX_STOPPED, .data.rx_stop.reason = UART_ERROR_FROM_MASK( nrf_uart_errorsrc_get_and_clear(uart0_addr)), .data.rx_stop.data.len = uart0_cb.rx_counter - uart0_cb.rx_offset, .data.rx_stop.data.offset = uart0_cb.rx_offset, .data.rx_stop.data.buf = uart0_cb.rx_buffer }; user_callback(dev, &event); /* Abort transfer. */ uart_nrfx_rx_disable(dev); } /* * In nRF hardware RX timeout can occur only after stopping the peripheral, * it is used as a sign that peripheral has finished its operation and is * disabled. */ static void rxto_isr(const struct device *dev) { nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXTO); /* Send rxrdy if there is any data pending. 
*/ if (uart0_cb.rx_counter - uart0_cb.rx_offset) { rx_rdy_evt(dev); } buf_released_evt(dev); if (uart0_cb.rx_secondary_buffer_length) { uart0_cb.rx_buffer = uart0_cb.rx_secondary_buffer; buf_released_evt(dev); } rx_reset_state(); rx_disabled_evt(dev); } void uart_nrfx_isr(const struct device *uart) { if (nrf_uart_int_enable_check(uart0_addr, NRF_UART_INT_MASK_ERROR) && nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_ERROR)) { error_isr(uart); } else if (nrf_uart_int_enable_check(uart0_addr, NRF_UART_INT_MASK_RXDRDY) && nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) { rx_isr(uart); } if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY) && nrf_uart_int_enable_check(uart0_addr, NRF_UART_INT_MASK_TXDRDY)) { tx_isr(uart); } if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXTO)) { rxto_isr(uart); } } static void rx_timeout(struct k_timer *timer) { rx_rdy_evt(DEVICE_DT_INST_GET(0)); } #if HW_FLOW_CONTROL_AVAILABLE static void tx_timeout(struct k_timer *timer) { struct uart_event evt; if (uart0_cb.tx_timeout != SYS_FOREVER_US) { k_timer_stop(&uart0_cb.tx_timeout_timer); } nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX); evt.type = UART_TX_ABORTED; evt.data.tx.buf = uart0_cb.tx_buffer; evt.data.tx.len = uart0_cb.tx_buffer_length; uart0_cb.tx_buffer_length = 0; uart0_cb.tx_counter = 0; user_callback(DEVICE_DT_INST_GET(0), &evt); } #endif #endif /* CONFIG_UART_0_ASYNC */ #ifdef CONFIG_UART_0_INTERRUPT_DRIVEN /** Interrupt driven FIFO fill function */ static int uart_nrfx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { uint8_t num_tx = 0U; while ((len - num_tx > 0) && event_txdrdy_check()) { /* Clear the interrupt */ event_txdrdy_clear(); /* Send a character */ nrf_uart_txd_set(uart0_addr, (uint8_t)tx_data[num_tx++]); } return (int)num_tx; } /** Interrupt driven FIFO read function */ static int uart_nrfx_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { uint8_t num_rx = 0U; while ((size - num_rx > 0) && nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) { /* Clear the interrupt */ nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY); /* Receive a character */ rx_data[num_rx++] = (uint8_t)nrf_uart_rxd_get(uart0_addr); } return num_rx; } /** Interrupt driven transfer enabling function */ static void uart_nrfx_irq_tx_enable(const struct device *dev) { uint32_t key; disable_tx_irq = false; /* Indicate that this device started a transaction that should not be * interrupted by putting the SoC into the deep sleep mode. */ pm_device_busy_set(dev); /* Activate the transmitter. */ nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX); nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_TXDRDY); /* Critical section is used to avoid any UART related interrupt which * can occur after the if statement and before call of the function * forcing an interrupt. */ key = irq_lock(); if (uart_sw_event_txdrdy) { /* Due to HW limitation first TXDRDY interrupt shall be * triggered by the software. */ NVIC_SetPendingIRQ(IRQN); } irq_unlock(key); } /** Interrupt driven transfer disabling function */ static void uart_nrfx_irq_tx_disable(const struct device *dev) { /* Disable TX interrupt in uart_nrfx_isr() when transmission is done. 
*/ disable_tx_irq = true; } /** Interrupt driven receiver enabling function */ static void uart_nrfx_irq_rx_enable(const struct device *dev) { nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_RXDRDY); } /** Interrupt driven receiver disabling function */ static void uart_nrfx_irq_rx_disable(const struct device *dev) { nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_RXDRDY); } /** Interrupt driven transfer empty function */ static int uart_nrfx_irq_tx_ready_complete(const struct device *dev) { /* Signal TX readiness only when the TX interrupt is enabled and there * is no pending request to disable it. Note that this function may get * called after the TX interrupt is requested to be disabled but before * the disabling is actually performed (in the IRQ handler). */ return nrf_uart_int_enable_check(uart0_addr, NRF_UART_INT_MASK_TXDRDY) && !disable_tx_irq && event_txdrdy_check(); } /** Interrupt driven receiver ready function */ static int uart_nrfx_irq_rx_ready(const struct device *dev) { return nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY); } /** Interrupt driven error enabling function */ static void uart_nrfx_irq_err_enable(const struct device *dev) { nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_ERROR); } /** Interrupt driven error disabling function */ static void uart_nrfx_irq_err_disable(const struct device *dev) { nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_ERROR); } /** Interrupt driven pending status function */ static int uart_nrfx_irq_is_pending(const struct device *dev) { return ((nrf_uart_int_enable_check(uart0_addr, NRF_UART_INT_MASK_TXDRDY) && uart_nrfx_irq_tx_ready_complete(dev)) || (nrf_uart_int_enable_check(uart0_addr, NRF_UART_INT_MASK_RXDRDY) && uart_nrfx_irq_rx_ready(dev))); } /** Interrupt driven interrupt update function */ static int uart_nrfx_irq_update(const struct device *dev) { return 1; } /** Set the callback function */ static void uart_nrfx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { (void)dev; irq_callback = cb; irq_cb_data = cb_data; #if defined(CONFIG_UART_0_ASYNC) && defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) uart0_cb.callback = NULL; uart0_cb.user_data = NULL; #endif } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. */ static void uart_nrfx_isr(const struct device *dev) { if (disable_tx_irq && nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY)) { nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_TXDRDY); /* Deactivate the transmitter so that it does not needlessly * consume power. */ nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX); /* The transaction is over. It is okay to enter the deep sleep * mode if needed. */ pm_device_busy_clear(dev); disable_tx_irq = false; return; } if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_ERROR)) { nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR); } if (irq_callback) { irq_callback(dev, irq_cb_data); } } #endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */ /** * @brief Initialize UART channel * * This routine is called to reset the chip in a quiescent state. * It is assumed that this function is called only once per UART. 
* * @param dev UART device struct * * @return 0 on success */ static int uart_nrfx_init(const struct device *dev) { const struct uart_nrfx_config *config = dev->config; struct uart_nrfx_data *data = dev->data; int err; nrf_uart_disable(uart0_addr); err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } /* Set initial configuration */ err = uart_nrfx_configure(dev, &data->uart_config); if (err) { return err; } /* Enable the UART and activate its receiver. With the current API * the receiver needs to be active all the time. The transmitter * will be activated when there is something to send. */ nrf_uart_enable(uart0_addr); if (!DISABLE_RX) { nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY); nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTRX); } #ifdef CONFIG_UART_0_INTERRUPT_DRIVEN /* Simulate that the TXDRDY event is set, so that the transmitter status * is indicated correctly. */ uart_sw_event_txdrdy = 1U; #endif #if defined(CONFIG_UART_0_ASYNC) || defined(CONFIG_UART_0_INTERRUPT_DRIVEN) IRQ_CONNECT(IRQN, IRQ_PRIO, uart_nrfx_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(IRQN); #endif #ifdef CONFIG_UART_0_ASYNC k_timer_init(&uart0_cb.rx_timeout_timer, rx_timeout, NULL); #if HW_FLOW_CONTROL_AVAILABLE k_timer_init(&uart0_cb.tx_timeout_timer, tx_timeout, NULL); #endif #endif return 0; } /* Common function: uart_nrfx_irq_tx_ready_complete is used for two API entries * because Nordic hardware does not distinguish between them. */ static const struct uart_driver_api uart_nrfx_uart_driver_api = { #ifdef CONFIG_UART_0_ASYNC .callback_set = uart_nrfx_callback_set, .tx = uart_nrfx_tx, .tx_abort = uart_nrfx_tx_abort, .rx_enable = uart_nrfx_rx_enable, .rx_buf_rsp = uart_nrfx_rx_buf_rsp, .rx_disable = uart_nrfx_rx_disable, #endif /* CONFIG_UART_0_ASYNC */ .poll_in = uart_nrfx_poll_in, .poll_out = uart_nrfx_poll_out, .err_check = uart_nrfx_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_nrfx_configure, .config_get = uart_nrfx_config_get, #endif #ifdef CONFIG_UART_0_INTERRUPT_DRIVEN .fifo_fill = uart_nrfx_fifo_fill, .fifo_read = uart_nrfx_fifo_read, .irq_tx_enable = uart_nrfx_irq_tx_enable, .irq_tx_disable = uart_nrfx_irq_tx_disable, .irq_tx_ready = uart_nrfx_irq_tx_ready_complete, .irq_rx_enable = uart_nrfx_irq_rx_enable, .irq_rx_disable = uart_nrfx_irq_rx_disable, .irq_tx_complete = uart_nrfx_irq_tx_ready_complete, .irq_rx_ready = uart_nrfx_irq_rx_ready, .irq_err_enable = uart_nrfx_irq_err_enable, .irq_err_disable = uart_nrfx_irq_err_disable, .irq_is_pending = uart_nrfx_irq_is_pending, .irq_update = uart_nrfx_irq_update, .irq_callback_set = uart_nrfx_irq_callback_set, #endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_PM_DEVICE static int uart_nrfx_pm_action(const struct device *dev, enum pm_device_action action) { const struct uart_nrfx_config *config = dev->config; int ret; switch (action) { case PM_DEVICE_ACTION_RESUME: if (IS_ENABLED(CONFIG_UART_0_GPIO_MANAGEMENT)) { ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } } nrf_uart_enable(uart0_addr); if (!DISABLE_RX) { nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTRX); } break; case PM_DEVICE_ACTION_SUSPEND: nrf_uart_disable(uart0_addr); if (IS_ENABLED(CONFIG_UART_0_GPIO_MANAGEMENT)) { ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP); if (ret < 0) { return ret; } } break; default: return -ENOTSUP; } return 0; } #endif /* CONFIG_PM_DEVICE */ PINCTRL_DT_INST_DEFINE(0); 
NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(DT_DRV_INST(0)); static const struct uart_nrfx_config uart_nrfx_uart0_config = { .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; static struct uart_nrfx_data uart_nrfx_uart0_data = { .uart_config = { .stop_bits = UART_CFG_STOP_BITS_1, .data_bits = UART_CFG_DATA_BITS_8, .baudrate = BAUDRATE, #ifdef CONFIG_UART_0_NRF_PARITY_BIT .parity = UART_CFG_PARITY_EVEN, #else .parity = UART_CFG_PARITY_NONE, #endif /* CONFIG_UART_0_NRF_PARITY_BIT */ .flow_ctrl = PROP(hw_flow_control) ? UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE, } }; PM_DEVICE_DT_INST_DEFINE(0, uart_nrfx_pm_action); DEVICE_DT_INST_DEFINE(0, uart_nrfx_init, PM_DEVICE_DT_INST_GET(0), &uart_nrfx_uart0_data, &uart_nrfx_uart0_config, /* Initialize UART device before UART console. */ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_nrfx_uart_driver_api); ```
/content/code_sandbox/drivers/serial/uart_nrfx_uart.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,055
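The driver above implements the asynchronous UART API (`uart_callback_set()`, `uart_tx()`, `uart_rx_enable()`, buffer hand-over via `UART_RX_BUF_REQUEST`). The sketch below shows the application side of that contract with a simple two-buffer ping-pong; the buffer sizes, the 10 ms inactivity timeout, and the assumption that data is consumed before a buffer is recycled are example choices, not requirements of the API.

```c
/* Illustrative sketch of the application side of the asynchronous API the
 * driver above provides. Two 64-byte buffers are handed to the driver in a
 * ping-pong fashion; sizes and timeouts are example choices.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>

static uint8_t rx_bufs[2][64];

static void async_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	static int next = 1;    /* rx_bufs[0] is given to the driver at start-up */

	switch (evt->type) {
	case UART_RX_RDY:
		/* evt->data.rx.len new bytes at evt->data.rx.buf + evt->data.rx.offset */
		break;
	case UART_RX_BUF_REQUEST:
		/* keep reception running by supplying the next buffer */
		uart_rx_buf_rsp(dev, rx_bufs[next], sizeof(rx_bufs[next]));
		next ^= 1;
		break;
	case UART_RX_BUF_RELEASED:
	case UART_TX_DONE:
	default:
		break;
	}
}

int start_async_rx(const struct device *uart)
{
	int err = uart_callback_set(uart, async_cb, NULL);

	if (err == 0) {
		err = uart_rx_enable(uart, rx_bufs[0], sizeof(rx_bufs[0]),
				     10 * USEC_PER_MSEC);
	}
	return err;
}
```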
```unknown # RV32M1 SDK LPUART config UART_RV32M1_LPUART bool "RV32M1 LPUART driver" default y depends on DT_HAS_OPENISA_RV32M1_LPUART_ENABLED depends on CLOCK_CONTROL select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select PINCTRL help Enable the RV32M1 LPUART driver. ```
/content/code_sandbox/drivers/serial/Kconfig.rv32m1_lpuart
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
87
```c /* */ #define DT_DRV_COMPAT cypress_psoc6_uart /** @file * @brief UART driver for Cypress PSoC6 MCU family. * * Note: * - Error handling is not implemented. * - The driver works only in polling mode, interrupt mode is not implemented. */ #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <soc.h> #include "cy_syslib.h" #include "cy_sysclk.h" #include "cy_scb_uart.h" #include "cy_sysint.h" /* UART desired baud rate is 115200 bps (Standard mode). * The UART baud rate = (SCB clock frequency / Oversample). * For PeriClk = 50 MHz, select divider value 36 and get * SCB clock = (50 MHz / 36) = 1,389 MHz. * Select Oversample = 12. * These setting results UART data rate = 1,389 MHz / 12 = 115750 bps. */ #define UART_PSOC6_CONFIG_OVERSAMPLE (12UL) #define UART_PSOC6_CONFIG_BREAKWIDTH (11UL) #define UART_PSOC6_CONFIG_DATAWIDTH (8UL) /* Assign divider type and number for UART */ #define UART_PSOC6_UART_CLK_DIV_TYPE (CY_SYSCLK_DIV_8_BIT) #define UART_PSOC6_UART_CLK_DIV_NUMBER (PERI_DIV_8_NR - 1u) #define UART_PSOC6_UART_CLK_DIV_VAL (35UL) /* * Verify Kconfig configuration */ struct cypress_psoc6_config { CySCB_Type *base; uint32_t periph_id; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif const struct pinctrl_dev_config *pcfg; }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN struct cypress_psoc6_data { uart_irq_callback_user_data_t irq_cb; /* Interrupt Callback */ void *irq_cb_data; /* Interrupt Callback Arg */ }; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ /* Populate configuration structure */ static const cy_stc_scb_uart_config_t uartConfig = { .uartMode = CY_SCB_UART_STANDARD, .enableMutliProcessorMode = false, .smartCardRetryOnNack = false, .irdaInvertRx = false, .irdaEnableLowPowerReceiver = false, .oversample = UART_PSOC6_CONFIG_OVERSAMPLE, .enableMsbFirst = false, .dataWidth = UART_PSOC6_CONFIG_DATAWIDTH, .parity = CY_SCB_UART_PARITY_NONE, .stopBits = CY_SCB_UART_STOP_BITS_1, .enableInputFilter = false, .breakWidth = UART_PSOC6_CONFIG_BREAKWIDTH, .dropOnFrameError = false, .dropOnParityError = false, .receiverAddress = 0UL, .receiverAddressMask = 0UL, .acceptAddrInFifo = false, .enableCts = false, .ctsPolarity = CY_SCB_UART_ACTIVE_LOW, .rtsRxFifoLevel = 0UL, .rtsPolarity = CY_SCB_UART_ACTIVE_LOW, .rxFifoTriggerLevel = 0UL, .rxFifoIntEnableMask = 0UL, .txFifoTriggerLevel = 0UL, .txFifoIntEnableMask = 0UL, }; /** * Function Name: uart_psoc6_init() * * Performs hardware initialization: debug UART. 
* */ static int uart_psoc6_init(const struct device *dev) { int ret; const struct cypress_psoc6_config *config = dev->config; /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Connect assigned divider to be a clock source for UART */ Cy_SysClk_PeriphAssignDivider(config->periph_id, UART_PSOC6_UART_CLK_DIV_TYPE, UART_PSOC6_UART_CLK_DIV_NUMBER); Cy_SysClk_PeriphSetDivider(UART_PSOC6_UART_CLK_DIV_TYPE, UART_PSOC6_UART_CLK_DIV_NUMBER, UART_PSOC6_UART_CLK_DIV_VAL); Cy_SysClk_PeriphEnableDivider(UART_PSOC6_UART_CLK_DIV_TYPE, UART_PSOC6_UART_CLK_DIV_NUMBER); /* Configure UART to operate */ (void) Cy_SCB_UART_Init(config->base, &uartConfig, NULL); Cy_SCB_UART_Enable(config->base); #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ return 0; } static int uart_psoc6_poll_in(const struct device *dev, unsigned char *c) { const struct cypress_psoc6_config *config = dev->config; uint32_t rec; rec = Cy_SCB_UART_Get(config->base); *c = (unsigned char)(rec & 0xff); return ((rec == CY_SCB_UART_RX_NO_DATA) ? -1 : 0); } static void uart_psoc6_poll_out(const struct device *dev, unsigned char c) { const struct cypress_psoc6_config *config = dev->config; while (Cy_SCB_UART_Put(config->base, (uint32_t)c) != 1UL) { } } static int uart_psoc6_err_check(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; uint32_t status = Cy_SCB_UART_GetRxFifoStatus(config->base); int errors = 0; if (status & CY_SCB_UART_RX_OVERFLOW) { errors |= UART_ERROR_OVERRUN; } if (status & CY_SCB_UART_RX_ERR_PARITY) { errors |= UART_ERROR_PARITY; } if (status & CY_SCB_UART_RX_ERR_FRAME) { errors |= UART_ERROR_FRAMING; } return errors; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_psoc6_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct cypress_psoc6_config *config = dev->config; return Cy_SCB_UART_PutArray(config->base, (uint8_t *) tx_data, size); } static int uart_psoc6_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct cypress_psoc6_config *config = dev->config; return Cy_SCB_UART_GetArray(config->base, rx_data, size); } static void uart_psoc6_irq_tx_enable(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; Cy_SCB_SetTxInterruptMask(config->base, CY_SCB_UART_TX_EMPTY); } static void uart_psoc6_irq_tx_disable(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; Cy_SCB_SetTxInterruptMask(config->base, 0); } static int uart_psoc6_irq_tx_ready(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; uint32_t status = Cy_SCB_UART_GetTxFifoStatus(config->base); Cy_SCB_UART_ClearTxFifoStatus(config->base, CY_SCB_UART_TX_INTR_MASK); return (status & CY_SCB_UART_TX_NOT_FULL); } static int uart_psoc6_irq_tx_complete(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; uint32_t status = Cy_SCB_UART_GetTxFifoStatus(config->base); Cy_SCB_UART_ClearTxFifoStatus(config->base, CY_SCB_UART_TX_INTR_MASK); return (status & CY_SCB_UART_TX_DONE); } static void uart_psoc6_irq_rx_enable(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; Cy_SCB_SetRxInterruptMask(config->base, CY_SCB_UART_RX_NOT_EMPTY); } static void uart_psoc6_irq_rx_disable(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; 
Cy_SCB_SetRxInterruptMask(config->base, 0); } static int uart_psoc6_irq_rx_ready(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; uint32_t status = Cy_SCB_UART_GetRxFifoStatus(config->base); Cy_SCB_UART_ClearRxFifoStatus(config->base, CY_SCB_UART_RX_INTR_MASK); return (status & CY_SCB_UART_RX_NOT_EMPTY); } static void uart_psoc6_irq_err_enable(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; uint32_t intmask = Cy_SCB_GetRxInterruptMask(config->base) | CY_SCB_UART_RECEIVE_ERR; Cy_SCB_SetRxInterruptMask(config->base, intmask); } static void uart_psoc6_irq_err_disable(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; uint32_t intmask = Cy_SCB_GetRxInterruptMask(config->base) & ~(CY_SCB_UART_RECEIVE_ERR); Cy_SCB_SetRxInterruptMask(config->base, intmask); } static int uart_psoc6_irq_is_pending(const struct device *dev) { const struct cypress_psoc6_config *config = dev->config; uint32_t intcause = Cy_SCB_GetInterruptCause(config->base); return (intcause & (CY_SCB_TX_INTR | CY_SCB_RX_INTR)); } static int uart_psoc6_irq_update(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void uart_psoc6_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct cypress_psoc6_data *const dev_data = dev->data; dev_data->irq_cb = cb; dev_data->irq_cb_data = cb_data; } static void uart_psoc6_isr(const struct device *dev) { struct cypress_psoc6_data *const dev_data = dev->data; if (dev_data->irq_cb) { dev_data->irq_cb(dev, dev_data->irq_cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_psoc6_driver_api = { .poll_in = uart_psoc6_poll_in, .poll_out = uart_psoc6_poll_out, .err_check = uart_psoc6_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_psoc6_fifo_fill, .fifo_read = uart_psoc6_fifo_read, .irq_tx_enable = uart_psoc6_irq_tx_enable, .irq_tx_disable = uart_psoc6_irq_tx_disable, .irq_tx_ready = uart_psoc6_irq_tx_ready, .irq_rx_enable = uart_psoc6_irq_rx_enable, .irq_rx_disable = uart_psoc6_irq_rx_disable, .irq_tx_complete = uart_psoc6_irq_tx_complete, .irq_rx_ready = uart_psoc6_irq_rx_ready, .irq_err_enable = uart_psoc6_irq_err_enable, .irq_err_disable = uart_psoc6_irq_err_disable, .irq_is_pending = uart_psoc6_irq_is_pending, .irq_update = uart_psoc6_irq_update, .irq_callback_set = uart_psoc6_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define CY_PSOC6_UART_IRQ_FUNC(n) \ static void cy_psoc6_uart##n##_irq_config(const struct device *port) \ { \ CY_PSOC6_DT_INST_NVIC_INSTALL(n, \ uart_psoc6_isr); \ }; #define CY_PSOC6_UART_IRQ_SET_FUNC(n) \ .irq_config_func = cy_psoc6_uart##n##_irq_config #define CY_PSOC6_UART_DECL_DATA(n) \ static struct cypress_psoc6_data cy_psoc6_uart##n##_data = { 0 }; #define CY_PSOC6_UART_DECL_DATA_PTR(n) &cy_psoc6_uart##n##_data #else #define CY_PSOC6_UART_IRQ_FUNC(n) #define CY_PSOC6_UART_IRQ_SET_FUNC(n) #define CY_PSOC6_UART_DECL_DATA(n) #define CY_PSOC6_UART_DECL_DATA_PTR(n) NULL #endif #define CY_PSOC6_UART_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ CY_PSOC6_UART_DECL_DATA(n) \ CY_PSOC6_UART_IRQ_FUNC(n) \ static const struct cypress_psoc6_config cy_psoc6_uart##n##_config = { \ .base = (CySCB_Type *)DT_INST_REG_ADDR(n), \ .periph_id = DT_INST_PROP(n, peripheral_id), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ \ CY_PSOC6_UART_IRQ_SET_FUNC(n) \ }; \ DEVICE_DT_INST_DEFINE(n, &uart_psoc6_init, NULL, \ 
CY_PSOC6_UART_DECL_DATA_PTR(n), \ &cy_psoc6_uart##n##_config, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_psoc6_driver_api); DT_INST_FOREACH_STATUS_OKAY(CY_PSOC6_UART_INIT) ```
/content/code_sandbox/drivers/serial/uart_psoc6.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,941
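For the interrupt-driven hooks the driver above implements, the usual application pattern is to register a callback with `uart_irq_callback_user_data_set()`, enable the RX interrupt, and drain the FIFO from the callback after calling `uart_irq_update()`. The sketch below follows that pattern and only counts received bytes; a real application would push them into a ring buffer or message queue.

```c
/* Illustrative sketch of interrupt-driven reception against the generic
 * irq_*/fifo_* hooks implemented above. The callback only counts bytes.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>

static volatile uint32_t rx_total;

static void serial_cb(const struct device *dev, void *user_data)
{
	uint8_t buf[16];

	/* uart_irq_update() must be called once per ISR before the ready checks */
	while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
		if (!uart_irq_rx_ready(dev)) {
			break;
		}

		int n = uart_fifo_read(dev, buf, sizeof(buf));

		if (n > 0) {
			rx_total += n;
		}
	}
}

void start_interrupt_rx(const struct device *uart)
{
	uart_irq_callback_user_data_set(uart, serial_cb, NULL);
	uart_irq_rx_enable(uart);
}
```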
```c
/*
 *
 */

#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>

#include <string.h>

#define DT_DRV_COMPAT snps_hostlink_uart

/* Only supported by HW and nSIM targets */
BUILD_ASSERT(!IS_ENABLED(CONFIG_QEMU_TARGET));
/* Only supported by ARC targets */
BUILD_ASSERT(IS_ENABLED(CONFIG_ARC));

#define HL_SYSCALL_OPEN		0
#define HL_SYSCALL_CLOSE	1
#define HL_SYSCALL_READ		2
#define HL_SYSCALL_WRITE	3
#define HL_SYSCALL_LSEEK	4
#define HL_SYSCALL_UNLINK	5
#define HL_SYSCALL_ISATTY	6
#define HL_SYSCALL_TMPNAM	7
#define HL_SYSCALL_GETENV	8
#define HL_SYSCALL_CLOCK	9
#define HL_SYSCALL_TIME		10
#define HL_SYSCALL_RENAME	11
#define HL_SYSCALL_ARGC		12
#define HL_SYSCALL_ARGV		13
#define HL_SYSCALL_RETCODE	14
#define HL_SYSCALL_ACCESS	15
#define HL_SYSCALL_GETPID	16
#define HL_SYSCALL_GETCWD	17
#define HL_SYSCALL_USER		18

#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif /* __noinline */

#define HL_VERSION	1

/* "No message here" mark. */
#define HL_NOADDRESS	0xFFFFFFFF

/* TODO: if we want to carve out some additional space we can use the actual maximum processor
 * cache line size here (i.e. 128)
 */
#define HL_MAX_DCACHE_LINE	256

/* Hostlink gateway structure. */
struct hl_hdr {
	uint32_t version;		/* Current version is 1. */
	uint32_t target2host_addr;	/* Packet address from target to host. */
	uint32_t host2target_addr;	/* Packet address from host to target. */
	uint32_t buf_addr;		/* Address for host to write answer. */
	uint32_t payload_size;		/* Buffer size without packet header. */
	uint32_t options;		/* For future use. */
	uint32_t break_to_mon_addr;	/* For future use. */
} __packed;

/* Hostlink packet header. */
struct hl_pkt_hdr {
	uint32_t packet_id;	/* Packet id. Always set to 1 here. */
	uint32_t total_size;	/* Size of packet including header. */
	uint32_t priority;	/* For future use. */
	uint32_t type;		/* For future use. */
	uint32_t checksum;	/* For future use. */
} __packed;

struct hl_packed_int {
	volatile uint16_t type;
	volatile uint16_t size;
	volatile int32_t value;
} __packed;

struct hl_packed_short_buff {
	volatile uint16_t type;
	volatile uint16_t size;
	volatile uint8_t payload_short[4];
} __packed;

BUILD_ASSERT(sizeof(struct hl_packed_int) == sizeof(struct hl_packed_short_buff));

struct hl_pkt_write_char_put {
	struct hl_packed_int syscall_nr;
	struct hl_packed_int fd;
	struct hl_packed_short_buff buff;
	struct hl_packed_int nbyte;
} __packed;

struct hl_pkt_write_char_get {
	struct hl_packed_int byte_written;
	struct hl_packed_int host_errno;
} __packed;

#define MAX_PKT_SZ	MAX(sizeof(struct hl_pkt_write_char_put), sizeof(struct hl_pkt_write_char_get))

#define HL_HEADERS_SZ	(sizeof(struct hl_hdr) + sizeof(struct hl_pkt_hdr))

BUILD_ASSERT(HL_HEADERS_SZ + MAX_PKT_SZ < HL_MAX_DCACHE_LINE);

union payload_u {
	struct hl_pkt_write_char_put pkt_write_char_put;
	struct hl_pkt_write_char_get pkt_write_char_get;
	char reserved[HL_MAX_DCACHE_LINE - HL_HEADERS_SZ];
} __packed;

BUILD_ASSERT(sizeof(union payload_u) % 4 == 0);

/* Main hostlink structure. */
struct hl {
	/* General hostlink information. */
	volatile struct hl_hdr hdr;
	/* Start of the hostlink buffer. */
	volatile struct hl_pkt_hdr pkt_hdr;
	/* Payload buffer */
	volatile union payload_u payload;
} __aligned(HL_MAX_DCACHE_LINE) __packed;

/* In general we must exactly fit into one or multiple cache lines as we shouldn't share the
 * hostlink buffer (which is uncached) with any cached data.
 */
BUILD_ASSERT(sizeof(struct hl) % HL_MAX_DCACHE_LINE == 0);

/* However, with the currently supported functionality we fit into one MAX cache line. If we add
 * features which require a bigger payload buffer this might no longer be true.
 */
BUILD_ASSERT(sizeof(struct hl) == HL_MAX_DCACHE_LINE);

/* Main structure. Do not rename it, as the nSIM simulator / MDB debugger looks for the
 * '__HOSTLINK__' symbol. We need to keep it initialized so it won't be put into BSS (so we won't
 * write to it with regular cached accesses).
 */
volatile struct hl __HOSTLINK__ = {
	.hdr = {
		.version = HL_VERSION,
		.target2host_addr = HL_NOADDRESS
	}
};

BUILD_ASSERT(sizeof(__HOSTLINK__) % HL_MAX_DCACHE_LINE == 0);

#if defined(__CCAC__)
#define HL_HAS_C_ACCESSORS 0
#elif defined(CONFIG_ISA_ARCV3)
#define HL_HAS_C_ACCESSORS 0
#else
#define HL_HAS_C_ACCESSORS 1
#endif

#if HL_HAS_C_ACCESSORS

#ifndef __uncached
#define __uncached __attribute__((uncached))
#endif /* __uncached */

static inline void hl_write32(volatile void *addr, uint32_t val)
{
	*(volatile __uncached uint32_t *)addr = val;
}

static inline void hl_write16(volatile void *addr, uint16_t val)
{
	*(volatile __uncached uint16_t *)addr = val;
}

static inline void hl_write8(volatile void *addr, uint8_t val)
{
	*(volatile __uncached uint8_t *)addr = val;
}

static inline uint32_t hl_read32(volatile void *addr)
{
	return *(volatile __uncached uint32_t *)addr;
}

static inline uint16_t hl_read16(volatile void *addr)
{
	return *(volatile __uncached uint16_t *)addr;
}

#else

static inline void hl_write32(volatile void *addr, uint32_t val)
{
	__asm__ __volatile__("st.di %0, [%1]" :: "r" (val), "r" (addr) : "memory");
}

static inline void hl_write16(volatile void *addr, uint16_t val)
{
	/* Half-word store: sth.di, not stb.di. */
	__asm__ __volatile__("sth.di %0, [%1]" :: "r" (val), "r" (addr) : "memory");
}

static inline void hl_write8(volatile void *addr, uint8_t val)
{
	/* Byte store: stb.di, not sth.di. */
	__asm__ __volatile__("stb.di %0, [%1]" :: "r" (val), "r" (addr) : "memory");
}

static inline uint32_t hl_read32(volatile void *addr)
{
	uint32_t w;

	__asm__ __volatile__("ld.di %0, [%1]" : "=r" (w) : "r" (addr) : "memory");

	return w;
}

static inline uint16_t hl_read16(volatile void *addr)
{
	uint16_t w;

	/* Half-word load: ldh.di avoids reading past the 16-bit field. */
	__asm__ __volatile__("ldh.di %0, [%1]" : "=r" (w) : "r" (addr) : "memory");

	return w;
}

#endif /* HL_HAS_C_ACCESSORS */

/* Get hostlink payload size (iochunk + reserved space). */
static uint32_t hl_payload_size(void)
{
	return sizeof(__HOSTLINK__.payload);
}

#define ALIGN(x, y) (((x) + ((y) - 1)) & ~((y) - 1))

/* Fill hostlink packet header. */
static void hl_pkt_init(volatile struct hl_pkt_hdr *pkt, int size)
{
	hl_write32(&pkt->packet_id, 1);
	hl_write32(&pkt->total_size, ALIGN(size, 4) + sizeof(struct hl_pkt_hdr));
	hl_write32(&pkt->priority, 0);
	hl_write32(&pkt->type, 0);
	hl_write32(&pkt->checksum, 0);
}

/* Send hostlink packet to the host. */
static void hl_static_send(size_t payload_used)
{
	/* We are OK to cast the pointer to uint32_t even on 64-bit platforms as we only support
	 * building Zephyr for 64-bit ARCv3 within the lower 4GiB. Still we need to cast via
	 * uintptr_t to avoid compiler warnings.
	 */
	uint32_t buf_addr = (uint32_t)(uintptr_t)(&__HOSTLINK__.pkt_hdr);

	hl_pkt_init(&__HOSTLINK__.pkt_hdr, payload_used);

	hl_write32(&__HOSTLINK__.hdr.buf_addr, buf_addr);
	hl_write32(&__HOSTLINK__.hdr.payload_size, hl_payload_size());
	hl_write32(&__HOSTLINK__.hdr.host2target_addr, HL_NOADDRESS);
	hl_write32(&__HOSTLINK__.hdr.version, HL_VERSION);
	hl_write32(&__HOSTLINK__.hdr.options, 0);
	hl_write32(&__HOSTLINK__.hdr.break_to_mon_addr, 0);

	compiler_barrier();

	/* This tells the debugger we have a command. It is the responsibility of the debugger to
	 * set this back to HL_NOADDRESS after receiving the packet.
	 * Please note that we don't wait here because some implementations use the
	 * hl_blockedPeek() function as a signal that we have sent a message.
	 */
	hl_write32(&__HOSTLINK__.hdr.target2host_addr, buf_addr);

	compiler_barrier();
}

/*
 * Wait for the host response.
 * The hl_blockedPeek() symbol is used by the simulator as a message signal.
 */
static void __noinline _hl_blockedPeek(void)
{
	while (hl_read32(&__HOSTLINK__.hdr.host2target_addr) == HL_NOADDRESS) {
		/* TODO: Timeout. */
	}
}

static void hl_static_recv(void)
{
	compiler_barrier();
	_hl_blockedPeek();
	compiler_barrier();
}

/* Mark hostlink buffer as "No message here". */
static void hl_delete(void)
{
	hl_write32(&__HOSTLINK__.hdr.target2host_addr, HL_NOADDRESS);
}

/* Parameter types. */
#define PAT_CHAR	1
#define PAT_SHORT	2
#define PAT_INT		3
#define PAT_STRING	4
/* For future use. */
#define PAT_INT64	5

static void hl_static_pack_int(volatile struct hl_packed_int *pack, int32_t value)
{
	hl_write16(&pack->type, PAT_INT);
	hl_write16(&pack->size, 4);
	hl_write32(&pack->value, value);
}

static void hl_static_pack_char(volatile struct hl_packed_short_buff *pack, unsigned char c)
{
	hl_write16(&pack->type, PAT_STRING);
	hl_write16(&pack->size, 1);
	hl_write8(&pack->payload_short, c);
}

static int hl_static_unpack_int(volatile struct hl_packed_int *pack, int32_t *value)
{
	uint16_t type = hl_read16(&pack->type);
	uint16_t size = hl_read16(&pack->size);

	if (type != PAT_INT) {
		return -1;
	}

	if (size != 4) {
		return -1;
	}

	*value = hl_read32(&pack->value);

	return 0;
}

static inline int32_t hl_write_char(int fd, const char c)
{
	/*
	 * Format:
	 * in, int -> syscall (HL_SYSCALL_WRITE)
	 * in, int -> file descriptor
	 * in, ptr -> buffer
	 * in, int -> bytes number
	 * out, int -> bytes written
	 * out, int -> host errno
	 */
	hl_static_pack_int(&__HOSTLINK__.payload.pkt_write_char_put.syscall_nr, HL_SYSCALL_WRITE);
	hl_static_pack_int(&__HOSTLINK__.payload.pkt_write_char_put.fd, fd);
	hl_static_pack_char(&__HOSTLINK__.payload.pkt_write_char_put.buff, c);
	hl_static_pack_int(&__HOSTLINK__.payload.pkt_write_char_put.nbyte, 1);

	hl_static_send(sizeof(struct hl_pkt_write_char_put));
	hl_static_recv();

	int32_t bwr = 0;
	int ret = hl_static_unpack_int(&__HOSTLINK__.payload.pkt_write_char_get.byte_written, &bwr);

	/* We can get the host errno here with:
	 * hl_static_unpack_int(&__HOSTLINK__.payload.pkt_write_char_get.host_errno, &host_errno);
	 * but we don't need it for UART emulation.
	 */

	if (bwr <= 0) {
		ret = -1;
	}

	hl_delete();

	return ret;
}

/**
 * @brief Poll the device for input.
 *
 * @param dev UART device struct
 * @param c Pointer to character
 *
 * @return 0 if a character arrived, -1 if the input buffer is empty.
 */
static int uart_hostlink_poll_in(const struct device *dev, unsigned char *c)
{
	ARG_UNUSED(dev);

	/* We plan to use hostlink for logging, so there is not much sense in a poll_in
	 * implementation.
	 */
	return -1;
}

/**
 * @brief Output a character in polled mode.
 *
 * @param dev UART device struct
 * @param c Character to send
 */
static void uart_hostlink_poll_out(const struct device *dev, unsigned char c)
{
	ARG_UNUSED(dev);

	hl_write_char(1, c);
}

static const struct uart_driver_api uart_hostlink_driver_api = {
	.poll_in = uart_hostlink_poll_in,
	.poll_out = uart_hostlink_poll_out,
};

DEVICE_DT_DEFINE(DT_NODELABEL(hostlink), NULL, NULL, NULL, NULL, PRE_KERNEL_1,
		 CONFIG_SERIAL_INIT_PRIORITY, &uart_hostlink_driver_api);
```
/content/code_sandbox/drivers/serial/uart_hostlink.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,004
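For context on how the driver above is exercised, the sketch below shows the application-side view: every byte pushed through Zephyr's polled UART API ends up in hl_write_char(), i.e. one HL_SYSCALL_WRITE round trip through the __HOSTLINK__ buffer per character. This is only a minimal illustration, assuming an ARC target whose devicetree actually provides the `hostlink` node referenced by DEVICE_DT_DEFINE() above; the message string and error handling are illustrative, not part of the driver.

```c
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/uart.h>

int main(void)
{
	/* Resolves only if the board devicetree defines a node labelled 'hostlink' (assumption). */
	const struct device *hl_uart = DEVICE_DT_GET(DT_NODELABEL(hostlink));

	if (!device_is_ready(hl_uart)) {
		return -1;
	}

	const char *msg = "hello over hostlink\n";

	/* Each character triggers one hostlink send/receive handshake with the host debugger. */
	for (const char *p = msg; *p != '\0'; p++) {
		uart_poll_out(hl_uart, *p);
	}

	return 0;
}
```

In practice the node would more likely be selected as the zephyr,console chosen node so that printk() and the log subsystem take this path automatically, rather than the application calling uart_poll_out() directly.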
```unknown
# Cypress SCB[UART] configuration

config UART_PSOC6
	bool "PSoC-6 MCU SCB serial driver"
	default y
	depends on DT_HAS_CYPRESS_PSOC6_UART_ENABLED
	select SERIAL_HAS_DRIVER
	select SERIAL_SUPPORT_INTERRUPT
	select USE_INFINEON_UART
	select PINCTRL
	help
	  This option enables the SCB[UART] driver for PSoC-6 SoC family.
```
/content/code_sandbox/drivers/serial/Kconfig.psoc6
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
90
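Since UART_PSOC6 defaults to y whenever DT_HAS_CYPRESS_PSOC6_UART_ENABLED is set, application code normally does not reference the driver directly; it just uses the generic UART API on whichever SCB instance the board devicetree exposes. The snippet below is a minimal, hedged sketch of that usage, assuming a PSoC-6 board whose devicetree marks one cypress,psoc6-uart node as the zephyr,console chosen node; nothing in it is specific to this Kconfig entry.

```c
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>

int main(void)
{
	/* Assumes the board devicetree sets 'zephyr,console' to an enabled SCB[UART] node. */
	const struct device *uart = DEVICE_DT_GET(DT_CHOSEN(zephyr_console));
	unsigned char c;

	if (!device_is_ready(uart)) {
		return -1;
	}

	/* Simple polled echo loop: uart_poll_in() returns 0 only when a character is available. */
	while (1) {
		if (uart_poll_in(uart, &c) == 0) {
			uart_poll_out(uart, c);
		} else {
			k_msleep(1);
		}
	}

	return 0;
}
```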